repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
rh-marketingops/dwm | dwm/helpers.py | _RunUserDefinedFunctions_ | python | def _RunUserDefinedFunctions_(config, data, histObj, position, namespace=__name__):
udfConfig = config['userDefinedFunctions']
if position in udfConfig:
posConfig = udfConfig[position]
for udf in posConfig.keys():
posConfigUDF = posConfig[udf]
data, histObj = getattr(sys.modules[namespace], posConfigUDF)(data=data, histObj=histObj)
return data, histObj | Return a single updated data record and history object after running user-defined functions
:param dict config: DWM configuration (see DataDictionary)
:param dict data: single record (dictionary) to which user-defined functions should be applied
:param dict histObj: History object to which changes should be appended
:param string position: position name of which function set from config should be run
:param namespace: namespace of current working script; must be passed if using user-defined functions | train | https://github.com/rh-marketingops/dwm/blob/66c7d18db857afbe5d574478ceaaad6159ae7469/dwm/helpers.py#L63-L86 | null | import re
import sys
## misc fcns that make everything else go smooth
def _CollectHistory_(lookupType, fromVal, toVal, using={}, pattern=''):
"""
Return a dictionary detailing what, if any, change was made to a record field
:param string lookupType: what cleaning rule made the change; one of: genericLookup, genericRegex, fieldSpecificLookup, fieldSpecificRegex, normLookup, normRegex, normIncludes, deriveValue, copyValue, deriveRegex
:param string fromVal: previous field value
:param string toVal: new string value
:param dict using: field values used to derive new values; only applicable for deriveValue, copyValue, deriveRegex
:param string pattern: which regex pattern was matched to make the change; only applicable for genericRegex, fieldSpecificRegex, deriveRegex
"""
histObj = {}
if fromVal != toVal:
histObj[lookupType] = {"from": fromVal, "to": toVal}
if lookupType in ['deriveValue', 'deriveRegex', 'copyValue', 'normIncludes', 'deriveIncludes'] and using!='':
histObj[lookupType]["using"] = using
if lookupType in ['genericRegex', 'fieldSpecificRegex', 'normRegex', 'deriveRegex'] and pattern!='':
histObj[lookupType]["pattern"] = pattern
return histObj
def _CollectHistoryAgg_(contactHist, fieldHistObj, fieldName):
"""
Return updated history dictionary with new field change
:param dict contactHist: Existing contact history dictionary
:param dict fieldHistObj: Output of _CollectHistory_
:param string fieldName: field name
"""
if fieldHistObj!={}:
if fieldName not in contactHist.keys():
contactHist[fieldName] = {}
for lookupType in fieldHistObj.keys():
contactHist[fieldName][lookupType] = fieldHistObj[lookupType]
return contactHist
def _DataClean_(fieldVal):
"""
Return 'cleaned' value to standardize lookups (convert to uppercase, remove leading/trailing whitespace, carriage returns, line breaks, and unprintable characters)
:param string fieldVal: field value
"""
fieldValNew = fieldVal
fieldValNew = fieldValNew.upper()
fieldValNew = fieldValNew.strip()
fieldValNew = re.sub("[\s\n\t]+", " ", fieldValNew)
return fieldValNew
|
rh-marketingops/dwm | dwm/cleaning.py | DataLookup | python | def DataLookup(fieldVal, db, lookupType, fieldName, histObj={}):
if lookupType == 'genericLookup':
lookup_dict = {"find": _DataClean_(fieldVal)}
elif lookupType in ['fieldSpecificLookup', 'normLookup']:
lookup_dict = {"fieldName": fieldName, "find": _DataClean_(fieldVal)}
else:
raise ValueError("Invalid lookupType")
field_val_new = fieldVal
coll = db[lookupType]
l_val = coll.find_one(lookup_dict, ['replace'])
if l_val:
field_val_new = l_val['replace'] if 'replace' in l_val else ''
change = _CollectHistory_(lookupType=lookupType, fromVal=fieldVal,
toVal=field_val_new)
hist_obj_upd = _CollectHistoryAgg_(contactHist=histObj,
fieldHistObj=change,
fieldName=fieldName)
return field_val_new, hist_obj_upd | Return new field value based on single-value lookup against MongoDB
:param string fieldVal: input value to lookup
:param MongoClient db: MongoClient instance connected to MongoDB
:param string lookupType: Type of lookup to perform/MongoDB collection name.
One of 'genericLookup', 'fieldSpecificLookup', 'normLookup'
:param string fieldName: Field name to query against
:param dict histObj: History object to which changes should be appended | train | https://github.com/rh-marketingops/dwm/blob/66c7d18db857afbe5d574478ceaaad6159ae7469/dwm/cleaning.py#L16-L51 | [
"def _CollectHistory_(lookupType, fromVal, toVal, using={}, pattern=''):\n \"\"\"\n Return a dictionary detailing what, if any, change was made to a record field\n\n :param string lookupType: what cleaning rule made the change; one of: genericLookup, genericRegex, fieldSpecificLookup, fieldSpecificRegex, n... | """
These set of functions are responsible for determining what the new value of a
field should be, in most cases based on a lookup against MongoDB.
"""
import re
import warnings
from collections import OrderedDict
from .helpers import _CollectHistory_
from .helpers import _CollectHistoryAgg_
from .helpers import _DataClean_
def IncludesLookup(fieldVal, lookupType, db, fieldName, deriveFieldName='',
deriveInput={}, histObj={}, overwrite=False,
blankIfNoMatch=False):
"""
Return new field value based on whether or not original value includes AND
excludes all words in a comma-delimited list queried from MongoDB
:param string fieldVal: input value to lookup
:param string lookupType: Type of lookup to perform/MongoDB collection name.
One of 'normIncludes', 'deriveIncludes'
:param MongoClient db: MongoClient instance connected to MongoDB
:param string fieldName: Field name to query against
:param string deriveFieldName: Field name from which to derive value
:param dict deriveInput: Values to perform lookup against:
{"deriveFieldName": "deriveVal1"}
:param dict histObj: History object to which changes should be appended
:param bool overwrite: Should an existing field value be replaced
:param bool blankIfNoMatch: Should field value be set to blank if
no match is found
"""
lookup_dict = {
'fieldName': fieldName
}
if lookupType == 'normIncludes':
field_val_clean = _DataClean_(fieldVal)
elif lookupType == 'deriveIncludes':
if deriveFieldName == '' or deriveInput == {}:
raise ValueError("for 'deriveIncludes' must specify both \
'deriveFieldName' and 'deriveInput'")
lookup_dict['deriveFieldName'] = deriveFieldName
field_val_clean = _DataClean_(deriveInput[list(deriveInput.keys())[0]])
else:
raise ValueError("Invalid lookupType")
field_val_new = fieldVal
check_match = False
using = {}
coll = db[lookupType]
inc_val = coll.find(lookup_dict, ['includes', 'excludes', 'begins', 'ends',
'replace'])
if inc_val and (lookupType == 'normIncludes' or
(lookupType == 'deriveIncludes' and
(overwrite or fieldVal == ''))):
for row in inc_val:
try:
if (row['includes'] != '' or
row['excludes'] != '' or
row['begins'] != '' or
row['ends'] != ''):
if all((a in field_val_clean)
for a in row['includes'].split(",")):
if all((b not in field_val_clean)
for b in row['excludes'].split(",")) \
or row['excludes'] == '':
if field_val_clean.startswith(row['begins']):
if field_val_clean.endswith(row['ends']):
field_val_new = row['replace']
if lookupType == 'deriveIncludes':
using[deriveFieldName] = deriveInput
using['includes'] = row['includes']
using['excludes'] = row['excludes']
using['begins'] = row['begins']
using['ends'] = row['ends']
check_match = True
break
except KeyError as Key_error_obj:
warnings.warn('schema error', Key_error_obj)
if inc_val:
inc_val.close()
if (field_val_new == fieldVal and blankIfNoMatch and
lookupType == 'deriveIncludes'):
field_val_new = ''
using['blankIfNoMatch'] = 'no match found'
change = _CollectHistory_(lookupType=lookupType, fromVal=fieldVal,
toVal=field_val_new, using=using)
histObjUpd = _CollectHistoryAgg_(contactHist=histObj, fieldHistObj=change,
fieldName=fieldName)
return field_val_new, histObjUpd, check_match
def RegexLookup(fieldVal, db, fieldName, lookupType, histObj={}):
"""
Return a new field value based on match against regex queried from MongoDB
:param string fieldVal: input value to lookup
:param MongoClient db: MongoClient instance connected to MongoDB
:param string lookupType: Type of lookup to perform/MongoDB collection name.
One of 'genericRegex', 'fieldSpecificRegex', 'normRegex'
:param string fieldName: Field name to query against
:param dict histObj: History object to which changes should be appended
"""
if lookupType == 'genericRegex':
lookup_dict = {}
elif lookupType in ['fieldSpecificRegex', 'normRegex']:
lookup_dict = {"fieldName": fieldName}
else:
raise ValueError("Invalid type")
field_val_new = fieldVal
pattern = ''
coll = db[lookupType]
re_val = coll.find(lookup_dict, ['pattern', 'replace'])
for row in re_val:
try:
match = re.match(row['pattern'], _DataClean_(field_val_new),
flags=re.IGNORECASE)
if match:
if 'replace' in row:
field_val_new = re.sub(row['pattern'], row['replace'],
_DataClean_(field_val_new),
flags=re.IGNORECASE)
else:
field_val_new = re.sub(row['pattern'], '',
_DataClean_(field_val_new),
flags=re.IGNORECASE)
pattern = row['pattern']
break
except KeyError as Key_error_obj:
warnings.warn('schema error', Key_error_obj)
if re_val:
re_val.close()
change = _CollectHistory_(lookupType=lookupType, fromVal=fieldVal,
toVal=field_val_new, pattern=pattern)
histObjUpd = _CollectHistoryAgg_(contactHist=histObj, fieldHistObj=change,
fieldName=fieldName)
return field_val_new, histObjUpd
def DeriveDataLookup(fieldName, db, deriveInput, overwrite=True, fieldVal='',
histObj={}, blankIfNoMatch=False):
"""
Return new field value based on single or multi-value lookup against MongoDB
:param string fieldName: Field name to query against
:param MongoClient db: MongoClient instance connected to MongoDB
:param dict deriveInput: Values to perform lookup against:
{"lookupField1": "lookupVal1", "lookupField2": "lookupVal2"}
:param bool overwrite: Should an existing field value be replaced
:param string fieldVal: Current field value
:param dict histObj: History object to which changes should be appended
:param bool blankIfNoMatch: Should field value be set to blank
if no match is found
"""
lookup_vals = OrderedDict()
for val in sorted(deriveInput.keys()):
lookup_vals[val] = _DataClean_(deriveInput[val])
lookup_dict = {
'fieldName': fieldName,
'lookupVals': lookup_vals
}
coll = db['deriveValue']
l_val = coll.find_one(lookup_dict, ['value'])
field_val_new = fieldVal
derive_using = deriveInput
# If match found return True else False
check_match = True if l_val else False
if l_val and (overwrite or (fieldVal == '')):
try:
field_val_new = l_val['value']
except KeyError as Key_error_obj:
warnings.warn('schema error', Key_error_obj)
elif blankIfNoMatch and not l_val:
field_val_new = ''
derive_using = {'blankIfNoMatch': 'no match found'}
change = _CollectHistory_(lookupType='deriveValue', fromVal=fieldVal,
toVal=field_val_new, using=derive_using)
hist_obj_upd = _CollectHistoryAgg_(contactHist=histObj, fieldHistObj=change,
fieldName=fieldName)
return field_val_new, hist_obj_upd, check_match
def DeriveDataCopyValue(fieldName, deriveInput, overwrite, fieldVal, histObj={}):
"""
Return new value based on value from another field
:param string fieldName: Field name to query against
:param dict deriveInput: Values to perform lookup against:
{"copyField1": "copyVal1"}
:param bool overwrite: Should an existing field value be replaced
:param string fieldVal: Current field value
:param dict histObj: History object to which changes should be appended
"""
if len(deriveInput) > 1:
raise Exception("more than one field/value in deriveInput")
field_val_new = fieldVal
row = list(deriveInput.keys())[0]
if deriveInput[row] != '' and (overwrite or (fieldVal == '')):
field_val_new = deriveInput[row]
check_match = True
else:
check_match = False
change = _CollectHistory_(lookupType='copyValue', fromVal=fieldVal,
toVal=field_val_new, using=deriveInput)
hist_obj_upd = _CollectHistoryAgg_(contactHist=histObj, fieldHistObj=change,
fieldName=fieldName)
return field_val_new, hist_obj_upd, check_match
def DeriveDataRegex(fieldName, db, deriveInput, overwrite, fieldVal, histObj={},
blankIfNoMatch=False):
"""
Return a new field value based on match (of another field) against regex
queried from MongoDB
:param string fieldName: Field name to query against
:param MongoClient db: MongoClient instance connected to MongoDB
:param dict deriveInput: Values to perform lookup against:
{"lookupField1": "lookupVal1"}
:param bool overwrite: Should an existing field value be replaced
:param string fieldVal: Current field value
:param dict histObj: History object to which changes should be appended
:param bool blankIfNoMatch: Should field value be set to blank
if no match is found
"""
if len(deriveInput) > 1:
raise Exception("more than one value in deriveInput")
field_val_new = fieldVal
check_match = False
# derive_using = deriveInput
row = list(deriveInput.keys())[0]
pattern = ''
if deriveInput[row] != '' and (overwrite or (fieldVal == '')):
lookup_dict = {
'deriveFieldName': row,
'fieldName': fieldName
}
coll = db['deriveRegex']
re_val = coll.find(lookup_dict, ['pattern', 'replace'])
for l_val in re_val:
try:
match = re.match(l_val['pattern'],
_DataClean_(deriveInput[row]),
flags=re.IGNORECASE)
if match:
field_val_new = re.sub(l_val['pattern'], l_val['replace'],
_DataClean_(deriveInput[row]),
flags=re.IGNORECASE)
pattern = l_val['pattern']
check_match = True
break
except KeyError as key_error_obj:
warnings.warn('schema error', key_error_obj)
if re_val:
re_val.close()
if field_val_new == fieldVal and blankIfNoMatch:
field_val_new = ''
pattern = 'no matching pattern'
# derive_using = {"blankIfNoMatch": "no match found"}
change = _CollectHistory_(lookupType='deriveRegex', fromVal=fieldVal,
toVal=field_val_new, using=deriveInput,
pattern=pattern)
hist_obj_upd = _CollectHistoryAgg_(contactHist=histObj, fieldHistObj=change,
fieldName=fieldName)
return field_val_new, hist_obj_upd, check_match
|
rh-marketingops/dwm | dwm/cleaning.py | IncludesLookup | python | def IncludesLookup(fieldVal, lookupType, db, fieldName, deriveFieldName='',
deriveInput={}, histObj={}, overwrite=False,
blankIfNoMatch=False):
lookup_dict = {
'fieldName': fieldName
}
if lookupType == 'normIncludes':
field_val_clean = _DataClean_(fieldVal)
elif lookupType == 'deriveIncludes':
if deriveFieldName == '' or deriveInput == {}:
raise ValueError("for 'deriveIncludes' must specify both \
'deriveFieldName' and 'deriveInput'")
lookup_dict['deriveFieldName'] = deriveFieldName
field_val_clean = _DataClean_(deriveInput[list(deriveInput.keys())[0]])
else:
raise ValueError("Invalid lookupType")
field_val_new = fieldVal
check_match = False
using = {}
coll = db[lookupType]
inc_val = coll.find(lookup_dict, ['includes', 'excludes', 'begins', 'ends',
'replace'])
if inc_val and (lookupType == 'normIncludes' or
(lookupType == 'deriveIncludes' and
(overwrite or fieldVal == ''))):
for row in inc_val:
try:
if (row['includes'] != '' or
row['excludes'] != '' or
row['begins'] != '' or
row['ends'] != ''):
if all((a in field_val_clean)
for a in row['includes'].split(",")):
if all((b not in field_val_clean)
for b in row['excludes'].split(",")) \
or row['excludes'] == '':
if field_val_clean.startswith(row['begins']):
if field_val_clean.endswith(row['ends']):
field_val_new = row['replace']
if lookupType == 'deriveIncludes':
using[deriveFieldName] = deriveInput
using['includes'] = row['includes']
using['excludes'] = row['excludes']
using['begins'] = row['begins']
using['ends'] = row['ends']
check_match = True
break
except KeyError as Key_error_obj:
warnings.warn('schema error', Key_error_obj)
if inc_val:
inc_val.close()
if (field_val_new == fieldVal and blankIfNoMatch and
lookupType == 'deriveIncludes'):
field_val_new = ''
using['blankIfNoMatch'] = 'no match found'
change = _CollectHistory_(lookupType=lookupType, fromVal=fieldVal,
toVal=field_val_new, using=using)
histObjUpd = _CollectHistoryAgg_(contactHist=histObj, fieldHistObj=change,
fieldName=fieldName)
return field_val_new, histObjUpd, check_match | Return new field value based on whether or not original value includes AND
excludes all words in a comma-delimited list queried from MongoDB
:param string fieldVal: input value to lookup
:param string lookupType: Type of lookup to perform/MongoDB collection name.
One of 'normIncludes', 'deriveIncludes'
:param MongoClient db: MongoClient instance connected to MongoDB
:param string fieldName: Field name to query against
:param string deriveFieldName: Field name from which to derive value
:param dict deriveInput: Values to perform lookup against:
{"deriveFieldName": "deriveVal1"}
:param dict histObj: History object to which changes should be appended
:param bool overwrite: Should an existing field value be replaced
:param bool blankIfNoMatch: Should field value be set to blank if
no match is found | train | https://github.com/rh-marketingops/dwm/blob/66c7d18db857afbe5d574478ceaaad6159ae7469/dwm/cleaning.py#L54-L157 | [
"def _CollectHistory_(lookupType, fromVal, toVal, using={}, pattern=''):\n \"\"\"\n Return a dictionary detailing what, if any, change was made to a record field\n\n :param string lookupType: what cleaning rule made the change; one of: genericLookup, genericRegex, fieldSpecificLookup, fieldSpecificRegex, n... | """
These set of functions are responsible for determining what the new value of a
field should be, in most cases based on a lookup against MongoDB.
"""
import re
import warnings
from collections import OrderedDict
from .helpers import _CollectHistory_
from .helpers import _CollectHistoryAgg_
from .helpers import _DataClean_
def DataLookup(fieldVal, db, lookupType, fieldName, histObj={}):
"""
Return new field value based on single-value lookup against MongoDB
:param string fieldVal: input value to lookup
:param MongoClient db: MongoClient instance connected to MongoDB
:param string lookupType: Type of lookup to perform/MongoDB collection name.
One of 'genericLookup', 'fieldSpecificLookup', 'normLookup'
:param string fieldName: Field name to query against
:param dict histObj: History object to which changes should be appended
"""
if lookupType == 'genericLookup':
lookup_dict = {"find": _DataClean_(fieldVal)}
elif lookupType in ['fieldSpecificLookup', 'normLookup']:
lookup_dict = {"fieldName": fieldName, "find": _DataClean_(fieldVal)}
else:
raise ValueError("Invalid lookupType")
field_val_new = fieldVal
coll = db[lookupType]
l_val = coll.find_one(lookup_dict, ['replace'])
if l_val:
field_val_new = l_val['replace'] if 'replace' in l_val else ''
change = _CollectHistory_(lookupType=lookupType, fromVal=fieldVal,
toVal=field_val_new)
hist_obj_upd = _CollectHistoryAgg_(contactHist=histObj,
fieldHistObj=change,
fieldName=fieldName)
return field_val_new, hist_obj_upd
def RegexLookup(fieldVal, db, fieldName, lookupType, histObj={}):
"""
Return a new field value based on match against regex queried from MongoDB
:param string fieldVal: input value to lookup
:param MongoClient db: MongoClient instance connected to MongoDB
:param string lookupType: Type of lookup to perform/MongoDB collection name.
One of 'genericRegex', 'fieldSpecificRegex', 'normRegex'
:param string fieldName: Field name to query against
:param dict histObj: History object to which changes should be appended
"""
if lookupType == 'genericRegex':
lookup_dict = {}
elif lookupType in ['fieldSpecificRegex', 'normRegex']:
lookup_dict = {"fieldName": fieldName}
else:
raise ValueError("Invalid type")
field_val_new = fieldVal
pattern = ''
coll = db[lookupType]
re_val = coll.find(lookup_dict, ['pattern', 'replace'])
for row in re_val:
try:
match = re.match(row['pattern'], _DataClean_(field_val_new),
flags=re.IGNORECASE)
if match:
if 'replace' in row:
field_val_new = re.sub(row['pattern'], row['replace'],
_DataClean_(field_val_new),
flags=re.IGNORECASE)
else:
field_val_new = re.sub(row['pattern'], '',
_DataClean_(field_val_new),
flags=re.IGNORECASE)
pattern = row['pattern']
break
except KeyError as Key_error_obj:
warnings.warn('schema error', Key_error_obj)
if re_val:
re_val.close()
change = _CollectHistory_(lookupType=lookupType, fromVal=fieldVal,
toVal=field_val_new, pattern=pattern)
histObjUpd = _CollectHistoryAgg_(contactHist=histObj, fieldHistObj=change,
fieldName=fieldName)
return field_val_new, histObjUpd
def DeriveDataLookup(fieldName, db, deriveInput, overwrite=True, fieldVal='',
histObj={}, blankIfNoMatch=False):
"""
Return new field value based on single or multi-value lookup against MongoDB
:param string fieldName: Field name to query against
:param MongoClient db: MongoClient instance connected to MongoDB
:param dict deriveInput: Values to perform lookup against:
{"lookupField1": "lookupVal1", "lookupField2": "lookupVal2"}
:param bool overwrite: Should an existing field value be replaced
:param string fieldVal: Current field value
:param dict histObj: History object to which changes should be appended
:param bool blankIfNoMatch: Should field value be set to blank
if no match is found
"""
lookup_vals = OrderedDict()
for val in sorted(deriveInput.keys()):
lookup_vals[val] = _DataClean_(deriveInput[val])
lookup_dict = {
'fieldName': fieldName,
'lookupVals': lookup_vals
}
coll = db['deriveValue']
l_val = coll.find_one(lookup_dict, ['value'])
field_val_new = fieldVal
derive_using = deriveInput
# If match found return True else False
check_match = True if l_val else False
if l_val and (overwrite or (fieldVal == '')):
try:
field_val_new = l_val['value']
except KeyError as Key_error_obj:
warnings.warn('schema error', Key_error_obj)
elif blankIfNoMatch and not l_val:
field_val_new = ''
derive_using = {'blankIfNoMatch': 'no match found'}
change = _CollectHistory_(lookupType='deriveValue', fromVal=fieldVal,
toVal=field_val_new, using=derive_using)
hist_obj_upd = _CollectHistoryAgg_(contactHist=histObj, fieldHistObj=change,
fieldName=fieldName)
return field_val_new, hist_obj_upd, check_match
def DeriveDataCopyValue(fieldName, deriveInput, overwrite, fieldVal, histObj={}):
"""
Return new value based on value from another field
:param string fieldName: Field name to query against
:param dict deriveInput: Values to perform lookup against:
{"copyField1": "copyVal1"}
:param bool overwrite: Should an existing field value be replaced
:param string fieldVal: Current field value
:param dict histObj: History object to which changes should be appended
"""
if len(deriveInput) > 1:
raise Exception("more than one field/value in deriveInput")
field_val_new = fieldVal
row = list(deriveInput.keys())[0]
if deriveInput[row] != '' and (overwrite or (fieldVal == '')):
field_val_new = deriveInput[row]
check_match = True
else:
check_match = False
change = _CollectHistory_(lookupType='copyValue', fromVal=fieldVal,
toVal=field_val_new, using=deriveInput)
hist_obj_upd = _CollectHistoryAgg_(contactHist=histObj, fieldHistObj=change,
fieldName=fieldName)
return field_val_new, hist_obj_upd, check_match
def DeriveDataRegex(fieldName, db, deriveInput, overwrite, fieldVal, histObj={},
blankIfNoMatch=False):
"""
Return a new field value based on match (of another field) against regex
queried from MongoDB
:param string fieldName: Field name to query against
:param MongoClient db: MongoClient instance connected to MongoDB
:param dict deriveInput: Values to perform lookup against:
{"lookupField1": "lookupVal1"}
:param bool overwrite: Should an existing field value be replaced
:param string fieldVal: Current field value
:param dict histObj: History object to which changes should be appended
:param bool blankIfNoMatch: Should field value be set to blank
if no match is found
"""
if len(deriveInput) > 1:
raise Exception("more than one value in deriveInput")
field_val_new = fieldVal
check_match = False
# derive_using = deriveInput
row = list(deriveInput.keys())[0]
pattern = ''
if deriveInput[row] != '' and (overwrite or (fieldVal == '')):
lookup_dict = {
'deriveFieldName': row,
'fieldName': fieldName
}
coll = db['deriveRegex']
re_val = coll.find(lookup_dict, ['pattern', 'replace'])
for l_val in re_val:
try:
match = re.match(l_val['pattern'],
_DataClean_(deriveInput[row]),
flags=re.IGNORECASE)
if match:
field_val_new = re.sub(l_val['pattern'], l_val['replace'],
_DataClean_(deriveInput[row]),
flags=re.IGNORECASE)
pattern = l_val['pattern']
check_match = True
break
except KeyError as key_error_obj:
warnings.warn('schema error', key_error_obj)
if re_val:
re_val.close()
if field_val_new == fieldVal and blankIfNoMatch:
field_val_new = ''
pattern = 'no matching pattern'
# derive_using = {"blankIfNoMatch": "no match found"}
change = _CollectHistory_(lookupType='deriveRegex', fromVal=fieldVal,
toVal=field_val_new, using=deriveInput,
pattern=pattern)
hist_obj_upd = _CollectHistoryAgg_(contactHist=histObj, fieldHistObj=change,
fieldName=fieldName)
return field_val_new, hist_obj_upd, check_match
|
rh-marketingops/dwm | dwm/cleaning.py | RegexLookup | python | def RegexLookup(fieldVal, db, fieldName, lookupType, histObj={}):
if lookupType == 'genericRegex':
lookup_dict = {}
elif lookupType in ['fieldSpecificRegex', 'normRegex']:
lookup_dict = {"fieldName": fieldName}
else:
raise ValueError("Invalid type")
field_val_new = fieldVal
pattern = ''
coll = db[lookupType]
re_val = coll.find(lookup_dict, ['pattern', 'replace'])
for row in re_val:
try:
match = re.match(row['pattern'], _DataClean_(field_val_new),
flags=re.IGNORECASE)
if match:
if 'replace' in row:
field_val_new = re.sub(row['pattern'], row['replace'],
_DataClean_(field_val_new),
flags=re.IGNORECASE)
else:
field_val_new = re.sub(row['pattern'], '',
_DataClean_(field_val_new),
flags=re.IGNORECASE)
pattern = row['pattern']
break
except KeyError as Key_error_obj:
warnings.warn('schema error', Key_error_obj)
if re_val:
re_val.close()
change = _CollectHistory_(lookupType=lookupType, fromVal=fieldVal,
toVal=field_val_new, pattern=pattern)
histObjUpd = _CollectHistoryAgg_(contactHist=histObj, fieldHistObj=change,
fieldName=fieldName)
return field_val_new, histObjUpd | Return a new field value based on match against regex queried from MongoDB
:param string fieldVal: input value to lookup
:param MongoClient db: MongoClient instance connected to MongoDB
:param string lookupType: Type of lookup to perform/MongoDB collection name.
One of 'genericRegex', 'fieldSpecificRegex', 'normRegex'
:param string fieldName: Field name to query against
:param dict histObj: History object to which changes should be appended | train | https://github.com/rh-marketingops/dwm/blob/66c7d18db857afbe5d574478ceaaad6159ae7469/dwm/cleaning.py#L160-L218 | [
"def _CollectHistory_(lookupType, fromVal, toVal, using={}, pattern=''):\n \"\"\"\n Return a dictionary detailing what, if any, change was made to a record field\n\n :param string lookupType: what cleaning rule made the change; one of: genericLookup, genericRegex, fieldSpecificLookup, fieldSpecificRegex, n... | """
These set of functions are responsible for determining what the new value of a
field should be, in most cases based on a lookup against MongoDB.
"""
import re
import warnings
from collections import OrderedDict
from .helpers import _CollectHistory_
from .helpers import _CollectHistoryAgg_
from .helpers import _DataClean_
def DataLookup(fieldVal, db, lookupType, fieldName, histObj={}):
"""
Return new field value based on single-value lookup against MongoDB
:param string fieldVal: input value to lookup
:param MongoClient db: MongoClient instance connected to MongoDB
:param string lookupType: Type of lookup to perform/MongoDB collection name.
One of 'genericLookup', 'fieldSpecificLookup', 'normLookup'
:param string fieldName: Field name to query against
:param dict histObj: History object to which changes should be appended
"""
if lookupType == 'genericLookup':
lookup_dict = {"find": _DataClean_(fieldVal)}
elif lookupType in ['fieldSpecificLookup', 'normLookup']:
lookup_dict = {"fieldName": fieldName, "find": _DataClean_(fieldVal)}
else:
raise ValueError("Invalid lookupType")
field_val_new = fieldVal
coll = db[lookupType]
l_val = coll.find_one(lookup_dict, ['replace'])
if l_val:
field_val_new = l_val['replace'] if 'replace' in l_val else ''
change = _CollectHistory_(lookupType=lookupType, fromVal=fieldVal,
toVal=field_val_new)
hist_obj_upd = _CollectHistoryAgg_(contactHist=histObj,
fieldHistObj=change,
fieldName=fieldName)
return field_val_new, hist_obj_upd
def IncludesLookup(fieldVal, lookupType, db, fieldName, deriveFieldName='',
deriveInput={}, histObj={}, overwrite=False,
blankIfNoMatch=False):
"""
Return new field value based on whether or not original value includes AND
excludes all words in a comma-delimited list queried from MongoDB
:param string fieldVal: input value to lookup
:param string lookupType: Type of lookup to perform/MongoDB collection name.
One of 'normIncludes', 'deriveIncludes'
:param MongoClient db: MongoClient instance connected to MongoDB
:param string fieldName: Field name to query against
:param string deriveFieldName: Field name from which to derive value
:param dict deriveInput: Values to perform lookup against:
{"deriveFieldName": "deriveVal1"}
:param dict histObj: History object to which changes should be appended
:param bool overwrite: Should an existing field value be replaced
:param bool blankIfNoMatch: Should field value be set to blank if
no match is found
"""
lookup_dict = {
'fieldName': fieldName
}
if lookupType == 'normIncludes':
field_val_clean = _DataClean_(fieldVal)
elif lookupType == 'deriveIncludes':
if deriveFieldName == '' or deriveInput == {}:
raise ValueError("for 'deriveIncludes' must specify both \
'deriveFieldName' and 'deriveInput'")
lookup_dict['deriveFieldName'] = deriveFieldName
field_val_clean = _DataClean_(deriveInput[list(deriveInput.keys())[0]])
else:
raise ValueError("Invalid lookupType")
field_val_new = fieldVal
check_match = False
using = {}
coll = db[lookupType]
inc_val = coll.find(lookup_dict, ['includes', 'excludes', 'begins', 'ends',
'replace'])
if inc_val and (lookupType == 'normIncludes' or
(lookupType == 'deriveIncludes' and
(overwrite or fieldVal == ''))):
for row in inc_val:
try:
if (row['includes'] != '' or
row['excludes'] != '' or
row['begins'] != '' or
row['ends'] != ''):
if all((a in field_val_clean)
for a in row['includes'].split(",")):
if all((b not in field_val_clean)
for b in row['excludes'].split(",")) \
or row['excludes'] == '':
if field_val_clean.startswith(row['begins']):
if field_val_clean.endswith(row['ends']):
field_val_new = row['replace']
if lookupType == 'deriveIncludes':
using[deriveFieldName] = deriveInput
using['includes'] = row['includes']
using['excludes'] = row['excludes']
using['begins'] = row['begins']
using['ends'] = row['ends']
check_match = True
break
except KeyError as Key_error_obj:
warnings.warn('schema error', Key_error_obj)
if inc_val:
inc_val.close()
if (field_val_new == fieldVal and blankIfNoMatch and
lookupType == 'deriveIncludes'):
field_val_new = ''
using['blankIfNoMatch'] = 'no match found'
change = _CollectHistory_(lookupType=lookupType, fromVal=fieldVal,
toVal=field_val_new, using=using)
histObjUpd = _CollectHistoryAgg_(contactHist=histObj, fieldHistObj=change,
fieldName=fieldName)
return field_val_new, histObjUpd, check_match
def DeriveDataLookup(fieldName, db, deriveInput, overwrite=True, fieldVal='',
                     histObj={}, blankIfNoMatch=False):
    """
    Return new field value based on single or multi-value lookup against MongoDB

    :param string fieldName: Field name to query against
    :param MongoClient db: MongoClient instance connected to MongoDB
    :param dict deriveInput: Values to perform lookup against:
        {"lookupField1": "lookupVal1", "lookupField2": "lookupVal2"}
    :param bool overwrite: Should an existing field value be replaced
    :param string fieldVal: Current field value
    :param dict histObj: History object to which changes should be appended
    :param bool blankIfNoMatch: Should field value be set to blank
        if no match is found
    """
    # Clean each lookup value; keys are sorted so the query's 'lookupVals'
    # key order is deterministic regardless of input dict ordering.
    lookup_vals = OrderedDict()

    for val in sorted(deriveInput.keys()):

        lookup_vals[val] = _DataClean_(deriveInput[val])

    lookup_dict = {
        'fieldName': fieldName,
        'lookupVals': lookup_vals
    }

    coll = db['deriveValue']

    l_val = coll.find_one(lookup_dict, ['value'])

    field_val_new = fieldVal

    derive_using = deriveInput

    # True when a matching document was found, regardless of whether the
    # field value was actually replaced (overwrite may be False).
    check_match = bool(l_val)

    if l_val and (overwrite or (fieldVal == '')):

        try:

            field_val_new = l_val['value']

        except KeyError as key_error_obj:
            # BUGFIX: warnings.warn(message, category) requires a Warning
            # subclass as its second argument; passing the exception
            # instance raised TypeError. Embed the error in the message.
            warnings.warn('schema error: {0}'.format(key_error_obj))

    elif blankIfNoMatch and not l_val:

        field_val_new = ''

        derive_using = {'blankIfNoMatch': 'no match found'}

    change = _CollectHistory_(lookupType='deriveValue', fromVal=fieldVal,
                              toVal=field_val_new, using=derive_using)

    hist_obj_upd = _CollectHistoryAgg_(contactHist=histObj, fieldHistObj=change,
                                       fieldName=fieldName)

    return field_val_new, hist_obj_upd, check_match
def DeriveDataCopyValue(fieldName, deriveInput, overwrite, fieldVal, histObj={}):
    """
    Return new value based on value from another field

    :param string fieldName: Field name to query against
    :param dict deriveInput: Values to perform lookup against:
        {"copyField1": "copyVal1"}
    :param bool overwrite: Should an existing field value be replaced
    :param string fieldVal: Current field value
    :param dict histObj: History object to which changes should be appended
    """
    if len(deriveInput) > 1:
        raise Exception("more than one field/value in deriveInput")

    copy_field = list(deriveInput.keys())[0]
    copy_val = deriveInput[copy_field]

    # Copy only when the source field has a value and the target is either
    # empty or allowed to be overwritten.
    if copy_val != '' and (overwrite or fieldVal == ''):
        field_val_new = copy_val
        check_match = True
    else:
        field_val_new = fieldVal
        check_match = False

    change = _CollectHistory_(lookupType='copyValue', fromVal=fieldVal,
                              toVal=field_val_new, using=deriveInput)

    hist_obj_upd = _CollectHistoryAgg_(contactHist=histObj,
                                       fieldHistObj=change,
                                       fieldName=fieldName)

    return field_val_new, hist_obj_upd, check_match
def DeriveDataRegex(fieldName, db, deriveInput, overwrite, fieldVal, histObj={},
                    blankIfNoMatch=False):
    """
    Return a new field value based on match (of another field) against regex
    queried from MongoDB

    :param string fieldName: Field name to query against
    :param MongoClient db: MongoClient instance connected to MongoDB
    :param dict deriveInput: Values to perform lookup against:
        {"lookupField1": "lookupVal1"}
    :param bool overwrite: Should an existing field value be replaced
    :param string fieldVal: Current field value
    :param dict histObj: History object to which changes should be appended
    :param bool blankIfNoMatch: Should field value be set to blank
        if no match is found
    """
    if len(deriveInput) > 1:
        raise Exception("more than one value in deriveInput")

    field_val_new = fieldVal

    check_match = False

    row = list(deriveInput.keys())[0]

    pattern = ''

    if deriveInput[row] != '' and (overwrite or (fieldVal == '')):

        lookup_dict = {
            'deriveFieldName': row,
            'fieldName': fieldName
        }

        coll = db['deriveRegex']

        re_val = coll.find(lookup_dict, ['pattern', 'replace'])

        # Hoisted out of the loop: the cleaned source value does not change
        # between cursor rows.
        clean_val = _DataClean_(deriveInput[row])

        for l_val in re_val:

            try:

                if re.match(l_val['pattern'], clean_val, flags=re.IGNORECASE):

                    field_val_new = re.sub(l_val['pattern'], l_val['replace'],
                                           clean_val, flags=re.IGNORECASE)

                    pattern = l_val['pattern']

                    check_match = True

                    break

            except KeyError as key_error_obj:
                # BUGFIX: warnings.warn's second positional argument must be
                # a Warning subclass; passing the exception instance raised
                # TypeError. Include the error in the message instead.
                warnings.warn('schema error: {0}'.format(key_error_obj))

        if re_val:
            re_val.close()

        if field_val_new == fieldVal and blankIfNoMatch:

            field_val_new = ''

            pattern = 'no matching pattern'

    change = _CollectHistory_(lookupType='deriveRegex', fromVal=fieldVal,
                              toVal=field_val_new, using=deriveInput,
                              pattern=pattern)

    hist_obj_upd = _CollectHistoryAgg_(contactHist=histObj, fieldHistObj=change,
                                       fieldName=fieldName)

    return field_val_new, hist_obj_upd, check_match
|
rh-marketingops/dwm | dwm/cleaning.py | DeriveDataLookup | python | def DeriveDataLookup(fieldName, db, deriveInput, overwrite=True, fieldVal='',
histObj={}, blankIfNoMatch=False):
lookup_vals = OrderedDict()
for val in sorted(deriveInput.keys()):
lookup_vals[val] = _DataClean_(deriveInput[val])
lookup_dict = {
'fieldName': fieldName,
'lookupVals': lookup_vals
}
coll = db['deriveValue']
l_val = coll.find_one(lookup_dict, ['value'])
field_val_new = fieldVal
derive_using = deriveInput
# If match found return True else False
check_match = True if l_val else False
if l_val and (overwrite or (fieldVal == '')):
try:
field_val_new = l_val['value']
except KeyError as Key_error_obj:
warnings.warn('schema error', Key_error_obj)
elif blankIfNoMatch and not l_val:
field_val_new = ''
derive_using = {'blankIfNoMatch': 'no match found'}
change = _CollectHistory_(lookupType='deriveValue', fromVal=fieldVal,
toVal=field_val_new, using=derive_using)
hist_obj_upd = _CollectHistoryAgg_(contactHist=histObj, fieldHistObj=change,
fieldName=fieldName)
return field_val_new, hist_obj_upd, check_match | Return new field value based on single or multi-value lookup against MongoDB
:param string fieldName: Field name to query against
:param MongoClient db: MongoClient instance connected to MongoDB
:param dict deriveInput: Values to perform lookup against:
{"lookupField1": "lookupVal1", "lookupField2": "lookupVal2"}
:param bool overwrite: Should an existing field value be replaced
:param string fieldVal: Current field value
:param dict histObj: History object to which changes should be appended
:param bool blankIfNoMatch: Should field value be set to blank
if no match is found | train | https://github.com/rh-marketingops/dwm/blob/66c7d18db857afbe5d574478ceaaad6159ae7469/dwm/cleaning.py#L221-L276 | [
"def _CollectHistory_(lookupType, fromVal, toVal, using={}, pattern=''):\n \"\"\"\n Return a dictionary detailing what, if any, change was made to a record field\n\n :param string lookupType: what cleaning rule made the change; one of: genericLookup, genericRegex, fieldSpecificLookup, fieldSpecificRegex, n... | """
These set of functions are responsible for determining what the new value of a
field should be, in most cases based on a lookup against MongoDB.
"""
import re
import warnings
from collections import OrderedDict
from .helpers import _CollectHistory_
from .helpers import _CollectHistoryAgg_
from .helpers import _DataClean_
def DataLookup(fieldVal, db, lookupType, fieldName, histObj={}):
"""
Return new field value based on single-value lookup against MongoDB
:param string fieldVal: input value to lookup
:param MongoClient db: MongoClient instance connected to MongoDB
:param string lookupType: Type of lookup to perform/MongoDB collection name.
One of 'genericLookup', 'fieldSpecificLookup', 'normLookup'
:param string fieldName: Field name to query against
:param dict histObj: History object to which changes should be appended
"""
if lookupType == 'genericLookup':
lookup_dict = {"find": _DataClean_(fieldVal)}
elif lookupType in ['fieldSpecificLookup', 'normLookup']:
lookup_dict = {"fieldName": fieldName, "find": _DataClean_(fieldVal)}
else:
raise ValueError("Invalid lookupType")
field_val_new = fieldVal
coll = db[lookupType]
l_val = coll.find_one(lookup_dict, ['replace'])
if l_val:
field_val_new = l_val['replace'] if 'replace' in l_val else ''
change = _CollectHistory_(lookupType=lookupType, fromVal=fieldVal,
toVal=field_val_new)
hist_obj_upd = _CollectHistoryAgg_(contactHist=histObj,
fieldHistObj=change,
fieldName=fieldName)
return field_val_new, hist_obj_upd
def IncludesLookup(fieldVal, lookupType, db, fieldName, deriveFieldName='',
deriveInput={}, histObj={}, overwrite=False,
blankIfNoMatch=False):
"""
Return new field value based on whether or not original value includes AND
excludes all words in a comma-delimited list queried from MongoDB
:param string fieldVal: input value to lookup
:param string lookupType: Type of lookup to perform/MongoDB collection name.
One of 'normIncludes', 'deriveIncludes'
:param MongoClient db: MongoClient instance connected to MongoDB
:param string fieldName: Field name to query against
:param string deriveFieldName: Field name from which to derive value
:param dict deriveInput: Values to perform lookup against:
{"deriveFieldName": "deriveVal1"}
:param dict histObj: History object to which changes should be appended
:param bool overwrite: Should an existing field value be replaced
:param bool blankIfNoMatch: Should field value be set to blank if
no match is found
"""
lookup_dict = {
'fieldName': fieldName
}
if lookupType == 'normIncludes':
field_val_clean = _DataClean_(fieldVal)
elif lookupType == 'deriveIncludes':
if deriveFieldName == '' or deriveInput == {}:
raise ValueError("for 'deriveIncludes' must specify both \
'deriveFieldName' and 'deriveInput'")
lookup_dict['deriveFieldName'] = deriveFieldName
field_val_clean = _DataClean_(deriveInput[list(deriveInput.keys())[0]])
else:
raise ValueError("Invalid lookupType")
field_val_new = fieldVal
check_match = False
using = {}
coll = db[lookupType]
inc_val = coll.find(lookup_dict, ['includes', 'excludes', 'begins', 'ends',
'replace'])
if inc_val and (lookupType == 'normIncludes' or
(lookupType == 'deriveIncludes' and
(overwrite or fieldVal == ''))):
for row in inc_val:
try:
if (row['includes'] != '' or
row['excludes'] != '' or
row['begins'] != '' or
row['ends'] != ''):
if all((a in field_val_clean)
for a in row['includes'].split(",")):
if all((b not in field_val_clean)
for b in row['excludes'].split(",")) \
or row['excludes'] == '':
if field_val_clean.startswith(row['begins']):
if field_val_clean.endswith(row['ends']):
field_val_new = row['replace']
if lookupType == 'deriveIncludes':
using[deriveFieldName] = deriveInput
using['includes'] = row['includes']
using['excludes'] = row['excludes']
using['begins'] = row['begins']
using['ends'] = row['ends']
check_match = True
break
except KeyError as Key_error_obj:
warnings.warn('schema error', Key_error_obj)
if inc_val:
inc_val.close()
if (field_val_new == fieldVal and blankIfNoMatch and
lookupType == 'deriveIncludes'):
field_val_new = ''
using['blankIfNoMatch'] = 'no match found'
change = _CollectHistory_(lookupType=lookupType, fromVal=fieldVal,
toVal=field_val_new, using=using)
histObjUpd = _CollectHistoryAgg_(contactHist=histObj, fieldHistObj=change,
fieldName=fieldName)
return field_val_new, histObjUpd, check_match
def RegexLookup(fieldVal, db, fieldName, lookupType, histObj={}):
"""
Return a new field value based on match against regex queried from MongoDB
:param string fieldVal: input value to lookup
:param MongoClient db: MongoClient instance connected to MongoDB
:param string lookupType: Type of lookup to perform/MongoDB collection name.
One of 'genericRegex', 'fieldSpecificRegex', 'normRegex'
:param string fieldName: Field name to query against
:param dict histObj: History object to which changes should be appended
"""
if lookupType == 'genericRegex':
lookup_dict = {}
elif lookupType in ['fieldSpecificRegex', 'normRegex']:
lookup_dict = {"fieldName": fieldName}
else:
raise ValueError("Invalid type")
field_val_new = fieldVal
pattern = ''
coll = db[lookupType]
re_val = coll.find(lookup_dict, ['pattern', 'replace'])
for row in re_val:
try:
match = re.match(row['pattern'], _DataClean_(field_val_new),
flags=re.IGNORECASE)
if match:
if 'replace' in row:
field_val_new = re.sub(row['pattern'], row['replace'],
_DataClean_(field_val_new),
flags=re.IGNORECASE)
else:
field_val_new = re.sub(row['pattern'], '',
_DataClean_(field_val_new),
flags=re.IGNORECASE)
pattern = row['pattern']
break
except KeyError as Key_error_obj:
warnings.warn('schema error', Key_error_obj)
if re_val:
re_val.close()
change = _CollectHistory_(lookupType=lookupType, fromVal=fieldVal,
toVal=field_val_new, pattern=pattern)
histObjUpd = _CollectHistoryAgg_(contactHist=histObj, fieldHistObj=change,
fieldName=fieldName)
return field_val_new, histObjUpd
def DeriveDataCopyValue(fieldName, deriveInput, overwrite, fieldVal, histObj={}):
"""
Return new value based on value from another field
:param string fieldName: Field name to query against
:param dict deriveInput: Values to perform lookup against:
{"copyField1": "copyVal1"}
:param bool overwrite: Should an existing field value be replaced
:param string fieldVal: Current field value
:param dict histObj: History object to which changes should be appended
"""
if len(deriveInput) > 1:
raise Exception("more than one field/value in deriveInput")
field_val_new = fieldVal
row = list(deriveInput.keys())[0]
if deriveInput[row] != '' and (overwrite or (fieldVal == '')):
field_val_new = deriveInput[row]
check_match = True
else:
check_match = False
change = _CollectHistory_(lookupType='copyValue', fromVal=fieldVal,
toVal=field_val_new, using=deriveInput)
hist_obj_upd = _CollectHistoryAgg_(contactHist=histObj, fieldHistObj=change,
fieldName=fieldName)
return field_val_new, hist_obj_upd, check_match
def DeriveDataRegex(fieldName, db, deriveInput, overwrite, fieldVal, histObj={},
blankIfNoMatch=False):
"""
Return a new field value based on match (of another field) against regex
queried from MongoDB
:param string fieldName: Field name to query against
:param MongoClient db: MongoClient instance connected to MongoDB
:param dict deriveInput: Values to perform lookup against:
{"lookupField1": "lookupVal1"}
:param bool overwrite: Should an existing field value be replaced
:param string fieldVal: Current field value
:param dict histObj: History object to which changes should be appended
:param bool blankIfNoMatch: Should field value be set to blank
if no match is found
"""
if len(deriveInput) > 1:
raise Exception("more than one value in deriveInput")
field_val_new = fieldVal
check_match = False
# derive_using = deriveInput
row = list(deriveInput.keys())[0]
pattern = ''
if deriveInput[row] != '' and (overwrite or (fieldVal == '')):
lookup_dict = {
'deriveFieldName': row,
'fieldName': fieldName
}
coll = db['deriveRegex']
re_val = coll.find(lookup_dict, ['pattern', 'replace'])
for l_val in re_val:
try:
match = re.match(l_val['pattern'],
_DataClean_(deriveInput[row]),
flags=re.IGNORECASE)
if match:
field_val_new = re.sub(l_val['pattern'], l_val['replace'],
_DataClean_(deriveInput[row]),
flags=re.IGNORECASE)
pattern = l_val['pattern']
check_match = True
break
except KeyError as key_error_obj:
warnings.warn('schema error', key_error_obj)
if re_val:
re_val.close()
if field_val_new == fieldVal and blankIfNoMatch:
field_val_new = ''
pattern = 'no matching pattern'
# derive_using = {"blankIfNoMatch": "no match found"}
change = _CollectHistory_(lookupType='deriveRegex', fromVal=fieldVal,
toVal=field_val_new, using=deriveInput,
pattern=pattern)
hist_obj_upd = _CollectHistoryAgg_(contactHist=histObj, fieldHistObj=change,
fieldName=fieldName)
return field_val_new, hist_obj_upd, check_match
|
rh-marketingops/dwm | dwm/cleaning.py | DeriveDataCopyValue | python | def DeriveDataCopyValue(fieldName, deriveInput, overwrite, fieldVal, histObj={}):
if len(deriveInput) > 1:
raise Exception("more than one field/value in deriveInput")
field_val_new = fieldVal
row = list(deriveInput.keys())[0]
if deriveInput[row] != '' and (overwrite or (fieldVal == '')):
field_val_new = deriveInput[row]
check_match = True
else:
check_match = False
change = _CollectHistory_(lookupType='copyValue', fromVal=fieldVal,
toVal=field_val_new, using=deriveInput)
hist_obj_upd = _CollectHistoryAgg_(contactHist=histObj, fieldHistObj=change,
fieldName=fieldName)
return field_val_new, hist_obj_upd, check_match | Return new value based on value from another field
:param string fieldName: Field name to query against
:param dict deriveInput: Values to perform lookup against:
{"copyField1": "copyVal1"}
:param bool overwrite: Should an existing field value be replaced
:param string fieldVal: Current field value
:param dict histObj: History object to which changes should be appended | train | https://github.com/rh-marketingops/dwm/blob/66c7d18db857afbe5d574478ceaaad6159ae7469/dwm/cleaning.py#L279-L310 | [
"def _CollectHistory_(lookupType, fromVal, toVal, using={}, pattern=''):\n \"\"\"\n Return a dictionary detailing what, if any, change was made to a record field\n\n :param string lookupType: what cleaning rule made the change; one of: genericLookup, genericRegex, fieldSpecificLookup, fieldSpecificRegex, n... | """
These set of functions are responsible for determining what the new value of a
field should be, in most cases based on a lookup against MongoDB.
"""
import re
import warnings
from collections import OrderedDict
from .helpers import _CollectHistory_
from .helpers import _CollectHistoryAgg_
from .helpers import _DataClean_
def DataLookup(fieldVal, db, lookupType, fieldName, histObj={}):
"""
Return new field value based on single-value lookup against MongoDB
:param string fieldVal: input value to lookup
:param MongoClient db: MongoClient instance connected to MongoDB
:param string lookupType: Type of lookup to perform/MongoDB collection name.
One of 'genericLookup', 'fieldSpecificLookup', 'normLookup'
:param string fieldName: Field name to query against
:param dict histObj: History object to which changes should be appended
"""
if lookupType == 'genericLookup':
lookup_dict = {"find": _DataClean_(fieldVal)}
elif lookupType in ['fieldSpecificLookup', 'normLookup']:
lookup_dict = {"fieldName": fieldName, "find": _DataClean_(fieldVal)}
else:
raise ValueError("Invalid lookupType")
field_val_new = fieldVal
coll = db[lookupType]
l_val = coll.find_one(lookup_dict, ['replace'])
if l_val:
field_val_new = l_val['replace'] if 'replace' in l_val else ''
change = _CollectHistory_(lookupType=lookupType, fromVal=fieldVal,
toVal=field_val_new)
hist_obj_upd = _CollectHistoryAgg_(contactHist=histObj,
fieldHistObj=change,
fieldName=fieldName)
return field_val_new, hist_obj_upd
def IncludesLookup(fieldVal, lookupType, db, fieldName, deriveFieldName='',
deriveInput={}, histObj={}, overwrite=False,
blankIfNoMatch=False):
"""
Return new field value based on whether or not original value includes AND
excludes all words in a comma-delimited list queried from MongoDB
:param string fieldVal: input value to lookup
:param string lookupType: Type of lookup to perform/MongoDB collection name.
One of 'normIncludes', 'deriveIncludes'
:param MongoClient db: MongoClient instance connected to MongoDB
:param string fieldName: Field name to query against
:param string deriveFieldName: Field name from which to derive value
:param dict deriveInput: Values to perform lookup against:
{"deriveFieldName": "deriveVal1"}
:param dict histObj: History object to which changes should be appended
:param bool overwrite: Should an existing field value be replaced
:param bool blankIfNoMatch: Should field value be set to blank if
no match is found
"""
lookup_dict = {
'fieldName': fieldName
}
if lookupType == 'normIncludes':
field_val_clean = _DataClean_(fieldVal)
elif lookupType == 'deriveIncludes':
if deriveFieldName == '' or deriveInput == {}:
raise ValueError("for 'deriveIncludes' must specify both \
'deriveFieldName' and 'deriveInput'")
lookup_dict['deriveFieldName'] = deriveFieldName
field_val_clean = _DataClean_(deriveInput[list(deriveInput.keys())[0]])
else:
raise ValueError("Invalid lookupType")
field_val_new = fieldVal
check_match = False
using = {}
coll = db[lookupType]
inc_val = coll.find(lookup_dict, ['includes', 'excludes', 'begins', 'ends',
'replace'])
if inc_val and (lookupType == 'normIncludes' or
(lookupType == 'deriveIncludes' and
(overwrite or fieldVal == ''))):
for row in inc_val:
try:
if (row['includes'] != '' or
row['excludes'] != '' or
row['begins'] != '' or
row['ends'] != ''):
if all((a in field_val_clean)
for a in row['includes'].split(",")):
if all((b not in field_val_clean)
for b in row['excludes'].split(",")) \
or row['excludes'] == '':
if field_val_clean.startswith(row['begins']):
if field_val_clean.endswith(row['ends']):
field_val_new = row['replace']
if lookupType == 'deriveIncludes':
using[deriveFieldName] = deriveInput
using['includes'] = row['includes']
using['excludes'] = row['excludes']
using['begins'] = row['begins']
using['ends'] = row['ends']
check_match = True
break
except KeyError as Key_error_obj:
warnings.warn('schema error', Key_error_obj)
if inc_val:
inc_val.close()
if (field_val_new == fieldVal and blankIfNoMatch and
lookupType == 'deriveIncludes'):
field_val_new = ''
using['blankIfNoMatch'] = 'no match found'
change = _CollectHistory_(lookupType=lookupType, fromVal=fieldVal,
toVal=field_val_new, using=using)
histObjUpd = _CollectHistoryAgg_(contactHist=histObj, fieldHistObj=change,
fieldName=fieldName)
return field_val_new, histObjUpd, check_match
def RegexLookup(fieldVal, db, fieldName, lookupType, histObj={}):
"""
Return a new field value based on match against regex queried from MongoDB
:param string fieldVal: input value to lookup
:param MongoClient db: MongoClient instance connected to MongoDB
:param string lookupType: Type of lookup to perform/MongoDB collection name.
One of 'genericRegex', 'fieldSpecificRegex', 'normRegex'
:param string fieldName: Field name to query against
:param dict histObj: History object to which changes should be appended
"""
if lookupType == 'genericRegex':
lookup_dict = {}
elif lookupType in ['fieldSpecificRegex', 'normRegex']:
lookup_dict = {"fieldName": fieldName}
else:
raise ValueError("Invalid type")
field_val_new = fieldVal
pattern = ''
coll = db[lookupType]
re_val = coll.find(lookup_dict, ['pattern', 'replace'])
for row in re_val:
try:
match = re.match(row['pattern'], _DataClean_(field_val_new),
flags=re.IGNORECASE)
if match:
if 'replace' in row:
field_val_new = re.sub(row['pattern'], row['replace'],
_DataClean_(field_val_new),
flags=re.IGNORECASE)
else:
field_val_new = re.sub(row['pattern'], '',
_DataClean_(field_val_new),
flags=re.IGNORECASE)
pattern = row['pattern']
break
except KeyError as Key_error_obj:
warnings.warn('schema error', Key_error_obj)
if re_val:
re_val.close()
change = _CollectHistory_(lookupType=lookupType, fromVal=fieldVal,
toVal=field_val_new, pattern=pattern)
histObjUpd = _CollectHistoryAgg_(contactHist=histObj, fieldHistObj=change,
fieldName=fieldName)
return field_val_new, histObjUpd
def DeriveDataLookup(fieldName, db, deriveInput, overwrite=True, fieldVal='',
histObj={}, blankIfNoMatch=False):
"""
Return new field value based on single or multi-value lookup against MongoDB
:param string fieldName: Field name to query against
:param MongoClient db: MongoClient instance connected to MongoDB
:param dict deriveInput: Values to perform lookup against:
{"lookupField1": "lookupVal1", "lookupField2": "lookupVal2"}
:param bool overwrite: Should an existing field value be replaced
:param string fieldVal: Current field value
:param dict histObj: History object to which changes should be appended
:param bool blankIfNoMatch: Should field value be set to blank
if no match is found
"""
lookup_vals = OrderedDict()
for val in sorted(deriveInput.keys()):
lookup_vals[val] = _DataClean_(deriveInput[val])
lookup_dict = {
'fieldName': fieldName,
'lookupVals': lookup_vals
}
coll = db['deriveValue']
l_val = coll.find_one(lookup_dict, ['value'])
field_val_new = fieldVal
derive_using = deriveInput
# If match found return True else False
check_match = True if l_val else False
if l_val and (overwrite or (fieldVal == '')):
try:
field_val_new = l_val['value']
except KeyError as Key_error_obj:
warnings.warn('schema error', Key_error_obj)
elif blankIfNoMatch and not l_val:
field_val_new = ''
derive_using = {'blankIfNoMatch': 'no match found'}
change = _CollectHistory_(lookupType='deriveValue', fromVal=fieldVal,
toVal=field_val_new, using=derive_using)
hist_obj_upd = _CollectHistoryAgg_(contactHist=histObj, fieldHistObj=change,
fieldName=fieldName)
return field_val_new, hist_obj_upd, check_match
def DeriveDataRegex(fieldName, db, deriveInput, overwrite, fieldVal, histObj={},
blankIfNoMatch=False):
"""
Return a new field value based on match (of another field) against regex
queried from MongoDB
:param string fieldName: Field name to query against
:param MongoClient db: MongoClient instance connected to MongoDB
:param dict deriveInput: Values to perform lookup against:
{"lookupField1": "lookupVal1"}
:param bool overwrite: Should an existing field value be replaced
:param string fieldVal: Current field value
:param dict histObj: History object to which changes should be appended
:param bool blankIfNoMatch: Should field value be set to blank
if no match is found
"""
if len(deriveInput) > 1:
raise Exception("more than one value in deriveInput")
field_val_new = fieldVal
check_match = False
# derive_using = deriveInput
row = list(deriveInput.keys())[0]
pattern = ''
if deriveInput[row] != '' and (overwrite or (fieldVal == '')):
lookup_dict = {
'deriveFieldName': row,
'fieldName': fieldName
}
coll = db['deriveRegex']
re_val = coll.find(lookup_dict, ['pattern', 'replace'])
for l_val in re_val:
try:
match = re.match(l_val['pattern'],
_DataClean_(deriveInput[row]),
flags=re.IGNORECASE)
if match:
field_val_new = re.sub(l_val['pattern'], l_val['replace'],
_DataClean_(deriveInput[row]),
flags=re.IGNORECASE)
pattern = l_val['pattern']
check_match = True
break
except KeyError as key_error_obj:
warnings.warn('schema error', key_error_obj)
if re_val:
re_val.close()
if field_val_new == fieldVal and blankIfNoMatch:
field_val_new = ''
pattern = 'no matching pattern'
# derive_using = {"blankIfNoMatch": "no match found"}
change = _CollectHistory_(lookupType='deriveRegex', fromVal=fieldVal,
toVal=field_val_new, using=deriveInput,
pattern=pattern)
hist_obj_upd = _CollectHistoryAgg_(contactHist=histObj, fieldHistObj=change,
fieldName=fieldName)
return field_val_new, hist_obj_upd, check_match
|
rh-marketingops/dwm | dwm/cleaning.py | DeriveDataRegex | python | def DeriveDataRegex(fieldName, db, deriveInput, overwrite, fieldVal, histObj={},
blankIfNoMatch=False):
if len(deriveInput) > 1:
raise Exception("more than one value in deriveInput")
field_val_new = fieldVal
check_match = False
# derive_using = deriveInput
row = list(deriveInput.keys())[0]
pattern = ''
if deriveInput[row] != '' and (overwrite or (fieldVal == '')):
lookup_dict = {
'deriveFieldName': row,
'fieldName': fieldName
}
coll = db['deriveRegex']
re_val = coll.find(lookup_dict, ['pattern', 'replace'])
for l_val in re_val:
try:
match = re.match(l_val['pattern'],
_DataClean_(deriveInput[row]),
flags=re.IGNORECASE)
if match:
field_val_new = re.sub(l_val['pattern'], l_val['replace'],
_DataClean_(deriveInput[row]),
flags=re.IGNORECASE)
pattern = l_val['pattern']
check_match = True
break
except KeyError as key_error_obj:
warnings.warn('schema error', key_error_obj)
if re_val:
re_val.close()
if field_val_new == fieldVal and blankIfNoMatch:
field_val_new = ''
pattern = 'no matching pattern'
# derive_using = {"blankIfNoMatch": "no match found"}
change = _CollectHistory_(lookupType='deriveRegex', fromVal=fieldVal,
toVal=field_val_new, using=deriveInput,
pattern=pattern)
hist_obj_upd = _CollectHistoryAgg_(contactHist=histObj, fieldHistObj=change,
fieldName=fieldName)
return field_val_new, hist_obj_upd, check_match | Return a new field value based on match (of another field) against regex
queried from MongoDB
:param string fieldName: Field name to query against
:param MongoClient db: MongoClient instance connected to MongoDB
:param dict deriveInput: Values to perform lookup against:
{"lookupField1": "lookupVal1"}
:param bool overwrite: Should an existing field value be replaced
:param string fieldVal: Current field value
:param dict histObj: History object to which changes should be appended
:param bool blankIfNoMatch: Should field value be set to blank
if no match is found | train | https://github.com/rh-marketingops/dwm/blob/66c7d18db857afbe5d574478ceaaad6159ae7469/dwm/cleaning.py#L313-L390 | [
"def _CollectHistory_(lookupType, fromVal, toVal, using={}, pattern=''):\n \"\"\"\n Return a dictionary detailing what, if any, change was made to a record field\n\n :param string lookupType: what cleaning rule made the change; one of: genericLookup, genericRegex, fieldSpecificLookup, fieldSpecificRegex, n... | """
These set of functions are responsible for determining what the new value of a
field should be, in most cases based on a lookup against MongoDB.
"""
import re
import warnings
from collections import OrderedDict
from .helpers import _CollectHistory_
from .helpers import _CollectHistoryAgg_
from .helpers import _DataClean_
def DataLookup(fieldVal, db, lookupType, fieldName, histObj={}):
"""
Return new field value based on single-value lookup against MongoDB
:param string fieldVal: input value to lookup
:param MongoClient db: MongoClient instance connected to MongoDB
:param string lookupType: Type of lookup to perform/MongoDB collection name.
One of 'genericLookup', 'fieldSpecificLookup', 'normLookup'
:param string fieldName: Field name to query against
:param dict histObj: History object to which changes should be appended
"""
if lookupType == 'genericLookup':
lookup_dict = {"find": _DataClean_(fieldVal)}
elif lookupType in ['fieldSpecificLookup', 'normLookup']:
lookup_dict = {"fieldName": fieldName, "find": _DataClean_(fieldVal)}
else:
raise ValueError("Invalid lookupType")
field_val_new = fieldVal
coll = db[lookupType]
l_val = coll.find_one(lookup_dict, ['replace'])
if l_val:
field_val_new = l_val['replace'] if 'replace' in l_val else ''
change = _CollectHistory_(lookupType=lookupType, fromVal=fieldVal,
toVal=field_val_new)
hist_obj_upd = _CollectHistoryAgg_(contactHist=histObj,
fieldHistObj=change,
fieldName=fieldName)
return field_val_new, hist_obj_upd
def IncludesLookup(fieldVal, lookupType, db, fieldName, deriveFieldName='',
deriveInput={}, histObj={}, overwrite=False,
blankIfNoMatch=False):
"""
Return new field value based on whether or not original value includes AND
excludes all words in a comma-delimited list queried from MongoDB
:param string fieldVal: input value to lookup
:param string lookupType: Type of lookup to perform/MongoDB collection name.
One of 'normIncludes', 'deriveIncludes'
:param MongoClient db: MongoClient instance connected to MongoDB
:param string fieldName: Field name to query against
:param string deriveFieldName: Field name from which to derive value
:param dict deriveInput: Values to perform lookup against:
{"deriveFieldName": "deriveVal1"}
:param dict histObj: History object to which changes should be appended
:param bool overwrite: Should an existing field value be replaced
:param bool blankIfNoMatch: Should field value be set to blank if
no match is found
"""
lookup_dict = {
'fieldName': fieldName
}
if lookupType == 'normIncludes':
field_val_clean = _DataClean_(fieldVal)
elif lookupType == 'deriveIncludes':
if deriveFieldName == '' or deriveInput == {}:
raise ValueError("for 'deriveIncludes' must specify both \
'deriveFieldName' and 'deriveInput'")
lookup_dict['deriveFieldName'] = deriveFieldName
field_val_clean = _DataClean_(deriveInput[list(deriveInput.keys())[0]])
else:
raise ValueError("Invalid lookupType")
field_val_new = fieldVal
check_match = False
using = {}
coll = db[lookupType]
inc_val = coll.find(lookup_dict, ['includes', 'excludes', 'begins', 'ends',
'replace'])
if inc_val and (lookupType == 'normIncludes' or
(lookupType == 'deriveIncludes' and
(overwrite or fieldVal == ''))):
for row in inc_val:
try:
if (row['includes'] != '' or
row['excludes'] != '' or
row['begins'] != '' or
row['ends'] != ''):
if all((a in field_val_clean)
for a in row['includes'].split(",")):
if all((b not in field_val_clean)
for b in row['excludes'].split(",")) \
or row['excludes'] == '':
if field_val_clean.startswith(row['begins']):
if field_val_clean.endswith(row['ends']):
field_val_new = row['replace']
if lookupType == 'deriveIncludes':
using[deriveFieldName] = deriveInput
using['includes'] = row['includes']
using['excludes'] = row['excludes']
using['begins'] = row['begins']
using['ends'] = row['ends']
check_match = True
break
except KeyError as Key_error_obj:
warnings.warn('schema error', Key_error_obj)
if inc_val:
inc_val.close()
if (field_val_new == fieldVal and blankIfNoMatch and
lookupType == 'deriveIncludes'):
field_val_new = ''
using['blankIfNoMatch'] = 'no match found'
change = _CollectHistory_(lookupType=lookupType, fromVal=fieldVal,
toVal=field_val_new, using=using)
histObjUpd = _CollectHistoryAgg_(contactHist=histObj, fieldHistObj=change,
fieldName=fieldName)
return field_val_new, histObjUpd, check_match
def RegexLookup(fieldVal, db, fieldName, lookupType, histObj={}):
"""
Return a new field value based on match against regex queried from MongoDB
:param string fieldVal: input value to lookup
:param MongoClient db: MongoClient instance connected to MongoDB
:param string lookupType: Type of lookup to perform/MongoDB collection name.
One of 'genericRegex', 'fieldSpecificRegex', 'normRegex'
:param string fieldName: Field name to query against
:param dict histObj: History object to which changes should be appended
"""
if lookupType == 'genericRegex':
lookup_dict = {}
elif lookupType in ['fieldSpecificRegex', 'normRegex']:
lookup_dict = {"fieldName": fieldName}
else:
raise ValueError("Invalid type")
field_val_new = fieldVal
pattern = ''
coll = db[lookupType]
re_val = coll.find(lookup_dict, ['pattern', 'replace'])
for row in re_val:
try:
match = re.match(row['pattern'], _DataClean_(field_val_new),
flags=re.IGNORECASE)
if match:
if 'replace' in row:
field_val_new = re.sub(row['pattern'], row['replace'],
_DataClean_(field_val_new),
flags=re.IGNORECASE)
else:
field_val_new = re.sub(row['pattern'], '',
_DataClean_(field_val_new),
flags=re.IGNORECASE)
pattern = row['pattern']
break
except KeyError as Key_error_obj:
warnings.warn('schema error', Key_error_obj)
if re_val:
re_val.close()
change = _CollectHistory_(lookupType=lookupType, fromVal=fieldVal,
toVal=field_val_new, pattern=pattern)
histObjUpd = _CollectHistoryAgg_(contactHist=histObj, fieldHistObj=change,
fieldName=fieldName)
return field_val_new, histObjUpd
def DeriveDataLookup(fieldName, db, deriveInput, overwrite=True, fieldVal='',
histObj={}, blankIfNoMatch=False):
"""
Return new field value based on single or multi-value lookup against MongoDB
:param string fieldName: Field name to query against
:param MongoClient db: MongoClient instance connected to MongoDB
:param dict deriveInput: Values to perform lookup against:
{"lookupField1": "lookupVal1", "lookupField2": "lookupVal2"}
:param bool overwrite: Should an existing field value be replaced
:param string fieldVal: Current field value
:param dict histObj: History object to which changes should be appended
:param bool blankIfNoMatch: Should field value be set to blank
if no match is found
"""
lookup_vals = OrderedDict()
for val in sorted(deriveInput.keys()):
lookup_vals[val] = _DataClean_(deriveInput[val])
lookup_dict = {
'fieldName': fieldName,
'lookupVals': lookup_vals
}
coll = db['deriveValue']
l_val = coll.find_one(lookup_dict, ['value'])
field_val_new = fieldVal
derive_using = deriveInput
# If match found return True else False
check_match = True if l_val else False
if l_val and (overwrite or (fieldVal == '')):
try:
field_val_new = l_val['value']
except KeyError as Key_error_obj:
warnings.warn('schema error', Key_error_obj)
elif blankIfNoMatch and not l_val:
field_val_new = ''
derive_using = {'blankIfNoMatch': 'no match found'}
change = _CollectHistory_(lookupType='deriveValue', fromVal=fieldVal,
toVal=field_val_new, using=derive_using)
hist_obj_upd = _CollectHistoryAgg_(contactHist=histObj, fieldHistObj=change,
fieldName=fieldName)
return field_val_new, hist_obj_upd, check_match
def DeriveDataCopyValue(fieldName, deriveInput, overwrite, fieldVal, histObj={}):
"""
Return new value based on value from another field
:param string fieldName: Field name to query against
:param dict deriveInput: Values to perform lookup against:
{"copyField1": "copyVal1"}
:param bool overwrite: Should an existing field value be replaced
:param string fieldVal: Current field value
:param dict histObj: History object to which changes should be appended
"""
if len(deriveInput) > 1:
raise Exception("more than one field/value in deriveInput")
field_val_new = fieldVal
row = list(deriveInput.keys())[0]
if deriveInput[row] != '' and (overwrite or (fieldVal == '')):
field_val_new = deriveInput[row]
check_match = True
else:
check_match = False
change = _CollectHistory_(lookupType='copyValue', fromVal=fieldVal,
toVal=field_val_new, using=deriveInput)
hist_obj_upd = _CollectHistoryAgg_(contactHist=histObj, fieldHistObj=change,
fieldName=fieldName)
return field_val_new, hist_obj_upd, check_match
|
rh-marketingops/dwm | dwm/dwm.py | dwmAll | python | def dwmAll(data, db, configName='', config={}, udfNamespace=__name__, verbose=False):
if config=={} and configName=='':
raise Exception("Please either specify configName or pass a config")
if config!={} and configName!='':
raise Exception("Please either specify configName or pass a config")
if config=={}:
configColl = db['config']
config = configColl.find_one({"configName": configName})
if not config:
raise Exception("configName '" + configName + "' not found in collection 'config'")
writeContactHistory = config["history"]["writeContactHistory"]
returnHistoryId = config["history"]["returnHistoryId"]
returnHistoryField = config["history"]["returnHistoryField"]
histIdField = config["history"]["histIdField"]
for field in config["fields"]:
config["fields"][field]["derive"] = OrderedDict(sorted(config["fields"][field]["derive"].items()))
for position in config["userDefinedFunctions"]:
config["userDefinedFunctions"][position] = OrderedDict(sorted(config["userDefinedFunctions"][position].items()))
if verbose:
for row in tqdm(data):
row, historyId = dwmOne(data=row, db=db, config=config, writeContactHistory=writeContactHistory, returnHistoryId=returnHistoryId, histIdField=histIdField, udfNamespace=udfNamespace)
if returnHistoryId and writeContactHistory:
row[returnHistoryField] = historyId
else:
for row in data:
row, historyId = dwmOne(data=row, db=db, config=config, writeContactHistory=writeContactHistory, returnHistoryId=returnHistoryId, histIdField=histIdField, udfNamespace=udfNamespace)
if returnHistoryId and writeContactHistory:
row[returnHistoryField] = historyId
return data | Return list of dictionaries after cleaning rules have been applied; optionally with a history record ID appended.
:param list data: list of dictionaries (records) to which cleaning rules should be applied
:param MongoClient db: MongoDB connection
:param string configName: name of configuration to use; will be queried from 'config' collection of MongoDB
:param OrderedDict config: pre-queried config dict
:param namespace udfNamespace: namespace of current working script; must be passed if using user-defined functions
:param bool verbose: use tqdm to display progress of cleaning records | train | https://github.com/rh-marketingops/dwm/blob/66c7d18db857afbe5d574478ceaaad6159ae7469/dwm/dwm.py#L11-L61 | [
"def dwmOne(data, db, config, writeContactHistory=True, returnHistoryId=True, histIdField={\"name\": \"emailAddress\", \"value\": \"emailAddress\"}, udfNamespace=__name__):\n \"\"\"\n Return a single dictionary (record) after cleaning rules have been applied; optionally insert history record to collection 'co... | from pymongo import MongoClient
from datetime import datetime
from tqdm import tqdm
import time
from collections import OrderedDict
from .wrappers import lookupAll, DeriveDataLookupAll
from .helpers import _RunUserDefinedFunctions_, _CollectHistory_, _CollectHistoryAgg_
## DWM on a set of contact records
## DWM order on a single record
def dwmOne(data, db, config, writeContactHistory=True, returnHistoryId=True, histIdField={"name": "emailAddress", "value": "emailAddress"}, udfNamespace=__name__):
"""
Return a single dictionary (record) after cleaning rules have been applied; optionally insert history record to collection 'contactHistory'
:param dict data: single record (dictionary) to which cleaning rules should be applied
:param MongoClient db: MongoClient instance connected to MongoDB
:param dict config: DWM configuration (see DataDictionary)
:param bool writeContactHistory: Write field-level change history to collection 'contactHistory'
:param bool returnHistoryId: If writeContactHistory, return '_id' of history record
:param dict histIdField: Name of identifier for history record: {"name": "emailAddress", "value": "emailAddress"}
:param namespace udfNamespace: namespace of current working script; must be passed if using user-defined functions
"""
# setup history collector
history = {}
# get user-defined function config
udFun = config['userDefinedFunctions']
## Get runtime field configuration
fieldConfig = config['fields']
## Run user-defined functions
data, history = _RunUserDefinedFunctions_(config=config, data=data, histObj=history, position="beforeGenericValidation", namespace=udfNamespace)
# Run generic validation lookup
data, history = lookupAll(data=data, configFields=fieldConfig, lookupType='genericLookup', db=db, histObj=history)
## Run user-defined functions
data, history = _RunUserDefinedFunctions_(config=config, data=data, histObj=history, position="beforeGenericRegex", namespace=udfNamespace)
# Run generic validation regex
data, history = lookupAll(data=data, configFields=fieldConfig, lookupType='genericRegex', db=db, histObj=history)
## Run user-defined functions
data, history = _RunUserDefinedFunctions_(config=config, data=data, histObj=history, position="beforeFieldSpecificValidation", namespace=udfNamespace)
# Run field-specific validation lookup
data, history = lookupAll(data=data, configFields=fieldConfig, lookupType='fieldSpecificLookup', db=db, histObj=history)
## Run user-defined functions
data, history = _RunUserDefinedFunctions_(config=config, data=data, histObj=history, position="beforeFieldSpecificRegex", namespace=udfNamespace)
# Run field-specific validation regex
data, history = lookupAll(data=data, configFields=fieldConfig, lookupType='fieldSpecificRegex', db=db, histObj=history)
## Run user-defined functions
data, history = _RunUserDefinedFunctions_(config=config, data=data, histObj=history, position="beforeNormalization", namespace=udfNamespace)
# Run normalization lookup
data, history = lookupAll(data=data, configFields=fieldConfig, lookupType='normLookup', db=db, histObj=history)
## Run user-defined functions
data, history = _RunUserDefinedFunctions_(config=config, data=data, histObj=history, position="beforeNormalizationRegex", namespace=udfNamespace)
# Run normalization regex
data, history = lookupAll(data=data, configFields=fieldConfig, lookupType='normRegex', db=db, histObj=history)
## Run user-defined functions
data, history = _RunUserDefinedFunctions_(config=config, data=data, histObj=history, position="beforeNormalizationIncludes", namespace=udfNamespace)
# Run normalization includes
data, history = lookupAll(data=data, configFields=fieldConfig, lookupType='normIncludes', db=db, histObj=history)
## Run user-defined functions
data, history = _RunUserDefinedFunctions_(config=config, data=data, histObj=history, position="beforeDeriveData", namespace=udfNamespace)
# Fill gaps / refresh derived data
data, history = DeriveDataLookupAll(data=data, configFields=fieldConfig, db=db, histObj=history)
## Run user-defined functions
data, history = _RunUserDefinedFunctions_(config=config, data=data, histObj=history, position="afterProcessing", namespace=udfNamespace)
# check if need to write contact change history
if writeContactHistory:
history['timestamp'] = int(time.time())
history[histIdField['name']] = data[histIdField['value']]
history['configName'] = config['configName']
# Set _current value for most recent contact
history['_current'] = 0
# Increment all _current
db['contactHistory'].update({histIdField['name']: data[histIdField['value']]}, {'$inc': {'_current': 1}}, multi=True)
# Insert into DB
historyId = db['contactHistory'].insert_one(history).inserted_id
if writeContactHistory and returnHistoryId:
return data, historyId
else:
return data, None
##
|
rh-marketingops/dwm | dwm/dwm.py | dwmOne | python | def dwmOne(data, db, config, writeContactHistory=True, returnHistoryId=True, histIdField={"name": "emailAddress", "value": "emailAddress"}, udfNamespace=__name__):
# setup history collector
history = {}
# get user-defined function config
udFun = config['userDefinedFunctions']
## Get runtime field configuration
fieldConfig = config['fields']
## Run user-defined functions
data, history = _RunUserDefinedFunctions_(config=config, data=data, histObj=history, position="beforeGenericValidation", namespace=udfNamespace)
# Run generic validation lookup
data, history = lookupAll(data=data, configFields=fieldConfig, lookupType='genericLookup', db=db, histObj=history)
## Run user-defined functions
data, history = _RunUserDefinedFunctions_(config=config, data=data, histObj=history, position="beforeGenericRegex", namespace=udfNamespace)
# Run generic validation regex
data, history = lookupAll(data=data, configFields=fieldConfig, lookupType='genericRegex', db=db, histObj=history)
## Run user-defined functions
data, history = _RunUserDefinedFunctions_(config=config, data=data, histObj=history, position="beforeFieldSpecificValidation", namespace=udfNamespace)
# Run field-specific validation lookup
data, history = lookupAll(data=data, configFields=fieldConfig, lookupType='fieldSpecificLookup', db=db, histObj=history)
## Run user-defined functions
data, history = _RunUserDefinedFunctions_(config=config, data=data, histObj=history, position="beforeFieldSpecificRegex", namespace=udfNamespace)
# Run field-specific validation regex
data, history = lookupAll(data=data, configFields=fieldConfig, lookupType='fieldSpecificRegex', db=db, histObj=history)
## Run user-defined functions
data, history = _RunUserDefinedFunctions_(config=config, data=data, histObj=history, position="beforeNormalization", namespace=udfNamespace)
# Run normalization lookup
data, history = lookupAll(data=data, configFields=fieldConfig, lookupType='normLookup', db=db, histObj=history)
## Run user-defined functions
data, history = _RunUserDefinedFunctions_(config=config, data=data, histObj=history, position="beforeNormalizationRegex", namespace=udfNamespace)
# Run normalization regex
data, history = lookupAll(data=data, configFields=fieldConfig, lookupType='normRegex', db=db, histObj=history)
## Run user-defined functions
data, history = _RunUserDefinedFunctions_(config=config, data=data, histObj=history, position="beforeNormalizationIncludes", namespace=udfNamespace)
# Run normalization includes
data, history = lookupAll(data=data, configFields=fieldConfig, lookupType='normIncludes', db=db, histObj=history)
## Run user-defined functions
data, history = _RunUserDefinedFunctions_(config=config, data=data, histObj=history, position="beforeDeriveData", namespace=udfNamespace)
# Fill gaps / refresh derived data
data, history = DeriveDataLookupAll(data=data, configFields=fieldConfig, db=db, histObj=history)
## Run user-defined functions
data, history = _RunUserDefinedFunctions_(config=config, data=data, histObj=history, position="afterProcessing", namespace=udfNamespace)
# check if need to write contact change history
if writeContactHistory:
history['timestamp'] = int(time.time())
history[histIdField['name']] = data[histIdField['value']]
history['configName'] = config['configName']
# Set _current value for most recent contact
history['_current'] = 0
# Increment all _current
db['contactHistory'].update({histIdField['name']: data[histIdField['value']]}, {'$inc': {'_current': 1}}, multi=True)
# Insert into DB
historyId = db['contactHistory'].insert_one(history).inserted_id
if writeContactHistory and returnHistoryId:
return data, historyId
else:
return data, None | Return a single dictionary (record) after cleaning rules have been applied; optionally insert history record to collection 'contactHistory'
:param dict data: single record (dictionary) to which cleaning rules should be applied
:param MongoClient db: MongoClient instance connected to MongoDB
:param dict config: DWM configuration (see DataDictionary)
:param bool writeContactHistory: Write field-level change history to collection 'contactHistory'
:param bool returnHistoryId: If writeContactHistory, return '_id' of history record
:param dict histIdField: Name of identifier for history record: {"name": "emailAddress", "value": "emailAddress"}
:param namespace udfNamespace: namespace of current working script; must be passed if using user-defined functions | train | https://github.com/rh-marketingops/dwm/blob/66c7d18db857afbe5d574478ceaaad6159ae7469/dwm/dwm.py#L65-L156 | [
"def lookupAll(data, configFields, lookupType, db, histObj={}):\n \"\"\"\n Return a record after having cleaning rules of specified type applied to all fields in the config\n\n :param dict data: single record (dictionary) to which cleaning rules should be applied\n :param dict configFields: \"fields\" o... | from pymongo import MongoClient
from datetime import datetime
from tqdm import tqdm
import time
from collections import OrderedDict
from .wrappers import lookupAll, DeriveDataLookupAll
from .helpers import _RunUserDefinedFunctions_, _CollectHistory_, _CollectHistoryAgg_
## DWM on a set of contact records
def dwmAll(data, db, configName='', config={}, udfNamespace=__name__, verbose=False):
"""
Return list of dictionaries after cleaning rules have been applied; optionally with a history record ID appended.
:param list data: list of dictionaries (records) to which cleaning rules should be applied
:param MongoClient db: MongoDB connection
:param string configName: name of configuration to use; will be queried from 'config' collection of MongoDB
:param OrderedDict config: pre-queried config dict
:param namespace udfNamespace: namespace of current working script; must be passed if using user-defined functions
:param bool verbose: use tqdm to display progress of cleaning records
"""
if config=={} and configName=='':
raise Exception("Please either specify configName or pass a config")
if config!={} and configName!='':
raise Exception("Please either specify configName or pass a config")
if config=={}:
configColl = db['config']
config = configColl.find_one({"configName": configName})
if not config:
raise Exception("configName '" + configName + "' not found in collection 'config'")
writeContactHistory = config["history"]["writeContactHistory"]
returnHistoryId = config["history"]["returnHistoryId"]
returnHistoryField = config["history"]["returnHistoryField"]
histIdField = config["history"]["histIdField"]
for field in config["fields"]:
config["fields"][field]["derive"] = OrderedDict(sorted(config["fields"][field]["derive"].items()))
for position in config["userDefinedFunctions"]:
config["userDefinedFunctions"][position] = OrderedDict(sorted(config["userDefinedFunctions"][position].items()))
if verbose:
for row in tqdm(data):
row, historyId = dwmOne(data=row, db=db, config=config, writeContactHistory=writeContactHistory, returnHistoryId=returnHistoryId, histIdField=histIdField, udfNamespace=udfNamespace)
if returnHistoryId and writeContactHistory:
row[returnHistoryField] = historyId
else:
for row in data:
row, historyId = dwmOne(data=row, db=db, config=config, writeContactHistory=writeContactHistory, returnHistoryId=returnHistoryId, histIdField=histIdField, udfNamespace=udfNamespace)
if returnHistoryId and writeContactHistory:
row[returnHistoryField] = historyId
return data
## DWM order on a single record
def dwmOne(data, db, config, writeContactHistory=True, returnHistoryId=True, histIdField={"name": "emailAddress", "value": "emailAddress"}, udfNamespace=__name__):
"""
Return a single dictionary (record) after cleaning rules have been applied; optionally insert history record to collection 'contactHistory'
:param dict data: single record (dictionary) to which cleaning rules should be applied
:param MongoClient db: MongoClient instance connected to MongoDB
:param dict config: DWM configuration (see DataDictionary)
:param bool writeContactHistory: Write field-level change history to collection 'contactHistory'
:param bool returnHistoryId: If writeContactHistory, return '_id' of history record
:param dict histIdField: Name of identifier for history record: {"name": "emailAddress", "value": "emailAddress"}
:param namespace udfNamespace: namespace of current working script; must be passed if using user-defined functions
"""
# setup history collector
history = {}
# get user-defined function config
udFun = config['userDefinedFunctions']
## Get runtime field configuration
fieldConfig = config['fields']
## Run user-defined functions
data, history = _RunUserDefinedFunctions_(config=config, data=data, histObj=history, position="beforeGenericValidation", namespace=udfNamespace)
# Run generic validation lookup
data, history = lookupAll(data=data, configFields=fieldConfig, lookupType='genericLookup', db=db, histObj=history)
## Run user-defined functions
data, history = _RunUserDefinedFunctions_(config=config, data=data, histObj=history, position="beforeGenericRegex", namespace=udfNamespace)
# Run generic validation regex
data, history = lookupAll(data=data, configFields=fieldConfig, lookupType='genericRegex', db=db, histObj=history)
## Run user-defined functions
data, history = _RunUserDefinedFunctions_(config=config, data=data, histObj=history, position="beforeFieldSpecificValidation", namespace=udfNamespace)
# Run field-specific validation lookup
data, history = lookupAll(data=data, configFields=fieldConfig, lookupType='fieldSpecificLookup', db=db, histObj=history)
## Run user-defined functions
data, history = _RunUserDefinedFunctions_(config=config, data=data, histObj=history, position="beforeFieldSpecificRegex", namespace=udfNamespace)
# Run field-specific validation regex
data, history = lookupAll(data=data, configFields=fieldConfig, lookupType='fieldSpecificRegex', db=db, histObj=history)
## Run user-defined functions
data, history = _RunUserDefinedFunctions_(config=config, data=data, histObj=history, position="beforeNormalization", namespace=udfNamespace)
# Run normalization lookup
data, history = lookupAll(data=data, configFields=fieldConfig, lookupType='normLookup', db=db, histObj=history)
## Run user-defined functions
data, history = _RunUserDefinedFunctions_(config=config, data=data, histObj=history, position="beforeNormalizationRegex", namespace=udfNamespace)
# Run normalization regex
data, history = lookupAll(data=data, configFields=fieldConfig, lookupType='normRegex', db=db, histObj=history)
## Run user-defined functions
data, history = _RunUserDefinedFunctions_(config=config, data=data, histObj=history, position="beforeNormalizationIncludes", namespace=udfNamespace)
# Run normalization includes
data, history = lookupAll(data=data, configFields=fieldConfig, lookupType='normIncludes', db=db, histObj=history)
## Run user-defined functions
data, history = _RunUserDefinedFunctions_(config=config, data=data, histObj=history, position="beforeDeriveData", namespace=udfNamespace)
# Fill gaps / refresh derived data
data, history = DeriveDataLookupAll(data=data, configFields=fieldConfig, db=db, histObj=history)
## Run user-defined functions
data, history = _RunUserDefinedFunctions_(config=config, data=data, histObj=history, position="afterProcessing", namespace=udfNamespace)
# check if need to write contact change history
if writeContactHistory:
history['timestamp'] = int(time.time())
history[histIdField['name']] = data[histIdField['value']]
history['configName'] = config['configName']
# Set _current value for most recent contact
history['_current'] = 0
# Increment all _current
db['contactHistory'].update({histIdField['name']: data[histIdField['value']]}, {'$inc': {'_current': 1}}, multi=True)
# Insert into DB
historyId = db['contactHistory'].insert_one(history).inserted_id
if writeContactHistory and returnHistoryId:
return data, historyId
else:
return data, None
##
|
Parsl/libsubmit | libsubmit/providers/torque/torque.py | TorqueProvider._status | python | def _status(self):
''' Internal: Do not call. Returns the status list for a list of job_ids
Args:
self
Returns:
[status...] : Status list of all jobs
'''
job_id_list = ' '.join(self.resources.keys())
jobs_missing = list(self.resources.keys())
retcode, stdout, stderr = self.channel.execute_wait("qstat {0}".format(job_id_list), 3)
for line in stdout.split('\n'):
parts = line.split()
if not parts or parts[0].upper().startswith('JOB') or parts[0].startswith('---'):
continue
job_id = parts[0]
status = translate_table.get(parts[4], 'UNKNOWN')
self.resources[job_id]['status'] = status
jobs_missing.remove(job_id)
# squeue does not report on jobs that are not running. So we are filling in the
# blanks for missing jobs, we might lose some information about why the jobs failed.
for missing_job in jobs_missing:
if self.resources[missing_job]['status'] in ['PENDING', 'RUNNING']:
self.resources[missing_job]['status'] = translate_table['E'] | Internal: Do not call. Returns the status list for a list of job_ids
Args:
self
Returns:
[status...] : Status list of all jobs | train | https://github.com/Parsl/libsubmit/blob/27a41c16dd6f1c16d830a9ce1c97804920a59f64/libsubmit/providers/torque/torque.py#L110-L138 | null | class TorqueProvider(ClusterProvider, RepresentationMixin):
"""Torque Execution Provider
This provider uses sbatch to submit, squeue for status, and scancel to cancel
jobs. The sbatch script to be used is created from a template file in this
same module.
Parameters
----------
channel : Channel
Channel for accessing this provider. Possible channels include
:class:`~libsubmit.channels.LocalChannel` (the default),
:class:`~libsubmit.channels.SSHChannel`, or
:class:`~libsubmit.channels.SSHInteractiveLoginChannel`.
account : str
Account the job will be charged against.
queue : str
Torque queue to request blocks from.
label : str
Label for this provider.
script_dir : str
Relative or absolute path to a directory where intermediate scripts are placed.
nodes_per_block : int
Nodes to provision per block.
tasks_per_node : int
Tasks to run per node.
init_blocks : int
Number of blocks to provision at the start of the run. Default is 1.
min_blocks : int
Minimum number of blocks to maintain. Default is 0.
max_blocks : int
Maximum number of blocks to maintain.
parallelism : float
Ratio of provisioned task slots to active tasks. A parallelism value of 1 represents aggressive
scaling where as many resources as possible are used; parallelism close to 0 represents
the opposite situation in which as few resources as possible (i.e., min_blocks) are used.
walltime : str
Walltime requested per block in HH:MM:SS.
overrides : str
String to prepend to the Torque submit script.
launcher : Launcher
Launcher for this provider. Possible launchers include
:class:`~libsubmit.launchers.AprunLauncher` (the default), or
:class:`~libsubmit.launchers.SingleNodeLauncher`,
"""
def __init__(self,
channel=LocalChannel(),
account=None,
queue=None,
overrides='',
label='torque',
script_dir='parsl_scripts',
nodes_per_block=1,
tasks_per_node=1,
init_blocks=1,
min_blocks=0,
max_blocks=100,
parallelism=1,
launcher=AprunLauncher(),
walltime="00:20:00"):
super().__init__(label,
channel,
script_dir,
nodes_per_block,
tasks_per_node,
init_blocks,
min_blocks,
max_blocks,
parallelism,
walltime,
launcher)
self.account = account
self.queue = queue
self.overrides = overrides
self.provisioned_blocks = 0
self.script_dir = script_dir
if not os.path.exists(self.script_dir):
os.makedirs(self.script_dir)
# Dictionary that keeps track of jobs, keyed on job_id
self.resources = {}
def submit(self, command, blocksize, job_name="parsl.auto"):
''' Submits the command onto an Local Resource Manager job of blocksize parallel elements.
Submit returns an ID that corresponds to the task that was just submitted.
If tasks_per_node < 1 : ! This is illegal. tasks_per_node should be integer
If tasks_per_node == 1:
A single node is provisioned
If tasks_per_node > 1 :
tasks_per_node * blocksize number of nodes are provisioned.
Args:
- command :(String) Commandline invocation to be made on the remote side.
- blocksize :(float)
Kwargs:
- job_name (String): Name for job, must be unique
Returns:
- None: At capacity, cannot provision more
- job_id: (string) Identifier for the job
'''
if self.provisioned_blocks >= self.max_blocks:
logger.warn("[%s] at capacity, cannot add more blocks now", self.label)
return None
# Note: Fix this later to avoid confusing behavior.
# We should always allocate blocks in integer counts of node_granularity
if blocksize < self.nodes_per_block:
blocksize = self.nodes_per_block
# Set job name
job_name = "parsl.{0}.{1}".format(job_name, time.time())
# Set script path
script_path = "{0}/{1}.submit".format(self.script_dir, job_name)
script_path = os.path.abspath(script_path)
logger.debug("Requesting blocksize:%s nodes_per_block:%s tasks_per_node:%s", blocksize, self.nodes_per_block,
self.tasks_per_node)
job_config = {}
# TODO : script_path might need to change to accommodate script dir set via channels
job_config["submit_script_dir"] = self.channel.script_dir
job_config["nodes"] = self.nodes_per_block
job_config["task_blocks"] = self.nodes_per_block * self.tasks_per_node
job_config["nodes_per_block"] = self.nodes_per_block
job_config["tasks_per_node"] = self.tasks_per_node
job_config["walltime"] = self.walltime
job_config["overrides"] = self.overrides
job_config["user_script"] = command
# Wrap the command
job_config["user_script"] = self.launcher(command,
self.tasks_per_node,
self.nodes_per_block)
logger.debug("Writing submit script")
self._write_submit_script(template_string, script_path, job_name, job_config)
channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)
submit_options = ''
if self.queue is not None:
submit_options = '{0} -q {1}'.format(submit_options, self.queue)
if self.account is not None:
submit_options = '{0} -A {1}'.format(submit_options, self.account)
launch_cmd = "qsub {0} {1}".format(submit_options, channel_script_path)
retcode, stdout, stderr = self.channel.execute_wait(launch_cmd, 10)
job_id = None
if retcode == 0:
for line in stdout.split('\n'):
if line.strip():
job_id = line.strip()
self.resources[job_id] = {'job_id': job_id, 'status': 'PENDING', 'blocksize': blocksize}
else:
message = "Command '{}' failed with return code {}".format(launch_cmd, retcode)
if (stdout is not None) and (stderr is not None):
message += "\nstderr:{}\nstdout{}".format(stderr.strip(), stdout.strip())
logger.error(message)
return job_id
def cancel(self, job_ids):
''' Cancels the jobs specified by a list of job ids
Args:
job_ids : [<job_id> ...]
Returns :
[True/False...] : If the cancel operation fails the entire list will be False.
'''
job_id_list = ' '.join(job_ids)
retcode, stdout, stderr = self.channel.execute_wait("qdel {0}".format(job_id_list), 3)
rets = None
if retcode == 0:
for jid in job_ids:
self.resources[jid]['status'] = translate_table['E'] # Setting state to exiting
rets = [True for i in job_ids]
else:
rets = [False for i in job_ids]
return rets
|
Parsl/libsubmit | libsubmit/providers/torque/torque.py | TorqueProvider.submit | python | def submit(self, command, blocksize, job_name="parsl.auto"):
''' Submits the command onto an Local Resource Manager job of blocksize parallel elements.
Submit returns an ID that corresponds to the task that was just submitted.
If tasks_per_node < 1 : ! This is illegal. tasks_per_node should be integer
If tasks_per_node == 1:
A single node is provisioned
If tasks_per_node > 1 :
tasks_per_node * blocksize number of nodes are provisioned.
Args:
- command :(String) Commandline invocation to be made on the remote side.
- blocksize :(float)
Kwargs:
- job_name (String): Name for job, must be unique
Returns:
- None: At capacity, cannot provision more
- job_id: (string) Identifier for the job
'''
if self.provisioned_blocks >= self.max_blocks:
logger.warn("[%s] at capacity, cannot add more blocks now", self.label)
return None
# Note: Fix this later to avoid confusing behavior.
# We should always allocate blocks in integer counts of node_granularity
if blocksize < self.nodes_per_block:
blocksize = self.nodes_per_block
# Set job name
job_name = "parsl.{0}.{1}".format(job_name, time.time())
# Set script path
script_path = "{0}/{1}.submit".format(self.script_dir, job_name)
script_path = os.path.abspath(script_path)
logger.debug("Requesting blocksize:%s nodes_per_block:%s tasks_per_node:%s", blocksize, self.nodes_per_block,
self.tasks_per_node)
job_config = {}
# TODO : script_path might need to change to accommodate script dir set via channels
job_config["submit_script_dir"] = self.channel.script_dir
job_config["nodes"] = self.nodes_per_block
job_config["task_blocks"] = self.nodes_per_block * self.tasks_per_node
job_config["nodes_per_block"] = self.nodes_per_block
job_config["tasks_per_node"] = self.tasks_per_node
job_config["walltime"] = self.walltime
job_config["overrides"] = self.overrides
job_config["user_script"] = command
# Wrap the command
job_config["user_script"] = self.launcher(command,
self.tasks_per_node,
self.nodes_per_block)
logger.debug("Writing submit script")
self._write_submit_script(template_string, script_path, job_name, job_config)
channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)
submit_options = ''
if self.queue is not None:
submit_options = '{0} -q {1}'.format(submit_options, self.queue)
if self.account is not None:
submit_options = '{0} -A {1}'.format(submit_options, self.account)
launch_cmd = "qsub {0} {1}".format(submit_options, channel_script_path)
retcode, stdout, stderr = self.channel.execute_wait(launch_cmd, 10)
job_id = None
if retcode == 0:
for line in stdout.split('\n'):
if line.strip():
job_id = line.strip()
self.resources[job_id] = {'job_id': job_id, 'status': 'PENDING', 'blocksize': blocksize}
else:
message = "Command '{}' failed with return code {}".format(launch_cmd, retcode)
if (stdout is not None) and (stderr is not None):
message += "\nstderr:{}\nstdout{}".format(stderr.strip(), stdout.strip())
logger.error(message)
return job_id | Submits the command onto an Local Resource Manager job of blocksize parallel elements.
Submit returns an ID that corresponds to the task that was just submitted.
If tasks_per_node < 1 : ! This is illegal. tasks_per_node should be integer
If tasks_per_node == 1:
A single node is provisioned
If tasks_per_node > 1 :
tasks_per_node * blocksize number of nodes are provisioned.
Args:
- command :(String) Commandline invocation to be made on the remote side.
- blocksize :(float)
Kwargs:
- job_name (String): Name for job, must be unique
Returns:
- None: At capacity, cannot provision more
- job_id: (string) Identifier for the job | train | https://github.com/Parsl/libsubmit/blob/27a41c16dd6f1c16d830a9ce1c97804920a59f64/libsubmit/providers/torque/torque.py#L140-L226 | [
"def _write_submit_script(self, template, script_filename, job_name, configs):\n \"\"\"Generate submit script and write it to a file.\n\n Args:\n - template (string) : The template string to be used for the writing submit script\n - script_filename (string) : Name of the submit script\n ... | class TorqueProvider(ClusterProvider, RepresentationMixin):
"""Torque Execution Provider
This provider uses sbatch to submit, squeue for status, and scancel to cancel
jobs. The sbatch script to be used is created from a template file in this
same module.
Parameters
----------
channel : Channel
Channel for accessing this provider. Possible channels include
:class:`~libsubmit.channels.LocalChannel` (the default),
:class:`~libsubmit.channels.SSHChannel`, or
:class:`~libsubmit.channels.SSHInteractiveLoginChannel`.
account : str
Account the job will be charged against.
queue : str
Torque queue to request blocks from.
label : str
Label for this provider.
script_dir : str
Relative or absolute path to a directory where intermediate scripts are placed.
nodes_per_block : int
Nodes to provision per block.
tasks_per_node : int
Tasks to run per node.
init_blocks : int
Number of blocks to provision at the start of the run. Default is 1.
min_blocks : int
Minimum number of blocks to maintain. Default is 0.
max_blocks : int
Maximum number of blocks to maintain.
parallelism : float
Ratio of provisioned task slots to active tasks. A parallelism value of 1 represents aggressive
scaling where as many resources as possible are used; parallelism close to 0 represents
the opposite situation in which as few resources as possible (i.e., min_blocks) are used.
walltime : str
Walltime requested per block in HH:MM:SS.
overrides : str
String to prepend to the Torque submit script.
launcher : Launcher
Launcher for this provider. Possible launchers include
:class:`~libsubmit.launchers.AprunLauncher` (the default), or
:class:`~libsubmit.launchers.SingleNodeLauncher`,
"""
def __init__(self,
channel=LocalChannel(),
account=None,
queue=None,
overrides='',
label='torque',
script_dir='parsl_scripts',
nodes_per_block=1,
tasks_per_node=1,
init_blocks=1,
min_blocks=0,
max_blocks=100,
parallelism=1,
launcher=AprunLauncher(),
walltime="00:20:00"):
super().__init__(label,
channel,
script_dir,
nodes_per_block,
tasks_per_node,
init_blocks,
min_blocks,
max_blocks,
parallelism,
walltime,
launcher)
self.account = account
self.queue = queue
self.overrides = overrides
self.provisioned_blocks = 0
self.script_dir = script_dir
if not os.path.exists(self.script_dir):
os.makedirs(self.script_dir)
# Dictionary that keeps track of jobs, keyed on job_id
self.resources = {}
def _status(self):
''' Internal: Do not call. Returns the status list for a list of job_ids
Args:
self
Returns:
[status...] : Status list of all jobs
'''
job_id_list = ' '.join(self.resources.keys())
jobs_missing = list(self.resources.keys())
retcode, stdout, stderr = self.channel.execute_wait("qstat {0}".format(job_id_list), 3)
for line in stdout.split('\n'):
parts = line.split()
if not parts or parts[0].upper().startswith('JOB') or parts[0].startswith('---'):
continue
job_id = parts[0]
status = translate_table.get(parts[4], 'UNKNOWN')
self.resources[job_id]['status'] = status
jobs_missing.remove(job_id)
# squeue does not report on jobs that are not running. So we are filling in the
# blanks for missing jobs, we might lose some information about why the jobs failed.
for missing_job in jobs_missing:
if self.resources[missing_job]['status'] in ['PENDING', 'RUNNING']:
self.resources[missing_job]['status'] = translate_table['E']
def cancel(self, job_ids):
''' Cancels the jobs specified by a list of job ids
Args:
job_ids : [<job_id> ...]
Returns :
[True/False...] : If the cancel operation fails the entire list will be False.
'''
job_id_list = ' '.join(job_ids)
retcode, stdout, stderr = self.channel.execute_wait("qdel {0}".format(job_id_list), 3)
rets = None
if retcode == 0:
for jid in job_ids:
self.resources[jid]['status'] = translate_table['E'] # Setting state to exiting
rets = [True for i in job_ids]
else:
rets = [False for i in job_ids]
return rets
|
Parsl/libsubmit | libsubmit/providers/torque/torque.py | TorqueProvider.cancel | python | def cancel(self, job_ids):
''' Cancels the jobs specified by a list of job ids
Args:
job_ids : [<job_id> ...]
Returns :
[True/False...] : If the cancel operation fails the entire list will be False.
'''
job_id_list = ' '.join(job_ids)
retcode, stdout, stderr = self.channel.execute_wait("qdel {0}".format(job_id_list), 3)
rets = None
if retcode == 0:
for jid in job_ids:
self.resources[jid]['status'] = translate_table['E'] # Setting state to exiting
rets = [True for i in job_ids]
else:
rets = [False for i in job_ids]
return rets | Cancels the jobs specified by a list of job ids
Args:
job_ids : [<job_id> ...]
Returns :
[True/False...] : If the cancel operation fails the entire list will be False. | train | https://github.com/Parsl/libsubmit/blob/27a41c16dd6f1c16d830a9ce1c97804920a59f64/libsubmit/providers/torque/torque.py#L228-L248 | null | class TorqueProvider(ClusterProvider, RepresentationMixin):
"""Torque Execution Provider
This provider uses sbatch to submit, squeue for status, and scancel to cancel
jobs. The sbatch script to be used is created from a template file in this
same module.
Parameters
----------
channel : Channel
Channel for accessing this provider. Possible channels include
:class:`~libsubmit.channels.LocalChannel` (the default),
:class:`~libsubmit.channels.SSHChannel`, or
:class:`~libsubmit.channels.SSHInteractiveLoginChannel`.
account : str
Account the job will be charged against.
queue : str
Torque queue to request blocks from.
label : str
Label for this provider.
script_dir : str
Relative or absolute path to a directory where intermediate scripts are placed.
nodes_per_block : int
Nodes to provision per block.
tasks_per_node : int
Tasks to run per node.
init_blocks : int
Number of blocks to provision at the start of the run. Default is 1.
min_blocks : int
Minimum number of blocks to maintain. Default is 0.
max_blocks : int
Maximum number of blocks to maintain.
parallelism : float
Ratio of provisioned task slots to active tasks. A parallelism value of 1 represents aggressive
scaling where as many resources as possible are used; parallelism close to 0 represents
the opposite situation in which as few resources as possible (i.e., min_blocks) are used.
walltime : str
Walltime requested per block in HH:MM:SS.
overrides : str
String to prepend to the Torque submit script.
launcher : Launcher
Launcher for this provider. Possible launchers include
:class:`~libsubmit.launchers.AprunLauncher` (the default), or
:class:`~libsubmit.launchers.SingleNodeLauncher`,
"""
def __init__(self,
channel=LocalChannel(),
account=None,
queue=None,
overrides='',
label='torque',
script_dir='parsl_scripts',
nodes_per_block=1,
tasks_per_node=1,
init_blocks=1,
min_blocks=0,
max_blocks=100,
parallelism=1,
launcher=AprunLauncher(),
walltime="00:20:00"):
super().__init__(label,
channel,
script_dir,
nodes_per_block,
tasks_per_node,
init_blocks,
min_blocks,
max_blocks,
parallelism,
walltime,
launcher)
self.account = account
self.queue = queue
self.overrides = overrides
self.provisioned_blocks = 0
self.script_dir = script_dir
if not os.path.exists(self.script_dir):
os.makedirs(self.script_dir)
# Dictionary that keeps track of jobs, keyed on job_id
self.resources = {}
def _status(self):
''' Internal: Do not call. Returns the status list for a list of job_ids
Args:
self
Returns:
[status...] : Status list of all jobs
'''
job_id_list = ' '.join(self.resources.keys())
jobs_missing = list(self.resources.keys())
retcode, stdout, stderr = self.channel.execute_wait("qstat {0}".format(job_id_list), 3)
for line in stdout.split('\n'):
parts = line.split()
if not parts or parts[0].upper().startswith('JOB') or parts[0].startswith('---'):
continue
job_id = parts[0]
status = translate_table.get(parts[4], 'UNKNOWN')
self.resources[job_id]['status'] = status
jobs_missing.remove(job_id)
# squeue does not report on jobs that are not running. So we are filling in the
# blanks for missing jobs, we might lose some information about why the jobs failed.
for missing_job in jobs_missing:
if self.resources[missing_job]['status'] in ['PENDING', 'RUNNING']:
self.resources[missing_job]['status'] = translate_table['E']
def submit(self, command, blocksize, job_name="parsl.auto"):
''' Submits the command onto an Local Resource Manager job of blocksize parallel elements.
Submit returns an ID that corresponds to the task that was just submitted.
If tasks_per_node < 1 : ! This is illegal. tasks_per_node should be integer
If tasks_per_node == 1:
A single node is provisioned
If tasks_per_node > 1 :
tasks_per_node * blocksize number of nodes are provisioned.
Args:
- command :(String) Commandline invocation to be made on the remote side.
- blocksize :(float)
Kwargs:
- job_name (String): Name for job, must be unique
Returns:
- None: At capacity, cannot provision more
- job_id: (string) Identifier for the job
'''
if self.provisioned_blocks >= self.max_blocks:
logger.warn("[%s] at capacity, cannot add more blocks now", self.label)
return None
# Note: Fix this later to avoid confusing behavior.
# We should always allocate blocks in integer counts of node_granularity
if blocksize < self.nodes_per_block:
blocksize = self.nodes_per_block
# Set job name
job_name = "parsl.{0}.{1}".format(job_name, time.time())
# Set script path
script_path = "{0}/{1}.submit".format(self.script_dir, job_name)
script_path = os.path.abspath(script_path)
logger.debug("Requesting blocksize:%s nodes_per_block:%s tasks_per_node:%s", blocksize, self.nodes_per_block,
self.tasks_per_node)
job_config = {}
# TODO : script_path might need to change to accommodate script dir set via channels
job_config["submit_script_dir"] = self.channel.script_dir
job_config["nodes"] = self.nodes_per_block
job_config["task_blocks"] = self.nodes_per_block * self.tasks_per_node
job_config["nodes_per_block"] = self.nodes_per_block
job_config["tasks_per_node"] = self.tasks_per_node
job_config["walltime"] = self.walltime
job_config["overrides"] = self.overrides
job_config["user_script"] = command
# Wrap the command
job_config["user_script"] = self.launcher(command,
self.tasks_per_node,
self.nodes_per_block)
logger.debug("Writing submit script")
self._write_submit_script(template_string, script_path, job_name, job_config)
channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)
submit_options = ''
if self.queue is not None:
submit_options = '{0} -q {1}'.format(submit_options, self.queue)
if self.account is not None:
submit_options = '{0} -A {1}'.format(submit_options, self.account)
launch_cmd = "qsub {0} {1}".format(submit_options, channel_script_path)
retcode, stdout, stderr = self.channel.execute_wait(launch_cmd, 10)
job_id = None
if retcode == 0:
for line in stdout.split('\n'):
if line.strip():
job_id = line.strip()
self.resources[job_id] = {'job_id': job_id, 'status': 'PENDING', 'blocksize': blocksize}
else:
message = "Command '{}' failed with return code {}".format(launch_cmd, retcode)
if (stdout is not None) and (stderr is not None):
message += "\nstderr:{}\nstdout{}".format(stderr.strip(), stdout.strip())
logger.error(message)
return job_id
|
Parsl/libsubmit | libsubmit/providers/grid_engine/grid_engine.py | GridEngineProvider.get_configs | python | def get_configs(self, command):
logger.debug("Requesting one block with {} nodes per block and {} tasks per node".format(
self.nodes_per_block, self.tasks_per_node))
job_config = {}
job_config["submit_script_dir"] = self.channel.script_dir
job_config["nodes"] = self.nodes_per_block
job_config["walltime"] = wtime_to_minutes(self.walltime)
job_config["overrides"] = self.overrides
job_config["user_script"] = command
job_config["user_script"] = self.launcher(command,
self.tasks_per_node,
self.nodes_per_block)
return job_config | Compose a dictionary with information for writing the submit script. | train | https://github.com/Parsl/libsubmit/blob/27a41c16dd6f1c16d830a9ce1c97804920a59f64/libsubmit/providers/grid_engine/grid_engine.py#L98-L114 | [
"def wtime_to_minutes(time_string):\n ''' wtime_to_minutes\n\n Convert standard wallclock time string to minutes.\n\n Args:\n - Time_string in HH:MM:SS format\n\n Returns:\n (int) minutes\n\n '''\n hours, mins, seconds = time_string.split(':')\n return int(hours) * 60 + int(mins) ... | class GridEngineProvider(ClusterProvider, RepresentationMixin):
"""A provider for the Grid Engine scheduler.
Parameters
----------
channel : Channel
Channel for accessing this provider. Possible channels include
:class:`~libsubmit.channels.LocalChannel` (the default),
:class:`~libsubmit.channels.SSHChannel`, or
:class:`~libsubmit.channels.SSHInteractiveLoginChannel`.
label : str
Label for this provider.
script_dir : str
Relative or absolute path to a directory where intermediate scripts are placed.
nodes_per_block : int
Nodes to provision per block.
tasks_per_node : int
Tasks to run per node.
min_blocks : int
Minimum number of blocks to maintain.
max_blocks : int
Maximum number of blocks to maintain.
parallelism : float
Ratio of provisioned task slots to active tasks. A parallelism value of 1 represents aggressive
scaling where as many resources as possible are used; parallelism close to 0 represents
the opposite situation in which as few resources as possible (i.e., min_blocks) are used.
walltime : str
Walltime requested per block in HH:MM:SS.
overrides : str
String to prepend to the #SBATCH blocks in the submit script to the scheduler.
launcher : Launcher
Launcher for this provider. Possible launchers include
:class:`~libsubmit.launchers.SingleNodeLauncher` (the default),
"""
def __init__(self,
channel=LocalChannel(),
label='grid_engine',
script_dir='parsl_scripts',
nodes_per_block=1,
tasks_per_node=1,
init_blocks=1,
min_blocks=0,
max_blocks=10,
parallelism=1,
walltime="00:10:00",
overrides='',
launcher=SingleNodeLauncher()):
super().__init__(label,
channel,
script_dir,
nodes_per_block,
tasks_per_node,
init_blocks,
min_blocks,
max_blocks,
parallelism,
walltime,
launcher)
self.overrides = overrides
if launcher in ['srun', 'srun_mpi']:
logger.warning("Use of {} launcher is usually appropriate for Slurm providers. "
"Recommended options include 'single_node' or 'aprun'.".format(launcher))
def submit(self, command="", blocksize=1, job_name="parsl.auto"):
''' The submit method takes the command string to be executed upon
instantiation of a resource most often to start a pilot (such as IPP engine
or even Swift-T engines).
Args :
- command (str) : The bash command string to be executed.
- blocksize (int) : Blocksize to be requested
KWargs:
- job_name (str) : Human friendly name to be assigned to the job request
Returns:
- A job identifier, this could be an integer, string etc
Raises:
- ExecutionProviderException or its subclasses
'''
# Note: Fix this later to avoid confusing behavior.
# We should always allocate blocks in integer counts of node_granularity
if blocksize < self.nodes_per_block:
blocksize = self.nodes_per_block
# Set job name
job_name = "{0}.{1}".format(job_name, time.time())
# Set script path
script_path = "{0}/{1}.submit".format(self.script_dir, job_name)
script_path = os.path.abspath(script_path)
job_config = self.get_configs(command, blocksize)
logger.debug("Writing submit script")
self._write_submit_script(template_string, script_path, job_name, job_config)
channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)
cmd = "qsub -terse {0}".format(channel_script_path)
retcode, stdout, stderr = super().execute_wait(cmd, 10)
if retcode == 0:
for line in stdout.split('\n'):
job_id = line.strip()
if not job_id:
continue
self.resources[job_id] = {'job_id': job_id, 'status': 'PENDING', 'blocksize': blocksize}
return job_id
else:
print("[WARNING!!] Submission of command to scale_out failed")
logger.error("Retcode:%s STDOUT:%s STDERR:%s", retcode, stdout.strip(), stderr.strip())
def _status(self):
''' Get the status of a list of jobs identified by the job identifiers
returned from the submit request.
Returns:
- A list of status from ['PENDING', 'RUNNING', 'CANCELLED', 'COMPLETED',
'FAILED', 'TIMEOUT'] corresponding to each job_id in the job_ids list.
Raises:
- ExecutionProviderException or its subclasses
'''
cmd = "qstat"
retcode, stdout, stderr = super().execute_wait(cmd)
# Execute_wait failed. Do no update
if retcode != 0:
return
jobs_missing = list(self.resources.keys())
for line in stdout.split('\n'):
parts = line.split()
if parts and parts[0].lower().lower() != 'job-id' \
and not parts[0].startswith('----'):
job_id = parts[0]
status = translate_table.get(parts[4].lower(), 'UNKNOWN')
if job_id in self.resources:
self.resources[job_id]['status'] = status
jobs_missing.remove(job_id)
# Filling in missing blanks for jobs that might have gone missing
# we might lose some information about why the jobs failed.
for missing_job in jobs_missing:
if self.resources[missing_job]['status'] in ['PENDING', 'RUNNING']:
self.resources[missing_job]['status'] = 'COMPLETED'
def cancel(self, job_ids):
''' Cancels the resources identified by the job_ids provided by the user.
Args:
- job_ids (list): A list of job identifiers
Returns:
- A list of status from cancelling the job which can be True, False
Raises:
- ExecutionProviderException or its subclasses
'''
job_id_list = ' '.join(job_ids)
cmd = "qdel {}".format(job_id_list)
retcode, stdout, stderr = super().execute_wait(cmd, 3)
rets = None
if retcode == 0:
for jid in job_ids:
self.resources[jid]['status'] = "COMPLETED"
rets = [True for i in job_ids]
else:
rets = [False for i in job_ids]
return rets
|
Parsl/libsubmit | libsubmit/providers/grid_engine/grid_engine.py | GridEngineProvider.submit | python | def submit(self, command="", blocksize=1, job_name="parsl.auto"):
''' The submit method takes the command string to be executed upon
instantiation of a resource most often to start a pilot (such as IPP engine
or even Swift-T engines).
Args :
- command (str) : The bash command string to be executed.
- blocksize (int) : Blocksize to be requested
KWargs:
- job_name (str) : Human friendly name to be assigned to the job request
Returns:
- A job identifier, this could be an integer, string etc
Raises:
- ExecutionProviderException or its subclasses
'''
# Note: Fix this later to avoid confusing behavior.
# We should always allocate blocks in integer counts of node_granularity
if blocksize < self.nodes_per_block:
blocksize = self.nodes_per_block
# Set job name
job_name = "{0}.{1}".format(job_name, time.time())
# Set script path
script_path = "{0}/{1}.submit".format(self.script_dir, job_name)
script_path = os.path.abspath(script_path)
job_config = self.get_configs(command, blocksize)
logger.debug("Writing submit script")
self._write_submit_script(template_string, script_path, job_name, job_config)
channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)
cmd = "qsub -terse {0}".format(channel_script_path)
retcode, stdout, stderr = super().execute_wait(cmd, 10)
if retcode == 0:
for line in stdout.split('\n'):
job_id = line.strip()
if not job_id:
continue
self.resources[job_id] = {'job_id': job_id, 'status': 'PENDING', 'blocksize': blocksize}
return job_id
else:
print("[WARNING!!] Submission of command to scale_out failed")
logger.error("Retcode:%s STDOUT:%s STDERR:%s", retcode, stdout.strip(), stderr.strip()) | The submit method takes the command string to be executed upon
instantiation of a resource most often to start a pilot (such as IPP engine
or even Swift-T engines).
Args :
- command (str) : The bash command string to be executed.
- blocksize (int) : Blocksize to be requested
KWargs:
- job_name (str) : Human friendly name to be assigned to the job request
Returns:
- A job identifier, this could be an integer, string etc
Raises:
- ExecutionProviderException or its subclasses | train | https://github.com/Parsl/libsubmit/blob/27a41c16dd6f1c16d830a9ce1c97804920a59f64/libsubmit/providers/grid_engine/grid_engine.py#L116-L165 | [
"def execute_wait(self, cmd, timeout=None):\n t = self.cmd_timeout\n if timeout is not None:\n t = timeout\n return self.channel.execute_wait(cmd, t)\n",
"def _write_submit_script(self, template, script_filename, job_name, configs):\n \"\"\"Generate submit script and write it to a file.\n\n ... | class GridEngineProvider(ClusterProvider, RepresentationMixin):
"""A provider for the Grid Engine scheduler.
Parameters
----------
channel : Channel
Channel for accessing this provider. Possible channels include
:class:`~libsubmit.channels.LocalChannel` (the default),
:class:`~libsubmit.channels.SSHChannel`, or
:class:`~libsubmit.channels.SSHInteractiveLoginChannel`.
label : str
Label for this provider.
script_dir : str
Relative or absolute path to a directory where intermediate scripts are placed.
nodes_per_block : int
Nodes to provision per block.
tasks_per_node : int
Tasks to run per node.
min_blocks : int
Minimum number of blocks to maintain.
max_blocks : int
Maximum number of blocks to maintain.
parallelism : float
Ratio of provisioned task slots to active tasks. A parallelism value of 1 represents aggressive
scaling where as many resources as possible are used; parallelism close to 0 represents
the opposite situation in which as few resources as possible (i.e., min_blocks) are used.
walltime : str
Walltime requested per block in HH:MM:SS.
overrides : str
String to prepend to the #SBATCH blocks in the submit script to the scheduler.
launcher : Launcher
Launcher for this provider. Possible launchers include
:class:`~libsubmit.launchers.SingleNodeLauncher` (the default),
"""
def __init__(self,
channel=LocalChannel(),
label='grid_engine',
script_dir='parsl_scripts',
nodes_per_block=1,
tasks_per_node=1,
init_blocks=1,
min_blocks=0,
max_blocks=10,
parallelism=1,
walltime="00:10:00",
overrides='',
launcher=SingleNodeLauncher()):
super().__init__(label,
channel,
script_dir,
nodes_per_block,
tasks_per_node,
init_blocks,
min_blocks,
max_blocks,
parallelism,
walltime,
launcher)
self.overrides = overrides
if launcher in ['srun', 'srun_mpi']:
logger.warning("Use of {} launcher is usually appropriate for Slurm providers. "
"Recommended options include 'single_node' or 'aprun'.".format(launcher))
def get_configs(self, command):
"""Compose a dictionary with information for writing the submit script."""
logger.debug("Requesting one block with {} nodes per block and {} tasks per node".format(
self.nodes_per_block, self.tasks_per_node))
job_config = {}
job_config["submit_script_dir"] = self.channel.script_dir
job_config["nodes"] = self.nodes_per_block
job_config["walltime"] = wtime_to_minutes(self.walltime)
job_config["overrides"] = self.overrides
job_config["user_script"] = command
job_config["user_script"] = self.launcher(command,
self.tasks_per_node,
self.nodes_per_block)
return job_config
def _status(self):
''' Get the status of a list of jobs identified by the job identifiers
returned from the submit request.
Returns:
- A list of status from ['PENDING', 'RUNNING', 'CANCELLED', 'COMPLETED',
'FAILED', 'TIMEOUT'] corresponding to each job_id in the job_ids list.
Raises:
- ExecutionProviderException or its subclasses
'''
cmd = "qstat"
retcode, stdout, stderr = super().execute_wait(cmd)
# Execute_wait failed. Do no update
if retcode != 0:
return
jobs_missing = list(self.resources.keys())
for line in stdout.split('\n'):
parts = line.split()
if parts and parts[0].lower().lower() != 'job-id' \
and not parts[0].startswith('----'):
job_id = parts[0]
status = translate_table.get(parts[4].lower(), 'UNKNOWN')
if job_id in self.resources:
self.resources[job_id]['status'] = status
jobs_missing.remove(job_id)
# Filling in missing blanks for jobs that might have gone missing
# we might lose some information about why the jobs failed.
for missing_job in jobs_missing:
if self.resources[missing_job]['status'] in ['PENDING', 'RUNNING']:
self.resources[missing_job]['status'] = 'COMPLETED'
def cancel(self, job_ids):
''' Cancels the resources identified by the job_ids provided by the user.
Args:
- job_ids (list): A list of job identifiers
Returns:
- A list of status from cancelling the job which can be True, False
Raises:
- ExecutionProviderException or its subclasses
'''
job_id_list = ' '.join(job_ids)
cmd = "qdel {}".format(job_id_list)
retcode, stdout, stderr = super().execute_wait(cmd, 3)
rets = None
if retcode == 0:
for jid in job_ids:
self.resources[jid]['status'] = "COMPLETED"
rets = [True for i in job_ids]
else:
rets = [False for i in job_ids]
return rets
|
Parsl/libsubmit | libsubmit/providers/googlecloud/googlecloud.py | GoogleCloudProvider.submit | python | def submit(self, command="", blocksize=1, job_name="parsl.auto"):
''' The submit method takes the command string to be executed upon
instantiation of a resource most often to start a pilot.
Args :
- command (str) : The bash command string to be executed.
- blocksize (int) : Blocksize to be requested
KWargs:
- job_name (str) : Human friendly name to be assigned to the job request
Returns:
- A job identifier, this could be an integer, string etc
Raises:
- ExecutionProviderException or its subclasses
'''
instance, name = self.create_instance(command=command)
self.provisioned_blocks += 1
self.resources[name] = {"job_id": name, "status": translate_table[instance['status']]}
return name | The submit method takes the command string to be executed upon
instantiation of a resource most often to start a pilot.
Args :
- command (str) : The bash command string to be executed.
- blocksize (int) : Blocksize to be requested
KWargs:
- job_name (str) : Human friendly name to be assigned to the job request
Returns:
- A job identifier, this could be an integer, string etc
Raises:
- ExecutionProviderException or its subclasses | train | https://github.com/Parsl/libsubmit/blob/27a41c16dd6f1c16d830a9ce1c97804920a59f64/libsubmit/providers/googlecloud/googlecloud.py#L118-L138 | [
"def create_instance(self, command=\"\"):\n name = \"parslauto{}\".format(self.num_instances)\n self.num_instances += 1\n compute = self.client\n project = self.project_id\n image_response = compute.images().getFromFamily(\n project=self.os_project, family=self.os_family).execute()\n source... | class GoogleCloudProvider():
"""A provider for using resources from the Google Compute Engine.
Parameters
----------
project_id : str
Project ID from Google compute engine.
key_file : str
Path to authorization private key json file. This is required for auth.
A new one can be generated here: https://console.cloud.google.com/apis/credentials
region : str
Region in which to start instances
os_project : str
OS project code for Google compute engine.
os_family : str
OS family to request.
label : str
A label for this executor. Default is 'google_cloud'.
google_version : str
Google compute engine version to use. Possibilies include 'v1' (default) or 'beta'.
instance_type: str
'n1-standard-1',
script_dir : str
Relative or absolute path to a directory where intermediate scripts are placed.
init_blocks : int
Number of blocks to provision immediately. Default is 1.
min_blocks : int
Minimum number of blocks to maintain. Default is 0.
max_blocks : int
Maximum number of blocks to maintain. Default is 10.
parallelism : float
Ratio of provisioned task slots to active tasks. A parallelism value of 1 represents aggressive
scaling where as many resources as possible are used; parallelism close to 0 represents
the opposite situation in which as few resources as possible (i.e., min_blocks) are used.
.. code:: python
+------------------
|
script_string ------->| submit
id <--------|---+
|
[ ids ] ------->| status
[statuses] <--------|----+
|
[ ids ] ------->| cancel
[cancel] <--------|----+
|
[True/False] <--------| scaling_enabled
|
+-------------------
"""
def __init__(self,
project_id,
key_file,
region,
os_project,
os_family,
label='google_cloud',
google_version='v1',
instance_type='n1-standard-1',
script_dir='parsl_scripts',
init_blocks=1,
min_blocks=0,
max_blocks=10,
parallelism=1):
self.project_id = project_id
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = key_file
self.zone = self.get_zone(region)
self.os_project = os_project
self.os_family = os_family
self.label = label
self.client = googleapiclient.discovery.build('compute', google_version)
self.instance_type = instance_type
self.script_dir = script_dir
if not os.path.exists(self.script_dir):
os.makedirs(self.script_dir)
self.init_blocks = init_blocks
self.min_blocks = min_blocks
self.max_blocks = max_blocks
self.parallelism = parallelism
self.num_instances = 0
# Dictionary that keeps track of jobs, keyed on job_id
self.resources = {}
self.provisioned_blocks = 0
atexit.register(self.bye)
def status(self, job_ids):
''' Get the status of a list of jobs identified by the job identifiers
returned from the submit request.
Args:
- job_ids (list) : A list of job identifiers
Returns:
- A list of status from ['PENDING', 'RUNNING', 'CANCELLED', 'COMPLETED',
'FAILED', 'TIMEOUT'] corresponding to each job_id in the job_ids list.
Raises:
- ExecutionProviderException or its subclasses
'''
statuses = []
for job_id in job_ids:
instance = self.client.instances().get(instance=job_id, project=self.project_id, zone=self.zone).execute()
self.resources[job_id]['status'] = translate_table[instance['status']]
statuses.append(translate_table[instance['status']])
return statuses
def cancel(self, job_ids):
''' Cancels the resources identified by the job_ids provided by the user.
Args:
- job_ids (list): A list of job identifiers
Returns:
- A list of status from cancelling the job which can be True, False
Raises:
- ExecutionProviderException or its subclasses
'''
statuses = []
for job_id in job_ids:
try:
self.delete_instance(job_id)
statuses.append(True)
self.provisioned_blocks -= 1
except Exception:
statuses.append(False)
return statuses
@property
def scaling_enabled(self):
''' Scaling is enabled
Returns:
- Status (Bool)
'''
return True
@property
def current_capacity(self):
"""Returns the number of currently provisioned blocks."""
return self.provisioned_blocks
def bye(self):
self.cancel([i for i in list(self.resources)])
def create_instance(self, command=""):
name = "parslauto{}".format(self.num_instances)
self.num_instances += 1
compute = self.client
project = self.project_id
image_response = compute.images().getFromFamily(
project=self.os_project, family=self.os_family).execute()
source_disk_image = image_response['selfLink']
# Configure the machine
machine_type = "zones/{}/machineTypes/{}".format(self.zone, self.instance_type)
startup_script = command
config = {
'name': name,
'machineType': machine_type,
# Specify the boot disk and the image to use as a source.
'disks': [{
'boot': True,
'autoDelete': True,
'initializeParams': {
'sourceImage': source_disk_image,
}
}],
'networkInterfaces': [{
'network': 'global/networks/default',
'accessConfigs': [{
'type': 'ONE_TO_ONE_NAT',
'name': 'External NAT'
}]
}],
'serviceAccounts': [{
'email':
'default',
'scopes': [
'https://www.googleapis.com/auth/devstorage.read_write',
'https://www.googleapis.com/auth/logging.write'
]
}],
'metadata': {
'items': [{
# Startup script is automatically executed by the
# instance upon startup.
'key': 'startup-script',
'value': startup_script
}]
}
}
return compute.instances().insert(project=project, zone=self.zone, body=config).execute(), name
def get_zone(self, region):
res = self.client.zones().list(project=self.project_id).execute()
for zone in res['items']:
if region in zone['name'] and zone['status'] == "UP":
return zone["name"]
def delete_instance(self, name):
compute = self.client
project = self.project_id
zone = self.zone
return compute.instances().delete(project=project, zone=zone, instance=name).execute()
|
Parsl/libsubmit | libsubmit/providers/azure/deployer.py | Deployer.deploy | python | def deploy(self, job_name, command='', blocksize=1):
instances = []
self.client.resource_groups.create_or_update(
self.resource_group,
{
'location': self.location,
}
)
template_path = os.path.join(os.path.dirname(
__file__), 'templates', 'template.json')
with open(template_path, 'r') as template_file_fd:
template = json.load(template_file_fd)
parameters = {
'sshKeyData': self.pub_ssh_key,
'vmName': 'azure-deployment-sample-vm',
'dnsLabelPrefix': self.dns_label_prefix
}
parameters = {k: {'value': v} for k, v in parameters.items()}
deployment_properties = {
'mode': DeploymentMode.incremental,
'template': template,
'parameters': parameters
}
for i in range(blocksize):
deployment_async_operation = self.client.deployments.create_or_update(
self.resource_group,
'azure-sample',
deployment_properties
)
instances.append(deployment_async_operation.wait())
return instances | Deploy the template to a resource group. | train | https://github.com/Parsl/libsubmit/blob/27a41c16dd6f1c16d830a9ce1c97804920a59f64/libsubmit/providers/azure/deployer.py#L39-L74 | null | class Deployer(object):
""" Initialize the deployer class with subscription, resource group and public key.
:raises IOError: If the public key path cannot be read (access or not exists)
:raises KeyError: If AZURE_CLIENT_ID, AZURE_CLIENT_SECRET or AZURE_TENANT_ID env
variables or not defined
"""
config = ""
def __init__(self, subscription_id, resource_group, config,
pub_ssh_key_path='~/.ssh/id_rsa.pub'):
self.config = config
self.subscription_id = subscription_id
self.resource_group = resource_group
self.dns_label_prefix = self.name_generator.haikunate()
self.location = self.config['location']
pub_ssh_key_path = os.path.expanduser(pub_ssh_key_path)
# Will raise if file not exists or not enough permission
with open(pub_ssh_key_path, 'r') as pub_ssh_file_fd:
self.pub_ssh_key = pub_ssh_file_fd.read()
self.credentials = ServicePrincipalCredentials(
client_id=self.config['AZURE_CLIENT_ID'],
secret=self.config['AZURE_CLIENT_SECRET'],
tenant=self.config['AZURE_TENANT_ID']
)
self.client = ResourceManagementClient(
self.credentials, self.subscription_id)
def destroy(self, job_ids):
"""Destroy the given resource group"""
for job_id in job_ids:
self.client.resource_groups.delete(self.resource_group)
def get_vm(self, resource_group_name, vm_name):
'''
you need to retry this just in case the credentials token expires,
that's where the decorator comes in
this will return all the data about the virtual machine
'''
return self.client.virtual_machines.get(
resource_group_name, vm_name, expand='instanceView')
def get_vm_status(self, vm_name, rgn):
'''
this will just return the status of the virtual machine
sometime the status may be unknown as shown by the azure portal;
in that case statuses[1] doesn't exist, hence retrying on IndexError
also, it may take on the order of minutes for the status to become
available so the decorator will bang on it forever
'''
rgn = rgn if rgn else self.resource_group
return self.client.virtual_machines.get(
rgn, vm_name).instance_view.statuses[1].display_status
|
Parsl/libsubmit | libsubmit/providers/azure/deployer.py | Deployer.destroy | python | def destroy(self, job_ids):
for job_id in job_ids:
self.client.resource_groups.delete(self.resource_group) | Destroy the given resource group | train | https://github.com/Parsl/libsubmit/blob/27a41c16dd6f1c16d830a9ce1c97804920a59f64/libsubmit/providers/azure/deployer.py#L76-L79 | null | class Deployer(object):
""" Initialize the deployer class with subscription, resource group and public key.
:raises IOError: If the public key path cannot be read (access or not exists)
:raises KeyError: If AZURE_CLIENT_ID, AZURE_CLIENT_SECRET or AZURE_TENANT_ID env
variables or not defined
"""
config = ""
def __init__(self, subscription_id, resource_group, config,
pub_ssh_key_path='~/.ssh/id_rsa.pub'):
self.config = config
self.subscription_id = subscription_id
self.resource_group = resource_group
self.dns_label_prefix = self.name_generator.haikunate()
self.location = self.config['location']
pub_ssh_key_path = os.path.expanduser(pub_ssh_key_path)
# Will raise if file not exists or not enough permission
with open(pub_ssh_key_path, 'r') as pub_ssh_file_fd:
self.pub_ssh_key = pub_ssh_file_fd.read()
self.credentials = ServicePrincipalCredentials(
client_id=self.config['AZURE_CLIENT_ID'],
secret=self.config['AZURE_CLIENT_SECRET'],
tenant=self.config['AZURE_TENANT_ID']
)
self.client = ResourceManagementClient(
self.credentials, self.subscription_id)
def deploy(self, job_name, command='', blocksize=1):
instances = []
"""Deploy the template to a resource group."""
self.client.resource_groups.create_or_update(
self.resource_group,
{
'location': self.location,
}
)
template_path = os.path.join(os.path.dirname(
__file__), 'templates', 'template.json')
with open(template_path, 'r') as template_file_fd:
template = json.load(template_file_fd)
parameters = {
'sshKeyData': self.pub_ssh_key,
'vmName': 'azure-deployment-sample-vm',
'dnsLabelPrefix': self.dns_label_prefix
}
parameters = {k: {'value': v} for k, v in parameters.items()}
deployment_properties = {
'mode': DeploymentMode.incremental,
'template': template,
'parameters': parameters
}
for i in range(blocksize):
deployment_async_operation = self.client.deployments.create_or_update(
self.resource_group,
'azure-sample',
deployment_properties
)
instances.append(deployment_async_operation.wait())
return instances
def get_vm(self, resource_group_name, vm_name):
'''
you need to retry this just in case the credentials token expires,
that's where the decorator comes in
this will return all the data about the virtual machine
'''
return self.client.virtual_machines.get(
resource_group_name, vm_name, expand='instanceView')
def get_vm_status(self, vm_name, rgn):
'''
this will just return the status of the virtual machine
sometime the status may be unknown as shown by the azure portal;
in that case statuses[1] doesn't exist, hence retrying on IndexError
also, it may take on the order of minutes for the status to become
available so the decorator will bang on it forever
'''
rgn = rgn if rgn else self.resource_group
return self.client.virtual_machines.get(
rgn, vm_name).instance_view.statuses[1].display_status
|
Parsl/libsubmit | libsubmit/providers/azure/deployer.py | Deployer.get_vm | python | def get_vm(self, resource_group_name, vm_name):
'''
you need to retry this just in case the credentials token expires,
that's where the decorator comes in
this will return all the data about the virtual machine
'''
return self.client.virtual_machines.get(
resource_group_name, vm_name, expand='instanceView') | you need to retry this just in case the credentials token expires,
that's where the decorator comes in
this will return all the data about the virtual machine | train | https://github.com/Parsl/libsubmit/blob/27a41c16dd6f1c16d830a9ce1c97804920a59f64/libsubmit/providers/azure/deployer.py#L81-L88 | null | class Deployer(object):
""" Initialize the deployer class with subscription, resource group and public key.
:raises IOError: If the public key path cannot be read (access or not exists)
:raises KeyError: If AZURE_CLIENT_ID, AZURE_CLIENT_SECRET or AZURE_TENANT_ID env
variables or not defined
"""
config = ""
def __init__(self, subscription_id, resource_group, config,
pub_ssh_key_path='~/.ssh/id_rsa.pub'):
self.config = config
self.subscription_id = subscription_id
self.resource_group = resource_group
self.dns_label_prefix = self.name_generator.haikunate()
self.location = self.config['location']
pub_ssh_key_path = os.path.expanduser(pub_ssh_key_path)
# Will raise if file not exists or not enough permission
with open(pub_ssh_key_path, 'r') as pub_ssh_file_fd:
self.pub_ssh_key = pub_ssh_file_fd.read()
self.credentials = ServicePrincipalCredentials(
client_id=self.config['AZURE_CLIENT_ID'],
secret=self.config['AZURE_CLIENT_SECRET'],
tenant=self.config['AZURE_TENANT_ID']
)
self.client = ResourceManagementClient(
self.credentials, self.subscription_id)
def deploy(self, job_name, command='', blocksize=1):
instances = []
"""Deploy the template to a resource group."""
self.client.resource_groups.create_or_update(
self.resource_group,
{
'location': self.location,
}
)
template_path = os.path.join(os.path.dirname(
__file__), 'templates', 'template.json')
with open(template_path, 'r') as template_file_fd:
template = json.load(template_file_fd)
parameters = {
'sshKeyData': self.pub_ssh_key,
'vmName': 'azure-deployment-sample-vm',
'dnsLabelPrefix': self.dns_label_prefix
}
parameters = {k: {'value': v} for k, v in parameters.items()}
deployment_properties = {
'mode': DeploymentMode.incremental,
'template': template,
'parameters': parameters
}
for i in range(blocksize):
deployment_async_operation = self.client.deployments.create_or_update(
self.resource_group,
'azure-sample',
deployment_properties
)
instances.append(deployment_async_operation.wait())
return instances
def destroy(self, job_ids):
"""Destroy the given resource group"""
for job_id in job_ids:
self.client.resource_groups.delete(self.resource_group)
def get_vm_status(self, vm_name, rgn):
'''
this will just return the status of the virtual machine
sometime the status may be unknown as shown by the azure portal;
in that case statuses[1] doesn't exist, hence retrying on IndexError
also, it may take on the order of minutes for the status to become
available so the decorator will bang on it forever
'''
rgn = rgn if rgn else self.resource_group
return self.client.virtual_machines.get(
rgn, vm_name).instance_view.statuses[1].display_status
|
Parsl/libsubmit | libsubmit/providers/azure/deployer.py | Deployer.get_vm_status | python | def get_vm_status(self, vm_name, rgn):
'''
this will just return the status of the virtual machine
sometime the status may be unknown as shown by the azure portal;
in that case statuses[1] doesn't exist, hence retrying on IndexError
also, it may take on the order of minutes for the status to become
available so the decorator will bang on it forever
'''
rgn = rgn if rgn else self.resource_group
return self.client.virtual_machines.get(
rgn, vm_name).instance_view.statuses[1].display_status | this will just return the status of the virtual machine
sometime the status may be unknown as shown by the azure portal;
in that case statuses[1] doesn't exist, hence retrying on IndexError
also, it may take on the order of minutes for the status to become
available so the decorator will bang on it forever | train | https://github.com/Parsl/libsubmit/blob/27a41c16dd6f1c16d830a9ce1c97804920a59f64/libsubmit/providers/azure/deployer.py#L90-L100 | null | class Deployer(object):
""" Initialize the deployer class with subscription, resource group and public key.
:raises IOError: If the public key path cannot be read (access or not exists)
:raises KeyError: If AZURE_CLIENT_ID, AZURE_CLIENT_SECRET or AZURE_TENANT_ID env
variables or not defined
"""
config = ""
def __init__(self, subscription_id, resource_group, config,
pub_ssh_key_path='~/.ssh/id_rsa.pub'):
self.config = config
self.subscription_id = subscription_id
self.resource_group = resource_group
self.dns_label_prefix = self.name_generator.haikunate()
self.location = self.config['location']
pub_ssh_key_path = os.path.expanduser(pub_ssh_key_path)
# Will raise if file not exists or not enough permission
with open(pub_ssh_key_path, 'r') as pub_ssh_file_fd:
self.pub_ssh_key = pub_ssh_file_fd.read()
self.credentials = ServicePrincipalCredentials(
client_id=self.config['AZURE_CLIENT_ID'],
secret=self.config['AZURE_CLIENT_SECRET'],
tenant=self.config['AZURE_TENANT_ID']
)
self.client = ResourceManagementClient(
self.credentials, self.subscription_id)
def deploy(self, job_name, command='', blocksize=1):
instances = []
"""Deploy the template to a resource group."""
self.client.resource_groups.create_or_update(
self.resource_group,
{
'location': self.location,
}
)
template_path = os.path.join(os.path.dirname(
__file__), 'templates', 'template.json')
with open(template_path, 'r') as template_file_fd:
template = json.load(template_file_fd)
parameters = {
'sshKeyData': self.pub_ssh_key,
'vmName': 'azure-deployment-sample-vm',
'dnsLabelPrefix': self.dns_label_prefix
}
parameters = {k: {'value': v} for k, v in parameters.items()}
deployment_properties = {
'mode': DeploymentMode.incremental,
'template': template,
'parameters': parameters
}
for i in range(blocksize):
deployment_async_operation = self.client.deployments.create_or_update(
self.resource_group,
'azure-sample',
deployment_properties
)
instances.append(deployment_async_operation.wait())
return instances
def destroy(self, job_ids):
"""Destroy the given resource group"""
for job_id in job_ids:
self.client.resource_groups.delete(self.resource_group)
def get_vm(self, resource_group_name, vm_name):
'''
you need to retry this just in case the credentials token expires,
that's where the decorator comes in
this will return all the data about the virtual machine
'''
return self.client.virtual_machines.get(
resource_group_name, vm_name, expand='instanceView')
|
Parsl/libsubmit | libsubmit/providers/azure/azure.py | AzureProvider.submit | python | def submit(self, command='sleep 1', blocksize=1, job_name="parsl.auto"):
job_name = "parsl.auto.{0}".format(time.time())
[instance, *rest] = self.deployer.deploy(command=command, job_name=job_name, blocksize=1)
if not instance:
logger.error("Failed to submit request to Azure")
return None
logger.debug("Started instance_id: {0}".format(instance.instance_id))
state = translate_table.get(instance.state['Name'], "PENDING")
self.resources[instance.instance_id] = {"job_id": instance.instance_id, "instance": instance, "status": state}
return instance.instance_id | Submit command to an Azure instance.
Submit returns an ID that corresponds to the task that was just submitted.
Parameters
----------
command : str
Command to be invoked on the remote side.
blocksize : int
Number of blocks requested.
job_name : str
Prefix for job name.
Returns
-------
None or str
If at capacity (no more can be provisioned), None is returned. Otherwise,
an identifier for the job is returned. | train | https://github.com/Parsl/libsubmit/blob/27a41c16dd6f1c16d830a9ce1c97804920a59f64/libsubmit/providers/azure/azure.py#L112-L146 | null | class AzureProvider(ExecutionProvider, RepresentationMixin):
"""A provider for using Azure resources.
Parameters
----------
profile : str
Profile to be used if different from the standard Azure config file ~/.azure/config.
template_file : str
Location of template file for Azure instance. Default is 'templates/template.json'.
walltime : str
Walltime requested per block in HH:MM:SS.
azure_template_file : str
Path to the template file for the Azure instance.
init_blocks : int
Number of blocks to provision at the start of the run. Default is 1.
min_blocks : int
Minimum number of blocks to maintain. Default is 0.
max_blocks : int
Maximum number of blocks to maintain. Default is 10.
nodes_per_block : int
Nodes to provision per block. Default is 1.
"""
def __init__(self,
subscription_id,
username,
password,
label='azure',
template_file='template.json',
init_blocks=1,
min_blocks=0,
max_blocks=1,
nodes_per_block=1,
state_file=None):
self.configure_logger()
if not _azure_enabled:
raise OptionalModuleMissing(['azure'], "Azure Provider requires the azure module.")
credentials = UserPassCredentials(username, password)
self.resource_client = ResourceManagementClient(credentials, subscription_id)
self.storage_client = StorageManagementClient(credentials, subscription_id)
self.resource_group_name = 'my_resource_group'
self.deployer = Deployer(subscription_id, self.resource_group_name, self.read_configs(config))
self.channel = channel
self.config = config
self.provisioned_blocks = 0
self.resources = {}
self.instances = []
self.max_nodes = max_blocks * nodes_per_block
try:
self.initialize_boto_client()
except Exception as e:
logger.error("Azure '{}' failed to initialize.".format(self.label))
raise e
try:
if state_file is None:
state_file = '.azure_{}.json'.format(self.label)
self.read_state_file(state_file)
except Exception:
self.create_vpc().id
logger.info("No State File. Cannot load previous options. Creating new infrastructure.")
self.write_state_file()
def status(self, job_ids):
"""Get the status of a list of jobs identified by their ids.
Parameters
----------
job_ids : list of str
Identifiers for the jobs.
Returns
-------
list of int
Status codes for each requested job.
"""
states = []
statuses = self.deployer.get_vm_status([self.resources.get(job_id) for job_id in job_ids])
for status in statuses:
states.append(translate_table.get(status.state['Name'], "PENDING"))
return states
def cancel(self, job_ids):
"""Cancel jobs specified by a list of job ids.
Parameters
----------
list of str
List of identifiers of jobs which should be canceled.
Returns
-------
list of bool
For each entry, True if the cancel operation is successful, otherwise False.
"""
for job_id in job_ids:
try:
self.deployer.destroy(self.resources.get(job_id))
return True
except e:
logger.error("Failed to cancel {}".format(repr(job_id)))
logger.error(e)
return False
@property
def scaling_enabled():
return True
@property
def current_capacity(self):
"""Returns the current blocksize."""
return len(self.instances)
|
Parsl/libsubmit | libsubmit/providers/azure/azure.py | AzureProvider.status | python | def status(self, job_ids):
states = []
statuses = self.deployer.get_vm_status([self.resources.get(job_id) for job_id in job_ids])
for status in statuses:
states.append(translate_table.get(status.state['Name'], "PENDING"))
return states | Get the status of a list of jobs identified by their ids.
Parameters
----------
job_ids : list of str
Identifiers for the jobs.
Returns
-------
list of int
Status codes for each requested job. | train | https://github.com/Parsl/libsubmit/blob/27a41c16dd6f1c16d830a9ce1c97804920a59f64/libsubmit/providers/azure/azure.py#L148-L165 | null | class AzureProvider(ExecutionProvider, RepresentationMixin):
"""A provider for using Azure resources.
Parameters
----------
profile : str
Profile to be used if different from the standard Azure config file ~/.azure/config.
template_file : str
Location of template file for Azure instance. Default is 'templates/template.json'.
walltime : str
Walltime requested per block in HH:MM:SS.
azure_template_file : str
Path to the template file for the Azure instance.
init_blocks : int
Number of blocks to provision at the start of the run. Default is 1.
min_blocks : int
Minimum number of blocks to maintain. Default is 0.
max_blocks : int
Maximum number of blocks to maintain. Default is 10.
nodes_per_block : int
Nodes to provision per block. Default is 1.
"""
def __init__(self,
subscription_id,
username,
password,
label='azure',
template_file='template.json',
init_blocks=1,
min_blocks=0,
max_blocks=1,
nodes_per_block=1,
state_file=None):
self.configure_logger()
if not _azure_enabled:
raise OptionalModuleMissing(['azure'], "Azure Provider requires the azure module.")
credentials = UserPassCredentials(username, password)
self.resource_client = ResourceManagementClient(credentials, subscription_id)
self.storage_client = StorageManagementClient(credentials, subscription_id)
self.resource_group_name = 'my_resource_group'
self.deployer = Deployer(subscription_id, self.resource_group_name, self.read_configs(config))
self.channel = channel
self.config = config
self.provisioned_blocks = 0
self.resources = {}
self.instances = []
self.max_nodes = max_blocks * nodes_per_block
try:
self.initialize_boto_client()
except Exception as e:
logger.error("Azure '{}' failed to initialize.".format(self.label))
raise e
try:
if state_file is None:
state_file = '.azure_{}.json'.format(self.label)
self.read_state_file(state_file)
except Exception:
self.create_vpc().id
logger.info("No State File. Cannot load previous options. Creating new infrastructure.")
self.write_state_file()
def submit(self, command='sleep 1', blocksize=1, job_name="parsl.auto"):
"""Submit command to an Azure instance.
Submit returns an ID that corresponds to the task that was just submitted.
Parameters
----------
command : str
Command to be invoked on the remote side.
blocksize : int
Number of blocks requested.
job_name : str
Prefix for job name.
Returns
-------
None or str
If at capacity (no more can be provisioned), None is returned. Otherwise,
an identifier for the job is returned.
"""
job_name = "parsl.auto.{0}".format(time.time())
[instance, *rest] = self.deployer.deploy(command=command, job_name=job_name, blocksize=1)
if not instance:
logger.error("Failed to submit request to Azure")
return None
logger.debug("Started instance_id: {0}".format(instance.instance_id))
state = translate_table.get(instance.state['Name'], "PENDING")
self.resources[instance.instance_id] = {"job_id": instance.instance_id, "instance": instance, "status": state}
return instance.instance_id
def cancel(self, job_ids):
"""Cancel jobs specified by a list of job ids.
Parameters
----------
list of str
List of identifiers of jobs which should be canceled.
Returns
-------
list of bool
For each entry, True if the cancel operation is successful, otherwise False.
"""
for job_id in job_ids:
try:
self.deployer.destroy(self.resources.get(job_id))
return True
except e:
logger.error("Failed to cancel {}".format(repr(job_id)))
logger.error(e)
return False
@property
def scaling_enabled():
return True
@property
def current_capacity(self):
"""Returns the current blocksize."""
return len(self.instances)
|
Parsl/libsubmit | libsubmit/providers/azure/azure.py | AzureProvider.cancel | python | def cancel(self, job_ids):
for job_id in job_ids:
try:
self.deployer.destroy(self.resources.get(job_id))
return True
except e:
logger.error("Failed to cancel {}".format(repr(job_id)))
logger.error(e)
return False | Cancel jobs specified by a list of job ids.
Parameters
----------
list of str
List of identifiers of jobs which should be canceled.
Returns
-------
list of bool
For each entry, True if the cancel operation is successful, otherwise False. | train | https://github.com/Parsl/libsubmit/blob/27a41c16dd6f1c16d830a9ce1c97804920a59f64/libsubmit/providers/azure/azure.py#L167-L187 | null | class AzureProvider(ExecutionProvider, RepresentationMixin):
"""A provider for using Azure resources.
Parameters
----------
profile : str
Profile to be used if different from the standard Azure config file ~/.azure/config.
template_file : str
Location of template file for Azure instance. Default is 'templates/template.json'.
walltime : str
Walltime requested per block in HH:MM:SS.
azure_template_file : str
Path to the template file for the Azure instance.
init_blocks : int
Number of blocks to provision at the start of the run. Default is 1.
min_blocks : int
Minimum number of blocks to maintain. Default is 0.
max_blocks : int
Maximum number of blocks to maintain. Default is 10.
nodes_per_block : int
Nodes to provision per block. Default is 1.
"""
def __init__(self,
subscription_id,
username,
password,
label='azure',
template_file='template.json',
init_blocks=1,
min_blocks=0,
max_blocks=1,
nodes_per_block=1,
state_file=None):
self.configure_logger()
if not _azure_enabled:
raise OptionalModuleMissing(['azure'], "Azure Provider requires the azure module.")
credentials = UserPassCredentials(username, password)
self.resource_client = ResourceManagementClient(credentials, subscription_id)
self.storage_client = StorageManagementClient(credentials, subscription_id)
self.resource_group_name = 'my_resource_group'
self.deployer = Deployer(subscription_id, self.resource_group_name, self.read_configs(config))
self.channel = channel
self.config = config
self.provisioned_blocks = 0
self.resources = {}
self.instances = []
self.max_nodes = max_blocks * nodes_per_block
try:
self.initialize_boto_client()
except Exception as e:
logger.error("Azure '{}' failed to initialize.".format(self.label))
raise e
try:
if state_file is None:
state_file = '.azure_{}.json'.format(self.label)
self.read_state_file(state_file)
except Exception:
self.create_vpc().id
logger.info("No State File. Cannot load previous options. Creating new infrastructure.")
self.write_state_file()
def submit(self, command='sleep 1', blocksize=1, job_name="parsl.auto"):
"""Submit command to an Azure instance.
Submit returns an ID that corresponds to the task that was just submitted.
Parameters
----------
command : str
Command to be invoked on the remote side.
blocksize : int
Number of blocks requested.
job_name : str
Prefix for job name.
Returns
-------
None or str
If at capacity (no more can be provisioned), None is returned. Otherwise,
an identifier for the job is returned.
"""
job_name = "parsl.auto.{0}".format(time.time())
[instance, *rest] = self.deployer.deploy(command=command, job_name=job_name, blocksize=1)
if not instance:
logger.error("Failed to submit request to Azure")
return None
logger.debug("Started instance_id: {0}".format(instance.instance_id))
state = translate_table.get(instance.state['Name'], "PENDING")
self.resources[instance.instance_id] = {"job_id": instance.instance_id, "instance": instance, "status": state}
return instance.instance_id
def status(self, job_ids):
"""Get the status of a list of jobs identified by their ids.
Parameters
----------
job_ids : list of str
Identifiers for the jobs.
Returns
-------
list of int
Status codes for each requested job.
"""
states = []
statuses = self.deployer.get_vm_status([self.resources.get(job_id) for job_id in job_ids])
for status in statuses:
states.append(translate_table.get(status.state['Name'], "PENDING"))
return states
@property
def scaling_enabled():
return True
@property
def current_capacity(self):
"""Returns the current blocksize."""
return len(self.instances)
|
Parsl/libsubmit | libsubmit/utils.py | wtime_to_minutes | python | def wtime_to_minutes(time_string):
''' wtime_to_minutes
Convert standard wallclock time string to minutes.
Args:
- Time_string in HH:MM:SS format
Returns:
(int) minutes
'''
hours, mins, seconds = time_string.split(':')
return int(hours) * 60 + int(mins) + 1 | wtime_to_minutes
Convert standard wallclock time string to minutes.
Args:
- Time_string in HH:MM:SS format
Returns:
(int) minutes | train | https://github.com/Parsl/libsubmit/blob/27a41c16dd6f1c16d830a9ce1c97804920a59f64/libsubmit/utils.py#L4-L17 | null | import inspect
class RepresentationMixin(object):
"""A mixin class for adding a __repr__ method.
The __repr__ method will return a string equivalent to the code used to instantiate
the child class, with any defaults included explicitly. The __max_width__ class variable
controls the maximum width of the representation string. If this width is exceeded,
the representation string will be split up, with one argument or keyword argument per line.
Any arguments or keyword arguments in the constructor must be defined as attributes, or
an AttributeError will be raised.
Examples
--------
>>> from libsubmit.utils import RepresentationMixin
>>> class Foo(RepresentationMixin):
def __init__(self, first, second, third='three', fourth='fourth'):
self.first = first
self.second = second
self.third = third
self.fourth = fourth
>>> bar = Foo(1, 'two', fourth='baz')
>>> bar
Foo(1, 'two', third='three', fourth='baz')
"""
__max_width__ = 80
def __repr__(self):
argspec = inspect.getargspec(self.__init__)
if len(argspec.args) > 1:
defaults = dict(zip(reversed(argspec.args), reversed(argspec.defaults)))
else:
defaults = []
for arg in argspec.args[1:]:
if not hasattr(self, arg):
template = 'class {} uses {} in the constructor, but does not define it as an attribute'
raise AttributeError(template.format(self.__class__.__name__, arg))
args = [getattr(self, a) for a in argspec.args[1:-len(defaults)]]
kwargs = {key: getattr(self, key) for key in defaults}
def assemble_multiline(args, kwargs):
def indent(text):
lines = text.splitlines()
if len(lines) <= 1:
return text
return "\n".join(" " + l for l in lines).strip()
args = ["\n {},".format(indent(repr(a))) for a in args]
kwargs = ["\n {}={}".format(k, indent(repr(v)))
for k, v in sorted(kwargs.items())]
info = "".join(args) + ", ".join(kwargs)
return self.__class__.__name__ + "({}\n)".format(info)
def assemble_line(args, kwargs):
kwargs = ['{}={}'.format(k, repr(v)) for k, v in sorted(kwargs.items())]
info = ", ".join([repr(a) for a in args] + kwargs)
return self.__class__.__name__ + "({})".format(info)
if len(assemble_line(args, kwargs)) <= self.__class__.__max_width__:
return assemble_line(args, kwargs)
else:
return assemble_multiline(args, kwargs)
|
Parsl/libsubmit | libsubmit/providers/kubernetes/kube.py | KubernetesProvider.submit | python | def submit(self, cmd_string, blocksize, job_name="parsl.auto"):
if not self.resources:
job_name = "{0}-{1}".format(job_name, time.time()).split(".")[0]
self.deployment_name = '{}-{}-deployment'.format(job_name,
str(time.time()).split('.')[0])
formatted_cmd = template_string.format(command=cmd_string,
overrides=self.config["execution"]["block"]["options"].get("overrides", ''))
print("Creating replicas :", self.init_blocks)
self.deployment_obj = self._create_deployment_object(job_name,
self.image,
self.deployment_name,
cmd_string=formatted_cmd,
replicas=self.init_blocks)
logger.debug("Deployment name :{}".format(self.deployment_name))
self._create_deployment(self.deployment_obj)
self.resources[self.deployment_name] = {'status': 'RUNNING',
'pods': self.init_blocks}
return self.deployment_name | Submit a job
Args:
- cmd_string :(String) - Name of the container to initiate
- blocksize :(float) - Number of replicas
Kwargs:
- job_name (String): Name for job, must be unique
Returns:
- None: At capacity, cannot provision more
- job_id: (string) Identifier for the job | train | https://github.com/Parsl/libsubmit/blob/27a41c16dd6f1c16d830a9ce1c97804920a59f64/libsubmit/providers/kubernetes/kube.py#L69-L104 | [
"def _create_deployment_object(self, job_name, job_image,\n deployment_name, port=80,\n replicas=1,\n cmd_string=None,\n engine_json_file='~/.ipython/profile_default/security/ipcontroller-engine.json'... | class KubernetesProvider(ExecutionProvider):
""" Kubernetes execution provider:
TODO: put in a config
"""
def __repr__(self):
return "<Kubernetes Execution Provider for site:{0}>".format(self.sitename)
def __init__(self, config, channel=None):
""" Initialize the Kubernetes execution provider class
Args:
- Config (dict): Dictionary with all the config options.
KWargs :
- channel (channel object) : default=None A channel object
"""
self.channel = channel
if not _kubernetes_enabled:
raise OptionalModuleMissing(['kubernetes'],
"Kubernetes provider requires kubernetes module and config.")
self.kube_client = client.ExtensionsV1beta1Api()
self.config = config
self.sitename = self.config['site']
self.namespace = self.config['execution']['namespace']
self.image = self.config['execution']['image']
self.init_blocks = self.config["execution"]["block"]["initBlocks"]
self.min_blocks = self.config["execution"]["block"]["minBlocks"]
self.max_blocks = self.config["execution"]["block"]["maxBlocks"]
self.user_id = None
self.group_id = None
self.run_as_non_root = None
if 'security' in self.config['execution']:
self.user_id = self.config["execution"]['security']["user_id"]
self.group_id = self.config["execution"]['security']["group_id"]
self.run_as_non_root = self.config["execution"]['security']["run_as_non_root"]
self.secret = None
if 'secret' in self.config['execution']:
self.secret = self.config['execution']['secret']
# Dictionary that keeps track of jobs, keyed on job_id
self.resources = {}
def status(self, job_ids):
""" Get the status of a list of jobs identified by the job identifiers
returned from the submit request.
Args:
- job_ids (list) : A list of job identifiers
Returns:
- A list of status from ['PENDING', 'RUNNING', 'CANCELLED', 'COMPLETED',
'FAILED', 'TIMEOUT'] corresponding to each job_id in the job_ids list.
Raises:
- ExecutionProviderExceptions or its subclasses
"""
self._status()
# This is a hack
return ['RUNNING' for jid in job_ids]
def cancel(self, job_ids):
""" Cancels the jobs specified by a list of job ids
Args:
job_ids : [<job_id> ...]
Returns :
[True/False...] : If the cancel operation fails the entire list will be False.
"""
for job in job_ids:
logger.debug("Terminating job/proc_id : {0}".format(job))
# Here we are assuming that for local, the job_ids are the process id's
self._delete_deployment(job)
self.resources[job]['status'] = 'CANCELLED'
rets = [True for i in job_ids]
return rets
def _status(self):
""" Internal: Do not call. Returns the status list for a list of job_ids
Args:
self
Returns:
[status...] : Status list of all jobs
"""
jobs_ids = list(self.resources.keys())
# TODO: fix this
return jobs_ids
# do something to get the deployment's status
def _create_deployment_object(self, job_name, job_image,
deployment_name, port=80,
replicas=1,
cmd_string=None,
engine_json_file='~/.ipython/profile_default/security/ipcontroller-engine.json',
engine_dir='.'):
""" Create a kubernetes deployment for the job.
Args:
- job_name (string) : Name of the job and deployment
- job_image (string) : Docker image to launch
KWargs:
- port (integer) : Container port
- replicas : Number of replica containers to maintain
Returns:
- True: The deployment object to launch
"""
# sorry, quick hack that doesn't pass this stuff through to test it works.
# TODO it also doesn't only add what is set :(
security_context = None
if 'security' in self.config['execution']:
security_context = client.V1SecurityContext(run_as_group=self.group_id,
run_as_user=self.user_id,
run_as_non_root=self.run_as_non_root)
# self.user_id = None
# self.group_id = None
# self.run_as_non_root = None
# Create the enviornment variables and command to initiate IPP
environment_vars = client.V1EnvVar(name="TEST", value="SOME DATA")
launch_args = ["-c", "{0}; /app/deploy.sh;".format(cmd_string)]
print(launch_args)
# Configureate Pod template container
container = None
if security_context:
container = client.V1Container(
name=job_name,
image=job_image,
ports=[client.V1ContainerPort(container_port=port)],
command=['/bin/bash'],
args=launch_args,
env=[environment_vars],
security_context=security_context)
else:
container = client.V1Container(
name=job_name,
image=job_image,
ports=[client.V1ContainerPort(container_port=port)],
command=['/bin/bash'],
args=launch_args,
env=[environment_vars])
# Create a secret to enable pulling images from secure repositories
secret = None
if self.secret:
secret = client.V1LocalObjectReference(name=self.secret)
# Create and configurate a spec section
template = client.V1PodTemplateSpec(
metadata=client.V1ObjectMeta(labels={"app": job_name}),
spec=client.V1PodSpec(containers=[container], image_pull_secrets=[secret]))
# Create the specification of deployment
spec = client.ExtensionsV1beta1DeploymentSpec(replicas=replicas,
template=template)
# Instantiate the deployment object
deployment = client.ExtensionsV1beta1Deployment(
api_version="extensions/v1beta1",
kind="Deployment",
metadata=client.V1ObjectMeta(name=deployment_name),
spec=spec)
return deployment
def _create_deployment(self, deployment):
""" Create the kubernetes deployment """
api_response = self.kube_client.create_namespaced_deployment(
body=deployment,
namespace=self.namespace)
logger.debug("Deployment created. status='{0}'".format(str(api_response.status)))
def _delete_deployment(self, deployment_name):
""" Delete deployment """
api_response = self.kube_client.delete_namespaced_deployment(
name=deployment_name,
namespace=self.namespace,
body=client.V1DeleteOptions(
propagation_policy='Foreground',
grace_period_seconds=5))
logger.debug("Deployment deleted. status='{0}'".format(
str(api_response.status)))
@property
def scaling_enabled(self):
return False
@property
def channels_required(self):
return False
|
Parsl/libsubmit | libsubmit/providers/kubernetes/kube.py | KubernetesProvider.cancel | python | def cancel(self, job_ids):
for job in job_ids:
logger.debug("Terminating job/proc_id : {0}".format(job))
# Here we are assuming that for local, the job_ids are the process id's
self._delete_deployment(job)
self.resources[job]['status'] = 'CANCELLED'
rets = [True for i in job_ids]
return rets | Cancels the jobs specified by a list of job ids
Args:
job_ids : [<job_id> ...]
Returns :
[True/False...] : If the cancel operation fails the entire list will be False. | train | https://github.com/Parsl/libsubmit/blob/27a41c16dd6f1c16d830a9ce1c97804920a59f64/libsubmit/providers/kubernetes/kube.py#L125-L142 | [
"def _delete_deployment(self, deployment_name):\n \"\"\" Delete deployment \"\"\"\n\n api_response = self.kube_client.delete_namespaced_deployment(\n name=deployment_name,\n namespace=self.namespace,\n body=client.V1DeleteOptions(\n propagation_policy='Foreground',\n ... | class KubernetesProvider(ExecutionProvider):
""" Kubernetes execution provider:
TODO: put in a config
"""
def __repr__(self):
return "<Kubernetes Execution Provider for site:{0}>".format(self.sitename)
def __init__(self, config, channel=None):
""" Initialize the Kubernetes execution provider class
Args:
- Config (dict): Dictionary with all the config options.
KWargs :
- channel (channel object) : default=None A channel object
"""
self.channel = channel
if not _kubernetes_enabled:
raise OptionalModuleMissing(['kubernetes'],
"Kubernetes provider requires kubernetes module and config.")
self.kube_client = client.ExtensionsV1beta1Api()
self.config = config
self.sitename = self.config['site']
self.namespace = self.config['execution']['namespace']
self.image = self.config['execution']['image']
self.init_blocks = self.config["execution"]["block"]["initBlocks"]
self.min_blocks = self.config["execution"]["block"]["minBlocks"]
self.max_blocks = self.config["execution"]["block"]["maxBlocks"]
self.user_id = None
self.group_id = None
self.run_as_non_root = None
if 'security' in self.config['execution']:
self.user_id = self.config["execution"]['security']["user_id"]
self.group_id = self.config["execution"]['security']["group_id"]
self.run_as_non_root = self.config["execution"]['security']["run_as_non_root"]
self.secret = None
if 'secret' in self.config['execution']:
self.secret = self.config['execution']['secret']
# Dictionary that keeps track of jobs, keyed on job_id
self.resources = {}
def submit(self, cmd_string, blocksize, job_name="parsl.auto"):
""" Submit a job
Args:
- cmd_string :(String) - Name of the container to initiate
- blocksize :(float) - Number of replicas
Kwargs:
- job_name (String): Name for job, must be unique
Returns:
- None: At capacity, cannot provision more
- job_id: (string) Identifier for the job
"""
if not self.resources:
job_name = "{0}-{1}".format(job_name, time.time()).split(".")[0]
self.deployment_name = '{}-{}-deployment'.format(job_name,
str(time.time()).split('.')[0])
formatted_cmd = template_string.format(command=cmd_string,
overrides=self.config["execution"]["block"]["options"].get("overrides", ''))
print("Creating replicas :", self.init_blocks)
self.deployment_obj = self._create_deployment_object(job_name,
self.image,
self.deployment_name,
cmd_string=formatted_cmd,
replicas=self.init_blocks)
logger.debug("Deployment name :{}".format(self.deployment_name))
self._create_deployment(self.deployment_obj)
self.resources[self.deployment_name] = {'status': 'RUNNING',
'pods': self.init_blocks}
return self.deployment_name
def status(self, job_ids):
""" Get the status of a list of jobs identified by the job identifiers
returned from the submit request.
Args:
- job_ids (list) : A list of job identifiers
Returns:
- A list of status from ['PENDING', 'RUNNING', 'CANCELLED', 'COMPLETED',
'FAILED', 'TIMEOUT'] corresponding to each job_id in the job_ids list.
Raises:
- ExecutionProviderExceptions or its subclasses
"""
self._status()
# This is a hack
return ['RUNNING' for jid in job_ids]
def _status(self):
""" Internal: Do not call. Returns the status list for a list of job_ids
Args:
self
Returns:
[status...] : Status list of all jobs
"""
jobs_ids = list(self.resources.keys())
# TODO: fix this
return jobs_ids
# do something to get the deployment's status
def _create_deployment_object(self, job_name, job_image,
deployment_name, port=80,
replicas=1,
cmd_string=None,
engine_json_file='~/.ipython/profile_default/security/ipcontroller-engine.json',
engine_dir='.'):
""" Create a kubernetes deployment for the job.
Args:
- job_name (string) : Name of the job and deployment
- job_image (string) : Docker image to launch
KWargs:
- port (integer) : Container port
- replicas : Number of replica containers to maintain
Returns:
- True: The deployment object to launch
"""
# sorry, quick hack that doesn't pass this stuff through to test it works.
# TODO it also doesn't only add what is set :(
security_context = None
if 'security' in self.config['execution']:
security_context = client.V1SecurityContext(run_as_group=self.group_id,
run_as_user=self.user_id,
run_as_non_root=self.run_as_non_root)
# self.user_id = None
# self.group_id = None
# self.run_as_non_root = None
# Create the enviornment variables and command to initiate IPP
environment_vars = client.V1EnvVar(name="TEST", value="SOME DATA")
launch_args = ["-c", "{0}; /app/deploy.sh;".format(cmd_string)]
print(launch_args)
# Configureate Pod template container
container = None
if security_context:
container = client.V1Container(
name=job_name,
image=job_image,
ports=[client.V1ContainerPort(container_port=port)],
command=['/bin/bash'],
args=launch_args,
env=[environment_vars],
security_context=security_context)
else:
container = client.V1Container(
name=job_name,
image=job_image,
ports=[client.V1ContainerPort(container_port=port)],
command=['/bin/bash'],
args=launch_args,
env=[environment_vars])
# Create a secret to enable pulling images from secure repositories
secret = None
if self.secret:
secret = client.V1LocalObjectReference(name=self.secret)
# Create and configurate a spec section
template = client.V1PodTemplateSpec(
metadata=client.V1ObjectMeta(labels={"app": job_name}),
spec=client.V1PodSpec(containers=[container], image_pull_secrets=[secret]))
# Create the specification of deployment
spec = client.ExtensionsV1beta1DeploymentSpec(replicas=replicas,
template=template)
# Instantiate the deployment object
deployment = client.ExtensionsV1beta1Deployment(
api_version="extensions/v1beta1",
kind="Deployment",
metadata=client.V1ObjectMeta(name=deployment_name),
spec=spec)
return deployment
def _create_deployment(self, deployment):
""" Create the kubernetes deployment """
api_response = self.kube_client.create_namespaced_deployment(
body=deployment,
namespace=self.namespace)
logger.debug("Deployment created. status='{0}'".format(str(api_response.status)))
def _delete_deployment(self, deployment_name):
""" Delete deployment """
api_response = self.kube_client.delete_namespaced_deployment(
name=deployment_name,
namespace=self.namespace,
body=client.V1DeleteOptions(
propagation_policy='Foreground',
grace_period_seconds=5))
logger.debug("Deployment deleted. status='{0}'".format(
str(api_response.status)))
@property
def scaling_enabled(self):
return False
@property
def channels_required(self):
return False
|
Parsl/libsubmit | libsubmit/providers/kubernetes/kube.py | KubernetesProvider._create_deployment_object | python | def _create_deployment_object(self, job_name, job_image,
deployment_name, port=80,
replicas=1,
cmd_string=None,
engine_json_file='~/.ipython/profile_default/security/ipcontroller-engine.json',
engine_dir='.'):
# sorry, quick hack that doesn't pass this stuff through to test it works.
# TODO it also doesn't only add what is set :(
security_context = None
if 'security' in self.config['execution']:
security_context = client.V1SecurityContext(run_as_group=self.group_id,
run_as_user=self.user_id,
run_as_non_root=self.run_as_non_root)
# self.user_id = None
# self.group_id = None
# self.run_as_non_root = None
# Create the enviornment variables and command to initiate IPP
environment_vars = client.V1EnvVar(name="TEST", value="SOME DATA")
launch_args = ["-c", "{0}; /app/deploy.sh;".format(cmd_string)]
print(launch_args)
# Configureate Pod template container
container = None
if security_context:
container = client.V1Container(
name=job_name,
image=job_image,
ports=[client.V1ContainerPort(container_port=port)],
command=['/bin/bash'],
args=launch_args,
env=[environment_vars],
security_context=security_context)
else:
container = client.V1Container(
name=job_name,
image=job_image,
ports=[client.V1ContainerPort(container_port=port)],
command=['/bin/bash'],
args=launch_args,
env=[environment_vars])
# Create a secret to enable pulling images from secure repositories
secret = None
if self.secret:
secret = client.V1LocalObjectReference(name=self.secret)
# Create and configurate a spec section
template = client.V1PodTemplateSpec(
metadata=client.V1ObjectMeta(labels={"app": job_name}),
spec=client.V1PodSpec(containers=[container], image_pull_secrets=[secret]))
# Create the specification of deployment
spec = client.ExtensionsV1beta1DeploymentSpec(replicas=replicas,
template=template)
# Instantiate the deployment object
deployment = client.ExtensionsV1beta1Deployment(
api_version="extensions/v1beta1",
kind="Deployment",
metadata=client.V1ObjectMeta(name=deployment_name),
spec=spec)
return deployment | Create a kubernetes deployment for the job.
Args:
- job_name (string) : Name of the job and deployment
- job_image (string) : Docker image to launch
KWargs:
- port (integer) : Container port
- replicas : Number of replica containers to maintain
Returns:
- True: The deployment object to launch | train | https://github.com/Parsl/libsubmit/blob/27a41c16dd6f1c16d830a9ce1c97804920a59f64/libsubmit/providers/kubernetes/kube.py#L159-L235 | null | class KubernetesProvider(ExecutionProvider):
""" Kubernetes execution provider:
TODO: put in a config
"""
def __repr__(self):
return "<Kubernetes Execution Provider for site:{0}>".format(self.sitename)
def __init__(self, config, channel=None):
""" Initialize the Kubernetes execution provider class
Args:
- Config (dict): Dictionary with all the config options.
KWargs :
- channel (channel object) : default=None A channel object
"""
self.channel = channel
if not _kubernetes_enabled:
raise OptionalModuleMissing(['kubernetes'],
"Kubernetes provider requires kubernetes module and config.")
self.kube_client = client.ExtensionsV1beta1Api()
self.config = config
self.sitename = self.config['site']
self.namespace = self.config['execution']['namespace']
self.image = self.config['execution']['image']
self.init_blocks = self.config["execution"]["block"]["initBlocks"]
self.min_blocks = self.config["execution"]["block"]["minBlocks"]
self.max_blocks = self.config["execution"]["block"]["maxBlocks"]
self.user_id = None
self.group_id = None
self.run_as_non_root = None
if 'security' in self.config['execution']:
self.user_id = self.config["execution"]['security']["user_id"]
self.group_id = self.config["execution"]['security']["group_id"]
self.run_as_non_root = self.config["execution"]['security']["run_as_non_root"]
self.secret = None
if 'secret' in self.config['execution']:
self.secret = self.config['execution']['secret']
# Dictionary that keeps track of jobs, keyed on job_id
self.resources = {}
def submit(self, cmd_string, blocksize, job_name="parsl.auto"):
""" Submit a job
Args:
- cmd_string :(String) - Name of the container to initiate
- blocksize :(float) - Number of replicas
Kwargs:
- job_name (String): Name for job, must be unique
Returns:
- None: At capacity, cannot provision more
- job_id: (string) Identifier for the job
"""
if not self.resources:
job_name = "{0}-{1}".format(job_name, time.time()).split(".")[0]
self.deployment_name = '{}-{}-deployment'.format(job_name,
str(time.time()).split('.')[0])
formatted_cmd = template_string.format(command=cmd_string,
overrides=self.config["execution"]["block"]["options"].get("overrides", ''))
print("Creating replicas :", self.init_blocks)
self.deployment_obj = self._create_deployment_object(job_name,
self.image,
self.deployment_name,
cmd_string=formatted_cmd,
replicas=self.init_blocks)
logger.debug("Deployment name :{}".format(self.deployment_name))
self._create_deployment(self.deployment_obj)
self.resources[self.deployment_name] = {'status': 'RUNNING',
'pods': self.init_blocks}
return self.deployment_name
def status(self, job_ids):
""" Get the status of a list of jobs identified by the job identifiers
returned from the submit request.
Args:
- job_ids (list) : A list of job identifiers
Returns:
- A list of status from ['PENDING', 'RUNNING', 'CANCELLED', 'COMPLETED',
'FAILED', 'TIMEOUT'] corresponding to each job_id in the job_ids list.
Raises:
- ExecutionProviderExceptions or its subclasses
"""
self._status()
# This is a hack
return ['RUNNING' for jid in job_ids]
def cancel(self, job_ids):
""" Cancels the jobs specified by a list of job ids
Args:
job_ids : [<job_id> ...]
Returns :
[True/False...] : If the cancel operation fails the entire list will be False.
"""
for job in job_ids:
logger.debug("Terminating job/proc_id : {0}".format(job))
# Here we are assuming that for local, the job_ids are the process id's
self._delete_deployment(job)
self.resources[job]['status'] = 'CANCELLED'
rets = [True for i in job_ids]
return rets
def _status(self):
""" Internal: Do not call. Returns the status list for a list of job_ids
Args:
self
Returns:
[status...] : Status list of all jobs
"""
jobs_ids = list(self.resources.keys())
# TODO: fix this
return jobs_ids
# do something to get the deployment's status
def _create_deployment(self, deployment):
""" Create the kubernetes deployment """
api_response = self.kube_client.create_namespaced_deployment(
body=deployment,
namespace=self.namespace)
logger.debug("Deployment created. status='{0}'".format(str(api_response.status)))
def _delete_deployment(self, deployment_name):
""" Delete deployment """
api_response = self.kube_client.delete_namespaced_deployment(
name=deployment_name,
namespace=self.namespace,
body=client.V1DeleteOptions(
propagation_policy='Foreground',
grace_period_seconds=5))
logger.debug("Deployment deleted. status='{0}'".format(
str(api_response.status)))
@property
def scaling_enabled(self):
return False
@property
def channels_required(self):
return False
|
Parsl/libsubmit | libsubmit/providers/local/local.py | LocalProvider.status | python | def status(self, job_ids):
''' Get the status of a list of jobs identified by their ids.
Args:
- job_ids (List of ids) : List of identifiers for the jobs
Returns:
- List of status codes.
'''
logging.debug("Checking status of : {0}".format(job_ids))
for job_id in self.resources:
poll_code = self.resources[job_id]['proc'].poll()
if self.resources[job_id]['status'] in ['COMPLETED', 'FAILED']:
continue
if poll_code is None:
self.resources[job_id]['status'] = 'RUNNING'
elif poll_code == 0 and self.resources[job_id]['status'] != 'RUNNING':
self.resources[job_id]['status'] = 'COMPLETED'
elif poll_code < 0 and self.resources[job_id]['status'] != 'RUNNING':
self.resources[job_id]['status'] = 'FAILED'
return [self.resources[jid]['status'] for jid in job_ids] | Get the status of a list of jobs identified by their ids.
Args:
- job_ids (List of ids) : List of identifiers for the jobs
Returns:
- List of status codes. | train | https://github.com/Parsl/libsubmit/blob/27a41c16dd6f1c16d830a9ce1c97804920a59f64/libsubmit/providers/local/local.py#L77-L101 | null | class LocalProvider(ExecutionProvider, RepresentationMixin):
""" Local Execution Provider
This provider is used to provide execution resources from the localhost.
Parameters
----------
min_blocks : int
Minimum number of blocks to maintain.
max_blocks : int
Maximum number of blocks to maintain.
parallelism : float
Ratio of provisioned task slots to active tasks. A parallelism value of 1 represents aggressive
scaling where as many resources as possible are used; parallelism close to 0 represents
the opposite situation in which as few resources as possible (i.e., min_blocks) are used.
"""
def __init__(self,
channel=LocalChannel(),
label='local',
script_dir='parsl_scripts',
tasks_per_node=1,
nodes_per_block=1,
launcher=SingleNodeLauncher(),
init_blocks=4,
min_blocks=0,
max_blocks=10,
walltime="00:15:00",
parallelism=1):
self.channel = channel
self.label = label
if not os.path.exists(script_dir):
os.makedirs(script_dir)
self.script_dir = script_dir
self.provisioned_blocks = 0
self.nodes_per_block = nodes_per_block
self.tasks_per_node = tasks_per_node
self.launcher = launcher
self.init_blocks = init_blocks
self.min_blocks = min_blocks
self.max_blocks = max_blocks
self.parallelism = parallelism
self.walltime = walltime
# Dictionary that keeps track of jobs, keyed on job_id
self.resources = {}
def _write_submit_script(self, script_string, script_filename):
'''
Load the template string with config values and write the generated submit script to
a submit script file.
Args:
- template_string (string) : The template string to be used for the writing submit script
- script_filename (string) : Name of the submit script
Returns:
- True: on success
Raises:
SchedulerMissingArgs : If template is missing args
ScriptPathError : Unable to write submit script out
'''
try:
with open(script_filename, 'w') as f:
f.write(script_string)
except KeyError as e:
logger.error("Missing keys for submit script : %s", e)
raise (ep_error.SchedulerMissingArgs(e.args, self.label))
except IOError as e:
logger.error("Failed writing to submit script: %s", script_filename)
raise (ep_error.ScriptPathError(script_filename, e))
return True
def submit(self, command, blocksize, job_name="parsl.auto"):
''' Submits the command onto an Local Resource Manager job of blocksize parallel elements.
Submit returns an ID that corresponds to the task that was just submitted.
If tasks_per_node < 1:
1/tasks_per_node is provisioned
If tasks_per_node == 1:
A single node is provisioned
If tasks_per_node > 1 :
tasks_per_node * blocksize number of nodes are provisioned.
Args:
- command :(String) Commandline invocation to be made on the remote side.
- blocksize :(float) - Not really used for local
Kwargs:
- job_name (String): Name for job, must be unique
Returns:
- None: At capacity, cannot provision more
- job_id: (string) Identifier for the job
'''
job_name = "{0}.{1}".format(job_name, time.time())
# Set script path
script_path = "{0}/{1}.sh".format(self.script_dir, job_name)
script_path = os.path.abspath(script_path)
wrap_command = self.launcher(command, self.tasks_per_node, self.nodes_per_block)
self._write_submit_script(wrap_command, script_path)
job_id, proc = self.channel.execute_no_wait('bash {0}'.format(script_path), 3)
self.resources[job_id] = {'job_id': job_id, 'status': 'RUNNING', 'blocksize': blocksize, 'proc': proc}
return job_id
def cancel(self, job_ids):
''' Cancels the jobs specified by a list of job ids
Args:
job_ids : [<job_id> ...]
Returns :
[True/False...] : If the cancel operation fails the entire list will be False.
'''
for job in job_ids:
logger.debug("Terminating job/proc_id : {0}".format(job))
# Here we are assuming that for local, the job_ids are the process id's
proc = self.resources[job]['proc']
os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
self.resources[job]['status'] = 'CANCELLED'
rets = [True for i in job_ids]
return rets
@property
def scaling_enabled(self):
return True
@property
def current_capacity(self):
return len(self.resources)
|
Parsl/libsubmit | libsubmit/providers/local/local.py | LocalProvider._write_submit_script | python | def _write_submit_script(self, script_string, script_filename):
'''
Load the template string with config values and write the generated submit script to
a submit script file.
Args:
- template_string (string) : The template string to be used for the writing submit script
- script_filename (string) : Name of the submit script
Returns:
- True: on success
Raises:
SchedulerMissingArgs : If template is missing args
ScriptPathError : Unable to write submit script out
'''
try:
with open(script_filename, 'w') as f:
f.write(script_string)
except KeyError as e:
logger.error("Missing keys for submit script : %s", e)
raise (ep_error.SchedulerMissingArgs(e.args, self.label))
except IOError as e:
logger.error("Failed writing to submit script: %s", script_filename)
raise (ep_error.ScriptPathError(script_filename, e))
return True | Load the template string with config values and write the generated submit script to
a submit script file.
Args:
- template_string (string) : The template string to be used for the writing submit script
- script_filename (string) : Name of the submit script
Returns:
- True: on success
Raises:
SchedulerMissingArgs : If template is missing args
ScriptPathError : Unable to write submit script out | train | https://github.com/Parsl/libsubmit/blob/27a41c16dd6f1c16d830a9ce1c97804920a59f64/libsubmit/providers/local/local.py#L103-L132 | null | class LocalProvider(ExecutionProvider, RepresentationMixin):
""" Local Execution Provider
This provider is used to provide execution resources from the localhost.
Parameters
----------
min_blocks : int
Minimum number of blocks to maintain.
max_blocks : int
Maximum number of blocks to maintain.
parallelism : float
Ratio of provisioned task slots to active tasks. A parallelism value of 1 represents aggressive
scaling where as many resources as possible are used; parallelism close to 0 represents
the opposite situation in which as few resources as possible (i.e., min_blocks) are used.
"""
def __init__(self,
channel=LocalChannel(),
label='local',
script_dir='parsl_scripts',
tasks_per_node=1,
nodes_per_block=1,
launcher=SingleNodeLauncher(),
init_blocks=4,
min_blocks=0,
max_blocks=10,
walltime="00:15:00",
parallelism=1):
self.channel = channel
self.label = label
if not os.path.exists(script_dir):
os.makedirs(script_dir)
self.script_dir = script_dir
self.provisioned_blocks = 0
self.nodes_per_block = nodes_per_block
self.tasks_per_node = tasks_per_node
self.launcher = launcher
self.init_blocks = init_blocks
self.min_blocks = min_blocks
self.max_blocks = max_blocks
self.parallelism = parallelism
self.walltime = walltime
# Dictionary that keeps track of jobs, keyed on job_id
self.resources = {}
def status(self, job_ids):
''' Get the status of a list of jobs identified by their ids.
Args:
- job_ids (List of ids) : List of identifiers for the jobs
Returns:
- List of status codes.
'''
logging.debug("Checking status of : {0}".format(job_ids))
for job_id in self.resources:
poll_code = self.resources[job_id]['proc'].poll()
if self.resources[job_id]['status'] in ['COMPLETED', 'FAILED']:
continue
if poll_code is None:
self.resources[job_id]['status'] = 'RUNNING'
elif poll_code == 0 and self.resources[job_id]['status'] != 'RUNNING':
self.resources[job_id]['status'] = 'COMPLETED'
elif poll_code < 0 and self.resources[job_id]['status'] != 'RUNNING':
self.resources[job_id]['status'] = 'FAILED'
return [self.resources[jid]['status'] for jid in job_ids]
def submit(self, command, blocksize, job_name="parsl.auto"):
''' Submits the command onto an Local Resource Manager job of blocksize parallel elements.
Submit returns an ID that corresponds to the task that was just submitted.
If tasks_per_node < 1:
1/tasks_per_node is provisioned
If tasks_per_node == 1:
A single node is provisioned
If tasks_per_node > 1 :
tasks_per_node * blocksize number of nodes are provisioned.
Args:
- command :(String) Commandline invocation to be made on the remote side.
- blocksize :(float) - Not really used for local
Kwargs:
- job_name (String): Name for job, must be unique
Returns:
- None: At capacity, cannot provision more
- job_id: (string) Identifier for the job
'''
job_name = "{0}.{1}".format(job_name, time.time())
# Set script path
script_path = "{0}/{1}.sh".format(self.script_dir, job_name)
script_path = os.path.abspath(script_path)
wrap_command = self.launcher(command, self.tasks_per_node, self.nodes_per_block)
self._write_submit_script(wrap_command, script_path)
job_id, proc = self.channel.execute_no_wait('bash {0}'.format(script_path), 3)
self.resources[job_id] = {'job_id': job_id, 'status': 'RUNNING', 'blocksize': blocksize, 'proc': proc}
return job_id
def cancel(self, job_ids):
''' Cancels the jobs specified by a list of job ids
Args:
job_ids : [<job_id> ...]
Returns :
[True/False...] : If the cancel operation fails the entire list will be False.
'''
for job in job_ids:
logger.debug("Terminating job/proc_id : {0}".format(job))
# Here we are assuming that for local, the job_ids are the process id's
proc = self.resources[job]['proc']
os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
self.resources[job]['status'] = 'CANCELLED'
rets = [True for i in job_ids]
return rets
@property
def scaling_enabled(self):
return True
@property
def current_capacity(self):
return len(self.resources)
|
Parsl/libsubmit | libsubmit/providers/local/local.py | LocalProvider.submit | python | def submit(self, command, blocksize, job_name="parsl.auto"):
''' Submits the command onto an Local Resource Manager job of blocksize parallel elements.
Submit returns an ID that corresponds to the task that was just submitted.
If tasks_per_node < 1:
1/tasks_per_node is provisioned
If tasks_per_node == 1:
A single node is provisioned
If tasks_per_node > 1 :
tasks_per_node * blocksize number of nodes are provisioned.
Args:
- command :(String) Commandline invocation to be made on the remote side.
- blocksize :(float) - Not really used for local
Kwargs:
- job_name (String): Name for job, must be unique
Returns:
- None: At capacity, cannot provision more
- job_id: (string) Identifier for the job
'''
job_name = "{0}.{1}".format(job_name, time.time())
# Set script path
script_path = "{0}/{1}.sh".format(self.script_dir, job_name)
script_path = os.path.abspath(script_path)
wrap_command = self.launcher(command, self.tasks_per_node, self.nodes_per_block)
self._write_submit_script(wrap_command, script_path)
job_id, proc = self.channel.execute_no_wait('bash {0}'.format(script_path), 3)
self.resources[job_id] = {'job_id': job_id, 'status': 'RUNNING', 'blocksize': blocksize, 'proc': proc}
return job_id | Submits the command onto an Local Resource Manager job of blocksize parallel elements.
Submit returns an ID that corresponds to the task that was just submitted.
If tasks_per_node < 1:
1/tasks_per_node is provisioned
If tasks_per_node == 1:
A single node is provisioned
If tasks_per_node > 1 :
tasks_per_node * blocksize number of nodes are provisioned.
Args:
- command :(String) Commandline invocation to be made on the remote side.
- blocksize :(float) - Not really used for local
Kwargs:
- job_name (String): Name for job, must be unique
Returns:
- None: At capacity, cannot provision more
- job_id: (string) Identifier for the job | train | https://github.com/Parsl/libsubmit/blob/27a41c16dd6f1c16d830a9ce1c97804920a59f64/libsubmit/providers/local/local.py#L134-L173 | [
"def _write_submit_script(self, script_string, script_filename):\n '''\n Load the template string with config values and write the generated submit script to\n a submit script file.\n\n Args:\n - template_string (string) : The template string to be used for the writing submit script\n ... | class LocalProvider(ExecutionProvider, RepresentationMixin):
""" Local Execution Provider
This provider is used to provide execution resources from the localhost.
Parameters
----------
min_blocks : int
Minimum number of blocks to maintain.
max_blocks : int
Maximum number of blocks to maintain.
parallelism : float
Ratio of provisioned task slots to active tasks. A parallelism value of 1 represents aggressive
scaling where as many resources as possible are used; parallelism close to 0 represents
the opposite situation in which as few resources as possible (i.e., min_blocks) are used.
"""
def __init__(self,
channel=LocalChannel(),
label='local',
script_dir='parsl_scripts',
tasks_per_node=1,
nodes_per_block=1,
launcher=SingleNodeLauncher(),
init_blocks=4,
min_blocks=0,
max_blocks=10,
walltime="00:15:00",
parallelism=1):
self.channel = channel
self.label = label
if not os.path.exists(script_dir):
os.makedirs(script_dir)
self.script_dir = script_dir
self.provisioned_blocks = 0
self.nodes_per_block = nodes_per_block
self.tasks_per_node = tasks_per_node
self.launcher = launcher
self.init_blocks = init_blocks
self.min_blocks = min_blocks
self.max_blocks = max_blocks
self.parallelism = parallelism
self.walltime = walltime
# Dictionary that keeps track of jobs, keyed on job_id
self.resources = {}
def status(self, job_ids):
''' Get the status of a list of jobs identified by their ids.
Args:
- job_ids (List of ids) : List of identifiers for the jobs
Returns:
- List of status codes.
'''
logging.debug("Checking status of : {0}".format(job_ids))
for job_id in self.resources:
poll_code = self.resources[job_id]['proc'].poll()
if self.resources[job_id]['status'] in ['COMPLETED', 'FAILED']:
continue
if poll_code is None:
self.resources[job_id]['status'] = 'RUNNING'
elif poll_code == 0 and self.resources[job_id]['status'] != 'RUNNING':
self.resources[job_id]['status'] = 'COMPLETED'
elif poll_code < 0 and self.resources[job_id]['status'] != 'RUNNING':
self.resources[job_id]['status'] = 'FAILED'
return [self.resources[jid]['status'] for jid in job_ids]
def _write_submit_script(self, script_string, script_filename):
'''
Load the template string with config values and write the generated submit script to
a submit script file.
Args:
- template_string (string) : The template string to be used for the writing submit script
- script_filename (string) : Name of the submit script
Returns:
- True: on success
Raises:
SchedulerMissingArgs : If template is missing args
ScriptPathError : Unable to write submit script out
'''
try:
with open(script_filename, 'w') as f:
f.write(script_string)
except KeyError as e:
logger.error("Missing keys for submit script : %s", e)
raise (ep_error.SchedulerMissingArgs(e.args, self.label))
except IOError as e:
logger.error("Failed writing to submit script: %s", script_filename)
raise (ep_error.ScriptPathError(script_filename, e))
return True
def cancel(self, job_ids):
''' Cancels the jobs specified by a list of job ids
Args:
job_ids : [<job_id> ...]
Returns :
[True/False...] : If the cancel operation fails the entire list will be False.
'''
for job in job_ids:
logger.debug("Terminating job/proc_id : {0}".format(job))
# Here we are assuming that for local, the job_ids are the process id's
proc = self.resources[job]['proc']
os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
self.resources[job]['status'] = 'CANCELLED'
rets = [True for i in job_ids]
return rets
@property
def scaling_enabled(self):
return True
@property
def current_capacity(self):
return len(self.resources)
|
Parsl/libsubmit | libsubmit/providers/local/local.py | LocalProvider.cancel | python | def cancel(self, job_ids):
''' Cancels the jobs specified by a list of job ids
Args:
job_ids : [<job_id> ...]
Returns :
[True/False...] : If the cancel operation fails the entire list will be False.
'''
for job in job_ids:
logger.debug("Terminating job/proc_id : {0}".format(job))
# Here we are assuming that for local, the job_ids are the process id's
proc = self.resources[job]['proc']
os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
self.resources[job]['status'] = 'CANCELLED'
rets = [True for i in job_ids]
return rets | Cancels the jobs specified by a list of job ids
Args:
job_ids : [<job_id> ...]
Returns :
[True/False...] : If the cancel operation fails the entire list will be False. | train | https://github.com/Parsl/libsubmit/blob/27a41c16dd6f1c16d830a9ce1c97804920a59f64/libsubmit/providers/local/local.py#L175-L193 | null | class LocalProvider(ExecutionProvider, RepresentationMixin):
""" Local Execution Provider
This provider is used to provide execution resources from the localhost.
Parameters
----------
min_blocks : int
Minimum number of blocks to maintain.
max_blocks : int
Maximum number of blocks to maintain.
parallelism : float
Ratio of provisioned task slots to active tasks. A parallelism value of 1 represents aggressive
scaling where as many resources as possible are used; parallelism close to 0 represents
the opposite situation in which as few resources as possible (i.e., min_blocks) are used.
"""
def __init__(self,
channel=LocalChannel(),
label='local',
script_dir='parsl_scripts',
tasks_per_node=1,
nodes_per_block=1,
launcher=SingleNodeLauncher(),
init_blocks=4,
min_blocks=0,
max_blocks=10,
walltime="00:15:00",
parallelism=1):
self.channel = channel
self.label = label
if not os.path.exists(script_dir):
os.makedirs(script_dir)
self.script_dir = script_dir
self.provisioned_blocks = 0
self.nodes_per_block = nodes_per_block
self.tasks_per_node = tasks_per_node
self.launcher = launcher
self.init_blocks = init_blocks
self.min_blocks = min_blocks
self.max_blocks = max_blocks
self.parallelism = parallelism
self.walltime = walltime
# Dictionary that keeps track of jobs, keyed on job_id
self.resources = {}
def status(self, job_ids):
''' Get the status of a list of jobs identified by their ids.
Args:
- job_ids (List of ids) : List of identifiers for the jobs
Returns:
- List of status codes.
'''
logging.debug("Checking status of : {0}".format(job_ids))
for job_id in self.resources:
poll_code = self.resources[job_id]['proc'].poll()
if self.resources[job_id]['status'] in ['COMPLETED', 'FAILED']:
continue
if poll_code is None:
self.resources[job_id]['status'] = 'RUNNING'
elif poll_code == 0 and self.resources[job_id]['status'] != 'RUNNING':
self.resources[job_id]['status'] = 'COMPLETED'
elif poll_code < 0 and self.resources[job_id]['status'] != 'RUNNING':
self.resources[job_id]['status'] = 'FAILED'
return [self.resources[jid]['status'] for jid in job_ids]
def _write_submit_script(self, script_string, script_filename):
'''
Load the template string with config values and write the generated submit script to
a submit script file.
Args:
- template_string (string) : The template string to be used for the writing submit script
- script_filename (string) : Name of the submit script
Returns:
- True: on success
Raises:
SchedulerMissingArgs : If template is missing args
ScriptPathError : Unable to write submit script out
'''
try:
with open(script_filename, 'w') as f:
f.write(script_string)
except KeyError as e:
logger.error("Missing keys for submit script : %s", e)
raise (ep_error.SchedulerMissingArgs(e.args, self.label))
except IOError as e:
logger.error("Failed writing to submit script: %s", script_filename)
raise (ep_error.ScriptPathError(script_filename, e))
return True
def submit(self, command, blocksize, job_name="parsl.auto"):
''' Submits the command onto an Local Resource Manager job of blocksize parallel elements.
Submit returns an ID that corresponds to the task that was just submitted.
If tasks_per_node < 1:
1/tasks_per_node is provisioned
If tasks_per_node == 1:
A single node is provisioned
If tasks_per_node > 1 :
tasks_per_node * blocksize number of nodes are provisioned.
Args:
- command :(String) Commandline invocation to be made on the remote side.
- blocksize :(float) - Not really used for local
Kwargs:
- job_name (String): Name for job, must be unique
Returns:
- None: At capacity, cannot provision more
- job_id: (string) Identifier for the job
'''
job_name = "{0}.{1}".format(job_name, time.time())
# Set script path
script_path = "{0}/{1}.sh".format(self.script_dir, job_name)
script_path = os.path.abspath(script_path)
wrap_command = self.launcher(command, self.tasks_per_node, self.nodes_per_block)
self._write_submit_script(wrap_command, script_path)
job_id, proc = self.channel.execute_no_wait('bash {0}'.format(script_path), 3)
self.resources[job_id] = {'job_id': job_id, 'status': 'RUNNING', 'blocksize': blocksize, 'proc': proc}
return job_id
@property
def scaling_enabled(self):
return True
@property
def current_capacity(self):
return len(self.resources)
|
Parsl/libsubmit | libsubmit/providers/slurm/slurm.py | SlurmProvider.submit | python | def submit(self, command, blocksize, job_name="parsl.auto"):
if self.provisioned_blocks >= self.max_blocks:
logger.warn("Slurm provider '{}' is at capacity (no more blocks will be added)".format(self.label))
return None
job_name = "{0}.{1}".format(job_name, time.time())
script_path = "{0}/{1}.submit".format(self.script_dir, job_name)
script_path = os.path.abspath(script_path)
logger.debug("Requesting one block with {} nodes".format(self.nodes_per_block))
job_config = {}
job_config["submit_script_dir"] = self.channel.script_dir
job_config["nodes"] = self.nodes_per_block
job_config["tasks_per_node"] = self.tasks_per_node
job_config["walltime"] = wtime_to_minutes(self.walltime)
job_config["overrides"] = self.overrides
job_config["partition"] = self.partition
job_config["user_script"] = command
# Wrap the command
job_config["user_script"] = self.launcher(command,
self.tasks_per_node,
self.nodes_per_block)
logger.debug("Writing submit script")
self._write_submit_script(template_string, script_path, job_name, job_config)
channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)
retcode, stdout, stderr = super().execute_wait("sbatch {0}".format(channel_script_path))
job_id = None
if retcode == 0:
for line in stdout.split('\n'):
if line.startswith("Submitted batch job"):
job_id = line.split("Submitted batch job")[1].strip()
self.resources[job_id] = {'job_id': job_id, 'status': 'PENDING', 'blocksize': blocksize}
else:
print("Submission of command to scale_out failed")
logger.error("Retcode:%s STDOUT:%s STDERR:%s", retcode, stdout.strip(), stderr.strip())
return job_id | Submit the command as a slurm job of blocksize parallel elements.
Parameters
----------
command : str
Command to be made on the remote side.
blocksize : int
Not implemented.
job_name : str
Name for the job (must be unique).
Returns
-------
None or str
If at capacity, returns None; otherwise, a string identifier for the job | train | https://github.com/Parsl/libsubmit/blob/27a41c16dd6f1c16d830a9ce1c97804920a59f64/libsubmit/providers/slurm/slurm.py#L134-L193 | [
"def wtime_to_minutes(time_string):\n ''' wtime_to_minutes\n\n Convert standard wallclock time string to minutes.\n\n Args:\n - Time_string in HH:MM:SS format\n\n Returns:\n (int) minutes\n\n '''\n hours, mins, seconds = time_string.split(':')\n return int(hours) * 60 + int(mins) ... | class SlurmProvider(ClusterProvider, RepresentationMixin):
"""Slurm Execution Provider
This provider uses sbatch to submit, squeue for status and scancel to cancel
jobs. The sbatch script to be used is created from a template file in this
same module.
Parameters
----------
partition : str
Slurm partition to request blocks from.
label : str
Label for this provider.
channel : Channel
Channel for accessing this provider. Possible channels include
:class:`~libsubmit.channels.LocalChannel` (the default),
:class:`~libsubmit.channels.SSHChannel`, or
:class:`~libsubmit.channels.SSHInteractiveLoginChannel`.
script_dir : str
Relative or absolute path to a directory where intermediate scripts are placed.
nodes_per_block : int
Nodes to provision per block.
tasks_per_node : int
Tasks to run per node.
min_blocks : int
Minimum number of blocks to maintain.
max_blocks : int
Maximum number of blocks to maintain.
parallelism : float
Ratio of provisioned task slots to active tasks. A parallelism value of 1 represents aggressive
scaling where as many resources as possible are used; parallelism close to 0 represents
the opposite situation in which as few resources as possible (i.e., min_blocks) are used.
walltime : str
Walltime requested per block in HH:MM:SS.
overrides : str
String to prepend to the #SBATCH blocks in the submit script to the scheduler.
launcher : Launcher
Launcher for this provider. Possible launchers include
:class:`~libsubmit.launchers.SingleNodeLauncher` (the default),
:class:`~libsubmit.launchers.SrunLauncher`, or
:class:`~libsubmit.launchers.AprunLauncher`
"""
def __init__(self,
partition,
label='slurm',
channel=LocalChannel(),
script_dir='parsl_scripts',
nodes_per_block=1,
tasks_per_node=1,
init_blocks=1,
min_blocks=0,
max_blocks=10,
parallelism=1,
walltime="00:10:00",
overrides='',
cmd_timeout=10,
launcher=SingleNodeLauncher()):
super().__init__(label,
channel,
script_dir,
nodes_per_block,
tasks_per_node,
init_blocks,
min_blocks,
max_blocks,
parallelism,
walltime,
cmd_timeout=cmd_timeout,
launcher=launcher)
self.partition = partition
self.overrides = overrides
def _status(self):
''' Internal: Do not call. Returns the status list for a list of job_ids
Args:
self
Returns:
[status...] : Status list of all jobs
'''
job_id_list = ','.join(self.resources.keys())
cmd = "squeue --job {0}".format(job_id_list)
retcode, stdout, stderr = super().execute_wait(cmd)
# Execute_wait failed. Do no update
if retcode != 0:
return
jobs_missing = list(self.resources.keys())
for line in stdout.split('\n'):
parts = line.split()
if parts and parts[0] != 'JOBID':
job_id = parts[0]
status = translate_table.get(parts[4], 'UNKNOWN')
self.resources[job_id]['status'] = status
jobs_missing.remove(job_id)
# squeue does not report on jobs that are not running. So we are filling in the
# blanks for missing jobs, we might lose some information about why the jobs failed.
for missing_job in jobs_missing:
if self.resources[missing_job]['status'] in ['PENDING', 'RUNNING']:
self.resources[missing_job]['status'] = 'COMPLETED'
def cancel(self, job_ids):
''' Cancels the jobs specified by a list of job ids
Args:
job_ids : [<job_id> ...]
Returns :
[True/False...] : If the cancel operation fails the entire list will be False.
'''
job_id_list = ' '.join(job_ids)
retcode, stdout, stderr = super().execute_wait("scancel {0}".format(job_id_list))
rets = None
if retcode == 0:
for jid in job_ids:
self.resources[jid]['status'] = translate_table['CA'] # Setting state to cancelled
rets = [True for i in job_ids]
else:
rets = [False for i in job_ids]
return rets
def _test_add_resource(self, job_id):
self.resources.extend([{'job_id': job_id, 'status': 'PENDING', 'size': 1}])
return True
|
Parsl/libsubmit | libsubmit/channels/local/local.py | LocalChannel.execute_no_wait | python | def execute_no_wait(self, cmd, walltime, envs={}):
''' Synchronously execute a commandline string on the shell.
Args:
- cmd (string) : Commandline string to execute
- walltime (int) : walltime in seconds, this is not really used now.
Returns:
- retcode : Return code from the execution, -1 on fail
- stdout : stdout string
- stderr : stderr string
Raises:
None.
'''
current_env = copy.deepcopy(self._envs)
current_env.update(envs)
try:
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.userhome,
env=current_env,
shell=True,
preexec_fn=os.setpgrp
)
pid = proc.pid
except Exception as e:
print("Caught exception : {0}".format(e))
logger.warn("Execution of command [%s] failed due to \n %s ", (cmd, e))
return pid, proc | Synchronously execute a commandline string on the shell.
Args:
- cmd (string) : Commandline string to execute
- walltime (int) : walltime in seconds, this is not really used now.
Returns:
- retcode : Return code from the execution, -1 on fail
- stdout : stdout string
- stderr : stderr string
Raises:
None. | train | https://github.com/Parsl/libsubmit/blob/27a41c16dd6f1c16d830a9ce1c97804920a59f64/libsubmit/channels/local/local.py#L96-L131 | null | class LocalChannel(Channel, RepresentationMixin):
''' This is not even really a channel, since opening a local shell is not heavy
and done so infrequently that they do not need a persistent channel
'''
def __init__(self, userhome=".", envs={}, script_dir="./.scripts", **kwargs):
''' Initialize the local channel. script_dir is required by set to a default.
KwArgs:
- userhome (string): (default='.') This is provided as a way to override and set a specific userhome
- envs (dict) : A dictionary of env variables to be set when launching the shell
- script_dir (string): (default="./.scripts") Directory to place scripts
'''
self.userhome = os.path.abspath(userhome)
self.hostname = "localhost"
self.envs = envs
local_env = os.environ.copy()
self._envs = copy.deepcopy(local_env)
self._envs.update(envs)
self._script_dir = os.path.abspath(script_dir)
try:
os.makedirs(self._script_dir)
except OSError as e:
if e.errno != errno.EEXIST:
logger.error("Failed to create script_dir : {0}".format(script_dir))
raise BadScriptPath(e, self.hostname)
@property
def script_dir(self):
return self._script_dir
def execute_wait(self, cmd, walltime, envs={}):
''' Synchronously execute a commandline string on the shell.
Args:
- cmd (string) : Commandline string to execute
- walltime (int) : walltime in seconds, this is not really used now.
Kwargs:
- envs (dict) : Dictionary of env variables. This will be used
to override the envs set at channel initialization.
Returns:
- retcode : Return code from the execution, -1 on fail
- stdout : stdout string
- stderr : stderr string
Raises:
None.
'''
retcode = -1
stdout = None
stderr = None
current_env = copy.deepcopy(self._envs)
current_env.update(envs)
try:
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.userhome,
env=current_env,
shell=True
)
proc.wait(timeout=walltime)
stdout = proc.stdout.read()
stderr = proc.stderr.read()
retcode = proc.returncode
except Exception as e:
print("Caught exception : {0}".format(e))
logger.warn("Execution of command [%s] failed due to \n %s ", cmd, e)
# Set retcode to non-zero so that this can be handled in the provider.
if retcode == 0:
retcode = -1
return (retcode, None, None)
return (retcode, stdout.decode("utf-8"), stderr.decode("utf-8"))
def push_file(self, source, dest_dir):
''' If the source files dirpath is the same as dest_dir, a copy
is not necessary, and nothing is done. Else a copy is made.
Args:
- source (string) : Path to the source file
- dest_dir (string) : Path to the directory to which the files is to be copied
Returns:
- destination_path (String) : Absolute path of the destination file
Raises:
- FileCopyException : If file copy failed.
'''
local_dest = dest_dir + '/' + os.path.basename(source)
# Only attempt to copy if the target dir and source dir are different
if os.path.dirname(source) != dest_dir:
try:
shutil.copyfile(source, local_dest)
os.chmod(local_dest, 0o777)
except OSError as e:
raise FileCopyException(e, self.hostname)
return local_dest
def close(self):
''' There's nothing to close here, and this really doesn't do anything
Returns:
- False, because it really did not "close" this channel.
'''
return False
|
array-split/array_split | setup.py | read_readme | python | def read_readme():
text = open("README.rst", "rt").read()
text_lines = text.split("\n")
ld_i_beg = 0
while text_lines[ld_i_beg].find("start long description") < 0:
ld_i_beg += 1
ld_i_beg += 1
ld_i_end = ld_i_beg
while text_lines[ld_i_end].find("end long description") < 0:
ld_i_end += 1
ld_text = "\n".join(text_lines[ld_i_beg:ld_i_end])
return ld_text | Reads part of the README.rst for use as long_description in setup(). | train | https://github.com/array-split/array_split/blob/e07abe3001209394dde809f7e6f505f9f49a1c26/setup.py#L9-L25 | null | #!/usr/bin/env python
from setuptools import setup, find_packages
import sys
import os
import os.path
import subprocess
class CalledProcessError(subprocess.CalledProcessError):
"""
Adds :samp:`output` attribute to :obj:`subprocess.CalledProcessError`.
"""
def __init__(self, *args, **kwargs):
"""
Adds :samp:`output` attribute to :samp:`self` if it doesn't exist.
"""
subprocess.CalledProcessError.__init__(self, *args, **kwargs)
if not hasattr(self, "output"):
self.output = None
def create_git_describe():
try:
cmd = ["git", "describe"]
p = \
subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
p.wait()
if p.returncode != 0:
e = \
CalledProcessError(
returncode=p.returncode,
cmd=cmd
)
setattr(e, "output", " ".join([i.decode() for i in p.communicate()]))
raise e
# Write the git describe to text file
open(os.path.join("array_split", "git_describe.txt"), "wt").write(
p.communicate()[0].decode()
)
except (Exception,) as e:
# Try and make up a git-describe like string.
output = ""
if hasattr(e, "output"):
output = e.output
print("Problem with '%s': %s: %s" % (" ".join(cmd), e, output))
version_str = open(os.path.join("array_split", "version.txt"), "rt").read().strip()
if ("TRAVIS_TAG" in os.environ.keys()) and (len(os.environ["TRAVIS_TAG"]) > 0):
version_str = os.environ["TRAVIS_TAG"]
else:
if ("TRAVIS_BRANCH" in os.environ.keys()) and (len(os.environ["TRAVIS_BRANCH"]) > 0):
version_str += os.environ["TRAVIS_BRANCH"]
if ("TRAVIS_COMMIT" in os.environ.keys()) and (len(os.environ["TRAVIS_COMMIT"]) > 0):
version_str += "-" + \
os.environ["TRAVIS_COMMIT"][0:min([7, len(os.environ["TRAVIS_COMMIT"])])]
open(os.path.join("array_split", "git_describe.txt"), "wt").write(version_str)
create_git_describe()
_long_description = read_readme()
sphinx_requires = []
# Only require sphinx for CI and readthedocs.org.
if (
(os.environ.get('READTHEDOCS', None) is not None)
or
(os.environ.get('CI', None) is not None)
or
(os.environ.get('TRAVIS', None) is not None)
or
(os.environ.get('APPVEYOR', None) is not None)
):
sphinx_requires = ["sphinx>=1.4,<1.6", "sphinx_rtd_theme", ]
if (
(int(sys.version_info[0]) < 2)
or
((int(sys.version_info[0]) == 2) and (int(sys.version_info[1]) <= 6))
or
((int(sys.version_info[0]) == 3) and (int(sys.version_info[1]) <= 3))
):
sphinx_requires = []
setup(
name="array_split",
version=open(os.path.join("array_split", "version.txt"), "rt").read().strip(),
packages=find_packages(),
# metadata for upload to PyPI
author="Shane J. Latham",
author_email="array.split@gmail.com",
description=(
"Python package for splitting arrays into sub-arrays "
+
"(i.e. rectangular-tiling and rectangular-domain-decomposition), "
+
"similar to ``numpy.array_split``."
),
long_description=_long_description,
license="MIT",
keywords=(
"multi-dimendional-array array sub-array tile tiling splitting split partition"
+
"partitioning scipy numpy ndarray domain-decomposition array-decomposition"
),
url="http://github.com/array-split/array_split", # project home page
classifiers=[
# How mature is this project? Common values are
# 2 - Pre-Alpha
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Utilities',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Natural Language :: English',
],
install_requires=["numpy>=1.6", ] + sphinx_requires,
package_data={
"array_split": ["version.txt", "git_describe.txt", "copyright.txt", "license.txt"]
},
test_suite="array_split.tests",
# could also include download_url, etc.
)
|
array-split/array_split | array_split/split.py | pad_with_object | python | def pad_with_object(sequence, new_length, obj=None):
if len(sequence) < new_length:
sequence = \
list(sequence) + [obj, ] * (new_length - len(sequence))
elif len(sequence) > new_length:
raise ValueError(
"Got len(sequence)=%s which exceeds new_length=%s"
%
(len(sequence), new_length)
)
return sequence | Returns :samp:`sequence` :obj:`list` end-padded with :samp:`{obj}`
elements so that the length of the returned list equals :samp:`{new_length}`.
:type sequence: iterable
:param sequence: Return *listified* sequence which has been end-padded.
:type new_length: :obj:`int`
:param new_length: The length of the returned list.
:type obj: :obj:`object`
:param obj: Object used as padding elements.
:rtype: :obj:`list`
:return: A :obj:`list` of length :samp:`{new_length}`.
:raises ValueError: if :samp:`len({sequence}) > {new_length})`.
Example::
>>> pad_with_object([1, 2, 3], 5, obj=0)
[1, 2, 3, 0, 0]
>>> pad_with_object([1, 2, 3], 5, obj=None)
[1, 2, 3, None, None] | train | https://github.com/array-split/array_split/blob/e07abe3001209394dde809f7e6f505f9f49a1c26/array_split/split.py#L126-L159 | null | """
===================================
The :mod:`array_split.split` Module
===================================
.. currentmodule:: array_split.split
Defines array splitting functions and classes.
Classes and Functions
=====================
.. autosummary::
:toctree: generated/
shape_factors - Compute *largest* factors of a given integer.
calculate_num_slices_per_axis - Computes per-axis divisions for a multi-dimensional shape.
calculate_tile_shape_for_max_bytes - Calculate a tile shape subject to max bytes restriction.
convert_halo_to_array_form - converts halo argument to :samp:`(ndim, 2)` shaped array.
ShapeSplitter - Splits a given shape into slices.
shape_split - Splits a specified shape and returns :obj:`numpy.ndarray` of :obj:`slice` elements.
array_split - Equivalent to :func:`numpy.array_split`.
Attributes
==========
.. autodata:: ARRAY_BOUNDS
.. autodata:: NO_BOUNDS
Utilities
=========
.. autosummary::
:toctree: generated/
is_scalar - Return :samp:`True` if argument is numeric scalar.
is_sequence - Return :samp:`True` if argument is a sequence.
is_indices - Return :samp:`True` if argument is a sequence.
pad_with_object - End pads a sequence with specified object.
pad_with_none - End pads a sequence with :samp:`None` elements.
"""
from __future__ import absolute_import
import numpy as _np
from .license import license as _license, copyright as _copyright, version as _version
from . import logging as _logging
__copyright__ = _copyright()
__version__ = _version()
__author__ = "Shane J. Latham"
__license__ = _license()
def is_scalar(obj):
"""
Returns :samp:`True` if argument :samp:`{obj}` is
a numeric type.
:type obj: :obj:`object`
:param obj: Return :samp:`True` if this is a scalar.
:rtype: :obj:`bool`
:return: :samp:`True` if :samp:`{obj}` is a numeric scalar.
Example::
>>> is_scalar(5)
True
>>> is_scalar(2.0)
True
>>> import numpy as np
>>> is_scalar(np.ones((10,), dtype="uint16")[0])
True
>>> is_scalar([1, 2, 3])
False
>>> is_scalar([i for i in range(0, 3)])
False
"""
return hasattr(obj, "__int__") or hasattr(obj, "__long__")
def is_sequence(obj):
"""
Returns :samp:`True` if argument :samp:`{obj}` is
a sequence (e.g. a :obj:`list` or :obj:`tuple`, etc).
:type obj: :obj:`object`
:param obj: Return :samp:`True` if this is a sequence.
:rtype: :obj:`bool`
:return: :samp:`True` if :samp:`{obj}` is a sequence.
Example::
>>> is_sequence([1, 2, 3])
True
>>> is_sequence([i for i in range(0, 3)])
True
>>> is_sequence(5)
False
"""
return (
hasattr(obj, "__len__")
or
hasattr(obj, "__getitem__")
or
hasattr(obj, "__iter__")
)
def is_indices(indices_or_sections):
"""
Test for the :samp:`{indices_or_sections}` argument of :meth:`ShapeSplitter.__init__`
to determine whether it is specifying *total number of tiles* or sequence of
*cut* indices. Returns :samp:`True` if argument :samp:`{indices_or_sections}` is
a sequence (e.g. a :obj:`list` or :obj:`tuple`, etc).
:type indices_or_sections: :obj:`object`
:param indices_or_sections: Return :samp:`True` if this is a sequence.
:rtype: :obj:`bool`
:return: :samp:`is_sequence({indices_or_sections})`.
"""
return is_sequence(indices_or_sections)
def pad_with_none(sequence, new_length):
"""
Returns :samp:`sequence` :obj:`list` end-padded with :samp:`None`
elements so that the length of the returned list equals :samp:`{new_length}`.
:type sequence: iterable
:param sequence: Return *listified* sequence which has been end-padded.
:type new_length: :obj:`int`
:param new_length: The length of the returned list.
:rtype: :obj:`list`
:return: A :obj:`list` of length :samp:`{new_length}`.
:raises ValueError: if :samp:`len({sequence}) > {new_length})`.
"""
return pad_with_object(sequence, new_length, obj=None)
def shape_factors(n, dim=2):
    """
    Returns a :obj:`numpy.ndarray` of factors :samp:`f` such
    that :samp:`(len(f) == {dim}) and (numpy.product(f) == {n})`.
    The returned factors are as *square* (*cubic*, etc) as possible.

    For example::

       >>> shape_factors(24, 1)
       array([24])
       >>> shape_factors(24, 2)
       array([4, 6])
       >>> shape_factors(24, 3)
       array([2, 3, 4])
       >>> shape_factors(24, 4)
       array([2, 2, 2, 3])
       >>> shape_factors(24, 5)
       array([1, 2, 2, 2, 3])
       >>> shape_factors(24, 6)
       array([1, 1, 2, 2, 2, 3])

    :type n: :obj:`int`
    :param n: Integer which is factored into :samp:`{dim}` factors.
    :type dim: :obj:`int`
    :param dim: Number of factors.
    :rtype: :obj:`numpy.ndarray`
    :return: A :samp:`({dim},)` shaped array of integers which are factors
       of :samp:`{n}`.
    """
    if dim <= 1:
        # Base case: a single factor is n itself.
        return _np.array([n])
    # Search downwards from (just above) the dim-th root of n for the
    # largest factor, then recurse on the quotient with one fewer factor.
    candidate = int(n ** (1.0 / float(dim))) + 1
    while (n % candidate) != 0:
        candidate -= 1
    remaining = list(shape_factors(n // candidate, dim=dim - 1))
    return _np.array(sorted([candidate] + remaining))
def calculate_tile_shape_for_max_bytes(
        array_shape,
        array_itemsize,
        max_tile_bytes,
        max_tile_shape=None,
        sub_tile_shape=None,
        halo=None
):
    """
    Returns a tile shape :samp:`tile_shape`
    such that :samp:`numpy.product(tile_shape)*numpy.sum({array_itemsize}) <= {max_tile_bytes}`.
    Also, if :samp:`{max_tile_shape} is not None`
    then :samp:`numpy.all(tile_shape <= {max_tile_shape}) is True` and
    if :samp:`{sub_tile_shape} is not None`
    the :samp:`numpy.all((tile_shape % {sub_tile_shape}) == 0) is True`.

    :type array_shape: sequence of :obj:`int`
    :param array_shape: Shape of the array which is to be split into tiles.
    :type array_itemsize: :obj:`int`
    :param array_itemsize: The number of bytes per element of the array to be tiled.
    :type max_tile_bytes: :obj:`int`
    :param max_tile_bytes: The maximum number of bytes for the returned :samp:`tile_shape`.
    :type max_tile_shape: sequence of :obj:`int`
    :param max_tile_shape: Per axis maximum shapes for the returned :samp:`tile_shape`.
    :type sub_tile_shape: sequence of :obj:`int`
    :param sub_tile_shape: The returned :samp:`tile_shape` will be an even multiple
       of this sub-tile shape.
    :type halo: :obj:`int`, sequence of :obj:`int`, or :samp:`(len({array_shape}), 2)`
       shaped :obj:`numpy.ndarray`
    :param halo: How tiles are extended in each axis direction with *halo*
       elements. See :ref:`the-halo-parameter-examples` for meaning of :samp:`{halo}` values.
    :rtype: :obj:`numpy.ndarray`
    :return: A 1D array of shape :samp:`(len(array_shape),)` indicating a *tile shape*
       which will (approximately) uniformly divide the given :samp:`{array_shape}` into
       tiles (sub-arrays).

    Examples::

       >>> from array_split.split import calculate_tile_shape_for_max_bytes
       >>> calculate_tile_shape_for_max_bytes(
       ... array_shape=[512,],
       ... array_itemsize=1,
       ... max_tile_bytes=512
       ... )
       array([512])
       >>> calculate_tile_shape_for_max_bytes(
       ... array_shape=[512,],
       ... array_itemsize=2, # Doubling the itemsize halves the tile size.
       ... max_tile_bytes=512
       ... )
       array([256])
       >>> calculate_tile_shape_for_max_bytes(
       ... array_shape=[512,],
       ... array_itemsize=1,
       ... max_tile_bytes=512-1 # tile shape will now be halved
       ... )
       array([256])
    """
    logger = _logging.getLogger(__name__ + ".calculate_tile_shape_for_max_bytes")
    logger.debug("calculate_tile_shape_for_max_bytes: enter:")
    logger.debug("array_shape=%s", array_shape)
    logger.debug("array_itemsize=%s", array_itemsize)
    logger.debug("max_tile_bytes=%s", max_tile_bytes)
    logger.debug("max_tile_shape=%s", max_tile_shape)
    logger.debug("sub_tile_shape=%s", sub_tile_shape)
    logger.debug("halo=%s", halo)
    # Normalise arguments: int64 arrays throughout; array_itemsize may be a
    # sequence of per-element byte counts and is summed to a single value.
    array_shape = _np.array(array_shape, dtype="int64")
    array_itemsize = _np.sum(array_itemsize, dtype="int64")
    if max_tile_shape is None:
        max_tile_shape = _np.array(array_shape, copy=True)
    # A tile can never be larger than the array itself.
    max_tile_shape = \
        _np.array(_np.minimum(max_tile_shape, array_shape), copy=True, dtype=array_shape.dtype)
    if sub_tile_shape is None:
        sub_tile_shape = _np.ones((len(array_shape),), dtype="int64")
    sub_tile_shape = _np.array(sub_tile_shape, dtype="int64")
    # Convert halo to canonical (ndim, 2) per-axis [-ve, +ve] widths.
    halo = convert_halo_to_array_form(halo=halo, ndim=len(array_shape))
    if _np.any(array_shape < sub_tile_shape):
        raise ValueError(
            "Got array_shape=%s element less than corresponding sub_tile_shape=%s element."
            %
            (
                array_shape,
                sub_tile_shape
            )
        )
    logger.debug("max_tile_shape=%s", max_tile_shape)
    logger.debug("sub_tile_shape=%s", sub_tile_shape)
    logger.debug("halo=%s", halo)
    # Work in units of whole sub-tiles: the number of sub-tiles covering the
    # array (rounded up) and the number of sub-tiles in a candidate tile.
    array_sub_tile_split_shape = ((array_shape - 1) // sub_tile_shape) + 1
    tile_sub_tile_split_shape = array_shape // sub_tile_shape
    if len(tile_sub_tile_split_shape) <= 1:
        # 1D case: solve directly for the number of sub-tiles which fit
        # within the byte budget (halo elements subtracted up front).
        tile_sub_tile_split_shape[0] = \
            int(_np.floor(
                (
                    (max_tile_bytes / float(array_itemsize))
                    -
                    _np.sum(halo)
                )
                /
                float(sub_tile_shape[0])
            ))
    # Apply the per-axis max_tile_shape constraint (in sub-tile units).
    tile_sub_tile_split_shape = \
        _np.minimum(
            tile_sub_tile_split_shape,
            max_tile_shape // sub_tile_shape
        )
    logger.debug("Pre loop: tile_sub_tile_split_shape=%s", tile_sub_tile_split_shape)
    # Shrink the candidate tile one axis at a time (axis 0 first) until the
    # halo-padded tile fits within the max_tile_bytes budget.
    current_axis = 0
    while (
        (current_axis < len(tile_sub_tile_split_shape))
        and
        (
            (
                _np.product(tile_sub_tile_split_shape * sub_tile_shape + _np.sum(halo, axis=1))
                *
                array_itemsize
            )
            >
            max_tile_bytes
        )
    ):
        if current_axis < (len(tile_sub_tile_split_shape) - 1):
            # Provisionally collapse this axis to a single sub-tile, then
            # grow it back as far as the byte budget allows (at least 1).
            tile_sub_tile_split_shape[current_axis] = 1
            tile_sub_tile_split_shape[current_axis] = \
                (
                    max_tile_bytes
                    //
                    (
                        _np.product(
                            tile_sub_tile_split_shape *
                            sub_tile_shape +
                            _np.sum(
                                halo,
                                axis=1))
                        *
                        array_itemsize
                    )
                )
            tile_sub_tile_split_shape[current_axis] = \
                max([1, tile_sub_tile_split_shape[current_axis]])
        else:
            # Last axis: fold the halo widths of the preceding axes into a
            # halo-padded sub-tile shape and solve for this axis directly.
            sub_tile_shape_h = sub_tile_shape.copy()
            sub_tile_shape_h[0:current_axis] += _np.sum(halo[0:current_axis, :], axis=1)
            tile_sub_tile_split_shape[current_axis] = \
                int(_np.floor(
                    (
                        (max_tile_bytes / float(array_itemsize))
                        -
                        _np.sum(halo[current_axis]) * _np.product(sub_tile_shape_h[0:current_axis])
                    )
                    /
                    float(_np.product(sub_tile_shape_h))
                ))
        current_axis += 1
    logger.debug("Post loop: tile_sub_tile_split_shape=%s", tile_sub_tile_split_shape)
    # Convert from sub-tile units back to elements, then "canonicalise":
    # recompute the tile shape implied by an (approximately) uniform split
    # of the array into the resulting number of tiles.
    tile_shape = _np.minimum(array_shape, tile_sub_tile_split_shape * sub_tile_shape)
    logger.debug("pre cannonicalise tile_shape=%s", tile_shape)
    tile_split_shape = ((array_shape - 1) // tile_shape) + 1
    logger.debug("tile_split_shape=%s", tile_split_shape)
    tile_shape = (((array_sub_tile_split_shape - 1) // tile_split_shape) + 1) * sub_tile_shape
    logger.debug("post cannonicalise tile_shape=%s", tile_shape)
    return tile_shape
def calculate_num_slices_per_axis(num_slices_per_axis, num_slices, max_slices_per_axis=None):
    """
    Returns a :obj:`numpy.ndarray` (:samp:`return_array` say) where non-positive elements of
    the :samp:`{num_slices_per_axis}` sequence have been replaced with
    positive integer values such that :samp:`numpy.product(return_array) == num_slices`
    and::

       numpy.all(
           return_array[numpy.where(num_slices_per_axis <= 0)]
           <=
           max_slices_per_axis[numpy.where(num_slices_per_axis <= 0)]
       ) is True

    :type num_slices_per_axis: sequence of :obj:`int`
    :param num_slices_per_axis: Constraint for per-axis sub-divisions.
       Non-positive elements indicate values to be replaced in the
       returned array. Positive values are identical to the corresponding
       element in the returned array.
    :type num_slices: integer
    :param num_slices: Indicates the number of slices (rectangular sub-arrays)
       formed by performing sub-divisions per axis. The returned array :samp:`return_array`
       has elements assigned such that :samp:`numpy.product(return_array) == {num_slices}`.
    :type max_slices_per_axis: sequence of :obj:`int` (or :samp:`None`)
    :param max_slices_per_axis: Constraint specifying maximum number of per-axis sub-divisions.
       If :samp:`None` defaults to :samp:`numpy.array([numpy.inf,]*len({num_slices_per_axis}))`.
    :rtype: :obj:`numpy.ndarray`
    :return: An array :samp:`return_array`
       such that :samp:`numpy.product(return_array) == num_slices`.

    Examples::

       >>> from array_split.split import calculate_num_slices_per_axis
       >>>
       >>> calculate_num_slices_per_axis([0, 0, 0], 16)
       array([4, 2, 2])
       >>> calculate_num_slices_per_axis([1, 0, 0], 16)
       array([1, 4, 4])
       >>> calculate_num_slices_per_axis([1, 0, 0], 16, [2, 2, 16])
       array([1, 2, 8])
    """
    logger = _logging.getLogger(__name__)
    ret_array = _np.array(num_slices_per_axis, copy=True)
    if max_slices_per_axis is None:
        # No per-axis cap: unbounded.
        max_slices_per_axis = _np.array([_np.inf, ] * len(num_slices_per_axis))
    max_slices_per_axis = _np.array(max_slices_per_axis)
    if _np.any(max_slices_per_axis <= 0):
        raise ValueError("Got non-positive value in max_slices_per_axis=%s" % max_slices_per_axis)
    # Repeatedly fill the non-positive ("free") entries until all entries
    # are positive.  Each pass factors the remaining slice count over the
    # free axes; axes which hit their cap are fixed and the loop retries.
    while _np.any(ret_array <= 0):
        prd = _np.product(ret_array[_np.where(ret_array > 0)])  # returns 1 for zero-length array
        if (num_slices < prd) or ((num_slices % prd) > 0):
            # The already-assigned axes cannot divide num_slices evenly.
            raise ValueError(
                (
                    "Unable to construct grid of num_slices=%s elements from "
                    +
                    "num_slices_per_axis=%s (with max_slices_per_axis=%s)"
                )
                %
                (num_slices, num_slices_per_axis, max_slices_per_axis)
            )
        ridx = _np.where(ret_array <= 0)
        # Factor the remaining count over the free axes, largest factor
        # first (shape_factors returns ascending order, hence the [::-1]).
        f = shape_factors(num_slices // prd, ridx[0].shape[0])[::-1]
        if _np.all(f < max_slices_per_axis[ridx]):
            ret_array[ridx] = f
        else:
            # One or more factors exceed the cap: clamp those axes to the
            # cap (reduced until it divides num_slices) and iterate again.
            for i in range(ridx[0].shape[0]):
                if f[i] >= max_slices_per_axis[ridx[0][i]]:
                    ret_array[ridx[0][i]] = max_slices_per_axis[ridx[0][i]]
                    prd = _np.product(ret_array[_np.where(ret_array > 0)])
                    while (num_slices % prd) > 0:
                        ret_array[ridx[0][i]] -= 1
                        prd = _np.product(ret_array[_np.where(ret_array > 0)])
        logger.debug(
            "ridx=%s, f=%s, ret_array=%s, max_slices_per_axis=%s",
            ridx, f, ret_array, max_slices_per_axis
        )
    return ret_array
_array_shape_param_doc =\
"""
:type array_shape: sequence of :obj:`int`
:param array_shape: The shape to be *split*.
"""
_array_start_param_doc =\
"""
:type array_start: :samp:`None` or sequence of :obj:`int`
:param array_start: The start index. Defaults to :samp:`[0,]*len(array_shape)`.
The array indexing extents are assumed to range from :samp:`{array_start}`
to :samp:`{array_start} + {array_shape}`.
See :ref:`the-array_start-parameter-examples` examples.
"""
_array_itemsize_param_doc =\
"""
:type array_itemsize: int or sequence of :obj:`int`
:param array_itemsize: Number of bytes per array element.
Only relevant when :samp:`{max_tile_bytes}` is specified.
See :ref:`splitting-by-maximum-bytes-per-tile-examples` examples.
"""
_array_tile_bounds_policy_param_doc =\
"""
:type tile_bounds_policy: :obj:`str`
:param tile_bounds_policy: Specifies whether tiles can extend beyond the array boundaries.
Only relevant for halo values greater than one. If :samp:`{tile_bounds_policy}`
is :data:`ARRAY_BOUNDS`
then the calculated tiles will not extend beyond the array
extents :samp:`{array_start}` and :samp:`{array_start} + {array_shape}`.
If :samp:`{tile_bounds_policy}` is :data:`NO_BOUNDS`
then the returned tiles will extend beyond
the :samp:`{array_start}` and :samp:`{array_start} + {array_shape}` extend
for positive :samp:`{halo}` values. See :ref:`the-halo-parameter-examples` examples.
"""
_ShapeSplitter__init__params_doc =\
"""
:type indices_or_sections: :samp:`None`, :obj:`int` or sequence of :obj:`int`
:param indices_or_sections: If an integer, indicates the number of
elements in the calculated *split* array. If a sequence, indicates
the indices (per axis) at which the splits occur.
See :ref:`splitting-by-number-of-tiles-examples` examples.
:type axis: :samp:`None`, :obj:`int` or sequence of :obj:`int`
:param axis: If an integer, indicates the axis which is to be split.
If a sequence integers, indicates the number of slices per axis,
i.e. if :samp:`{axis} = [3, 5]` then axis :samp:`0` is cut into
3 slices and axis :samp:`1` is cut into 5 slices for a total
of 15 (:samp:`3*5`) rectangular slices in the returned :samp:`(3, 5)`
shaped split.
See :ref:`splitting-by-number-of-tiles-examples` examples
and :ref:`splitting-by-per-axis-split-indices-examples` examples.
%s%s
:type tile_shape: :samp:`None` or sequence of :obj:`int`
:param tile_shape: When not :samp:`None`, specifies explicit shape for tiles.
Should be same length as :samp:`{array_shape}`.
See :ref:`splitting-by-tile-shape-examples` examples.
:type max_tile_bytes: :samp:`None` or :obj:`int`
:param max_tile_bytes: The maximum number of bytes for calculated :samp:`tile_shape`.
See :ref:`splitting-by-maximum-bytes-per-tile-examples` examples.
:type max_tile_shape: :samp:`None` or sequence of :obj:`int`
:param max_tile_shape: Per axis maximum shapes for the calculated :samp:`tile_shape`.
Only relevant when :samp:`{max_tile_bytes}` is specified. Should be same length
as :samp:`{array_shape}`.
See :ref:`splitting-by-maximum-bytes-per-tile-examples` examples.
:type sub_tile_shape: :samp:`None` or sequence of :obj:`int`
:param sub_tile_shape: When not :samp:`None`, the calculated :samp:`tile_shape` will
be an even multiple of this sub-tile shape. Only relevant when :samp:`{max_tile_bytes}`
is specified. Should be same length as :samp:`{array_shape}`.
See :ref:`splitting-by-maximum-bytes-per-tile-examples` examples.%s%s
"""
_halo_param_doc =\
"""
:type halo: :samp:`None`, :obj:`int`, sequence of :obj:`int`, or :samp:`(len({array_shape}), 2)`
shaped :obj:`numpy.ndarray`
:param halo: How tiles are extended per axis in -ve and +ve directions with *halo*
elements. See :ref:`the-halo-parameter-examples` examples.
"""
#: Indicates that tiles are always within the array bounds.
#: See :ref:`the-halo-parameter-examples` examples.
__ARRAY_BOUNDS = "array_bounds"


# NOTE(review): an ``@property`` at *module* level does not behave like a
# class property -- the names ARRAY_BOUNDS/NO_BOUNDS below are bound to
# ``property`` objects, not to the underlying strings, so comparisons such
# as ``tile_bounds_policy == ARRAY_BOUNDS`` rely on object identity.
# Confirm against the upstream source whether these were intended to be
# plain module-level string attributes.
@property
def ARRAY_BOUNDS():  # pylint: disable=invalid-name
    """
    Indicates that tiles are always within the array bounds,
    resulting in tiles which have truncated halos.
    See :ref:`the-halo-parameter-examples` examples.
    """
    return __ARRAY_BOUNDS


#: Indicates that tiles may extend beyond the array bounds.
#: See :ref:`the-halo-parameter-examples` examples.
__NO_BOUNDS = "no_bounds"


@property
def NO_BOUNDS():  # pylint: disable=invalid-name
    """
    Indicates that tiles may have halos which extend beyond the array bounds.
    See :ref:`the-halo-parameter-examples` examples.
    """
    return __NO_BOUNDS
def convert_halo_to_array_form(halo, ndim):
    """
    Converts the :samp:`{halo}` argument to a :samp:`({ndim}, 2)` shaped
    array of per-axis (-ve direction, +ve direction) halo widths.

    :type halo: :samp:`None`, :obj:`int`, an :samp:`{ndim}` length sequence
       of :samp:`int` or :samp:`({ndim}, 2)` shaped array of :samp:`int`
    :param halo: Halo to be converted to :samp:`({ndim}, 2)` shaped array form.
    :type ndim: :obj:`int`
    :param ndim: Number of dimensions.
    :rtype: :obj:`numpy.ndarray`
    :return: A :samp:`({ndim}, 2)` shaped array of :obj:`numpy.int64` elements.
    :raises ValueError: if the first dimension of the converted array
       does not equal :samp:`{ndim}`.

    Examples::

       >>> convert_halo_to_array_form(halo=2, ndim=4)
       array([[2, 2],
              [2, 2],
              [2, 2],
              [2, 2]])
       >>> convert_halo_to_array_form(halo=[0, 1, 2], ndim=3)
       array([[0, 0],
              [1, 1],
              [2, 2]])
       >>> convert_halo_to_array_form(halo=[[0, 1], [2, 3], [3, 4]], ndim=3)
       array([[0, 1],
              [2, 3],
              [3, 4]])
    """
    dtyp = _np.int64
    if halo is None:
        # No halo: zero widths everywhere.
        return _np.zeros((ndim, 2), dtype=dtyp)
    if is_scalar(halo):
        # A single scalar width applied in both directions of every axis.
        return _np.zeros((ndim, 2), dtype=dtyp) + halo
    halo_ary = _np.array(halo)
    if (ndim == 1) and (halo_ary.shape == (2,)):
        # 1D special case: a length-2 sequence is the (-ve, +ve) pair.
        converted = _np.array([halo, ], copy=True, dtype=dtyp)
    elif len(halo_ary.shape) == 1:
        # One width per axis, mirrored into both directions.
        converted = _np.array([halo, halo], dtype=dtyp).T.copy()
    else:
        # Assumed to already be in (ndim, 2) form; copy and cast.
        converted = _np.array(halo, copy=True, dtype=dtyp)
    if converted.shape[0] != ndim:
        raise ValueError(
            "Got halo.shape=%s, expecting halo.shape=(%s, 2)"
            %
            (converted.shape, ndim)
        )
    return converted
class ShapeSplitter(object):
    """
    Implements array shape splitting. There are three main (top-level) methods:

    :meth:`__init__`
       Initialisation of parameters which define the split.
    :meth:`set_split_extents`
       Calculates the per-axis indices for the cuts. Sets
       the :attr:`split_shape`, :attr:`split_begs`
       and :attr:`split_ends` attributes.
    :meth:`calculate_split`
       Calls :meth:`set_split_extents` followed
       by :meth:`calculate_split_from_extents` to
       return the :obj:`numpy.ndarray` of :obj:`tuple` elements (slices).

    Example::

       >>> import numpy as np
       >>> ary = np.arange(0, 10)
       >>> splitter = ShapeSplitter(ary.shape, 3)
       >>> split = splitter.calculate_split()
       >>> split.shape
       (3,)
       >>> split
       array([(slice(0, 4, None),), (slice(4, 7, None),), (slice(7, 10, None),)],
             dtype=[('0', 'O')])
       >>> [ary[slyce] for slyce in split.flatten()]
       [array([0, 1, 2, 3]), array([4, 5, 6]), array([7, 8, 9])]
       >>>
       >>> splitter.split_shape # equivalent to split.shape above
       array([3])
       >>> splitter.split_begs # start indices for tile extents
       [array([0, 4, 7])]
       >>> splitter.split_ends # stop indices for tile extents
       [array([ 4,  7, 10])]
    """

    #: Class attribute for :obj:`logging.Logger` logging.
    logger = _logging.getLogger(__name__ + ".ShapeSplitter")

    #: Class attribute indicating list of valid values for :attr:`tile_bound_policy`.
    #: See :data:`ARRAY_BOUNDS` and :data:`NO_BOUNDS`.
    valid_tile_bounds_policies = [ARRAY_BOUNDS, NO_BOUNDS]
    def __init__(
            self,
            array_shape,
            indices_or_sections=None,
            axis=None,
            array_start=None,
            array_itemsize=1,
            tile_shape=None,
            max_tile_bytes=None,
            max_tile_shape=None,
            sub_tile_shape=None,
            halo=None,
            tile_bounds_policy=ARRAY_BOUNDS
    ):
        """
        Initialise the parameters which define the split; no split is
        computed here (see :meth:`calculate_split`).
        """
        # Initialise *private* attributes.
        self.__array_shape = None
        self.__array_start = None
        self.__array_itemsize = None
        self.__indices_per_axis = None
        self.__split_size = None
        self.__split_num_slices_per_axis = None
        self.__tile_shape = None
        self.__max_tile_bytes = None
        self.__max_tile_shape = None
        self.__sub_tile_shape = None
        self.__halo = None
        self.__tile_bounds_policy = None
        self.__tile_beg_min = None
        self.__tile_end_max = None
        self.__split_shape = None
        self.__split_begs = None
        self.__split_ends = None
        # Now set properties from arguments
        self.array_shape = _np.array(array_shape)
        if array_start is None:
            array_start = _np.zeros_like(self.array_shape)
        self.array_start = array_start
        self.array_itemsize = array_itemsize
        # Disambiguate indices_or_sections: a sequence means per-axis cut
        # indices; a scalar means a total number of sections.
        indices_per_axis = None
        if is_indices(indices_or_sections):
            num_subarrays = None
            indices_per_axis = indices_or_sections
            if (
                ((axis is None) or is_scalar(axis))
                and
                (not _np.any([is_sequence(_e) for _e in indices_or_sections]))
            ):
                # A flat sequence of indices applies to a single axis
                # (axis 0 when axis is None).
                if axis is None:
                    axis = 0
                # Make indices_per_axis a list of lists, so that
                # element 0 is a list of indices for axis 0
                indices_per_axis = [None, ] * len(array_shape)
                indices_per_axis[axis] = indices_or_sections
        else:
            indices_per_axis = None
            num_subarrays = indices_or_sections
        self.indices_per_axis = indices_per_axis
        self.split_size = num_subarrays
        # Derive the per-axis number of sections when splitting by section
        # count and/or an axis specification.
        split_num_slices_per_axis = None
        if (self.split_size is not None) or (axis is not None):
            if axis is None:
                axis = 0
            if is_sequence(axis):
                # axis is already a per-axis section-count sequence; pad
                # with 1 up to the full number of dimensions.
                split_num_slices_per_axis = pad_with_object(axis, len(self.array_shape), 1)
            elif self.split_size is not None:
                # Scalar axis: all sections are cut along that one axis.
                split_num_slices_per_axis = pad_with_object([], len(self.array_shape), 1)
                split_num_slices_per_axis[axis] = self.split_size
        self.split_num_slices_per_axis = split_num_slices_per_axis
        self.tile_shape = tile_shape
        self.max_tile_bytes = max_tile_bytes
        self.max_tile_shape = max_tile_shape
        self.sub_tile_shape = sub_tile_shape
        # Normalise halo to canonical (ndim, 2) array form.
        halo = self.convert_halo_to_array_form(halo)
        self.halo = halo
        if tile_bounds_policy is None:
            tile_bounds_policy = ARRAY_BOUNDS
        self.tile_bounds_policy = tile_bounds_policy
        # Default tile extent bounds; update_tile_extent_bounds() revises
        # these according to tile_bounds_policy.
        self.tile_beg_min = self.array_start
        self.tile_end_max = self.array_start + self.array_shape
        # Split extents are computed later by set_split_extents*().
        self.split_shape = None
        self.split_begs = None
        self.split_ends = None
def convert_halo_to_array_form(self, halo):
"""
Converts the :samp:`{halo}` argument to a :samp:`({self}.array_shape.size, 2)`
shaped array.
:type halo: :samp:`None`, :obj:`int`, :samp:`self.array_shape.size` length sequence
of :samp:`int` or :samp:`(self.array_shape.size, 2)` shaped array
of :samp:`int`
:param halo: Halo to be converted to :samp:`(len(self.array_shape), 2)` shaped array form.
:rtype: :obj:`numpy.ndarray`
:return: A :samp:`(len(self.array_shape), 2)` shaped array of :obj:`numpy.int64` elements.
"""
return convert_halo_to_array_form(halo=halo, ndim=len(self.array_shape))
    # ------------------------------------------------------------------
    # Split-defining parameters are exposed as properties backed by the
    # private attributes initialised in __init__.  The setters are plain
    # assignments; validation happens in check_split_parameters().
    # ------------------------------------------------------------------

    @property
    def array_shape(self):
        """
        The shape of the array which is to be split. A sequence of :obj:`int` indicating the
        per-axis sizes which are to be split.
        """
        return self.__array_shape

    @array_shape.setter
    def array_shape(self, array_shape):
        self.__array_shape = array_shape

    @property
    def array_start(self):
        """
        The start index. A sequence of :obj:`int` indicating the start of indexing for
        the tile slices. Defaults to :samp:`numpy.zeros_like({self}.array_shape)`.
        """
        return self.__array_start

    @array_start.setter
    def array_start(self, array_start):
        self.__array_start = array_start

    @property
    def array_itemsize(self):
        """
        The number of bytes per array element, see :attr:`max_tile_bytes`.
        """
        return self.__array_itemsize

    @array_itemsize.setter
    def array_itemsize(self, array_itemsize):
        self.__array_itemsize = array_itemsize

    @property
    def indices_per_axis(self):
        """
        The per-axis indices indicating the cuts for the split.
        A :obj:`list` of 1D :obj:`numpy.ndarray` objects such
        that :samp:`{self}.indices_per_axis[i]` indicates the
        cut positions for axis :samp:`i`.
        """
        return self.__indices_per_axis

    @indices_per_axis.setter
    def indices_per_axis(self, indices_per_axis):
        self.__indices_per_axis = indices_per_axis

    @property
    def split_size(self):
        """
        An :obj:`int` indicating the number of tiles in the calculated split.
        """
        return self.__split_size

    @split_size.setter
    def split_size(self, split_size):
        self.__split_size = split_size

    @property
    def split_num_slices_per_axis(self):
        """
        Number of slices per axis.
        A 1D :obj:`numpy.ndarray` of :obj:`int` indicating the number of slices (sections)
        per axis, so that :samp:`{self}.split_num_slices_per_axis[i]` is an integer
        indicating the number of sections along axis :samp:`i` in the calculated split.
        """
        return self.__split_num_slices_per_axis

    @split_num_slices_per_axis.setter
    def split_num_slices_per_axis(self, split_num_slices_per_axis):
        self.__split_num_slices_per_axis = split_num_slices_per_axis

    @property
    def tile_shape(self):
        """
        The shape of all tiles in the calculated split.
        A 1D :samp:`numpy.ndarray` of :obj:`int` indicating the per-axis
        number of elements for tiles in the calculated split.
        """
        return self.__tile_shape

    @tile_shape.setter
    def tile_shape(self, tile_shape):
        self.__tile_shape = tile_shape

    @property
    def max_tile_bytes(self):
        """
        The maximum number of bytes for any tile (including :attr:`halo`) in the returned split.
        An :obj:`int` which constrains the tile shape such that any tile
        from the computed split is no bigger than :samp:`{max_tile_bytes}`.
        """
        return self.__max_tile_bytes

    @max_tile_bytes.setter
    def max_tile_bytes(self, max_tile_bytes):
        self.__max_tile_bytes = max_tile_bytes

    @property
    def max_tile_shape(self):
        """
        Per-axis maximum sizes for calculated tiles.
        A 1D :samp:`numpy.ndarray` of :obj:`int` indicating the per-axis
        maximum number of elements for tiles in the calculated split.
        """
        return self.__max_tile_shape

    @max_tile_shape.setter
    def max_tile_shape(self, max_tile_shape):
        self.__max_tile_shape = max_tile_shape

    @property
    def sub_tile_shape(self):
        """
        Calculated tile shape will be an integer multiple of this sub-tile shape.
        i.e. :samp:`(self.tile_shape[i] % self.sub_tile_shape[i]) == 0`,
        for :samp:`i in range(0, len(self.tile_shape))`.
        A 1D :samp:`numpy.ndarray` of :obj:`int` indicating sub-tile shape.
        """
        return self.__sub_tile_shape

    @sub_tile_shape.setter
    def sub_tile_shape(self, sub_tile_shape):
        self.__sub_tile_shape = sub_tile_shape

    @property
    def halo(self):
        """
        Per-axis -ve and +ve halo sizes for extending tiles to overlap with neighbouring tiles.
        A :samp:`(N, 2)` shaped array indicating the
        """
        return self.__halo

    @halo.setter
    def halo(self, halo):
        # Unlike the other setters, this one normalises the assigned value
        # to canonical (ndim, 2) array form.
        self.__halo = convert_halo_to_array_form(halo, ndim=self.array_shape.size)

    @property
    def tile_bounds_policy(self):
        """
        A string indicating whether tile halo extents can extend beyond the array domain.
        Valid values are indicated by :attr:`valid_tile_bounds_policies`.
        """
        return self.__tile_bounds_policy

    @tile_bounds_policy.setter
    def tile_bounds_policy(self, tile_bounds_policy):
        self.__tile_bounds_policy = tile_bounds_policy

    @property
    def tile_beg_min(self):
        """
        The per-axis minimum index for :attr:`slice.start`. The per-axis lower bound for
        tile start indices. A 1D :obj:`numpy.ndarray`.
        """
        return self.__tile_beg_min

    @tile_beg_min.setter
    def tile_beg_min(self, tile_beg_min):
        self.__tile_beg_min = tile_beg_min

    @property
    def tile_end_max(self):
        """
        The per-axis maximum index for :attr:`slice.stop`. The per-axis upper bound for
        tile stop indices. A 1D :obj:`numpy.ndarray`.
        """
        return self.__tile_end_max

    @tile_end_max.setter
    def tile_end_max(self, tile_end_max):
        self.__tile_end_max = tile_end_max

    @property
    def split_shape(self):
        """
        The shape of the calculated split array. Indicates the per-axis number
        of sections in the calculated split. A 1D :obj:`numpy.ndarray`.
        """
        return self.__split_shape

    @split_shape.setter
    def split_shape(self, split_shape):
        self.__split_shape = split_shape

    @property
    def split_begs(self):
        """
        The list of per-axis start indices for :obj:`slice` objects.
        A :obj:`list` of 1D :obj:`numpy.ndarray` objects indicating
        the :attr:`slice.start` index for for tiles.
        """
        return self.__split_begs

    @split_begs.setter
    def split_begs(self, split_begs):
        self.__split_begs = split_begs

    @property
    def split_ends(self):
        """
        The list of per-axis stop indices for :obj:`slice` objects.
        A :obj:`list` of 1D :obj:`numpy.ndarray` objects indicating
        the :attr:`slice.stop` index for for tiles.
        """
        return self.__split_ends

    @split_ends.setter
    def split_ends(self, split_ends):
        self.__split_ends = split_ends
def check_tile_bounds_policy(self):
"""
Raises :obj:`ValueError` if :attr:`tile_bounds_policy`
is not in :samp:`[{self}.ARRAY_BOUNDS, {self}.NO_BOUNDS]`.
"""
if self.tile_bounds_policy not in self.valid_tile_bounds_policies:
raise ValueError(
"Got self.tile_bounds_policy=%s, which is not in %s."
%
(self.tile_bounds_policy, self.valid_tile_bounds_policies)
)
def check_consistent_parameter_dimensions(self):
"""
Ensure that all parameter dimensions are consistent with
the :attr:`array_shape` dimension.
:raises ValueError: For inconsistent parameter dimensions.
"""
if self.indices_per_axis is not None:
if len(self.indices_per_axis) > len(self.array_shape):
raise ValueError(
"Got len(self.indices_per_axis)=%s > len(self.array_shape)=%s, should be equal."
%
(len(self.indices_per_axis), len(self.array_shape))
)
if self.split_num_slices_per_axis is not None:
if len(self.split_num_slices_per_axis) > len(self.array_shape):
raise ValueError(
(
"Got len(self.split_num_slices_per_axis)=%s > len(self.array_shape)=%s,"
+
" should be equal."
)
%
(len(self.split_num_slices_per_axis), len(self.array_shape))
)
if self.tile_shape is not None:
if len(self.tile_shape) != len(self.array_shape):
raise ValueError(
"Got len(self.tile_shape)=%s > len(self.array_shape)=%s, should be equal."
%
(len(self.tile_shape), len(self.array_shape))
)
if self.sub_tile_shape is not None:
if len(self.sub_tile_shape) != len(self.array_shape):
raise ValueError(
"Got len(self.sub_tile_shape)=%s > len(self.array_shape)=%s, should be equal."
%
(len(self.sub_tile_shape), len(self.array_shape))
)
if self.max_tile_shape is not None:
if len(self.max_tile_shape) != len(self.array_shape):
raise ValueError(
"Got len(self.max_tile_shape)=%s > len(self.array_shape)=%s, should be equal."
%
(len(self.max_tile_shape), len(self.array_shape))
)
if self.array_start is not None:
if len(self.array_start) != len(self.array_shape):
raise ValueError(
"Got len(self.array_start)=%s > len(self.array_shape)=%s, should be equal."
%
(len(self.array_start), len(self.array_shape))
)
    def check_consistent_parameter_grouping(self):
        """
        Ensures this object does not have conflicting groups of parameters.

        :raises ValueError: For conflicting or absent parameters.
        """
        # Gather which of the mutually-exclusive parameter groups have been
        # specified; exactly one group may define the split.
        parameter_groups = {}
        if self.indices_per_axis is not None:
            parameter_groups["indices_per_axis"] = \
                {"self.indices_per_axis": self.indices_per_axis}
        if (self.split_size is not None) or (self.split_num_slices_per_axis is not None):
            parameter_groups["split_size"] = \
                {
                    "self.split_size": self.split_size,
                    "self.split_num_slices_per_axis": self.split_num_slices_per_axis,
                }
        if self.tile_shape is not None:
            parameter_groups["tile_shape"] = \
                {"self.tile_shape": self.tile_shape}
        if self.max_tile_bytes is not None:
            parameter_groups["max_tile_bytes"] = \
                {"self.max_tile_bytes": self.max_tile_bytes}
        # max_tile_shape and sub_tile_shape are modifiers belonging to the
        # "max_tile_bytes" group.
        if self.max_tile_shape is not None:
            if "max_tile_bytes" not in parameter_groups.keys():
                parameter_groups["max_tile_bytes"] = {}
            parameter_groups["max_tile_bytes"]["self.max_tile_shape"] = self.max_tile_shape
        if self.sub_tile_shape is not None:
            if "max_tile_bytes" not in parameter_groups.keys():
                parameter_groups["max_tile_bytes"] = {}
            parameter_groups["max_tile_bytes"]["self.sub_tile_shape"] = self.sub_tile_shape
        self.logger.debug("parameter_groups=%s", parameter_groups)
        if len(parameter_groups.keys()) > 1:
            # More than one group specified: ambiguous split definition.
            group_keys = sorted(parameter_groups.keys())
            raise ValueError(
                "Got conflicting parameter groups specified, "
                +
                "should only specify one group to define the split:\n"
                +
                (
                    "\n".join(
                        [
                            (
                                ("Group %18s: " % ("'%s'" % group_key))
                                +
                                str(parameter_groups[group_key])
                            )
                            for group_key in group_keys
                        ]
                    )
                )
            )
        if len(parameter_groups.keys()) <= 0:
            # No group specified at all: nothing defines the split.
            raise ValueError(
                "No split parameters specified, need parameters from one of the groups: "
                +
                "'indices_per_axis', 'split_size', 'tile_shape' or 'max_tile_bytes'"
            )
def check_split_parameters(self):
"""
Ensures this object has a state consistent with evaluating a split.
:raises ValueError: For conflicting or absent parameters.
"""
self.check_tile_bounds_policy()
self.check_consistent_parameter_dimensions()
self.check_consistent_parameter_grouping()
def update_tile_extent_bounds(self):
"""
Updates the :attr:`tile_beg_min` and :attr:`tile_end_max`
data members according to :attr:`tile_bounds_policy`.
"""
if self.tile_bounds_policy == NO_BOUNDS:
self.tile_beg_min = self.array_start - self.halo[:, 0]
self.tile_end_max = self.array_start + self.array_shape + self.halo[:, 1]
elif self.tile_bounds_policy == ARRAY_BOUNDS:
self.tile_beg_min = self.array_start
self.tile_end_max = self.array_start + self.array_shape
    def set_split_extents_by_indices_per_axis(self):
        """
        Sets split shape :attr:`split_shape` and
        split extents (:attr:`split_begs` and :attr:`split_ends`)
        from values in :attr:`indices_per_axis`.

        :raises ValueError: if :attr:`indices_per_axis` is :samp:`None`.
        """
        if self.indices_per_axis is None:
            raise ValueError("Got None for self.indices_per_axis")
        self.logger.debug("self.array_shape=%s", self.array_shape)
        self.logger.debug("self.indices_per_axis=%s", self.indices_per_axis)
        # Pad with None entries so there is one (possibly absent) index
        # list per axis.
        self.indices_per_axis = \
            pad_with_none(self.indices_per_axis, len(self.array_shape))
        # Define the start and stop indices (extents) for each axis slice
        self.split_shape = _np.ones(len(self.array_shape), dtype="int64")
        self.split_begs = [[], ] * len(self.array_shape)
        self.split_ends = [[], ] * len(self.array_shape)
        for i in range(len(self.indices_per_axis)):
            indices = self.indices_per_axis[i]
            if (indices is not None) and (len(indices) > 0):
                # k cut indices produce k+1 sections along this axis:
                # begs = [0, *indices], ends = [*indices, axis_size].
                self.split_shape[i] = len(indices) + 1
                self.split_begs[i] = _np.zeros((len(indices) + 1,), dtype="int64")
                self.split_begs[i][1:] = indices
                self.split_ends[i] = _np.zeros((len(self.split_begs[i]),), dtype="int64")
                self.split_ends[i][0:-1] = self.split_begs[i][1:]
                self.split_ends[i][-1] = self.array_shape[i]
            else:
                # start and stop is the full width of the axis
                self.split_begs[i] = [0, ]
                self.split_ends[i] = [self.array_shape[i], ]
        self.logger.debug("self.indices_per_axis=%s", self.indices_per_axis)
    def calculate_split_from_extents(self):
        """
        Returns split calculated using extents obtained
        from :attr:`split_begs` and :attr:`split_ends`.

        All calls to calculate the split end up here to produce
        the :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
        of :obj:`tuple`-of-:obj:`slice` elements.

        :rtype: :obj:`numpy.ndarray`
        :return:
           A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
           where each element is a :obj:`tuple` of :obj:`slice` objects.
        """
        self.logger.debug("self.split_shape=%s", self.split_shape)
        self.logger.debug("self.split_begs=%s", self.split_begs)
        self.logger.debug("self.split_ends=%s", self.split_ends)
        # For every multi-index idx of the split grid, build a tuple of
        # per-axis slices.  Each slice start/stop is offset by array_start,
        # widened by the halo -- the (end > beg) factor suppresses the halo
        # on empty extents -- and clamped to [tile_beg_min, tile_end_max].
        ret = \
            _np.array(
                [
                    tuple(
                        [
                            slice(
                                max([
                                    self.split_begs[d][idx[d]]
                                    + self.array_start[d]
                                    - self.halo[d, 0]
                                    * (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]]),
                                    self.tile_beg_min[d]
                                ]),
                                min([
                                    self.split_ends[d][idx[d]]
                                    + self.array_start[d]
                                    + self.halo[d, 1]
                                    * (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]]),
                                    self.tile_end_max[d]
                                ])
                            )
                            for d in range(len(self.split_shape))
                        ]
                    )
                    # Enumerate all tile multi-indices in C (row-major) order.
                    for idx in
                    _np.array(
                        _np.unravel_index(
                            _np.arange(0, _np.product(self.split_shape)),
                            self.split_shape
                        )
                    ).T
                ],
                # One "object" field per axis so each element is a tuple of
                # slice objects; reshaped to the split grid shape.
                dtype=[("%d" % d, "object") for d in range(len(self.split_shape))]
            ).reshape(self.split_shape)
        return ret
def calculate_split_halos_from_extents(self):
    """
    Returns :samp:`(self.ndim, 2)` shaped halo array elements indicating
    the halo for each split. Tiles on the boundary may have the halo trimmed
    to account for the :attr:`tile_bounds_policy`.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
       where each element is a :samp:`(self.ndim, 2)` shaped :obj:`numpy.ndarray`
       indicating the per-axis and per-direction number of halo elements for each tile
       in the split.
    """
    self.logger.debug("self.split_shape=%s", self.split_shape)
    self.logger.debug("self.split_begs=%s", self.split_begs)
    self.logger.debug("self.split_ends=%s", self.split_ends)
    # Per tile and axis `d`, the recorded halo is the requested halo
    # (zeroed for empty tiles via the "(end > beg)" factor) trimmed to the
    # space actually available between the tile extent and the overall
    # bounds tile_beg_min / tile_end_max.
    # NOTE(review): unlike calculate_split_from_extents, the extents here
    # are compared with tile_beg_min/tile_end_max *without* the array_start
    # offset -- confirm the coordinate frames agree for nonzero
    # array_start (update_tile_extent_bounds is not visible in this chunk).
    ret = \
        _np.array(
            [
                (
                    tuple(
                        (
                            min([
                                self.split_begs[d][idx[d]] - self.tile_beg_min[d],
                                self.halo[d, 0]
                                *
                                (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]])
                            ]),
                            min([
                                self.tile_end_max[d] - self.split_ends[d][idx[d]],
                                self.halo[d, 1]
                                *
                                (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]])
                            ])
                        )
                        for d in range(len(self.split_shape))
                    )
                )
                for idx in
                _np.array(
                    _np.unravel_index(
                        _np.arange(0, _np.product(self.split_shape)),
                        self.split_shape
                    )
                ).T
            ],
            # A "2int64" field per axis: each tile element holds an
            # (ndim, 2) integer array of (negative, positive) halo widths.
            dtype=[("%d" % d, "2int64") for d in range(len(self.split_shape))]
        ).reshape(self.split_shape)
    return ret
def calculate_split_by_indices_per_axis(self):
    """
    Computes the split from the explicit per-axis cut
    indices (:attr:`indices_per_axis`).

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
       where each element is a :obj:`tuple` of :obj:`slice` objects.
    """
    # Convert the per-axis indices into begin/end extent tables, then
    # materialise the structured array of slice tuples from them.
    self.set_split_extents_by_indices_per_axis()
    return self.calculate_split_from_extents()
def calculate_axis_split_extents(self, num_sections, size):
    """
    Divides :samp:`range(0, {size})` into :samp:`{num_sections}`
    (approximately) equal-length contiguous intervals.

    :type num_sections: :obj:`int`
    :param num_sections: Number of intervals.
    :type size: :obj:`int`
    :param size: Extent of the range being subdivided.
    :rtype: :obj:`tuple`
    :return: Two element tuple :samp:`(begs, ends)` such
       that :samp:`slice(begs[i], ends[i])` is the :samp:`i`-th interval.
    """
    quotient = size // num_sections
    if quotient >= 1:
        # Base intervals of length `quotient`; distribute the remainder by
        # widening the first (size % num_sections) intervals by one element.
        begs = _np.arange(0, quotient * num_sections, quotient)
        remainder = size - quotient * num_sections
        for extra in range(remainder):
            begs[extra + 1:] += 1
        ends = _np.zeros_like(begs)
        ends[0:-1] = begs[1:]
        ends[-1] = size
    else:
        # More sections than elements: one element per leading section,
        # the trailing sections are empty (beg == end == size).
        begs = _np.arange(0, num_sections)
        begs[size:] = size
        ends = begs.copy()
        ends[0:-1] = begs[1:]
    return begs, ends
def set_split_extents_by_split_size(self):
    """
    Sets split shape :attr:`split_shape` and
    split extents (:attr:`split_begs` and :attr:`split_ends`)
    from values in :attr:`split_size` and :attr:`split_num_slices_per_axis`.

    :raises ValueError: if :attr:`split_size` is :samp:`None` and
       :attr:`split_num_slices_per_axis` is not fully specified with
       positive values.
    """
    # When no explicit split_size is given, derive it as the product of
    # the per-axis slice counts -- valid only if every count is specified
    # and positive.
    if self.split_size is None:
        if (
            _np.all([s is not None for s in self.split_num_slices_per_axis])
            and
            _np.all([s > 0 for s in self.split_num_slices_per_axis])
        ):
            self.split_size = _np.product(self.split_num_slices_per_axis)
        else:
            raise ValueError(
                (
                    "Got invalid self.split_num_slices_per_axis=%s, all elements "
                    +
                    "need to be integers greater than zero when self.split_size is None."
                )
                %
                self.split_num_slices_per_axis
            )
    self.logger.debug(
        "Pre cannonicalise: self.split_num_slices_per_axis=%s",
        self.split_num_slices_per_axis)
    # Replace unspecified (non-positive) per-axis counts with factors of
    # split_size so the per-axis counts multiply to exactly split_size.
    self.split_num_slices_per_axis = \
        calculate_num_slices_per_axis(
            self.split_num_slices_per_axis,
            self.split_size,
            self.array_shape
        )
    self.logger.debug(
        "Post cannonicalise: self.split_num_slices_per_axis=%s",
        self.split_num_slices_per_axis)
    # Define the start and stop indices (extents) for each axis slice
    self.split_shape = self.split_num_slices_per_axis.copy()
    self.split_begs = [[], ] * len(self.array_shape)
    self.split_ends = [[], ] * len(self.array_shape)
    for i in range(len(self.array_shape)):
        self.split_begs[i], self.split_ends[i] = \
            self.calculate_axis_split_extents(
                self.split_shape[i],
                self.array_shape[i]
            )
def calculate_split_by_split_size(self):
    """
    Computes the split from the requested number of
    tiles (:attr:`split_size`) and/or the per-axis slice
    counts (:attr:`split_num_slices_per_axis`).

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
       where each element is a :obj:`tuple` of :obj:`slice` objects.
    """
    # Derive the per-axis extents from the slice counts, then build the
    # structured array of slice tuples from those extents.
    self.set_split_extents_by_split_size()
    return self.calculate_split_from_extents()
def set_split_extents_by_tile_shape(self):
    """
    Computes the split layout implied by :attr:`tile_shape` and records
    it in :attr:`split_shape`, :attr:`split_begs` and :attr:`split_ends`.
    """
    # Ceiling divide: number of tiles needed to cover each axis.
    self.split_shape = ((self.array_shape - 1) // self.tile_shape) + 1
    num_axes = len(self.array_shape)
    self.split_begs = [[], ] * num_axes
    self.split_ends = [[], ] * num_axes
    for axis in range(num_axes):
        begs = _np.arange(0, self.array_shape[axis], self.tile_shape[axis])
        ends = _np.zeros_like(begs)
        # Each tile ends where its successor begins; the final tile is
        # clipped to the axis length.
        ends[0:-1] = begs[1:]
        ends[-1] = self.array_shape[axis]
        self.split_begs[axis] = begs
        self.split_ends[axis] = ends
def calculate_split_by_tile_shape(self):
    """
    Computes the split implied by the explicit :attr:`tile_shape`.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
       where each element is a :obj:`tuple` of :obj:`slice` objects.
    """
    # Derive the per-axis extents from the tile shape, then build the
    # structured array of slice tuples from those extents.
    self.set_split_extents_by_tile_shape()
    return self.calculate_split_from_extents()
def set_split_extents_by_tile_max_bytes(self):
    """
    Derives a tile shape from the :attr:`max_tile_bytes` budget
    (honouring :attr:`max_tile_shape`, :attr:`sub_tile_shape`
    and :attr:`halo`) and sets the split
    extents (:attr:`split_begs`, :attr:`split_ends`) accordingly.
    """
    # First solve for a tile shape satisfying the byte budget...
    self.tile_shape = calculate_tile_shape_for_max_bytes(
        array_shape=self.array_shape,
        array_itemsize=self.array_itemsize,
        max_tile_bytes=self.max_tile_bytes,
        max_tile_shape=self.max_tile_shape,
        sub_tile_shape=self.sub_tile_shape,
        halo=self.halo
    )
    # ...then fall through to the tile-shape based extent calculation.
    self.set_split_extents_by_tile_shape()
def calculate_split_by_tile_max_bytes(self):
    """
    Computes the split implied by the :attr:`max_tile_bytes`
    budget (together with :attr:`max_tile_shape`, :attr:`sub_tile_shape`
    and :attr:`halo`).

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
       where each element is a :obj:`tuple` of :obj:`slice` objects.
    """
    # Derive the per-axis extents from the byte budget, then build the
    # structured array of slice tuples from those extents.
    self.set_split_extents_by_tile_max_bytes()
    return self.calculate_split_from_extents()
def set_split_extents(self):
    """
    Computes the split extents (:attr:`split_begs` and :attr:`split_ends`)
    using whichever splitting criterion was configured at construction.
    """
    self.check_split_parameters()
    self.update_tile_extent_bounds()
    # Dispatch on the first configured criterion, in priority order:
    # explicit per-axis cut indices, then slice counts, then an explicit
    # tile shape, then a maximum-bytes-per-tile budget.
    if self.indices_per_axis is not None:
        self.set_split_extents_by_indices_per_axis()
        return
    if (self.split_size is not None) or (self.split_num_slices_per_axis is not None):
        self.set_split_extents_by_split_size()
        return
    if self.tile_shape is not None:
        self.set_split_extents_by_tile_shape()
        return
    if self.max_tile_bytes is not None:
        self.set_split_extents_by_tile_max_bytes()
def calculate_split(self):
    """
    Computes the split.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
       of dimension :samp:`len({self}.array_shape)`.
       Each element of the returned array is a :obj:`tuple`
       containing :samp:`len({self}.array_shape)` :obj:`slice` objects,
       each tuple defining a slice within the
       bounds :samp:`{self}.array_start - {self}.halo[:, 0]`
       to :samp:`{self}.array_start + {self}.array_shape + {self}.halo[:, 1]`.
    """
    # Resolve the configured criterion into extent tables, then turn the
    # extents into the structured array of slice tuples.
    self.set_split_extents()
    return self.calculate_split_from_extents()
# Attach the composed docstring to ShapeSplitter.__init__ at import time.
# The doc text is assembled by %-interpolating the shared reST parameter
# fragments (defined elsewhere in this module).  A throw-away instance is
# created so the underlying function object can be reached via __func__
# (works for both Python 2 unbound and Python 3 bound methods).
ShapeSplitter([0, ]).__init__.__func__.__doc__ = \
    """
Initialises parameters which define a split.
%s
%s
.. seealso:: :ref:`array_split-examples`
""" % (
    _array_shape_param_doc,
    (
        _ShapeSplitter__init__params_doc
        %
        (
            _array_start_param_doc,
            "\n" + _array_itemsize_param_doc,
            _halo_param_doc,
            _array_tile_bounds_policy_param_doc,
        )
    )
)
def shape_split(array_shape, *args, **kwargs):
    "To be replaced."
    # The real docstring is assigned to shape_split.__doc__ just below this
    # definition.  All work is delegated to a ShapeSplitter instance.
    splitter = ShapeSplitter(array_shape, *args, **kwargs)
    return splitter.calculate_split()
# Compose shape_split's docstring from the shared reST parameter fragments
# (the function body itself carries only the "To be replaced." placeholder).
shape_split.__doc__ =\
    """
Splits specified :samp:`{array_shape}` in tiles, returns array of :obj:`slice` tuples.
%s
%s
:rtype: :obj:`numpy.ndarray`
:return: Array of :obj:`tuple` objects. Each :obj:`tuple` element
is a :obj:`slice` object so that each :obj:`tuple` defines
a multi-dimensional slice of an array of shape :samp:`{array_shape}`.
.. seealso:: :func:`array_split.array_split`, :meth:`array_split.ShapeSplitter`,
:ref:`array_split-examples`
""" % (
    _array_shape_param_doc,
    (
        _ShapeSplitter__init__params_doc
        %
        (
            _array_start_param_doc,
            "\n" + _array_itemsize_param_doc,
            _halo_param_doc,
            _array_tile_bounds_policy_param_doc,
        )
    )
)
def array_split(
        ary,
        indices_or_sections=None,
        axis=None,
        tile_shape=None,
        max_tile_bytes=None,
        max_tile_shape=None,
        sub_tile_shape=None,
        halo=None
):
    "To be replaced."
    # The real docstring is assigned to array_split.__doc__ below.
    # Compute the slice tuples for ary.shape, then apply each slice to ary.
    split = shape_split(
        array_shape=ary.shape,
        indices_or_sections=indices_or_sections,
        axis=axis,
        array_start=None,
        array_itemsize=ary.itemsize,
        tile_shape=tile_shape,
        max_tile_bytes=max_tile_bytes,
        max_tile_shape=max_tile_shape,
        sub_tile_shape=sub_tile_shape,
        halo=halo,
        tile_bounds_policy=ARRAY_BOUNDS
    )
    return [ary[tile_slice] for tile_slice in split.flatten()]
# Compose array_split's docstring from the shared reST parameter fragments
# (the function body itself carries only the "To be replaced." placeholder).
array_split.__doc__ =\
    """
Splits the specified array :samp:`{ary}` into sub-arrays, returns list of :obj:`numpy.ndarray`.
:type ary: :obj:`numpy.ndarray`
:param ary: Array which is split into sub-arrays.
%s
:rtype: :obj:`list`
:return: List of :obj:`numpy.ndarray` elements, where each element is
a *slice* from :samp:`{ary}` (potentially an empty slice).
.. seealso:: :func:`array_split.shape_split`, :meth:`array_split.ShapeSplitter`,
:ref:`array_split-examples`
""" % (
    _ShapeSplitter__init__params_doc
    %
    (
        "",
        "",
        _halo_param_doc.replace("len({array_shape})", "len({ary}.shape)"),
        ""
    )
)

# Public API: every name defined in this module that does not start with
# an underscore.
__all__ = [s for s in dir() if not s.startswith('_')]
"""
===================================
The :mod:`array_split.split` Module
===================================
.. currentmodule:: array_split.split
Defines array splitting functions and classes.
Classes and Functions
=====================
.. autosummary::
:toctree: generated/
shape_factors - Compute *largest* factors of a given integer.
calculate_num_slices_per_axis - Computes per-axis divisions for a multi-dimensional shape.
calculate_tile_shape_for_max_bytes - Calculate a tile shape subject to max bytes restriction.
convert_halo_to_array_form - converts halo argument to :samp:`(ndim, 2)` shaped array.
ShapeSplitter - Splits a given shape into slices.
shape_split - Splits a specified shape and returns :obj:`numpy.ndarray` of :obj:`slice` elements.
array_split - Equivalent to :func:`numpy.array_split`.
Attributes
==========
.. autodata:: ARRAY_BOUNDS
.. autodata:: NO_BOUNDS
Utilities
=========
.. autosummary::
:toctree: generated/
is_scalar - Return :samp:`True` if argument is numeric scalar.
is_sequence - Return :samp:`True` if argument is a sequence.
is_indices - Return :samp:`True` if argument is a sequence.
pad_with_object - End pads a sequence with specified object.
pad_with_none - End pads a sequence with :samp:`None` elements.
"""
from __future__ import absolute_import
import numpy as _np
from .license import license as _license, copyright as _copyright, version as _version
from . import logging as _logging
__copyright__ = _copyright()
__version__ = _version()
__author__ = "Shane J. Latham"
__license__ = _license()
def is_scalar(obj):
    """
    Returns :samp:`True` if argument :samp:`{obj}` is
    a numeric type.

    :type obj: :obj:`object`
    :param obj: Return :samp:`True` if this is a scalar.
    :rtype: :obj:`bool`
    :return: :samp:`True` if :samp:`{obj}` is a numeric scalar.

    Example::

       >>> is_scalar(5)
       True
       >>> is_scalar(2.0)
       True
       >>> is_scalar([1, 2, 3])
       False
    """
    # Duck-typed check: numeric scalars expose an integer-conversion
    # protocol ("__long__" covers Python 2 long integers).
    return any(hasattr(obj, attr) for attr in ("__int__", "__long__"))
def is_sequence(obj):
    """
    Returns :samp:`True` if argument :samp:`{obj}` is
    a sequence (e.g. a :obj:`list` or :obj:`tuple`, etc).

    :type obj: :obj:`object`
    :param obj: Return :samp:`True` if this is a sequence.
    :rtype: :obj:`bool`
    :return: :samp:`True` if :samp:`{obj}` is a sequence.

    Example::

       >>> is_sequence([1, 2, 3])
       True
       >>> is_sequence(5)
       False
    """
    # Duck-typed check: anything sized, subscriptable or iterable counts
    # as a sequence (so strings and dicts qualify, plain numbers do not).
    for protocol_attr in ("__len__", "__getitem__", "__iter__"):
        if hasattr(obj, protocol_attr):
            return True
    return False
def is_indices(indices_or_sections):
    """
    Test for the :samp:`{indices_or_sections}` argument
    of :meth:`ShapeSplitter.__init__`: a sequence means explicit *cut*
    indices, a scalar means a *total number of tiles*.

    :type indices_or_sections: :obj:`object`
    :param indices_or_sections: Return :samp:`True` if this is a sequence.
    :rtype: :obj:`bool`
    :return: :samp:`True` if :samp:`{indices_or_sections}` is a sequence.
    """
    # Same duck-typed protocol probe as is_sequence: sized, subscriptable
    # or iterable objects are treated as index sequences.
    return (
        hasattr(indices_or_sections, "__len__")
        or
        hasattr(indices_or_sections, "__getitem__")
        or
        hasattr(indices_or_sections, "__iter__")
    )
def pad_with_object(sequence, new_length, obj=None):
    """
    Returns :samp:`{sequence}` :obj:`list` end-padded with :samp:`{obj}`
    elements so that the length of the returned list equals :samp:`{new_length}`.
    If :samp:`{sequence}` already has length :samp:`{new_length}` it is
    returned unchanged (and un-listified).

    :type sequence: iterable
    :param sequence: Sequence to be end-padded.
    :type new_length: :obj:`int`
    :param new_length: The length of the returned list.
    :type obj: :obj:`object`
    :param obj: Object used as padding elements.
    :rtype: :obj:`list`
    :return: A :obj:`list` of length :samp:`{new_length}`.
    :raises ValueError: if :samp:`len({sequence}) > {new_length})`.

    Example::

       >>> pad_with_object([1, 2, 3], 5, obj=0)
       [1, 2, 3, 0, 0]
    """
    deficit = new_length - len(sequence)
    if deficit < 0:
        raise ValueError(
            "Got len(sequence)=%s which exceeds new_length=%s"
            %
            (len(sequence), new_length)
        )
    if deficit > 0:
        sequence = list(sequence) + [obj, ] * deficit
    return sequence
def pad_with_none(sequence, new_length):
    """
    Returns :samp:`{sequence}` :obj:`list` end-padded with :samp:`None`
    elements so that the length of the returned list equals :samp:`{new_length}`.

    :type sequence: iterable
    :param sequence: Sequence to be end-padded.
    :type new_length: :obj:`int`
    :param new_length: The length of the returned list.
    :rtype: :obj:`list`
    :return: A :obj:`list` of length :samp:`{new_length}`.
    :raises ValueError: if :samp:`len({sequence}) > {new_length})`.
    """
    # Delegate to the generic padder with None as the fill object.
    return pad_with_object(sequence, new_length, obj=None)
def calculate_tile_shape_for_max_bytes(
        array_shape,
        array_itemsize,
        max_tile_bytes,
        max_tile_shape=None,
        sub_tile_shape=None,
        halo=None
):
    """
    Returns a tile shape :samp:`tile_shape`
    such that :samp:`numpy.product(tile_shape)*numpy.sum({array_itemsize}) <= {max_tile_bytes}`.
    Also, if :samp:`{max_tile_shape} is not None`
    then :samp:`numpy.all(tile_shape <= {max_tile_shape}) is True` and
    if :samp:`{sub_tile_shape} is not None`
    the :samp:`numpy.all((tile_shape % {sub_tile_shape}) == 0) is True`.

    :type array_shape: sequence of :obj:`int`
    :param array_shape: Shape of the array which is to be split into tiles.
    :type array_itemsize: :obj:`int`
    :param array_itemsize: The number of bytes per element of the array to be tiled.
    :type max_tile_bytes: :obj:`int`
    :param max_tile_bytes: The maximum number of bytes for the returned :samp:`tile_shape`.
    :type max_tile_shape: sequence of :obj:`int`
    :param max_tile_shape: Per axis maximum shapes for the returned :samp:`tile_shape`.
    :type sub_tile_shape: sequence of :obj:`int`
    :param sub_tile_shape: The returned :samp:`tile_shape` will be an even multiple
       of this sub-tile shape.
    :type halo: :obj:`int`, sequence of :obj:`int`, or :samp:`(len({array_shape}), 2)`
       shaped :obj:`numpy.ndarray`
    :param halo: How tiles are extended in each axis direction with *halo*
       elements. See :ref:`the-halo-parameter-examples` for meaning of :samp:`{halo}` values.
    :rtype: :obj:`numpy.ndarray`
    :return: A 1D array of shape :samp:`(len(array_shape),)` indicating a *tile shape*
       which will (approximately) uniformly divide the given :samp:`{array_shape}` into
       tiles (sub-arrays).

    Examples::

       >>> from array_split.split import calculate_tile_shape_for_max_bytes
       >>> calculate_tile_shape_for_max_bytes(
       ... array_shape=[512,],
       ... array_itemsize=1,
       ... max_tile_bytes=512
       ... )
       array([512])
       >>> calculate_tile_shape_for_max_bytes(
       ... array_shape=[512,],
       ... array_itemsize=2,  # Doubling the itemsize halves the tile size.
       ... max_tile_bytes=512
       ... )
       array([256])
       >>> calculate_tile_shape_for_max_bytes(
       ... array_shape=[512,],
       ... array_itemsize=1,
       ... max_tile_bytes=512-1  # tile shape will now be halved
       ... )
       array([256])
    """
    logger = _logging.getLogger(__name__ + ".calculate_tile_shape_for_max_bytes")
    logger.debug("calculate_tile_shape_for_max_bytes: enter:")
    logger.debug("array_shape=%s", array_shape)
    logger.debug("array_itemsize=%s", array_itemsize)
    logger.debug("max_tile_bytes=%s", max_tile_bytes)
    logger.debug("max_tile_shape=%s", max_tile_shape)
    logger.debug("sub_tile_shape=%s", sub_tile_shape)
    logger.debug("halo=%s", halo)
    # Normalise all arguments to numpy arrays / canonical forms.
    array_shape = _np.array(array_shape, dtype="int64")
    # array_itemsize may be a sequence; sum it to a single per-element size.
    array_itemsize = _np.sum(array_itemsize, dtype="int64")
    if max_tile_shape is None:
        max_tile_shape = _np.array(array_shape, copy=True)
    # A tile can never exceed the array itself.
    max_tile_shape = \
        _np.array(_np.minimum(max_tile_shape, array_shape), copy=True, dtype=array_shape.dtype)
    if sub_tile_shape is None:
        sub_tile_shape = _np.ones((len(array_shape),), dtype="int64")
    sub_tile_shape = _np.array(sub_tile_shape, dtype="int64")
    halo = convert_halo_to_array_form(halo=halo, ndim=len(array_shape))
    if _np.any(array_shape < sub_tile_shape):
        raise ValueError(
            "Got array_shape=%s element less than corresponding sub_tile_shape=%s element."
            %
            (
                array_shape,
                sub_tile_shape
            )
        )
    logger.debug("max_tile_shape=%s", max_tile_shape)
    logger.debug("sub_tile_shape=%s", sub_tile_shape)
    logger.debug("halo=%s", halo)
    # Number of sub-tiles needed to cover the array per axis (ceiling divide).
    array_sub_tile_split_shape = ((array_shape - 1) // sub_tile_shape) + 1
    # Candidate tile shape, measured in whole sub-tiles per axis.
    tile_sub_tile_split_shape = array_shape // sub_tile_shape
    if len(tile_sub_tile_split_shape) <= 1:
        # 1D case: solve directly for the number of halo-adjusted sub-tiles
        # fitting within the byte budget.
        tile_sub_tile_split_shape[0] = \
            int(_np.floor(
                (
                    (max_tile_bytes / float(array_itemsize))
                    -
                    _np.sum(halo)
                )
                /
                float(sub_tile_shape[0])
            ))
    # Enforce the per-axis maximum tile shape (in sub-tile units).
    tile_sub_tile_split_shape = \
        _np.minimum(
            tile_sub_tile_split_shape,
            max_tile_shape // sub_tile_shape
        )
    logger.debug("Pre loop: tile_sub_tile_split_shape=%s", tile_sub_tile_split_shape)
    # Shrink axes from left to right until the halo-padded tile fits the
    # byte budget.
    current_axis = 0
    while (
        (current_axis < len(tile_sub_tile_split_shape))
        and
        (
            (
                _np.product(tile_sub_tile_split_shape * sub_tile_shape + _np.sum(halo, axis=1))
                *
                array_itemsize
            )
            >
            max_tile_bytes
        )
    ):
        if current_axis < (len(tile_sub_tile_split_shape) - 1):
            # Not the last axis: first collapse this axis to a single
            # sub-tile, then expand it as far as the budget allows.
            tile_sub_tile_split_shape[current_axis] = 1
            tile_sub_tile_split_shape[current_axis] = \
                (
                    max_tile_bytes
                    //
                    (
                        _np.product(
                            tile_sub_tile_split_shape *
                            sub_tile_shape +
                            _np.sum(
                                halo,
                                axis=1))
                        *
                        array_itemsize
                    )
                )
            tile_sub_tile_split_shape[current_axis] = \
                max([1, tile_sub_tile_split_shape[current_axis]])
        else:
            # Last axis: account for the halo contributions of all leading
            # axes and solve for the remaining sub-tile count directly.
            sub_tile_shape_h = sub_tile_shape.copy()
            sub_tile_shape_h[0:current_axis] += _np.sum(halo[0:current_axis, :], axis=1)
            tile_sub_tile_split_shape[current_axis] = \
                int(_np.floor(
                    (
                        (max_tile_bytes / float(array_itemsize))
                        -
                        _np.sum(halo[current_axis]) * _np.product(sub_tile_shape_h[0:current_axis])
                    )
                    /
                    float(_np.product(sub_tile_shape_h))
                ))
        current_axis += 1
    logger.debug("Post loop: tile_sub_tile_split_shape=%s", tile_sub_tile_split_shape)
    # Convert back from sub-tile units to elements, clipped to the array.
    tile_shape = _np.minimum(array_shape, tile_sub_tile_split_shape * sub_tile_shape)
    logger.debug("pre cannonicalise tile_shape=%s", tile_shape)
    # Cannonicalise: recompute the tile shape so that the implied number of
    # tiles per axis divides the array as evenly as possible, while keeping
    # the tile shape a whole multiple of sub_tile_shape.
    tile_split_shape = ((array_shape - 1) // tile_shape) + 1
    logger.debug("tile_split_shape=%s", tile_split_shape)
    tile_shape = (((array_sub_tile_split_shape - 1) // tile_split_shape) + 1) * sub_tile_shape
    logger.debug("post cannonicalise tile_shape=%s", tile_shape)
    return tile_shape
def calculate_num_slices_per_axis(num_slices_per_axis, num_slices, max_slices_per_axis=None):
    """
    Returns a :obj:`numpy.ndarray` (:samp:`return_array` say) where non-positive elements of
    the :samp:`{num_slices_per_axis}` sequence have been replaced with
    positive integer values such that :samp:`numpy.product(return_array) == num_slices`
    and::

       numpy.all(
           return_array[numpy.where(num_slices_per_axis <= 0)]
           <=
           max_slices_per_axis[numpy.where(num_slices_per_axis <= 0)]
       ) is True

    :type num_slices_per_axis: sequence of :obj:`int`
    :param num_slices_per_axis: Constraint for per-axis sub-divisions.
       Non-positive elements indicate values to be replaced in the
       returned array. Positive values are identical to the corresponding
       element in the returned array.
    :type num_slices: integer
    :param num_slices: Indicates the number of slices (rectangular sub-arrays)
       formed by performing sub-divisions per axis. The returned array :samp:`return_array`
       has elements assigned such that :samp:`numpy.product(return_array) == {num_slices}`.
    :type max_slices_per_axis: sequence of :obj:`int` (or :samp:`None`)
    :param max_slices_per_axis: Constraint specifying maximum number of per-axis sub-divisions.
       If :samp:`None` defaults to :samp:`numpy.array([numpy.inf,]*len({num_slices_per_axis}))`.
    :rtype: :obj:`numpy.ndarray`
    :return: An array :samp:`return_array`
       such that :samp:`numpy.product(return_array) == num_slices`.
    :raises ValueError: if :samp:`{max_slices_per_axis}` has a non-positive
       element, or :samp:`{num_slices}` cannot be factored over the
       unspecified axes.

    Examples::

       >>> from array_split.split import calculate_num_slices_per_axis
       >>>
       >>> calculate_num_slices_per_axis([0, 0, 0], 16)
       array([4, 2, 2])
       >>> calculate_num_slices_per_axis([1, 0, 0], 16)
       array([1, 4, 4])
       >>> calculate_num_slices_per_axis([1, 0, 0], 16, [2, 2, 16])
       array([1, 2, 8])
    """
    logger = _logging.getLogger(__name__)
    ret_array = _np.array(num_slices_per_axis, copy=True)
    if max_slices_per_axis is None:
        max_slices_per_axis = _np.array([_np.inf, ] * len(num_slices_per_axis))
    max_slices_per_axis = _np.array(max_slices_per_axis)
    if _np.any(max_slices_per_axis <= 0):
        raise ValueError("Got non-positive value in max_slices_per_axis=%s" % max_slices_per_axis)
    # Iterate until every axis has a positive (fully determined) count.
    while _np.any(ret_array <= 0):
        # Product of the already-determined axes.
        prd = _np.product(ret_array[_np.where(ret_array > 0)])  # returns 1 for zero-length array
        if (num_slices < prd) or ((num_slices % prd) > 0):
            raise ValueError(
                (
                    "Unable to construct grid of num_slices=%s elements from "
                    +
                    "num_slices_per_axis=%s (with max_slices_per_axis=%s)"
                )
                %
                (num_slices, num_slices_per_axis, max_slices_per_axis)
            )
        ridx = _np.where(ret_array <= 0)
        # Factor the remaining slice count as "squarely" as possible over
        # the undetermined axes, largest factor first.
        f = shape_factors(num_slices // prd, ridx[0].shape[0])[::-1]
        if _np.all(f < max_slices_per_axis[ridx]):
            ret_array[ridx] = f
        else:
            # Some factor exceeds its per-axis cap: clamp those axes to the
            # cap (decrementing until the running product divides
            # num_slices) and re-factor on the next loop iteration.
            for i in range(ridx[0].shape[0]):
                if f[i] >= max_slices_per_axis[ridx[0][i]]:
                    ret_array[ridx[0][i]] = max_slices_per_axis[ridx[0][i]]
                    prd = _np.product(ret_array[_np.where(ret_array > 0)])
                    while (num_slices % prd) > 0:
                        ret_array[ridx[0][i]] -= 1
                        prd = _np.product(ret_array[_np.where(ret_array > 0)])
        logger.debug(
            "ridx=%s, f=%s, ret_array=%s, max_slices_per_axis=%s",
            ridx, f, ret_array, max_slices_per_axis
        )
    return ret_array
# Reusable reST parameter-documentation fragments.  They are %-interpolated
# into the docstrings of ShapeSplitter.__init__, shape_split and array_split
# via the __doc__ assignments elsewhere in this module.
_array_shape_param_doc =\
    """
:type array_shape: sequence of :obj:`int`
:param array_shape: The shape to be *split*.
"""
_array_start_param_doc =\
    """
:type array_start: :samp:`None` or sequence of :obj:`int`
:param array_start: The start index. Defaults to :samp:`[0,]*len(array_shape)`.
The array indexing extents are assumed to range from :samp:`{array_start}`
to :samp:`{array_start} + {array_shape}`.
See :ref:`the-array_start-parameter-examples` examples.
"""
_array_itemsize_param_doc =\
    """
:type array_itemsize: int or sequence of :obj:`int`
:param array_itemsize: Number of bytes per array element.
Only relevant when :samp:`{max_tile_bytes}` is specified.
See :ref:`splitting-by-maximum-bytes-per-tile-examples` examples.
"""
_array_tile_bounds_policy_param_doc =\
    """
:type tile_bounds_policy: :obj:`str`
:param tile_bounds_policy: Specifies whether tiles can extend beyond the array boundaries.
Only relevant for halo values greater than one. If :samp:`{tile_bounds_policy}`
is :data:`ARRAY_BOUNDS`
then the calculated tiles will not extend beyond the array
extents :samp:`{array_start}` and :samp:`{array_start} + {array_shape}`.
If :samp:`{tile_bounds_policy}` is :data:`NO_BOUNDS`
then the returned tiles will extend beyond
the :samp:`{array_start}` and :samp:`{array_start} + {array_shape}` extend
for positive :samp:`{halo}` values. See :ref:`the-halo-parameter-examples` examples.
"""
# NOTE: this template itself contains %s placeholders (filled with the
# array_start / array_itemsize / halo / tile_bounds_policy fragments above).
_ShapeSplitter__init__params_doc =\
    """
:type indices_or_sections: :samp:`None`, :obj:`int` or sequence of :obj:`int`
:param indices_or_sections: If an integer, indicates the number of
elements in the calculated *split* array. If a sequence, indicates
the indices (per axis) at which the splits occur.
See :ref:`splitting-by-number-of-tiles-examples` examples.
:type axis: :samp:`None`, :obj:`int` or sequence of :obj:`int`
:param axis: If an integer, indicates the axis which is to be split.
If a sequence integers, indicates the number of slices per axis,
i.e. if :samp:`{axis} = [3, 5]` then axis :samp:`0` is cut into
3 slices and axis :samp:`1` is cut into 5 slices for a total
of 15 (:samp:`3*5`) rectangular slices in the returned :samp:`(3, 5)`
shaped split.
See :ref:`splitting-by-number-of-tiles-examples` examples
and :ref:`splitting-by-per-axis-split-indices-examples` examples.
%s%s
:type tile_shape: :samp:`None` or sequence of :obj:`int`
:param tile_shape: When not :samp:`None`, specifies explicit shape for tiles.
Should be same length as :samp:`{array_shape}`.
See :ref:`splitting-by-tile-shape-examples` examples.
:type max_tile_bytes: :samp:`None` or :obj:`int`
:param max_tile_bytes: The maximum number of bytes for calculated :samp:`tile_shape`.
See :ref:`splitting-by-maximum-bytes-per-tile-examples` examples.
:type max_tile_shape: :samp:`None` or sequence of :obj:`int`
:param max_tile_shape: Per axis maximum shapes for the calculated :samp:`tile_shape`.
Only relevant when :samp:`{max_tile_bytes}` is specified. Should be same length
as :samp:`{array_shape}`.
See :ref:`splitting-by-maximum-bytes-per-tile-examples` examples.
:type sub_tile_shape: :samp:`None` or sequence of :obj:`int`
:param sub_tile_shape: When not :samp:`None`, the calculated :samp:`tile_shape` will
be an even multiple of this sub-tile shape. Only relevant when :samp:`{max_tile_bytes}`
is specified. Should be same length as :samp:`{array_shape}`.
See :ref:`splitting-by-maximum-bytes-per-tile-examples` examples.%s%s
"""
_halo_param_doc =\
    """
:type halo: :samp:`None`, :obj:`int`, sequence of :obj:`int`, or :samp:`(len({array_shape}), 2)`
shaped :obj:`numpy.ndarray`
:param halo: How tiles are extended per axis in -ve and +ve directions with *halo*
elements. See :ref:`the-halo-parameter-examples` examples.
"""
#: Indicates that tiles are always within the array bounds.
#: See :ref:`the-halo-parameter-examples` examples.
__ARRAY_BOUNDS = "array_bounds"


# NOTE(review): @property at module scope does not create a computed module
# attribute -- it binds ARRAY_BOUNDS to a *property object*, not to the
# "array_bounds" string (the property protocol only fires on class-attribute
# access).  Consumers comparing against the string value would not match.
# TODO: confirm this is intentional (or compare with the published package).
@property
def ARRAY_BOUNDS():  # pylint: disable=invalid-name
    """
    Indicates that tiles are always within the array bounds,
    resulting in tiles which have truncated halos.
    See :ref:`the-halo-parameter-examples` examples.
    """
    return __ARRAY_BOUNDS
#: Indicates that tiles may extend beyond the array bounds.
#: See :ref:`the-halo-parameter-examples` examples.
__NO_BOUNDS = "no_bounds"


# NOTE(review): as with ARRAY_BOUNDS above, @property at module scope binds
# NO_BOUNDS to a property object rather than the "no_bounds" string --
# verify this is intentional.
@property
def NO_BOUNDS():  # pylint: disable=invalid-name
    """
    Indicates that tiles may have halos which extend beyond the array bounds.
    See :ref:`the-halo-parameter-examples` examples.
    """
    return __NO_BOUNDS
def convert_halo_to_array_form(halo, ndim):
    """
    Converts the :samp:`{halo}` argument to a :samp:`({ndim}, 2)`
    shaped array of per-axis (negative-direction, positive-direction)
    halo widths.

    :type halo: :samp:`None`, :obj:`int`, an :samp:`{ndim}` length sequence
       of :samp:`int` or :samp:`({ndim}, 2)` shaped array of :samp:`int`
    :param halo: Halo to be converted to :samp:`({ndim}, 2)` shaped array form.
    :type ndim: :obj:`int`
    :param ndim: Number of dimensions.
    :rtype: :obj:`numpy.ndarray`
    :return: A :samp:`({ndim}, 2)` shaped array of :obj:`numpy.int64` elements.
    :raises ValueError: if the converted array does not have :samp:`{ndim}` rows.

    Examples::

       >>> convert_halo_to_array_form(halo=2, ndim=4)
       array([[2, 2],
              [2, 2],
              [2, 2],
              [2, 2]])
       >>> convert_halo_to_array_form(halo=[0, 1, 2], ndim=3)
       array([[0, 0],
              [1, 1],
              [2, 2]])
    """
    int_dtype = _np.int64
    if halo is None:
        # No halo: zero-width border on every axis, both directions.
        halo_arr = _np.zeros((ndim, 2), dtype=int_dtype)
    elif hasattr(halo, "__int__") or hasattr(halo, "__long__"):
        # Numeric scalar: broadcast the same width everywhere.
        halo_arr = _np.zeros((ndim, 2), dtype=int_dtype) + halo
    elif (ndim == 1) and (_np.array(halo).shape == (2,)):
        # 1D special case: a (2,) pair is the (-ve, +ve) halo of the
        # single axis, not a per-axis list.
        halo_arr = _np.array([halo, ], copy=True, dtype=int_dtype)
    elif len(_np.array(halo).shape) == 1:
        # Per-axis widths: same value in both directions for each axis.
        halo_arr = _np.array([halo, halo], dtype=int_dtype).T.copy()
    else:
        # Already in (ndim, 2) form; just coerce the dtype.
        halo_arr = _np.array(halo, copy=True, dtype=int_dtype)
    if halo_arr.shape[0] != ndim:
        raise ValueError(
            "Got halo.shape=%s, expecting halo.shape=(%s, 2)"
            %
            (halo_arr.shape, ndim)
        )
    return halo_arr
class ShapeSplitter(object):
"""
Implements array shape splitting. There are three main (top-level) methods:
:meth:`__init__`
Initialisation of parameters which define the split.
:meth:`set_split_extents`
Calculates the per-axis indices for the cuts. Sets
the :attr:`split_shape`, :attr:`split_begs`
and :attr:`split_ends` attributes.
:meth:`calculate_split`
Calls :meth:`set_split_extents` followed
by :meth:`calculate_split_from_extents` to
return the :obj:`numpy.ndarray` of :obj:`tuple` elements (slices).
Example::
>>> import numpy as np
>>> ary = np.arange(0, 10)
>>> splitter = ShapeSplitter(ary.shape, 3)
>>> split = splitter.calculate_split()
>>> split.shape
(3,)
>>> split
array([(slice(0, 4, None),), (slice(4, 7, None),), (slice(7, 10, None),)],
dtype=[('0', 'O')])
>>> [ary[slyce] for slyce in split.flatten()]
[array([0, 1, 2, 3]), array([4, 5, 6]), array([7, 8, 9])]
>>>
>>> splitter.split_shape # equivalent to split.shape above
array([3])
>>> splitter.split_begs # start indices for tile extents
[array([0, 4, 7])]
>>> splitter.split_ends # stop indices for tile extents
[array([ 4, 7, 10])]
"""
#: Class attribute for :obj:`logging.Logger` logging.
logger = _logging.getLogger(__name__ + ".ShapeSplitter")
#: Class attribute indicating list of valid values for :attr:`tile_bound_policy`.
#: See :data:`ARRAY_BOUNDS` and :data:`NO_BOUNDS`.
valid_tile_bounds_policies = [ARRAY_BOUNDS, NO_BOUNDS]
def __init__(
        self,
        array_shape,
        indices_or_sections=None,
        axis=None,
        array_start=None,
        array_itemsize=1,
        tile_shape=None,
        max_tile_bytes=None,
        max_tile_shape=None,
        sub_tile_shape=None,
        halo=None,
        tile_bounds_policy=ARRAY_BOUNDS
):
    # NOTE: the full parameter documentation for this method is assembled and
    # assigned to __doc__ at import time (module level), from shared fragments.
    # Initialise *private* attributes.
    self.__array_shape = None
    self.__array_start = None
    self.__array_itemsize = None
    self.__indices_per_axis = None
    self.__split_size = None
    self.__split_num_slices_per_axis = None
    self.__tile_shape = None
    self.__max_tile_bytes = None
    self.__max_tile_shape = None
    self.__sub_tile_shape = None
    self.__halo = None
    self.__tile_bounds_policy = None
    self.__tile_beg_min = None
    self.__tile_end_max = None
    self.__split_shape = None
    self.__split_begs = None
    self.__split_ends = None
    # Now set properties from arguments
    self.array_shape = _np.array(array_shape)
    if array_start is None:
        array_start = _np.zeros_like(self.array_shape)
    self.array_start = array_start
    self.array_itemsize = array_itemsize
    indices_per_axis = None
    # indices_or_sections is either a sequence of explicit cut indices or a
    # scalar giving the total number of sections (tiles).
    if is_indices(indices_or_sections):
        num_subarrays = None
        indices_per_axis = indices_or_sections
        if (
            ((axis is None) or is_scalar(axis))
            and
            (not _np.any([is_sequence(_e) for _e in indices_or_sections]))
        ):
            if axis is None:
                axis = 0
            # Make indices_per_axis a list of lists, so that
            # element 0 is a list of indices for axis 0
            indices_per_axis = [None, ] * len(array_shape)
            indices_per_axis[axis] = indices_or_sections
    else:
        indices_per_axis = None
        num_subarrays = indices_or_sections
    self.indices_per_axis = indices_per_axis
    self.split_size = num_subarrays
    split_num_slices_per_axis = None
    if (self.split_size is not None) or (axis is not None):
        if axis is None:
            axis = 0
        if is_sequence(axis):
            # axis given as a per-axis section-count sequence; pad to rank.
            split_num_slices_per_axis = pad_with_object(axis, len(self.array_shape), 1)
        elif self.split_size is not None:
            # Scalar axis: all split_size sections cut along that one axis.
            split_num_slices_per_axis = pad_with_object([], len(self.array_shape), 1)
            split_num_slices_per_axis[axis] = self.split_size
    self.split_num_slices_per_axis = split_num_slices_per_axis
    self.tile_shape = tile_shape
    self.max_tile_bytes = max_tile_bytes
    self.max_tile_shape = max_tile_shape
    self.sub_tile_shape = sub_tile_shape
    halo = self.convert_halo_to_array_form(halo)
    self.halo = halo
    if tile_bounds_policy is None:
        tile_bounds_policy = ARRAY_BOUNDS
    self.tile_bounds_policy = tile_bounds_policy
    # Default tile bounds clamp to the array extent; update_tile_extent_bounds()
    # re-derives these from tile_bounds_policy before a split is computed.
    self.tile_beg_min = self.array_start
    self.tile_end_max = self.array_start + self.array_shape
    self.split_shape = None
    self.split_begs = None
    self.split_ends = None
def convert_halo_to_array_form(self, halo):
    """
    Normalises the :samp:`{halo}` argument to
    a :samp:`(len(self.array_shape), 2)` shaped array.

    :type halo: :samp:`None`, :obj:`int`, :samp:`self.array_shape.size` length sequence
       of :samp:`int` or :samp:`(self.array_shape.size, 2)` shaped array
       of :samp:`int`
    :param halo: Halo to be converted to :samp:`(len(self.array_shape), 2)`
       shaped array form.
    :rtype: :obj:`numpy.ndarray`
    :return: A :samp:`(len(self.array_shape), 2)` shaped array
       of :obj:`numpy.int64` elements.
    """
    # Delegate to the module-level helper of the same name.
    ndim = len(self.array_shape)
    return convert_halo_to_array_form(halo=halo, ndim=ndim)
@property
def array_shape(self):
    """
    The shape of the array which is to be split. A sequence of :obj:`int` indicating the
    per-axis sizes which are to be split.
    """
    return self.__array_shape

@array_shape.setter
def array_shape(self, array_shape):
    self.__array_shape = array_shape

@property
def array_start(self):
    """
    The start index. A sequence of :obj:`int` indicating the start of indexing for
    the tile slices. Defaults to :samp:`numpy.zeros_like({self}.array_shape)`.
    """
    return self.__array_start

@array_start.setter
def array_start(self, array_start):
    self.__array_start = array_start

@property
def array_itemsize(self):
    """
    The number of bytes per array element, see :attr:`max_tile_bytes`.
    """
    return self.__array_itemsize

@array_itemsize.setter
def array_itemsize(self, array_itemsize):
    self.__array_itemsize = array_itemsize

@property
def indices_per_axis(self):
    """
    The per-axis indices indicating the cuts for the split.
    A :obj:`list` of 1D :obj:`numpy.ndarray` objects such
    that :samp:`{self}.indices_per_axis[i]` indicates the
    cut positions for axis :samp:`i`.
    """
    return self.__indices_per_axis

@indices_per_axis.setter
def indices_per_axis(self, indices_per_axis):
    self.__indices_per_axis = indices_per_axis

@property
def split_size(self):
    """
    An :obj:`int` indicating the number of tiles in the calculated split.
    """
    return self.__split_size

@split_size.setter
def split_size(self, split_size):
    self.__split_size = split_size

@property
def split_num_slices_per_axis(self):
    """
    Number of slices per axis.
    A 1D :obj:`numpy.ndarray` of :obj:`int` indicating the number of slices (sections)
    per axis, so that :samp:`{self}.split_num_slices_per_axis[i]` is an integer
    indicating the number of sections along axis :samp:`i` in the calculated split.
    """
    return self.__split_num_slices_per_axis

@split_num_slices_per_axis.setter
def split_num_slices_per_axis(self, split_num_slices_per_axis):
    self.__split_num_slices_per_axis = split_num_slices_per_axis

@property
def tile_shape(self):
    """
    The shape of all tiles in the calculated split.
    A 1D :samp:`numpy.ndarray` of :obj:`int` indicating the per-axis
    number of elements for tiles in the calculated split.
    """
    return self.__tile_shape

@tile_shape.setter
def tile_shape(self, tile_shape):
    self.__tile_shape = tile_shape

@property
def max_tile_bytes(self):
    """
    The maximum number of bytes for any tile (including :attr:`halo`) in the returned split.
    An :obj:`int` which constrains the tile shape such that any tile
    from the computed split is no bigger than :samp:`{max_tile_bytes}`.
    """
    return self.__max_tile_bytes

@max_tile_bytes.setter
def max_tile_bytes(self, max_tile_bytes):
    self.__max_tile_bytes = max_tile_bytes

@property
def max_tile_shape(self):
    """
    Per-axis maximum sizes for calculated tiles.
    A 1D :samp:`numpy.ndarray` of :obj:`int` indicating the per-axis
    maximum number of elements for tiles in the calculated split.
    """
    return self.__max_tile_shape

@max_tile_shape.setter
def max_tile_shape(self, max_tile_shape):
    self.__max_tile_shape = max_tile_shape

@property
def sub_tile_shape(self):
    """
    Calculated tile shape will be an integer multiple of this sub-tile shape.
    i.e. :samp:`(self.tile_shape[i] % self.sub_tile_shape[i]) == 0`,
    for :samp:`i in range(0, len(self.tile_shape))`.
    A 1D :samp:`numpy.ndarray` of :obj:`int` indicating sub-tile shape.
    """
    return self.__sub_tile_shape

@sub_tile_shape.setter
def sub_tile_shape(self, sub_tile_shape):
    self.__sub_tile_shape = sub_tile_shape

@property
def halo(self):
    """
    Per-axis -ve and +ve halo sizes for extending tiles to overlap with neighbouring tiles.
    A :samp:`(N, 2)` shaped array (where :samp:`N = len(self.array_shape)`)
    indicating the per-axis lower (:samp:`halo[:, 0]`) and
    upper (:samp:`halo[:, 1]`) halo sizes.
    """
    return self.__halo

@halo.setter
def halo(self, halo):
    # Normalise scalar/sequence halo values to the canonical (N, 2) form.
    self.__halo = convert_halo_to_array_form(halo, ndim=self.array_shape.size)

@property
def tile_bounds_policy(self):
    """
    A string indicating whether tile halo extents can extend beyond the array domain.
    Valid values are indicated by :attr:`valid_tile_bounds_policies`.
    """
    return self.__tile_bounds_policy

@tile_bounds_policy.setter
def tile_bounds_policy(self, tile_bounds_policy):
    self.__tile_bounds_policy = tile_bounds_policy

@property
def tile_beg_min(self):
    """
    The per-axis minimum index for :attr:`slice.start`. The per-axis lower bound for
    tile start indices. A 1D :obj:`numpy.ndarray`.
    """
    return self.__tile_beg_min

@tile_beg_min.setter
def tile_beg_min(self, tile_beg_min):
    self.__tile_beg_min = tile_beg_min

@property
def tile_end_max(self):
    """
    The per-axis maximum index for :attr:`slice.stop`. The per-axis upper bound for
    tile stop indices. A 1D :obj:`numpy.ndarray`.
    """
    return self.__tile_end_max

@tile_end_max.setter
def tile_end_max(self, tile_end_max):
    self.__tile_end_max = tile_end_max

@property
def split_shape(self):
    """
    The shape of the calculated split array. Indicates the per-axis number
    of sections in the calculated split. A 1D :obj:`numpy.ndarray`.
    """
    return self.__split_shape

@split_shape.setter
def split_shape(self, split_shape):
    self.__split_shape = split_shape

@property
def split_begs(self):
    """
    The list of per-axis start indices for :obj:`slice` objects.
    A :obj:`list` of 1D :obj:`numpy.ndarray` objects indicating
    the :attr:`slice.start` index for for tiles.
    """
    return self.__split_begs

@split_begs.setter
def split_begs(self, split_begs):
    self.__split_begs = split_begs

@property
def split_ends(self):
    """
    The list of per-axis stop indices for :obj:`slice` objects.
    A :obj:`list` of 1D :obj:`numpy.ndarray` objects indicating
    the :attr:`slice.stop` index for for tiles.
    """
    return self.__split_ends

@split_ends.setter
def split_ends(self, split_ends):
    self.__split_ends = split_ends
def check_tile_bounds_policy(self):
    """
    Raises :obj:`ValueError` if :attr:`tile_bounds_policy`
    is not in :samp:`[{self}.ARRAY_BOUNDS, {self}.NO_BOUNDS]`.
    """
    # Guard-clause form: valid policy means nothing to do.
    policy = self.tile_bounds_policy
    if policy in self.valid_tile_bounds_policies:
        return
    raise ValueError(
        "Got self.tile_bounds_policy=%s, which is not in %s."
        %
        (policy, self.valid_tile_bounds_policies)
    )
def check_consistent_parameter_dimensions(self):
    """
    Ensure that all parameter dimensions are consistent with
    the :attr:`array_shape` dimension.

    Sequence parameters which may legitimately be *shorter* than
    :attr:`array_shape` (they are padded later) are only rejected when
    they are *longer*; parameters which must match exactly are rejected
    on any length mismatch.

    :raises ValueError: For inconsistent parameter dimensions.
    """
    # indices_per_axis may be shorter (it is padded with None later),
    # so only reject when it is longer than the array rank.
    if self.indices_per_axis is not None:
        if len(self.indices_per_axis) > len(self.array_shape):
            raise ValueError(
                "Got len(self.indices_per_axis)=%s > len(self.array_shape)=%s, "
                "should not exceed it."
                %
                (len(self.indices_per_axis), len(self.array_shape))
            )
    # split_num_slices_per_axis may also be shorter (padded with 1 later).
    if self.split_num_slices_per_axis is not None:
        if len(self.split_num_slices_per_axis) > len(self.array_shape):
            raise ValueError(
                (
                    "Got len(self.split_num_slices_per_axis)=%s > len(self.array_shape)=%s,"
                    +
                    " should not exceed it."
                )
                %
                (len(self.split_num_slices_per_axis), len(self.array_shape))
            )
    # The remaining parameters must match the array rank exactly.
    if self.tile_shape is not None:
        if len(self.tile_shape) != len(self.array_shape):
            raise ValueError(
                "Got len(self.tile_shape)=%s != len(self.array_shape)=%s, should be equal."
                %
                (len(self.tile_shape), len(self.array_shape))
            )
    if self.sub_tile_shape is not None:
        if len(self.sub_tile_shape) != len(self.array_shape):
            raise ValueError(
                "Got len(self.sub_tile_shape)=%s != len(self.array_shape)=%s, should be equal."
                %
                (len(self.sub_tile_shape), len(self.array_shape))
            )
    if self.max_tile_shape is not None:
        if len(self.max_tile_shape) != len(self.array_shape):
            raise ValueError(
                "Got len(self.max_tile_shape)=%s != len(self.array_shape)=%s, should be equal."
                %
                (len(self.max_tile_shape), len(self.array_shape))
            )
    if self.array_start is not None:
        if len(self.array_start) != len(self.array_shape):
            raise ValueError(
                "Got len(self.array_start)=%s != len(self.array_shape)=%s, should be equal."
                %
                (len(self.array_start), len(self.array_shape))
            )
def check_consistent_parameter_grouping(self):
    """
    Ensures this object does not have conflicting groups of parameters.

    Exactly one of the mutually exclusive parameter groups
    (``indices_per_axis``, ``split_size``, ``tile_shape``
    or ``max_tile_bytes``) must have been specified.

    :raises ValueError: For conflicting or absent parameters.
    """
    # Collect the groups that were actually specified; keys are group names,
    # values map attribute names to their values (used in error messages).
    parameter_groups = {}
    if self.indices_per_axis is not None:
        parameter_groups["indices_per_axis"] = \
            {"self.indices_per_axis": self.indices_per_axis}
    if (self.split_size is not None) or (self.split_num_slices_per_axis is not None):
        parameter_groups["split_size"] = \
            {
                "self.split_size": self.split_size,
                "self.split_num_slices_per_axis": self.split_num_slices_per_axis,
            }
    if self.tile_shape is not None:
        parameter_groups["tile_shape"] = \
            {"self.tile_shape": self.tile_shape}
    if self.max_tile_bytes is not None:
        parameter_groups["max_tile_bytes"] = \
            {"self.max_tile_bytes": self.max_tile_bytes}
    # max_tile_shape and sub_tile_shape belong to the max_tile_bytes group.
    if self.max_tile_shape is not None:
        if "max_tile_bytes" not in parameter_groups.keys():
            parameter_groups["max_tile_bytes"] = {}
        parameter_groups["max_tile_bytes"]["self.max_tile_shape"] = self.max_tile_shape
    if self.sub_tile_shape is not None:
        if "max_tile_bytes" not in parameter_groups.keys():
            parameter_groups["max_tile_bytes"] = {}
        parameter_groups["max_tile_bytes"]["self.sub_tile_shape"] = self.sub_tile_shape
    self.logger.debug("parameter_groups=%s", parameter_groups)
    # More than one group specified: ambiguous split definition.
    if len(parameter_groups.keys()) > 1:
        group_keys = sorted(parameter_groups.keys())
        raise ValueError(
            "Got conflicting parameter groups specified, "
            +
            "should only specify one group to define the split:\n"
            +
            (
                "\n".join(
                    [
                        (
                            ("Group %18s: " % ("'%s'" % group_key))
                            +
                            str(parameter_groups[group_key])
                        )
                        for group_key in group_keys
                    ]
                )
            )
        )
    # No group specified at all: nothing to define the split.
    if len(parameter_groups.keys()) <= 0:
        raise ValueError(
            "No split parameters specified, need parameters from one of the groups: "
            +
            "'indices_per_axis', 'split_size', 'tile_shape' or 'max_tile_bytes'"
        )
def check_split_parameters(self):
    """
    Ensures this object has a state consistent with evaluating a split.

    :raises ValueError: For conflicting or absent parameters.
    """
    # Validate policy, then per-parameter dimensions, then group exclusivity.
    self.check_tile_bounds_policy()
    self.check_consistent_parameter_dimensions()
    self.check_consistent_parameter_grouping()

def update_tile_extent_bounds(self):
    """
    Updates the :attr:`tile_beg_min` and :attr:`tile_end_max`
    data members according to :attr:`tile_bounds_policy`.
    """
    if self.tile_bounds_policy == NO_BOUNDS:
        # Halo-extended tiles may reach beyond the array extent.
        self.tile_beg_min = self.array_start - self.halo[:, 0]
        self.tile_end_max = self.array_start + self.array_shape + self.halo[:, 1]
    elif self.tile_bounds_policy == ARRAY_BOUNDS:
        # Tiles (including halo) are clamped to the array extent.
        self.tile_beg_min = self.array_start
        self.tile_end_max = self.array_start + self.array_shape
def set_split_extents_by_indices_per_axis(self):
    """
    Sets split shape :attr:`split_shape` and
    split extents (:attr:`split_begs` and :attr:`split_ends`)
    from values in :attr:`indices_per_axis`.

    :raises ValueError: If :attr:`indices_per_axis` is :samp:`None`.
    """
    if self.indices_per_axis is None:
        raise ValueError("Got None for self.indices_per_axis")
    self.logger.debug("self.array_shape=%s", self.array_shape)
    self.logger.debug("self.indices_per_axis=%s", self.indices_per_axis)
    # Pad with None so every axis has an (possibly empty) indices entry.
    self.indices_per_axis = \
        pad_with_none(self.indices_per_axis, len(self.array_shape))
    # Define the start and stop indices (extents) for each axis slice
    self.split_shape = _np.ones(len(self.array_shape), dtype="int64")
    self.split_begs = [[], ] * len(self.array_shape)
    self.split_ends = [[], ] * len(self.array_shape)
    for i in range(len(self.indices_per_axis)):
        indices = self.indices_per_axis[i]
        if (indices is not None) and (len(indices) > 0):
            # N cut indices produce N+1 sections along this axis;
            # each section's end is the next section's start.
            self.split_shape[i] = len(indices) + 1
            self.split_begs[i] = _np.zeros((len(indices) + 1,), dtype="int64")
            self.split_begs[i][1:] = indices
            self.split_ends[i] = _np.zeros((len(self.split_begs[i]),), dtype="int64")
            self.split_ends[i][0:-1] = self.split_begs[i][1:]
            self.split_ends[i][-1] = self.array_shape[i]
        else:
            # start and stop is the full width of the axis
            self.split_begs[i] = [0, ]
            self.split_ends[i] = [self.array_shape[i], ]
    self.logger.debug("self.indices_per_axis=%s", self.indices_per_axis)
def calculate_split_from_extents(self):
    """
    Returns split calculated using extents obtained
    from :attr:`split_begs` and :attr:`split_ends`.
    All calls to calculate the split end up here to produce
    the :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
    of :obj:`tuple`-of-:obj:`slice` elements.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
       where each element is a :obj:`tuple` of :obj:`slice` objects.
    """
    self.logger.debug("self.split_shape=%s", self.split_shape)
    self.logger.debug("self.split_begs=%s", self.split_begs)
    self.logger.debug("self.split_ends=%s", self.split_ends)
    # For every tile multi-index, build a tuple of per-axis slices:
    #   start = beg + array_start - lo-halo (halo only if non-empty extent),
    #           clamped below by tile_beg_min;
    #   stop  = end + array_start + hi-halo (ditto), clamped by tile_end_max.
    # FIX: use _np.prod — _np.product was deprecated and removed in NumPy 2.0.
    ret = \
        _np.array(
            [
                tuple(
                    [
                        slice(
                            max([
                                self.split_begs[d][idx[d]]
                                + self.array_start[d]
                                - self.halo[d, 0]
                                * (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]]),
                                self.tile_beg_min[d]
                            ]),
                            min([
                                self.split_ends[d][idx[d]]
                                + self.array_start[d]
                                + self.halo[d, 1]
                                * (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]]),
                                self.tile_end_max[d]
                            ])
                        )
                        for d in range(len(self.split_shape))
                    ]
                )
                for idx in
                _np.array(
                    _np.unravel_index(
                        _np.arange(0, _np.prod(self.split_shape)),
                        self.split_shape
                    )
                ).T
            ],
            dtype=[("%d" % d, "object") for d in range(len(self.split_shape))]
        ).reshape(self.split_shape)
    return ret
def calculate_split_halos_from_extents(self):
    """
    Returns :samp:`(self.ndim, 2)` shaped halo array elements indicating
    the halo for each split. Tiles on the boundary may have the halo trimmed
    to account for the :attr:`tile_bounds_policy`.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
       where each element is a :samp:`(self.ndim, 2)` shaped :obj:`numpy.ndarray`
       indicating the per-axis and per-direction number of halo elements for each tile
       in the split.
    """
    self.logger.debug("self.split_shape=%s", self.split_shape)
    self.logger.debug("self.split_begs=%s", self.split_begs)
    self.logger.debug("self.split_ends=%s", self.split_ends)
    # Per tile and axis: the effective halo is the nominal halo clipped to the
    # room available between the tile extent and the tile bounds; empty
    # extents (end <= beg) get zero halo.
    # FIX: use _np.prod — _np.product was deprecated and removed in NumPy 2.0.
    ret = \
        _np.array(
            [
                (
                    tuple(
                        (
                            min([
                                self.split_begs[d][idx[d]] - self.tile_beg_min[d],
                                self.halo[d, 0]
                                *
                                (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]])
                            ]),
                            min([
                                self.tile_end_max[d] - self.split_ends[d][idx[d]],
                                self.halo[d, 1]
                                *
                                (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]])
                            ])
                        )
                        for d in range(len(self.split_shape))
                    )
                )
                for idx in
                _np.array(
                    _np.unravel_index(
                        _np.arange(0, _np.prod(self.split_shape)),
                        self.split_shape
                    )
                ).T
            ],
            dtype=[("%d" % d, "2int64") for d in range(len(self.split_shape))]
        ).reshape(self.split_shape)
    return ret
def calculate_split_by_indices_per_axis(self):
    """
    Returns split calculated using extents obtained
    from :attr:`indices_per_axis`.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
       where each element is a :obj:`tuple` of :obj:`slice` objects.
    """
    self.set_split_extents_by_indices_per_axis()
    return self.calculate_split_from_extents()

def calculate_axis_split_extents(self, num_sections, size):
    """
    Divides :samp:`range(0, {size})` into (approximately) equal sized
    intervals. Returns :samp:`(begs, ends)` where :samp:`slice(begs[i], ends[i])`
    define the intervals for :samp:`i in range(0, {num_sections})`.

    :type num_sections: :obj:`int`
    :param num_sections: Divide :samp:`range(0, {size})` into this
       many intervals (approximately) equal sized intervals.
    :type size: :obj:`int`
    :param size: Range for the subdivision.
    :rtype: :obj:`tuple`
    :return: Two element tuple :samp:`(begs, ends)`
       such that :samp:`slice(begs[i], ends[i])` define the
       intervals for :samp:`i in range(0, {num_sections})`.
    """
    section_size = size // num_sections
    if section_size >= 1:
        # Evenly spaced starts; the first (size % num_sections) sections
        # each absorb one extra element.
        begs = _np.arange(0, section_size * num_sections, section_size)
        rem = size - section_size * num_sections
        if rem > 0:
            for i in range(rem):
                begs[i + 1:] += 1
        ends = _np.zeros_like(begs)
        ends[0:-1] = begs[1:]
        ends[-1] = size
    else:
        # More sections than elements: trailing sections are empty
        # (beg == end == size).
        begs = _np.arange(0, num_sections)
        begs[size:] = size
        ends = begs.copy()
        ends[0:-1] = begs[1:]
    return begs, ends
def set_split_extents_by_split_size(self):
    """
    Sets split shape :attr:`split_shape` and
    split extents (:attr:`split_begs` and :attr:`split_ends`)
    from values in :attr:`split_size` and :attr:`split_num_slices_per_axis`.

    :raises ValueError: If :attr:`split_size` is :samp:`None` and
       :attr:`split_num_slices_per_axis` does not fully determine it.
    """
    if self.split_size is None:
        if (
            _np.all([s is not None for s in self.split_num_slices_per_axis])
            and
            _np.all([s > 0 for s in self.split_num_slices_per_axis])
        ):
            # Total number of tiles is the product of the per-axis counts.
            # FIX: _np.prod — _np.product was removed in NumPy 2.0.
            self.split_size = _np.prod(self.split_num_slices_per_axis)
        else:
            raise ValueError(
                (
                    "Got invalid self.split_num_slices_per_axis=%s, all elements "
                    +
                    "need to be integers greater than zero when self.split_size is None."
                )
                %
                self.split_num_slices_per_axis
            )
    self.logger.debug(
        "Pre cannonicalise: self.split_num_slices_per_axis=%s",
        self.split_num_slices_per_axis)
    # Resolve unspecified per-axis counts so their product equals split_size.
    self.split_num_slices_per_axis = \
        calculate_num_slices_per_axis(
            self.split_num_slices_per_axis,
            self.split_size,
            self.array_shape
        )
    self.logger.debug(
        "Post cannonicalise: self.split_num_slices_per_axis=%s",
        self.split_num_slices_per_axis)
    # Define the start and stop indices (extents) for each axis slice
    self.split_shape = self.split_num_slices_per_axis.copy()
    self.split_begs = [[], ] * len(self.array_shape)
    self.split_ends = [[], ] * len(self.array_shape)
    for i in range(len(self.array_shape)):
        self.split_begs[i], self.split_ends[i] = \
            self.calculate_axis_split_extents(
                self.split_shape[i],
                self.array_shape[i]
            )
def calculate_split_by_split_size(self):
    """
    Returns split calculated using extents obtained
    from :attr:`split_size` and :attr:`split_num_slices_per_axis`.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
       where each element is a :obj:`tuple` of :obj:`slice` objects.
    """
    self.set_split_extents_by_split_size()
    return self.calculate_split_from_extents()

def set_split_extents_by_tile_shape(self):
    """
    Sets split shape :attr:`split_shape` and
    split extents (:attr:`split_begs` and :attr:`split_ends`)
    from value of :attr:`tile_shape`.
    """
    # Ceiling-divide: per-axis number of tiles needed to cover the array.
    self.split_shape = ((self.array_shape - 1) // self.tile_shape) + 1
    self.split_begs = [[], ] * len(self.array_shape)
    self.split_ends = [[], ] * len(self.array_shape)
    for i in range(len(self.array_shape)):
        self.split_begs[i] = _np.arange(0, self.array_shape[i], self.tile_shape[i])
        self.split_ends[i] = _np.zeros_like(self.split_begs[i])
        self.split_ends[i][0:-1] = self.split_begs[i][1:]
        # The last tile along an axis may be smaller than tile_shape[i].
        self.split_ends[i][-1] = self.array_shape[i]

def calculate_split_by_tile_shape(self):
    """
    Returns split calculated using extents obtained
    from :attr:`tile_shape`.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
       where each element is a :obj:`tuple` of :obj:`slice` objects.
    """
    self.set_split_extents_by_tile_shape()
    return self.calculate_split_from_extents()

def set_split_extents_by_tile_max_bytes(self):
    """
    Sets split extents (:attr:`split_begs`
    and :attr:`split_ends`) calculated
    from :attr:`max_tile_bytes`
    (and :attr:`max_tile_shape`, :attr:`sub_tile_shape`, :attr:`halo`).
    """
    # Derive a tile shape satisfying the byte budget, then split by it.
    self.tile_shape = \
        calculate_tile_shape_for_max_bytes(
            array_shape=self.array_shape,
            array_itemsize=self.array_itemsize,
            max_tile_bytes=self.max_tile_bytes,
            max_tile_shape=self.max_tile_shape,
            sub_tile_shape=self.sub_tile_shape,
            halo=self.halo
        )
    self.set_split_extents_by_tile_shape()

def calculate_split_by_tile_max_bytes(self):
    """
    Returns split calculated using extents obtained
    from :attr:`max_tile_bytes`
    (and :attr:`max_tile_shape`, :attr:`sub_tile_shape`, :attr:`halo`).

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
       where each element is a :obj:`tuple` of :obj:`slice` objects.
    """
    self.set_split_extents_by_tile_max_bytes()
    return self.calculate_split_from_extents()
def set_split_extents(self):
    """
    Sets split extents (:attr:`split_begs`
    and :attr:`split_ends`) calculated using
    selected attributes set from :meth:`__init__`.
    """
    self.check_split_parameters()
    self.update_tile_extent_bounds()
    # Dispatch on whichever (mutually exclusive, checked above) parameter
    # group was specified.
    if self.indices_per_axis is not None:
        self.set_split_extents_by_indices_per_axis()
    elif (self.split_size is not None) or (self.split_num_slices_per_axis is not None):
        self.set_split_extents_by_split_size()
    elif self.tile_shape is not None:
        self.set_split_extents_by_tile_shape()
    elif self.max_tile_bytes is not None:
        self.set_split_extents_by_tile_max_bytes()

def calculate_split(self):
    """
    Computes the split.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
       of dimension :samp:`len({self}.array_shape)`.
       Each element of the returned array is a :obj:`tuple`
       containing :samp:`len({self}.array_shape)` elements, with each element
       being a :obj:`slice` object. Each :obj:`tuple` defines a slice within
       the bounds :samp:`{self}.array_start - {self}.halo[:, 0]`
       to :samp:`{self}.array_start + {self}.array_shape + {self}.halo[:, 1]`.
    """
    self.set_split_extents()
    return self.calculate_split_from_extents()
ShapeSplitter([0, ]).__init__.__func__.__doc__ = \
"""
Initialises parameters which define a split.
%s
%s
.. seealso:: :ref:`array_split-examples`
""" % (
_array_shape_param_doc,
(
_ShapeSplitter__init__params_doc
%
(
_array_start_param_doc,
"\n" + _array_itemsize_param_doc,
_halo_param_doc,
_array_tile_bounds_policy_param_doc,
)
)
)
def shape_split(array_shape, *args, **kwargs):
    "To be replaced."
    # Full docstring is assigned to shape_split.__doc__ below; this wrapper
    # simply forwards all arguments to a ShapeSplitter instance.
    splitter = ShapeSplitter(array_shape, *args, **kwargs)
    return splitter.calculate_split()
shape_split.__doc__ =\
"""
Splits specified :samp:`{array_shape}` in tiles, returns array of :obj:`slice` tuples.
%s
%s
:rtype: :obj:`numpy.ndarray`
:return: Array of :obj:`tuple` objects. Each :obj:`tuple` element
is a :obj:`slice` object so that each :obj:`tuple` defines
a multi-dimensional slice of an array of shape :samp:`{array_shape}`.
.. seealso:: :func:`array_split.array_split`, :meth:`array_split.ShapeSplitter`,
:ref:`array_split-examples`
""" % (
_array_shape_param_doc,
(
_ShapeSplitter__init__params_doc
%
(
_array_start_param_doc,
"\n" + _array_itemsize_param_doc,
_halo_param_doc,
_array_tile_bounds_policy_param_doc,
)
)
)
def array_split(
        ary,
        indices_or_sections=None,
        axis=None,
        tile_shape=None,
        max_tile_bytes=None,
        max_tile_shape=None,
        sub_tile_shape=None,
        halo=None
):
    "To be replaced."
    # Full docstring is assigned to array_split.__doc__ below.
    # Compute the slice tuples for ary's shape, then materialise each tile.
    slices = \
        shape_split(
            array_shape=ary.shape,
            indices_or_sections=indices_or_sections,
            axis=axis,
            array_start=None,
            array_itemsize=ary.itemsize,
            tile_shape=tile_shape,
            max_tile_bytes=max_tile_bytes,
            max_tile_shape=max_tile_shape,
            sub_tile_shape=sub_tile_shape,
            halo=halo,
            tile_bounds_policy=ARRAY_BOUNDS
        )
    return [ary[slyce] for slyce in slices.flatten()]
array_split.__doc__ =\
"""
Splits the specified array :samp:`{ary}` into sub-arrays, returns list of :obj:`numpy.ndarray`.
:type ary: :obj:`numpy.ndarray`
:param ary: Array which is split into sub-arrays.
%s
:rtype: :obj:`list`
:return: List of :obj:`numpy.ndarray` elements, where each element is
a *slice* from :samp:`{ary}` (potentially an empty slice).
.. seealso:: :func:`array_split.shape_split`, :meth:`array_split.ShapeSplitter`,
:ref:`array_split-examples`
""" % (
_ShapeSplitter__init__params_doc
%
(
"",
"",
_halo_param_doc.replace("len({array_shape})", "len({ary}.shape)"),
""
)
)
__all__ = [s for s in dir() if not s.startswith('_')]
|
array-split/array_split | array_split/split.py | calculate_tile_shape_for_max_bytes | python | def calculate_tile_shape_for_max_bytes(
array_shape,
array_itemsize,
max_tile_bytes,
max_tile_shape=None,
sub_tile_shape=None,
halo=None
):
logger = _logging.getLogger(__name__ + ".calculate_tile_shape_for_max_bytes")
logger.debug("calculate_tile_shape_for_max_bytes: enter:")
logger.debug("array_shape=%s", array_shape)
logger.debug("array_itemsize=%s", array_itemsize)
logger.debug("max_tile_bytes=%s", max_tile_bytes)
logger.debug("max_tile_shape=%s", max_tile_shape)
logger.debug("sub_tile_shape=%s", sub_tile_shape)
logger.debug("halo=%s", halo)
array_shape = _np.array(array_shape, dtype="int64")
array_itemsize = _np.sum(array_itemsize, dtype="int64")
if max_tile_shape is None:
max_tile_shape = _np.array(array_shape, copy=True)
max_tile_shape = \
_np.array(_np.minimum(max_tile_shape, array_shape), copy=True, dtype=array_shape.dtype)
if sub_tile_shape is None:
sub_tile_shape = _np.ones((len(array_shape),), dtype="int64")
sub_tile_shape = _np.array(sub_tile_shape, dtype="int64")
halo = convert_halo_to_array_form(halo=halo, ndim=len(array_shape))
if _np.any(array_shape < sub_tile_shape):
raise ValueError(
"Got array_shape=%s element less than corresponding sub_tile_shape=%s element."
%
(
array_shape,
sub_tile_shape
)
)
logger.debug("max_tile_shape=%s", max_tile_shape)
logger.debug("sub_tile_shape=%s", sub_tile_shape)
logger.debug("halo=%s", halo)
array_sub_tile_split_shape = ((array_shape - 1) // sub_tile_shape) + 1
tile_sub_tile_split_shape = array_shape // sub_tile_shape
if len(tile_sub_tile_split_shape) <= 1:
tile_sub_tile_split_shape[0] = \
int(_np.floor(
(
(max_tile_bytes / float(array_itemsize))
-
_np.sum(halo)
)
/
float(sub_tile_shape[0])
))
tile_sub_tile_split_shape = \
_np.minimum(
tile_sub_tile_split_shape,
max_tile_shape // sub_tile_shape
)
logger.debug("Pre loop: tile_sub_tile_split_shape=%s", tile_sub_tile_split_shape)
current_axis = 0
while (
(current_axis < len(tile_sub_tile_split_shape))
and
(
(
_np.product(tile_sub_tile_split_shape * sub_tile_shape + _np.sum(halo, axis=1))
*
array_itemsize
)
>
max_tile_bytes
)
):
if current_axis < (len(tile_sub_tile_split_shape) - 1):
tile_sub_tile_split_shape[current_axis] = 1
tile_sub_tile_split_shape[current_axis] = \
(
max_tile_bytes
//
(
_np.product(
tile_sub_tile_split_shape *
sub_tile_shape +
_np.sum(
halo,
axis=1))
*
array_itemsize
)
)
tile_sub_tile_split_shape[current_axis] = \
max([1, tile_sub_tile_split_shape[current_axis]])
else:
sub_tile_shape_h = sub_tile_shape.copy()
sub_tile_shape_h[0:current_axis] += _np.sum(halo[0:current_axis, :], axis=1)
tile_sub_tile_split_shape[current_axis] = \
int(_np.floor(
(
(max_tile_bytes / float(array_itemsize))
-
_np.sum(halo[current_axis]) * _np.product(sub_tile_shape_h[0:current_axis])
)
/
float(_np.product(sub_tile_shape_h))
))
current_axis += 1
logger.debug("Post loop: tile_sub_tile_split_shape=%s", tile_sub_tile_split_shape)
tile_shape = _np.minimum(array_shape, tile_sub_tile_split_shape * sub_tile_shape)
logger.debug("pre cannonicalise tile_shape=%s", tile_shape)
tile_split_shape = ((array_shape - 1) // tile_shape) + 1
logger.debug("tile_split_shape=%s", tile_split_shape)
tile_shape = (((array_sub_tile_split_shape - 1) // tile_split_shape) + 1) * sub_tile_shape
logger.debug("post cannonicalise tile_shape=%s", tile_shape)
return tile_shape | Returns a tile shape :samp:`tile_shape`
such that :samp:`numpy.product(tile_shape)*numpy.sum({array_itemsize}) <= {max_tile_bytes}`.
Also, if :samp:`{max_tile_shape} is not None`
then :samp:`numpy.all(tile_shape <= {max_tile_shape}) is True` and
if :samp:`{sub_tile_shape} is not None`
the :samp:`numpy.all((tile_shape % {sub_tile_shape}) == 0) is True`.
:type array_shape: sequence of :obj:`int`
:param array_shape: Shape of the array which is to be split into tiles.
:type array_itemsize: :obj:`int`
:param array_itemsize: The number of bytes per element of the array to be tiled.
:type max_tile_bytes: :obj:`int`
:param max_tile_bytes: The maximum number of bytes for the returned :samp:`tile_shape`.
:type max_tile_shape: sequence of :obj:`int`
:param max_tile_shape: Per axis maximum shapes for the returned :samp:`tile_shape`.
:type sub_tile_shape: sequence of :obj:`int`
:param sub_tile_shape: The returned :samp:`tile_shape` will be an even multiple
of this sub-tile shape.
:type halo: :obj:`int`, sequence of :obj:`int`, or :samp:`(len({array_shape}), 2)`
shaped :obj:`numpy.ndarray`
:param halo: How tiles are extended in each axis direction with *halo*
elements. See :ref:`the-halo-parameter-examples` for meaning of :samp:`{halo}` values.
:rtype: :obj:`numpy.ndarray`
:return: A 1D array of shape :samp:`(len(array_shape),)` indicating a *tile shape*
which will (approximately) uniformly divide the given :samp:`{array_shape}` into
tiles (sub-arrays).
Examples::
>>> from array_split.split import calculate_tile_shape_for_max_bytes
>>> calculate_tile_shape_for_max_bytes(
... array_shape=[512,],
... array_itemsize=1,
... max_tile_bytes=512
... )
array([512])
>>> calculate_tile_shape_for_max_bytes(
... array_shape=[512,],
... array_itemsize=2, # Doubling the itemsize halves the tile size.
... max_tile_bytes=512
... )
array([256])
>>> calculate_tile_shape_for_max_bytes(
... array_shape=[512,],
... array_itemsize=1,
... max_tile_bytes=512-1 # tile shape will now be halved
... )
array([256]) | train | https://github.com/array-split/array_split/blob/e07abe3001209394dde809f7e6f505f9f49a1c26/array_split/split.py#L218-L395 | [
"def convert_halo_to_array_form(halo, ndim):\n \"\"\"\n Converts the :samp:`{halo}` argument to a :samp:`(ndim, 2)`\n shaped array.\n\n :type halo: :samp:`None`, :obj:`int`, an :samp:`{ndim}` length sequence\n of :samp:`int` or :samp:`({ndim}, 2)` shaped array\n of :samp:`int`\n :param ... | """
===================================
The :mod:`array_split.split` Module
===================================
.. currentmodule:: array_split.split
Defines array splitting functions and classes.
Classes and Functions
=====================
.. autosummary::
:toctree: generated/
shape_factors - Compute *largest* factors of a given integer.
calculate_num_slices_per_axis - Computes per-axis divisions for a multi-dimensional shape.
calculate_tile_shape_for_max_bytes - Calculate a tile shape subject to max bytes restriction.
convert_halo_to_array_form - converts halo argument to :samp:`(ndim, 2)` shaped array.
ShapeSplitter - Splits a given shape into slices.
shape_split - Splits a specified shape and returns :obj:`numpy.ndarray` of :obj:`slice` elements.
array_split - Equivalent to :func:`numpy.array_split`.
Attributes
==========
.. autodata:: ARRAY_BOUNDS
.. autodata:: NO_BOUNDS
Utilities
=========
.. autosummary::
:toctree: generated/
is_scalar - Return :samp:`True` if argument is numeric scalar.
is_sequence - Return :samp:`True` if argument is a sequence.
is_indices - Return :samp:`True` if argument is a sequence.
pad_with_object - End pads a sequence with specified object.
pad_with_none - End pads a sequence with :samp:`None` elements.
"""
from __future__ import absolute_import
import numpy as _np
from .license import license as _license, copyright as _copyright, version as _version
from . import logging as _logging
__copyright__ = _copyright()
__version__ = _version()
__author__ = "Shane J. Latham"
__license__ = _license()
def is_scalar(obj):
    """
    Indicates whether the argument :samp:`{obj}` behaves as a numeric scalar.

    :type obj: :obj:`object`
    :param obj: Object to examine.
    :rtype: :obj:`bool`
    :return: :samp:`True` if :samp:`{obj}` is a numeric scalar.

    Example::

       >>> is_scalar(5)
       True
       >>> is_scalar(2.0)
       True
       >>> is_scalar([1, 2, 3])
       False
    """
    # Numeric types (including numpy scalar types) provide integer-conversion
    # hooks; containers and strings do not.
    return any(hasattr(obj, attr) for attr in ("__int__", "__long__"))
def is_sequence(obj):
    """
    Indicates whether the argument :samp:`{obj}` is a sequence
    (e.g. a :obj:`list` or :obj:`tuple`, etc).

    :type obj: :obj:`object`
    :param obj: Object to examine.
    :rtype: :obj:`bool`
    :return: :samp:`True` if :samp:`{obj}` is a sequence.

    Example::

       >>> is_sequence([1, 2, 3])
       True
       >>> is_sequence([i for i in range(0, 3)])
       True
       >>> is_sequence(5)
       False
    """
    # Anything exposing one of the container protocols counts as a sequence.
    return any(
        hasattr(obj, attr)
        for attr in ("__len__", "__getitem__", "__iter__")
    )
def is_indices(indices_or_sections):
    """
    Test for the :samp:`{indices_or_sections}` argument of :meth:`ShapeSplitter.__init__`
    to determine whether it is specifying *total number of tiles* or sequence of
    *cut* indices. Returns :samp:`True` if argument :samp:`{indices_or_sections}` is
    a sequence (e.g. a :obj:`list` or :obj:`tuple`, etc).

    :type indices_or_sections: :obj:`object`
    :param indices_or_sections: Return :samp:`True` if this is a sequence.
    :rtype: :obj:`bool`
    :return: :samp:`is_sequence({indices_or_sections})`.
    """
    # A sequence argument is interpreted as explicit cut indices; a scalar
    # argument is interpreted as a total tile count.
    return is_sequence(indices_or_sections)
def pad_with_object(sequence, new_length, obj=None):
    """
    Returns a :obj:`list` copy of :samp:`{sequence}` end-padded with :samp:`{obj}`
    elements so that the length of the returned list equals :samp:`{new_length}`.

    :type sequence: iterable
    :param sequence: Sequence which is *listified* and end-padded.
    :type new_length: :obj:`int`
    :param new_length: The length of the returned list.
    :type obj: :obj:`object`
    :param obj: Object used as padding elements.
    :rtype: :obj:`list`
    :return: A :obj:`list` of length :samp:`{new_length}`.
    :raises ValueError: if :samp:`len({sequence}) > {new_length})`.

    Example::

       >>> pad_with_object([1, 2, 3], 5, obj=0)
       [1, 2, 3, 0, 0]
       >>> pad_with_object([1, 2, 3], 5, obj=None)
       [1, 2, 3, None, None]
    """
    # Fix: always return a new list. Previously, when len(sequence) already
    # equalled new_length, the original object (possibly a tuple, possibly an
    # aliased list) was returned, contradicting the documented list return type.
    sequence = list(sequence)
    if len(sequence) > new_length:
        raise ValueError(
            "Got len(sequence)=%s which exceeds new_length=%s"
            %
            (len(sequence), new_length)
        )
    # No-op padding when the sequence is already new_length long.
    return sequence + [obj, ] * (new_length - len(sequence))
def pad_with_none(sequence, new_length):
    """
    Returns :samp:`sequence` :obj:`list` end-padded with :samp:`None`
    elements so that the length of the returned list equals :samp:`{new_length}`.

    :type sequence: iterable
    :param sequence: Return *listified* sequence which has been end-padded.
    :type new_length: :obj:`int`
    :param new_length: The length of the returned list.
    :rtype: :obj:`list`
    :return: A :obj:`list` of length :samp:`{new_length}`.
    :raises ValueError: if :samp:`len({sequence}) > {new_length})`.
    """
    # Convenience wrapper: padding object fixed to None.
    return pad_with_object(sequence, new_length, obj=None)
def shape_factors(n, dim=2):
    """
    Returns a :obj:`numpy.ndarray` of factors :samp:`f` such
    that :samp:`(len(f) == {dim}) and (numpy.product(f) == {n})`.
    The returned factors are as *square* (*cubic*, etc) as possible.

    For example::

       >>> shape_factors(24, 1)
       array([24])
       >>> shape_factors(24, 2)
       array([4, 6])
       >>> shape_factors(24, 3)
       array([2, 3, 4])
       >>> shape_factors(24, 4)
       array([2, 2, 2, 3])
       >>> shape_factors(24, 5)
       array([1, 2, 2, 2, 3])
       >>> shape_factors(24, 6)
       array([1, 1, 2, 2, 2, 3])

    :type n: :obj:`int`
    :param n: Integer which is factored into :samp:`{dim}` factors.
    :type dim: :obj:`int`
    :param dim: Number of factors.
    :rtype: :obj:`numpy.ndarray`
    :return: A :samp:`({dim},)` shaped array of integers which are factors of :samp:`{n}`.
    """
    if dim <= 1:
        result = [n, ]
    else:
        # The largest useful candidate is floor(n ** (1/dim)); search downwards
        # (f == 1 always divides) so factors stay as close to equal as possible,
        # then recurse on the remaining quotient.
        f = int(n ** (1.0 / float(dim))) + 1
        while (n % f) != 0:
            f -= 1
        result = [f, ] + list(shape_factors(n // f, dim=dim - 1))
    result.sort()
    return _np.array(result)
def calculate_num_slices_per_axis(num_slices_per_axis, num_slices, max_slices_per_axis=None):
    """
    Returns a :obj:`numpy.ndarray` (:samp:`return_array` say) where non-positive elements of
    the :samp:`{num_slices_per_axis}` sequence have been replaced with
    positive integer values such that :samp:`numpy.product(return_array) == num_slices`
    and::

       numpy.all(
           return_array[numpy.where(num_slices_per_axis <= 0)]
           <=
           max_slices_per_axis[numpy.where(num_slices_per_axis <= 0)]
       ) is True

    :type num_slices_per_axis: sequence of :obj:`int`
    :param num_slices_per_axis: Constraint for per-axis sub-divisions.
       Non-positive elements indicate values to be replaced in the
       returned array. Positive values are identical to the corresponding
       element in the returned array.
    :type num_slices: integer
    :param num_slices: Indicates the number of slices (rectangular sub-arrays)
       formed by performing sub-divisions per axis. The returned array :samp:`return_array`
       has elements assigned such that :samp:`numpy.product(return_array) == {num_slices}`.
    :type max_slices_per_axis: sequence of :obj:`int` (or :samp:`None`)
    :param max_slices_per_axis: Constraint specifying maximum number of per-axis sub-divisions.
       If :samp:`None` defaults to :samp:`numpy.array([numpy.inf,]*len({num_slices_per_axis}))`.
    :rtype: :obj:`numpy.ndarray`
    :return: An array :samp:`return_array`
       such that :samp:`numpy.product(return_array) == num_slices`.
    :raises ValueError: If the constraints make :samp:`{num_slices}` unreachable.

    Examples::

       >>> from array_split.split import calculate_num_slices_per_axis
       >>>
       >>> calculate_num_slices_per_axis([0, 0, 0], 16)
       array([4, 2, 2])
       >>> calculate_num_slices_per_axis([1, 0, 0], 16)
       array([1, 4, 4])
       >>> calculate_num_slices_per_axis([1, 0, 0], 16, [2, 2, 16])
       array([1, 2, 8])
    """
    logger = _logging.getLogger(__name__)

    ret_array = _np.array(num_slices_per_axis, copy=True)
    if max_slices_per_axis is None:
        max_slices_per_axis = _np.array([_np.inf, ] * len(num_slices_per_axis))
    max_slices_per_axis = _np.array(max_slices_per_axis)

    if _np.any(max_slices_per_axis <= 0):
        raise ValueError("Got non-positive value in max_slices_per_axis=%s" % max_slices_per_axis)

    # Repeatedly fill the non-positive (i.e. "choose for me") axes until all
    # axes have a positive sub-division count.
    # NOTE: _np.product was removed in NumPy 2.0; _np.prod is the supported
    # spelling and is behaviourally identical (also returns 1 for empty input).
    while _np.any(ret_array <= 0):
        prd = _np.prod(ret_array[_np.where(ret_array > 0)])  # returns 1 for zero-length array
        if (num_slices < prd) or ((num_slices % prd) > 0):
            raise ValueError(
                (
                    "Unable to construct grid of num_slices=%s elements from "
                    +
                    "num_slices_per_axis=%s (with max_slices_per_axis=%s)"
                )
                %
                (num_slices, num_slices_per_axis, max_slices_per_axis)
            )
        ridx = _np.where(ret_array <= 0)
        # Factor the remaining tile count over the unresolved axes, largest
        # factors first.
        f = shape_factors(num_slices // prd, ridx[0].shape[0])[::-1]
        if _np.all(f < max_slices_per_axis[ridx]):
            ret_array[ridx] = f
        else:
            # Some factor exceeds its per-axis maximum: clamp that axis to its
            # maximum, then decrement until the running product divides
            # num_slices, and retry the remaining axes on the next iteration.
            for i in range(ridx[0].shape[0]):
                if f[i] >= max_slices_per_axis[ridx[0][i]]:
                    ret_array[ridx[0][i]] = max_slices_per_axis[ridx[0][i]]
                    prd = _np.prod(ret_array[_np.where(ret_array > 0)])
                    while (num_slices % prd) > 0:
                        ret_array[ridx[0][i]] -= 1
                        prd = _np.prod(ret_array[_np.where(ret_array > 0)])
        logger.debug(
            "ridx=%s, f=%s, ret_array=%s, max_slices_per_axis=%s",
            ridx, f, ret_array, max_slices_per_axis
        )
    return ret_array
_array_shape_param_doc =\
"""
:type array_shape: sequence of :obj:`int`
:param array_shape: The shape to be *split*.
"""
_array_start_param_doc =\
"""
:type array_start: :samp:`None` or sequence of :obj:`int`
:param array_start: The start index. Defaults to :samp:`[0,]*len(array_shape)`.
The array indexing extents are assumed to range from :samp:`{array_start}`
to :samp:`{array_start} + {array_shape}`.
See :ref:`the-array_start-parameter-examples` examples.
"""
_array_itemsize_param_doc =\
"""
:type array_itemsize: int or sequence of :obj:`int`
:param array_itemsize: Number of bytes per array element.
Only relevant when :samp:`{max_tile_bytes}` is specified.
See :ref:`splitting-by-maximum-bytes-per-tile-examples` examples.
"""
_array_tile_bounds_policy_param_doc =\
"""
:type tile_bounds_policy: :obj:`str`
:param tile_bounds_policy: Specifies whether tiles can extend beyond the array boundaries.
Only relevant for halo values greater than one. If :samp:`{tile_bounds_policy}`
is :data:`ARRAY_BOUNDS`
then the calculated tiles will not extend beyond the array
extents :samp:`{array_start}` and :samp:`{array_start} + {array_shape}`.
If :samp:`{tile_bounds_policy}` is :data:`NO_BOUNDS`
then the returned tiles will extend beyond
the :samp:`{array_start}` and :samp:`{array_start} + {array_shape}` extend
for positive :samp:`{halo}` values. See :ref:`the-halo-parameter-examples` examples.
"""
_ShapeSplitter__init__params_doc =\
"""
:type indices_or_sections: :samp:`None`, :obj:`int` or sequence of :obj:`int`
:param indices_or_sections: If an integer, indicates the number of
elements in the calculated *split* array. If a sequence, indicates
the indices (per axis) at which the splits occur.
See :ref:`splitting-by-number-of-tiles-examples` examples.
:type axis: :samp:`None`, :obj:`int` or sequence of :obj:`int`
:param axis: If an integer, indicates the axis which is to be split.
If a sequence integers, indicates the number of slices per axis,
i.e. if :samp:`{axis} = [3, 5]` then axis :samp:`0` is cut into
3 slices and axis :samp:`1` is cut into 5 slices for a total
of 15 (:samp:`3*5`) rectangular slices in the returned :samp:`(3, 5)`
shaped split.
See :ref:`splitting-by-number-of-tiles-examples` examples
and :ref:`splitting-by-per-axis-split-indices-examples` examples.
%s%s
:type tile_shape: :samp:`None` or sequence of :obj:`int`
:param tile_shape: When not :samp:`None`, specifies explicit shape for tiles.
Should be same length as :samp:`{array_shape}`.
See :ref:`splitting-by-tile-shape-examples` examples.
:type max_tile_bytes: :samp:`None` or :obj:`int`
:param max_tile_bytes: The maximum number of bytes for calculated :samp:`tile_shape`.
See :ref:`splitting-by-maximum-bytes-per-tile-examples` examples.
:type max_tile_shape: :samp:`None` or sequence of :obj:`int`
:param max_tile_shape: Per axis maximum shapes for the calculated :samp:`tile_shape`.
Only relevant when :samp:`{max_tile_bytes}` is specified. Should be same length
as :samp:`{array_shape}`.
See :ref:`splitting-by-maximum-bytes-per-tile-examples` examples.
:type sub_tile_shape: :samp:`None` or sequence of :obj:`int`
:param sub_tile_shape: When not :samp:`None`, the calculated :samp:`tile_shape` will
be an even multiple of this sub-tile shape. Only relevant when :samp:`{max_tile_bytes}`
is specified. Should be same length as :samp:`{array_shape}`.
See :ref:`splitting-by-maximum-bytes-per-tile-examples` examples.%s%s
"""
_halo_param_doc =\
"""
:type halo: :samp:`None`, :obj:`int`, sequence of :obj:`int`, or :samp:`(len({array_shape}), 2)`
shaped :obj:`numpy.ndarray`
:param halo: How tiles are extended per axis in -ve and +ve directions with *halo*
elements. See :ref:`the-halo-parameter-examples` examples.
"""
#: Indicates that tiles are always within the array bounds.
#: See :ref:`the-halo-parameter-examples` examples.
__ARRAY_BOUNDS = "array_bounds"


# NOTE(review): `@property` on a *module-level* function produces a `property`
# object rather than a plain string; downstream comparisons such as
# `self.tile_bounds_policy == NO_BOUNDS` would then rely on object identity and
# string-valued arguments (e.g. "array_bounds") would not match. Presumably
# these were intended as plain module attributes (or accessed via a
# module-__class__ property hack) — confirm against the upstream package.
@property
def ARRAY_BOUNDS():  # pylint: disable=invalid-name
    """
    Indicates that tiles are always within the array bounds,
    resulting in tiles which have truncated halos.
    See :ref:`the-halo-parameter-examples` examples.
    """
    return __ARRAY_BOUNDS


#: Indicates that tiles may extend beyond the array bounds.
#: See :ref:`the-halo-parameter-examples` examples.
__NO_BOUNDS = "no_bounds"


@property
def NO_BOUNDS():  # pylint: disable=invalid-name
    """
    Indicates that tiles may have halos which extend beyond the array bounds.
    See :ref:`the-halo-parameter-examples` examples.
    """
    return __NO_BOUNDS
def convert_halo_to_array_form(halo, ndim):
    """
    Converts the :samp:`{halo}` argument to a :samp:`(ndim, 2)`
    shaped array.

    :type halo: :samp:`None`, :obj:`int`, an :samp:`{ndim}` length sequence
       of :samp:`int` or :samp:`({ndim}, 2)` shaped array of :samp:`int`
    :param halo: Halo to be converted to :samp:`({ndim}, 2)` shaped array form.
    :type ndim: :obj:`int`
    :param ndim: Number of dimensions.
    :rtype: :obj:`numpy.ndarray`
    :return: A :samp:`({ndim}, 2)` shaped array of :obj:`numpy.int64` elements.

    Examples::

       >>> convert_halo_to_array_form(halo=2, ndim=4)
       array([[2, 2],
              [2, 2],
              [2, 2],
              [2, 2]])
       >>> convert_halo_to_array_form(halo=[0, 1, 2], ndim=3)
       array([[0, 0],
              [1, 1],
              [2, 2]])
       >>> convert_halo_to_array_form(halo=[[0, 1], [2, 3], [3, 4]], ndim=3)
       array([[0, 1],
              [2, 3],
              [3, 4]])
    """
    dtyp = _np.int64
    if halo is None:
        # No halo specified: zero-width halo on every axis.
        return _np.zeros((ndim, 2), dtype=dtyp)
    if is_scalar(halo):
        # One width for every axis in both -ve and +ve directions.
        return _np.zeros((ndim, 2), dtype=dtyp) + halo
    probe = _np.array(halo)
    if (ndim == 1) and (probe.shape == (2,)):
        # Single axis given directly as a (-ve, +ve) pair.
        return _np.array([halo, ], copy=True, dtype=dtyp)
    if probe.ndim == 1:
        # One symmetric width per axis: duplicate into -ve/+ve columns.
        return _np.array([halo, halo], dtype=dtyp).T.copy()
    # Already in (ndim, 2) form; validate the leading dimension.
    result = _np.array(halo, copy=True, dtype=dtyp)
    if result.shape[0] != ndim:
        raise ValueError(
            "Got halo.shape=%s, expecting halo.shape=(%s, 2)"
            %
            (result.shape, ndim)
        )
    return result
class ShapeSplitter(object):
    """
    Implements array shape splitting. There are three main (top-level) methods:

    :meth:`__init__`
       Initialisation of parameters which define the split.
    :meth:`set_split_extents`
       Calculates the per-axis indices for the cuts. Sets
       the :attr:`split_shape`, :attr:`split_begs`
       and :attr:`split_ends` attributes.
    :meth:`calculate_split`
       Calls :meth:`set_split_extents` followed
       by :meth:`calculate_split_from_extents` to
       return the :obj:`numpy.ndarray` of :obj:`tuple` elements (slices).

    Example::

       >>> import numpy as np
       >>> ary = np.arange(0, 10)
       >>> splitter = ShapeSplitter(ary.shape, 3)
       >>> split = splitter.calculate_split()
       >>> split.shape
       (3,)
       >>> split
       array([(slice(0, 4, None),), (slice(4, 7, None),), (slice(7, 10, None),)],
             dtype=[('0', 'O')])
       >>> [ary[slyce] for slyce in split.flatten()]
       [array([0, 1, 2, 3]), array([4, 5, 6]), array([7, 8, 9])]
       >>>
       >>> splitter.split_shape  # equivalent to split.shape above
       array([3])
       >>> splitter.split_begs  # start indices for tile extents
       [array([0, 4, 7])]
       >>> splitter.split_ends  # stop indices for tile extents
       [array([ 4,  7, 10])]
    """

    #: Class attribute for :obj:`logging.Logger` logging.
    logger = _logging.getLogger(__name__ + ".ShapeSplitter")

    #: Class attribute indicating list of valid values for :attr:`tile_bound_policy`.
    #: See :data:`ARRAY_BOUNDS` and :data:`NO_BOUNDS`.
    valid_tile_bounds_policies = [ARRAY_BOUNDS, NO_BOUNDS]
    def __init__(
            self,
            array_shape,
            indices_or_sections=None,
            axis=None,
            array_start=None,
            array_itemsize=1,
            tile_shape=None,
            max_tile_bytes=None,
            max_tile_shape=None,
            sub_tile_shape=None,
            halo=None,
            tile_bounds_policy=ARRAY_BOUNDS
    ):
        """
        Initialises the parameters which define the split; the split itself is
        computed later by :meth:`calculate_split`. Exactly one parameter
        *group* should define the split: explicit cut indices
        (:samp:`indices_or_sections` as a sequence), a tile count
        (:samp:`indices_or_sections` as an integer, optionally with
        :samp:`axis`), an explicit :samp:`tile_shape`, or a
        :samp:`max_tile_bytes` constraint.
        """
        # Initialise *private* attributes.
        self.__array_shape = None
        self.__array_start = None
        self.__array_itemsize = None
        self.__indices_per_axis = None
        self.__split_size = None
        self.__split_num_slices_per_axis = None
        self.__tile_shape = None
        self.__max_tile_bytes = None
        self.__max_tile_shape = None
        self.__sub_tile_shape = None
        self.__halo = None
        self.__tile_bounds_policy = None
        self.__tile_beg_min = None
        self.__tile_end_max = None
        self.__split_shape = None
        self.__split_begs = None
        self.__split_ends = None

        # Now set properties from arguments
        self.array_shape = _np.array(array_shape)
        if array_start is None:
            array_start = _np.zeros_like(self.array_shape)
        self.array_start = array_start
        self.array_itemsize = array_itemsize

        # Disambiguate indices_or_sections: a sequence means explicit cut
        # indices, a scalar means a total number of sub-arrays.
        indices_per_axis = None
        if is_indices(indices_or_sections):
            num_subarrays = None
            indices_per_axis = indices_or_sections
            if (
                ((axis is None) or is_scalar(axis))
                and
                (not _np.any([is_sequence(_e) for _e in indices_or_sections]))
            ):
                if axis is None:
                    axis = 0
                # Make indices_per_axis a list of lists, so that
                # element 0 is a list of indices for axis 0
                indices_per_axis = [None, ] * len(array_shape)
                indices_per_axis[axis] = indices_or_sections
        else:
            indices_per_axis = None
            num_subarrays = indices_or_sections

        self.indices_per_axis = indices_per_axis
        self.split_size = num_subarrays
        # The axis argument either names a single axis to cut (scalar) or
        # gives a per-axis section count (sequence); pad missing axes with 1.
        split_num_slices_per_axis = None
        if (self.split_size is not None) or (axis is not None):
            if axis is None:
                axis = 0
            if is_sequence(axis):
                split_num_slices_per_axis = pad_with_object(axis, len(self.array_shape), 1)
            elif self.split_size is not None:
                split_num_slices_per_axis = pad_with_object([], len(self.array_shape), 1)
                split_num_slices_per_axis[axis] = self.split_size
        self.split_num_slices_per_axis = split_num_slices_per_axis

        self.tile_shape = tile_shape
        self.max_tile_bytes = max_tile_bytes
        self.max_tile_shape = max_tile_shape
        self.sub_tile_shape = sub_tile_shape
        # Normalise halo to (ndim, 2) array form before assignment.
        halo = self.convert_halo_to_array_form(halo)
        self.halo = halo
        if tile_bounds_policy is None:
            tile_bounds_policy = ARRAY_BOUNDS
        self.tile_bounds_policy = tile_bounds_policy
        # Default tile extent bounds; refined later by update_tile_extent_bounds.
        self.tile_beg_min = self.array_start
        self.tile_end_max = self.array_start + self.array_shape

        # Split extents are computed lazily by the set_split_extents* methods.
        self.split_shape = None
        self.split_begs = None
        self.split_ends = None
    def convert_halo_to_array_form(self, halo):
        """
        Converts the :samp:`{halo}` argument to a :samp:`({self}.array_shape.size, 2)`
        shaped array.

        :type halo: :samp:`None`, :obj:`int`, :samp:`self.array_shape.size` length sequence
           of :samp:`int` or :samp:`(self.array_shape.size, 2)` shaped array
           of :samp:`int`
        :param halo: Halo to be converted to :samp:`(len(self.array_shape), 2)` shaped array form.
        :rtype: :obj:`numpy.ndarray`
        :return: A :samp:`(len(self.array_shape), 2)` shaped array of :obj:`numpy.int64` elements.
        """
        # Delegates to the module-level function, supplying this splitter's
        # dimensionality.
        return convert_halo_to_array_form(halo=halo, ndim=len(self.array_shape))
    # ------------------------------------------------------------------
    # Simple accessor properties. Each wraps a private attribute which is
    # initialised in __init__ and updated during split calculation.
    # ------------------------------------------------------------------

    @property
    def array_shape(self):
        """
        The shape of the array which is to be split. A sequence of :obj:`int` indicating the
        per-axis sizes which are to be split.
        """
        return self.__array_shape

    @array_shape.setter
    def array_shape(self, array_shape):
        self.__array_shape = array_shape

    @property
    def array_start(self):
        """
        The start index. A sequence of :obj:`int` indicating the start of indexing for
        the tile slices. Defaults to :samp:`numpy.zeros_like({self}.array_shape)`.
        """
        return self.__array_start

    @array_start.setter
    def array_start(self, array_start):
        self.__array_start = array_start

    @property
    def array_itemsize(self):
        """
        The number of bytes per array element, see :attr:`max_tile_bytes`.
        """
        return self.__array_itemsize

    @array_itemsize.setter
    def array_itemsize(self, array_itemsize):
        self.__array_itemsize = array_itemsize

    @property
    def indices_per_axis(self):
        """
        The per-axis indices indicating the cuts for the split.
        A :obj:`list` of 1D :obj:`numpy.ndarray` objects such
        that :samp:`{self}.indices_per_axis[i]` indicates the
        cut positions for axis :samp:`i`.
        """
        return self.__indices_per_axis

    @indices_per_axis.setter
    def indices_per_axis(self, indices_per_axis):
        self.__indices_per_axis = indices_per_axis

    @property
    def split_size(self):
        """
        An :obj:`int` indicating the number of tiles in the calculated split.
        """
        return self.__split_size

    @split_size.setter
    def split_size(self, split_size):
        self.__split_size = split_size

    @property
    def split_num_slices_per_axis(self):
        """
        Number of slices per axis.
        A 1D :obj:`numpy.ndarray` of :obj:`int` indicating the number of slices (sections)
        per axis, so that :samp:`{self}.split_num_slices_per_axis[i]` is an integer
        indicating the number of sections along axis :samp:`i` in the calculated split.
        """
        return self.__split_num_slices_per_axis

    @split_num_slices_per_axis.setter
    def split_num_slices_per_axis(self, split_num_slices_per_axis):
        self.__split_num_slices_per_axis = split_num_slices_per_axis

    @property
    def tile_shape(self):
        """
        The shape of all tiles in the calculated split.
        A 1D :samp:`numpy.ndarray` of :obj:`int` indicating the per-axis
        number of elements for tiles in the calculated split.
        """
        return self.__tile_shape

    @tile_shape.setter
    def tile_shape(self, tile_shape):
        self.__tile_shape = tile_shape

    @property
    def max_tile_bytes(self):
        """
        The maximum number of bytes for any tile (including :attr:`halo`) in the returned split.
        An :obj:`int` which constrains the tile shape such that any tile
        from the computed split is no bigger than :samp:`{max_tile_bytes}`.
        """
        return self.__max_tile_bytes

    @max_tile_bytes.setter
    def max_tile_bytes(self, max_tile_bytes):
        self.__max_tile_bytes = max_tile_bytes

    @property
    def max_tile_shape(self):
        """
        Per-axis maximum sizes for calculated tiles.
        A 1D :samp:`numpy.ndarray` of :obj:`int` indicating the per-axis
        maximum number of elements for tiles in the calculated split.
        """
        return self.__max_tile_shape

    @max_tile_shape.setter
    def max_tile_shape(self, max_tile_shape):
        self.__max_tile_shape = max_tile_shape

    @property
    def sub_tile_shape(self):
        """
        Calculated tile shape will be an integer multiple of this sub-tile shape.
        i.e. :samp:`(self.tile_shape[i] % self.sub_tile_shape[i]) == 0`,
        for :samp:`i in range(0, len(self.tile_shape))`.
        A 1D :samp:`numpy.ndarray` of :obj:`int` indicating sub-tile shape.
        """
        return self.__sub_tile_shape

    @sub_tile_shape.setter
    def sub_tile_shape(self, sub_tile_shape):
        self.__sub_tile_shape = sub_tile_shape

    @property
    def halo(self):
        """
        Per-axis -ve and +ve halo sizes for extending tiles to overlap with neighbouring tiles.
        A :samp:`(N, 2)` shaped array indicating the number of halo elements
        per axis in the -ve (column 0) and +ve (column 1) index directions.
        """
        return self.__halo

    @halo.setter
    def halo(self, halo):
        # Normalise any accepted halo form to the canonical (ndim, 2) array.
        self.__halo = convert_halo_to_array_form(halo, ndim=self.array_shape.size)

    @property
    def tile_bounds_policy(self):
        """
        A string indicating whether tile halo extents can extend beyond the array domain.
        Valid values are indicated by :attr:`valid_tile_bounds_policies`.
        """
        return self.__tile_bounds_policy

    @tile_bounds_policy.setter
    def tile_bounds_policy(self, tile_bounds_policy):
        self.__tile_bounds_policy = tile_bounds_policy

    @property
    def tile_beg_min(self):
        """
        The per-axis minimum index for :attr:`slice.start`. The per-axis lower bound for
        tile start indices. A 1D :obj:`numpy.ndarray`.
        """
        return self.__tile_beg_min

    @tile_beg_min.setter
    def tile_beg_min(self, tile_beg_min):
        self.__tile_beg_min = tile_beg_min

    @property
    def tile_end_max(self):
        """
        The per-axis maximum index for :attr:`slice.stop`. The per-axis upper bound for
        tile stop indices. A 1D :obj:`numpy.ndarray`.
        """
        return self.__tile_end_max

    @tile_end_max.setter
    def tile_end_max(self, tile_end_max):
        self.__tile_end_max = tile_end_max

    @property
    def split_shape(self):
        """
        The shape of the calculated split array. Indicates the per-axis number
        of sections in the calculated split. A 1D :obj:`numpy.ndarray`.
        """
        return self.__split_shape

    @split_shape.setter
    def split_shape(self, split_shape):
        self.__split_shape = split_shape

    @property
    def split_begs(self):
        """
        The list of per-axis start indices for :obj:`slice` objects.
        A :obj:`list` of 1D :obj:`numpy.ndarray` objects indicating
        the :attr:`slice.start` index for tiles.
        """
        return self.__split_begs

    @split_begs.setter
    def split_begs(self, split_begs):
        self.__split_begs = split_begs

    @property
    def split_ends(self):
        """
        The list of per-axis stop indices for :obj:`slice` objects.
        A :obj:`list` of 1D :obj:`numpy.ndarray` objects indicating
        the :attr:`slice.stop` index for tiles.
        """
        return self.__split_ends

    @split_ends.setter
    def split_ends(self, split_ends):
        self.__split_ends = split_ends
def check_tile_bounds_policy(self):
"""
Raises :obj:`ValueError` if :attr:`tile_bounds_policy`
is not in :samp:`[{self}.ARRAY_BOUNDS, {self}.NO_BOUNDS]`.
"""
if self.tile_bounds_policy not in self.valid_tile_bounds_policies:
raise ValueError(
"Got self.tile_bounds_policy=%s, which is not in %s."
%
(self.tile_bounds_policy, self.valid_tile_bounds_policies)
)
def check_consistent_parameter_dimensions(self):
"""
Ensure that all parameter dimensions are consistent with
the :attr:`array_shape` dimension.
:raises ValueError: For inconsistent parameter dimensions.
"""
if self.indices_per_axis is not None:
if len(self.indices_per_axis) > len(self.array_shape):
raise ValueError(
"Got len(self.indices_per_axis)=%s > len(self.array_shape)=%s, should be equal."
%
(len(self.indices_per_axis), len(self.array_shape))
)
if self.split_num_slices_per_axis is not None:
if len(self.split_num_slices_per_axis) > len(self.array_shape):
raise ValueError(
(
"Got len(self.split_num_slices_per_axis)=%s > len(self.array_shape)=%s,"
+
" should be equal."
)
%
(len(self.split_num_slices_per_axis), len(self.array_shape))
)
if self.tile_shape is not None:
if len(self.tile_shape) != len(self.array_shape):
raise ValueError(
"Got len(self.tile_shape)=%s > len(self.array_shape)=%s, should be equal."
%
(len(self.tile_shape), len(self.array_shape))
)
if self.sub_tile_shape is not None:
if len(self.sub_tile_shape) != len(self.array_shape):
raise ValueError(
"Got len(self.sub_tile_shape)=%s > len(self.array_shape)=%s, should be equal."
%
(len(self.sub_tile_shape), len(self.array_shape))
)
if self.max_tile_shape is not None:
if len(self.max_tile_shape) != len(self.array_shape):
raise ValueError(
"Got len(self.max_tile_shape)=%s > len(self.array_shape)=%s, should be equal."
%
(len(self.max_tile_shape), len(self.array_shape))
)
if self.array_start is not None:
if len(self.array_start) != len(self.array_shape):
raise ValueError(
"Got len(self.array_start)=%s > len(self.array_shape)=%s, should be equal."
%
(len(self.array_start), len(self.array_shape))
)
    def check_consistent_parameter_grouping(self):
        """
        Ensures this object does not have conflicting groups of parameters.
        Exactly one of the groups :samp:`'indices_per_axis'`,
        :samp:`'split_size'`, :samp:`'tile_shape'` or :samp:`'max_tile_bytes'`
        may define the split.

        :raises ValueError: For conflicting or absent parameters.
        """
        # Collect each non-None parameter into its defining group; the keys of
        # parameter_groups are the group names, the values map attribute names
        # to their (non-None) values for error reporting.
        parameter_groups = {}
        if self.indices_per_axis is not None:
            parameter_groups["indices_per_axis"] = \
                {"self.indices_per_axis": self.indices_per_axis}
        if (self.split_size is not None) or (self.split_num_slices_per_axis is not None):
            parameter_groups["split_size"] = \
                {
                    "self.split_size": self.split_size,
                    "self.split_num_slices_per_axis": self.split_num_slices_per_axis,
                }
        if self.tile_shape is not None:
            parameter_groups["tile_shape"] = \
                {"self.tile_shape": self.tile_shape}
        if self.max_tile_bytes is not None:
            parameter_groups["max_tile_bytes"] = \
                {"self.max_tile_bytes": self.max_tile_bytes}
        # max_tile_shape and sub_tile_shape are modifiers of the
        # max_tile_bytes group rather than groups of their own.
        if self.max_tile_shape is not None:
            if "max_tile_bytes" not in parameter_groups.keys():
                parameter_groups["max_tile_bytes"] = {}
            parameter_groups["max_tile_bytes"]["self.max_tile_shape"] = self.max_tile_shape
        if self.sub_tile_shape is not None:
            if "max_tile_bytes" not in parameter_groups.keys():
                parameter_groups["max_tile_bytes"] = {}
            parameter_groups["max_tile_bytes"]["self.sub_tile_shape"] = self.sub_tile_shape
        self.logger.debug("parameter_groups=%s", parameter_groups)
        # More than one group is ambiguous; zero groups leaves the split
        # undefined — both are errors.
        if len(parameter_groups.keys()) > 1:
            group_keys = sorted(parameter_groups.keys())
            raise ValueError(
                "Got conflicting parameter groups specified, "
                +
                "should only specify one group to define the split:\n"
                +
                (
                    "\n".join(
                        [
                            (
                                ("Group %18s: " % ("'%s'" % group_key))
                                +
                                str(parameter_groups[group_key])
                            )
                            for group_key in group_keys
                        ]
                    )
                )
            )
        if len(parameter_groups.keys()) <= 0:
            raise ValueError(
                "No split parameters specified, need parameters from one of the groups: "
                +
                "'indices_per_axis', 'split_size', 'tile_shape' or 'max_tile_bytes'"
            )
def check_split_parameters(self):
"""
Ensures this object has a state consistent with evaluating a split.
:raises ValueError: For conflicting or absent parameters.
"""
self.check_tile_bounds_policy()
self.check_consistent_parameter_dimensions()
self.check_consistent_parameter_grouping()
def update_tile_extent_bounds(self):
"""
Updates the :attr:`tile_beg_min` and :attr:`tile_end_max`
data members according to :attr:`tile_bounds_policy`.
"""
if self.tile_bounds_policy == NO_BOUNDS:
self.tile_beg_min = self.array_start - self.halo[:, 0]
self.tile_end_max = self.array_start + self.array_shape + self.halo[:, 1]
elif self.tile_bounds_policy == ARRAY_BOUNDS:
self.tile_beg_min = self.array_start
self.tile_end_max = self.array_start + self.array_shape
    def set_split_extents_by_indices_per_axis(self):
        """
        Sets split shape :attr:`split_shape` and
        split extents (:attr:`split_begs` and :attr:`split_ends`)
        from values in :attr:`indices_per_axis`.

        :raises ValueError: If :attr:`indices_per_axis` is :samp:`None`.
        """
        if self.indices_per_axis is None:
            raise ValueError("Got None for self.indices_per_axis")
        self.logger.debug("self.array_shape=%s", self.array_shape)
        self.logger.debug("self.indices_per_axis=%s", self.indices_per_axis)
        # Pad so there is one (possibly None) entry of cut indices per axis.
        self.indices_per_axis = \
            pad_with_none(self.indices_per_axis, len(self.array_shape))
        # Define the start and stop indices (extents) for each axis slice
        self.split_shape = _np.ones(len(self.array_shape), dtype="int64")
        self.split_begs = [[], ] * len(self.array_shape)
        self.split_ends = [[], ] * len(self.array_shape)
        for i in range(len(self.indices_per_axis)):
            indices = self.indices_per_axis[i]
            if (indices is not None) and (len(indices) > 0):
                # k cut indices produce k+1 sections: starts are
                # [0, indices...], stops are [indices..., axis size].
                self.split_shape[i] = len(indices) + 1
                self.split_begs[i] = _np.zeros((len(indices) + 1,), dtype="int64")
                self.split_begs[i][1:] = indices
                self.split_ends[i] = _np.zeros((len(self.split_begs[i]),), dtype="int64")
                self.split_ends[i][0:-1] = self.split_begs[i][1:]
                self.split_ends[i][-1] = self.array_shape[i]
            else:
                # start and stop is the full width of the axis
                self.split_begs[i] = [0, ]
                self.split_ends[i] = [self.array_shape[i], ]
        self.logger.debug("self.indices_per_axis=%s", self.indices_per_axis)
def calculate_split_from_extents(self):
"""
Returns split calculated using extents obtained
from :attr:`split_begs` and :attr:`split_ends`.
All calls to calculate the split end up here to produce
the :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
of :obj:`tuple`-of-:obj:`slice` elements.
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
where each element is a :obj:`tuple` of :obj:`slice` objects.
"""
self.logger.debug("self.split_shape=%s", self.split_shape)
self.logger.debug("self.split_begs=%s", self.split_begs)
self.logger.debug("self.split_ends=%s", self.split_ends)
ret = \
_np.array(
[
tuple(
[
slice(
max([
self.split_begs[d][idx[d]]
+ self.array_start[d]
- self.halo[d, 0]
* (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]]),
self.tile_beg_min[d]
]),
min([
self.split_ends[d][idx[d]]
+ self.array_start[d]
+ self.halo[d, 1]
* (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]]),
self.tile_end_max[d]
])
)
for d in range(len(self.split_shape))
]
)
for idx in
_np.array(
_np.unravel_index(
_np.arange(0, _np.product(self.split_shape)),
self.split_shape
)
).T
],
dtype=[("%d" % d, "object") for d in range(len(self.split_shape))]
).reshape(self.split_shape)
return ret
def calculate_split_halos_from_extents(self):
"""
Returns :samp:`(self.ndim, 2)` shaped halo array elements indicating
the halo for each split. Tiles on the boundary may have the halo trimmed
to account for the :attr:`tile_bounds_policy`.
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
where each element is a :samp:`(self.ndim, 2)` shaped :obj:`numpy.ndarray`
indicating the per-axis and per-direction number of halo elements for each tile
in the split.
"""
self.logger.debug("self.split_shape=%s", self.split_shape)
self.logger.debug("self.split_begs=%s", self.split_begs)
self.logger.debug("self.split_ends=%s", self.split_ends)
ret = \
_np.array(
[
(
tuple(
(
min([
self.split_begs[d][idx[d]] - self.tile_beg_min[d],
self.halo[d, 0]
*
(self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]])
]),
min([
self.tile_end_max[d] - self.split_ends[d][idx[d]],
self.halo[d, 1]
*
(self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]])
])
)
for d in range(len(self.split_shape))
)
)
for idx in
_np.array(
_np.unravel_index(
_np.arange(0, _np.product(self.split_shape)),
self.split_shape
)
).T
],
dtype=[("%d" % d, "2int64") for d in range(len(self.split_shape))]
).reshape(self.split_shape)
return ret
    def calculate_split_by_indices_per_axis(self):
        """
        Returns split calculated using extents obtained
        from :attr:`indices_per_axis`.

        :rtype: :obj:`numpy.ndarray`
        :return:
           A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
           where each element is a :obj:`tuple` of :obj:`slice` objects.
        """
        # First compute per-axis extents from the explicit cut indices, then
        # materialise the structured array of slice tuples.
        self.set_split_extents_by_indices_per_axis()
        return self.calculate_split_from_extents()
def calculate_axis_split_extents(self, num_sections, size):
    """
    Divide ``range(0, size)`` into ``num_sections`` (approximately)
    equal sized contiguous intervals.

    :type num_sections: :obj:`int`
    :param num_sections: Number of intervals.
    :type size: :obj:`int`
    :param size: Size of the range to subdivide.
    :rtype: :obj:`tuple`
    :return: Pair :samp:`(begs, ends)` such that
       :samp:`slice(begs[i], ends[i])` defines interval :samp:`i`
       for :samp:`i in range(0, num_sections)`. When
       :samp:`size < num_sections` the trailing intervals are empty.
    """
    section_size = size // num_sections
    if section_size >= 1:
        # Equal-sized sections; the remainder is absorbed by enlarging
        # the leading sections one element each.
        begs = _np.arange(0, section_size * num_sections, section_size)
        rem = size - section_size * num_sections
        for i in range(rem):
            begs[i + 1:] += 1
        ends = _np.zeros_like(begs)
        ends[0:-1] = begs[1:]
        ends[-1] = size
    else:
        # More sections than elements: one element per leading section,
        # empty (zero-length) sections at the tail.
        begs = _np.arange(0, num_sections)
        begs[size:] = size
        ends = begs.copy()
        ends[0:-1] = begs[1:]
    return begs, ends
def set_split_extents_by_split_size(self):
    """
    Set :attr:`split_shape` and the split extents (:attr:`split_begs`,
    :attr:`split_ends`) from :attr:`split_size`
    and :attr:`split_num_slices_per_axis`.

    :raises ValueError: If :attr:`split_size` is :samp:`None` and
       :attr:`split_num_slices_per_axis` contains :samp:`None` or
       non-positive entries.
    """
    if self.split_size is None:
        if (
            _np.all([s is not None for s in self.split_num_slices_per_axis])
            and
            _np.all([s > 0 for s in self.split_num_slices_per_axis])
        ):
            # All per-axis counts are known; the total is their product.
            # np.prod replaces np.product (alias removed in NumPy 2.0).
            self.split_size = _np.prod(self.split_num_slices_per_axis)
        else:
            raise ValueError(
                (
                    "Got invalid self.split_num_slices_per_axis=%s, all elements "
                    "need to be integers greater than zero when self.split_size is None."
                )
                %
                self.split_num_slices_per_axis
            )
    self.logger.debug(
        "Pre cannonicalise: self.split_num_slices_per_axis=%s",
        self.split_num_slices_per_axis)
    # Replace unconstrained (non-positive) per-axis counts so the counts
    # multiply to split_size (bounded per-axis by array_shape).
    self.split_num_slices_per_axis = \
        calculate_num_slices_per_axis(
            self.split_num_slices_per_axis,
            self.split_size,
            self.array_shape
        )
    self.logger.debug(
        "Post cannonicalise: self.split_num_slices_per_axis=%s",
        self.split_num_slices_per_axis)
    # Define the start and stop indices (extents) for each axis slice.
    self.split_shape = self.split_num_slices_per_axis.copy()
    self.split_begs = [[], ] * len(self.array_shape)
    self.split_ends = [[], ] * len(self.array_shape)
    for i in range(len(self.array_shape)):
        self.split_begs[i], self.split_ends[i] = \
            self.calculate_axis_split_extents(
                self.split_shape[i],
                self.array_shape[i]
            )
def calculate_split_by_split_size(self):
    """
    Compute and return the split defined by :attr:`split_size`
    and :attr:`split_num_slices_per_axis`.

    :rtype: :obj:`numpy.ndarray`
    :return: Structured array where each element is a :obj:`tuple`
       of :obj:`slice` objects.
    """
    self.set_split_extents_by_split_size()
    split = self.calculate_split_from_extents()
    return split
def set_split_extents_by_tile_shape(self):
    """
    Set :attr:`split_shape` and the split extents (:attr:`split_begs`,
    :attr:`split_ends`) from the value of :attr:`tile_shape`.
    """
    # Number of tiles per axis: ceiling division of array_shape by
    # tile_shape (the last tile per axis may be smaller).
    self.split_shape = ((self.array_shape - 1) // self.tile_shape) + 1
    ndim = len(self.array_shape)
    self.split_begs = [[], ] * ndim
    self.split_ends = [[], ] * ndim
    for i in range(ndim):
        self.split_begs[i] = _np.arange(0, self.array_shape[i], self.tile_shape[i])
        self.split_ends[i] = _np.zeros_like(self.split_begs[i])
        self.split_ends[i][0:-1] = self.split_begs[i][1:]
        self.split_ends[i][-1] = self.array_shape[i]
def calculate_split_by_tile_shape(self):
    """
    Compute and return the split defined by :attr:`tile_shape`.

    :rtype: :obj:`numpy.ndarray`
    :return: Structured array where each element is a :obj:`tuple`
       of :obj:`slice` objects.
    """
    self.set_split_extents_by_tile_shape()
    split = self.calculate_split_from_extents()
    return split
def set_split_extents_by_tile_max_bytes(self):
    """
    Set :attr:`tile_shape` and the split extents (:attr:`split_begs`,
    :attr:`split_ends`) from :attr:`max_tile_bytes`
    (and :attr:`max_tile_shape`, :attr:`sub_tile_shape`, :attr:`halo`).
    """
    # First derive a tile shape respecting the byte budget, then split
    # by that tile shape.
    self.tile_shape = \
        calculate_tile_shape_for_max_bytes(
            array_shape=self.array_shape,
            array_itemsize=self.array_itemsize,
            max_tile_bytes=self.max_tile_bytes,
            max_tile_shape=self.max_tile_shape,
            sub_tile_shape=self.sub_tile_shape,
            halo=self.halo
        )
    self.set_split_extents_by_tile_shape()
def calculate_split_by_tile_max_bytes(self):
    """
    Compute and return the split defined by :attr:`max_tile_bytes`
    (and :attr:`max_tile_shape`, :attr:`sub_tile_shape`, :attr:`halo`).

    :rtype: :obj:`numpy.ndarray`
    :return: Structured array where each element is a :obj:`tuple`
       of :obj:`slice` objects.
    """
    self.set_split_extents_by_tile_max_bytes()
    split = self.calculate_split_from_extents()
    return split
def set_split_extents(self):
    """
    Set the split extents (:attr:`split_begs` and :attr:`split_ends`)
    by dispatching to the ``set_split_extents_by_*`` method matching the
    parameters given to :meth:`__init__`.

    Dispatch priority: :attr:`indices_per_axis`, then
    :attr:`split_size`/:attr:`split_num_slices_per_axis`,
    then :attr:`tile_shape`, then :attr:`max_tile_bytes`.
    """
    self.check_split_parameters()
    self.update_tile_extent_bounds()
    if self.indices_per_axis is not None:
        self.set_split_extents_by_indices_per_axis()
    elif (self.split_size is not None) or (self.split_num_slices_per_axis is not None):
        self.set_split_extents_by_split_size()
    elif self.tile_shape is not None:
        self.set_split_extents_by_tile_shape()
    elif self.max_tile_bytes is not None:
        self.set_split_extents_by_tile_max_bytes()
def calculate_split(self):
    """
    Compute the split described by this splitter's configuration.

    :rtype: :obj:`numpy.ndarray`
    :return: Structured array of dimension ``len(self.array_shape)``;
       each element is a :obj:`tuple` of ``len(self.array_shape)``
       :obj:`slice` objects, bounded
       by ``self.array_start - self.halo[:, 0]``
       and ``self.array_start + self.array_shape + self.halo[:, 1]``.
    """
    self.set_split_extents()
    split = self.calculate_split_from_extents()
    return split
# Attach the full Sphinx docstring to ShapeSplitter.__init__ at import time,
# composed from the shared *_param_doc fragments so the parameter docs stay
# consistent with shape_split() and array_split().
# NOTE(review): a throw-away ShapeSplitter([0, ]) instance is built just to
# reach the underlying function via __func__ -- confirm this import-time side
# effect is intended.
ShapeSplitter([0, ]).__init__.__func__.__doc__ = \
"""
Initialises parameters which define a split.
%s
%s
.. seealso:: :ref:`array_split-examples`
""" % (
_array_shape_param_doc,
(
_ShapeSplitter__init__params_doc
%
(
_array_start_param_doc,
"\n" + _array_itemsize_param_doc,
_halo_param_doc,
_array_tile_bounds_policy_param_doc,
)
)
)
def shape_split(array_shape, *args, **kwargs):
    "To be replaced."
    # Thin wrapper over ShapeSplitter; the real docstring is attached
    # below via the shape_split.__doc__ assignment.
    splitter = ShapeSplitter(array_shape, *args, **kwargs)
    return splitter.calculate_split()
# Compose shape_split's docstring from the shared *_param_doc fragments
# (kept in sync with ShapeSplitter.__init__ and array_split).
shape_split.__doc__ =\
"""
Splits specified :samp:`{array_shape}` in tiles, returns array of :obj:`slice` tuples.
%s
%s
:rtype: :obj:`numpy.ndarray`
:return: Array of :obj:`tuple` objects. Each :obj:`tuple` element
is a :obj:`slice` object so that each :obj:`tuple` defines
a multi-dimensional slice of an array of shape :samp:`{array_shape}`.
.. seealso:: :func:`array_split.array_split`, :meth:`array_split.ShapeSplitter`,
:ref:`array_split-examples`
""" % (
_array_shape_param_doc,
(
_ShapeSplitter__init__params_doc
%
(
_array_start_param_doc,
"\n" + _array_itemsize_param_doc,
_halo_param_doc,
_array_tile_bounds_policy_param_doc,
)
)
)
def array_split(
    ary,
    indices_or_sections=None,
    axis=None,
    tile_shape=None,
    max_tile_bytes=None,
    max_tile_shape=None,
    sub_tile_shape=None,
    halo=None
):
    "To be replaced."
    # Compute the slice tuples for ary.shape, then materialise each slice
    # of ary. The real docstring is attached below via array_split.__doc__.
    split = \
        shape_split(
            array_shape=ary.shape,
            indices_or_sections=indices_or_sections,
            axis=axis,
            array_start=None,
            array_itemsize=ary.itemsize,
            tile_shape=tile_shape,
            max_tile_bytes=max_tile_bytes,
            max_tile_shape=max_tile_shape,
            sub_tile_shape=sub_tile_shape,
            halo=halo,
            tile_bounds_policy=ARRAY_BOUNDS
        )
    return [ary[slyce] for slyce in split.flatten()]
# Compose array_split's docstring from the shared *_param_doc fragments;
# the halo fragment is rewritten to refer to ary.shape instead of array_shape.
array_split.__doc__ =\
"""
Splits the specified array :samp:`{ary}` into sub-arrays, returns list of :obj:`numpy.ndarray`.
:type ary: :obj:`numpy.ndarray`
:param ary: Array which is split into sub-arrays.
%s
:rtype: :obj:`list`
:return: List of :obj:`numpy.ndarray` elements, where each element is
a *slice* from :samp:`{ary}` (potentially an empty slice).
.. seealso:: :func:`array_split.shape_split`, :meth:`array_split.ShapeSplitter`,
:ref:`array_split-examples`
""" % (
_ShapeSplitter__init__params_doc
%
(
"",
"",
_halo_param_doc.replace("len({array_shape})", "len({ary}.shape)"),
""
)
)
# Export every public (non-underscore) name defined in this module.
__all__ = [s for s in dir() if not s.startswith('_')]
def calculate_num_slices_per_axis(num_slices_per_axis, num_slices, max_slices_per_axis=None):
    """
    Return a copy of :samp:`{num_slices_per_axis}` in which the
    non-positive elements have been replaced by positive integers such
    that the product of the returned array
    equals :samp:`{num_slices}` and the replaced elements do not
    exceed the corresponding :samp:`{max_slices_per_axis}` entries.

    :type num_slices_per_axis: sequence of :obj:`int`
    :param num_slices_per_axis: Per-axis subdivision constraints;
       positive entries are kept as-is, non-positive entries are computed.
    :type num_slices: :obj:`int`
    :param num_slices: Required total number of slices (product of the
       returned array).
    :type max_slices_per_axis: sequence of :obj:`int` (or :samp:`None`)
    :param max_slices_per_axis: Per-axis maximum number of subdivisions;
       defaults to unbounded (:samp:`numpy.inf` per axis).
    :rtype: :obj:`numpy.ndarray`
    :return: Array whose product equals :samp:`{num_slices}`.
    :raises ValueError: If :samp:`{max_slices_per_axis}` has a
       non-positive entry, or if no integer grid with the requested
       product can be constructed from the constraints.

    Examples::

        >>> calculate_num_slices_per_axis([0, 0, 0], 16)
        array([4, 2, 2])
        >>> calculate_num_slices_per_axis([1, 0, 0], 16)
        array([1, 4, 4])
        >>> calculate_num_slices_per_axis([1, 0, 0], 16, [2, 2, 16])
        array([1, 2, 8])
    """
    logger = _logging.getLogger(__name__)
    ret_array = _np.array(num_slices_per_axis, copy=True)
    if max_slices_per_axis is None:
        max_slices_per_axis = _np.array([_np.inf, ] * len(num_slices_per_axis))
    max_slices_per_axis = _np.array(max_slices_per_axis)
    if _np.any(max_slices_per_axis <= 0):
        raise ValueError("Got non-positive value in max_slices_per_axis=%s" % max_slices_per_axis)
    while _np.any(ret_array <= 0):
        # Product of the already-fixed axes (np.prod of an empty
        # selection is 1; np.prod replaces the NumPy-2.0-removed
        # np.product alias).
        prd = _np.prod(ret_array[_np.where(ret_array > 0)])
        if (num_slices < prd) or ((num_slices % prd) > 0):
            raise ValueError(
                (
                    "Unable to construct grid of num_slices=%s elements from "
                    "num_slices_per_axis=%s (with max_slices_per_axis=%s)"
                )
                %
                (num_slices, num_slices_per_axis, max_slices_per_axis)
            )
        ridx = _np.where(ret_array <= 0)
        # Factor the remaining slice count over the unresolved axes,
        # largest factor first.
        f = shape_factors(num_slices // prd, ridx[0].shape[0])[::-1]
        if _np.all(f < max_slices_per_axis[ridx]):
            ret_array[ridx] = f
        else:
            # Clamp offending axes to their maximum, then decrement until
            # the remaining product divides num_slices; other unresolved
            # axes are re-factored on the next loop iteration.
            for i in range(ridx[0].shape[0]):
                if f[i] >= max_slices_per_axis[ridx[0][i]]:
                    ret_array[ridx[0][i]] = max_slices_per_axis[ridx[0][i]]
                    prd = _np.prod(ret_array[_np.where(ret_array > 0)])
                    while (num_slices % prd) > 0:
                        ret_array[ridx[0][i]] -= 1
                        prd = _np.prod(ret_array[_np.where(ret_array > 0)])
        logger.debug(
            "ridx=%s, f=%s, ret_array=%s, max_slices_per_axis=%s",
            ridx, f, ret_array, max_slices_per_axis
        )
    return ret_array
array([1, 2, 8]) | train | https://github.com/array-split/array_split/blob/e07abe3001209394dde809f7e6f505f9f49a1c26/array_split/split.py#L398-L481 | null | """
===================================
The :mod:`array_split.split` Module
===================================
.. currentmodule:: array_split.split
Defines array splitting functions and classes.
Classes and Functions
=====================
.. autosummary::
:toctree: generated/
shape_factors - Compute *largest* factors of a given integer.
calculate_num_slices_per_axis - Computes per-axis divisions for a multi-dimensional shape.
calculate_tile_shape_for_max_bytes - Calculate a tile shape subject to max bytes restriction.
convert_halo_to_array_form - converts halo argument to :samp:`(ndim, 2)` shaped array.
ShapeSplitter - Splits a given shape into slices.
shape_split - Splits a specified shape and returns :obj:`numpy.ndarray` of :obj:`slice` elements.
array_split - Equivalent to :func:`numpy.array_split`.
Attributes
==========
.. autodata:: ARRAY_BOUNDS
.. autodata:: NO_BOUNDS
Utilities
=========
.. autosummary::
:toctree: generated/
is_scalar - Return :samp:`True` if argument is numeric scalar.
is_sequence - Return :samp:`True` if argument is a sequence.
is_indices - Return :samp:`True` if argument is a sequence.
pad_with_object - End pads a sequence with specified object.
pad_with_none - End pads a sequence with :samp:`None` elements.
"""
from __future__ import absolute_import
import numpy as _np
from .license import license as _license, copyright as _copyright, version as _version
from . import logging as _logging
__copyright__ = _copyright()
__version__ = _version()
__author__ = "Shane J. Latham"
__license__ = _license()
def is_scalar(obj):
    """
    Return :samp:`True` if :samp:`{obj}` is a numeric scalar.

    :type obj: :obj:`object`
    :param obj: Object to test.
    :rtype: :obj:`bool`
    :return: :samp:`True` when :samp:`{obj}` is convertible to an
       integer (exposes :samp:`__int__` or :samp:`__long__`).

    Example::

        >>> is_scalar(5)
        True
        >>> is_scalar(2.0)
        True
        >>> is_scalar([1, 2, 3])
        False
    """
    return hasattr(obj, "__int__") or hasattr(obj, "__long__")
def is_sequence(obj):
    """
    Return :samp:`True` if :samp:`{obj}` is a sequence
    (e.g. a :obj:`list` or :obj:`tuple`).

    :type obj: :obj:`object`
    :param obj: Object to test.
    :rtype: :obj:`bool`
    :return: :samp:`True` when :samp:`{obj}` exposes any
       of :samp:`__len__`, :samp:`__getitem__` or :samp:`__iter__`.

    Example::

        >>> is_sequence([1, 2, 3])
        True
        >>> is_sequence(5)
        False
    """
    return (
        hasattr(obj, "__len__")
        or
        hasattr(obj, "__getitem__")
        or
        hasattr(obj, "__iter__")
    )
def is_indices(indices_or_sections):
    """
    Distinguish the two meanings of the ``indices_or_sections`` argument
    of :meth:`ShapeSplitter.__init__`: a sequence of *cut* indices
    (returns :samp:`True`) versus a total number of tiles
    (returns :samp:`False`).

    :type indices_or_sections: :obj:`object`
    :param indices_or_sections: Object to test.
    :rtype: :obj:`bool`
    :return: :samp:`is_sequence({indices_or_sections})`.
    """
    return is_sequence(indices_or_sections)
def pad_with_object(sequence, new_length, obj=None):
    """
    Return :samp:`{sequence}` end-padded with :samp:`{obj}` elements so
    that the result has length :samp:`{new_length}`.

    :type sequence: iterable
    :param sequence: Sequence to pad. Returned unchanged (and un-copied)
       when it already has length :samp:`{new_length}`; otherwise a new
       padded :obj:`list` is returned.
    :type new_length: :obj:`int`
    :param new_length: Length of the returned sequence.
    :type obj: :obj:`object`
    :param obj: Padding element.
    :raises ValueError: If :samp:`len({sequence}) > {new_length}`.

    Example::

        >>> pad_with_object([1, 2, 3], 5, obj=0)
        [1, 2, 3, 0, 0]
    """
    if len(sequence) < new_length:
        sequence = \
            list(sequence) + [obj, ] * (new_length - len(sequence))
    elif len(sequence) > new_length:
        raise ValueError(
            "Got len(sequence)=%s which exceeds new_length=%s"
            %
            (len(sequence), new_length)
        )
    return sequence
def pad_with_none(sequence, new_length):
    """
    Return :samp:`{sequence}` end-padded with :samp:`None` elements so
    that the result has length :samp:`{new_length}`.

    :type sequence: iterable
    :param sequence: Sequence to pad.
    :type new_length: :obj:`int`
    :param new_length: Length of the returned sequence.
    :rtype: :obj:`list`
    :raises ValueError: If :samp:`len({sequence}) > {new_length}`.
    """
    return pad_with_object(sequence, new_length, obj=None)
def shape_factors(n, dim=2):
    """
    Return :samp:`{dim}` factors of :samp:`{n}` (sorted ascending) whose
    product equals :samp:`{n}`, chosen to be as close to equal
    (*square*/*cubic*) as possible.

    :type n: :obj:`int`
    :param n: Integer to factorise.
    :type dim: :obj:`int`
    :param dim: Number of factors.
    :rtype: :obj:`numpy.ndarray`
    :return: A :samp:`({dim},)` shaped array of integer factors
       of :samp:`{n}`.

    Example::

        >>> shape_factors(24, 3)
        array([2, 3, 4])
        >>> shape_factors(24, 4)
        array([2, 2, 2, 3])
    """
    if dim <= 1:
        factors = [n, ]
    else:
        # Take the largest factor not exceeding the dim-th root of n,
        # then recurse on the remaining product with one fewer factor.
        for f in range(int(n ** (1.0 / float(dim))) + 1, 0, -1):
            if (n % f) == 0:
                factors = [f, ] + list(shape_factors(n // f, dim=dim - 1))
                break
    factors.sort()
    return _np.array(factors)
def calculate_tile_shape_for_max_bytes(
        array_shape,
        array_itemsize,
        max_tile_bytes,
        max_tile_shape=None,
        sub_tile_shape=None,
        halo=None
):
    """
    Return a tile shape ``tile_shape`` such
    that :samp:`numpy.prod(tile_shape) * numpy.sum({array_itemsize}) <= {max_tile_bytes}`.

    If :samp:`{max_tile_shape}` is given,
    then :samp:`numpy.all(tile_shape <= {max_tile_shape})`; if
    :samp:`{sub_tile_shape}` is given,
    then :samp:`numpy.all((tile_shape %% {sub_tile_shape}) == 0)`.

    :type array_shape: sequence of :obj:`int`
    :param array_shape: Shape of the array to be tiled.
    :type array_itemsize: :obj:`int`
    :param array_itemsize: Bytes per array element.
    :type max_tile_bytes: :obj:`int`
    :param max_tile_bytes: Maximum bytes per tile.
    :type max_tile_shape: sequence of :obj:`int`
    :param max_tile_shape: Per-axis maximum for the returned shape.
    :type sub_tile_shape: sequence of :obj:`int`
    :param sub_tile_shape: The returned shape is an even multiple of this.
    :type halo: :obj:`int`, sequence of :obj:`int`,
       or :samp:`(len({array_shape}), 2)` shaped :obj:`numpy.ndarray`
    :param halo: Per-axis/per-direction halo widths counted against the
       byte budget.
    :rtype: :obj:`numpy.ndarray`
    :return: 1D tile shape of length :samp:`len({array_shape})`.
    :raises ValueError: If an :samp:`{array_shape}` element is smaller
       than the corresponding :samp:`{sub_tile_shape}` element.

    Examples::

        >>> calculate_tile_shape_for_max_bytes(
        ...     array_shape=[512,],
        ...     array_itemsize=1,
        ...     max_tile_bytes=512
        ... )
        array([512])
        >>> calculate_tile_shape_for_max_bytes(
        ...     array_shape=[512,],
        ...     array_itemsize=1,
        ...     max_tile_bytes=512-1  # tile shape will now be halved
        ... )
        array([256])
    """
    logger = _logging.getLogger(__name__ + ".calculate_tile_shape_for_max_bytes")
    logger.debug("calculate_tile_shape_for_max_bytes: enter:")
    logger.debug("array_shape=%s", array_shape)
    logger.debug("array_itemsize=%s", array_itemsize)
    logger.debug("max_tile_bytes=%s", max_tile_bytes)
    logger.debug("max_tile_shape=%s", max_tile_shape)
    logger.debug("sub_tile_shape=%s", sub_tile_shape)
    logger.debug("halo=%s", halo)
    # Canonicalise the arguments to int64 arrays with defaults filled in.
    array_shape = _np.array(array_shape, dtype="int64")
    array_itemsize = _np.sum(array_itemsize, dtype="int64")
    if max_tile_shape is None:
        max_tile_shape = _np.array(array_shape, copy=True)
    max_tile_shape = \
        _np.array(_np.minimum(max_tile_shape, array_shape), copy=True, dtype=array_shape.dtype)
    if sub_tile_shape is None:
        sub_tile_shape = _np.ones((len(array_shape),), dtype="int64")
    sub_tile_shape = _np.array(sub_tile_shape, dtype="int64")
    halo = convert_halo_to_array_form(halo=halo, ndim=len(array_shape))
    if _np.any(array_shape < sub_tile_shape):
        raise ValueError(
            "Got array_shape=%s element less than corresponding sub_tile_shape=%s element."
            %
            (
                array_shape,
                sub_tile_shape
            )
        )
    logger.debug("max_tile_shape=%s", max_tile_shape)
    logger.debug("sub_tile_shape=%s", sub_tile_shape)
    logger.debug("halo=%s", halo)
    # Work in units of sub-tiles; np.prod replaces the NumPy-2.0-removed
    # np.product alias throughout this function.
    array_sub_tile_split_shape = ((array_shape - 1) // sub_tile_shape) + 1
    tile_sub_tile_split_shape = array_shape // sub_tile_shape
    if len(tile_sub_tile_split_shape) <= 1:
        # 1D case: solve the byte budget directly.
        tile_sub_tile_split_shape[0] = \
            int(_np.floor(
                (
                    (max_tile_bytes / float(array_itemsize))
                    -
                    _np.sum(halo)
                )
                /
                float(sub_tile_shape[0])
            ))
    tile_sub_tile_split_shape = \
        _np.minimum(
            tile_sub_tile_split_shape,
            max_tile_shape // sub_tile_shape
        )
    logger.debug("Pre loop: tile_sub_tile_split_shape=%s", tile_sub_tile_split_shape)
    # Shrink one axis at a time (leading axes first) until the
    # halo-extended tile fits inside max_tile_bytes.
    current_axis = 0
    while (
        (current_axis < len(tile_sub_tile_split_shape))
        and
        (
            (
                _np.prod(tile_sub_tile_split_shape * sub_tile_shape + _np.sum(halo, axis=1))
                *
                array_itemsize
            )
            >
            max_tile_bytes
        )
    ):
        if current_axis < (len(tile_sub_tile_split_shape) - 1):
            tile_sub_tile_split_shape[current_axis] = 1
            tile_sub_tile_split_shape[current_axis] = \
                (
                    max_tile_bytes
                    //
                    (
                        _np.prod(
                            tile_sub_tile_split_shape *
                            sub_tile_shape +
                            _np.sum(halo, axis=1))
                        *
                        array_itemsize
                    )
                )
            tile_sub_tile_split_shape[current_axis] = \
                max([1, tile_sub_tile_split_shape[current_axis]])
        else:
            # Last axis: account for halos on the preceding axes exactly.
            sub_tile_shape_h = sub_tile_shape.copy()
            sub_tile_shape_h[0:current_axis] += _np.sum(halo[0:current_axis, :], axis=1)
            tile_sub_tile_split_shape[current_axis] = \
                int(_np.floor(
                    (
                        (max_tile_bytes / float(array_itemsize))
                        -
                        _np.sum(halo[current_axis]) * _np.prod(sub_tile_shape_h[0:current_axis])
                    )
                    /
                    float(_np.prod(sub_tile_shape_h))
                ))
        current_axis += 1
    logger.debug("Post loop: tile_sub_tile_split_shape=%s", tile_sub_tile_split_shape)
    tile_shape = _np.minimum(array_shape, tile_sub_tile_split_shape * sub_tile_shape)
    logger.debug("pre cannonicalise tile_shape=%s", tile_shape)
    # Cannonicalise: spread the tiling (approximately) evenly over the
    # array in whole sub-tile multiples.
    tile_split_shape = ((array_shape - 1) // tile_shape) + 1
    logger.debug("tile_split_shape=%s", tile_split_shape)
    tile_shape = (((array_sub_tile_split_shape - 1) // tile_split_shape) + 1) * sub_tile_shape
    logger.debug("post cannonicalise tile_shape=%s", tile_shape)
    return tile_shape
# Shared Sphinx docstring fragments. They are combined via %-interpolation
# into the docstrings of ShapeSplitter.__init__, shape_split and array_split
# (see the __doc__ assignments later in this module) so the three parameter
# lists stay consistent.
_array_shape_param_doc =\
"""
:type array_shape: sequence of :obj:`int`
:param array_shape: The shape to be *split*.
"""
_array_start_param_doc =\
"""
:type array_start: :samp:`None` or sequence of :obj:`int`
:param array_start: The start index. Defaults to :samp:`[0,]*len(array_shape)`.
The array indexing extents are assumed to range from :samp:`{array_start}`
to :samp:`{array_start} + {array_shape}`.
See :ref:`the-array_start-parameter-examples` examples.
"""
_array_itemsize_param_doc =\
"""
:type array_itemsize: int or sequence of :obj:`int`
:param array_itemsize: Number of bytes per array element.
Only relevant when :samp:`{max_tile_bytes}` is specified.
See :ref:`splitting-by-maximum-bytes-per-tile-examples` examples.
"""
_array_tile_bounds_policy_param_doc =\
"""
:type tile_bounds_policy: :obj:`str`
:param tile_bounds_policy: Specifies whether tiles can extend beyond the array boundaries.
Only relevant for halo values greater than one. If :samp:`{tile_bounds_policy}`
is :data:`ARRAY_BOUNDS`
then the calculated tiles will not extend beyond the array
extents :samp:`{array_start}` and :samp:`{array_start} + {array_shape}`.
If :samp:`{tile_bounds_policy}` is :data:`NO_BOUNDS`
then the returned tiles will extend beyond
the :samp:`{array_start}` and :samp:`{array_start} + {array_shape}` extend
for positive :samp:`{halo}` values. See :ref:`the-halo-parameter-examples` examples.
"""
# NOTE: this fragment itself contains %s placeholders which are filled with
# the other fragments above at __doc__-composition time.
_ShapeSplitter__init__params_doc =\
"""
:type indices_or_sections: :samp:`None`, :obj:`int` or sequence of :obj:`int`
:param indices_or_sections: If an integer, indicates the number of
elements in the calculated *split* array. If a sequence, indicates
the indices (per axis) at which the splits occur.
See :ref:`splitting-by-number-of-tiles-examples` examples.
:type axis: :samp:`None`, :obj:`int` or sequence of :obj:`int`
:param axis: If an integer, indicates the axis which is to be split.
If a sequence integers, indicates the number of slices per axis,
i.e. if :samp:`{axis} = [3, 5]` then axis :samp:`0` is cut into
3 slices and axis :samp:`1` is cut into 5 slices for a total
of 15 (:samp:`3*5`) rectangular slices in the returned :samp:`(3, 5)`
shaped split.
See :ref:`splitting-by-number-of-tiles-examples` examples
and :ref:`splitting-by-per-axis-split-indices-examples` examples.
%s%s
:type tile_shape: :samp:`None` or sequence of :obj:`int`
:param tile_shape: When not :samp:`None`, specifies explicit shape for tiles.
Should be same length as :samp:`{array_shape}`.
See :ref:`splitting-by-tile-shape-examples` examples.
:type max_tile_bytes: :samp:`None` or :obj:`int`
:param max_tile_bytes: The maximum number of bytes for calculated :samp:`tile_shape`.
See :ref:`splitting-by-maximum-bytes-per-tile-examples` examples.
:type max_tile_shape: :samp:`None` or sequence of :obj:`int`
:param max_tile_shape: Per axis maximum shapes for the calculated :samp:`tile_shape`.
Only relevant when :samp:`{max_tile_bytes}` is specified. Should be same length
as :samp:`{array_shape}`.
See :ref:`splitting-by-maximum-bytes-per-tile-examples` examples.
:type sub_tile_shape: :samp:`None` or sequence of :obj:`int`
:param sub_tile_shape: When not :samp:`None`, the calculated :samp:`tile_shape` will
be an even multiple of this sub-tile shape. Only relevant when :samp:`{max_tile_bytes}`
is specified. Should be same length as :samp:`{array_shape}`.
See :ref:`splitting-by-maximum-bytes-per-tile-examples` examples.%s%s
"""
_halo_param_doc =\
"""
:type halo: :samp:`None`, :obj:`int`, sequence of :obj:`int`, or :samp:`(len({array_shape}), 2)`
shaped :obj:`numpy.ndarray`
:param halo: How tiles are extended per axis in -ve and +ve directions with *halo*
elements. See :ref:`the-halo-parameter-examples` examples.
"""
#: Indicates that tiles are always within the array bounds.
#: See :ref:`the-halo-parameter-examples` examples.
__ARRAY_BOUNDS = "array_bounds"
# NOTE(review): @property on a *module-level* function does not behave like a
# class property -- at plain module scope ARRAY_BOUNDS/NO_BOUNDS would be
# bound to the property objects themselves rather than the strings.
# Presumably these were defined inside a (module-)class context that was lost
# in extraction; confirm against the upstream source before relying on this.
@property
def ARRAY_BOUNDS(): # pylint: disable=invalid-name
"""
Indicates that tiles are always within the array bounds,
resulting in tiles which have truncated halos.
See :ref:`the-halo-parameter-examples` examples.
"""
return __ARRAY_BOUNDS
#: Indicates that tiles may extend beyond the array bounds.
#: See :ref:`the-halo-parameter-examples` examples.
__NO_BOUNDS = "no_bounds"
@property
def NO_BOUNDS(): # pylint: disable=invalid-name
"""
Indicates that tiles may have halos which extend beyond the array bounds.
See :ref:`the-halo-parameter-examples` examples.
"""
return __NO_BOUNDS
def convert_halo_to_array_form(halo, ndim):
    """
    Convert the :samp:`{halo}` argument to a :samp:`({ndim}, 2)` shaped
    array of :obj:`numpy.int64` elements.

    Accepted forms for :samp:`{halo}`: :samp:`None` (zero halo), a scalar
    (same halo for all axes and directions), a length-:samp:`{ndim}`
    sequence (per-axis halo, same in both directions), or a
    full :samp:`({ndim}, 2)` shaped array.

    :type halo: :samp:`None`, :obj:`int`, sequence of :obj:`int`,
       or :samp:`({ndim}, 2)` shaped array of :obj:`int`
    :param halo: Halo specification to convert.
    :type ndim: :obj:`int`
    :param ndim: Number of dimensions.
    :rtype: :obj:`numpy.ndarray`
    :return: A :samp:`({ndim}, 2)` shaped :obj:`numpy.int64` array.
    :raises ValueError: If the resulting array does not
       have :samp:`{ndim}` rows.

    Examples::

        >>> convert_halo_to_array_form(halo=[0, 1, 2], ndim=3)
        array([[0, 0],
               [1, 1],
               [2, 2]])
        >>> convert_halo_to_array_form(halo=[[0, 1], [2, 3], [3, 4]], ndim=3)
        array([[0, 1],
               [2, 3],
               [3, 4]])
    """
    dtyp = _np.int64
    if halo is None:
        halo = _np.zeros((ndim, 2), dtype=dtyp)
    elif is_scalar(halo):
        halo = _np.zeros((ndim, 2), dtype=dtyp) + halo
    elif (ndim == 1) and (_np.array(halo).shape == (2,)):
        # Ambiguous 1D case: treat the pair as the (-ve, +ve) halo of
        # the single axis.
        halo = _np.array([halo, ], copy=True, dtype=dtyp)
    elif len(_np.array(halo).shape) == 1:
        # Per-axis halo, duplicated for the -ve and +ve directions.
        halo = _np.array([halo, halo], dtype=dtyp).T.copy()
    else:
        halo = _np.array(halo, copy=True, dtype=dtyp)
    # Validate every construction path (robustness: also catches a 1D
    # per-axis sequence of the wrong length).
    if halo.shape[0] != ndim:
        raise ValueError(
            "Got halo.shape=%s, expecting halo.shape=(%s, 2)"
            %
            (halo.shape, ndim)
        )
    return halo
class ShapeSplitter(object):
"""
Implements array shape splitting. There are three main (top-level) methods:
:meth:`__init__`
Initialisation of parameters which define the split.
:meth:`set_split_extents`
Calculates the per-axis indices for the cuts. Sets
the :attr:`split_shape`, :attr:`split_begs`
and :attr:`split_ends` attributes.
:meth:`calculate_split`
Calls :meth:`set_split_extents` followed
by :meth:`calculate_split_from_extents` to
return the :obj:`numpy.ndarray` of :obj:`tuple` elements (slices).
Example::
>>> import numpy as np
>>> ary = np.arange(0, 10)
>>> splitter = ShapeSplitter(ary.shape, 3)
>>> split = splitter.calculate_split()
>>> split.shape
(3,)
>>> split
array([(slice(0, 4, None),), (slice(4, 7, None),), (slice(7, 10, None),)],
dtype=[('0', 'O')])
>>> [ary[slyce] for slyce in split.flatten()]
[array([0, 1, 2, 3]), array([4, 5, 6]), array([7, 8, 9])]
>>>
>>> splitter.split_shape # equivalent to split.shape above
array([3])
>>> splitter.split_begs # start indices for tile extents
[array([0, 4, 7])]
>>> splitter.split_ends # stop indices for tile extents
[array([ 4, 7, 10])]
"""
#: Class attribute for :obj:`logging.Logger` logging.
logger = _logging.getLogger(__name__ + ".ShapeSplitter")
#: Class attribute indicating list of valid values for :attr:`tile_bound_policy`.
#: See :data:`ARRAY_BOUNDS` and :data:`NO_BOUNDS`.
valid_tile_bounds_policies = [ARRAY_BOUNDS, NO_BOUNDS]
def __init__(
        self,
        array_shape,
        indices_or_sections=None,
        axis=None,
        array_start=None,
        array_itemsize=1,
        tile_shape=None,
        max_tile_bytes=None,
        max_tile_shape=None,
        sub_tile_shape=None,
        halo=None,
        tile_bounds_policy=ARRAY_BOUNDS
):
    """
    Initialise the parameters which define a split.

    (The full parameter documentation is attached dynamically after the
    class definition, composed from the shared *_param_doc fragments.)
    """
    # Initialise *private* attributes.
    self.__array_shape = None
    self.__array_start = None
    self.__array_itemsize = None
    self.__indices_per_axis = None
    self.__split_size = None
    self.__split_num_slices_per_axis = None
    self.__tile_shape = None
    self.__max_tile_bytes = None
    self.__max_tile_shape = None
    self.__sub_tile_shape = None
    self.__halo = None
    self.__tile_bounds_policy = None
    self.__tile_beg_min = None
    self.__tile_end_max = None
    self.__split_shape = None
    self.__split_begs = None
    self.__split_ends = None
    # Now set properties from arguments
    self.array_shape = _np.array(array_shape)
    if array_start is None:
        array_start = _np.zeros_like(self.array_shape)
    self.array_start = array_start
    self.array_itemsize = array_itemsize
    indices_per_axis = None
    if is_indices(indices_or_sections):
        # indices_or_sections is a sequence: interpreted as cut indices.
        num_subarrays = None
        indices_per_axis = indices_or_sections
        if (
            ((axis is None) or is_scalar(axis))
            and
            (not _np.any([is_sequence(_e) for _e in indices_or_sections]))
        ):
            if axis is None:
                axis = 0
            # Make indices_per_axis a list of lists, so that
            # element 0 is a list of indices for axis 0
            indices_per_axis = [None, ] * len(array_shape)
            indices_per_axis[axis] = indices_or_sections
    else:
        # indices_or_sections is a scalar: total number of sub-arrays.
        indices_per_axis = None
        num_subarrays = indices_or_sections
    self.indices_per_axis = indices_per_axis
    self.split_size = num_subarrays
    split_num_slices_per_axis = None
    if (self.split_size is not None) or (axis is not None):
        if axis is None:
            axis = 0
        if is_sequence(axis):
            # axis is a sequence: per-axis number of slices.
            split_num_slices_per_axis = pad_with_object(axis, len(self.array_shape), 1)
        elif self.split_size is not None:
            # axis is scalar: split_size slices along that single axis.
            split_num_slices_per_axis = pad_with_object([], len(self.array_shape), 1)
            split_num_slices_per_axis[axis] = self.split_size
    self.split_num_slices_per_axis = split_num_slices_per_axis
    self.tile_shape = tile_shape
    self.max_tile_bytes = max_tile_bytes
    self.max_tile_shape = max_tile_shape
    self.sub_tile_shape = sub_tile_shape
    halo = self.convert_halo_to_array_form(halo)
    self.halo = halo
    if tile_bounds_policy is None:
        tile_bounds_policy = ARRAY_BOUNDS
    self.tile_bounds_policy = tile_bounds_policy
    # Default (no-halo) bounds for the halo-extended tiles.
    self.tile_beg_min = self.array_start
    self.tile_end_max = self.array_start + self.array_shape
    # Extents are computed lazily by set_split_extents().
    self.split_shape = None
    self.split_begs = None
    self.split_ends = None
def convert_halo_to_array_form(self, halo):
    """
    Converts the :samp:`{halo}` argument to a :samp:`({self}.array_shape.size, 2)`
    shaped array.

    :type halo: :samp:`None`, :obj:`int`, :samp:`self.array_shape.size` length sequence
       of :samp:`int` or :samp:`(self.array_shape.size, 2)` shaped array
       of :samp:`int`
    :param halo: Halo to be converted to :samp:`(len(self.array_shape), 2)` shaped array form.
    :rtype: :obj:`numpy.ndarray`
    :return: A :samp:`(len(self.array_shape), 2)` shaped array of :obj:`numpy.int64` elements.
    """
    # Delegates to the module-level convert_halo_to_array_form, supplying
    # this splitter's dimensionality as ndim.
    return convert_halo_to_array_form(halo=halo, ndim=len(self.array_shape))
@property
def array_shape(self):
    """
    The shape of the array which is to be split. A sequence of :obj:`int` indicating the
    per-axis sizes which are to be split.
    """
    return self.__array_shape

@array_shape.setter
def array_shape(self, array_shape):
    # Stored as given; consistency with the other split parameters is
    # enforced later by check_consistent_parameter_dimensions.
    self.__array_shape = array_shape

@property
def array_start(self):
    """
    The start index. A sequence of :obj:`int` indicating the start of indexing for
    the tile slices. Defaults to :samp:`numpy.zeros_like({self}.array_shape)`.
    """
    return self.__array_start

@array_start.setter
def array_start(self, array_start):
    self.__array_start = array_start

@property
def array_itemsize(self):
    """
    The number of bytes per array element, see :attr:`max_tile_bytes`.
    """
    return self.__array_itemsize

@array_itemsize.setter
def array_itemsize(self, array_itemsize):
    self.__array_itemsize = array_itemsize
@property
def indices_per_axis(self):
    """
    The per-axis indices indicating the cuts for the split.
    A :obj:`list` of 1D :obj:`numpy.ndarray` objects such
    that :samp:`{self}.indices_per_axis[i]` indicates the
    cut positions for axis :samp:`i`.
    """
    return self.__indices_per_axis

@indices_per_axis.setter
def indices_per_axis(self, indices_per_axis):
    self.__indices_per_axis = indices_per_axis

@property
def split_size(self):
    """
    An :obj:`int` indicating the number of tiles in the calculated split.
    """
    return self.__split_size

@split_size.setter
def split_size(self, split_size):
    self.__split_size = split_size

@property
def split_num_slices_per_axis(self):
    """
    Number of slices per axis.
    A 1D :obj:`numpy.ndarray` of :obj:`int` indicating the number of slices (sections)
    per axis, so that :samp:`{self}.split_num_slices_per_axis[i]` is an integer
    indicating the number of sections along axis :samp:`i` in the calculated split.
    """
    return self.__split_num_slices_per_axis

@split_num_slices_per_axis.setter
def split_num_slices_per_axis(self, split_num_slices_per_axis):
    self.__split_num_slices_per_axis = split_num_slices_per_axis

@property
def tile_shape(self):
    """
    The shape of all tiles in the calculated split.
    A 1D :samp:`numpy.ndarray` of :obj:`int` indicating the per-axis
    number of elements for tiles in the calculated split.
    """
    return self.__tile_shape

@tile_shape.setter
def tile_shape(self, tile_shape):
    self.__tile_shape = tile_shape

@property
def max_tile_bytes(self):
    """
    The maximum number of bytes for any tile (including :attr:`halo`) in the returned split.
    An :obj:`int` which constrains the tile shape such that any tile
    from the computed split is no bigger than :samp:`{max_tile_bytes}`.
    """
    return self.__max_tile_bytes

@max_tile_bytes.setter
def max_tile_bytes(self, max_tile_bytes):
    self.__max_tile_bytes = max_tile_bytes

@property
def max_tile_shape(self):
    """
    Per-axis maximum sizes for calculated tiles.
    A 1D :samp:`numpy.ndarray` of :obj:`int` indicating the per-axis
    maximum number of elements for tiles in the calculated split.
    """
    return self.__max_tile_shape

@max_tile_shape.setter
def max_tile_shape(self, max_tile_shape):
    self.__max_tile_shape = max_tile_shape

@property
def sub_tile_shape(self):
    """
    Calculated tile shape will be an integer multiple of this sub-tile shape.
    i.e. :samp:`(self.tile_shape[i] % self.sub_tile_shape[i]) == 0`,
    for :samp:`i in range(0, len(self.tile_shape))`.
    A 1D :samp:`numpy.ndarray` of :obj:`int` indicating sub-tile shape.
    """
    return self.__sub_tile_shape

@sub_tile_shape.setter
def sub_tile_shape(self, sub_tile_shape):
    self.__sub_tile_shape = sub_tile_shape

@property
def halo(self):
    """
    Per-axis -ve and +ve halo sizes for extending tiles to overlap with neighbouring tiles.
    A :samp:`(N, 2)` shaped array where :samp:`halo[i, 0]` is the negative-direction
    halo size and :samp:`halo[i, 1]` the positive-direction halo size for axis :samp:`i`.
    """
    return self.__halo

@halo.setter
def halo(self, halo):
    # Normalise whatever form the caller supplied (None, scalar, 1D or
    # (ndim, 2) sequence) to the canonical (ndim, 2) int64 array.
    self.__halo = convert_halo_to_array_form(halo, ndim=self.array_shape.size)
@property
def tile_bounds_policy(self):
    """
    A string indicating whether tile halo extents can extend beyond the array domain.
    Valid values are indicated by :attr:`valid_tile_bounds_policies`.
    """
    return self.__tile_bounds_policy

@tile_bounds_policy.setter
def tile_bounds_policy(self, tile_bounds_policy):
    # Validation is deferred to check_tile_bounds_policy.
    self.__tile_bounds_policy = tile_bounds_policy

@property
def tile_beg_min(self):
    """
    The per-axis minimum index for :attr:`slice.start`. The per-axis lower bound for
    tile start indices. A 1D :obj:`numpy.ndarray`.
    """
    return self.__tile_beg_min

@tile_beg_min.setter
def tile_beg_min(self, tile_beg_min):
    self.__tile_beg_min = tile_beg_min

@property
def tile_end_max(self):
    """
    The per-axis maximum index for :attr:`slice.stop`. The per-axis upper bound for
    tile stop indices. A 1D :obj:`numpy.ndarray`.
    """
    return self.__tile_end_max

@tile_end_max.setter
def tile_end_max(self, tile_end_max):
    self.__tile_end_max = tile_end_max

@property
def split_shape(self):
    """
    The shape of the calculated split array. Indicates the per-axis number
    of sections in the calculated split. A 1D :obj:`numpy.ndarray`.
    """
    return self.__split_shape

@split_shape.setter
def split_shape(self, split_shape):
    self.__split_shape = split_shape

@property
def split_begs(self):
    """
    The list of per-axis start indices for :obj:`slice` objects.
    A :obj:`list` of 1D :obj:`numpy.ndarray` objects indicating
    the :attr:`slice.start` index for for tiles.
    """
    return self.__split_begs

@split_begs.setter
def split_begs(self, split_begs):
    self.__split_begs = split_begs

@property
def split_ends(self):
    """
    The list of per-axis stop indices for :obj:`slice` objects.
    A :obj:`list` of 1D :obj:`numpy.ndarray` objects indicating
    the :attr:`slice.stop` index for for tiles.
    """
    return self.__split_ends

@split_ends.setter
def split_ends(self, split_ends):
    self.__split_ends = split_ends
def check_tile_bounds_policy(self):
    """
    Raises :obj:`ValueError` if :attr:`tile_bounds_policy`
    is not one of :attr:`valid_tile_bounds_policies`.
    """
    policy = self.tile_bounds_policy
    valid_policies = self.valid_tile_bounds_policies
    if policy not in valid_policies:
        message = (
            "Got self.tile_bounds_policy=%s, which is not in %s."
            %
            (policy, valid_policies)
        )
        raise ValueError(message)
def check_consistent_parameter_dimensions(self):
    """
    Ensure that all parameter dimensions are consistent with
    the :attr:`array_shape` dimension.

    :raises ValueError: For inconsistent parameter dimensions.
    """
    ndim = len(self.array_shape)
    # (attribute name, value, greater_only): the first two parameters may
    # legitimately be *shorter* than array_shape (they get padded later),
    # so only a length *exceeding* ndim is an error; the rest must match
    # ndim exactly.
    checks = (
        ("indices_per_axis", self.indices_per_axis, True),
        ("split_num_slices_per_axis", self.split_num_slices_per_axis, True),
        ("tile_shape", self.tile_shape, False),
        ("sub_tile_shape", self.sub_tile_shape, False),
        ("max_tile_shape", self.max_tile_shape, False),
        ("array_start", self.array_start, False),
    )
    for attr_name, attr_value, greater_only in checks:
        if attr_value is None:
            continue
        length = len(attr_value)
        inconsistent = (length > ndim) if greater_only else (length != ndim)
        if inconsistent:
            raise ValueError(
                "Got len(self.%s)=%s > len(self.array_shape)=%s, should be equal."
                %
                (attr_name, length, ndim)
            )
def check_consistent_parameter_grouping(self):
    """
    Ensures this object does not have conflicting groups of parameters.

    Exactly one of the parameter groups 'indices_per_axis', 'split_size',
    'tile_shape' or 'max_tile_bytes' may be specified.

    :raises ValueError: For conflicting or absent parameters.
    """
    groups = {}
    if self.indices_per_axis is not None:
        groups["indices_per_axis"] = \
            {"self.indices_per_axis": self.indices_per_axis}
    if (self.split_size is not None) or (self.split_num_slices_per_axis is not None):
        groups["split_size"] = \
            {
                "self.split_size": self.split_size,
                "self.split_num_slices_per_axis": self.split_num_slices_per_axis,
            }
    if self.tile_shape is not None:
        groups["tile_shape"] = {"self.tile_shape": self.tile_shape}
    if self.max_tile_bytes is not None:
        groups["max_tile_bytes"] = {"self.max_tile_bytes": self.max_tile_bytes}
    # max_tile_shape and sub_tile_shape belong to the 'max_tile_bytes'
    # group; attach them to it (creating the group entry if needed).
    if self.max_tile_shape is not None:
        groups.setdefault("max_tile_bytes", {})["self.max_tile_shape"] = self.max_tile_shape
    if self.sub_tile_shape is not None:
        groups.setdefault("max_tile_bytes", {})["self.sub_tile_shape"] = self.sub_tile_shape
    self.logger.debug("parameter_groups=%s", groups)
    if len(groups) > 1:
        group_lines = [
            ("Group %18s: " % ("'%s'" % group_key)) + str(groups[group_key])
            for group_key in sorted(groups.keys())
        ]
        raise ValueError(
            "Got conflicting parameter groups specified, "
            +
            "should only specify one group to define the split:\n"
            +
            "\n".join(group_lines)
        )
    if len(groups) <= 0:
        raise ValueError(
            "No split parameters specified, need parameters from one of the groups: "
            +
            "'indices_per_axis', 'split_size', 'tile_shape' or 'max_tile_bytes'"
        )
def check_split_parameters(self):
    """
    Ensures this object has a state consistent with evaluating a split.

    :raises ValueError: For conflicting or absent parameters.
    """
    # Validate the bounds policy first, then per-parameter dimensions,
    # and finally that exactly one parameter group was supplied.
    self.check_tile_bounds_policy()
    self.check_consistent_parameter_dimensions()
    self.check_consistent_parameter_grouping()
def update_tile_extent_bounds(self):
    """
    Recomputes the :attr:`tile_beg_min` and :attr:`tile_end_max`
    bounds according to the current :attr:`tile_bounds_policy`.
    """
    policy = self.tile_bounds_policy
    if policy == NO_BOUNDS:
        # Halo elements may extend tiles beyond the array extent.
        self.tile_beg_min = self.array_start - self.halo[:, 0]
        self.tile_end_max = self.array_start + self.array_shape + self.halo[:, 1]
    elif policy == ARRAY_BOUNDS:
        # Tiles (including halo) are clamped to the array extent.
        self.tile_beg_min = self.array_start
        self.tile_end_max = self.array_start + self.array_shape
def set_split_extents_by_indices_per_axis(self):
    """
    Sets split shape :attr:`split_shape` and
    split extents (:attr:`split_begs` and :attr:`split_ends`)
    from the cut positions recorded in :attr:`indices_per_axis`.

    :raises ValueError: If :attr:`indices_per_axis` is :samp:`None`.
    """
    if self.indices_per_axis is None:
        raise ValueError("Got None for self.indices_per_axis")
    self.logger.debug("self.array_shape=%s", self.array_shape)
    self.logger.debug("self.indices_per_axis=%s", self.indices_per_axis)
    ndim = len(self.array_shape)
    # Axes without an explicit cut list get a None entry.
    self.indices_per_axis = pad_with_none(self.indices_per_axis, ndim)
    # Start/stop extents for each axis slice.
    self.split_shape = _np.ones(ndim, dtype="int64")
    self.split_begs = [[], ] * ndim
    self.split_ends = [[], ] * ndim
    for axis in range(len(self.indices_per_axis)):
        axis_indices = self.indices_per_axis[axis]
        if (axis_indices is None) or (len(axis_indices) <= 0):
            # No cuts: a single slice spanning the whole axis.
            self.split_begs[axis] = [0, ]
            self.split_ends[axis] = [self.array_shape[axis], ]
        else:
            # N cut positions give N+1 sections; each section starts at the
            # previous cut (or 0) and ends at the next cut (or axis length).
            num_slices = len(axis_indices) + 1
            self.split_shape[axis] = num_slices
            begs = _np.zeros((num_slices,), dtype="int64")
            begs[1:] = axis_indices
            ends = _np.zeros((num_slices,), dtype="int64")
            ends[0:-1] = begs[1:]
            ends[-1] = self.array_shape[axis]
            self.split_begs[axis] = begs
            self.split_ends[axis] = ends
    self.logger.debug("self.indices_per_axis=%s", self.indices_per_axis)
def calculate_split_from_extents(self):
    """
    Returns split calculated using extents obtained
    from :attr:`split_begs` and :attr:`split_ends`.
    All calls to calculate the split end up here to produce
    the :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
    of :obj:`tuple`-of-:obj:`slice` elements.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
       where each element is a :obj:`tuple` of :obj:`slice` objects.
    """
    self.logger.debug("self.split_shape=%s", self.split_shape)
    self.logger.debug("self.split_begs=%s", self.split_begs)
    self.logger.debug("self.split_ends=%s", self.split_ends)
    # For every multi-index idx of the split, build a tuple with one slice
    # per axis d.  The `(end > beg)` factor zeroes the halo contribution
    # for empty slices, and the max()/min() clamp each slice
    # to [tile_beg_min[d], tile_end_max[d]] as dictated by the
    # tile bounds policy.
    ret = \
        _np.array(
            [
                tuple(
                    [
                        slice(
                            max([
                                self.split_begs[d][idx[d]]
                                + self.array_start[d]
                                - self.halo[d, 0]
                                * (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]]),
                                self.tile_beg_min[d]
                            ]),
                            min([
                                self.split_ends[d][idx[d]]
                                + self.array_start[d]
                                + self.halo[d, 1]
                                * (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]]),
                                self.tile_end_max[d]
                            ])
                        )
                        for d in range(len(self.split_shape))
                    ]
                )
                # Enumerate every multi-index of the split shape.
                for idx in
                _np.array(
                    _np.unravel_index(
                        _np.arange(0, _np.product(self.split_shape)),
                        self.split_shape
                    )
                ).T
            ],
            # One structured-array field (named "0", "1", ...) per axis,
            # each field holding a slice object.
            dtype=[("%d" % d, "object") for d in range(len(self.split_shape))]
        ).reshape(self.split_shape)
    return ret
def calculate_split_halos_from_extents(self):
    """
    Returns :samp:`(self.ndim, 2)` shaped halo array elements indicating
    the halo for each split. Tiles on the boundary may have the halo trimmed
    to account for the :attr:`tile_bounds_policy`.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
       where each element is a :samp:`(self.ndim, 2)` shaped :obj:`numpy.ndarray`
       indicating the per-axis and per-direction number of halo elements for each tile
       in the split.
    """
    self.logger.debug("self.split_shape=%s", self.split_shape)
    self.logger.debug("self.split_begs=%s", self.split_begs)
    self.logger.debug("self.split_ends=%s", self.split_ends)
    # For each tile, record how many halo elements can actually be added on
    # each side of each axis.  Boundary tiles may get fewer than
    # self.halo[d, :] elements because the extents are limited
    # by [tile_beg_min, tile_end_max]; empty slices
    # (the `(end > beg)` factor) get a zero halo.
    ret = \
        _np.array(
            [
                (
                    tuple(
                        (
                            min([
                                self.split_begs[d][idx[d]] - self.tile_beg_min[d],
                                self.halo[d, 0]
                                *
                                (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]])
                            ]),
                            min([
                                self.tile_end_max[d] - self.split_ends[d][idx[d]],
                                self.halo[d, 1]
                                *
                                (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]])
                            ])
                        )
                        for d in range(len(self.split_shape))
                    )
                )
                # Enumerate every multi-index of the split shape.
                for idx in
                _np.array(
                    _np.unravel_index(
                        _np.arange(0, _np.product(self.split_shape)),
                        self.split_shape
                    )
                ).T
            ],
            # One (2,) int64 field per axis: (negative halo, positive halo).
            dtype=[("%d" % d, "2int64") for d in range(len(self.split_shape))]
        ).reshape(self.split_shape)
    return ret
def calculate_split_by_indices_per_axis(self):
    """
    Returns split calculated using extents obtained
    from :attr:`indices_per_axis`.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
       where each element is a :obj:`tuple` of :obj:`slice` objects.
    """
    # Convert the per-axis cut indices into extents, then form the split.
    self.set_split_extents_by_indices_per_axis()
    return self.calculate_split_from_extents()
def calculate_axis_split_extents(self, num_sections, size):
    """
    Divides :samp:`range(0, {size})` into (approximately) equal sized
    intervals. Returns :samp:`(begs, ends)` where :samp:`slice(begs[i], ends[i])`
    define the intervals for :samp:`i in range(0, {num_sections})`.

    :type num_sections: :obj:`int`
    :param num_sections: Divide :samp:`range(0, {size})` into this
       many (approximately) equal sized intervals.
    :type size: :obj:`int`
    :param size: Range for the subdivision.
    :rtype: :obj:`tuple`
    :return: Two element tuple :samp:`(begs, ends)`
       such that :samp:`slice(begs[i], ends[i])` define the
       intervals for :samp:`i in range(0, {num_sections})`.
    """
    quotient, remainder = divmod(size, num_sections)
    if quotient >= 1:
        # Evenly sized sections of `quotient` elements; the first
        # `remainder` sections each absorb one extra element.
        begs = _np.arange(0, quotient * num_sections, quotient)
        for extra in range(remainder):
            begs[extra + 1:] += 1
        ends = _np.empty_like(begs)
        ends[0:-1] = begs[1:]
        ends[-1] = size
    else:
        # More sections than elements: the first `size` sections hold one
        # element each, the remainder are empty (beg == end == size).
        begs = _np.arange(0, num_sections)
        begs[size:] = size
        ends = begs.copy()
        ends[0:-1] = begs[1:]
    return begs, ends
def set_split_extents_by_split_size(self):
    """
    Sets split shape :attr:`split_shape` and
    split extents (:attr:`split_begs` and :attr:`split_ends`)
    from values in :attr:`split_size` and :attr:`split_num_slices_per_axis`.

    :raises ValueError: If :attr:`split_size` is :samp:`None` and
       :attr:`split_num_slices_per_axis` does not fully determine it.
    """
    if self.split_size is None:
        slices_per_axis = self.split_num_slices_per_axis
        # Both conditions must use short-circuit `and`: the `s > 0`
        # comparison is only valid once all elements are known non-None.
        if (
            _np.all([s is not None for s in slices_per_axis])
            and
            _np.all([s > 0 for s in slices_per_axis])
        ):
            self.split_size = _np.product(slices_per_axis)
        else:
            raise ValueError(
                (
                    "Got invalid self.split_num_slices_per_axis=%s, all elements "
                    +
                    "need to be integers greater than zero when self.split_size is None."
                )
                %
                slices_per_axis
            )
    self.logger.debug(
        "Pre cannonicalise: self.split_num_slices_per_axis=%s",
        self.split_num_slices_per_axis)
    # Fill in any unspecified per-axis section counts so that the product
    # equals split_size.
    self.split_num_slices_per_axis = \
        calculate_num_slices_per_axis(
            self.split_num_slices_per_axis,
            self.split_size,
            self.array_shape
        )
    self.logger.debug(
        "Post cannonicalise: self.split_num_slices_per_axis=%s",
        self.split_num_slices_per_axis)
    # Compute the start and stop indices (extents) for each axis slice.
    ndim = len(self.array_shape)
    self.split_shape = self.split_num_slices_per_axis.copy()
    self.split_begs = [[], ] * ndim
    self.split_ends = [[], ] * ndim
    for axis in range(ndim):
        self.split_begs[axis], self.split_ends[axis] = \
            self.calculate_axis_split_extents(
                self.split_shape[axis],
                self.array_shape[axis]
            )
def calculate_split_by_split_size(self):
    """
    Returns split calculated using extents obtained
    from :attr:`split_size` and :attr:`split_num_slices_per_axis`.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
       where each element is a :obj:`tuple` of :obj:`slice` objects.
    """
    # Convert the requested section counts into extents, then form the split.
    self.set_split_extents_by_split_size()
    return self.calculate_split_from_extents()
def set_split_extents_by_tile_shape(self):
    """
    Sets split shape :attr:`split_shape` and
    split extents (:attr:`split_begs` and :attr:`split_ends`)
    from the value of :attr:`tile_shape`.
    """
    # Ceiling division: number of tiles needed to cover each axis.
    self.split_shape = ((self.array_shape - 1) // self.tile_shape) + 1
    ndim = len(self.array_shape)
    begs = [[], ] * ndim
    ends = [[], ] * ndim
    for axis in range(ndim):
        axis_begs = _np.arange(0, self.array_shape[axis], self.tile_shape[axis])
        axis_ends = _np.zeros_like(axis_begs)
        axis_ends[0:-1] = axis_begs[1:]
        # Last tile may be smaller than tile_shape.
        axis_ends[-1] = self.array_shape[axis]
        begs[axis] = axis_begs
        ends[axis] = axis_ends
    self.split_begs = begs
    self.split_ends = ends
def calculate_split_by_tile_shape(self):
    """
    Returns split calculated using extents obtained
    from :attr:`tile_shape`.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
       where each element is a :obj:`tuple` of :obj:`slice` objects.
    """
    # Convert the fixed tile shape into extents, then form the split.
    self.set_split_extents_by_tile_shape()
    return self.calculate_split_from_extents()
def set_split_extents_by_tile_max_bytes(self):
    """
    Sets split extents (:attr:`split_begs`
    and :attr:`split_ends`) calculated
    from :attr:`max_tile_bytes`
    (and :attr:`max_tile_shape`, :attr:`sub_tile_shape`, :attr:`halo`).
    """
    # Derive a concrete tile shape satisfying the byte/shape constraints,
    # then fall back to the tile-shape based extent calculation.
    self.tile_shape = \
        calculate_tile_shape_for_max_bytes(
            array_shape=self.array_shape,
            array_itemsize=self.array_itemsize,
            max_tile_bytes=self.max_tile_bytes,
            max_tile_shape=self.max_tile_shape,
            sub_tile_shape=self.sub_tile_shape,
            halo=self.halo
        )
    self.set_split_extents_by_tile_shape()
def calculate_split_by_tile_max_bytes(self):
    """
    Returns split calculated using extents obtained
    from :attr:`max_tile_bytes`
    (and :attr:`max_tile_shape`, :attr:`sub_tile_shape`, :attr:`halo`).

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
       where each element is a :obj:`tuple` of :obj:`slice` objects.
    """
    # Convert the byte constraint into extents, then form the split.
    self.set_split_extents_by_tile_max_bytes()
    return self.calculate_split_from_extents()
def set_split_extents(self):
    """
    Sets split extents (:attr:`split_begs`
    and :attr:`split_ends`) calculated using
    whichever parameter group was supplied to :meth:`__init__`.
    """
    self.check_split_parameters()
    self.update_tile_extent_bounds()
    # Dispatch to the extent calculation matching the (single) parameter
    # group that was specified; first match wins.
    for is_specified, set_extents in (
        (
            self.indices_per_axis is not None,
            self.set_split_extents_by_indices_per_axis
        ),
        (
            (self.split_size is not None) or (self.split_num_slices_per_axis is not None),
            self.set_split_extents_by_split_size
        ),
        (
            self.tile_shape is not None,
            self.set_split_extents_by_tile_shape
        ),
        (
            self.max_tile_bytes is not None,
            self.set_split_extents_by_tile_max_bytes
        ),
    ):
        if is_specified:
            set_extents()
            break
def calculate_split(self):
    """
    Computes the split.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
       of dimension :samp:`len({self}.array_shape)`.
       Each element of the returned array is a :obj:`tuple`
       containing :samp:`len({self}.array_shape)` elements, with each element
       being a :obj:`slice` object. Each :obj:`tuple` defines a slice within
       the bounds :samp:`{self}.array_start - {self}.halo[:, 0]`
       to :samp:`{self}.array_start + {self}.array_shape + {self}.halo[:, 1]`.
    """
    # Resolve the chosen parameter group into extents, then materialise
    # the structured array of slice tuples.
    self.set_split_extents()
    split = self.calculate_split_from_extents()
    return split
ShapeSplitter([0, ]).__init__.__func__.__doc__ = \
"""
Initialises parameters which define a split.
%s
%s
.. seealso:: :ref:`array_split-examples`
""" % (
_array_shape_param_doc,
(
_ShapeSplitter__init__params_doc
%
(
_array_start_param_doc,
"\n" + _array_itemsize_param_doc,
_halo_param_doc,
_array_tile_bounds_policy_param_doc,
)
)
)
def shape_split(array_shape, *args, **kwargs):
    "To be replaced."
    # Delegate all work to a ShapeSplitter; the full docstring is assigned
    # to shape_split.__doc__ just below this definition.
    splitter = ShapeSplitter(array_shape, *args, **kwargs)
    return splitter.calculate_split()
shape_split.__doc__ =\
"""
Splits specified :samp:`{array_shape}` in tiles, returns array of :obj:`slice` tuples.
%s
%s
:rtype: :obj:`numpy.ndarray`
:return: Array of :obj:`tuple` objects. Each :obj:`tuple` element
is a :obj:`slice` object so that each :obj:`tuple` defines
a multi-dimensional slice of an array of shape :samp:`{array_shape}`.
.. seealso:: :func:`array_split.array_split`, :meth:`array_split.ShapeSplitter`,
:ref:`array_split-examples`
""" % (
_array_shape_param_doc,
(
_ShapeSplitter__init__params_doc
%
(
_array_start_param_doc,
"\n" + _array_itemsize_param_doc,
_halo_param_doc,
_array_tile_bounds_policy_param_doc,
)
)
)
def array_split(
    ary,
    indices_or_sections=None,
    axis=None,
    tile_shape=None,
    max_tile_bytes=None,
    max_tile_shape=None,
    sub_tile_shape=None,
    halo=None
):
    "To be replaced."
    # Compute the tuple-of-slice split for the array's shape, then take the
    # corresponding sub-array view for each tile.  The full docstring is
    # assigned to array_split.__doc__ just below this definition.
    slices = shape_split(
        array_shape=ary.shape,
        indices_or_sections=indices_or_sections,
        axis=axis,
        array_start=None,
        array_itemsize=ary.itemsize,
        tile_shape=tile_shape,
        max_tile_bytes=max_tile_bytes,
        max_tile_shape=max_tile_shape,
        sub_tile_shape=sub_tile_shape,
        halo=halo,
        tile_bounds_policy=ARRAY_BOUNDS
    ).flatten()
    return [ary[slyce] for slyce in slices]
array_split.__doc__ =\
"""
Splits the specified array :samp:`{ary}` into sub-arrays, returns list of :obj:`numpy.ndarray`.
:type ary: :obj:`numpy.ndarray`
:param ary: Array which is split into sub-arrays.
%s
:rtype: :obj:`list`
:return: List of :obj:`numpy.ndarray` elements, where each element is
a *slice* from :samp:`{ary}` (potentially an empty slice).
.. seealso:: :func:`array_split.shape_split`, :meth:`array_split.ShapeSplitter`,
:ref:`array_split-examples`
""" % (
_ShapeSplitter__init__params_doc
%
(
"",
"",
_halo_param_doc.replace("len({array_shape})", "len({ary}.shape)"),
""
)
)
__all__ = [s for s in dir() if not s.startswith('_')]
|
def convert_halo_to_array_form(halo, ndim):
    """
    Converts the :samp:`{halo}` argument to a :samp:`({ndim}, 2)`
    shaped array.

    :type halo: :samp:`None`, :obj:`int`, an :samp:`{ndim}` length sequence
       of :samp:`int` or :samp:`({ndim}, 2)` shaped array
       of :samp:`int`
    :param halo: Halo to be converted to :samp:`({ndim}, 2)` shaped array form.
    :type ndim: :obj:`int`
    :param ndim: Number of dimensions.
    :rtype: :obj:`numpy.ndarray`
    :return: A :samp:`({ndim}, 2)` shaped array of :obj:`numpy.int64` elements.

    Examples::

        >>> convert_halo_to_array_form(halo=2, ndim=4)
        array([[2, 2],
               [2, 2],
               [2, 2],
               [2, 2]])
        >>> convert_halo_to_array_form(halo=[0, 1, 2], ndim=3)
        array([[0, 0],
               [1, 1],
               [2, 2]])
        >>> convert_halo_to_array_form(halo=[[0, 1], [2, 3], [3, 4]], ndim=3)
        array([[0, 1],
               [2, 3],
               [3, 4]])
    """
    dtyp = _np.int64
    if halo is None:
        halo = _np.zeros((ndim, 2), dtype=dtyp)
    elif is_scalar(halo):
        halo = _np.zeros((ndim, 2), dtype=dtyp) + halo
    elif (ndim == 1) and (_np.array(halo).shape == (2,)):
        halo = _np.array([halo, ], copy=True, dtype=dtyp)
    elif len(_np.array(halo).shape) == 1:
        halo = _np.array([halo, halo], dtype=dtyp).T.copy()
    else:
        halo = _np.array(halo, copy=True, dtype=dtyp)
        if halo.shape[0] != ndim:
            raise ValueError(
                "Got halo.shape=%s, expecting halo.shape=(%s, 2)"
                %
                (halo.shape, ndim)
            )
    return halo
===================================
The :mod:`array_split.split` Module
===================================
.. currentmodule:: array_split.split
Defines array splitting functions and classes.
Classes and Functions
=====================
.. autosummary::
:toctree: generated/
shape_factors - Compute *largest* factors of a given integer.
calculate_num_slices_per_axis - Computes per-axis divisions for a multi-dimensional shape.
calculate_tile_shape_for_max_bytes - Calculate a tile shape subject to max bytes restriction.
convert_halo_to_array_form - converts halo argument to :samp:`(ndim, 2)` shaped array.
ShapeSplitter - Splits a given shape into slices.
shape_split - Splits a specified shape and returns :obj:`numpy.ndarray` of :obj:`slice` elements.
array_split - Equivalent to :func:`numpy.array_split`.
Attributes
==========
.. autodata:: ARRAY_BOUNDS
.. autodata:: NO_BOUNDS
Utilities
=========
.. autosummary::
:toctree: generated/
is_scalar - Return :samp:`True` if argument is numeric scalar.
is_sequence - Return :samp:`True` if argument is a sequence.
is_indices - Return :samp:`True` if argument is a sequence.
pad_with_object - End pads a sequence with specified object.
pad_with_none - End pads a sequence with :samp:`None` elements.
"""
from __future__ import absolute_import
import numpy as _np
from .license import license as _license, copyright as _copyright, version as _version
from . import logging as _logging
__copyright__ = _copyright()
__version__ = _version()
__author__ = "Shane J. Latham"
__license__ = _license()
def is_scalar(obj):
    """
    Returns :samp:`True` if argument :samp:`{obj}` is
    a numeric type.

    :type obj: :obj:`object`
    :param obj: Return :samp:`True` if this is a scalar.
    :rtype: :obj:`bool`
    :return: :samp:`True` if :samp:`{obj}` is a numeric scalar.

    Example::

        >>> is_scalar(5)
        True
        >>> is_scalar(2.0)
        True
        >>> is_scalar([1, 2, 3])
        False
    """
    # Duck-typed check: numeric scalars convert to an integer type
    # ("__long__" covers Python 2 long integers).
    return any(hasattr(obj, attr) for attr in ("__int__", "__long__"))
def is_sequence(obj):
    """
    Returns :samp:`True` if argument :samp:`{obj}` is
    a sequence (e.g. a :obj:`list` or :obj:`tuple`, etc).

    :type obj: :obj:`object`
    :param obj: Return :samp:`True` if this is a sequence.
    :rtype: :obj:`bool`
    :return: :samp:`True` if :samp:`{obj}` is a sequence.

    Example::

        >>> is_sequence([1, 2, 3])
        True
        >>> is_sequence(5)
        False
    """
    # Duck-typed check: anything sized, subscriptable or iterable counts.
    return any(
        hasattr(obj, attr)
        for attr in ("__len__", "__getitem__", "__iter__")
    )
def is_indices(indices_or_sections):
    """
    Test for the :samp:`{indices_or_sections}` argument of :meth:`ShapeSplitter.__init__`
    to determine whether it is specifying *total number of tiles* or a sequence of
    *cut* indices. Returns :samp:`True` if argument :samp:`{indices_or_sections}` is
    a sequence (e.g. a :obj:`list` or :obj:`tuple`, etc).

    :type indices_or_sections: :obj:`object`
    :param indices_or_sections: Return :samp:`True` if this is a sequence.
    :rtype: :obj:`bool`
    :return: :samp:`is_sequence({indices_or_sections})`.
    """
    # A sequence means "cut indices"; a scalar means "number of sections".
    return is_sequence(indices_or_sections)
def pad_with_object(sequence, new_length, obj=None):
    """
    Returns :samp:`sequence` :obj:`list` end-padded with :samp:`{obj}`
    elements so that the length of the returned list equals :samp:`{new_length}`.

    :type sequence: iterable
    :param sequence: Return *listified* sequence which has been end-padded.
    :type new_length: :obj:`int`
    :param new_length: The length of the returned list.
    :type obj: :obj:`object`
    :param obj: Object used as padding elements.
    :rtype: :obj:`list`
    :return: A :obj:`list` of length :samp:`{new_length}` (when padding is
       needed; a sequence already of length :samp:`{new_length}` is
       returned unchanged).
    :raises ValueError: if :samp:`len({sequence}) > {new_length})`.

    Example::

        >>> pad_with_object([1, 2, 3], 5, obj=0)
        [1, 2, 3, 0, 0]
    """
    current_length = len(sequence)
    if current_length > new_length:
        raise ValueError(
            "Got len(sequence)=%s which exceeds new_length=%s"
            %
            (current_length, new_length)
        )
    if current_length < new_length:
        sequence = list(sequence) + [obj, ] * (new_length - current_length)
    return sequence
def pad_with_none(sequence, new_length):
    """
    Returns :samp:`sequence` :obj:`list` end-padded with :samp:`None`
    elements so that the length of the returned list equals :samp:`{new_length}`.

    :type sequence: iterable
    :param sequence: Return *listified* sequence which has been end-padded.
    :type new_length: :obj:`int`
    :param new_length: The length of the returned list.
    :rtype: :obj:`list`
    :return: A :obj:`list` of length :samp:`{new_length}`.
    :raises ValueError: if :samp:`len({sequence}) > {new_length})`.
    """
    # Convenience wrapper: pad_with_object with None as the fill element.
    return pad_with_object(sequence, new_length, obj=None)
def shape_factors(n, dim=2):
    """
    Returns a :obj:`numpy.ndarray` of factors :samp:`f` such
    that :samp:`(len(f) == {dim}) and (numpy.product(f) == {n})`.
    The returned factors are as *square* (*cubic*, etc) as possible.

    For example::

        >>> shape_factors(24, 2)
        array([4, 6])
        >>> shape_factors(24, 3)
        array([2, 3, 4])

    :type n: :obj:`int`
    :param n: Integer which is factored into :samp:`{dim}` factors.
    :type dim: :obj:`int`
    :param dim: Number of factors.
    :rtype: :obj:`numpy.ndarray`
    :return: A :samp:`({dim},)` shaped array of integers which are factors of :samp:`{n}`.
    """
    if dim <= 1:
        factor_list = [n, ]
    else:
        # Search downwards from ceil(n ** (1/dim)) for the largest divisor,
        # then recursively factor the quotient into dim - 1 factors.
        # candidate == 1 always divides, so factor_list is always assigned.
        for candidate in range(int(n ** (1.0 / float(dim))) + 1, 0, -1):
            if (n % candidate) == 0:
                factor_list = \
                    [candidate, ] + list(shape_factors(n // candidate, dim=dim - 1))
                break
    factor_list.sort()
    return _np.array(factor_list)
def calculate_tile_shape_for_max_bytes(
    array_shape,
    array_itemsize,
    max_tile_bytes,
    max_tile_shape=None,
    sub_tile_shape=None,
    halo=None
):
    """
    Returns a tile shape :samp:`tile_shape`
    such that :samp:`numpy.product(tile_shape)*numpy.sum({array_itemsize}) <= {max_tile_bytes}`.
    Also, if :samp:`{max_tile_shape} is not None`
    then :samp:`numpy.all(tile_shape <= {max_tile_shape}) is True` and
    if :samp:`{sub_tile_shape} is not None`
    the :samp:`numpy.all((tile_shape % {sub_tile_shape}) == 0) is True`.

    :type array_shape: sequence of :obj:`int`
    :param array_shape: Shape of the array which is to be split into tiles.
    :type array_itemsize: :obj:`int`
    :param array_itemsize: The number of bytes per element of the array to be tiled.
    :type max_tile_bytes: :obj:`int`
    :param max_tile_bytes: The maximum number of bytes for the returned :samp:`tile_shape`.
    :type max_tile_shape: sequence of :obj:`int`
    :param max_tile_shape: Per axis maximum shapes for the returned :samp:`tile_shape`.
    :type sub_tile_shape: sequence of :obj:`int`
    :param sub_tile_shape: The returned :samp:`tile_shape` will be an even multiple
       of this sub-tile shape.
    :type halo: :obj:`int`, sequence of :obj:`int`, or :samp:`(len({array_shape}), 2)`
       shaped :obj:`numpy.ndarray`
    :param halo: How tiles are extended in each axis direction with *halo*
       elements. See :ref:`the-halo-parameter-examples` for meaning of :samp:`{halo}` values.
    :rtype: :obj:`numpy.ndarray`
    :return: A 1D array of shape :samp:`(len(array_shape),)` indicating a *tile shape*
       which will (approximately) uniformly divide the given :samp:`{array_shape}` into
       tiles (sub-arrays).

    Examples::

       >>> from array_split.split import calculate_tile_shape_for_max_bytes
       >>> calculate_tile_shape_for_max_bytes(
       ... array_shape=[512,],
       ... array_itemsize=1,
       ... max_tile_bytes=512
       ... )
       array([512])
       >>> calculate_tile_shape_for_max_bytes(
       ... array_shape=[512,],
       ... array_itemsize=2,  # Doubling the itemsize halves the tile size.
       ... max_tile_bytes=512
       ... )
       array([256])
       >>> calculate_tile_shape_for_max_bytes(
       ... array_shape=[512,],
       ... array_itemsize=1,
       ... max_tile_bytes=512-1  # tile shape will now be halved
       ... )
       array([256])
    """
    logger = _logging.getLogger(__name__ + ".calculate_tile_shape_for_max_bytes")
    logger.debug("calculate_tile_shape_for_max_bytes: enter:")
    logger.debug("array_shape=%s", array_shape)
    logger.debug("array_itemsize=%s", array_itemsize)
    logger.debug("max_tile_bytes=%s", max_tile_bytes)
    logger.debug("max_tile_shape=%s", max_tile_shape)
    logger.debug("sub_tile_shape=%s", sub_tile_shape)
    logger.debug("halo=%s", halo)
    # Normalise shape-like arguments to int64 numpy arrays; a sequence
    # of itemsizes is collapsed to its sum.
    array_shape = _np.array(array_shape, dtype="int64")
    array_itemsize = _np.sum(array_itemsize, dtype="int64")
    if max_tile_shape is None:
        max_tile_shape = _np.array(array_shape, copy=True)
    # A tile can never be larger than the array itself.
    max_tile_shape = \
        _np.array(_np.minimum(max_tile_shape, array_shape), copy=True, dtype=array_shape.dtype)
    if sub_tile_shape is None:
        # Default sub-tile is a single element, i.e. no multiple constraint.
        sub_tile_shape = _np.ones((len(array_shape),), dtype="int64")
    sub_tile_shape = _np.array(sub_tile_shape, dtype="int64")
    # Canonicalise halo to the (ndim, 2) per-axis/per-direction array form.
    halo = convert_halo_to_array_form(halo=halo, ndim=len(array_shape))
    if _np.any(array_shape < sub_tile_shape):
        raise ValueError(
            "Got array_shape=%s element less than corresponding sub_tile_shape=%s element."
            %
            (
                array_shape,
                sub_tile_shape
            )
        )
    logger.debug("max_tile_shape=%s", max_tile_shape)
    logger.debug("sub_tile_shape=%s", sub_tile_shape)
    logger.debug("halo=%s", halo)
    # Work in units of whole sub-tiles: number of sub-tiles needed to cover
    # the array (rounded up) and the number which fit exactly (rounded down).
    array_sub_tile_split_shape = ((array_shape - 1) // sub_tile_shape) + 1
    tile_sub_tile_split_shape = array_shape // sub_tile_shape
    if len(tile_sub_tile_split_shape) <= 1:
        # 1D case: solve directly for the number of sub-tiles per tile
        # which fits in max_tile_bytes, accounting for halo elements.
        tile_sub_tile_split_shape[0] = \
            int(_np.floor(
                (
                    (max_tile_bytes / float(array_itemsize))
                    -
                    _np.sum(halo)
                )
                /
                float(sub_tile_shape[0])
            ))
    # Clamp to the per-axis maximum tile shape (in sub-tile units).
    tile_sub_tile_split_shape = \
        _np.minimum(
            tile_sub_tile_split_shape,
            max_tile_shape // sub_tile_shape
        )
    logger.debug("Pre loop: tile_sub_tile_split_shape=%s", tile_sub_tile_split_shape)
    # Greedily shrink the tile one axis at a time (axis 0 first) until the
    # halo-extended tile fits within max_tile_bytes.
    current_axis = 0
    while (
        (current_axis < len(tile_sub_tile_split_shape))
        and
        (
            (
                _np.product(tile_sub_tile_split_shape * sub_tile_shape + _np.sum(halo, axis=1))
                *
                array_itemsize
            )
            >
            max_tile_bytes
        )
    ):
        if current_axis < (len(tile_sub_tile_split_shape) - 1):
            # Not the last axis: collapse this axis to a single sub-tile,
            # then re-solve for how many sub-tiles actually fit.
            tile_sub_tile_split_shape[current_axis] = 1
            tile_sub_tile_split_shape[current_axis] = \
                (
                    max_tile_bytes
                    //
                    (
                        _np.product(
                            tile_sub_tile_split_shape *
                            sub_tile_shape +
                            _np.sum(
                                halo,
                                axis=1))
                        *
                        array_itemsize
                    )
                )
            # Never shrink below one sub-tile along an axis.
            tile_sub_tile_split_shape[current_axis] = \
                max([1, tile_sub_tile_split_shape[current_axis]])
        else:
            # Last axis: all other axes are already minimal; solve exactly
            # for the sub-tile count along this axis using the halo-extended
            # sub-tile shape of the preceding axes.
            sub_tile_shape_h = sub_tile_shape.copy()
            sub_tile_shape_h[0:current_axis] += _np.sum(halo[0:current_axis, :], axis=1)
            tile_sub_tile_split_shape[current_axis] = \
                int(_np.floor(
                    (
                        (max_tile_bytes / float(array_itemsize))
                        -
                        _np.sum(halo[current_axis]) * _np.product(sub_tile_shape_h[0:current_axis])
                    )
                    /
                    float(_np.product(sub_tile_shape_h))
                ))
        current_axis += 1
    logger.debug("Post loop: tile_sub_tile_split_shape=%s", tile_sub_tile_split_shape)
    # Convert back from sub-tile units to element units.
    tile_shape = _np.minimum(array_shape, tile_sub_tile_split_shape * sub_tile_shape)
    logger.debug("pre cannonicalise tile_shape=%s", tile_shape)
    # Cannonicalise: recompute the tile shape so that tiles (approximately)
    # evenly divide the array while remaining a multiple of sub_tile_shape.
    tile_split_shape = ((array_shape - 1) // tile_shape) + 1
    logger.debug("tile_split_shape=%s", tile_split_shape)
    tile_shape = (((array_sub_tile_split_shape - 1) // tile_split_shape) + 1) * sub_tile_shape
    logger.debug("post cannonicalise tile_shape=%s", tile_shape)
    return tile_shape
def calculate_num_slices_per_axis(num_slices_per_axis, num_slices, max_slices_per_axis=None):
    """
    Returns a :obj:`numpy.ndarray` (:samp:`return_array` say) where non-positive elements of
    the :samp:`{num_slices_per_axis}` sequence have been replaced with
    positive integer values such that :samp:`numpy.product(return_array) == num_slices`
    and::

       numpy.all(
           return_array[numpy.where(num_slices_per_axis <= 0)]
           <=
           max_slices_per_axis[numpy.where(num_slices_per_axis <= 0)]
       ) is True

    :type num_slices_per_axis: sequence of :obj:`int`
    :param num_slices_per_axis: Constraint for per-axis sub-divisions.
       Non-positive elements indicate values to be replaced in the
       returned array. Positive values are identical to the corresponding
       element in the returned array.
    :type num_slices: integer
    :param num_slices: Indicates the number of slices (rectangular sub-arrays)
       formed by performing sub-divisions per axis. The returned array :samp:`return_array`
       has elements assigned such that :samp:`numpy.product(return_array) == {num_slices}`.
    :type max_slices_per_axis: sequence of :obj:`int` (or :samp:`None`)
    :param max_slices_per_axis: Constraint specifying maximum number of per-axis sub-divisions.
       If :samp:`None` defaults to :samp:`numpy.array([numpy.inf,]*len({num_slices_per_axis}))`.
    :rtype: :obj:`numpy.ndarray`
    :return: An array :samp:`return_array`
       such that :samp:`numpy.product(return_array) == num_slices`.

    Examples::

       >>> from array_split.split import calculate_num_slices_per_axis
       >>>
       >>> calculate_num_slices_per_axis([0, 0, 0], 16)
       array([4, 2, 2])
       >>> calculate_num_slices_per_axis([1, 0, 0], 16)
       array([1, 4, 4])
       >>> calculate_num_slices_per_axis([1, 0, 0], 16, [2, 2, 16])
       array([1, 2, 8])
    """
    logger = _logging.getLogger(__name__)
    ret_array = _np.array(num_slices_per_axis, copy=True)
    if max_slices_per_axis is None:
        # No explicit per-axis limit: treat every axis as unbounded.
        max_slices_per_axis = _np.array([_np.inf, ] * len(num_slices_per_axis))
    max_slices_per_axis = _np.array(max_slices_per_axis)
    if _np.any(max_slices_per_axis <= 0):
        raise ValueError("Got non-positive value in max_slices_per_axis=%s" % max_slices_per_axis)
    # Repeatedly fill in the non-positive ("choose for me") entries until
    # every axis has an explicit positive slice count.
    while _np.any(ret_array <= 0):
        prd = _np.product(ret_array[_np.where(ret_array > 0)])  # returns 1 for zero-length array
        if (num_slices < prd) or ((num_slices % prd) > 0):
            # The fixed (positive) entries already make num_slices unreachable.
            raise ValueError(
                (
                    "Unable to construct grid of num_slices=%s elements from "
                    +
                    "num_slices_per_axis=%s (with max_slices_per_axis=%s)"
                )
                %
                (num_slices, num_slices_per_axis, max_slices_per_axis)
            )
        ridx = _np.where(ret_array <= 0)
        # Factor the remaining slice count over the unspecified axes; reverse
        # so the largest factors are assigned to the earliest axes.
        f = shape_factors(num_slices // prd, ridx[0].shape[0])[::-1]
        if _np.all(f < max_slices_per_axis[ridx]):
            ret_array[ridx] = f
        else:
            # Some factor exceeds its per-axis maximum: clamp those axes to
            # their maximum (decrementing until the running product divides
            # num_slices) and loop again to redistribute the remainder.
            for i in range(ridx[0].shape[0]):
                if f[i] >= max_slices_per_axis[ridx[0][i]]:
                    ret_array[ridx[0][i]] = max_slices_per_axis[ridx[0][i]]
                    prd = _np.product(ret_array[_np.where(ret_array > 0)])
                    while (num_slices % prd) > 0:
                        ret_array[ridx[0][i]] -= 1
                        prd = _np.product(ret_array[_np.where(ret_array > 0)])
        logger.debug(
            "ridx=%s, f=%s, ret_array=%s, max_slices_per_axis=%s",
            ridx, f, ret_array, max_slices_per_axis
        )
    return ret_array
_array_shape_param_doc =\
"""
:type array_shape: sequence of :obj:`int`
:param array_shape: The shape to be *split*.
"""
_array_start_param_doc =\
"""
:type array_start: :samp:`None` or sequence of :obj:`int`
:param array_start: The start index. Defaults to :samp:`[0,]*len(array_shape)`.
The array indexing extents are assumed to range from :samp:`{array_start}`
to :samp:`{array_start} + {array_shape}`.
See :ref:`the-array_start-parameter-examples` examples.
"""
_array_itemsize_param_doc =\
"""
:type array_itemsize: int or sequence of :obj:`int`
:param array_itemsize: Number of bytes per array element.
Only relevant when :samp:`{max_tile_bytes}` is specified.
See :ref:`splitting-by-maximum-bytes-per-tile-examples` examples.
"""
_array_tile_bounds_policy_param_doc =\
"""
:type tile_bounds_policy: :obj:`str`
:param tile_bounds_policy: Specifies whether tiles can extend beyond the array boundaries.
Only relevant for halo values greater than one. If :samp:`{tile_bounds_policy}`
is :data:`ARRAY_BOUNDS`
then the calculated tiles will not extend beyond the array
extents :samp:`{array_start}` and :samp:`{array_start} + {array_shape}`.
If :samp:`{tile_bounds_policy}` is :data:`NO_BOUNDS`
then the returned tiles will extend beyond
the :samp:`{array_start}` and :samp:`{array_start} + {array_shape}` extend
for positive :samp:`{halo}` values. See :ref:`the-halo-parameter-examples` examples.
"""
_ShapeSplitter__init__params_doc =\
"""
:type indices_or_sections: :samp:`None`, :obj:`int` or sequence of :obj:`int`
:param indices_or_sections: If an integer, indicates the number of
elements in the calculated *split* array. If a sequence, indicates
the indices (per axis) at which the splits occur.
See :ref:`splitting-by-number-of-tiles-examples` examples.
:type axis: :samp:`None`, :obj:`int` or sequence of :obj:`int`
:param axis: If an integer, indicates the axis which is to be split.
If a sequence integers, indicates the number of slices per axis,
i.e. if :samp:`{axis} = [3, 5]` then axis :samp:`0` is cut into
3 slices and axis :samp:`1` is cut into 5 slices for a total
of 15 (:samp:`3*5`) rectangular slices in the returned :samp:`(3, 5)`
shaped split.
See :ref:`splitting-by-number-of-tiles-examples` examples
and :ref:`splitting-by-per-axis-split-indices-examples` examples.
%s%s
:type tile_shape: :samp:`None` or sequence of :obj:`int`
:param tile_shape: When not :samp:`None`, specifies explicit shape for tiles.
Should be same length as :samp:`{array_shape}`.
See :ref:`splitting-by-tile-shape-examples` examples.
:type max_tile_bytes: :samp:`None` or :obj:`int`
:param max_tile_bytes: The maximum number of bytes for calculated :samp:`tile_shape`.
See :ref:`splitting-by-maximum-bytes-per-tile-examples` examples.
:type max_tile_shape: :samp:`None` or sequence of :obj:`int`
:param max_tile_shape: Per axis maximum shapes for the calculated :samp:`tile_shape`.
Only relevant when :samp:`{max_tile_bytes}` is specified. Should be same length
as :samp:`{array_shape}`.
See :ref:`splitting-by-maximum-bytes-per-tile-examples` examples.
:type sub_tile_shape: :samp:`None` or sequence of :obj:`int`
:param sub_tile_shape: When not :samp:`None`, the calculated :samp:`tile_shape` will
be an even multiple of this sub-tile shape. Only relevant when :samp:`{max_tile_bytes}`
is specified. Should be same length as :samp:`{array_shape}`.
See :ref:`splitting-by-maximum-bytes-per-tile-examples` examples.%s%s
"""
_halo_param_doc =\
"""
:type halo: :samp:`None`, :obj:`int`, sequence of :obj:`int`, or :samp:`(len({array_shape}), 2)`
shaped :obj:`numpy.ndarray`
:param halo: How tiles are extended per axis in -ve and +ve directions with *halo*
elements. See :ref:`the-halo-parameter-examples` examples.
"""
#: Indicates that tiles are always within the array bounds.
#: See :ref:`the-halo-parameter-examples` examples.
__ARRAY_BOUNDS = "array_bounds"


@property
def ARRAY_BOUNDS():  # pylint: disable=invalid-name
    """
    Indicates that tiles are always within the array bounds,
    resulting in tiles which have truncated halos.
    See :ref:`the-halo-parameter-examples` examples.
    """
    # NOTE(review): a @property on a *module-level* function only behaves as
    # a read-only attribute if the module object is later replaced with a
    # class instance (module-properties trick) -- confirm how this module is
    # initialised before relying on attribute-style access.
    return __ARRAY_BOUNDS


#: Indicates that tiles may extend beyond the array bounds.
#: See :ref:`the-halo-parameter-examples` examples.
__NO_BOUNDS = "no_bounds"


@property
def NO_BOUNDS():  # pylint: disable=invalid-name
    """
    Indicates that tiles may have halos which extend beyond the array bounds.
    See :ref:`the-halo-parameter-examples` examples.
    """
    # NOTE(review): same module-level @property caveat as ARRAY_BOUNDS above.
    return __NO_BOUNDS
class ShapeSplitter(object):
    """
    Implements array shape splitting. There are three main (top-level) methods:

    :meth:`__init__`
       Initialisation of parameters which define the split.
    :meth:`set_split_extents`
       Calculates the per-axis indices for the cuts. Sets
       the :attr:`split_shape`, :attr:`split_begs`
       and :attr:`split_ends` attributes.
    :meth:`calculate_split`
       Calls :meth:`set_split_extents` followed
       by :meth:`calculate_split_from_extents` to
       return the :obj:`numpy.ndarray` of :obj:`tuple` elements (slices).

    Example::

       >>> import numpy as np
       >>> ary = np.arange(0, 10)
       >>> splitter = ShapeSplitter(ary.shape, 3)
       >>> split = splitter.calculate_split()
       >>> split.shape
       (3,)
       >>> split
       array([(slice(0, 4, None),), (slice(4, 7, None),), (slice(7, 10, None),)],
             dtype=[('0', 'O')])
       >>> [ary[slyce] for slyce in split.flatten()]
       [array([0, 1, 2, 3]), array([4, 5, 6]), array([7, 8, 9])]
       >>>
       >>> splitter.split_shape  # equivalent to split.shape above
       array([3])
       >>> splitter.split_begs  # start indices for tile extents
       [array([0, 4, 7])]
       >>> splitter.split_ends  # stop indices for tile extents
       [array([ 4,  7, 10])]

    """

    #: Class attribute for :obj:`logging.Logger` logging.
    logger = _logging.getLogger(__name__ + ".ShapeSplitter")

    #: Class attribute indicating list of valid values for :attr:`tile_bounds_policy`.
    #: See :data:`ARRAY_BOUNDS` and :data:`NO_BOUNDS`.
    valid_tile_bounds_policies = [ARRAY_BOUNDS, NO_BOUNDS]
    def __init__(
        self,
        array_shape,
        indices_or_sections=None,
        axis=None,
        array_start=None,
        array_itemsize=1,
        tile_shape=None,
        max_tile_bytes=None,
        max_tile_shape=None,
        sub_tile_shape=None,
        halo=None,
        tile_bounds_policy=ARRAY_BOUNDS
    ):
        """
        Initialise the parameters which define the split.

        The individual parameters are described by the module-level
        :samp:`_ShapeSplitter__init__params_doc` template.
        """
        # Initialise *private* attributes.
        self.__array_shape = None
        self.__array_start = None
        self.__array_itemsize = None
        self.__indices_per_axis = None
        self.__split_size = None
        self.__split_num_slices_per_axis = None
        self.__tile_shape = None
        self.__max_tile_bytes = None
        self.__max_tile_shape = None
        self.__sub_tile_shape = None
        self.__halo = None
        self.__tile_bounds_policy = None
        self.__tile_beg_min = None
        self.__tile_end_max = None
        self.__split_shape = None
        self.__split_begs = None
        self.__split_ends = None

        # Now set properties from arguments
        self.array_shape = _np.array(array_shape)
        if array_start is None:
            # Default: tile indexing starts at the origin.
            array_start = _np.zeros_like(self.array_shape)
        self.array_start = array_start
        self.array_itemsize = array_itemsize

        # Disambiguate indices_or_sections: a sequence of indices means
        # explicit per-axis cut positions, otherwise it is a section count.
        indices_per_axis = None
        if is_indices(indices_or_sections):
            num_subarrays = None
            indices_per_axis = indices_or_sections
            if (
                ((axis is None) or is_scalar(axis))
                and
                (not _np.any([is_sequence(_e) for _e in indices_or_sections]))
            ):
                if axis is None:
                    axis = 0
                # Make indices_per_axis a list of lists, so that
                # element 0 is a list of indices for axis 0
                indices_per_axis = [None, ] * len(array_shape)
                indices_per_axis[axis] = indices_or_sections
        else:
            indices_per_axis = None
            num_subarrays = indices_or_sections
        self.indices_per_axis = indices_per_axis
        self.split_size = num_subarrays

        # Derive the per-axis slice counts from the axis argument and/or the
        # total split size.
        split_num_slices_per_axis = None
        if (self.split_size is not None) or (axis is not None):
            if axis is None:
                axis = 0
            if is_sequence(axis):
                # axis is itself the per-axis slice-count sequence.
                split_num_slices_per_axis = pad_with_object(axis, len(self.array_shape), 1)
            elif self.split_size is not None:
                # Split split_size ways along the single given axis.
                split_num_slices_per_axis = pad_with_object([], len(self.array_shape), 1)
                split_num_slices_per_axis[axis] = self.split_size
        self.split_num_slices_per_axis = split_num_slices_per_axis

        self.tile_shape = tile_shape
        self.max_tile_bytes = max_tile_bytes
        self.max_tile_shape = max_tile_shape
        self.sub_tile_shape = sub_tile_shape
        # Canonicalise halo to the (ndim, 2) array form.
        halo = self.convert_halo_to_array_form(halo)
        self.halo = halo
        if tile_bounds_policy is None:
            tile_bounds_policy = ARRAY_BOUNDS
        self.tile_bounds_policy = tile_bounds_policy
        # Default tile extent bounds; update_tile_extent_bounds re-derives
        # these from the bounds policy and halo.
        self.tile_beg_min = self.array_start
        self.tile_end_max = self.array_start + self.array_shape

        # Split extents are computed lazily by set_split_extents*.
        self.split_shape = None
        self.split_begs = None
        self.split_ends = None
def convert_halo_to_array_form(self, halo):
"""
Converts the :samp:`{halo}` argument to a :samp:`({self}.array_shape.size, 2)`
shaped array.
:type halo: :samp:`None`, :obj:`int`, :samp:`self.array_shape.size` length sequence
of :samp:`int` or :samp:`(self.array_shape.size, 2)` shaped array
of :samp:`int`
:param halo: Halo to be converted to :samp:`(len(self.array_shape), 2)` shaped array form.
:rtype: :obj:`numpy.ndarray`
:return: A :samp:`(len(self.array_shape), 2)` shaped array of :obj:`numpy.int64` elements.
"""
return convert_halo_to_array_form(halo=halo, ndim=len(self.array_shape))
    @property
    def array_shape(self):
        """
        The shape of the array which is to be split. A sequence of :obj:`int` indicating the
        per-axis sizes which are to be split.
        """
        return self.__array_shape

    @array_shape.setter
    def array_shape(self, array_shape):
        """Set the shape of the array to be split."""
        self.__array_shape = array_shape

    @property
    def array_start(self):
        """
        The start index. A sequence of :obj:`int` indicating the start of indexing for
        the tile slices. Defaults to :samp:`numpy.zeros_like({self}.array_shape)`.
        """
        return self.__array_start

    @array_start.setter
    def array_start(self, array_start):
        """Set the start index for tile slice indexing."""
        self.__array_start = array_start

    @property
    def array_itemsize(self):
        """
        The number of bytes per array element, see :attr:`max_tile_bytes`.
        """
        return self.__array_itemsize

    @array_itemsize.setter
    def array_itemsize(self, array_itemsize):
        """Set the number of bytes per array element."""
        self.__array_itemsize = array_itemsize

    @property
    def indices_per_axis(self):
        """
        The per-axis indices indicating the cuts for the split.
        A :obj:`list` of 1D :obj:`numpy.ndarray` objects such
        that :samp:`{self}.indices_per_axis[i]` indicates the
        cut positions for axis :samp:`i`.
        """
        return self.__indices_per_axis

    @indices_per_axis.setter
    def indices_per_axis(self, indices_per_axis):
        """Set the per-axis cut indices."""
        self.__indices_per_axis = indices_per_axis

    @property
    def split_size(self):
        """
        An :obj:`int` indicating the number of tiles in the calculated split.
        """
        return self.__split_size

    @split_size.setter
    def split_size(self, split_size):
        """Set the total number of tiles in the split."""
        self.__split_size = split_size
    @property
    def split_num_slices_per_axis(self):
        """
        Number of slices per axis.
        A 1D :obj:`numpy.ndarray` of :obj:`int` indicating the number of slices (sections)
        per axis, so that :samp:`{self}.split_num_slices_per_axis[i]` is an integer
        indicating the number of sections along axis :samp:`i` in the calculated split.
        """
        return self.__split_num_slices_per_axis

    @split_num_slices_per_axis.setter
    def split_num_slices_per_axis(self, split_num_slices_per_axis):
        """Set the per-axis number of slices."""
        self.__split_num_slices_per_axis = split_num_slices_per_axis

    @property
    def tile_shape(self):
        """
        The shape of all tiles in the calculated split.
        A 1D :samp:`numpy.ndarray` of :obj:`int` indicating the per-axis
        number of elements for tiles in the calculated split.
        """
        return self.__tile_shape

    @tile_shape.setter
    def tile_shape(self, tile_shape):
        """Set the explicit tile shape."""
        self.__tile_shape = tile_shape

    @property
    def max_tile_bytes(self):
        """
        The maximum number of bytes for any tile (including :attr:`halo`) in the returned split.
        An :obj:`int` which constrains the tile shape such that any tile
        from the computed split is no bigger than :samp:`{max_tile_bytes}`.
        """
        return self.__max_tile_bytes

    @max_tile_bytes.setter
    def max_tile_bytes(self, max_tile_bytes):
        """Set the maximum number of bytes per tile."""
        self.__max_tile_bytes = max_tile_bytes

    @property
    def max_tile_shape(self):
        """
        Per-axis maximum sizes for calculated tiles.
        A 1D :samp:`numpy.ndarray` of :obj:`int` indicating the per-axis
        maximum number of elements for tiles in the calculated split.
        """
        return self.__max_tile_shape

    @max_tile_shape.setter
    def max_tile_shape(self, max_tile_shape):
        """Set the per-axis maximum tile sizes."""
        self.__max_tile_shape = max_tile_shape

    @property
    def sub_tile_shape(self):
        """
        Calculated tile shape will be an integer multiple of this sub-tile shape.
        i.e. :samp:`(self.tile_shape[i] % self.sub_tile_shape[i]) == 0`,
        for :samp:`i in range(0, len(self.tile_shape))`.
        A 1D :samp:`numpy.ndarray` of :obj:`int` indicating sub-tile shape.
        """
        return self.__sub_tile_shape

    @sub_tile_shape.setter
    def sub_tile_shape(self, sub_tile_shape):
        """Set the sub-tile shape which calculated tile shapes must be a multiple of."""
        self.__sub_tile_shape = sub_tile_shape
    @property
    def halo(self):
        """
        Per-axis -ve and +ve halo sizes for extending tiles to overlap with neighbouring tiles.
        A :samp:`(N, 2)` shaped array indicating the number of halo elements
        in the negative (:samp:`halo[:, 0]`) and positive (:samp:`halo[:, 1]`)
        direction for each of the :samp:`N` axes.
        """
        return self.__halo

    @halo.setter
    def halo(self, halo):
        """Set the halo; the value is canonicalised to :samp:`(N, 2)` array form."""
        self.__halo = convert_halo_to_array_form(halo, ndim=self.array_shape.size)

    @property
    def tile_bounds_policy(self):
        """
        A string indicating whether tile halo extents can extend beyond the array domain.
        Valid values are indicated by :attr:`valid_tile_bounds_policies`.
        """
        return self.__tile_bounds_policy

    @tile_bounds_policy.setter
    def tile_bounds_policy(self, tile_bounds_policy):
        """Set the tile bounds policy string."""
        self.__tile_bounds_policy = tile_bounds_policy
    @property
    def tile_beg_min(self):
        """
        The per-axis minimum index for :attr:`slice.start`. The per-axis lower bound for
        tile start indices. A 1D :obj:`numpy.ndarray`.
        """
        return self.__tile_beg_min

    @tile_beg_min.setter
    def tile_beg_min(self, tile_beg_min):
        """Set the per-axis lower bound for tile start indices."""
        self.__tile_beg_min = tile_beg_min

    @property
    def tile_end_max(self):
        """
        The per-axis maximum index for :attr:`slice.stop`. The per-axis upper bound for
        tile stop indices. A 1D :obj:`numpy.ndarray`.
        """
        return self.__tile_end_max

    @tile_end_max.setter
    def tile_end_max(self, tile_end_max):
        """Set the per-axis upper bound for tile stop indices."""
        self.__tile_end_max = tile_end_max

    @property
    def split_shape(self):
        """
        The shape of the calculated split array. Indicates the per-axis number
        of sections in the calculated split. A 1D :obj:`numpy.ndarray`.
        """
        return self.__split_shape

    @split_shape.setter
    def split_shape(self, split_shape):
        """Set the shape of the calculated split array."""
        self.__split_shape = split_shape

    @property
    def split_begs(self):
        """
        The list of per-axis start indices for :obj:`slice` objects.
        A :obj:`list` of 1D :obj:`numpy.ndarray` objects indicating
        the :attr:`slice.start` index for tiles.
        """
        return self.__split_begs

    @split_begs.setter
    def split_begs(self, split_begs):
        """Set the per-axis slice start indices."""
        self.__split_begs = split_begs

    @property
    def split_ends(self):
        """
        The list of per-axis stop indices for :obj:`slice` objects.
        A :obj:`list` of 1D :obj:`numpy.ndarray` objects indicating
        the :attr:`slice.stop` index for tiles.
        """
        return self.__split_ends

    @split_ends.setter
    def split_ends(self, split_ends):
        """Set the per-axis slice stop indices."""
        self.__split_ends = split_ends
def check_tile_bounds_policy(self):
"""
Raises :obj:`ValueError` if :attr:`tile_bounds_policy`
is not in :samp:`[{self}.ARRAY_BOUNDS, {self}.NO_BOUNDS]`.
"""
if self.tile_bounds_policy not in self.valid_tile_bounds_policies:
raise ValueError(
"Got self.tile_bounds_policy=%s, which is not in %s."
%
(self.tile_bounds_policy, self.valid_tile_bounds_policies)
)
def check_consistent_parameter_dimensions(self):
"""
Ensure that all parameter dimensions are consistent with
the :attr:`array_shape` dimension.
:raises ValueError: For inconsistent parameter dimensions.
"""
if self.indices_per_axis is not None:
if len(self.indices_per_axis) > len(self.array_shape):
raise ValueError(
"Got len(self.indices_per_axis)=%s > len(self.array_shape)=%s, should be equal."
%
(len(self.indices_per_axis), len(self.array_shape))
)
if self.split_num_slices_per_axis is not None:
if len(self.split_num_slices_per_axis) > len(self.array_shape):
raise ValueError(
(
"Got len(self.split_num_slices_per_axis)=%s > len(self.array_shape)=%s,"
+
" should be equal."
)
%
(len(self.split_num_slices_per_axis), len(self.array_shape))
)
if self.tile_shape is not None:
if len(self.tile_shape) != len(self.array_shape):
raise ValueError(
"Got len(self.tile_shape)=%s > len(self.array_shape)=%s, should be equal."
%
(len(self.tile_shape), len(self.array_shape))
)
if self.sub_tile_shape is not None:
if len(self.sub_tile_shape) != len(self.array_shape):
raise ValueError(
"Got len(self.sub_tile_shape)=%s > len(self.array_shape)=%s, should be equal."
%
(len(self.sub_tile_shape), len(self.array_shape))
)
if self.max_tile_shape is not None:
if len(self.max_tile_shape) != len(self.array_shape):
raise ValueError(
"Got len(self.max_tile_shape)=%s > len(self.array_shape)=%s, should be equal."
%
(len(self.max_tile_shape), len(self.array_shape))
)
if self.array_start is not None:
if len(self.array_start) != len(self.array_shape):
raise ValueError(
"Got len(self.array_start)=%s > len(self.array_shape)=%s, should be equal."
%
(len(self.array_start), len(self.array_shape))
)
    def check_consistent_parameter_grouping(self):
        """
        Ensures this object does not have conflicting groups of parameters.
        Exactly one of the parameter groups 'indices_per_axis', 'split_size',
        'tile_shape' or 'max_tile_bytes' may define the split.

        :raises ValueError: For conflicting or absent parameters.
        """
        # Collect the non-None parameters into named groups; each group
        # represents one way of specifying the split.
        parameter_groups = {}
        if self.indices_per_axis is not None:
            parameter_groups["indices_per_axis"] = \
                {"self.indices_per_axis": self.indices_per_axis}
        if (self.split_size is not None) or (self.split_num_slices_per_axis is not None):
            parameter_groups["split_size"] = \
                {
                    "self.split_size": self.split_size,
                    "self.split_num_slices_per_axis": self.split_num_slices_per_axis,
                }
        if self.tile_shape is not None:
            parameter_groups["tile_shape"] = \
                {"self.tile_shape": self.tile_shape}
        if self.max_tile_bytes is not None:
            parameter_groups["max_tile_bytes"] = \
                {"self.max_tile_bytes": self.max_tile_bytes}
        # max_tile_shape and sub_tile_shape only make sense as modifiers of
        # the "max_tile_bytes" group, so they are folded into it.
        if self.max_tile_shape is not None:
            if "max_tile_bytes" not in parameter_groups.keys():
                parameter_groups["max_tile_bytes"] = {}
            parameter_groups["max_tile_bytes"]["self.max_tile_shape"] = self.max_tile_shape
        if self.sub_tile_shape is not None:
            if "max_tile_bytes" not in parameter_groups.keys():
                parameter_groups["max_tile_bytes"] = {}
            parameter_groups["max_tile_bytes"]["self.sub_tile_shape"] = self.sub_tile_shape
        self.logger.debug("parameter_groups=%s", parameter_groups)
        # More than one group specified: ambiguous split definition.
        if len(parameter_groups.keys()) > 1:
            group_keys = sorted(parameter_groups.keys())
            raise ValueError(
                "Got conflicting parameter groups specified, "
                +
                "should only specify one group to define the split:\n"
                +
                (
                    "\n".join(
                        [
                            (
                                ("Group %18s: " % ("'%s'" % group_key))
                                +
                                str(parameter_groups[group_key])
                            )
                            for group_key in group_keys
                        ]
                    )
                )
            )
        # No group specified at all: the split is undefined.
        if len(parameter_groups.keys()) <= 0:
            raise ValueError(
                "No split parameters specified, need parameters from one of the groups: "
                +
                "'indices_per_axis', 'split_size', 'tile_shape' or 'max_tile_bytes'"
            )
def check_split_parameters(self):
"""
Ensures this object has a state consistent with evaluating a split.
:raises ValueError: For conflicting or absent parameters.
"""
self.check_tile_bounds_policy()
self.check_consistent_parameter_dimensions()
self.check_consistent_parameter_grouping()
def update_tile_extent_bounds(self):
"""
Updates the :attr:`tile_beg_min` and :attr:`tile_end_max`
data members according to :attr:`tile_bounds_policy`.
"""
if self.tile_bounds_policy == NO_BOUNDS:
self.tile_beg_min = self.array_start - self.halo[:, 0]
self.tile_end_max = self.array_start + self.array_shape + self.halo[:, 1]
elif self.tile_bounds_policy == ARRAY_BOUNDS:
self.tile_beg_min = self.array_start
self.tile_end_max = self.array_start + self.array_shape
    def set_split_extents_by_indices_per_axis(self):
        """
        Sets split shape :attr:`split_shape` and
        split extents (:attr:`split_begs` and :attr:`split_ends`)
        from values in :attr:`indices_per_axis`.

        :raises ValueError: if :attr:`indices_per_axis` is :samp:`None`.
        """
        if self.indices_per_axis is None:
            raise ValueError("Got None for self.indices_per_axis")
        self.logger.debug("self.array_shape=%s", self.array_shape)
        self.logger.debug("self.indices_per_axis=%s", self.indices_per_axis)
        # Axes without explicit cut indices are padded with None (no cuts).
        self.indices_per_axis = \
            pad_with_none(self.indices_per_axis, len(self.array_shape))
        # Define the start and stop indices (extents) for each axis slice
        self.split_shape = _np.ones(len(self.array_shape), dtype="int64")
        self.split_begs = [[], ] * len(self.array_shape)
        self.split_ends = [[], ] * len(self.array_shape)
        for i in range(len(self.indices_per_axis)):
            indices = self.indices_per_axis[i]
            if (indices is not None) and (len(indices) > 0):
                # k cut indices produce k + 1 sections along this axis;
                # starts are [0, indices...], stops are [indices..., size].
                self.split_shape[i] = len(indices) + 1
                self.split_begs[i] = _np.zeros((len(indices) + 1,), dtype="int64")
                self.split_begs[i][1:] = indices
                self.split_ends[i] = _np.zeros((len(self.split_begs[i]),), dtype="int64")
                self.split_ends[i][0:-1] = self.split_begs[i][1:]
                self.split_ends[i][-1] = self.array_shape[i]
            else:
                # start and stop is the full width of the axis
                self.split_begs[i] = [0, ]
                self.split_ends[i] = [self.array_shape[i], ]
        self.logger.debug("self.indices_per_axis=%s", self.indices_per_axis)
    def calculate_split_from_extents(self):
        """
        Returns split calculated using extents obtained
        from :attr:`split_begs` and :attr:`split_ends`.
        All calls to calculate the split end up here to produce
        the :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
        of :obj:`tuple`-of-:obj:`slice` elements.

        :rtype: :obj:`numpy.ndarray`
        :return:
           A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
           where each element is a :obj:`tuple` of :obj:`slice` objects.
        """
        self.logger.debug("self.split_shape=%s", self.split_shape)
        self.logger.debug("self.split_begs=%s", self.split_begs)
        self.logger.debug("self.split_ends=%s", self.split_ends)
        # For every multi-index of the split, build a tuple of per-axis
        # slices.  Each slice start/stop is offset by array_start, extended
        # by the halo (only for non-empty extents -- the
        # `(split_ends > split_begs)` factor zeroes the halo for zero-length
        # tiles), and clamped to the [tile_beg_min, tile_end_max] bounds.
        ret = \
            _np.array(
                [
                    tuple(
                        [
                            slice(
                                max([
                                    self.split_begs[d][idx[d]]
                                    + self.array_start[d]
                                    - self.halo[d, 0]
                                    * (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]]),
                                    self.tile_beg_min[d]
                                ]),
                                min([
                                    self.split_ends[d][idx[d]]
                                    + self.array_start[d]
                                    + self.halo[d, 1]
                                    * (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]]),
                                    self.tile_end_max[d]
                                ])
                            )
                            for d in range(len(self.split_shape))
                        ]
                    )
                    # Enumerate all multi-indices of the split shape.
                    for idx in
                    _np.array(
                        _np.unravel_index(
                            _np.arange(0, _np.product(self.split_shape)),
                            self.split_shape
                        )
                    ).T
                ],
                # One "object" field per axis, so each element is a tuple of
                # slice objects; reshape the flat list to the split shape.
                dtype=[("%d" % d, "object") for d in range(len(self.split_shape))]
            ).reshape(self.split_shape)

        return ret
    def calculate_split_halos_from_extents(self):
        """
        Returns :samp:`(self.ndim, 2)` shaped halo array elements indicating
        the halo for each split. Tiles on the boundary may have the halo trimmed
        to account for the :attr:`tile_bounds_policy`.

        :rtype: :obj:`numpy.ndarray`
        :return:
           A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
           where each element is a :samp:`(self.ndim, 2)` shaped :obj:`numpy.ndarray`
           indicating the per-axis and per-direction number of halo elements for each tile
           in the split.
        """
        self.logger.debug("self.split_shape=%s", self.split_shape)
        self.logger.debug("self.split_begs=%s", self.split_begs)
        self.logger.debug("self.split_ends=%s", self.split_ends)
        # For each tile, the actual halo is the nominal halo trimmed to the
        # space available between the tile extents and the
        # [tile_beg_min, tile_end_max] bounds; zero-length tiles (where
        # split_ends <= split_begs) get a zero halo.
        ret = \
            _np.array(
                [
                    (
                        tuple(
                            (
                                min([
                                    self.split_begs[d][idx[d]] - self.tile_beg_min[d],
                                    self.halo[d, 0]
                                    *
                                    (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]])
                                ]),
                                min([
                                    self.tile_end_max[d] - self.split_ends[d][idx[d]],
                                    self.halo[d, 1]
                                    *
                                    (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]])
                                ])
                            )
                            for d in range(len(self.split_shape))
                        )
                    )
                    # Enumerate all multi-indices of the split shape.
                    for idx in
                    _np.array(
                        _np.unravel_index(
                            _np.arange(0, _np.product(self.split_shape)),
                            self.split_shape
                        )
                    ).T
                ],
                # One "(2,) int64" field per axis, giving a (ndim, 2) shaped
                # halo record per tile; reshape back to the split shape.
                dtype=[("%d" % d, "2int64") for d in range(len(self.split_shape))]
            ).reshape(self.split_shape)

        return ret
def calculate_split_by_indices_per_axis(self):
"""
Returns split calculated using extents obtained
from :attr:`indices_per_axis`.
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
where each element is a :obj:`tuple` of :obj:`slice` objects.
"""
self.set_split_extents_by_indices_per_axis()
return self.calculate_split_from_extents()
def calculate_axis_split_extents(self, num_sections, size):
    """
    Divides :samp:`range(0, {size})` into (approximately) equal sized
    intervals.

    :type num_sections: :obj:`int`
    :param num_sections: Number of intervals to produce.
    :type size: :obj:`int`
    :param size: Range for the subdivision.
    :rtype: :obj:`tuple`
    :return: Two element tuple :samp:`(begs, ends)` such
       that :samp:`slice(begs[i], ends[i])` defines interval :samp:`i`,
       for :samp:`i in range(0, {num_sections})`.
    """
    interval = size // num_sections
    if interval >= 1:
        # Evenly sized intervals; distribute the remainder one element at
        # a time over the leading intervals.
        starts = _np.arange(0, interval * num_sections, interval)
        for j in range(size - interval * num_sections):
            starts[j + 1:] += 1
        stops = _np.zeros_like(starts)
        stops[0:-1] = starts[1:]
        stops[-1] = size
    else:
        # More sections than elements: the first `size` intervals hold a
        # single element each, the remainder are empty.
        starts = _np.arange(0, num_sections)
        starts[size:] = size
        stops = starts.copy()
        stops[0:-1] = starts[1:]
    return starts, stops
def set_split_extents_by_split_size(self):
    """
    Sets split shape :attr:`split_shape` and
    split extents (:attr:`split_begs` and :attr:`split_ends`)
    from values in :attr:`split_size` and :attr:`split_num_slices_per_axis`.

    :raises ValueError: if :attr:`split_size` is :samp:`None` and the
       entries of :attr:`split_num_slices_per_axis` are not all positive
       integers (the total number of tiles cannot be deduced).
    """
    if self.split_size is None:
        # Derive the total number of tiles from the per-axis slice counts;
        # this only works when every axis count is a positive integer.
        if (
            _np.all([s is not None for s in self.split_num_slices_per_axis])
            and
            _np.all([s > 0 for s in self.split_num_slices_per_axis])
        ):
            # np.prod rather than np.product: the product alias was
            # deprecated and removed in NumPy 2.0.
            self.split_size = _np.prod(self.split_num_slices_per_axis)
        else:
            raise ValueError(
                (
                    "Got invalid self.split_num_slices_per_axis=%s, all elements "
                    +
                    "need to be integers greater than zero when self.split_size is None."
                )
                %
                self.split_num_slices_per_axis
            )
    self.logger.debug(
        "Pre cannonicalise: self.split_num_slices_per_axis=%s",
        self.split_num_slices_per_axis)
    # Replace non-positive/None per-axis counts with factors of split_size
    # so that the per-axis counts multiply out to exactly split_size.
    self.split_num_slices_per_axis = \
        calculate_num_slices_per_axis(
            self.split_num_slices_per_axis,
            self.split_size,
            self.array_shape
        )
    self.logger.debug(
        "Post cannonicalise: self.split_num_slices_per_axis=%s",
        self.split_num_slices_per_axis)
    # Define the start and stop indices (extents) for each axis slice.
    self.split_shape = self.split_num_slices_per_axis.copy()
    # Fresh list per axis (avoids the shared-reference footgun of
    # "[[], ] * n"; every slot is reassigned below in any case).
    self.split_begs = [[] for _ in range(len(self.array_shape))]
    self.split_ends = [[] for _ in range(len(self.array_shape))]
    for i in range(len(self.array_shape)):
        self.split_begs[i], self.split_ends[i] = \
            self.calculate_axis_split_extents(
                self.split_shape[i],
                self.array_shape[i]
            )
def calculate_split_by_split_size(self):
    """
    Computes the split from :attr:`split_size`
    and :attr:`split_num_slices_per_axis`.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
       where each element is a :obj:`tuple` of :obj:`slice` objects.
    """
    # Derive the per-axis extents first, then convert them to slices.
    self.set_split_extents_by_split_size()
    split = self.calculate_split_from_extents()
    return split
def set_split_extents_by_tile_shape(self):
    """
    Sets split shape :attr:`split_shape` and
    split extents (:attr:`split_begs` and :attr:`split_ends`)
    from the value of :attr:`tile_shape`.
    """
    # Ceiling-divide to get the number of tiles per axis.
    self.split_shape = ((self.array_shape - 1) // self.tile_shape) + 1
    num_axes = len(self.array_shape)
    self.split_begs = [[] for _ in range(num_axes)]
    self.split_ends = [[] for _ in range(num_axes)]
    for axis in range(num_axes):
        begs = _np.arange(0, self.array_shape[axis], self.tile_shape[axis])
        ends = _np.zeros_like(begs)
        ends[0:-1] = begs[1:]
        # The trailing tile is truncated to the array extent.
        ends[-1] = self.array_shape[axis]
        self.split_begs[axis] = begs
        self.split_ends[axis] = ends
def calculate_split_by_tile_shape(self):
    """
    Computes the split from :attr:`tile_shape`.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
       where each element is a :obj:`tuple` of :obj:`slice` objects.
    """
    # Derive the per-axis extents first, then convert them to slices.
    self.set_split_extents_by_tile_shape()
    split = self.calculate_split_from_extents()
    return split
def set_split_extents_by_tile_max_bytes(self):
    """
    Sets the split extents (:attr:`split_begs`
    and :attr:`split_ends`) from :attr:`max_tile_bytes`
    (and :attr:`max_tile_shape`, :attr:`sub_tile_shape`, :attr:`halo`).
    """
    # First work out a tile shape which satisfies the byte budget,
    # then split using that tile shape.
    shape = calculate_tile_shape_for_max_bytes(
        array_shape=self.array_shape,
        array_itemsize=self.array_itemsize,
        max_tile_bytes=self.max_tile_bytes,
        max_tile_shape=self.max_tile_shape,
        sub_tile_shape=self.sub_tile_shape,
        halo=self.halo
    )
    self.tile_shape = shape
    self.set_split_extents_by_tile_shape()
def calculate_split_by_tile_max_bytes(self):
    """
    Computes the split from :attr:`max_tile_bytes`
    (and :attr:`max_tile_shape`, :attr:`sub_tile_shape`, :attr:`halo`).

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
       where each element is a :obj:`tuple` of :obj:`slice` objects.
    """
    # Derive the per-axis extents first, then convert them to slices.
    self.set_split_extents_by_tile_max_bytes()
    split = self.calculate_split_from_extents()
    return split
def set_split_extents(self):
    """
    Calculates the split extents (:attr:`split_begs`
    and :attr:`split_ends`) from whichever split parameters
    were supplied to :meth:`__init__`.
    """
    self.check_split_parameters()
    self.update_tile_extent_bounds()
    # Dispatch, in priority order, on which parameters were supplied.
    if self.indices_per_axis is not None:
        self.set_split_extents_by_indices_per_axis()
        return
    if (self.split_size is not None) or (self.split_num_slices_per_axis is not None):
        self.set_split_extents_by_split_size()
        return
    if self.tile_shape is not None:
        self.set_split_extents_by_tile_shape()
        return
    if self.max_tile_bytes is not None:
        self.set_split_extents_by_tile_max_bytes()
def calculate_split(self):
    """
    Computes the split.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
       of dimension :samp:`len({self}.array_shape)` whose elements
       are :obj:`tuple` objects of :obj:`slice` objects.  Each tuple
       defines a slice within the
       bounds :samp:`{self}.array_start - {self}.halo[:, 0]`
       to :samp:`{self}.array_start + {self}.array_shape + {self}.halo[:, 1]`.
    """
    # Compute the per-axis extents, then turn them into slice tuples.
    self.set_split_extents()
    split = self.calculate_split_from_extents()
    return split
# Attach the full parameter documentation to ShapeSplitter.__init__ at import
# time.  The docstring is assembled from the shared *_param_doc templates so
# it stays consistent with shape_split and array_split.  A throw-away
# instance is created purely to reach the underlying function object via the
# bound method's __func__ attribute.
ShapeSplitter([0, ]).__init__.__func__.__doc__ = \
    """
Initialises parameters which define a split.
%s
%s
.. seealso:: :ref:`array_split-examples`
""" % (
        _array_shape_param_doc,
        (
            _ShapeSplitter__init__params_doc
            %
            (
                _array_start_param_doc,
                "\n" + _array_itemsize_param_doc,
                _halo_param_doc,
                _array_tile_bounds_policy_param_doc,
            )
        )
    )
def shape_split(array_shape, *args, **kwargs):
    "To be replaced."
    # Thin functional wrapper over ShapeSplitter; the placeholder docstring
    # above is overwritten at import time by the shape_split.__doc__
    # assignment which follows this definition.
    splitter = ShapeSplitter(array_shape, *args, **kwargs)
    return splitter.calculate_split()
shape_split.__doc__ =\
"""
Splits specified :samp:`{array_shape}` in tiles, returns array of :obj:`slice` tuples.
%s
%s
:rtype: :obj:`numpy.ndarray`
:return: Array of :obj:`tuple` objects. Each :obj:`tuple` element
is a :obj:`slice` object so that each :obj:`tuple` defines
a multi-dimensional slice of an array of shape :samp:`{array_shape}`.
.. seealso:: :func:`array_split.array_split`, :meth:`array_split.ShapeSplitter`,
:ref:`array_split-examples`
""" % (
_array_shape_param_doc,
(
_ShapeSplitter__init__params_doc
%
(
_array_start_param_doc,
"\n" + _array_itemsize_param_doc,
_halo_param_doc,
_array_tile_bounds_policy_param_doc,
)
)
)
def array_split(
        ary,
        indices_or_sections=None,
        axis=None,
        tile_shape=None,
        max_tile_bytes=None,
        max_tile_shape=None,
        sub_tile_shape=None,
        halo=None
):
    "To be replaced."
    # Compute the slice tuples for ary.shape, then take the corresponding
    # sub-array views.  The placeholder docstring above is overwritten at
    # import time by the array_split.__doc__ assignment which follows.
    split = \
        shape_split(
            array_shape=ary.shape,
            indices_or_sections=indices_or_sections,
            axis=axis,
            array_start=None,
            array_itemsize=ary.itemsize,
            tile_shape=tile_shape,
            max_tile_bytes=max_tile_bytes,
            max_tile_shape=max_tile_shape,
            sub_tile_shape=sub_tile_shape,
            halo=halo,
            tile_bounds_policy=ARRAY_BOUNDS
        )
    return [ary[tile_slice] for tile_slice in split.flatten()]
array_split.__doc__ =\
"""
Splits the specified array :samp:`{ary}` into sub-arrays, returns list of :obj:`numpy.ndarray`.
:type ary: :obj:`numpy.ndarray`
:param ary: Array which is split into sub-arrays.
%s
:rtype: :obj:`list`
:return: List of :obj:`numpy.ndarray` elements, where each element is
a *slice* from :samp:`{ary}` (potentially an empty slice).
.. seealso:: :func:`array_split.shape_split`, :meth:`array_split.ShapeSplitter`,
:ref:`array_split-examples`
""" % (
_ShapeSplitter__init__params_doc
%
(
"",
"",
_halo_param_doc.replace("len({array_shape})", "len({ary}.shape)"),
""
)
)
# Public API: every module-level name that does not start with an underscore.
__all__ = [s for s in dir() if not s.startswith('_')]
|
array-split/array_split | array_split/split.py | array_split | python | def array_split(
ary,
indices_or_sections=None,
axis=None,
tile_shape=None,
max_tile_bytes=None,
max_tile_shape=None,
sub_tile_shape=None,
halo=None
):
"To be replaced."
return [
ary[slyce]
for slyce in
shape_split(
array_shape=ary.shape,
indices_or_sections=indices_or_sections,
axis=axis,
array_start=None,
array_itemsize=ary.itemsize,
tile_shape=tile_shape,
max_tile_bytes=max_tile_bytes,
max_tile_shape=max_tile_shape,
sub_tile_shape=sub_tile_shape,
halo=halo,
tile_bounds_policy=ARRAY_BOUNDS
).flatten()
] | To be replaced. | train | https://github.com/array-split/array_split/blob/e07abe3001209394dde809f7e6f505f9f49a1c26/array_split/split.py#L1588-L1615 | [
"def shape_split(array_shape, *args, **kwargs):\n \"To be replaced.\"\n return \\\n ShapeSplitter(\n array_shape,\n *args,\n **kwargs\n ).calculate_split()\n"
] | """
===================================
The :mod:`array_split.split` Module
===================================
.. currentmodule:: array_split.split
Defines array splitting functions and classes.
Classes and Functions
=====================
.. autosummary::
:toctree: generated/
shape_factors - Compute *largest* factors of a given integer.
calculate_num_slices_per_axis - Computes per-axis divisions for a multi-dimensional shape.
calculate_tile_shape_for_max_bytes - Calculate a tile shape subject to max bytes restriction.
convert_halo_to_array_form - converts halo argument to :samp:`(ndim, 2)` shaped array.
ShapeSplitter - Splits a given shape into slices.
shape_split - Splits a specified shape and returns :obj:`numpy.ndarray` of :obj:`slice` elements.
array_split - Equivalent to :func:`numpy.array_split`.
Attributes
==========
.. autodata:: ARRAY_BOUNDS
.. autodata:: NO_BOUNDS
Utilities
=========
.. autosummary::
:toctree: generated/
is_scalar - Return :samp:`True` if argument is numeric scalar.
is_sequence - Return :samp:`True` if argument is a sequence.
is_indices - Return :samp:`True` if argument is a sequence.
pad_with_object - End pads a sequence with specified object.
pad_with_none - End pads a sequence with :samp:`None` elements.
"""
from __future__ import absolute_import
import numpy as _np
from .license import license as _license, copyright as _copyright, version as _version
from . import logging as _logging
__copyright__ = _copyright()
__version__ = _version()
__author__ = "Shane J. Latham"
__license__ = _license()
def is_scalar(obj):
    """
    Returns :samp:`True` if argument :samp:`{obj}` is a numeric
    scalar (i.e. supports conversion to an integer).

    :type obj: :obj:`object`
    :param obj: Object to test.
    :rtype: :obj:`bool`
    :return: :samp:`True` if :samp:`{obj}` is a numeric scalar.

    Example::
       >>> is_scalar(5)
       True
       >>> is_scalar(2.0)
       True
       >>> is_scalar([1, 2, 3])
       False
    """
    # Duck-typed check: numeric types (including numpy scalars) provide
    # __int__; __long__ covers Python 2 long integers.
    return any(hasattr(obj, attr) for attr in ("__int__", "__long__"))
def is_sequence(obj):
    """
    Returns :samp:`True` if argument :samp:`{obj}` looks like a
    sequence (e.g. a :obj:`list` or :obj:`tuple`, etc).

    :type obj: :obj:`object`
    :param obj: Object to test.
    :rtype: :obj:`bool`
    :return: :samp:`True` if :samp:`{obj}` is a sequence.

    Example::
       >>> is_sequence([1, 2, 3])
       True
       >>> is_sequence(5)
       False
    """
    # Duck-typed check: any of the sequence/iteration protocol methods
    # qualifies the object as sequence-like.
    return any(
        hasattr(obj, attr)
        for attr in ("__len__", "__getitem__", "__iter__")
    )
def is_indices(indices_or_sections):
    """
    Distinguishes the two forms of the :samp:`indices_or_sections`
    argument of :meth:`ShapeSplitter.__init__`: returns :samp:`True`
    when the argument is a sequence of *cut* indices and :samp:`False`
    when it is a scalar *total number of tiles*.

    :type indices_or_sections: :obj:`object`
    :param indices_or_sections: Object to test.
    :rtype: :obj:`bool`
    :return: :samp:`is_sequence({indices_or_sections})`.
    """
    # The distinction is purely "sequence vs scalar".
    sequence_like = is_sequence(indices_or_sections)
    return sequence_like
def pad_with_object(sequence, new_length, obj=None):
    """
    Returns :samp:`{sequence}` end-padded with :samp:`{obj}` elements
    so that the length of the result equals :samp:`{new_length}`.

    :type sequence: iterable
    :param sequence: Sequence to be end-padded.
    :type new_length: :obj:`int`
    :param new_length: The length of the returned list.
    :type obj: :obj:`object`
    :param obj: Object used as padding elements.
    :rtype: :obj:`list`
    :return: A :obj:`list` of length :samp:`{new_length}` (the original
       object is returned unchanged when it already has that length).
    :raises ValueError: if :samp:`len({sequence}) > {new_length}`.

    Example::
       >>> pad_with_object([1, 2, 3], 5, obj=0)
       [1, 2, 3, 0, 0]
    """
    current_length = len(sequence)
    if current_length > new_length:
        raise ValueError(
            "Got len(sequence)=%s which exceeds new_length=%s"
            %
            (current_length, new_length)
        )
    if current_length < new_length:
        sequence = list(sequence) + [obj] * (new_length - current_length)
    return sequence
def pad_with_none(sequence, new_length):
    """
    Returns :samp:`{sequence}` end-padded with :samp:`None` elements
    up to length :samp:`{new_length}`.

    :type sequence: iterable
    :param sequence: Sequence to be end-padded.
    :type new_length: :obj:`int`
    :param new_length: The length of the returned list.
    :rtype: :obj:`list`
    :return: A :obj:`list` of length :samp:`{new_length}`.
    :raises ValueError: if :samp:`len({sequence}) > {new_length}`.
    """
    # Equivalent to pad_with_object with its default obj=None padding.
    return pad_with_object(sequence, new_length)
def shape_factors(n, dim=2):
    """
    Returns a :obj:`numpy.ndarray` of :samp:`{dim}` factors :samp:`f`
    with :samp:`numpy.product(f) == {n}`, chosen to be as *square*
    (*cubic*, etc) as possible, in ascending order.

    :type n: :obj:`int`
    :param n: Integer which is factored into :samp:`{dim}` factors.
    :type dim: :obj:`int`
    :param dim: Number of factors.
    :rtype: :obj:`numpy.ndarray`
    :return: A :samp:`({dim},)` shaped array of integer factors of :samp:`{n}`.

    Example::
       >>> shape_factors(24, 3)
       array([2, 3, 4])
    """
    if dim <= 1:
        factor_list = [n]
    else:
        # Greedily peel off the largest factor not exceeding the dim-th
        # root of n, then recurse for the remaining dim-1 factors.
        for candidate in range(int(n ** (1.0 / float(dim))) + 1, 0, -1):
            if n % candidate == 0:
                factor_list = (
                    [candidate]
                    +
                    list(shape_factors(n // candidate, dim=dim - 1))
                )
                break
    factor_list.sort()
    return _np.array(factor_list)
def calculate_tile_shape_for_max_bytes(
        array_shape,
        array_itemsize,
        max_tile_bytes,
        max_tile_shape=None,
        sub_tile_shape=None,
        halo=None
):
    """
    Returns a tile shape :samp:`tile_shape`
    such that :samp:`numpy.product(tile_shape)*numpy.sum({array_itemsize}) <= {max_tile_bytes}`.
    Also, if :samp:`{max_tile_shape} is not None`
    then :samp:`numpy.all(tile_shape <= {max_tile_shape}) is True` and
    if :samp:`{sub_tile_shape} is not None`
    the :samp:`numpy.all((tile_shape % {sub_tile_shape}) == 0) is True`.

    :type array_shape: sequence of :obj:`int`
    :param array_shape: Shape of the array which is to be split into tiles.
    :type array_itemsize: :obj:`int`
    :param array_itemsize: The number of bytes per element of the array to be tiled.
    :type max_tile_bytes: :obj:`int`
    :param max_tile_bytes: The maximum number of bytes for the returned :samp:`tile_shape`.
    :type max_tile_shape: sequence of :obj:`int`
    :param max_tile_shape: Per axis maximum shapes for the returned :samp:`tile_shape`.
    :type sub_tile_shape: sequence of :obj:`int`
    :param sub_tile_shape: The returned :samp:`tile_shape` will be an even multiple
       of this sub-tile shape.
    :type halo: :obj:`int`, sequence of :obj:`int`, or :samp:`(len({array_shape}), 2)`
       shaped :obj:`numpy.ndarray`
    :param halo: How tiles are extended in each axis direction with *halo*
       elements. See :ref:`the-halo-parameter-examples` for meaning of :samp:`{halo}` values.
    :raises ValueError: if any element of :samp:`{array_shape}` is less than the
       corresponding element of :samp:`{sub_tile_shape}`.
    :rtype: :obj:`numpy.ndarray`
    :return: A 1D array of shape :samp:`(len(array_shape),)` indicating a *tile shape*
       which will (approximately) uniformly divide the given :samp:`{array_shape}` into
       tiles (sub-arrays).

    Examples::
       >>> from array_split.split import calculate_tile_shape_for_max_bytes
       >>> calculate_tile_shape_for_max_bytes(
       ...    array_shape=[512,],
       ...    array_itemsize=1,
       ...    max_tile_bytes=512
       ... )
       array([512])
       >>> calculate_tile_shape_for_max_bytes(
       ...    array_shape=[512,],
       ...    array_itemsize=2,  # Doubling the itemsize halves the tile size.
       ...    max_tile_bytes=512
       ... )
       array([256])
       >>> calculate_tile_shape_for_max_bytes(
       ...    array_shape=[512,],
       ...    array_itemsize=1,
       ...    max_tile_bytes=512-1  # tile shape will now be halved
       ... )
       array([256])
    """
    logger = _logging.getLogger(__name__ + ".calculate_tile_shape_for_max_bytes")
    logger.debug("calculate_tile_shape_for_max_bytes: enter:")
    logger.debug("array_shape=%s", array_shape)
    logger.debug("array_itemsize=%s", array_itemsize)
    logger.debug("max_tile_bytes=%s", max_tile_bytes)
    logger.debug("max_tile_shape=%s", max_tile_shape)
    logger.debug("sub_tile_shape=%s", sub_tile_shape)
    logger.debug("halo=%s", halo)
    # Normalise arguments: int64 shapes, scalar total itemsize,
    # defaulted/clipped max_tile_shape and unit sub_tile_shape.
    array_shape = _np.array(array_shape, dtype="int64")
    array_itemsize = _np.sum(array_itemsize, dtype="int64")
    if max_tile_shape is None:
        max_tile_shape = _np.array(array_shape, copy=True)
    # A tile can never exceed the array itself along any axis.
    max_tile_shape = \
        _np.array(_np.minimum(max_tile_shape, array_shape), copy=True, dtype=array_shape.dtype)
    if sub_tile_shape is None:
        sub_tile_shape = _np.ones((len(array_shape),), dtype="int64")
    sub_tile_shape = _np.array(sub_tile_shape, dtype="int64")
    # Normalise halo to (ndim, 2) form.
    halo = convert_halo_to_array_form(halo=halo, ndim=len(array_shape))
    if _np.any(array_shape < sub_tile_shape):
        raise ValueError(
            "Got array_shape=%s element less than corresponding sub_tile_shape=%s element."
            %
            (
                array_shape,
                sub_tile_shape
            )
        )
    logger.debug("max_tile_shape=%s", max_tile_shape)
    logger.debug("sub_tile_shape=%s", sub_tile_shape)
    logger.debug("halo=%s", halo)
    # Work in units of sub-tiles: how many sub-tiles cover the array
    # (ceiling) and how many fit wholly within it (floor).
    array_sub_tile_split_shape = ((array_shape - 1) // sub_tile_shape) + 1
    tile_sub_tile_split_shape = array_shape // sub_tile_shape
    if len(tile_sub_tile_split_shape) <= 1:
        # 1D case: directly compute the sub-tile count that fits the
        # byte budget (accounting for halo elements).
        tile_sub_tile_split_shape[0] = \
            int(_np.floor(
                (
                    (max_tile_bytes / float(array_itemsize))
                    -
                    _np.sum(halo)
                )
                /
                float(sub_tile_shape[0])
            ))
    # Respect the per-axis max_tile_shape constraint (in sub-tile units).
    tile_sub_tile_split_shape = \
        _np.minimum(
            tile_sub_tile_split_shape,
            max_tile_shape // sub_tile_shape
        )
    logger.debug("Pre loop: tile_sub_tile_split_shape=%s", tile_sub_tile_split_shape)
    # Shrink the tile one axis at a time (axis 0 first) until the
    # halo-extended tile fits within max_tile_bytes.
    current_axis = 0
    while (
        (current_axis < len(tile_sub_tile_split_shape))
        and
        (
            (
                _np.product(tile_sub_tile_split_shape * sub_tile_shape + _np.sum(halo, axis=1))
                *
                array_itemsize
            )
            >
            max_tile_bytes
        )
    ):
        if current_axis < (len(tile_sub_tile_split_shape) - 1):
            # Collapse this axis to a single sub-tile, then see how many
            # sub-tiles along it the remaining byte budget allows.
            tile_sub_tile_split_shape[current_axis] = 1
            tile_sub_tile_split_shape[current_axis] = \
                (
                    max_tile_bytes
                    //
                    (
                        _np.product(
                            tile_sub_tile_split_shape *
                            sub_tile_shape +
                            _np.sum(
                                halo,
                                axis=1))
                        *
                        array_itemsize
                    )
                )
            # Never drop below one sub-tile along an axis.
            tile_sub_tile_split_shape[current_axis] = \
                max([1, tile_sub_tile_split_shape[current_axis]])
        else:
            # Last axis: solve for the sub-tile count including the
            # halo contributions of the preceding (already fixed) axes.
            sub_tile_shape_h = sub_tile_shape.copy()
            sub_tile_shape_h[0:current_axis] += _np.sum(halo[0:current_axis, :], axis=1)
            tile_sub_tile_split_shape[current_axis] = \
                int(_np.floor(
                    (
                        (max_tile_bytes / float(array_itemsize))
                        -
                        _np.sum(halo[current_axis]) * _np.product(sub_tile_shape_h[0:current_axis])
                    )
                    /
                    float(_np.product(sub_tile_shape_h))
                ))
        current_axis += 1
    logger.debug("Post loop: tile_sub_tile_split_shape=%s", tile_sub_tile_split_shape)
    # Convert back from sub-tile units to elements and canonicalise so
    # the resulting split covers the array (approximately) uniformly.
    tile_shape = _np.minimum(array_shape, tile_sub_tile_split_shape * sub_tile_shape)
    logger.debug("pre cannonicalise tile_shape=%s", tile_shape)
    tile_split_shape = ((array_shape - 1) // tile_shape) + 1
    logger.debug("tile_split_shape=%s", tile_split_shape)
    tile_shape = (((array_sub_tile_split_shape - 1) // tile_split_shape) + 1) * sub_tile_shape
    logger.debug("post cannonicalise tile_shape=%s", tile_shape)
    return tile_shape
def calculate_num_slices_per_axis(num_slices_per_axis, num_slices, max_slices_per_axis=None):
    """
    Returns a :obj:`numpy.ndarray` (:samp:`return_array` say) where non-positive elements of
    the :samp:`{num_slices_per_axis}` sequence have been replaced with
    positive integer values such that :samp:`numpy.prod(return_array) == num_slices`
    and the replaced entries do not exceed the corresponding entries
    of :samp:`{max_slices_per_axis}`.

    :type num_slices_per_axis: sequence of :obj:`int`
    :param num_slices_per_axis: Constraint for per-axis sub-divisions.
       Non-positive elements indicate values to be replaced in the
       returned array. Positive values are identical to the corresponding
       element in the returned array.
    :type num_slices: integer
    :param num_slices: Indicates the number of slices (rectangular sub-arrays)
       formed by performing sub-divisions per axis. The returned array
       has elements assigned such that its product equals :samp:`{num_slices}`.
    :type max_slices_per_axis: sequence of :obj:`int` (or :samp:`None`)
    :param max_slices_per_axis: Constraint specifying maximum number of per-axis
       sub-divisions.
       If :samp:`None` defaults to :samp:`numpy.array([numpy.inf,]*len({num_slices_per_axis}))`.
    :raises ValueError: if :samp:`{max_slices_per_axis}` contains non-positive
       entries, or if :samp:`{num_slices}` cannot be factored consistently with
       the already-positive entries of :samp:`{num_slices_per_axis}`.
    :rtype: :obj:`numpy.ndarray`
    :return: An array :samp:`return_array`
       such that :samp:`numpy.prod(return_array) == num_slices`.

    Examples::
       >>> from array_split.split import calculate_num_slices_per_axis
       >>>
       >>> calculate_num_slices_per_axis([0, 0, 0], 16)
       array([4, 2, 2])
       >>> calculate_num_slices_per_axis([1, 0, 0], 16)
       array([1, 4, 4])
       >>> calculate_num_slices_per_axis([1, 0, 0], 16, [2, 2, 16])
       array([1, 2, 8])
    """
    logger = _logging.getLogger(__name__)
    ret_array = _np.array(num_slices_per_axis, copy=True)
    if max_slices_per_axis is None:
        max_slices_per_axis = _np.array([_np.inf, ] * len(num_slices_per_axis))
    max_slices_per_axis = _np.array(max_slices_per_axis)
    if _np.any(max_slices_per_axis <= 0):
        raise ValueError("Got non-positive value in max_slices_per_axis=%s" % max_slices_per_axis)
    # NOTE: _np.prod is used throughout rather than the _np.product alias,
    # which was deprecated and removed in NumPy 2.0.
    while _np.any(ret_array <= 0):
        # Product of the axes already fixed; _np.prod returns 1 for a
        # zero-length array, so this is safe when nothing is fixed yet.
        prd = _np.prod(ret_array[_np.where(ret_array > 0)])
        if (num_slices < prd) or ((num_slices % prd) > 0):
            raise ValueError(
                (
                    "Unable to construct grid of num_slices=%s elements from "
                    +
                    "num_slices_per_axis=%s (with max_slices_per_axis=%s)"
                )
                %
                (num_slices, num_slices_per_axis, max_slices_per_axis)
            )
        ridx = _np.where(ret_array <= 0)
        # Factor the remaining slice count over the unresolved axes,
        # largest factors first.
        f = shape_factors(num_slices // prd, ridx[0].shape[0])[::-1]
        if _np.all(f < max_slices_per_axis[ridx]):
            ret_array[ridx] = f
        else:
            # Some factor violates its per-axis maximum: clamp that axis to
            # its maximum, then decrement until the fixed-axis product
            # divides num_slices, and retry the factorisation.
            for i in range(ridx[0].shape[0]):
                if f[i] >= max_slices_per_axis[ridx[0][i]]:
                    ret_array[ridx[0][i]] = max_slices_per_axis[ridx[0][i]]
                    prd = _np.prod(ret_array[_np.where(ret_array > 0)])
                    while (num_slices % prd) > 0:
                        ret_array[ridx[0][i]] -= 1
                        prd = _np.prod(ret_array[_np.where(ret_array > 0)])
        logger.debug(
            "ridx=%s, f=%s, ret_array=%s, max_slices_per_axis=%s",
            ridx, f, ret_array, max_slices_per_axis
        )
    return ret_array
# ---------------------------------------------------------------------------
# Reusable docstring fragments.  These templates are %-interpolated into the
# docstrings of ShapeSplitter.__init__, shape_split and array_split at import
# time, so the three public entry points stay documented consistently.
# ---------------------------------------------------------------------------

# Documents the array_shape parameter.
_array_shape_param_doc =\
    """
:type array_shape: sequence of :obj:`int`
:param array_shape: The shape to be *split*.
"""

# Documents the array_start parameter.
_array_start_param_doc =\
    """
:type array_start: :samp:`None` or sequence of :obj:`int`
:param array_start: The start index. Defaults to :samp:`[0,]*len(array_shape)`.
The array indexing extents are assumed to range from :samp:`{array_start}`
to :samp:`{array_start} + {array_shape}`.
See :ref:`the-array_start-parameter-examples` examples.
"""

# Documents the array_itemsize parameter (only used with max_tile_bytes).
_array_itemsize_param_doc =\
    """
:type array_itemsize: int or sequence of :obj:`int`
:param array_itemsize: Number of bytes per array element.
Only relevant when :samp:`{max_tile_bytes}` is specified.
See :ref:`splitting-by-maximum-bytes-per-tile-examples` examples.
"""

# Documents the tile_bounds_policy parameter (ARRAY_BOUNDS vs NO_BOUNDS).
_array_tile_bounds_policy_param_doc =\
    """
:type tile_bounds_policy: :obj:`str`
:param tile_bounds_policy: Specifies whether tiles can extend beyond the array boundaries.
Only relevant for halo values greater than one. If :samp:`{tile_bounds_policy}`
is :data:`ARRAY_BOUNDS`
then the calculated tiles will not extend beyond the array
extents :samp:`{array_start}` and :samp:`{array_start} + {array_shape}`.
If :samp:`{tile_bounds_policy}` is :data:`NO_BOUNDS`
then the returned tiles will extend beyond
the :samp:`{array_start}` and :samp:`{array_start} + {array_shape}` extend
for positive :samp:`{halo}` values. See :ref:`the-halo-parameter-examples` examples.
"""

# Master template for the ShapeSplitter.__init__ parameters; the four %s
# placeholders receive (in order) the array_start, array_itemsize, halo and
# tile_bounds_policy fragments above.
_ShapeSplitter__init__params_doc =\
    """
:type indices_or_sections: :samp:`None`, :obj:`int` or sequence of :obj:`int`
:param indices_or_sections: If an integer, indicates the number of
elements in the calculated *split* array. If a sequence, indicates
the indices (per axis) at which the splits occur.
See :ref:`splitting-by-number-of-tiles-examples` examples.
:type axis: :samp:`None`, :obj:`int` or sequence of :obj:`int`
:param axis: If an integer, indicates the axis which is to be split.
If a sequence integers, indicates the number of slices per axis,
i.e. if :samp:`{axis} = [3, 5]` then axis :samp:`0` is cut into
3 slices and axis :samp:`1` is cut into 5 slices for a total
of 15 (:samp:`3*5`) rectangular slices in the returned :samp:`(3, 5)`
shaped split.
See :ref:`splitting-by-number-of-tiles-examples` examples
and :ref:`splitting-by-per-axis-split-indices-examples` examples.
%s%s
:type tile_shape: :samp:`None` or sequence of :obj:`int`
:param tile_shape: When not :samp:`None`, specifies explicit shape for tiles.
Should be same length as :samp:`{array_shape}`.
See :ref:`splitting-by-tile-shape-examples` examples.
:type max_tile_bytes: :samp:`None` or :obj:`int`
:param max_tile_bytes: The maximum number of bytes for calculated :samp:`tile_shape`.
See :ref:`splitting-by-maximum-bytes-per-tile-examples` examples.
:type max_tile_shape: :samp:`None` or sequence of :obj:`int`
:param max_tile_shape: Per axis maximum shapes for the calculated :samp:`tile_shape`.
Only relevant when :samp:`{max_tile_bytes}` is specified. Should be same length
as :samp:`{array_shape}`.
See :ref:`splitting-by-maximum-bytes-per-tile-examples` examples.
:type sub_tile_shape: :samp:`None` or sequence of :obj:`int`
:param sub_tile_shape: When not :samp:`None`, the calculated :samp:`tile_shape` will
be an even multiple of this sub-tile shape. Only relevant when :samp:`{max_tile_bytes}`
is specified. Should be same length as :samp:`{array_shape}`.
See :ref:`splitting-by-maximum-bytes-per-tile-examples` examples.%s%s
"""

# Documents the halo parameter.
_halo_param_doc =\
    """
:type halo: :samp:`None`, :obj:`int`, sequence of :obj:`int`, or :samp:`(len({array_shape}), 2)`
shaped :obj:`numpy.ndarray`
:param halo: How tiles are extended per axis in -ve and +ve directions with *halo*
elements. See :ref:`the-halo-parameter-examples` examples.
"""
# NOTE(fix): these two policy constants were previously wrapped in
# module-level @property definitions.  The property protocol only operates
# on class attributes, so at module level the names were bound to inert
# property objects instead of the policy strings the getters were written
# to return.  Plain string constants restore the documented behaviour
# (e.g. shape_split(..., tile_bounds_policy="array_bounds")).

#: Indicates that tiles are always within the array bounds,
#: resulting in tiles which have truncated halos.
#: See :ref:`the-halo-parameter-examples` examples.
ARRAY_BOUNDS = "array_bounds"
# Module-private alias kept for backwards compatibility.
__ARRAY_BOUNDS = ARRAY_BOUNDS

#: Indicates that tiles may have halos which extend beyond the array bounds.
#: See :ref:`the-halo-parameter-examples` examples.
NO_BOUNDS = "no_bounds"
# Module-private alias kept for backwards compatibility.
__NO_BOUNDS = NO_BOUNDS
def convert_halo_to_array_form(halo, ndim):
    """
    Normalises the :samp:`{halo}` argument to a :samp:`({ndim}, 2)`
    shaped array of :obj:`numpy.int64` elements.

    :type halo: :samp:`None`, :obj:`int`, an :samp:`{ndim}` length sequence
       of :samp:`int` or :samp:`({ndim}, 2)` shaped array of :samp:`int`
    :param halo: Halo specification to be normalised.
    :type ndim: :obj:`int`
    :param ndim: Number of dimensions.
    :rtype: :obj:`numpy.ndarray`
    :return: A :samp:`({ndim}, 2)` shaped array of :obj:`numpy.int64` elements.
    :raises ValueError: if the leading dimension of the normalised array
       does not equal :samp:`{ndim}`.

    Examples::
       >>> convert_halo_to_array_form(halo=2, ndim=4)
       array([[2, 2],
              [2, 2],
              [2, 2],
              [2, 2]])
       >>> convert_halo_to_array_form(halo=[0, 1, 2], ndim=3)
       array([[0, 0],
              [1, 1],
              [2, 2]])
    """
    dtyp = _np.int64
    if halo is None:
        # No halo at all: all-zero (ndim, 2) array.
        result = _np.zeros((ndim, 2), dtype=dtyp)
    elif is_scalar(halo):
        # Same halo width on both sides of every axis.
        result = _np.zeros((ndim, 2), dtype=dtyp) + halo
    elif (ndim == 1) and (_np.array(halo).shape == (2,)):
        # 1D special case: a (lo, hi) pair for the single axis.
        result = _np.array([halo, ], copy=True, dtype=dtyp)
    elif len(_np.array(halo).shape) == 1:
        # Per-axis widths, duplicated for the -ve and +ve directions.
        result = _np.array([halo, halo], dtype=dtyp).T.copy()
    else:
        # Assumed already in (ndim, 2) form — copy and cast.
        result = _np.array(halo, copy=True, dtype=dtyp)
    if result.shape[0] != ndim:
        raise ValueError(
            "Got halo.shape=%s, expecting halo.shape=(%s, 2)"
            %
            (result.shape, ndim)
        )
    return result
class ShapeSplitter(object):
"""
Implements array shape splitting. There are three main (top-level) methods:
:meth:`__init__`
Initialisation of parameters which define the split.
:meth:`set_split_extents`
Calculates the per-axis indices for the cuts. Sets
the :attr:`split_shape`, :attr:`split_begs`
and :attr:`split_ends` attributes.
:meth:`calculate_split`
Calls :meth:`set_split_extents` followed
by :meth:`calculate_split_from_extents` to
return the :obj:`numpy.ndarray` of :obj:`tuple` elements (slices).
Example::
>>> import numpy as np
>>> ary = np.arange(0, 10)
>>> splitter = ShapeSplitter(ary.shape, 3)
>>> split = splitter.calculate_split()
>>> split.shape
(3,)
>>> split
array([(slice(0, 4, None),), (slice(4, 7, None),), (slice(7, 10, None),)],
dtype=[('0', 'O')])
>>> [ary[slyce] for slyce in split.flatten()]
[array([0, 1, 2, 3]), array([4, 5, 6]), array([7, 8, 9])]
>>>
>>> splitter.split_shape # equivalent to split.shape above
array([3])
>>> splitter.split_begs # start indices for tile extents
[array([0, 4, 7])]
>>> splitter.split_ends # stop indices for tile extents
[array([ 4, 7, 10])]
"""
#: Class attribute for :obj:`logging.Logger` logging.
logger = _logging.getLogger(__name__ + ".ShapeSplitter")
#: Class attribute indicating list of valid values for :attr:`tile_bound_policy`.
#: See :data:`ARRAY_BOUNDS` and :data:`NO_BOUNDS`.
valid_tile_bounds_policies = [ARRAY_BOUNDS, NO_BOUNDS]
def __init__(
        self,
        array_shape,
        indices_or_sections=None,
        axis=None,
        array_start=None,
        array_itemsize=1,
        tile_shape=None,
        max_tile_bytes=None,
        max_tile_shape=None,
        sub_tile_shape=None,
        halo=None,
        tile_bounds_policy=ARRAY_BOUNDS
):
    # NOTE: the full parameter docstring for this method is assigned at
    # import time (see the ShapeSplitter([0, ]).__init__.__func__.__doc__
    # assignment further down this module).

    # Initialise *private* attributes.
    self.__array_shape = None
    self.__array_start = None
    self.__array_itemsize = None
    self.__indices_per_axis = None
    self.__split_size = None
    self.__split_num_slices_per_axis = None
    self.__tile_shape = None
    self.__max_tile_bytes = None
    self.__max_tile_shape = None
    self.__sub_tile_shape = None
    self.__halo = None
    self.__tile_bounds_policy = None
    self.__tile_beg_min = None
    self.__tile_end_max = None
    self.__split_shape = None
    self.__split_begs = None
    self.__split_ends = None
    # Now set properties from arguments
    self.array_shape = _np.array(array_shape)
    if array_start is None:
        array_start = _np.zeros_like(self.array_shape)
    self.array_start = array_start
    self.array_itemsize = array_itemsize
    # Disambiguate indices_or_sections: a sequence means per-axis cut
    # indices, a scalar means a total number of tiles.
    indices_per_axis = None
    if is_indices(indices_or_sections):
        num_subarrays = None
        indices_per_axis = indices_or_sections
        if (
            ((axis is None) or is_scalar(axis))
            and
            (not _np.any([is_sequence(_e) for _e in indices_or_sections]))
        ):
            if axis is None:
                axis = 0
            # Make indices_per_axis a list of lists, so that
            # element 0 is a list of indices for axis 0
            indices_per_axis = [None, ] * len(array_shape)
            indices_per_axis[axis] = indices_or_sections
    else:
        indices_per_axis = None
        num_subarrays = indices_or_sections
    self.indices_per_axis = indices_per_axis
    self.split_size = num_subarrays
    # Interpret axis: a sequence gives per-axis slice counts directly;
    # a scalar names the single axis to cut into split_size slices.
    split_num_slices_per_axis = None
    if (self.split_size is not None) or (axis is not None):
        if axis is None:
            axis = 0
        if is_sequence(axis):
            split_num_slices_per_axis = pad_with_object(axis, len(self.array_shape), 1)
        elif self.split_size is not None:
            split_num_slices_per_axis = pad_with_object([], len(self.array_shape), 1)
            split_num_slices_per_axis[axis] = self.split_size
    self.split_num_slices_per_axis = split_num_slices_per_axis
    self.tile_shape = tile_shape
    self.max_tile_bytes = max_tile_bytes
    self.max_tile_shape = max_tile_shape
    self.sub_tile_shape = sub_tile_shape
    # Normalise halo to its canonical (ndim, 2) array form.
    halo = self.convert_halo_to_array_form(halo)
    self.halo = halo
    if tile_bounds_policy is None:
        tile_bounds_policy = ARRAY_BOUNDS
    self.tile_bounds_policy = tile_bounds_policy
    # Default tile clipping bounds: the array extents themselves
    # (update_tile_extent_bounds may revise these for NO_BOUNDS).
    self.tile_beg_min = self.array_start
    self.tile_end_max = self.array_start + self.array_shape
    # Split extents are computed lazily by set_split_extents.
    self.split_shape = None
    self.split_begs = None
    self.split_ends = None
def convert_halo_to_array_form(self, halo):
    """
    Converts the :samp:`{halo}` argument to a :samp:`(len(self.array_shape), 2)`
    shaped array (delegates to the module-level function of the same name).

    :type halo: :samp:`None`, :obj:`int`, :samp:`len(self.array_shape)` length
       sequence of :obj:`int` or :samp:`(len(self.array_shape), 2)` shaped
       array of :obj:`int`
    :param halo: Halo specification to be put in canonical array form.
    :rtype: :obj:`numpy.ndarray`
    :return: A :samp:`(len(self.array_shape), 2)` shaped array
       of :obj:`numpy.int64` elements.
    """
    # Inside the method body this name resolves to the module-level helper,
    # not to this method.
    ndim = len(self.array_shape)
    return convert_halo_to_array_form(halo, ndim=ndim)
@property
def array_shape(self):
    """
    The shape of the array which is to be split. A sequence of :obj:`int` indicating the
    per-axis sizes which are to be split.
    """
    return self.__array_shape

@array_shape.setter
def array_shape(self, array_shape):
    # Assigned as-is; __init__ converts its argument with numpy.array first.
    self.__array_shape = array_shape

@property
def array_start(self):
    """
    The start index. A sequence of :obj:`int` indicating the start of indexing for
    the tile slices. Defaults to :samp:`numpy.zeros_like({self}.array_shape)`.
    """
    return self.__array_start

@array_start.setter
def array_start(self, array_start):
    self.__array_start = array_start

@property
def array_itemsize(self):
    """
    The number of bytes per array element, see :attr:`max_tile_bytes`.
    """
    return self.__array_itemsize

@array_itemsize.setter
def array_itemsize(self, array_itemsize):
    self.__array_itemsize = array_itemsize

@property
def indices_per_axis(self):
    """
    The per-axis indices indicating the cuts for the split.
    A :obj:`list` of 1D :obj:`numpy.ndarray` objects such
    that :samp:`{self}.indices_per_axis[i]` indicates the
    cut positions for axis :samp:`i`.  A :samp:`None` entry means
    no cuts (a single full-width section) for that axis.
    """
    return self.__indices_per_axis

@indices_per_axis.setter
def indices_per_axis(self, indices_per_axis):
    self.__indices_per_axis = indices_per_axis

@property
def split_size(self):
    """
    An :obj:`int` indicating the number of tiles in the calculated split.
    """
    return self.__split_size

@split_size.setter
def split_size(self, split_size):
    self.__split_size = split_size

@property
def split_num_slices_per_axis(self):
    """
    Number of slices per axis.
    A 1D :obj:`numpy.ndarray` of :obj:`int` indicating the number of slices (sections)
    per axis, so that :samp:`{self}.split_num_slices_per_axis[i]` is an integer
    indicating the number of sections along axis :samp:`i` in the calculated split.
    """
    return self.__split_num_slices_per_axis

@split_num_slices_per_axis.setter
def split_num_slices_per_axis(self, split_num_slices_per_axis):
    self.__split_num_slices_per_axis = split_num_slices_per_axis
@property
def tile_shape(self):
    """
    The shape of all tiles in the calculated split.
    A 1D :samp:`numpy.ndarray` of :obj:`int` indicating the per-axis
    number of elements for tiles in the calculated split.
    """
    return self.__tile_shape

@tile_shape.setter
def tile_shape(self, tile_shape):
    self.__tile_shape = tile_shape

@property
def max_tile_bytes(self):
    """
    The maximum number of bytes for any tile (including :attr:`halo`) in the returned split.
    An :obj:`int` which constrains the tile shape such that any tile
    from the computed split is no bigger than :samp:`{max_tile_bytes}`.
    """
    return self.__max_tile_bytes

@max_tile_bytes.setter
def max_tile_bytes(self, max_tile_bytes):
    self.__max_tile_bytes = max_tile_bytes

@property
def max_tile_shape(self):
    """
    Per-axis maximum sizes for calculated tiles.
    A 1D :samp:`numpy.ndarray` of :obj:`int` indicating the per-axis
    maximum number of elements for tiles in the calculated split.
    """
    return self.__max_tile_shape

@max_tile_shape.setter
def max_tile_shape(self, max_tile_shape):
    self.__max_tile_shape = max_tile_shape

@property
def sub_tile_shape(self):
    """
    Calculated tile shape will be an integer multiple of this sub-tile shape.
    i.e. :samp:`(self.tile_shape[i] % self.sub_tile_shape[i]) == 0`,
    for :samp:`i in range(0, len(self.tile_shape))`.
    A 1D :samp:`numpy.ndarray` of :obj:`int` indicating sub-tile shape.
    """
    return self.__sub_tile_shape

@sub_tile_shape.setter
def sub_tile_shape(self, sub_tile_shape):
    self.__sub_tile_shape = sub_tile_shape

@property
def halo(self):
    """
    Per-axis -ve and +ve halo sizes for extending tiles to overlap with neighbouring tiles.
    A :samp:`(N, 2)` shaped array where :samp:`halo[i, 0]` is the
    negative-direction (lower) halo size and :samp:`halo[i, 1]` the
    positive-direction (upper) halo size for axis :samp:`i`.
    """
    return self.__halo

@halo.setter
def halo(self, halo):
    # Canonicalise scalar/1D/2D/None halo input to a (ndim, 2) int array.
    self.__halo = convert_halo_to_array_form(halo, ndim=self.array_shape.size)

@property
def tile_bounds_policy(self):
    """
    A string indicating whether tile halo extents can extend beyond the array domain.
    Valid values are indicated by :attr:`valid_tile_bounds_policies`.
    """
    return self.__tile_bounds_policy

@tile_bounds_policy.setter
def tile_bounds_policy(self, tile_bounds_policy):
    self.__tile_bounds_policy = tile_bounds_policy
@property
def tile_beg_min(self):
    """
    The per-axis minimum index for :attr:`slice.start`. The per-axis lower bound for
    tile start indices. A 1D :obj:`numpy.ndarray`.
    """
    return self.__tile_beg_min

@tile_beg_min.setter
def tile_beg_min(self, tile_beg_min):
    self.__tile_beg_min = tile_beg_min

@property
def tile_end_max(self):
    """
    The per-axis maximum index for :attr:`slice.stop`. The per-axis upper bound for
    tile stop indices. A 1D :obj:`numpy.ndarray`.
    """
    return self.__tile_end_max

@tile_end_max.setter
def tile_end_max(self, tile_end_max):
    self.__tile_end_max = tile_end_max

@property
def split_shape(self):
    """
    The shape of the calculated split array. Indicates the per-axis number
    of sections in the calculated split. A 1D :obj:`numpy.ndarray`.
    """
    return self.__split_shape

@split_shape.setter
def split_shape(self, split_shape):
    self.__split_shape = split_shape

@property
def split_begs(self):
    """
    The list of per-axis start indices for :obj:`slice` objects.
    A :obj:`list` of 1D :obj:`numpy.ndarray` objects indicating
    the :attr:`slice.start` index for tiles.
    """
    return self.__split_begs

@split_begs.setter
def split_begs(self, split_begs):
    self.__split_begs = split_begs

@property
def split_ends(self):
    """
    The list of per-axis stop indices for :obj:`slice` objects.
    A :obj:`list` of 1D :obj:`numpy.ndarray` objects indicating
    the :attr:`slice.stop` index for tiles.
    """
    return self.__split_ends

@split_ends.setter
def split_ends(self, split_ends):
    self.__split_ends = split_ends
def check_tile_bounds_policy(self):
    """
    Validates :attr:`tile_bounds_policy`.

    :raises ValueError: If :attr:`tile_bounds_policy` is not one of
       the values in :attr:`valid_tile_bounds_policies`.
    """
    if self.tile_bounds_policy in self.valid_tile_bounds_policies:
        return
    raise ValueError(
        "Got self.tile_bounds_policy=%s, which is not in %s."
        %
        (self.tile_bounds_policy, self.valid_tile_bounds_policies)
    )
def check_consistent_parameter_dimensions(self):
    """
    Ensure that all parameter dimensions are consistent with
    the :attr:`array_shape` dimension.

    :raises ValueError: For inconsistent parameter dimensions.
    """
    ndim = len(self.array_shape)
    # These parameters may be *shorter* than array_shape (they are padded
    # to full length elsewhere), so only reject when they are longer.
    # The original error text said "should be equal", which contradicted
    # the "greater than" check.
    for name, value in (
        ("indices_per_axis", self.indices_per_axis),
        ("split_num_slices_per_axis", self.split_num_slices_per_axis),
    ):
        if (value is not None) and (len(value) > ndim):
            raise ValueError(
                "Got len(self.%s)=%s > len(self.array_shape)=%s, should not exceed."
                %
                (name, len(value), ndim)
            )
    # These parameters must match the array_shape dimension exactly.
    # The original error text printed "%s > %s" even though the check
    # is for inequality; use "!=" in the message.
    for name, value in (
        ("tile_shape", self.tile_shape),
        ("sub_tile_shape", self.sub_tile_shape),
        ("max_tile_shape", self.max_tile_shape),
        ("array_start", self.array_start),
    ):
        if (value is not None) and (len(value) != ndim):
            raise ValueError(
                "Got len(self.%s)=%s != len(self.array_shape)=%s, should be equal."
                %
                (name, len(value), ndim)
            )
def check_consistent_parameter_grouping(self):
    """
    Ensures this object does not have conflicting groups of parameters.

    Exactly one of the groups 'indices_per_axis', 'split_size',
    'tile_shape' or 'max_tile_bytes' may be in use.

    :raises ValueError: For conflicting or absent parameters.
    """
    groups = {}
    if self.indices_per_axis is not None:
        groups["indices_per_axis"] = \
            {"self.indices_per_axis": self.indices_per_axis}
    if (self.split_size is not None) or (self.split_num_slices_per_axis is not None):
        groups["split_size"] = {
            "self.split_size": self.split_size,
            "self.split_num_slices_per_axis": self.split_num_slices_per_axis,
        }
    if self.tile_shape is not None:
        groups["tile_shape"] = {"self.tile_shape": self.tile_shape}
    if self.max_tile_bytes is not None:
        groups["max_tile_bytes"] = {"self.max_tile_bytes": self.max_tile_bytes}
    # max_tile_shape and sub_tile_shape are modifiers of the
    # max_tile_bytes group.
    if self.max_tile_shape is not None:
        groups.setdefault("max_tile_bytes", {})["self.max_tile_shape"] = self.max_tile_shape
    if self.sub_tile_shape is not None:
        groups.setdefault("max_tile_bytes", {})["self.sub_tile_shape"] = self.sub_tile_shape
    self.logger.debug("parameter_groups=%s", groups)
    if len(groups) > 1:
        group_lines = [
            ("Group %18s: " % ("'%s'" % key)) + str(groups[key])
            for key in sorted(groups.keys())
        ]
        raise ValueError(
            "Got conflicting parameter groups specified, "
            +
            "should only specify one group to define the split:\n"
            +
            "\n".join(group_lines)
        )
    if len(groups) <= 0:
        raise ValueError(
            "No split parameters specified, need parameters from one of the groups: "
            +
            "'indices_per_axis', 'split_size', 'tile_shape' or 'max_tile_bytes'"
        )
def check_split_parameters(self):
    """
    Ensures this object has a state consistent with evaluating a split.

    :raises ValueError: For conflicting or absent parameters.
    """
    # Validate the bounds policy first, then dimensional consistency,
    # then that exactly one parameter group is in use.
    for check in (
        self.check_tile_bounds_policy,
        self.check_consistent_parameter_dimensions,
        self.check_consistent_parameter_grouping,
    ):
        check()
def update_tile_extent_bounds(self):
    """
    Recomputes :attr:`tile_beg_min` and :attr:`tile_end_max` from the
    current :attr:`tile_bounds_policy`.

    Under :data:`ARRAY_BOUNDS` the tile extents are limited to the array
    domain; under :data:`NO_BOUNDS` the bounds are widened by :attr:`halo`
    so halo regions may extend outside the array extent.
    """
    if self.tile_bounds_policy == ARRAY_BOUNDS:
        self.tile_beg_min = self.array_start
        self.tile_end_max = self.array_start + self.array_shape
    elif self.tile_bounds_policy == NO_BOUNDS:
        self.tile_beg_min = self.array_start - self.halo[:, 0]
        self.tile_end_max = self.array_start + self.array_shape + self.halo[:, 1]
def set_split_extents_by_indices_per_axis(self):
    """
    Sets split shape :attr:`split_shape` and
    split extents (:attr:`split_begs` and :attr:`split_ends`)
    from values in :attr:`indices_per_axis`.

    :raises ValueError: If :attr:`indices_per_axis` is :samp:`None`.
    """
    if self.indices_per_axis is None:
        raise ValueError("Got None for self.indices_per_axis")
    self.logger.debug("self.array_shape=%s", self.array_shape)
    self.logger.debug("self.indices_per_axis=%s", self.indices_per_axis)
    # Pad to one entry per array axis (helper defined elsewhere in this
    # module; presumably fills the missing trailing entries with None).
    self.indices_per_axis = \
        pad_with_none(self.indices_per_axis, len(self.array_shape))
    # Define the start and stop indices (extents) for each axis slice
    self.split_shape = _np.ones(len(self.array_shape), dtype="int64")
    self.split_begs = [[], ] * len(self.array_shape)
    self.split_ends = [[], ] * len(self.array_shape)
    for i in range(len(self.indices_per_axis)):
        indices = self.indices_per_axis[i]
        if (indices is not None) and (len(indices) > 0):
            # n cut indices produce n + 1 sections along this axis:
            # the starts are [0] + indices, the stops are the starts
            # shifted left with the axis length appended.
            self.split_shape[i] = len(indices) + 1
            self.split_begs[i] = _np.zeros((len(indices) + 1,), dtype="int64")
            self.split_begs[i][1:] = indices
            self.split_ends[i] = _np.zeros((len(self.split_begs[i]),), dtype="int64")
            self.split_ends[i][0:-1] = self.split_begs[i][1:]
            self.split_ends[i][-1] = self.array_shape[i]
        else:
            # start and stop is the full width of the axis
            self.split_begs[i] = [0, ]
            self.split_ends[i] = [self.array_shape[i], ]
    self.logger.debug("self.indices_per_axis=%s", self.indices_per_axis)
def calculate_split_from_extents(self):
    """
    Returns split calculated using extents obtained
    from :attr:`split_begs` and :attr:`split_ends`.

    All calls to calculate the split end up here to produce
    the :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
    of :obj:`tuple`-of-:obj:`slice` elements.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
       where each element is a :obj:`tuple` of :obj:`slice` objects.
    """
    self.logger.debug("self.split_shape=%s", self.split_shape)
    self.logger.debug("self.split_begs=%s", self.split_begs)
    self.logger.debug("self.split_ends=%s", self.split_ends)
    # For each multi-index idx of the split, build a tuple of per-axis
    # slices.  The halo is only applied to non-empty extents
    # (split_ends > split_begs evaluates to 0/1) and the resulting
    # start/stop values are clamped to [tile_beg_min, tile_end_max]
    # per the tile_bounds_policy.
    ret = \
        _np.array(
            [
                tuple(
                    [
                        slice(
                            max([
                                self.split_begs[d][idx[d]]
                                + self.array_start[d]
                                - self.halo[d, 0]
                                * (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]]),
                                self.tile_beg_min[d]
                            ]),
                            min([
                                self.split_ends[d][idx[d]]
                                + self.array_start[d]
                                + self.halo[d, 1]
                                * (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]]),
                                self.tile_end_max[d]
                            ])
                        )
                        for d in range(len(self.split_shape))
                    ]
                )
                for idx in
                _np.array(
                    _np.unravel_index(
                        # numpy.product was removed in NumPy 2.0;
                        # numpy.prod is the long-standing equivalent.
                        _np.arange(0, _np.prod(self.split_shape)),
                        self.split_shape
                    )
                ).T
            ],
            dtype=[("%d" % d, "object") for d in range(len(self.split_shape))]
        ).reshape(self.split_shape)
    return ret
def calculate_split_halos_from_extents(self):
    """
    Returns :samp:`(self.ndim, 2)` shaped halo array elements indicating
    the halo for each split. Tiles on the boundary may have the halo trimmed
    to account for the :attr:`tile_bounds_policy`.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
       where each element is a :samp:`(self.ndim, 2)` shaped :obj:`numpy.ndarray`
       indicating the per-axis and per-direction number of halo elements for each tile
       in the split.
    """
    self.logger.debug("self.split_shape=%s", self.split_shape)
    self.logger.debug("self.split_begs=%s", self.split_begs)
    self.logger.debug("self.split_ends=%s", self.split_ends)
    # Per tile and axis: the lower halo is the requested halo[d, 0] trimmed
    # so the tile does not start before tile_beg_min, and the upper halo is
    # halo[d, 1] trimmed at tile_end_max.  Empty extents
    # (split_ends <= split_begs) get zero halo.
    ret = \
        _np.array(
            [
                (
                    tuple(
                        (
                            min([
                                self.split_begs[d][idx[d]] - self.tile_beg_min[d],
                                self.halo[d, 0]
                                *
                                (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]])
                            ]),
                            min([
                                self.tile_end_max[d] - self.split_ends[d][idx[d]],
                                self.halo[d, 1]
                                *
                                (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]])
                            ])
                        )
                        for d in range(len(self.split_shape))
                    )
                )
                for idx in
                _np.array(
                    _np.unravel_index(
                        # numpy.product was removed in NumPy 2.0;
                        # numpy.prod is the long-standing equivalent.
                        _np.arange(0, _np.prod(self.split_shape)),
                        self.split_shape
                    )
                ).T
            ],
            dtype=[("%d" % d, "2int64") for d in range(len(self.split_shape))]
        ).reshape(self.split_shape)
    return ret
def calculate_split_by_indices_per_axis(self):
    """
    Computes the split using extents derived from :attr:`indices_per_axis`.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
       where each element is a :obj:`tuple` of :obj:`slice` objects.
    """
    self.set_split_extents_by_indices_per_axis()
    split = self.calculate_split_from_extents()
    return split
def calculate_axis_split_extents(self, num_sections, size):
    """
    Divides :samp:`range(0, {size})` into (approximately) equal sized
    intervals. Returns :samp:`(begs, ends)` where :samp:`slice(begs[i], ends[i])`
    define the intervals for :samp:`i in range(0, {num_sections})`.

    :type num_sections: :obj:`int`
    :param num_sections: Divide :samp:`range(0, {size})` into this
       many intervals (approximately) equal sized intervals.
    :type size: :obj:`int`
    :param size: Range for the subdivision.
    :rtype: :obj:`tuple`
    :return: Two element tuple :samp:`(begs, ends)`
       such that :samp:`slice(begs[i], ends[i])` define the
       intervals for :samp:`i in range(0, {num_sections})`.
    """
    section_size = size // num_sections
    if section_size >= 1:
        begs = _np.arange(0, section_size * num_sections, section_size)
        rem = size - section_size * num_sections
        if rem > 0:
            # Distribute the remainder over the leading sections: section
            # j (j >= 1) starts min(j, rem) elements later.  This is the
            # vectorized equivalent of the original
            # "for i in range(rem): begs[i + 1:] += 1" loop, which was
            # O(num_sections * rem).
            begs[1:] += _np.minimum(_np.arange(1, num_sections), rem)
        ends = _np.zeros_like(begs)
        ends[0:-1] = begs[1:]
        ends[-1] = size
    else:
        # More sections than elements: the first `size` sections get one
        # element each, the remainder are empty (beg == end).
        begs = _np.arange(0, num_sections)
        begs[size:] = size
        ends = begs.copy()
        ends[0:-1] = begs[1:]
    return begs, ends
def set_split_extents_by_split_size(self):
    """
    Sets split shape :attr:`split_shape` and
    split extents (:attr:`split_begs` and :attr:`split_ends`)
    from values in :attr:`split_size` and :attr:`split_num_slices_per_axis`.

    :raises ValueError: If :attr:`split_size` is :samp:`None`
       and :attr:`split_num_slices_per_axis` contains :samp:`None`
       or non-positive elements.
    """
    if self.split_size is None:
        # The total number of tiles can only be inferred when every
        # per-axis slice count is a positive integer.
        if (
            _np.all([s is not None for s in self.split_num_slices_per_axis])
            and
            _np.all([s > 0 for s in self.split_num_slices_per_axis])
        ):
            # numpy.product was removed in NumPy 2.0; numpy.prod is the
            # long-standing equivalent.
            self.split_size = _np.prod(self.split_num_slices_per_axis)
        else:
            raise ValueError(
                (
                    "Got invalid self.split_num_slices_per_axis=%s, all elements "
                    +
                    "need to be integers greater than zero when self.split_size is None."
                )
                %
                self.split_num_slices_per_axis
            )
    self.logger.debug(
        "Pre cannonicalise: self.split_num_slices_per_axis=%s",
        self.split_num_slices_per_axis)
    # Canonicalise the per-axis counts so they are consistent with
    # split_size (helper defined elsewhere in this module; presumably
    # fills in unspecified axis counts).
    self.split_num_slices_per_axis = \
        calculate_num_slices_per_axis(
            self.split_num_slices_per_axis,
            self.split_size,
            self.array_shape
        )
    self.logger.debug(
        "Post cannonicalise: self.split_num_slices_per_axis=%s",
        self.split_num_slices_per_axis)
    # Define the start and stop indices (extents) for each axis slice
    self.split_shape = self.split_num_slices_per_axis.copy()
    self.split_begs = [[], ] * len(self.array_shape)
    self.split_ends = [[], ] * len(self.array_shape)
    for i in range(len(self.array_shape)):
        self.split_begs[i], self.split_ends[i] = \
            self.calculate_axis_split_extents(
                self.split_shape[i],
                self.array_shape[i]
            )
def calculate_split_by_split_size(self):
    """
    Computes the split using extents derived from :attr:`split_size`
    and :attr:`split_num_slices_per_axis`.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
       where each element is a :obj:`tuple` of :obj:`slice` objects.
    """
    self.set_split_extents_by_split_size()
    split = self.calculate_split_from_extents()
    return split
def set_split_extents_by_tile_shape(self):
    """
    Sets split shape :attr:`split_shape` and
    split extents (:attr:`split_begs` and :attr:`split_ends`)
    from the value of :attr:`tile_shape`.
    """
    shape = self.array_shape
    tile = self.tile_shape
    # Ceiling division: number of whole-or-partial tiles per axis.
    self.split_shape = ((shape - 1) // tile) + 1
    ndim = len(shape)
    self.split_begs = [[], ] * ndim
    self.split_ends = [[], ] * ndim
    for axis in range(ndim):
        begs = _np.arange(0, shape[axis], tile[axis])
        # Each stop is the next start; the final (possibly partial) tile
        # stops at the axis length.
        ends = _np.empty_like(begs)
        ends[:-1] = begs[1:]
        ends[-1] = shape[axis]
        self.split_begs[axis] = begs
        self.split_ends[axis] = ends
def calculate_split_by_tile_shape(self):
    """
    Computes the split using extents derived from :attr:`tile_shape`.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
       where each element is a :obj:`tuple` of :obj:`slice` objects.
    """
    self.set_split_extents_by_tile_shape()
    split = self.calculate_split_from_extents()
    return split
def set_split_extents_by_tile_max_bytes(self):
    """
    Sets split extents (:attr:`split_begs`
    and :attr:`split_ends`) calculated
    from :attr:`max_tile_bytes`
    (and :attr:`max_tile_shape`, :attr:`sub_tile_shape`, :attr:`halo`).
    """
    # Reduce the byte-budget problem to a tile-shape problem, then reuse
    # the tile-shape extent calculation.
    tile_shape = calculate_tile_shape_for_max_bytes(
        array_shape=self.array_shape,
        array_itemsize=self.array_itemsize,
        max_tile_bytes=self.max_tile_bytes,
        max_tile_shape=self.max_tile_shape,
        sub_tile_shape=self.sub_tile_shape,
        halo=self.halo
    )
    self.tile_shape = tile_shape
    self.set_split_extents_by_tile_shape()
def calculate_split_by_tile_max_bytes(self):
    """
    Computes the split using extents derived from :attr:`max_tile_bytes`
    (and :attr:`max_tile_shape`, :attr:`sub_tile_shape`, :attr:`halo`).

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
       where each element is a :obj:`tuple` of :obj:`slice` objects.
    """
    self.set_split_extents_by_tile_max_bytes()
    split = self.calculate_split_from_extents()
    return split
def set_split_extents(self):
    """
    Sets split extents (:attr:`split_begs`
    and :attr:`split_ends`) calculated using
    the attributes selected in :meth:`__init__`.
    """
    self.check_split_parameters()
    self.update_tile_extent_bounds()
    # Dispatch on whichever parameter group was supplied
    # (check_split_parameters guarantees at most one is in use).
    if self.indices_per_axis is not None:
        self.set_split_extents_by_indices_per_axis()
        return
    if (self.split_size is not None) or (self.split_num_slices_per_axis is not None):
        self.set_split_extents_by_split_size()
        return
    if self.tile_shape is not None:
        self.set_split_extents_by_tile_shape()
        return
    if self.max_tile_bytes is not None:
        self.set_split_extents_by_tile_max_bytes()
def calculate_split(self):
    """
    Computes the split.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
       of dimension :samp:`len({self}.array_shape)`.
       Each element of the returned array is a :obj:`tuple`
       of :samp:`len({self}.array_shape)` :obj:`slice` objects, defining
       a slice within the bounds :samp:`{self}.array_start - {self}.halo[:, 0]`
       to :samp:`{self}.array_start + {self}.array_shape + {self}.halo[:, 1]`.
    """
    self.set_split_extents()
    split = self.calculate_split_from_extents()
    return split
# Attach the assembled parameter documentation to ShapeSplitter.__init__ at
# import time.  A throw-away ShapeSplitter([0, ]) instance is created purely
# to reach the bound method's underlying function object (__func__); the
# docstring is built from the shared *_param_doc fragments so it stays
# consistent with the shape_split() and array_split() documentation.
ShapeSplitter([0, ]).__init__.__func__.__doc__ = \
"""
Initialises parameters which define a split.
%s
%s
.. seealso:: :ref:`array_split-examples`
""" % (
_array_shape_param_doc,
(
_ShapeSplitter__init__params_doc
%
(
_array_start_param_doc,
"\n" + _array_itemsize_param_doc,
_halo_param_doc,
_array_tile_bounds_policy_param_doc,
)
)
)
def shape_split(array_shape, *args, **kwargs):
    # Placeholder docstring: the full documentation is assigned to
    # shape_split.__doc__ immediately after this definition.
    "To be replaced."
    splitter = ShapeSplitter(array_shape, *args, **kwargs)
    return splitter.calculate_split()
# shape_split's placeholder docstring is replaced here so the parameter
# documentation is shared (via the *_param_doc fragments) with
# ShapeSplitter.__init__.
shape_split.__doc__ =\
"""
Splits specified :samp:`{array_shape}` in tiles, returns array of :obj:`slice` tuples.
%s
%s
:rtype: :obj:`numpy.ndarray`
:return: Array of :obj:`tuple` objects. Each :obj:`tuple` element
is a :obj:`slice` object so that each :obj:`tuple` defines
a multi-dimensional slice of an array of shape :samp:`{array_shape}`.
.. seealso:: :func:`array_split.array_split`, :meth:`array_split.ShapeSplitter`,
:ref:`array_split-examples`
""" % (
_array_shape_param_doc,
(
_ShapeSplitter__init__params_doc
%
(
_array_start_param_doc,
"\n" + _array_itemsize_param_doc,
_halo_param_doc,
_array_tile_bounds_policy_param_doc,
)
)
)
# array_split (defined earlier in this module) gets the same treatment; its
# halo documentation is rephrased in terms of the input array's shape.
array_split.__doc__ =\
"""
Splits the specified array :samp:`{ary}` into sub-arrays, returns list of :obj:`numpy.ndarray`.
:type ary: :obj:`numpy.ndarray`
:param ary: Array which is split into sub-arrays.
%s
:rtype: :obj:`list`
:return: List of :obj:`numpy.ndarray` elements, where each element is
a *slice* from :samp:`{ary}` (potentially an empty slice).
.. seealso:: :func:`array_split.shape_split`, :meth:`array_split.ShapeSplitter`,
:ref:`array_split-examples`
""" % (
_ShapeSplitter__init__params_doc
%
(
"",
"",
_halo_param_doc.replace("len({array_shape})", "len({ary}.shape)"),
""
)
)
# Export every public (non-underscore) name defined above.
__all__ = [s for s in dir() if not s.startswith('_')]
|
array-split/array_split | array_split/split.py | ShapeSplitter.convert_halo_to_array_form | python | def convert_halo_to_array_form(self, halo):
return convert_halo_to_array_form(halo=halo, ndim=len(self.array_shape)) | Converts the :samp:`{halo}` argument to a :samp:`({self}.array_shape.size, 2)`
shaped array.
:type halo: :samp:`None`, :obj:`int`, :samp:`self.array_shape.size` length sequence
of :samp:`int` or :samp:`(self.array_shape.size, 2)` shaped array
of :samp:`int`
:param halo: Halo to be converted to :samp:`(len(self.array_shape), 2)` shaped array form.
:rtype: :obj:`numpy.ndarray`
:return: A :samp:`(len(self.array_shape), 2)` shaped array of :obj:`numpy.int64` elements. | train | https://github.com/array-split/array_split/blob/e07abe3001209394dde809f7e6f505f9f49a1c26/array_split/split.py#L795-L807 | [
"def convert_halo_to_array_form(halo, ndim):\n \"\"\"\n Converts the :samp:`{halo}` argument to a :samp:`(ndim, 2)`\n shaped array.\n\n :type halo: :samp:`None`, :obj:`int`, an :samp:`{ndim}` length sequence\n of :samp:`int` or :samp:`({ndim}, 2)` shaped array\n of :samp:`int`\n :param ... | class ShapeSplitter(object):
"""
Implements array shape splitting. There are three main (top-level) methods:
:meth:`__init__`
Initialisation of parameters which define the split.
:meth:`set_split_extents`
Calculates the per-axis indices for the cuts. Sets
the :attr:`split_shape`, :attr:`split_begs`
and :attr:`split_ends` attributes.
:meth:`calculate_split`
Calls :meth:`set_split_extents` followed
by :meth:`calculate_split_from_extents` to
return the :obj:`numpy.ndarray` of :obj:`tuple` elements (slices).
Example::
>>> import numpy as np
>>> ary = np.arange(0, 10)
>>> splitter = ShapeSplitter(ary.shape, 3)
>>> split = splitter.calculate_split()
>>> split.shape
(3,)
>>> split
array([(slice(0, 4, None),), (slice(4, 7, None),), (slice(7, 10, None),)],
dtype=[('0', 'O')])
>>> [ary[slyce] for slyce in split.flatten()]
[array([0, 1, 2, 3]), array([4, 5, 6]), array([7, 8, 9])]
>>>
>>> splitter.split_shape # equivalent to split.shape above
array([3])
>>> splitter.split_begs # start indices for tile extents
[array([0, 4, 7])]
>>> splitter.split_ends # stop indices for tile extents
[array([ 4, 7, 10])]
"""
#: Class attribute for :obj:`logging.Logger` logging.
logger = _logging.getLogger(__name__ + ".ShapeSplitter")
#: Class attribute indicating list of valid values for :attr:`tile_bound_policy`.
#: See :data:`ARRAY_BOUNDS` and :data:`NO_BOUNDS`.
valid_tile_bounds_policies = [ARRAY_BOUNDS, NO_BOUNDS]
def __init__(
self,
array_shape,
indices_or_sections=None,
axis=None,
array_start=None,
array_itemsize=1,
tile_shape=None,
max_tile_bytes=None,
max_tile_shape=None,
sub_tile_shape=None,
halo=None,
tile_bounds_policy=ARRAY_BOUNDS
):
# Initialise *private* attributes.
self.__array_shape = None
self.__array_start = None
self.__array_itemsize = None
self.__indices_per_axis = None
self.__split_size = None
self.__split_num_slices_per_axis = None
self.__tile_shape = None
self.__max_tile_bytes = None
self.__max_tile_shape = None
self.__sub_tile_shape = None
self.__halo = None
self.__tile_bounds_policy = None
self.__tile_beg_min = None
self.__tile_end_max = None
self.__split_shape = None
self.__split_begs = None
self.__split_ends = None
# Now set properties from arguments
self.array_shape = _np.array(array_shape)
if array_start is None:
array_start = _np.zeros_like(self.array_shape)
self.array_start = array_start
self.array_itemsize = array_itemsize
indices_per_axis = None
if is_indices(indices_or_sections):
num_subarrays = None
indices_per_axis = indices_or_sections
if (
((axis is None) or is_scalar(axis))
and
(not _np.any([is_sequence(_e) for _e in indices_or_sections]))
):
if axis is None:
axis = 0
# Make indices_per_axis a list of lists, so that
# element 0 is a list of indices for axis 0
indices_per_axis = [None, ] * len(array_shape)
indices_per_axis[axis] = indices_or_sections
else:
indices_per_axis = None
num_subarrays = indices_or_sections
self.indices_per_axis = indices_per_axis
self.split_size = num_subarrays
split_num_slices_per_axis = None
if (self.split_size is not None) or (axis is not None):
if axis is None:
axis = 0
if is_sequence(axis):
split_num_slices_per_axis = pad_with_object(axis, len(self.array_shape), 1)
elif self.split_size is not None:
split_num_slices_per_axis = pad_with_object([], len(self.array_shape), 1)
split_num_slices_per_axis[axis] = self.split_size
self.split_num_slices_per_axis = split_num_slices_per_axis
self.tile_shape = tile_shape
self.max_tile_bytes = max_tile_bytes
self.max_tile_shape = max_tile_shape
self.sub_tile_shape = sub_tile_shape
halo = self.convert_halo_to_array_form(halo)
self.halo = halo
if tile_bounds_policy is None:
tile_bounds_policy = ARRAY_BOUNDS
self.tile_bounds_policy = tile_bounds_policy
self.tile_beg_min = self.array_start
self.tile_end_max = self.array_start + self.array_shape
self.split_shape = None
self.split_begs = None
self.split_ends = None
@property
def array_shape(self):
"""
The shape of the array which is to be split. A sequence of :obj:`int` indicating the
per-axis sizes which are to be split.
"""
return self.__array_shape
@array_shape.setter
def array_shape(self, array_shape):
self.__array_shape = array_shape
@property
def array_start(self):
"""
The start index. A sequence of :obj:`int` indicating the start of indexing for
the tile slices. Defaults to :samp:`numpy.zeros_like({self}.array_shape)`.
"""
return self.__array_start
@array_start.setter
def array_start(self, array_start):
self.__array_start = array_start
@property
def array_itemsize(self):
"""
The number of bytes per array element, see :attr:`max_tile_bytes`.
"""
return self.__array_itemsize
@array_itemsize.setter
def array_itemsize(self, array_itemsize):
self.__array_itemsize = array_itemsize
@property
def indices_per_axis(self):
"""
The per-axis indices indicating the cuts for the split.
A :obj:`list` of 1D :obj:`numpy.ndarray` objects such
that :samp:`{self}.indices_per_axis[i]` indicates the
cut positions for axis :samp:`i`.
"""
return self.__indices_per_axis
@indices_per_axis.setter
def indices_per_axis(self, indices_per_axis):
self.__indices_per_axis = indices_per_axis
@property
def split_size(self):
"""
An :obj:`int` indicating the number of tiles in the calculated split.
"""
return self.__split_size
@split_size.setter
def split_size(self, split_size):
self.__split_size = split_size
@property
def split_num_slices_per_axis(self):
"""
Number of slices per axis.
A 1D :obj:`numpy.ndarray` of :obj:`int` indicating the number of slices (sections)
per axis, so that :samp:`{self}.split_num_slices_per_axis[i]` is an integer
indicating the number of sections along axis :samp:`i` in the calculated split.
"""
return self.__split_num_slices_per_axis
@split_num_slices_per_axis.setter
def split_num_slices_per_axis(self, split_num_slices_per_axis):
self.__split_num_slices_per_axis = split_num_slices_per_axis
@property
def tile_shape(self):
"""
The shape of all tiles in the calculated split.
A 1D :samp:`numpy.ndarray` of :obj:`int` indicating the per-axis
number of elements for tiles in the calculated split.
"""
return self.__tile_shape
# NOTE: the following accessors are plain pass-throughs to the
# name-mangled backing attributes initialised in __init__.
@tile_shape.setter
def tile_shape(self, tile_shape):
self.__tile_shape = tile_shape
@property
def max_tile_bytes(self):
"""
The maximum number of bytes for any tile (including :attr:`halo`) in the returned split.
An :obj:`int` which constrains the tile shape such that any tile
from the computed split is no bigger than :samp:`{max_tile_bytes}`.
"""
return self.__max_tile_bytes
@max_tile_bytes.setter
def max_tile_bytes(self, max_tile_bytes):
self.__max_tile_bytes = max_tile_bytes
@property
def max_tile_shape(self):
"""
Per-axis maximum sizes for calculated tiles.
A 1D :samp:`numpy.ndarray` of :obj:`int` indicating the per-axis
maximum number of elements for tiles in the calculated split.
"""
return self.__max_tile_shape
@max_tile_shape.setter
def max_tile_shape(self, max_tile_shape):
self.__max_tile_shape = max_tile_shape
@property
def sub_tile_shape(self):
"""
Calculated tile shape will be an integer multiple of this sub-tile shape.
i.e. :samp:`(self.tile_shape[i] % self.sub_tile_shape[i]) == 0`,
for :samp:`i in range(0, len(self.tile_shape))`.
A 1D :samp:`numpy.ndarray` of :obj:`int` indicating sub-tile shape.
"""
return self.__sub_tile_shape
@sub_tile_shape.setter
def sub_tile_shape(self, sub_tile_shape):
self.__sub_tile_shape = sub_tile_shape
@property
def halo(self):
"""
Per-axis -ve and +ve halo sizes for extending tiles to overlap with neighbouring tiles.
A :samp:`(N, 2)` shaped array where :samp:`halo[:, 0]` gives the
per-axis -ve direction halo sizes and :samp:`halo[:, 1]` gives the
per-axis +ve direction halo sizes.
"""
return self.__halo
@halo.setter
def halo(self, halo):
# Canonicalise None/scalar/1D/2D forms to a (ndim, 2) shaped int array.
self.__halo = convert_halo_to_array_form(halo, ndim=self.array_shape.size)
@property
def tile_bounds_policy(self):
"""
A string indicating whether tile halo extents can extend beyond the array domain.
Valid values are indicated by :attr:`valid_tile_bounds_policies`.
"""
return self.__tile_bounds_policy
@tile_bounds_policy.setter
def tile_bounds_policy(self, tile_bounds_policy):
self.__tile_bounds_policy = tile_bounds_policy
@property
def tile_beg_min(self):
"""
The per-axis minimum index for :attr:`slice.start`. The per-axis lower bound for
tile start indices. A 1D :obj:`numpy.ndarray`.
"""
return self.__tile_beg_min
@tile_beg_min.setter
def tile_beg_min(self, tile_beg_min):
self.__tile_beg_min = tile_beg_min
@property
def tile_end_max(self):
"""
The per-axis maximum index for :attr:`slice.stop`. The per-axis upper bound for
tile stop indices. A 1D :obj:`numpy.ndarray`.
"""
return self.__tile_end_max
@tile_end_max.setter
def tile_end_max(self, tile_end_max):
self.__tile_end_max = tile_end_max
@property
def split_shape(self):
"""
The shape of the calculated split array. Indicates the per-axis number
of sections in the calculated split. A 1D :obj:`numpy.ndarray`.
"""
return self.__split_shape
@split_shape.setter
def split_shape(self, split_shape):
self.__split_shape = split_shape
@property
def split_begs(self):
"""
The list of per-axis start indices for :obj:`slice` objects.
A :obj:`list` of 1D :obj:`numpy.ndarray` objects indicating
the :attr:`slice.start` index for tiles.
"""
return self.__split_begs
@split_begs.setter
def split_begs(self, split_begs):
self.__split_begs = split_begs
@property
def split_ends(self):
"""
The list of per-axis stop indices for :obj:`slice` objects.
A :obj:`list` of 1D :obj:`numpy.ndarray` objects indicating
the :attr:`slice.stop` index for tiles.
"""
return self.__split_ends
@split_ends.setter
def split_ends(self, split_ends):
self.__split_ends = split_ends
def check_tile_bounds_policy(self):
    """
    Validates the :attr:`tile_bounds_policy` value.

    :raises ValueError: If :attr:`tile_bounds_policy` is not one of the
       values listed in :attr:`valid_tile_bounds_policies`.
    """
    policy = self.tile_bounds_policy
    valid = self.valid_tile_bounds_policies
    if policy not in valid:
        raise ValueError(
            "Got self.tile_bounds_policy=%s, which is not in %s." % (policy, valid)
        )
def check_consistent_parameter_dimensions(self):
    """
    Ensure that all parameter dimensions are consistent with
    the :attr:`array_shape` dimension.

    :raises ValueError: For inconsistent parameter dimensions.
    """
    ndim = len(self.array_shape)
    # Each entry is (attribute name, value, longer_only).  The first two
    # parameters may legitimately be *shorter* than ndim (they are padded
    # later), so only a greater length is an error for them; the rest
    # must match ndim exactly.
    checks = (
        ("indices_per_axis", self.indices_per_axis, True),
        ("split_num_slices_per_axis", self.split_num_slices_per_axis, True),
        ("tile_shape", self.tile_shape, False),
        ("sub_tile_shape", self.sub_tile_shape, False),
        ("max_tile_shape", self.max_tile_shape, False),
        ("array_start", self.array_start, False),
    )
    for name, value, longer_only in checks:
        if value is None:
            continue
        inconsistent = (len(value) > ndim) if longer_only else (len(value) != ndim)
        if inconsistent:
            raise ValueError(
                "Got len(self.%s)=%s > len(self.array_shape)=%s, should be equal."
                % (name, len(value), ndim)
            )
def check_consistent_parameter_grouping(self):
"""
Ensures this object does not have conflicting groups of parameters.
:raises ValueError: For conflicting or absent parameters.
"""
# Collect the mutually-exclusive parameter groups which have at
# least one non-None member; a valid state has exactly one group.
parameter_groups = {}
if self.indices_per_axis is not None:
parameter_groups["indices_per_axis"] = \
{"self.indices_per_axis": self.indices_per_axis}
if (self.split_size is not None) or (self.split_num_slices_per_axis is not None):
parameter_groups["split_size"] = \
{
"self.split_size": self.split_size,
"self.split_num_slices_per_axis": self.split_num_slices_per_axis,
}
if self.tile_shape is not None:
parameter_groups["tile_shape"] = \
{"self.tile_shape": self.tile_shape}
if self.max_tile_bytes is not None:
parameter_groups["max_tile_bytes"] = \
{"self.max_tile_bytes": self.max_tile_bytes}
# max_tile_shape and sub_tile_shape are modifiers of the
# "max_tile_bytes" group rather than groups of their own.
if self.max_tile_shape is not None:
if "max_tile_bytes" not in parameter_groups.keys():
parameter_groups["max_tile_bytes"] = {}
parameter_groups["max_tile_bytes"]["self.max_tile_shape"] = self.max_tile_shape
if self.sub_tile_shape is not None:
if "max_tile_bytes" not in parameter_groups.keys():
parameter_groups["max_tile_bytes"] = {}
parameter_groups["max_tile_bytes"]["self.sub_tile_shape"] = self.sub_tile_shape
self.logger.debug("parameter_groups=%s", parameter_groups)
# More than one group specified: the split definition is ambiguous.
if len(parameter_groups.keys()) > 1:
group_keys = sorted(parameter_groups.keys())
raise ValueError(
"Got conflicting parameter groups specified, "
+
"should only specify one group to define the split:\n"
+
(
"\n".join(
[
(
("Group %18s: " % ("'%s'" % group_key))
+
str(parameter_groups[group_key])
)
for group_key in group_keys
]
)
)
)
# No group specified at all: there is nothing to define the split.
if len(parameter_groups.keys()) <= 0:
raise ValueError(
"No split parameters specified, need parameters from one of the groups: "
+
"'indices_per_axis', 'split_size', 'tile_shape' or 'max_tile_bytes'"
)
def check_split_parameters(self):
    """
    Ensures this object has a state consistent with evaluating a split.

    :raises ValueError: For conflicting or absent parameters.
    """
    # Run the individual validation passes in order: bounds policy,
    # then per-parameter dimensions, then parameter grouping.
    for validate in (
        self.check_tile_bounds_policy,
        self.check_consistent_parameter_dimensions,
        self.check_consistent_parameter_grouping,
    ):
        validate()
def update_tile_extent_bounds(self):
    """
    Updates the :attr:`tile_beg_min` and :attr:`tile_end_max`
    data members according to :attr:`tile_bounds_policy`.
    """
    lo = self.array_start
    hi = self.array_start + self.array_shape
    if self.tile_bounds_policy == NO_BOUNDS:
        # Halo regions are allowed to extend outside the array domain.
        lo = lo - self.halo[:, 0]
        hi = hi + self.halo[:, 1]
        self.tile_beg_min = lo
        self.tile_end_max = hi
    elif self.tile_bounds_policy == ARRAY_BOUNDS:
        # Tiles (including halo) are clamped to the array domain.
        self.tile_beg_min = lo
        self.tile_end_max = hi
def set_split_extents_by_indices_per_axis(self):
"""
Sets split shape :attr:`split_shape` and
split extents (:attr:`split_begs` and :attr:`split_ends`)
from values in :attr:`indices_per_axis`.
:raises ValueError: If :attr:`indices_per_axis` is :samp:`None`.
"""
if self.indices_per_axis is None:
raise ValueError("Got None for self.indices_per_axis")
self.logger.debug("self.array_shape=%s", self.array_shape)
self.logger.debug("self.indices_per_axis=%s", self.indices_per_axis)
# Pad with None so there is an (possibly empty) entry for every axis.
self.indices_per_axis = \
pad_with_none(self.indices_per_axis, len(self.array_shape))
# Define the start and stop indices (extents) for each axis slice
self.split_shape = _np.ones(len(self.array_shape), dtype="int64")
self.split_begs = [[], ] * len(self.array_shape)
self.split_ends = [[], ] * len(self.array_shape)
for i in range(len(self.indices_per_axis)):
indices = self.indices_per_axis[i]
if (indices is not None) and (len(indices) > 0):
# N cut indices produce N+1 sections along this axis:
# begs[0] stays 0 and the cut indices become the other starts;
# each section ends where the next one begins.
self.split_shape[i] = len(indices) + 1
self.split_begs[i] = _np.zeros((len(indices) + 1,), dtype="int64")
self.split_begs[i][1:] = indices
self.split_ends[i] = _np.zeros((len(self.split_begs[i]),), dtype="int64")
self.split_ends[i][0:-1] = self.split_begs[i][1:]
self.split_ends[i][-1] = self.array_shape[i]
else:
# start and stop is the full width of the axis
self.split_begs[i] = [0, ]
self.split_ends[i] = [self.array_shape[i], ]
self.logger.debug("self.indices_per_axis=%s", self.indices_per_axis)
def calculate_split_from_extents(self):
    """
    Returns split calculated using extents obtained
    from :attr:`split_begs` and :attr:`split_ends`.
    All calls to calculate the split end up here to produce
    the :mod:`numpy` structured array of :obj:`tuple`-of-:obj:`slice`
    elements.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` structured array (of shape :attr:`split_shape`)
       where each element is a :obj:`tuple` of :obj:`slice` objects.
    """
    self.logger.debug("self.split_shape=%s", self.split_shape)
    self.logger.debug("self.split_begs=%s", self.split_begs)
    self.logger.debug("self.split_ends=%s", self.split_ends)
    ndim = len(self.split_shape)
    # Enumerate all tile multi-indices (C order).  Use _np.prod here:
    # numpy.product was deprecated in NumPy 1.25 and removed in 2.0.
    all_indices = \
        _np.array(
            _np.unravel_index(
                _np.arange(0, _np.prod(self.split_shape)),
                self.split_shape
            )
        ).T
    ret = \
        _np.array(
            [
                tuple(
                    [
                        slice(
                            # Start: extend by the -ve halo (only for
                            # non-empty tiles), clamped to tile_beg_min.
                            max([
                                self.split_begs[d][idx[d]]
                                + self.array_start[d]
                                - self.halo[d, 0]
                                * (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]]),
                                self.tile_beg_min[d]
                            ]),
                            # Stop: extend by the +ve halo (only for
                            # non-empty tiles), clamped to tile_end_max.
                            min([
                                self.split_ends[d][idx[d]]
                                + self.array_start[d]
                                + self.halo[d, 1]
                                * (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]]),
                                self.tile_end_max[d]
                            ])
                        )
                        for d in range(ndim)
                    ]
                )
                for idx in all_indices
            ],
            dtype=[("%d" % d, "object") for d in range(ndim)]
        ).reshape(self.split_shape)
    return ret
def calculate_split_halos_from_extents(self):
    """
    Returns :samp:`(self.ndim, 2)` shaped halo array elements indicating
    the halo for each split. Tiles on the boundary may have the halo
    trimmed to account for the :attr:`tile_bounds_policy`.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` structured array (of shape :attr:`split_shape`)
       where each element is a :samp:`(self.ndim, 2)` shaped
       :obj:`numpy.ndarray` indicating the per-axis and per-direction
       number of halo elements for each tile in the split.
    """
    self.logger.debug("self.split_shape=%s", self.split_shape)
    self.logger.debug("self.split_begs=%s", self.split_begs)
    self.logger.debug("self.split_ends=%s", self.split_ends)
    ndim = len(self.split_shape)
    # Enumerate all tile multi-indices (C order).  Use _np.prod here:
    # numpy.product was deprecated in NumPy 1.25 and removed in 2.0.
    all_indices = \
        _np.array(
            _np.unravel_index(
                _np.arange(0, _np.prod(self.split_shape)),
                self.split_shape
            )
        ).T
    ret = \
        _np.array(
            [
                tuple(
                    (
                        # -ve direction: requested halo (zero for empty
                        # tiles) clamped by the room below the tile start.
                        min([
                            self.split_begs[d][idx[d]] - self.tile_beg_min[d],
                            self.halo[d, 0]
                            *
                            (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]])
                        ]),
                        # +ve direction: requested halo clamped by the
                        # room above the tile stop.
                        min([
                            self.tile_end_max[d] - self.split_ends[d][idx[d]],
                            self.halo[d, 1]
                            *
                            (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]])
                        ])
                    )
                    for d in range(ndim)
                )
                for idx in all_indices
            ],
            dtype=[("%d" % d, "2int64") for d in range(ndim)]
        ).reshape(self.split_shape)
    return ret
def calculate_split_by_indices_per_axis(self):
    """
    Computes the split using the cut positions given
    in :attr:`indices_per_axis`.

    :rtype: :obj:`numpy.ndarray`
    :return: Structured array whose elements
       are :obj:`tuple`-of-:obj:`slice` objects.
    """
    # Populate the per-axis extents from the cut indices, then build
    # the structured slice array from those extents.
    self.set_split_extents_by_indices_per_axis()
    return self.calculate_split_from_extents()
def calculate_axis_split_extents(self, num_sections, size):
    """
    Divides :samp:`range(0, {size})` into (approximately) equal sized
    intervals. Returns :samp:`(begs, ends)` where
    :samp:`slice(begs[i], ends[i])` define the intervals
    for :samp:`i in range(0, {num_sections})`.

    :type num_sections: :obj:`int`
    :param num_sections: Divide :samp:`range(0, {size})` into this
       many (approximately) equal sized intervals.
    :type size: :obj:`int`
    :param size: Range for the subdivision.
    :rtype: :obj:`tuple`
    :return: Two element tuple :samp:`(begs, ends)`
       such that :samp:`slice(begs[i], ends[i])` define the
       intervals for :samp:`i in range(0, {num_sections})`.
    """
    section_size = size // num_sections
    if section_size >= 1:
        begs = _np.arange(0, section_size * num_sections, section_size)
        rem = size - section_size * num_sections
        if rem > 0:
            # Give the first `rem` sections one extra element each;
            # element j is shifted by min(j, rem).  Vectorised form of
            # the O(rem * n) loop ``for i in range(rem): begs[i+1:] += 1``.
            begs += _np.minimum(_np.arange(num_sections), rem)
        ends = _np.zeros_like(begs)
        ends[0:-1] = begs[1:]
        ends[-1] = size
    else:
        # More sections than elements: the trailing sections are empty
        # (beg == end == size).
        begs = _np.arange(0, num_sections)
        begs[size:] = size
        ends = begs.copy()
        ends[0:-1] = begs[1:]
    return begs, ends
def set_split_extents_by_split_size(self):
    """
    Sets split shape :attr:`split_shape` and
    split extents (:attr:`split_begs` and :attr:`split_ends`)
    from values in :attr:`split_size` and :attr:`split_num_slices_per_axis`.

    :raises ValueError: If :attr:`split_size` is :samp:`None`
       and :attr:`split_num_slices_per_axis` contains :samp:`None`
       or non-positive entries.
    """
    if self.split_size is None:
        if (
            _np.all([s is not None for s in self.split_num_slices_per_axis])
            and
            _np.all([s > 0 for s in self.split_num_slices_per_axis])
        ):
            # Total number of tiles is the product of the per-axis
            # section counts.  Use _np.prod: numpy.product was
            # deprecated in NumPy 1.25 and removed in 2.0.
            self.split_size = _np.prod(self.split_num_slices_per_axis)
        else:
            raise ValueError(
                (
                    "Got invalid self.split_num_slices_per_axis=%s, all elements "
                    +
                    "need to be integers greater than zero when self.split_size is None."
                )
                %
                self.split_num_slices_per_axis
            )
    self.logger.debug(
        "Pre cannonicalise: self.split_num_slices_per_axis=%s",
        self.split_num_slices_per_axis)
    # Resolve any unspecified per-axis counts so their product
    # equals split_size.
    self.split_num_slices_per_axis = \
        calculate_num_slices_per_axis(
            self.split_num_slices_per_axis,
            self.split_size,
            self.array_shape
        )
    self.logger.debug(
        "Post cannonicalise: self.split_num_slices_per_axis=%s",
        self.split_num_slices_per_axis)
    # Define the start and stop indices (extents) for each axis slice.
    self.split_shape = self.split_num_slices_per_axis.copy()
    self.split_begs = [[], ] * len(self.array_shape)
    self.split_ends = [[], ] * len(self.array_shape)
    for i in range(len(self.array_shape)):
        self.split_begs[i], self.split_ends[i] = \
            self.calculate_axis_split_extents(
                self.split_shape[i],
                self.array_shape[i]
            )
def calculate_split_by_split_size(self):
    """
    Computes the split using the number-of-sections parameters
    :attr:`split_size` and :attr:`split_num_slices_per_axis`.

    :rtype: :obj:`numpy.ndarray`
    :return: Structured array whose elements
       are :obj:`tuple`-of-:obj:`slice` objects.
    """
    # Populate the per-axis extents from the section counts, then
    # build the structured slice array from those extents.
    self.set_split_extents_by_split_size()
    return self.calculate_split_from_extents()
def set_split_extents_by_tile_shape(self):
    """
    Sets split shape :attr:`split_shape` and
    split extents (:attr:`split_begs` and :attr:`split_ends`)
    from the value of :attr:`tile_shape`.
    """
    # Number of tiles per axis is the ceiling of array_shape/tile_shape.
    self.split_shape = (self.array_shape - 1) // self.tile_shape + 1
    begs_per_axis = []
    ends_per_axis = []
    for axis_size, tile_size in zip(self.array_shape, self.tile_shape):
        begs = _np.arange(0, axis_size, tile_size)
        ends = _np.zeros_like(begs)
        # Each tile ends where the next begins; the last tile is
        # (possibly) truncated at the end of the axis.
        ends[0:-1] = begs[1:]
        ends[-1] = axis_size
        begs_per_axis.append(begs)
        ends_per_axis.append(ends)
    self.split_begs = begs_per_axis
    self.split_ends = ends_per_axis
def calculate_split_by_tile_shape(self):
    """
    Computes the split using the fixed per-tile
    shape :attr:`tile_shape`.

    :rtype: :obj:`numpy.ndarray`
    :return: Structured array whose elements
       are :obj:`tuple`-of-:obj:`slice` objects.
    """
    # Populate the per-axis extents from the tile shape, then build
    # the structured slice array from those extents.
    self.set_split_extents_by_tile_shape()
    return self.calculate_split_from_extents()
def set_split_extents_by_tile_max_bytes(self):
"""
Sets split extents (:attr:`split_begs`
and :attr:`split_ends`) calculated
from :attr:`max_tile_bytes`
(and :attr:`max_tile_shape`, :attr:`sub_tile_shape`, :attr:`halo`).
"""
# Derive a tile shape which satisfies the byte budget, then reuse
# the tile-shape based extent calculation.
self.tile_shape = \
calculate_tile_shape_for_max_bytes(
array_shape=self.array_shape,
array_itemsize=self.array_itemsize,
max_tile_bytes=self.max_tile_bytes,
max_tile_shape=self.max_tile_shape,
sub_tile_shape=self.sub_tile_shape,
halo=self.halo
)
self.set_split_extents_by_tile_shape()
def calculate_split_by_tile_max_bytes(self):
    """
    Computes the split using the byte budget :attr:`max_tile_bytes`
    (and :attr:`max_tile_shape`, :attr:`sub_tile_shape`, :attr:`halo`).

    :rtype: :obj:`numpy.ndarray`
    :return: Structured array whose elements
       are :obj:`tuple`-of-:obj:`slice` objects.
    """
    # Populate the per-axis extents from the byte budget, then build
    # the structured slice array from those extents.
    self.set_split_extents_by_tile_max_bytes()
    return self.calculate_split_from_extents()
def set_split_extents(self):
    """
    Sets split extents (:attr:`split_begs` and :attr:`split_ends`)
    using whichever parameter group was supplied at construction.
    """
    self.check_split_parameters()
    self.update_tile_extent_bounds()
    # Dispatch on the (single) specified parameter group; only the
    # first matching strategy runs.
    strategies = (
        (self.indices_per_axis is not None,
         self.set_split_extents_by_indices_per_axis),
        ((self.split_size is not None)
         or (self.split_num_slices_per_axis is not None),
         self.set_split_extents_by_split_size),
        (self.tile_shape is not None,
         self.set_split_extents_by_tile_shape),
        (self.max_tile_bytes is not None,
         self.set_split_extents_by_tile_max_bytes),
    )
    for selected, strategy in strategies:
        if selected:
            strategy()
            break
def calculate_split(self):
    """
    Computes the split.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` structured array of
       dimension :samp:`len({self}.array_shape)` whose elements
       are :obj:`tuple` objects
       of :samp:`len({self}.array_shape)` :obj:`slice` objects.  Each
       tuple defines a slice within the
       bounds :samp:`{self}.array_start - {self}.halo[:, 0]`
       to :samp:`{self}.array_start + {self}.array_shape + {self}.halo[:, 1]`.
    """
    # Determine the per-axis extents first, then materialise the
    # structured slice array from them.
    self.set_split_extents()
    return self.calculate_split_from_extents()
|
array-split/array_split | array_split/split.py | ShapeSplitter.check_tile_bounds_policy | python | def check_tile_bounds_policy(self):
if self.tile_bounds_policy not in self.valid_tile_bounds_policies:
raise ValueError(
"Got self.tile_bounds_policy=%s, which is not in %s."
%
(self.tile_bounds_policy, self.valid_tile_bounds_policies)
) | Raises :obj:`ValueError` if :attr:`tile_bounds_policy`
is not in :samp:`[{self}.ARRAY_BOUNDS, {self}.NO_BOUNDS]`. | train | https://github.com/array-split/array_split/blob/e07abe3001209394dde809f7e6f505f9f49a1c26/array_split/split.py#L1022-L1032 | null | class ShapeSplitter(object):
"""
Implements array shape splitting. There are three main (top-level) methods:
:meth:`__init__`
Initialisation of parameters which define the split.
:meth:`set_split_extents`
Calculates the per-axis indices for the cuts. Sets
the :attr:`split_shape`, :attr:`split_begs`
and :attr:`split_ends` attributes.
:meth:`calculate_split`
Calls :meth:`set_split_extents` followed
by :meth:`calculate_split_from_extents` to
return the :obj:`numpy.ndarray` of :obj:`tuple` elements (slices).
Example::
>>> import numpy as np
>>> ary = np.arange(0, 10)
>>> splitter = ShapeSplitter(ary.shape, 3)
>>> split = splitter.calculate_split()
>>> split.shape
(3,)
>>> split
array([(slice(0, 4, None),), (slice(4, 7, None),), (slice(7, 10, None),)],
dtype=[('0', 'O')])
>>> [ary[slyce] for slyce in split.flatten()]
[array([0, 1, 2, 3]), array([4, 5, 6]), array([7, 8, 9])]
>>>
>>> splitter.split_shape # equivalent to split.shape above
array([3])
>>> splitter.split_begs # start indices for tile extents
[array([0, 4, 7])]
>>> splitter.split_ends # stop indices for tile extents
[array([ 4, 7, 10])]
"""
#: Class attribute for :obj:`logging.Logger` logging.
logger = _logging.getLogger(__name__ + ".ShapeSplitter")
#: Class attribute indicating list of valid values for :attr:`tile_bound_policy`.
#: See :data:`ARRAY_BOUNDS` and :data:`NO_BOUNDS`.
valid_tile_bounds_policies = [ARRAY_BOUNDS, NO_BOUNDS]
def __init__(
self,
array_shape,
indices_or_sections=None,
axis=None,
array_start=None,
array_itemsize=1,
tile_shape=None,
max_tile_bytes=None,
max_tile_shape=None,
sub_tile_shape=None,
halo=None,
tile_bounds_policy=ARRAY_BOUNDS
):
# Initialise *private* attributes.
self.__array_shape = None
self.__array_start = None
self.__array_itemsize = None
self.__indices_per_axis = None
self.__split_size = None
self.__split_num_slices_per_axis = None
self.__tile_shape = None
self.__max_tile_bytes = None
self.__max_tile_shape = None
self.__sub_tile_shape = None
self.__halo = None
self.__tile_bounds_policy = None
self.__tile_beg_min = None
self.__tile_end_max = None
self.__split_shape = None
self.__split_begs = None
self.__split_ends = None
# Now set properties from arguments
self.array_shape = _np.array(array_shape)
if array_start is None:
array_start = _np.zeros_like(self.array_shape)
self.array_start = array_start
self.array_itemsize = array_itemsize
indices_per_axis = None
if is_indices(indices_or_sections):
num_subarrays = None
indices_per_axis = indices_or_sections
if (
((axis is None) or is_scalar(axis))
and
(not _np.any([is_sequence(_e) for _e in indices_or_sections]))
):
if axis is None:
axis = 0
# Make indices_per_axis a list of lists, so that
# element 0 is a list of indices for axis 0
indices_per_axis = [None, ] * len(array_shape)
indices_per_axis[axis] = indices_or_sections
else:
indices_per_axis = None
num_subarrays = indices_or_sections
self.indices_per_axis = indices_per_axis
self.split_size = num_subarrays
split_num_slices_per_axis = None
if (self.split_size is not None) or (axis is not None):
if axis is None:
axis = 0
if is_sequence(axis):
split_num_slices_per_axis = pad_with_object(axis, len(self.array_shape), 1)
elif self.split_size is not None:
split_num_slices_per_axis = pad_with_object([], len(self.array_shape), 1)
split_num_slices_per_axis[axis] = self.split_size
self.split_num_slices_per_axis = split_num_slices_per_axis
self.tile_shape = tile_shape
self.max_tile_bytes = max_tile_bytes
self.max_tile_shape = max_tile_shape
self.sub_tile_shape = sub_tile_shape
halo = self.convert_halo_to_array_form(halo)
self.halo = halo
if tile_bounds_policy is None:
tile_bounds_policy = ARRAY_BOUNDS
self.tile_bounds_policy = tile_bounds_policy
self.tile_beg_min = self.array_start
self.tile_end_max = self.array_start + self.array_shape
self.split_shape = None
self.split_begs = None
self.split_ends = None
def convert_halo_to_array_form(self, halo):
"""
Converts the :samp:`{halo}` argument to a :samp:`({self}.array_shape.size, 2)`
shaped array.
:type halo: :samp:`None`, :obj:`int`, :samp:`self.array_shape.size` length sequence
of :samp:`int` or :samp:`(self.array_shape.size, 2)` shaped array
of :samp:`int`
:param halo: Halo to be converted to :samp:`(len(self.array_shape), 2)` shaped array form.
:rtype: :obj:`numpy.ndarray`
:return: A :samp:`(len(self.array_shape), 2)` shaped array of :obj:`numpy.int64` elements.
"""
return convert_halo_to_array_form(halo=halo, ndim=len(self.array_shape))
@property
def array_shape(self):
"""
The shape of the array which is to be split. A sequence of :obj:`int` indicating the
per-axis sizes which are to be split.
"""
return self.__array_shape
@array_shape.setter
def array_shape(self, array_shape):
self.__array_shape = array_shape
@property
def array_start(self):
"""
The start index. A sequence of :obj:`int` indicating the start of indexing for
the tile slices. Defaults to :samp:`numpy.zeros_like({self}.array_shape)`.
"""
return self.__array_start
@array_start.setter
def array_start(self, array_start):
self.__array_start = array_start
@property
def array_itemsize(self):
"""
The number of bytes per array element, see :attr:`max_tile_bytes`.
"""
return self.__array_itemsize
@array_itemsize.setter
def array_itemsize(self, array_itemsize):
self.__array_itemsize = array_itemsize
@property
def indices_per_axis(self):
"""
The per-axis indices indicating the cuts for the split.
A :obj:`list` of 1D :obj:`numpy.ndarray` objects such
that :samp:`{self}.indices_per_axis[i]` indicates the
cut positions for axis :samp:`i`.
"""
return self.__indices_per_axis
@indices_per_axis.setter
def indices_per_axis(self, indices_per_axis):
self.__indices_per_axis = indices_per_axis
@property
def split_size(self):
"""
An :obj:`int` indicating the number of tiles in the calculated split.
"""
return self.__split_size
@split_size.setter
def split_size(self, split_size):
self.__split_size = split_size
@property
def split_num_slices_per_axis(self):
"""
Number of slices per axis.
A 1D :obj:`numpy.ndarray` of :obj:`int` indicating the number of slices (sections)
per axis, so that :samp:`{self}.split_num_slices_per_axis[i]` is an integer
indicating the number of sections along axis :samp:`i` in the calculated split.
"""
return self.__split_num_slices_per_axis
@split_num_slices_per_axis.setter
def split_num_slices_per_axis(self, split_num_slices_per_axis):
self.__split_num_slices_per_axis = split_num_slices_per_axis
@property
def tile_shape(self):
"""
The shape of all tiles in the calculated split.
A 1D :samp:`numpy.ndarray` of :obj:`int` indicating the per-axis
number of elements for tiles in the calculated split.
"""
return self.__tile_shape
@tile_shape.setter
def tile_shape(self, tile_shape):
self.__tile_shape = tile_shape
@property
def max_tile_bytes(self):
"""
The maximum number of bytes for any tile (including :attr:`halo`) in the returned split.
An :obj:`int` which constrains the tile shape such that any tile
from the computed split is no bigger than :samp:`{max_tile_bytes}`.
"""
return self.__max_tile_bytes
@max_tile_bytes.setter
def max_tile_bytes(self, max_tile_bytes):
self.__max_tile_bytes = max_tile_bytes
@property
def max_tile_shape(self):
"""
Per-axis maximum sizes for calculated tiles.
A 1D :samp:`numpy.ndarray` of :obj:`int` indicating the per-axis
maximum number of elements for tiles in the calculated split.
"""
return self.__max_tile_shape
@max_tile_shape.setter
def max_tile_shape(self, max_tile_shape):
self.__max_tile_shape = max_tile_shape
@property
def sub_tile_shape(self):
"""
Calculated tile shape will be an integer multiple of this sub-tile shape.
i.e. :samp:`(self.tile_shape[i] % self.sub_tile_shape[i]) == 0`,
for :samp:`i in range(0, len(self.tile_shape))`.
A 1D :samp:`numpy.ndarray` of :obj:`int` indicating sub-tile shape.
"""
return self.__sub_tile_shape
@sub_tile_shape.setter
def sub_tile_shape(self, sub_tile_shape):
self.__sub_tile_shape = sub_tile_shape
@property
def halo(self):
"""
Per-axis -ve and +ve halo sizes for extending tiles to overlap with neighbouring tiles.
A :samp:`(N, 2)` shaped array indicating the
"""
return self.__halo
@halo.setter
def halo(self, halo):
self.__halo = convert_halo_to_array_form(halo, ndim=self.array_shape.size)
@property
def tile_bounds_policy(self):
"""
A string indicating whether tile halo extents can extend beyond the array domain.
Valid values are indicated by :attr:`valid_tile_bounds_policies`.
"""
return self.__tile_bounds_policy
@tile_bounds_policy.setter
def tile_bounds_policy(self, tile_bounds_policy):
self.__tile_bounds_policy = tile_bounds_policy
@property
def tile_beg_min(self):
"""
The per-axis minimum index for :attr:`slice.start`. The per-axis lower bound for
tile start indices. A 1D :obj:`numpy.ndarray`.
"""
return self.__tile_beg_min
@tile_beg_min.setter
def tile_beg_min(self, tile_beg_min):
self.__tile_beg_min = tile_beg_min
@property
def tile_end_max(self):
"""
The per-axis maximum index for :attr:`slice.stop`. The per-axis upper bound for
tile stop indices. A 1D :obj:`numpy.ndarray`.
"""
return self.__tile_end_max
@tile_end_max.setter
def tile_end_max(self, tile_end_max):
self.__tile_end_max = tile_end_max
@property
def split_shape(self):
"""
The shape of the calculated split array. Indicates the per-axis number
of sections in the calculated split. A 1D :obj:`numpy.ndarray`.
"""
return self.__split_shape
@split_shape.setter
def split_shape(self, split_shape):
self.__split_shape = split_shape
@property
def split_begs(self):
"""
The list of per-axis start indices for :obj:`slice` objects.
A :obj:`list` of 1D :obj:`numpy.ndarray` objects indicating
the :attr:`slice.start` index for for tiles.
"""
return self.__split_begs
@split_begs.setter
def split_begs(self, split_begs):
self.__split_begs = split_begs
@property
def split_ends(self):
"""
The list of per-axis stop indices for :obj:`slice` objects.
A :obj:`list` of 1D :obj:`numpy.ndarray` objects indicating
the :attr:`slice.stop` index for for tiles.
"""
return self.__split_ends
@split_ends.setter
def split_ends(self, split_ends):
self.__split_ends = split_ends
def check_consistent_parameter_dimensions(self):
"""
Ensure that all parameter dimensions are consistent with
the :attr:`array_shape` dimension.
:raises ValueError: For inconsistent parameter dimensions.
"""
if self.indices_per_axis is not None:
if len(self.indices_per_axis) > len(self.array_shape):
raise ValueError(
"Got len(self.indices_per_axis)=%s > len(self.array_shape)=%s, should be equal."
%
(len(self.indices_per_axis), len(self.array_shape))
)
if self.split_num_slices_per_axis is not None:
if len(self.split_num_slices_per_axis) > len(self.array_shape):
raise ValueError(
(
"Got len(self.split_num_slices_per_axis)=%s > len(self.array_shape)=%s,"
+
" should be equal."
)
%
(len(self.split_num_slices_per_axis), len(self.array_shape))
)
if self.tile_shape is not None:
if len(self.tile_shape) != len(self.array_shape):
raise ValueError(
"Got len(self.tile_shape)=%s > len(self.array_shape)=%s, should be equal."
%
(len(self.tile_shape), len(self.array_shape))
)
if self.sub_tile_shape is not None:
if len(self.sub_tile_shape) != len(self.array_shape):
raise ValueError(
"Got len(self.sub_tile_shape)=%s > len(self.array_shape)=%s, should be equal."
%
(len(self.sub_tile_shape), len(self.array_shape))
)
if self.max_tile_shape is not None:
if len(self.max_tile_shape) != len(self.array_shape):
raise ValueError(
"Got len(self.max_tile_shape)=%s > len(self.array_shape)=%s, should be equal."
%
(len(self.max_tile_shape), len(self.array_shape))
)
if self.array_start is not None:
if len(self.array_start) != len(self.array_shape):
raise ValueError(
"Got len(self.array_start)=%s > len(self.array_shape)=%s, should be equal."
%
(len(self.array_start), len(self.array_shape))
)
def check_consistent_parameter_grouping(self):
"""
Ensures this object does not have conflicting groups of parameters.
:raises ValueError: For conflicting or absent parameters.
"""
parameter_groups = {}
if self.indices_per_axis is not None:
parameter_groups["indices_per_axis"] = \
{"self.indices_per_axis": self.indices_per_axis}
if (self.split_size is not None) or (self.split_num_slices_per_axis is not None):
parameter_groups["split_size"] = \
{
"self.split_size": self.split_size,
"self.split_num_slices_per_axis": self.split_num_slices_per_axis,
}
if self.tile_shape is not None:
parameter_groups["tile_shape"] = \
{"self.tile_shape": self.tile_shape}
if self.max_tile_bytes is not None:
parameter_groups["max_tile_bytes"] = \
{"self.max_tile_bytes": self.max_tile_bytes}
if self.max_tile_shape is not None:
if "max_tile_bytes" not in parameter_groups.keys():
parameter_groups["max_tile_bytes"] = {}
parameter_groups["max_tile_bytes"]["self.max_tile_shape"] = self.max_tile_shape
if self.sub_tile_shape is not None:
if "max_tile_bytes" not in parameter_groups.keys():
parameter_groups["max_tile_bytes"] = {}
parameter_groups["max_tile_bytes"]["self.sub_tile_shape"] = self.sub_tile_shape
self.logger.debug("parameter_groups=%s", parameter_groups)
if len(parameter_groups.keys()) > 1:
group_keys = sorted(parameter_groups.keys())
raise ValueError(
"Got conflicting parameter groups specified, "
+
"should only specify one group to define the split:\n"
+
(
"\n".join(
[
(
("Group %18s: " % ("'%s'" % group_key))
+
str(parameter_groups[group_key])
)
for group_key in group_keys
]
)
)
)
if len(parameter_groups.keys()) <= 0:
raise ValueError(
"No split parameters specified, need parameters from one of the groups: "
+
"'indices_per_axis', 'split_size', 'tile_shape' or 'max_tile_bytes'"
)
def check_split_parameters(self):
"""
Ensures this object has a state consistent with evaluating a split.
:raises ValueError: For conflicting or absent parameters.
"""
self.check_tile_bounds_policy()
self.check_consistent_parameter_dimensions()
self.check_consistent_parameter_grouping()
def update_tile_extent_bounds(self):
"""
Updates the :attr:`tile_beg_min` and :attr:`tile_end_max`
data members according to :attr:`tile_bounds_policy`.
"""
if self.tile_bounds_policy == NO_BOUNDS:
self.tile_beg_min = self.array_start - self.halo[:, 0]
self.tile_end_max = self.array_start + self.array_shape + self.halo[:, 1]
elif self.tile_bounds_policy == ARRAY_BOUNDS:
self.tile_beg_min = self.array_start
self.tile_end_max = self.array_start + self.array_shape
def set_split_extents_by_indices_per_axis(self):
    """
    Sets split shape :attr:`split_shape` and
    split extents (:attr:`split_begs` and :attr:`split_ends`)
    from values in :attr:`indices_per_axis`.

    :raises ValueError: If :attr:`indices_per_axis` is :samp:`None`.
    """
    if self.indices_per_axis is None:
        raise ValueError("Got None for self.indices_per_axis")
    self.logger.debug("self.array_shape=%s", self.array_shape)
    self.logger.debug("self.indices_per_axis=%s", self.indices_per_axis)
    ndim = len(self.array_shape)
    # Pad so there is one (possibly None) entry of cut indices per axis.
    self.indices_per_axis = pad_with_none(self.indices_per_axis, ndim)
    self.split_shape = _np.ones(ndim, dtype="int64")
    self.split_begs = [[], ] * ndim
    self.split_ends = [[], ] * ndim
    for axis, cuts in enumerate(self.indices_per_axis):
        if (cuts is None) or (len(cuts) <= 0):
            # No cuts on this axis: a single slice spans the full width.
            self.split_begs[axis] = [0, ]
            self.split_ends[axis] = [self.array_shape[axis], ]
            continue
        # k cut indices produce k+1 sections on this axis.
        num_sections = len(cuts) + 1
        self.split_shape[axis] = num_sections
        begs = _np.zeros((num_sections,), dtype="int64")
        begs[1:] = cuts
        ends = _np.zeros((num_sections,), dtype="int64")
        ends[0:-1] = begs[1:]
        ends[-1] = self.array_shape[axis]
        self.split_begs[axis] = begs
        self.split_ends[axis] = ends
    self.logger.debug("self.indices_per_axis=%s", self.indices_per_axis)
def calculate_split_from_extents(self):
"""
Returns split calculated using extents obtained
from :attr:`split_begs` and :attr:`split_ends`.
All calls to calculate the split end up here to produce
the :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
of :obj:`tuple`-of-:obj:`slice` elements.
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
where each element is a :obj:`tuple` of :obj:`slice` objects.
"""
self.logger.debug("self.split_shape=%s", self.split_shape)
self.logger.debug("self.split_begs=%s", self.split_begs)
self.logger.debug("self.split_ends=%s", self.split_ends)
ret = \
_np.array(
[
tuple(
[
slice(
max([
self.split_begs[d][idx[d]]
+ self.array_start[d]
- self.halo[d, 0]
* (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]]),
self.tile_beg_min[d]
]),
min([
self.split_ends[d][idx[d]]
+ self.array_start[d]
+ self.halo[d, 1]
* (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]]),
self.tile_end_max[d]
])
)
for d in range(len(self.split_shape))
]
)
for idx in
_np.array(
_np.unravel_index(
_np.arange(0, _np.product(self.split_shape)),
self.split_shape
)
).T
],
dtype=[("%d" % d, "object") for d in range(len(self.split_shape))]
).reshape(self.split_shape)
return ret
def calculate_split_halos_from_extents(self):
"""
Returns :samp:`(self.ndim, 2)` shaped halo array elements indicating
the halo for each split. Tiles on the boundary may have the halo trimmed
to account for the :attr:`tile_bounds_policy`.
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
where each element is a :samp:`(self.ndim, 2)` shaped :obj:`numpy.ndarray`
indicating the per-axis and per-direction number of halo elements for each tile
in the split.
"""
self.logger.debug("self.split_shape=%s", self.split_shape)
self.logger.debug("self.split_begs=%s", self.split_begs)
self.logger.debug("self.split_ends=%s", self.split_ends)
ret = \
_np.array(
[
(
tuple(
(
min([
self.split_begs[d][idx[d]] - self.tile_beg_min[d],
self.halo[d, 0]
*
(self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]])
]),
min([
self.tile_end_max[d] - self.split_ends[d][idx[d]],
self.halo[d, 1]
*
(self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]])
])
)
for d in range(len(self.split_shape))
)
)
for idx in
_np.array(
_np.unravel_index(
_np.arange(0, _np.product(self.split_shape)),
self.split_shape
)
).T
],
dtype=[("%d" % d, "2int64") for d in range(len(self.split_shape))]
).reshape(self.split_shape)
return ret
def calculate_split_by_indices_per_axis(self):
    """
    Returns split calculated using extents obtained
    from :attr:`indices_per_axis`.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` structured array where each element is
       a :obj:`tuple` of :obj:`slice` objects.
    """
    # Convenience wrapper: compute the per-axis extents from the cut
    # indices, then materialise the structured array of slices.
    self.set_split_extents_by_indices_per_axis()
    return self.calculate_split_from_extents()
def calculate_axis_split_extents(self, num_sections, size):
    """
    Divides :samp:`range(0, {size})` into (approximately) equal sized
    intervals. Returns :samp:`(begs, ends)` where :samp:`slice(begs[i], ends[i])`
    define the intervals for :samp:`i in range(0, {num_sections})`.

    :type num_sections: :obj:`int`
    :param num_sections: Divide :samp:`range(0, {size})` into this
       many (approximately) equal sized intervals.
    :type size: :obj:`int`
    :param size: Range for the subdivision.
    :rtype: :obj:`tuple`
    :return: Two element tuple :samp:`(begs, ends)`
       such that :samp:`slice(begs[i], ends[i])` define the
       intervals for :samp:`i in range(0, {num_sections})`.
    """
    section_size = size // num_sections
    if section_size >= 1:
        begs = _np.arange(0, section_size * num_sections, section_size)
        rem = size - section_size * num_sections
        if rem > 0:
            # Distribute the remainder over the first `rem` sections:
            # begs[j] shifts by min(j, rem). Vectorized replacement for
            # the O(num_sections * rem) increment loop.
            begs[1:] += _np.minimum(_np.arange(1, num_sections), rem)
        ends = _np.zeros_like(begs)
        ends[0:-1] = begs[1:]
        ends[-1] = size
    else:
        # More sections than elements: the first `size` sections each get
        # one element, the remaining sections are empty.
        begs = _np.arange(0, num_sections)
        begs[size:] = size
        ends = begs.copy()
        ends[0:-1] = begs[1:]
    return begs, ends
def set_split_extents_by_split_size(self):
    """
    Sets split shape :attr:`split_shape` and
    split extents (:attr:`split_begs` and :attr:`split_ends`)
    from values in :attr:`split_size` and :attr:`split_num_slices_per_axis`.

    :raises ValueError: If :attr:`split_size` is :samp:`None` and
       :attr:`split_num_slices_per_axis` has any :samp:`None` or
       non-positive entries.
    """
    if self.split_size is None:
        # Derive the total tile count from the per-axis slice counts;
        # only possible when every per-axis count is a positive integer.
        if (
            _np.all([s is not None for s in self.split_num_slices_per_axis])
            and
            _np.all([s > 0 for s in self.split_num_slices_per_axis])
        ):
            # NOTE: _np.prod replaces _np.product (removed in numpy 2.0).
            self.split_size = _np.prod(self.split_num_slices_per_axis)
        else:
            raise ValueError(
                (
                    "Got invalid self.split_num_slices_per_axis=%s, all elements "
                    +
                    "need to be integers greater than zero when self.split_size is None."
                )
                %
                self.split_num_slices_per_axis
            )
    self.logger.debug(
        "Pre cannonicalise: self.split_num_slices_per_axis=%s",
        self.split_num_slices_per_axis)
    # Fill in unspecified per-axis counts so that their product equals
    # split_size (delegated to the module-level helper).
    self.split_num_slices_per_axis = \
        calculate_num_slices_per_axis(
            self.split_num_slices_per_axis,
            self.split_size,
            self.array_shape
        )
    self.logger.debug(
        "Post cannonicalise: self.split_num_slices_per_axis=%s",
        self.split_num_slices_per_axis)
    # Define the start and stop indices (extents) for each axis slice.
    self.split_shape = self.split_num_slices_per_axis.copy()
    self.split_begs = [[], ] * len(self.array_shape)
    self.split_ends = [[], ] * len(self.array_shape)
    for i in range(len(self.array_shape)):
        self.split_begs[i], self.split_ends[i] = \
            self.calculate_axis_split_extents(
                self.split_shape[i],
                self.array_shape[i]
            )
def calculate_split_by_split_size(self):
    """
    Returns split calculated using extents obtained
    from :attr:`split_size` and :attr:`split_num_slices_per_axis`.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` structured array where each element is
       a :obj:`tuple` of :obj:`slice` objects.
    """
    # Convenience wrapper: compute near-equal per-axis extents for the
    # requested number of sections, then materialise the slices.
    self.set_split_extents_by_split_size()
    return self.calculate_split_from_extents()
def set_split_extents_by_tile_shape(self):
"""
Sets split shape :attr:`split_shape` and
split extents (:attr:`split_begs` and :attr:`split_ends`)
from value of :attr:`tile_shape`.
"""
self.split_shape = ((self.array_shape - 1) // self.tile_shape) + 1
self.split_begs = [[], ] * len(self.array_shape)
self.split_ends = [[], ] * len(self.array_shape)
for i in range(len(self.array_shape)):
self.split_begs[i] = _np.arange(0, self.array_shape[i], self.tile_shape[i])
self.split_ends[i] = _np.zeros_like(self.split_begs[i])
self.split_ends[i][0:-1] = self.split_begs[i][1:]
self.split_ends[i][-1] = self.array_shape[i]
def calculate_split_by_tile_shape(self):
    """
    Returns split calculated using extents obtained
    from :attr:`tile_shape`.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` structured array where each element is
       a :obj:`tuple` of :obj:`slice` objects.
    """
    # Convenience wrapper: compute the per-axis extents for fixed-shape
    # tiles, then materialise the slices.
    self.set_split_extents_by_tile_shape()
    return self.calculate_split_from_extents()
def set_split_extents_by_tile_max_bytes(self):
    """
    Sets split extents (:attr:`split_begs`
    and :attr:`split_ends`) calculated
    from :attr:`max_tile_bytes`
    (and :attr:`max_tile_shape`, :attr:`sub_tile_shape`, :attr:`halo`).
    """
    # First solve for a tile shape that satisfies the byte budget and the
    # shape constraints, then split as if that tile shape had been given.
    self.tile_shape = \
        calculate_tile_shape_for_max_bytes(
            array_shape=self.array_shape,
            array_itemsize=self.array_itemsize,
            max_tile_bytes=self.max_tile_bytes,
            max_tile_shape=self.max_tile_shape,
            sub_tile_shape=self.sub_tile_shape,
            halo=self.halo
        )
    self.set_split_extents_by_tile_shape()
def calculate_split_by_tile_max_bytes(self):
    """
    Returns split calculated using extents obtained
    from :attr:`max_tile_bytes`
    (and :attr:`max_tile_shape`, :attr:`sub_tile_shape`, :attr:`halo`).

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` structured array where each element is
       a :obj:`tuple` of :obj:`slice` objects.
    """
    # Convenience wrapper: derive a tile shape from the byte budget,
    # compute extents, then materialise the slices.
    self.set_split_extents_by_tile_max_bytes()
    return self.calculate_split_from_extents()
def set_split_extents(self):
    """
    Sets split extents (:attr:`split_begs`
    and :attr:`split_ends`) calculated using
    selected attributes set from :meth:`__init__`.
    """
    self.check_split_parameters()
    self.update_tile_extent_bounds()
    # Dispatch to the extent calculation matching the parameter group
    # that was supplied (validation above guarantees exactly one group).
    # The order of these tests defines the group precedence.
    if self.indices_per_axis is not None:
        self.set_split_extents_by_indices_per_axis()
        return
    if (self.split_size is not None) or (self.split_num_slices_per_axis is not None):
        self.set_split_extents_by_split_size()
        return
    if self.tile_shape is not None:
        self.set_split_extents_by_tile_shape()
        return
    if self.max_tile_bytes is not None:
        self.set_split_extents_by_tile_max_bytes()
def calculate_split(self):
    """
    Computes the split.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` structured array
       of dimension :samp:`len({self}.array_shape)`.
       Each element of the returned array is a :obj:`tuple`
       containing :samp:`len({self}.array_shape)` elements, with each element
       being a :obj:`slice` object. Each :obj:`tuple` defines a slice within
       the bounds :samp:`{self}.array_start - {self}.halo[:, 0]`
       to :samp:`{self}.array_start + {self}.array_shape + {self}.halo[:, 1]`.
    """
    # Validate parameters and compute per-axis extents, then materialise
    # the structured array of slices.
    self.set_split_extents()
    return self.calculate_split_from_extents()
|
def check_consistent_parameter_dimensions(self):
    """
    Ensure that all parameter dimensions are consistent with
    the :attr:`array_shape` dimension.

    :raises ValueError: For inconsistent parameter dimensions.
    """
    # NOTE(review): the first two checks use ">" (shorter sequences are
    # later padded up to ndim); the remaining checks require exact
    # equality with len(array_shape).
    if self.indices_per_axis is not None:
        if len(self.indices_per_axis) > len(self.array_shape):
            raise ValueError(
                "Got len(self.indices_per_axis)=%s > len(self.array_shape)=%s, should be equal."
                %
                (len(self.indices_per_axis), len(self.array_shape))
            )
    if self.split_num_slices_per_axis is not None:
        if len(self.split_num_slices_per_axis) > len(self.array_shape):
            raise ValueError(
                (
                    "Got len(self.split_num_slices_per_axis)=%s > len(self.array_shape)=%s,"
                    +
                    " should be equal."
                )
                %
                (len(self.split_num_slices_per_axis), len(self.array_shape))
            )
    if self.tile_shape is not None:
        if len(self.tile_shape) != len(self.array_shape):
            raise ValueError(
                "Got len(self.tile_shape)=%s > len(self.array_shape)=%s, should be equal."
                %
                (len(self.tile_shape), len(self.array_shape))
            )
    if self.sub_tile_shape is not None:
        if len(self.sub_tile_shape) != len(self.array_shape):
            raise ValueError(
                "Got len(self.sub_tile_shape)=%s > len(self.array_shape)=%s, should be equal."
                %
                (len(self.sub_tile_shape), len(self.array_shape))
            )
    if self.max_tile_shape is not None:
        if len(self.max_tile_shape) != len(self.array_shape):
            raise ValueError(
                "Got len(self.max_tile_shape)=%s > len(self.array_shape)=%s, should be equal."
                %
                (len(self.max_tile_shape), len(self.array_shape))
            )
    if self.array_start is not None:
        if len(self.array_start) != len(self.array_shape):
            raise ValueError(
                "Got len(self.array_start)=%s > len(self.array_shape)=%s, should be equal."
                %
                (len(self.array_start), len(self.array_shape))
            )
class ShapeSplitter(object):
"""
Implements array shape splitting. There are three main (top-level) methods:
:meth:`__init__`
Initialisation of parameters which define the split.
:meth:`set_split_extents`
Calculates the per-axis indices for the cuts. Sets
the :attr:`split_shape`, :attr:`split_begs`
and :attr:`split_ends` attributes.
:meth:`calculate_split`
Calls :meth:`set_split_extents` followed
by :meth:`calculate_split_from_extents` to
return the :obj:`numpy.ndarray` of :obj:`tuple` elements (slices).
Example::
>>> import numpy as np
>>> ary = np.arange(0, 10)
>>> splitter = ShapeSplitter(ary.shape, 3)
>>> split = splitter.calculate_split()
>>> split.shape
(3,)
>>> split
array([(slice(0, 4, None),), (slice(4, 7, None),), (slice(7, 10, None),)],
dtype=[('0', 'O')])
>>> [ary[slyce] for slyce in split.flatten()]
[array([0, 1, 2, 3]), array([4, 5, 6]), array([7, 8, 9])]
>>>
>>> splitter.split_shape # equivalent to split.shape above
array([3])
>>> splitter.split_begs # start indices for tile extents
[array([0, 4, 7])]
>>> splitter.split_ends # stop indices for tile extents
[array([ 4, 7, 10])]
"""
#: Class attribute for :obj:`logging.Logger` logging.
logger = _logging.getLogger(__name__ + ".ShapeSplitter")
#: Class attribute indicating list of valid values for :attr:`tile_bound_policy`.
#: See :data:`ARRAY_BOUNDS` and :data:`NO_BOUNDS`.
valid_tile_bounds_policies = [ARRAY_BOUNDS, NO_BOUNDS]
def __init__(
    self,
    array_shape,
    indices_or_sections=None,
    axis=None,
    array_start=None,
    array_itemsize=1,
    tile_shape=None,
    max_tile_bytes=None,
    max_tile_shape=None,
    sub_tile_shape=None,
    halo=None,
    tile_bounds_policy=ARRAY_BOUNDS
):
    """
    Initialises the parameters which define the split.

    :param array_shape: Shape of the array to be split.
    :param indices_or_sections: Either a sequence of cut indices or an
       :obj:`int` number of sections (mirrors :func:`numpy.array_split`).
    :param axis: Axis for the cut indices, or a per-axis sequence of
       section counts.
    :param array_start: Start index offset for the tile slices
       (defaults to zeros).
    :param array_itemsize: Bytes per array element, used with
       :attr:`max_tile_bytes`.
    :param tile_shape: Explicit per-axis tile shape.
    :param max_tile_bytes: Upper bound on per-tile bytes.
    :param max_tile_shape: Per-axis upper bound on the tile shape.
    :param sub_tile_shape: Calculated tile shape is forced to be an
       integer multiple of this shape.
    :param halo: Per-axis halo sizes for overlapping tiles.
    :param tile_bounds_policy: :data:`ARRAY_BOUNDS` or :data:`NO_BOUNDS`
       (defaults to :data:`ARRAY_BOUNDS` when :samp:`None`).
    """
    # Initialise *private* attributes.
    self.__array_shape = None
    self.__array_start = None
    self.__array_itemsize = None
    self.__indices_per_axis = None
    self.__split_size = None
    self.__split_num_slices_per_axis = None
    self.__tile_shape = None
    self.__max_tile_bytes = None
    self.__max_tile_shape = None
    self.__sub_tile_shape = None
    self.__halo = None
    self.__tile_bounds_policy = None
    self.__tile_beg_min = None
    self.__tile_end_max = None
    self.__split_shape = None
    self.__split_begs = None
    self.__split_ends = None
    # Now set properties from arguments
    self.array_shape = _np.array(array_shape)
    if array_start is None:
        array_start = _np.zeros_like(self.array_shape)
    self.array_start = array_start
    self.array_itemsize = array_itemsize
    # Disambiguate indices_or_sections: a sequence means cut indices,
    # anything else means a section count.
    indices_per_axis = None
    if is_indices(indices_or_sections):
        num_subarrays = None
        indices_per_axis = indices_or_sections
        if (
            ((axis is None) or is_scalar(axis))
            and
            (not _np.any([is_sequence(_e) for _e in indices_or_sections]))
        ):
            if axis is None:
                axis = 0
            # Make indices_per_axis a list of lists, so that
            # element 0 is a list of indices for axis 0
            indices_per_axis = [None, ] * len(array_shape)
            indices_per_axis[axis] = indices_or_sections
    else:
        indices_per_axis = None
        num_subarrays = indices_or_sections
    self.indices_per_axis = indices_per_axis
    self.split_size = num_subarrays
    # Derive the per-axis section counts from axis/split_size when the
    # split was given as a number of sections.
    split_num_slices_per_axis = None
    if (self.split_size is not None) or (axis is not None):
        if axis is None:
            axis = 0
        if is_sequence(axis):
            split_num_slices_per_axis = pad_with_object(axis, len(self.array_shape), 1)
        elif self.split_size is not None:
            split_num_slices_per_axis = pad_with_object([], len(self.array_shape), 1)
            split_num_slices_per_axis[axis] = self.split_size
    self.split_num_slices_per_axis = split_num_slices_per_axis
    self.tile_shape = tile_shape
    self.max_tile_bytes = max_tile_bytes
    self.max_tile_shape = max_tile_shape
    self.sub_tile_shape = sub_tile_shape
    # Normalise the halo to canonical (ndim, 2) array form.
    halo = self.convert_halo_to_array_form(halo)
    self.halo = halo
    if tile_bounds_policy is None:
        tile_bounds_policy = ARRAY_BOUNDS
    self.tile_bounds_policy = tile_bounds_policy
    # Default tile bounds; updated later by update_tile_extent_bounds().
    self.tile_beg_min = self.array_start
    self.tile_end_max = self.array_start + self.array_shape
    # Split extents are computed lazily by set_split_extents().
    self.split_shape = None
    self.split_begs = None
    self.split_ends = None
def convert_halo_to_array_form(self, halo):
    """
    Converts the :samp:`{halo}` argument to a :samp:`({self}.array_shape.size, 2)`
    shaped array.

    :type halo: :samp:`None`, :obj:`int`, :samp:`self.array_shape.size` length sequence
       of :samp:`int` or :samp:`(self.array_shape.size, 2)` shaped array
       of :samp:`int`
    :param halo: Halo to be converted to :samp:`(len(self.array_shape), 2)` shaped array form.
    :rtype: :obj:`numpy.ndarray`
    :return: A :samp:`(len(self.array_shape), 2)` shaped array of :obj:`numpy.int64` elements.
    """
    # Delegates to the module-level convert_halo_to_array_form function
    # (the bare name resolves to module scope here, not to this method),
    # fixing ndim to this splitter's dimensionality.
    return convert_halo_to_array_form(halo=halo, ndim=len(self.array_shape))
@property
def array_shape(self):
"""
The shape of the array which is to be split. A sequence of :obj:`int` indicating the
per-axis sizes which are to be split.
"""
return self.__array_shape
@array_shape.setter
def array_shape(self, array_shape):
self.__array_shape = array_shape
@property
def array_start(self):
"""
The start index. A sequence of :obj:`int` indicating the start of indexing for
the tile slices. Defaults to :samp:`numpy.zeros_like({self}.array_shape)`.
"""
return self.__array_start
@array_start.setter
def array_start(self, array_start):
self.__array_start = array_start
@property
def array_itemsize(self):
"""
The number of bytes per array element, see :attr:`max_tile_bytes`.
"""
return self.__array_itemsize
@array_itemsize.setter
def array_itemsize(self, array_itemsize):
self.__array_itemsize = array_itemsize
@property
def indices_per_axis(self):
"""
The per-axis indices indicating the cuts for the split.
A :obj:`list` of 1D :obj:`numpy.ndarray` objects such
that :samp:`{self}.indices_per_axis[i]` indicates the
cut positions for axis :samp:`i`.
"""
return self.__indices_per_axis
@indices_per_axis.setter
def indices_per_axis(self, indices_per_axis):
self.__indices_per_axis = indices_per_axis
@property
def split_size(self):
"""
An :obj:`int` indicating the number of tiles in the calculated split.
"""
return self.__split_size
@split_size.setter
def split_size(self, split_size):
self.__split_size = split_size
@property
def split_num_slices_per_axis(self):
"""
Number of slices per axis.
A 1D :obj:`numpy.ndarray` of :obj:`int` indicating the number of slices (sections)
per axis, so that :samp:`{self}.split_num_slices_per_axis[i]` is an integer
indicating the number of sections along axis :samp:`i` in the calculated split.
"""
return self.__split_num_slices_per_axis
@split_num_slices_per_axis.setter
def split_num_slices_per_axis(self, split_num_slices_per_axis):
self.__split_num_slices_per_axis = split_num_slices_per_axis
@property
def tile_shape(self):
"""
The shape of all tiles in the calculated split.
A 1D :samp:`numpy.ndarray` of :obj:`int` indicating the per-axis
number of elements for tiles in the calculated split.
"""
return self.__tile_shape
@tile_shape.setter
def tile_shape(self, tile_shape):
self.__tile_shape = tile_shape
@property
def max_tile_bytes(self):
"""
The maximum number of bytes for any tile (including :attr:`halo`) in the returned split.
An :obj:`int` which constrains the tile shape such that any tile
from the computed split is no bigger than :samp:`{max_tile_bytes}`.
"""
return self.__max_tile_bytes
@max_tile_bytes.setter
def max_tile_bytes(self, max_tile_bytes):
self.__max_tile_bytes = max_tile_bytes
@property
def max_tile_shape(self):
"""
Per-axis maximum sizes for calculated tiles.
A 1D :samp:`numpy.ndarray` of :obj:`int` indicating the per-axis
maximum number of elements for tiles in the calculated split.
"""
return self.__max_tile_shape
@max_tile_shape.setter
def max_tile_shape(self, max_tile_shape):
self.__max_tile_shape = max_tile_shape
@property
def sub_tile_shape(self):
"""
Calculated tile shape will be an integer multiple of this sub-tile shape.
i.e. :samp:`(self.tile_shape[i] % self.sub_tile_shape[i]) == 0`,
for :samp:`i in range(0, len(self.tile_shape))`.
A 1D :samp:`numpy.ndarray` of :obj:`int` indicating sub-tile shape.
"""
return self.__sub_tile_shape
@sub_tile_shape.setter
def sub_tile_shape(self, sub_tile_shape):
self.__sub_tile_shape = sub_tile_shape
@property
def halo(self):
    """
    Per-axis -ve and +ve halo sizes for extending tiles to overlap with neighbouring tiles.
    A :samp:`(N, 2)` shaped array indicating the
    per-axis negative-direction (column 0) and positive-direction
    (column 1) halo element counts.
    """
    return self.__halo
@halo.setter
def halo(self, halo):
    # Normalise any accepted halo form (None, scalar, 1D sequence or
    # (N, 2) array) to the canonical (ndim, 2) array via the
    # module-level helper.
    self.__halo = convert_halo_to_array_form(halo, ndim=self.array_shape.size)
@property
def tile_bounds_policy(self):
"""
A string indicating whether tile halo extents can extend beyond the array domain.
Valid values are indicated by :attr:`valid_tile_bounds_policies`.
"""
return self.__tile_bounds_policy
@tile_bounds_policy.setter
def tile_bounds_policy(self, tile_bounds_policy):
self.__tile_bounds_policy = tile_bounds_policy
@property
def tile_beg_min(self):
"""
The per-axis minimum index for :attr:`slice.start`. The per-axis lower bound for
tile start indices. A 1D :obj:`numpy.ndarray`.
"""
return self.__tile_beg_min
@tile_beg_min.setter
def tile_beg_min(self, tile_beg_min):
self.__tile_beg_min = tile_beg_min
@property
def tile_end_max(self):
"""
The per-axis maximum index for :attr:`slice.stop`. The per-axis upper bound for
tile stop indices. A 1D :obj:`numpy.ndarray`.
"""
return self.__tile_end_max
@tile_end_max.setter
def tile_end_max(self, tile_end_max):
self.__tile_end_max = tile_end_max
@property
def split_shape(self):
"""
The shape of the calculated split array. Indicates the per-axis number
of sections in the calculated split. A 1D :obj:`numpy.ndarray`.
"""
return self.__split_shape
@split_shape.setter
def split_shape(self, split_shape):
self.__split_shape = split_shape
@property
def split_begs(self):
"""
The list of per-axis start indices for :obj:`slice` objects.
A :obj:`list` of 1D :obj:`numpy.ndarray` objects indicating
the :attr:`slice.start` index for for tiles.
"""
return self.__split_begs
@split_begs.setter
def split_begs(self, split_begs):
self.__split_begs = split_begs
@property
def split_ends(self):
"""
The list of per-axis stop indices for :obj:`slice` objects.
A :obj:`list` of 1D :obj:`numpy.ndarray` objects indicating
the :attr:`slice.stop` index for for tiles.
"""
return self.__split_ends
@split_ends.setter
def split_ends(self, split_ends):
self.__split_ends = split_ends
def check_tile_bounds_policy(self):
    """
    Raises :obj:`ValueError` if :attr:`tile_bounds_policy`
    is not in :samp:`[{self}.ARRAY_BOUNDS, {self}.NO_BOUNDS]`.
    """
    policy = self.tile_bounds_policy
    valid_policies = self.valid_tile_bounds_policies
    if policy not in valid_policies:
        raise ValueError(
            "Got self.tile_bounds_policy=%s, which is not in %s."
            %
            (policy, valid_policies)
        )
def check_consistent_parameter_grouping(self):
    """
    Ensures this object does not have conflicting groups of parameters.

    Exactly one parameter group may define the split:
    :samp:`'indices_per_axis'`, :samp:`'split_size'`,
    :samp:`'tile_shape'` or :samp:`'max_tile_bytes'`.

    :raises ValueError: For conflicting or absent parameters.
    """
    # Collect every parameter group the caller supplied, keyed by group
    # name; the values record which attributes were set (for reporting).
    parameter_groups = {}
    if self.indices_per_axis is not None:
        parameter_groups["indices_per_axis"] = \
            {"self.indices_per_axis": self.indices_per_axis}
    if (self.split_size is not None) or (self.split_num_slices_per_axis is not None):
        parameter_groups["split_size"] = \
            {
                "self.split_size": self.split_size,
                "self.split_num_slices_per_axis": self.split_num_slices_per_axis,
            }
    if self.tile_shape is not None:
        parameter_groups["tile_shape"] = \
            {"self.tile_shape": self.tile_shape}
    if self.max_tile_bytes is not None:
        parameter_groups["max_tile_bytes"] = \
            {"self.max_tile_bytes": self.max_tile_bytes}
    # max_tile_shape and sub_tile_shape belong to the "max_tile_bytes"
    # group; they merge into it rather than forming groups of their own.
    if self.max_tile_shape is not None:
        if "max_tile_bytes" not in parameter_groups.keys():
            parameter_groups["max_tile_bytes"] = {}
        parameter_groups["max_tile_bytes"]["self.max_tile_shape"] = self.max_tile_shape
    if self.sub_tile_shape is not None:
        if "max_tile_bytes" not in parameter_groups.keys():
            parameter_groups["max_tile_bytes"] = {}
        parameter_groups["max_tile_bytes"]["self.sub_tile_shape"] = self.sub_tile_shape
    self.logger.debug("parameter_groups=%s", parameter_groups)
    if len(parameter_groups.keys()) > 1:
        # Multiple groups supplied: report all of them in a
        # deterministic (sorted) order.
        group_keys = sorted(parameter_groups.keys())
        raise ValueError(
            "Got conflicting parameter groups specified, "
            +
            "should only specify one group to define the split:\n"
            +
            (
                "\n".join(
                    [
                        (
                            ("Group %18s: " % ("'%s'" % group_key))
                            +
                            str(parameter_groups[group_key])
                        )
                        for group_key in group_keys
                    ]
                )
            )
        )
    if len(parameter_groups.keys()) <= 0:
        raise ValueError(
            "No split parameters specified, need parameters from one of the groups: "
            +
            "'indices_per_axis', 'split_size', 'tile_shape' or 'max_tile_bytes'"
        )
def check_split_parameters(self):
"""
Ensures this object has a state consistent with evaluating a split.
:raises ValueError: For conflicting or absent parameters.
"""
self.check_tile_bounds_policy()
self.check_consistent_parameter_dimensions()
self.check_consistent_parameter_grouping()
def update_tile_extent_bounds(self):
"""
Updates the :attr:`tile_beg_min` and :attr:`tile_end_max`
data members according to :attr:`tile_bounds_policy`.
"""
if self.tile_bounds_policy == NO_BOUNDS:
self.tile_beg_min = self.array_start - self.halo[:, 0]
self.tile_end_max = self.array_start + self.array_shape + self.halo[:, 1]
elif self.tile_bounds_policy == ARRAY_BOUNDS:
self.tile_beg_min = self.array_start
self.tile_end_max = self.array_start + self.array_shape
def set_split_extents_by_indices_per_axis(self):
"""
Sets split shape :attr:`split_shape` and
split extents (:attr:`split_begs` and :attr:`split_ends`)
from values in :attr:`indices_per_axis`.
"""
if self.indices_per_axis is None:
raise ValueError("Got None for self.indices_per_axis")
self.logger.debug("self.array_shape=%s", self.array_shape)
self.logger.debug("self.indices_per_axis=%s", self.indices_per_axis)
self.indices_per_axis = \
pad_with_none(self.indices_per_axis, len(self.array_shape))
# Define the start and stop indices (extents) for each axis slice
self.split_shape = _np.ones(len(self.array_shape), dtype="int64")
self.split_begs = [[], ] * len(self.array_shape)
self.split_ends = [[], ] * len(self.array_shape)
for i in range(len(self.indices_per_axis)):
indices = self.indices_per_axis[i]
if (indices is not None) and (len(indices) > 0):
self.split_shape[i] = len(indices) + 1
self.split_begs[i] = _np.zeros((len(indices) + 1,), dtype="int64")
self.split_begs[i][1:] = indices
self.split_ends[i] = _np.zeros((len(self.split_begs[i]),), dtype="int64")
self.split_ends[i][0:-1] = self.split_begs[i][1:]
self.split_ends[i][-1] = self.array_shape[i]
else:
# start and stop is the full width of the axis
self.split_begs[i] = [0, ]
self.split_ends[i] = [self.array_shape[i], ]
self.logger.debug("self.indices_per_axis=%s", self.indices_per_axis)
def calculate_split_from_extents(self):
"""
Returns split calculated using extents obtained
from :attr:`split_begs` and :attr:`split_ends`.
All calls to calculate the split end up here to produce
the :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
of :obj:`tuple`-of-:obj:`slice` elements.
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
where each element is a :obj:`tuple` of :obj:`slice` objects.
"""
self.logger.debug("self.split_shape=%s", self.split_shape)
self.logger.debug("self.split_begs=%s", self.split_begs)
self.logger.debug("self.split_ends=%s", self.split_ends)
ret = \
_np.array(
[
tuple(
[
slice(
max([
self.split_begs[d][idx[d]]
+ self.array_start[d]
- self.halo[d, 0]
* (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]]),
self.tile_beg_min[d]
]),
min([
self.split_ends[d][idx[d]]
+ self.array_start[d]
+ self.halo[d, 1]
* (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]]),
self.tile_end_max[d]
])
)
for d in range(len(self.split_shape))
]
)
for idx in
_np.array(
_np.unravel_index(
_np.arange(0, _np.product(self.split_shape)),
self.split_shape
)
).T
],
dtype=[("%d" % d, "object") for d in range(len(self.split_shape))]
).reshape(self.split_shape)
return ret
def calculate_split_halos_from_extents(self):
"""
Returns :samp:`(self.ndim, 2)` shaped halo array elements indicating
the halo for each split. Tiles on the boundary may have the halo trimmed
to account for the :attr:`tile_bounds_policy`.
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
where each element is a :samp:`(self.ndim, 2)` shaped :obj:`numpy.ndarray`
indicating the per-axis and per-direction number of halo elements for each tile
in the split.
"""
self.logger.debug("self.split_shape=%s", self.split_shape)
self.logger.debug("self.split_begs=%s", self.split_begs)
self.logger.debug("self.split_ends=%s", self.split_ends)
ret = \
_np.array(
[
(
tuple(
(
min([
self.split_begs[d][idx[d]] - self.tile_beg_min[d],
self.halo[d, 0]
*
(self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]])
]),
min([
self.tile_end_max[d] - self.split_ends[d][idx[d]],
self.halo[d, 1]
*
(self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]])
])
)
for d in range(len(self.split_shape))
)
)
for idx in
_np.array(
_np.unravel_index(
_np.arange(0, _np.product(self.split_shape)),
self.split_shape
)
).T
],
dtype=[("%d" % d, "2int64") for d in range(len(self.split_shape))]
).reshape(self.split_shape)
return ret
def calculate_split_by_indices_per_axis(self):
"""
Returns split calculated using extents obtained
from :attr:`indices_per_axis`.
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
where each element is a :obj:`tuple` of :obj:`slice` objects.
"""
self.set_split_extents_by_indices_per_axis()
return self.calculate_split_from_extents()
def calculate_axis_split_extents(self, num_sections, size):
"""
Divides :samp:`range(0, {size})` into (approximately) equal sized
intervals. Returns :samp:`(begs, ends)` where :samp:`slice(begs[i], ends[i])`
define the intervals for :samp:`i in range(0, {num_sections})`.
:type num_sections: :obj:`int`
:param num_sections: Divide :samp:`range(0, {size})` into this
many intervals (approximately) equal sized intervals.
:type size: :obj:`int`
:param size: Range for the subdivision.
:rtype: :obj:`tuple`
:return: Two element tuple :samp:`(begs, ends)`
such that :samp:`slice(begs[i], ends[i])` define the
intervals for :samp:`i in range(0, {num_sections})`.
"""
section_size = size // num_sections
if section_size >= 1:
begs = _np.arange(0, section_size * num_sections, section_size)
rem = size - section_size * num_sections
if rem > 0:
for i in range(rem):
begs[i + 1:] += 1
ends = _np.zeros_like(begs)
ends[0:-1] = begs[1:]
ends[-1] = size
else:
begs = _np.arange(0, num_sections)
begs[size:] = size
ends = begs.copy()
ends[0:-1] = begs[1:]
return begs, ends
def set_split_extents_by_split_size(self):
"""
Sets split shape :attr:`split_shape` and
split extents (:attr:`split_begs` and :attr:`split_ends`)
from values in :attr:`split_size` and :attr:`split_num_slices_per_axis`.
"""
if self.split_size is None:
if (
_np.all([s is not None for s in self.split_num_slices_per_axis])
and
_np.all([s > 0 for s in self.split_num_slices_per_axis])
):
self.split_size = _np.product(self.split_num_slices_per_axis)
else:
raise ValueError(
(
"Got invalid self.split_num_slices_per_axis=%s, all elements "
+
"need to be integers greater than zero when self.split_size is None."
)
%
self.split_num_slices_per_axis
)
self.logger.debug(
"Pre cannonicalise: self.split_num_slices_per_axis=%s",
self.split_num_slices_per_axis)
self.split_num_slices_per_axis = \
calculate_num_slices_per_axis(
self.split_num_slices_per_axis,
self.split_size,
self.array_shape
)
self.logger.debug(
"Post cannonicalise: self.split_num_slices_per_axis=%s",
self.split_num_slices_per_axis)
# Define the start and stop indices (extents) for each axis slice
self.split_shape = self.split_num_slices_per_axis.copy()
self.split_begs = [[], ] * len(self.array_shape)
self.split_ends = [[], ] * len(self.array_shape)
for i in range(len(self.array_shape)):
self.split_begs[i], self.split_ends[i] = \
self.calculate_axis_split_extents(
self.split_shape[i],
self.array_shape[i]
)
def calculate_split_by_split_size(self):
"""
Returns split calculated using extents obtained
from :attr:`split_size` and :attr:`split_num_slices_per_axis`.
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
where each element is a :obj:`tuple` of :obj:`slice` objects.
"""
self.set_split_extents_by_split_size()
return self.calculate_split_from_extents()
def set_split_extents_by_tile_shape(self):
"""
Sets split shape :attr:`split_shape` and
split extents (:attr:`split_begs` and :attr:`split_ends`)
from value of :attr:`tile_shape`.
"""
self.split_shape = ((self.array_shape - 1) // self.tile_shape) + 1
self.split_begs = [[], ] * len(self.array_shape)
self.split_ends = [[], ] * len(self.array_shape)
for i in range(len(self.array_shape)):
self.split_begs[i] = _np.arange(0, self.array_shape[i], self.tile_shape[i])
self.split_ends[i] = _np.zeros_like(self.split_begs[i])
self.split_ends[i][0:-1] = self.split_begs[i][1:]
self.split_ends[i][-1] = self.array_shape[i]
def calculate_split_by_tile_shape(self):
"""
Returns split calculated using extents obtained
from :attr:`tile_shape`.
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
where each element is a :obj:`tuple` of :obj:`slice` objects.
"""
self.set_split_extents_by_tile_shape()
return self.calculate_split_from_extents()
def set_split_extents_by_tile_max_bytes(self):
"""
Sets split extents (:attr:`split_begs`
and :attr:`split_ends`) calculated using
from :attr:`max_tile_bytes`
(and :attr:`max_tile_shape`, :attr:`sub_tile_shape`, :attr:`halo`).
"""
self.tile_shape = \
calculate_tile_shape_for_max_bytes(
array_shape=self.array_shape,
array_itemsize=self.array_itemsize,
max_tile_bytes=self.max_tile_bytes,
max_tile_shape=self.max_tile_shape,
sub_tile_shape=self.sub_tile_shape,
halo=self.halo
)
self.set_split_extents_by_tile_shape()
def calculate_split_by_tile_max_bytes(self):
"""
Returns split calculated using extents obtained
from :attr:`max_tile_bytes`
(and :attr:`max_tile_shape`, :attr:`sub_tile_shape`, :attr:`halo`).
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
where each element is a :obj:`tuple` of :obj:`slice` objects.
"""
self.set_split_extents_by_tile_max_bytes()
return self.calculate_split_from_extents()
def set_split_extents(self):
"""
Sets split extents (:attr:`split_begs`
and :attr:`split_ends`) calculated using
selected attributes set from :meth:`__init__`.
"""
self.check_split_parameters()
self.update_tile_extent_bounds()
if self.indices_per_axis is not None:
self.set_split_extents_by_indices_per_axis()
elif (self.split_size is not None) or (self.split_num_slices_per_axis is not None):
self.set_split_extents_by_split_size()
elif self.tile_shape is not None:
self.set_split_extents_by_tile_shape()
elif self.max_tile_bytes is not None:
self.set_split_extents_by_tile_max_bytes()
def calculate_split(self):
"""
Computes the split.
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
of dimension :samp:`len({self}.array_shape)`.
Each element of the returned array is a :obj:`tuple`
containing :samp:`len({self}.array_shape)` elements, with each element
being a :obj:`slice` object. Each :obj:`tuple` defines a slice within
the bounds :samp:`{self}.array_start - {self}.halo[:, 0]`
to :samp:`{self}.array_start + {self}.array_shape + {self}.halo[:, 1]`.
"""
self.set_split_extents()
return self.calculate_split_from_extents()
|
array-split/array_split | array_split/split.py | ShapeSplitter.check_consistent_parameter_grouping | python | def check_consistent_parameter_grouping(self):
parameter_groups = {}
if self.indices_per_axis is not None:
parameter_groups["indices_per_axis"] = \
{"self.indices_per_axis": self.indices_per_axis}
if (self.split_size is not None) or (self.split_num_slices_per_axis is not None):
parameter_groups["split_size"] = \
{
"self.split_size": self.split_size,
"self.split_num_slices_per_axis": self.split_num_slices_per_axis,
}
if self.tile_shape is not None:
parameter_groups["tile_shape"] = \
{"self.tile_shape": self.tile_shape}
if self.max_tile_bytes is not None:
parameter_groups["max_tile_bytes"] = \
{"self.max_tile_bytes": self.max_tile_bytes}
if self.max_tile_shape is not None:
if "max_tile_bytes" not in parameter_groups.keys():
parameter_groups["max_tile_bytes"] = {}
parameter_groups["max_tile_bytes"]["self.max_tile_shape"] = self.max_tile_shape
if self.sub_tile_shape is not None:
if "max_tile_bytes" not in parameter_groups.keys():
parameter_groups["max_tile_bytes"] = {}
parameter_groups["max_tile_bytes"]["self.sub_tile_shape"] = self.sub_tile_shape
self.logger.debug("parameter_groups=%s", parameter_groups)
if len(parameter_groups.keys()) > 1:
group_keys = sorted(parameter_groups.keys())
raise ValueError(
"Got conflicting parameter groups specified, "
+
"should only specify one group to define the split:\n"
+
(
"\n".join(
[
(
("Group %18s: " % ("'%s'" % group_key))
+
str(parameter_groups[group_key])
)
for group_key in group_keys
]
)
)
)
if len(parameter_groups.keys()) <= 0:
raise ValueError(
"No split parameters specified, need parameters from one of the groups: "
+
"'indices_per_axis', 'split_size', 'tile_shape' or 'max_tile_bytes'"
) | Ensures this object does not have conflicting groups of parameters.
:raises ValueError: For conflicting or absent parameters. | train | https://github.com/array-split/array_split/blob/e07abe3001209394dde809f7e6f505f9f49a1c26/array_split/split.py#L1091-L1149 | null | class ShapeSplitter(object):
"""
Implements array shape splitting. There are three main (top-level) methods:
:meth:`__init__`
Initialisation of parameters which define the split.
:meth:`set_split_extents`
Calculates the per-axis indices for the cuts. Sets
the :attr:`split_shape`, :attr:`split_begs`
and :attr:`split_ends` attributes.
:meth:`calculate_split`
Calls :meth:`set_split_extents` followed
by :meth:`calculate_split_from_extents` to
return the :obj:`numpy.ndarray` of :obj:`tuple` elements (slices).
Example::
>>> import numpy as np
>>> ary = np.arange(0, 10)
>>> splitter = ShapeSplitter(ary.shape, 3)
>>> split = splitter.calculate_split()
>>> split.shape
(3,)
>>> split
array([(slice(0, 4, None),), (slice(4, 7, None),), (slice(7, 10, None),)],
dtype=[('0', 'O')])
>>> [ary[slyce] for slyce in split.flatten()]
[array([0, 1, 2, 3]), array([4, 5, 6]), array([7, 8, 9])]
>>>
>>> splitter.split_shape # equivalent to split.shape above
array([3])
>>> splitter.split_begs # start indices for tile extents
[array([0, 4, 7])]
>>> splitter.split_ends # stop indices for tile extents
[array([ 4, 7, 10])]
"""
#: Class attribute for :obj:`logging.Logger` logging.
logger = _logging.getLogger(__name__ + ".ShapeSplitter")
#: Class attribute indicating list of valid values for :attr:`tile_bound_policy`.
#: See :data:`ARRAY_BOUNDS` and :data:`NO_BOUNDS`.
valid_tile_bounds_policies = [ARRAY_BOUNDS, NO_BOUNDS]
def __init__(
self,
array_shape,
indices_or_sections=None,
axis=None,
array_start=None,
array_itemsize=1,
tile_shape=None,
max_tile_bytes=None,
max_tile_shape=None,
sub_tile_shape=None,
halo=None,
tile_bounds_policy=ARRAY_BOUNDS
):
# Initialise *private* attributes.
self.__array_shape = None
self.__array_start = None
self.__array_itemsize = None
self.__indices_per_axis = None
self.__split_size = None
self.__split_num_slices_per_axis = None
self.__tile_shape = None
self.__max_tile_bytes = None
self.__max_tile_shape = None
self.__sub_tile_shape = None
self.__halo = None
self.__tile_bounds_policy = None
self.__tile_beg_min = None
self.__tile_end_max = None
self.__split_shape = None
self.__split_begs = None
self.__split_ends = None
# Now set properties from arguments
self.array_shape = _np.array(array_shape)
if array_start is None:
array_start = _np.zeros_like(self.array_shape)
self.array_start = array_start
self.array_itemsize = array_itemsize
indices_per_axis = None
if is_indices(indices_or_sections):
num_subarrays = None
indices_per_axis = indices_or_sections
if (
((axis is None) or is_scalar(axis))
and
(not _np.any([is_sequence(_e) for _e in indices_or_sections]))
):
if axis is None:
axis = 0
# Make indices_per_axis a list of lists, so that
# element 0 is a list of indices for axis 0
indices_per_axis = [None, ] * len(array_shape)
indices_per_axis[axis] = indices_or_sections
else:
indices_per_axis = None
num_subarrays = indices_or_sections
self.indices_per_axis = indices_per_axis
self.split_size = num_subarrays
split_num_slices_per_axis = None
if (self.split_size is not None) or (axis is not None):
if axis is None:
axis = 0
if is_sequence(axis):
split_num_slices_per_axis = pad_with_object(axis, len(self.array_shape), 1)
elif self.split_size is not None:
split_num_slices_per_axis = pad_with_object([], len(self.array_shape), 1)
split_num_slices_per_axis[axis] = self.split_size
self.split_num_slices_per_axis = split_num_slices_per_axis
self.tile_shape = tile_shape
self.max_tile_bytes = max_tile_bytes
self.max_tile_shape = max_tile_shape
self.sub_tile_shape = sub_tile_shape
halo = self.convert_halo_to_array_form(halo)
self.halo = halo
if tile_bounds_policy is None:
tile_bounds_policy = ARRAY_BOUNDS
self.tile_bounds_policy = tile_bounds_policy
self.tile_beg_min = self.array_start
self.tile_end_max = self.array_start + self.array_shape
self.split_shape = None
self.split_begs = None
self.split_ends = None
def convert_halo_to_array_form(self, halo):
"""
Converts the :samp:`{halo}` argument to a :samp:`({self}.array_shape.size, 2)`
shaped array.
:type halo: :samp:`None`, :obj:`int`, :samp:`self.array_shape.size` length sequence
of :samp:`int` or :samp:`(self.array_shape.size, 2)` shaped array
of :samp:`int`
:param halo: Halo to be converted to :samp:`(len(self.array_shape), 2)` shaped array form.
:rtype: :obj:`numpy.ndarray`
:return: A :samp:`(len(self.array_shape), 2)` shaped array of :obj:`numpy.int64` elements.
"""
return convert_halo_to_array_form(halo=halo, ndim=len(self.array_shape))
@property
def array_shape(self):
"""
The shape of the array which is to be split. A sequence of :obj:`int` indicating the
per-axis sizes which are to be split.
"""
return self.__array_shape
@array_shape.setter
def array_shape(self, array_shape):
self.__array_shape = array_shape
@property
def array_start(self):
"""
The start index. A sequence of :obj:`int` indicating the start of indexing for
the tile slices. Defaults to :samp:`numpy.zeros_like({self}.array_shape)`.
"""
return self.__array_start
@array_start.setter
def array_start(self, array_start):
self.__array_start = array_start
@property
def array_itemsize(self):
"""
The number of bytes per array element, see :attr:`max_tile_bytes`.
"""
return self.__array_itemsize
@array_itemsize.setter
def array_itemsize(self, array_itemsize):
self.__array_itemsize = array_itemsize
@property
def indices_per_axis(self):
"""
The per-axis indices indicating the cuts for the split.
A :obj:`list` of 1D :obj:`numpy.ndarray` objects such
that :samp:`{self}.indices_per_axis[i]` indicates the
cut positions for axis :samp:`i`.
"""
return self.__indices_per_axis
@indices_per_axis.setter
def indices_per_axis(self, indices_per_axis):
self.__indices_per_axis = indices_per_axis
@property
def split_size(self):
"""
An :obj:`int` indicating the number of tiles in the calculated split.
"""
return self.__split_size
@split_size.setter
def split_size(self, split_size):
self.__split_size = split_size
@property
def split_num_slices_per_axis(self):
"""
Number of slices per axis.
A 1D :obj:`numpy.ndarray` of :obj:`int` indicating the number of slices (sections)
per axis, so that :samp:`{self}.split_num_slices_per_axis[i]` is an integer
indicating the number of sections along axis :samp:`i` in the calculated split.
"""
return self.__split_num_slices_per_axis
@split_num_slices_per_axis.setter
def split_num_slices_per_axis(self, split_num_slices_per_axis):
self.__split_num_slices_per_axis = split_num_slices_per_axis
@property
def tile_shape(self):
"""
The shape of all tiles in the calculated split.
A 1D :samp:`numpy.ndarray` of :obj:`int` indicating the per-axis
number of elements for tiles in the calculated split.
"""
return self.__tile_shape
@tile_shape.setter
def tile_shape(self, tile_shape):
self.__tile_shape = tile_shape
@property
def max_tile_bytes(self):
"""
The maximum number of bytes for any tile (including :attr:`halo`) in the returned split.
An :obj:`int` which constrains the tile shape such that any tile
from the computed split is no bigger than :samp:`{max_tile_bytes}`.
"""
return self.__max_tile_bytes
@max_tile_bytes.setter
def max_tile_bytes(self, max_tile_bytes):
self.__max_tile_bytes = max_tile_bytes
@property
def max_tile_shape(self):
"""
Per-axis maximum sizes for calculated tiles.
A 1D :samp:`numpy.ndarray` of :obj:`int` indicating the per-axis
maximum number of elements for tiles in the calculated split.
"""
return self.__max_tile_shape
@max_tile_shape.setter
def max_tile_shape(self, max_tile_shape):
self.__max_tile_shape = max_tile_shape
@property
def sub_tile_shape(self):
"""
Calculated tile shape will be an integer multiple of this sub-tile shape.
i.e. :samp:`(self.tile_shape[i] % self.sub_tile_shape[i]) == 0`,
for :samp:`i in range(0, len(self.tile_shape))`.
A 1D :samp:`numpy.ndarray` of :obj:`int` indicating sub-tile shape.
"""
return self.__sub_tile_shape
@sub_tile_shape.setter
def sub_tile_shape(self, sub_tile_shape):
self.__sub_tile_shape = sub_tile_shape
@property
def halo(self):
"""
Per-axis -ve and +ve halo sizes for extending tiles to overlap with neighbouring tiles.
A :samp:`(N, 2)` shaped array indicating the
"""
return self.__halo
@halo.setter
def halo(self, halo):
self.__halo = convert_halo_to_array_form(halo, ndim=self.array_shape.size)
@property
def tile_bounds_policy(self):
"""
A string indicating whether tile halo extents can extend beyond the array domain.
Valid values are indicated by :attr:`valid_tile_bounds_policies`.
"""
return self.__tile_bounds_policy
@tile_bounds_policy.setter
def tile_bounds_policy(self, tile_bounds_policy):
self.__tile_bounds_policy = tile_bounds_policy
@property
def tile_beg_min(self):
"""
The per-axis minimum index for :attr:`slice.start`. The per-axis lower bound for
tile start indices. A 1D :obj:`numpy.ndarray`.
"""
return self.__tile_beg_min
@tile_beg_min.setter
def tile_beg_min(self, tile_beg_min):
self.__tile_beg_min = tile_beg_min
@property
def tile_end_max(self):
"""
The per-axis maximum index for :attr:`slice.stop`. The per-axis upper bound for
tile stop indices. A 1D :obj:`numpy.ndarray`.
"""
return self.__tile_end_max
@tile_end_max.setter
def tile_end_max(self, tile_end_max):
self.__tile_end_max = tile_end_max
@property
def split_shape(self):
"""
The shape of the calculated split array. Indicates the per-axis number
of sections in the calculated split. A 1D :obj:`numpy.ndarray`.
"""
return self.__split_shape
@split_shape.setter
def split_shape(self, split_shape):
self.__split_shape = split_shape
@property
def split_begs(self):
"""
The list of per-axis start indices for :obj:`slice` objects.
A :obj:`list` of 1D :obj:`numpy.ndarray` objects indicating
the :attr:`slice.start` index for for tiles.
"""
return self.__split_begs
@split_begs.setter
def split_begs(self, split_begs):
self.__split_begs = split_begs
@property
def split_ends(self):
"""
The list of per-axis stop indices for :obj:`slice` objects.
A :obj:`list` of 1D :obj:`numpy.ndarray` objects indicating
the :attr:`slice.stop` index for for tiles.
"""
return self.__split_ends
@split_ends.setter
def split_ends(self, split_ends):
self.__split_ends = split_ends
def check_tile_bounds_policy(self):
"""
Raises :obj:`ValueError` if :attr:`tile_bounds_policy`
is not in :samp:`[{self}.ARRAY_BOUNDS, {self}.NO_BOUNDS]`.
"""
if self.tile_bounds_policy not in self.valid_tile_bounds_policies:
raise ValueError(
"Got self.tile_bounds_policy=%s, which is not in %s."
%
(self.tile_bounds_policy, self.valid_tile_bounds_policies)
)
def check_consistent_parameter_dimensions(self):
"""
Ensure that all parameter dimensions are consistent with
the :attr:`array_shape` dimension.
:raises ValueError: For inconsistent parameter dimensions.
"""
if self.indices_per_axis is not None:
if len(self.indices_per_axis) > len(self.array_shape):
raise ValueError(
"Got len(self.indices_per_axis)=%s > len(self.array_shape)=%s, should be equal."
%
(len(self.indices_per_axis), len(self.array_shape))
)
if self.split_num_slices_per_axis is not None:
if len(self.split_num_slices_per_axis) > len(self.array_shape):
raise ValueError(
(
"Got len(self.split_num_slices_per_axis)=%s > len(self.array_shape)=%s,"
+
" should be equal."
)
%
(len(self.split_num_slices_per_axis), len(self.array_shape))
)
if self.tile_shape is not None:
if len(self.tile_shape) != len(self.array_shape):
raise ValueError(
"Got len(self.tile_shape)=%s > len(self.array_shape)=%s, should be equal."
%
(len(self.tile_shape), len(self.array_shape))
)
if self.sub_tile_shape is not None:
if len(self.sub_tile_shape) != len(self.array_shape):
raise ValueError(
"Got len(self.sub_tile_shape)=%s > len(self.array_shape)=%s, should be equal."
%
(len(self.sub_tile_shape), len(self.array_shape))
)
if self.max_tile_shape is not None:
if len(self.max_tile_shape) != len(self.array_shape):
raise ValueError(
"Got len(self.max_tile_shape)=%s > len(self.array_shape)=%s, should be equal."
%
(len(self.max_tile_shape), len(self.array_shape))
)
if self.array_start is not None:
if len(self.array_start) != len(self.array_shape):
raise ValueError(
"Got len(self.array_start)=%s > len(self.array_shape)=%s, should be equal."
%
(len(self.array_start), len(self.array_shape))
)
def check_split_parameters(self):
"""
Ensures this object has a state consistent with evaluating a split.
:raises ValueError: For conflicting or absent parameters.
"""
self.check_tile_bounds_policy()
self.check_consistent_parameter_dimensions()
self.check_consistent_parameter_grouping()
def update_tile_extent_bounds(self):
"""
Updates the :attr:`tile_beg_min` and :attr:`tile_end_max`
data members according to :attr:`tile_bounds_policy`.
"""
if self.tile_bounds_policy == NO_BOUNDS:
self.tile_beg_min = self.array_start - self.halo[:, 0]
self.tile_end_max = self.array_start + self.array_shape + self.halo[:, 1]
elif self.tile_bounds_policy == ARRAY_BOUNDS:
self.tile_beg_min = self.array_start
self.tile_end_max = self.array_start + self.array_shape
def set_split_extents_by_indices_per_axis(self):
"""
Sets split shape :attr:`split_shape` and
split extents (:attr:`split_begs` and :attr:`split_ends`)
from values in :attr:`indices_per_axis`.
"""
if self.indices_per_axis is None:
raise ValueError("Got None for self.indices_per_axis")
self.logger.debug("self.array_shape=%s", self.array_shape)
self.logger.debug("self.indices_per_axis=%s", self.indices_per_axis)
self.indices_per_axis = \
pad_with_none(self.indices_per_axis, len(self.array_shape))
# Define the start and stop indices (extents) for each axis slice
self.split_shape = _np.ones(len(self.array_shape), dtype="int64")
self.split_begs = [[], ] * len(self.array_shape)
self.split_ends = [[], ] * len(self.array_shape)
for i in range(len(self.indices_per_axis)):
indices = self.indices_per_axis[i]
if (indices is not None) and (len(indices) > 0):
self.split_shape[i] = len(indices) + 1
self.split_begs[i] = _np.zeros((len(indices) + 1,), dtype="int64")
self.split_begs[i][1:] = indices
self.split_ends[i] = _np.zeros((len(self.split_begs[i]),), dtype="int64")
self.split_ends[i][0:-1] = self.split_begs[i][1:]
self.split_ends[i][-1] = self.array_shape[i]
else:
# start and stop is the full width of the axis
self.split_begs[i] = [0, ]
self.split_ends[i] = [self.array_shape[i], ]
self.logger.debug("self.indices_per_axis=%s", self.indices_per_axis)
def calculate_split_from_extents(self):
"""
Returns split calculated using extents obtained
from :attr:`split_begs` and :attr:`split_ends`.
All calls to calculate the split end up here to produce
the :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
of :obj:`tuple`-of-:obj:`slice` elements.
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
where each element is a :obj:`tuple` of :obj:`slice` objects.
"""
self.logger.debug("self.split_shape=%s", self.split_shape)
self.logger.debug("self.split_begs=%s", self.split_begs)
self.logger.debug("self.split_ends=%s", self.split_ends)
ret = \
_np.array(
[
tuple(
[
slice(
max([
self.split_begs[d][idx[d]]
+ self.array_start[d]
- self.halo[d, 0]
* (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]]),
self.tile_beg_min[d]
]),
min([
self.split_ends[d][idx[d]]
+ self.array_start[d]
+ self.halo[d, 1]
* (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]]),
self.tile_end_max[d]
])
)
for d in range(len(self.split_shape))
]
)
for idx in
_np.array(
_np.unravel_index(
_np.arange(0, _np.product(self.split_shape)),
self.split_shape
)
).T
],
dtype=[("%d" % d, "object") for d in range(len(self.split_shape))]
).reshape(self.split_shape)
return ret
def calculate_split_halos_from_extents(self):
"""
Returns :samp:`(self.ndim, 2)` shaped halo array elements indicating
the halo for each split. Tiles on the boundary may have the halo trimmed
to account for the :attr:`tile_bounds_policy`.
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
where each element is a :samp:`(self.ndim, 2)` shaped :obj:`numpy.ndarray`
indicating the per-axis and per-direction number of halo elements for each tile
in the split.
"""
self.logger.debug("self.split_shape=%s", self.split_shape)
self.logger.debug("self.split_begs=%s", self.split_begs)
self.logger.debug("self.split_ends=%s", self.split_ends)
ret = \
_np.array(
[
(
tuple(
(
min([
self.split_begs[d][idx[d]] - self.tile_beg_min[d],
self.halo[d, 0]
*
(self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]])
]),
min([
self.tile_end_max[d] - self.split_ends[d][idx[d]],
self.halo[d, 1]
*
(self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]])
])
)
for d in range(len(self.split_shape))
)
)
for idx in
_np.array(
_np.unravel_index(
_np.arange(0, _np.product(self.split_shape)),
self.split_shape
)
).T
],
dtype=[("%d" % d, "2int64") for d in range(len(self.split_shape))]
).reshape(self.split_shape)
return ret
def calculate_split_by_indices_per_axis(self):
"""
Returns split calculated using extents obtained
from :attr:`indices_per_axis`.
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
where each element is a :obj:`tuple` of :obj:`slice` objects.
"""
self.set_split_extents_by_indices_per_axis()
return self.calculate_split_from_extents()
def calculate_axis_split_extents(self, num_sections, size):
"""
Divides :samp:`range(0, {size})` into (approximately) equal sized
intervals. Returns :samp:`(begs, ends)` where :samp:`slice(begs[i], ends[i])`
define the intervals for :samp:`i in range(0, {num_sections})`.
:type num_sections: :obj:`int`
:param num_sections: Divide :samp:`range(0, {size})` into this
many intervals (approximately) equal sized intervals.
:type size: :obj:`int`
:param size: Range for the subdivision.
:rtype: :obj:`tuple`
:return: Two element tuple :samp:`(begs, ends)`
such that :samp:`slice(begs[i], ends[i])` define the
intervals for :samp:`i in range(0, {num_sections})`.
"""
section_size = size // num_sections
if section_size >= 1:
begs = _np.arange(0, section_size * num_sections, section_size)
rem = size - section_size * num_sections
if rem > 0:
for i in range(rem):
begs[i + 1:] += 1
ends = _np.zeros_like(begs)
ends[0:-1] = begs[1:]
ends[-1] = size
else:
begs = _np.arange(0, num_sections)
begs[size:] = size
ends = begs.copy()
ends[0:-1] = begs[1:]
return begs, ends
def set_split_extents_by_split_size(self):
"""
Sets split shape :attr:`split_shape` and
split extents (:attr:`split_begs` and :attr:`split_ends`)
from values in :attr:`split_size` and :attr:`split_num_slices_per_axis`.
"""
if self.split_size is None:
if (
_np.all([s is not None for s in self.split_num_slices_per_axis])
and
_np.all([s > 0 for s in self.split_num_slices_per_axis])
):
self.split_size = _np.product(self.split_num_slices_per_axis)
else:
raise ValueError(
(
"Got invalid self.split_num_slices_per_axis=%s, all elements "
+
"need to be integers greater than zero when self.split_size is None."
)
%
self.split_num_slices_per_axis
)
self.logger.debug(
"Pre cannonicalise: self.split_num_slices_per_axis=%s",
self.split_num_slices_per_axis)
self.split_num_slices_per_axis = \
calculate_num_slices_per_axis(
self.split_num_slices_per_axis,
self.split_size,
self.array_shape
)
self.logger.debug(
"Post cannonicalise: self.split_num_slices_per_axis=%s",
self.split_num_slices_per_axis)
# Define the start and stop indices (extents) for each axis slice
self.split_shape = self.split_num_slices_per_axis.copy()
self.split_begs = [[], ] * len(self.array_shape)
self.split_ends = [[], ] * len(self.array_shape)
for i in range(len(self.array_shape)):
self.split_begs[i], self.split_ends[i] = \
self.calculate_axis_split_extents(
self.split_shape[i],
self.array_shape[i]
)
def calculate_split_by_split_size(self):
"""
Returns split calculated using extents obtained
from :attr:`split_size` and :attr:`split_num_slices_per_axis`.
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
where each element is a :obj:`tuple` of :obj:`slice` objects.
"""
self.set_split_extents_by_split_size()
return self.calculate_split_from_extents()
def set_split_extents_by_tile_shape(self):
"""
Sets split shape :attr:`split_shape` and
split extents (:attr:`split_begs` and :attr:`split_ends`)
from value of :attr:`tile_shape`.
"""
self.split_shape = ((self.array_shape - 1) // self.tile_shape) + 1
self.split_begs = [[], ] * len(self.array_shape)
self.split_ends = [[], ] * len(self.array_shape)
for i in range(len(self.array_shape)):
self.split_begs[i] = _np.arange(0, self.array_shape[i], self.tile_shape[i])
self.split_ends[i] = _np.zeros_like(self.split_begs[i])
self.split_ends[i][0:-1] = self.split_begs[i][1:]
self.split_ends[i][-1] = self.array_shape[i]
def calculate_split_by_tile_shape(self):
"""
Returns split calculated using extents obtained
from :attr:`tile_shape`.
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
where each element is a :obj:`tuple` of :obj:`slice` objects.
"""
self.set_split_extents_by_tile_shape()
return self.calculate_split_from_extents()
def set_split_extents_by_tile_max_bytes(self):
"""
Sets split extents (:attr:`split_begs`
and :attr:`split_ends`) calculated using
from :attr:`max_tile_bytes`
(and :attr:`max_tile_shape`, :attr:`sub_tile_shape`, :attr:`halo`).
"""
self.tile_shape = \
calculate_tile_shape_for_max_bytes(
array_shape=self.array_shape,
array_itemsize=self.array_itemsize,
max_tile_bytes=self.max_tile_bytes,
max_tile_shape=self.max_tile_shape,
sub_tile_shape=self.sub_tile_shape,
halo=self.halo
)
self.set_split_extents_by_tile_shape()
def calculate_split_by_tile_max_bytes(self):
"""
Returns split calculated using extents obtained
from :attr:`max_tile_bytes`
(and :attr:`max_tile_shape`, :attr:`sub_tile_shape`, :attr:`halo`).
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
where each element is a :obj:`tuple` of :obj:`slice` objects.
"""
self.set_split_extents_by_tile_max_bytes()
return self.calculate_split_from_extents()
def set_split_extents(self):
"""
Sets split extents (:attr:`split_begs`
and :attr:`split_ends`) calculated using
selected attributes set from :meth:`__init__`.
"""
self.check_split_parameters()
self.update_tile_extent_bounds()
if self.indices_per_axis is not None:
self.set_split_extents_by_indices_per_axis()
elif (self.split_size is not None) or (self.split_num_slices_per_axis is not None):
self.set_split_extents_by_split_size()
elif self.tile_shape is not None:
self.set_split_extents_by_tile_shape()
elif self.max_tile_bytes is not None:
self.set_split_extents_by_tile_max_bytes()
def calculate_split(self):
"""
Computes the split.
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
of dimension :samp:`len({self}.array_shape)`.
Each element of the returned array is a :obj:`tuple`
containing :samp:`len({self}.array_shape)` elements, with each element
being a :obj:`slice` object. Each :obj:`tuple` defines a slice within
the bounds :samp:`{self}.array_start - {self}.halo[:, 0]`
to :samp:`{self}.array_start + {self}.array_shape + {self}.halo[:, 1]`.
"""
self.set_split_extents()
return self.calculate_split_from_extents()
|
array-split/array_split | array_split/split.py | ShapeSplitter.update_tile_extent_bounds | python | def update_tile_extent_bounds(self):
if self.tile_bounds_policy == NO_BOUNDS:
self.tile_beg_min = self.array_start - self.halo[:, 0]
self.tile_end_max = self.array_start + self.array_shape + self.halo[:, 1]
elif self.tile_bounds_policy == ARRAY_BOUNDS:
self.tile_beg_min = self.array_start
self.tile_end_max = self.array_start + self.array_shape | Updates the :attr:`tile_beg_min` and :attr:`tile_end_max`
data members according to :attr:`tile_bounds_policy`. | train | https://github.com/array-split/array_split/blob/e07abe3001209394dde809f7e6f505f9f49a1c26/array_split/split.py#L1162-L1173 | null | class ShapeSplitter(object):
"""
Implements array shape splitting. There are three main (top-level) methods:
:meth:`__init__`
Initialisation of parameters which define the split.
:meth:`set_split_extents`
Calculates the per-axis indices for the cuts. Sets
the :attr:`split_shape`, :attr:`split_begs`
and :attr:`split_ends` attributes.
:meth:`calculate_split`
Calls :meth:`set_split_extents` followed
by :meth:`calculate_split_from_extents` to
return the :obj:`numpy.ndarray` of :obj:`tuple` elements (slices).
Example::
>>> import numpy as np
>>> ary = np.arange(0, 10)
>>> splitter = ShapeSplitter(ary.shape, 3)
>>> split = splitter.calculate_split()
>>> split.shape
(3,)
>>> split
array([(slice(0, 4, None),), (slice(4, 7, None),), (slice(7, 10, None),)],
dtype=[('0', 'O')])
>>> [ary[slyce] for slyce in split.flatten()]
[array([0, 1, 2, 3]), array([4, 5, 6]), array([7, 8, 9])]
>>>
>>> splitter.split_shape # equivalent to split.shape above
array([3])
>>> splitter.split_begs # start indices for tile extents
[array([0, 4, 7])]
>>> splitter.split_ends # stop indices for tile extents
[array([ 4, 7, 10])]
"""
#: Class attribute for :obj:`logging.Logger` logging.
logger = _logging.getLogger(__name__ + ".ShapeSplitter")
#: Class attribute indicating list of valid values for :attr:`tile_bound_policy`.
#: See :data:`ARRAY_BOUNDS` and :data:`NO_BOUNDS`.
valid_tile_bounds_policies = [ARRAY_BOUNDS, NO_BOUNDS]
def __init__(
        self,
        array_shape,
        indices_or_sections=None,
        axis=None,
        array_start=None,
        array_itemsize=1,
        tile_shape=None,
        max_tile_bytes=None,
        max_tile_shape=None,
        sub_tile_shape=None,
        halo=None,
        tile_bounds_policy=ARRAY_BOUNDS
):
    """
    Initialises the parameters which define the split.

    Exactly one parameter *group* should define the split:
    per-axis cut indices (:samp:`{indices_or_sections}` as a sequence),
    a number of sections (:samp:`{indices_or_sections}` as an :obj:`int`,
    optionally with :samp:`{axis}`), an explicit :samp:`{tile_shape}`,
    or a byte budget (:samp:`{max_tile_bytes}` with the
    optional :samp:`{max_tile_shape}` and :samp:`{sub_tile_shape}`
    qualifiers).  Consistency is enforced later
    by :meth:`check_split_parameters`.
    """
    # Initialise *private* attributes.
    self.__array_shape = None
    self.__array_start = None
    self.__array_itemsize = None
    self.__indices_per_axis = None
    self.__split_size = None
    self.__split_num_slices_per_axis = None
    self.__tile_shape = None
    self.__max_tile_bytes = None
    self.__max_tile_shape = None
    self.__sub_tile_shape = None
    self.__halo = None
    self.__tile_bounds_policy = None
    self.__tile_beg_min = None
    self.__tile_end_max = None
    self.__split_shape = None
    self.__split_begs = None
    self.__split_ends = None
    # Now set properties from arguments
    self.array_shape = _np.array(array_shape)
    if array_start is None:
        array_start = _np.zeros_like(self.array_shape)
    self.array_start = array_start
    self.array_itemsize = array_itemsize
    indices_per_axis = None
    # Disambiguate indices_or_sections: a sequence means explicit cut
    # indices, a scalar means "split into this many sections".
    if is_indices(indices_or_sections):
        num_subarrays = None
        indices_per_axis = indices_or_sections
        if (
            ((axis is None) or is_scalar(axis))
            and
            (not _np.any([is_sequence(_e) for _e in indices_or_sections]))
        ):
            if axis is None:
                axis = 0
            # Make indices_per_axis a list of lists, so that
            # element 0 is a list of indices for axis 0
            indices_per_axis = [None, ] * len(array_shape)
            indices_per_axis[axis] = indices_or_sections
    else:
        indices_per_axis = None
        num_subarrays = indices_or_sections
    self.indices_per_axis = indices_per_axis
    self.split_size = num_subarrays
    split_num_slices_per_axis = None
    # axis may itself be a sequence giving the per-axis section counts;
    # a scalar axis plus split_size means "split_size sections along axis".
    if (self.split_size is not None) or (axis is not None):
        if axis is None:
            axis = 0
        if is_sequence(axis):
            split_num_slices_per_axis = pad_with_object(axis, len(self.array_shape), 1)
        elif self.split_size is not None:
            split_num_slices_per_axis = pad_with_object([], len(self.array_shape), 1)
            split_num_slices_per_axis[axis] = self.split_size
    self.split_num_slices_per_axis = split_num_slices_per_axis
    self.tile_shape = tile_shape
    self.max_tile_bytes = max_tile_bytes
    self.max_tile_shape = max_tile_shape
    self.sub_tile_shape = sub_tile_shape
    halo = self.convert_halo_to_array_form(halo)
    self.halo = halo
    if tile_bounds_policy is None:
        tile_bounds_policy = ARRAY_BOUNDS
    self.tile_bounds_policy = tile_bounds_policy
    # Default tile bounds; re-derived from tile_bounds_policy
    # in update_tile_extent_bounds().
    self.tile_beg_min = self.array_start
    self.tile_end_max = self.array_start + self.array_shape
    self.split_shape = None
    self.split_begs = None
    self.split_ends = None
def convert_halo_to_array_form(self, halo):
    """
    Converts the :samp:`{halo}` argument to a :samp:`({self}.array_shape.size, 2)`
    shaped array.

    :type halo: :samp:`None`, :obj:`int`, :samp:`self.array_shape.size` length sequence
       of :samp:`int` or :samp:`(self.array_shape.size, 2)` shaped array
       of :samp:`int`
    :param halo: Halo to be converted to :samp:`(len(self.array_shape), 2)` shaped array form.
    :rtype: :obj:`numpy.ndarray`
    :return: A :samp:`(len(self.array_shape), 2)` shaped array of :obj:`numpy.int64` elements.
    """
    # Delegates to the module-level helper of the same name.
    return convert_halo_to_array_form(halo=halo, ndim=len(self.array_shape))
# -- Accessor properties; state is kept in name-mangled private attributes. --

@property
def array_shape(self):
    """
    The shape of the array which is to be split. A sequence of :obj:`int` indicating the
    per-axis sizes which are to be split.
    """
    return self.__array_shape

@array_shape.setter
def array_shape(self, array_shape):
    self.__array_shape = array_shape

@property
def array_start(self):
    """
    The start index. A sequence of :obj:`int` indicating the start of indexing for
    the tile slices. Defaults to :samp:`numpy.zeros_like({self}.array_shape)`.
    """
    return self.__array_start

@array_start.setter
def array_start(self, array_start):
    self.__array_start = array_start

@property
def array_itemsize(self):
    """
    The number of bytes per array element, see :attr:`max_tile_bytes`.
    """
    return self.__array_itemsize

@array_itemsize.setter
def array_itemsize(self, array_itemsize):
    self.__array_itemsize = array_itemsize

@property
def indices_per_axis(self):
    """
    The per-axis indices indicating the cuts for the split.
    A :obj:`list` of 1D :obj:`numpy.ndarray` objects such
    that :samp:`{self}.indices_per_axis[i]` indicates the
    cut positions for axis :samp:`i`.
    """
    return self.__indices_per_axis

@indices_per_axis.setter
def indices_per_axis(self, indices_per_axis):
    self.__indices_per_axis = indices_per_axis

@property
def split_size(self):
    """
    An :obj:`int` indicating the number of tiles in the calculated split.
    """
    return self.__split_size

@split_size.setter
def split_size(self, split_size):
    self.__split_size = split_size

@property
def split_num_slices_per_axis(self):
    """
    Number of slices per axis.
    A 1D :obj:`numpy.ndarray` of :obj:`int` indicating the number of slices (sections)
    per axis, so that :samp:`{self}.split_num_slices_per_axis[i]` is an integer
    indicating the number of sections along axis :samp:`i` in the calculated split.
    """
    return self.__split_num_slices_per_axis

@split_num_slices_per_axis.setter
def split_num_slices_per_axis(self, split_num_slices_per_axis):
    self.__split_num_slices_per_axis = split_num_slices_per_axis

@property
def tile_shape(self):
    """
    The shape of all tiles in the calculated split.
    A 1D :samp:`numpy.ndarray` of :obj:`int` indicating the per-axis
    number of elements for tiles in the calculated split.
    """
    return self.__tile_shape

@tile_shape.setter
def tile_shape(self, tile_shape):
    self.__tile_shape = tile_shape

@property
def max_tile_bytes(self):
    """
    The maximum number of bytes for any tile (including :attr:`halo`) in the returned split.
    An :obj:`int` which constrains the tile shape such that any tile
    from the computed split is no bigger than :samp:`{max_tile_bytes}`.
    """
    return self.__max_tile_bytes

@max_tile_bytes.setter
def max_tile_bytes(self, max_tile_bytes):
    self.__max_tile_bytes = max_tile_bytes

@property
def max_tile_shape(self):
    """
    Per-axis maximum sizes for calculated tiles.
    A 1D :samp:`numpy.ndarray` of :obj:`int` indicating the per-axis
    maximum number of elements for tiles in the calculated split.
    """
    return self.__max_tile_shape

@max_tile_shape.setter
def max_tile_shape(self, max_tile_shape):
    self.__max_tile_shape = max_tile_shape

@property
def sub_tile_shape(self):
    """
    Calculated tile shape will be an integer multiple of this sub-tile shape.
    i.e. :samp:`(self.tile_shape[i] % self.sub_tile_shape[i]) == 0`,
    for :samp:`i in range(0, len(self.tile_shape))`.
    A 1D :samp:`numpy.ndarray` of :obj:`int` indicating sub-tile shape.
    """
    return self.__sub_tile_shape

@sub_tile_shape.setter
def sub_tile_shape(self, sub_tile_shape):
    self.__sub_tile_shape = sub_tile_shape

@property
def halo(self):
    """
    Per-axis -ve and +ve halo sizes for extending tiles to overlap with neighbouring tiles.
    A :samp:`(N, 2)` shaped array where column 0 gives the number of elements
    each tile is extended in the negative index direction and column 1 the
    extension in the positive index direction, per axis.
    """
    return self.__halo

@halo.setter
def halo(self, halo):
    # Normalise scalar / per-axis / (N, 2) halo forms to the (N, 2) array form.
    self.__halo = convert_halo_to_array_form(halo, ndim=self.array_shape.size)

@property
def tile_bounds_policy(self):
    """
    A string indicating whether tile halo extents can extend beyond the array domain.
    Valid values are indicated by :attr:`valid_tile_bounds_policies`.
    """
    return self.__tile_bounds_policy

@tile_bounds_policy.setter
def tile_bounds_policy(self, tile_bounds_policy):
    self.__tile_bounds_policy = tile_bounds_policy

@property
def tile_beg_min(self):
    """
    The per-axis minimum index for :attr:`slice.start`. The per-axis lower bound for
    tile start indices. A 1D :obj:`numpy.ndarray`.
    """
    return self.__tile_beg_min

@tile_beg_min.setter
def tile_beg_min(self, tile_beg_min):
    self.__tile_beg_min = tile_beg_min

@property
def tile_end_max(self):
    """
    The per-axis maximum index for :attr:`slice.stop`. The per-axis upper bound for
    tile stop indices. A 1D :obj:`numpy.ndarray`.
    """
    return self.__tile_end_max

@tile_end_max.setter
def tile_end_max(self, tile_end_max):
    self.__tile_end_max = tile_end_max

@property
def split_shape(self):
    """
    The shape of the calculated split array. Indicates the per-axis number
    of sections in the calculated split. A 1D :obj:`numpy.ndarray`.
    """
    return self.__split_shape

@split_shape.setter
def split_shape(self, split_shape):
    self.__split_shape = split_shape

@property
def split_begs(self):
    """
    The list of per-axis start indices for :obj:`slice` objects.
    A :obj:`list` of 1D :obj:`numpy.ndarray` objects indicating
    the :attr:`slice.start` index for for tiles.
    """
    return self.__split_begs

@split_begs.setter
def split_begs(self, split_begs):
    self.__split_begs = split_begs

@property
def split_ends(self):
    """
    The list of per-axis stop indices for :obj:`slice` objects.
    A :obj:`list` of 1D :obj:`numpy.ndarray` objects indicating
    the :attr:`slice.stop` index for for tiles.
    """
    return self.__split_ends

@split_ends.setter
def split_ends(self, split_ends):
    self.__split_ends = split_ends
def check_tile_bounds_policy(self):
    """
    Raises :obj:`ValueError` if :attr:`tile_bounds_policy`
    is not in :samp:`[{self}.ARRAY_BOUNDS, {self}.NO_BOUNDS]`.
    """
    policy = self.tile_bounds_policy
    allowed = self.valid_tile_bounds_policies
    if policy in allowed:
        return
    raise ValueError(
        "Got self.tile_bounds_policy=%s, which is not in %s."
        %
        (policy, allowed)
    )
def check_consistent_parameter_dimensions(self):
    """
    Ensure that all per-axis parameter lengths are consistent with
    the :attr:`array_shape` dimension.

    :raises ValueError: For inconsistent parameter dimensions.
    """
    ndim = len(self.array_shape)
    # indices_per_axis and split_num_slices_per_axis are padded up to the
    # array dimension later, so they may be *shorter* but never longer.
    # (Fixed: the previous error text said "should be equal" for these
    # ">" checks, and said "... > ..." for the "!=" checks below.)
    if self.indices_per_axis is not None:
        if len(self.indices_per_axis) > ndim:
            raise ValueError(
                "Got len(self.indices_per_axis)=%s > len(self.array_shape)=%s, "
                "should not exceed the array dimension."
                %
                (len(self.indices_per_axis), ndim)
            )
    if self.split_num_slices_per_axis is not None:
        if len(self.split_num_slices_per_axis) > ndim:
            raise ValueError(
                "Got len(self.split_num_slices_per_axis)=%s > len(self.array_shape)=%s, "
                "should not exceed the array dimension."
                %
                (len(self.split_num_slices_per_axis), ndim)
            )
    # The remaining per-axis parameters must match the array dimension
    # exactly; check in the same order as before so the first error
    # surfaced is unchanged.
    for attr_name in ("tile_shape", "sub_tile_shape", "max_tile_shape", "array_start"):
        value = getattr(self, attr_name)
        if (value is not None) and (len(value) != ndim):
            raise ValueError(
                "Got len(self.%s)=%s != len(self.array_shape)=%s, should be equal."
                %
                (attr_name, len(value), ndim)
            )
def check_consistent_parameter_grouping(self):
    """
    Ensures this object does not have conflicting groups of parameters.

    :raises ValueError: For conflicting or absent parameters.
    """
    # Collect the parameter groups which have been specified; exactly one
    # group may define the split.  Keys are group names, values map the
    # attribute names of that group to their supplied values (used only
    # for the error message).
    parameter_groups = {}
    if self.indices_per_axis is not None:
        parameter_groups["indices_per_axis"] = \
            {"self.indices_per_axis": self.indices_per_axis}
    if (self.split_size is not None) or (self.split_num_slices_per_axis is not None):
        parameter_groups["split_size"] = \
            {
                "self.split_size": self.split_size,
                "self.split_num_slices_per_axis": self.split_num_slices_per_axis,
            }
    if self.tile_shape is not None:
        parameter_groups["tile_shape"] = \
            {"self.tile_shape": self.tile_shape}
    if self.max_tile_bytes is not None:
        parameter_groups["max_tile_bytes"] = \
            {"self.max_tile_bytes": self.max_tile_bytes}
    # max_tile_shape and sub_tile_shape are qualifiers of the
    # "max_tile_bytes" group, so they are folded into that group.
    if self.max_tile_shape is not None:
        if "max_tile_bytes" not in parameter_groups.keys():
            parameter_groups["max_tile_bytes"] = {}
        parameter_groups["max_tile_bytes"]["self.max_tile_shape"] = self.max_tile_shape
    if self.sub_tile_shape is not None:
        if "max_tile_bytes" not in parameter_groups.keys():
            parameter_groups["max_tile_bytes"] = {}
        parameter_groups["max_tile_bytes"]["self.sub_tile_shape"] = self.sub_tile_shape
    self.logger.debug("parameter_groups=%s", parameter_groups)
    # More than one group: report every specified group, sorted by name.
    if len(parameter_groups.keys()) > 1:
        group_keys = sorted(parameter_groups.keys())
        raise ValueError(
            "Got conflicting parameter groups specified, "
            +
            "should only specify one group to define the split:\n"
            +
            (
                "\n".join(
                    [
                        (
                            ("Group %18s: " % ("'%s'" % group_key))
                            +
                            str(parameter_groups[group_key])
                        )
                        for group_key in group_keys
                    ]
                )
            )
        )
    # No group at all: nothing defines the split.
    if len(parameter_groups.keys()) <= 0:
        raise ValueError(
            "No split parameters specified, need parameters from one of the groups: "
            +
            "'indices_per_axis', 'split_size', 'tile_shape' or 'max_tile_bytes'"
        )
def check_split_parameters(self):
    """
    Ensures this object has a state consistent with evaluating a split.

    :raises ValueError: For conflicting or absent parameters.
    """
    # Validation order: policy string first, then per-axis dimension
    # consistency, then "exactly one parameter group" consistency.
    self.check_tile_bounds_policy()
    self.check_consistent_parameter_dimensions()
    self.check_consistent_parameter_grouping()
def set_split_extents_by_indices_per_axis(self):
    """
    Sets split shape :attr:`split_shape` and
    split extents (:attr:`split_begs` and :attr:`split_ends`)
    from values in :attr:`indices_per_axis`.
    """
    if self.indices_per_axis is None:
        raise ValueError("Got None for self.indices_per_axis")
    self.logger.debug("self.array_shape=%s", self.array_shape)
    self.logger.debug("self.indices_per_axis=%s", self.indices_per_axis)
    # Pad with None so there is one (possibly absent) entry per axis.
    self.indices_per_axis = \
        pad_with_none(self.indices_per_axis, len(self.array_shape))
    # Define the start and stop indices (extents) for each axis slice
    self.split_shape = _np.ones(len(self.array_shape), dtype="int64")
    self.split_begs = [[], ] * len(self.array_shape)
    self.split_ends = [[], ] * len(self.array_shape)
    for i in range(len(self.indices_per_axis)):
        indices = self.indices_per_axis[i]
        if (indices is not None) and (len(indices) > 0):
            # N cut indices produce N + 1 sections along this axis:
            # starts are [0, *indices], each stop is the next start,
            # and the final stop is the axis length.
            self.split_shape[i] = len(indices) + 1
            self.split_begs[i] = _np.zeros((len(indices) + 1,), dtype="int64")
            self.split_begs[i][1:] = indices
            self.split_ends[i] = _np.zeros((len(self.split_begs[i]),), dtype="int64")
            self.split_ends[i][0:-1] = self.split_begs[i][1:]
            self.split_ends[i][-1] = self.array_shape[i]
        else:
            # start and stop is the full width of the axis
            self.split_begs[i] = [0, ]
            self.split_ends[i] = [self.array_shape[i], ]
    self.logger.debug("self.indices_per_axis=%s", self.indices_per_axis)
def calculate_split_from_extents(self):
    """
    Returns split calculated using extents obtained
    from :attr:`split_begs` and :attr:`split_ends`.
    All calls to calculate the split end up here to produce
    the :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
    of :obj:`tuple`-of-:obj:`slice` elements.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
       where each element is a :obj:`tuple` of :obj:`slice` objects.
    """
    self.logger.debug("self.split_shape=%s", self.split_shape)
    self.logger.debug("self.split_begs=%s", self.split_begs)
    self.logger.debug("self.split_ends=%s", self.split_ends)
    # For axis d of tile index idx:
    #   start = split_begs[d][idx[d]] + array_start[d] - halo[d, 0]
    #   stop  = split_ends[d][idx[d]] + array_start[d] + halo[d, 1]
    # where each halo term is multiplied by (ends > begs) so *empty*
    # extents acquire no halo, and start/stop are clamped to
    # [tile_beg_min[d], tile_end_max[d]] (set by update_tile_extent_bounds).
    # Tile indices enumerate the cartesian grid of shape split_shape
    # (flat index unravelled via unravel_index), and the structured dtype
    # has one object field per axis.
    ret = \
        _np.array(
            [
                tuple(
                    [
                        slice(
                            max([
                                self.split_begs[d][idx[d]]
                                + self.array_start[d]
                                - self.halo[d, 0]
                                * (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]]),
                                self.tile_beg_min[d]
                            ]),
                            min([
                                self.split_ends[d][idx[d]]
                                + self.array_start[d]
                                + self.halo[d, 1]
                                * (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]]),
                                self.tile_end_max[d]
                            ])
                        )
                        for d in range(len(self.split_shape))
                    ]
                )
                for idx in
                _np.array(
                    _np.unravel_index(
                        _np.arange(0, _np.product(self.split_shape)),
                        self.split_shape
                    )
                ).T
            ],
            dtype=[("%d" % d, "object") for d in range(len(self.split_shape))]
        ).reshape(self.split_shape)
    return ret
def calculate_split_halos_from_extents(self):
    """
    Returns :samp:`(self.ndim, 2)` shaped halo array elements indicating
    the halo for each split. Tiles on the boundary may have the halo trimmed
    to account for the :attr:`tile_bounds_policy`.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
       where each element is a :samp:`(self.ndim, 2)` shaped :obj:`numpy.ndarray`
       indicating the per-axis and per-direction number of halo elements for each tile
       in the split.
    """
    self.logger.debug("self.split_shape=%s", self.split_shape)
    self.logger.debug("self.split_begs=%s", self.split_begs)
    self.logger.debug("self.split_ends=%s", self.split_ends)
    # For axis d of tile index idx, the halo actually applied is the
    # nominal halo[d, 0] / halo[d, 1] (zeroed for empty extents, i.e. when
    # ends <= begs), trimmed so the haloed tile cannot extend below
    # tile_beg_min[d] or above tile_end_max[d].  One "2int64" field per
    # axis gives each element the (ndim, 2) shape.
    ret = \
        _np.array(
            [
                (
                    tuple(
                        (
                            min([
                                self.split_begs[d][idx[d]] - self.tile_beg_min[d],
                                self.halo[d, 0]
                                *
                                (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]])
                            ]),
                            min([
                                self.tile_end_max[d] - self.split_ends[d][idx[d]],
                                self.halo[d, 1]
                                *
                                (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]])
                            ])
                        )
                        for d in range(len(self.split_shape))
                    )
                )
                for idx in
                _np.array(
                    _np.unravel_index(
                        _np.arange(0, _np.product(self.split_shape)),
                        self.split_shape
                    )
                ).T
            ],
            dtype=[("%d" % d, "2int64") for d in range(len(self.split_shape))]
        ).reshape(self.split_shape)
    return ret
def calculate_split_by_indices_per_axis(self):
    """
    Computes the split defined by the :attr:`indices_per_axis`
    cut positions.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
       where each element is a :obj:`tuple` of :obj:`slice` objects.
    """
    self.set_split_extents_by_indices_per_axis()
    split = self.calculate_split_from_extents()
    return split
def calculate_axis_split_extents(self, num_sections, size):
    """
    Divides :samp:`range(0, {size})` into (approximately) equal sized
    intervals. Returns :samp:`(begs, ends)` where :samp:`slice(begs[i], ends[i])`
    define the intervals for :samp:`i in range(0, {num_sections})`.

    :type num_sections: :obj:`int`
    :param num_sections: Divide :samp:`range(0, {size})` into this
       many intervals (approximately) equal sized intervals.
    :type size: :obj:`int`
    :param size: Range for the subdivision.
    :rtype: :obj:`tuple`
    :return: Two element tuple :samp:`(begs, ends)`
       such that :samp:`slice(begs[i], ends[i])` define the
       intervals for :samp:`i in range(0, {num_sections})`.
    """
    section_size = size // num_sections
    if section_size >= 1:
        begs = _np.arange(0, section_size * num_sections, section_size)
        # Distribute the remainder: the first (size % num_sections)
        # sections each get one extra element, so section j starts
        # min(j, rem) elements later than the uniform grid.  (Vectorized
        # replacement for the previous O(num_sections * rem) loop of
        # begs[i + 1:] += 1 increments.)
        rem = size - section_size * num_sections
        if rem > 0:
            begs += _np.minimum(_np.arange(num_sections), rem)
        ends = _np.zeros_like(begs)
        ends[0:-1] = begs[1:]
        ends[-1] = size
    else:
        # More sections than elements: the first `size` sections hold one
        # element each, the remainder are empty (beg == end == size).
        begs = _np.arange(0, num_sections)
        begs[size:] = size
        ends = begs.copy()
        ends[0:-1] = begs[1:]
    return begs, ends
def set_split_extents_by_split_size(self):
    """
    Sets split shape :attr:`split_shape` and
    split extents (:attr:`split_begs` and :attr:`split_ends`)
    from values in :attr:`split_size` and :attr:`split_num_slices_per_axis`.
    """
    # If only per-axis section counts were given, the total split size is
    # their product; all counts must then be positive integers.
    if self.split_size is None:
        if (
            _np.all([s is not None for s in self.split_num_slices_per_axis])
            and
            _np.all([s > 0 for s in self.split_num_slices_per_axis])
        ):
            self.split_size = _np.product(self.split_num_slices_per_axis)
        else:
            raise ValueError(
                (
                    "Got invalid self.split_num_slices_per_axis=%s, all elements "
                    +
                    "need to be integers greater than zero when self.split_size is None."
                )
                %
                self.split_num_slices_per_axis
            )
    self.logger.debug(
        "Pre cannonicalise: self.split_num_slices_per_axis=%s",
        self.split_num_slices_per_axis)
    # Canonicalise: fill in unconstrained axes so the per-axis counts
    # multiply out to split_size (module-level helper).
    self.split_num_slices_per_axis = \
        calculate_num_slices_per_axis(
            self.split_num_slices_per_axis,
            self.split_size,
            self.array_shape
        )
    self.logger.debug(
        "Post cannonicalise: self.split_num_slices_per_axis=%s",
        self.split_num_slices_per_axis)
    # Define the start and stop indices (extents) for each axis slice
    self.split_shape = self.split_num_slices_per_axis.copy()
    self.split_begs = [[], ] * len(self.array_shape)
    self.split_ends = [[], ] * len(self.array_shape)
    for i in range(len(self.array_shape)):
        self.split_begs[i], self.split_ends[i] = \
            self.calculate_axis_split_extents(
                self.split_shape[i],
                self.array_shape[i]
            )
def calculate_split_by_split_size(self):
    """
    Computes the split defined by :attr:`split_size`
    and :attr:`split_num_slices_per_axis`.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
       where each element is a :obj:`tuple` of :obj:`slice` objects.
    """
    self.set_split_extents_by_split_size()
    split = self.calculate_split_from_extents()
    return split
def set_split_extents_by_tile_shape(self):
"""
Sets split shape :attr:`split_shape` and
split extents (:attr:`split_begs` and :attr:`split_ends`)
from value of :attr:`tile_shape`.
"""
self.split_shape = ((self.array_shape - 1) // self.tile_shape) + 1
self.split_begs = [[], ] * len(self.array_shape)
self.split_ends = [[], ] * len(self.array_shape)
for i in range(len(self.array_shape)):
self.split_begs[i] = _np.arange(0, self.array_shape[i], self.tile_shape[i])
self.split_ends[i] = _np.zeros_like(self.split_begs[i])
self.split_ends[i][0:-1] = self.split_begs[i][1:]
self.split_ends[i][-1] = self.array_shape[i]
def calculate_split_by_tile_shape(self):
    """
    Computes the split defined by the explicit :attr:`tile_shape`.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
       where each element is a :obj:`tuple` of :obj:`slice` objects.
    """
    self.set_split_extents_by_tile_shape()
    split = self.calculate_split_from_extents()
    return split
def set_split_extents_by_tile_max_bytes(self):
    """
    Sets split extents (:attr:`split_begs`
    and :attr:`split_ends`) calculated
    from :attr:`max_tile_bytes`
    (and :attr:`max_tile_shape`, :attr:`sub_tile_shape`, :attr:`halo`).
    """
    # First solve for a tile shape which satisfies the byte/shape
    # constraints (module-level helper), then split exactly as for an
    # explicitly supplied tile_shape.
    self.tile_shape = \
        calculate_tile_shape_for_max_bytes(
            array_shape=self.array_shape,
            array_itemsize=self.array_itemsize,
            max_tile_bytes=self.max_tile_bytes,
            max_tile_shape=self.max_tile_shape,
            sub_tile_shape=self.sub_tile_shape,
            halo=self.halo
        )
    self.set_split_extents_by_tile_shape()
def calculate_split_by_tile_max_bytes(self):
    """
    Computes the split defined by :attr:`max_tile_bytes`
    (and :attr:`max_tile_shape`, :attr:`sub_tile_shape`, :attr:`halo`).

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
       where each element is a :obj:`tuple` of :obj:`slice` objects.
    """
    self.set_split_extents_by_tile_max_bytes()
    split = self.calculate_split_from_extents()
    return split
def set_split_extents(self):
    """
    Sets split extents (:attr:`split_begs`
    and :attr:`split_ends`) using whichever parameter
    group was supplied to :meth:`__init__`.
    """
    self.check_split_parameters()
    self.update_tile_extent_bounds()
    # Dispatch on the (single) parameter group defining the split;
    # check_split_parameters has already rejected conflicting groups.
    if self.indices_per_axis is not None:
        self.set_split_extents_by_indices_per_axis()
    elif not ((self.split_size is None) and (self.split_num_slices_per_axis is None)):
        self.set_split_extents_by_split_size()
    elif self.tile_shape is not None:
        self.set_split_extents_by_tile_shape()
    elif self.max_tile_bytes is not None:
        self.set_split_extents_by_tile_max_bytes()
def calculate_split(self):
    """
    Computes the split.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
       of dimension :samp:`len({self}.array_shape)`.
       Each element of the returned array is a :obj:`tuple`
       containing :samp:`len({self}.array_shape)` elements, with each element
       being a :obj:`slice` object. Each :obj:`tuple` defines a slice within
       the bounds :samp:`{self}.array_start - {self}.halo[:, 0]`
       to :samp:`{self}.array_start + {self}.array_shape + {self}.halo[:, 1]`.
    """
    self.set_split_extents()
    split = self.calculate_split_from_extents()
    return split
|
array-split/array_split | array_split/split.py | ShapeSplitter.set_split_extents_by_indices_per_axis | python | def set_split_extents_by_indices_per_axis(self):
if self.indices_per_axis is None:
raise ValueError("Got None for self.indices_per_axis")
self.logger.debug("self.array_shape=%s", self.array_shape)
self.logger.debug("self.indices_per_axis=%s", self.indices_per_axis)
self.indices_per_axis = \
pad_with_none(self.indices_per_axis, len(self.array_shape))
# Define the start and stop indices (extents) for each axis slice
self.split_shape = _np.ones(len(self.array_shape), dtype="int64")
self.split_begs = [[], ] * len(self.array_shape)
self.split_ends = [[], ] * len(self.array_shape)
for i in range(len(self.indices_per_axis)):
indices = self.indices_per_axis[i]
if (indices is not None) and (len(indices) > 0):
self.split_shape[i] = len(indices) + 1
self.split_begs[i] = _np.zeros((len(indices) + 1,), dtype="int64")
self.split_begs[i][1:] = indices
self.split_ends[i] = _np.zeros((len(self.split_begs[i]),), dtype="int64")
self.split_ends[i][0:-1] = self.split_begs[i][1:]
self.split_ends[i][-1] = self.array_shape[i]
else:
# start and stop is the full width of the axis
self.split_begs[i] = [0, ]
self.split_ends[i] = [self.array_shape[i], ]
self.logger.debug("self.indices_per_axis=%s", self.indices_per_axis) | Sets split shape :attr:`split_shape` and
split extents (:attr:`split_begs` and :attr:`split_ends`)
from values in :attr:`indices_per_axis`. | train | https://github.com/array-split/array_split/blob/e07abe3001209394dde809f7e6f505f9f49a1c26/array_split/split.py#L1175-L1207 | [
"def pad_with_none(sequence, new_length):\n \"\"\"\n Returns :samp:`sequence` :obj:`list` end-padded with :samp:`None`\n elements so that the length of the returned list equals :samp:`{new_length}`.\n\n :type sequence: iterable\n :param sequence: Return *listified* sequence which has been end-padded.... | class ShapeSplitter(object):
"""
Implements array shape splitting. There are three main (top-level) methods:
:meth:`__init__`
Initialisation of parameters which define the split.
:meth:`set_split_extents`
Calculates the per-axis indices for the cuts. Sets
the :attr:`split_shape`, :attr:`split_begs`
and :attr:`split_ends` attributes.
:meth:`calculate_split`
Calls :meth:`set_split_extents` followed
by :meth:`calculate_split_from_extents` to
return the :obj:`numpy.ndarray` of :obj:`tuple` elements (slices).
Example::
>>> import numpy as np
>>> ary = np.arange(0, 10)
>>> splitter = ShapeSplitter(ary.shape, 3)
>>> split = splitter.calculate_split()
>>> split.shape
(3,)
>>> split
array([(slice(0, 4, None),), (slice(4, 7, None),), (slice(7, 10, None),)],
dtype=[('0', 'O')])
>>> [ary[slyce] for slyce in split.flatten()]
[array([0, 1, 2, 3]), array([4, 5, 6]), array([7, 8, 9])]
>>>
>>> splitter.split_shape # equivalent to split.shape above
array([3])
>>> splitter.split_begs # start indices for tile extents
[array([0, 4, 7])]
>>> splitter.split_ends # stop indices for tile extents
[array([ 4, 7, 10])]
"""
#: Class attribute for :obj:`logging.Logger` logging.
logger = _logging.getLogger(__name__ + ".ShapeSplitter")
#: Class attribute indicating list of valid values for :attr:`tile_bound_policy`.
#: See :data:`ARRAY_BOUNDS` and :data:`NO_BOUNDS`.
valid_tile_bounds_policies = [ARRAY_BOUNDS, NO_BOUNDS]
def __init__(
self,
array_shape,
indices_or_sections=None,
axis=None,
array_start=None,
array_itemsize=1,
tile_shape=None,
max_tile_bytes=None,
max_tile_shape=None,
sub_tile_shape=None,
halo=None,
tile_bounds_policy=ARRAY_BOUNDS
):
# Initialise *private* attributes.
self.__array_shape = None
self.__array_start = None
self.__array_itemsize = None
self.__indices_per_axis = None
self.__split_size = None
self.__split_num_slices_per_axis = None
self.__tile_shape = None
self.__max_tile_bytes = None
self.__max_tile_shape = None
self.__sub_tile_shape = None
self.__halo = None
self.__tile_bounds_policy = None
self.__tile_beg_min = None
self.__tile_end_max = None
self.__split_shape = None
self.__split_begs = None
self.__split_ends = None
# Now set properties from arguments
self.array_shape = _np.array(array_shape)
if array_start is None:
array_start = _np.zeros_like(self.array_shape)
self.array_start = array_start
self.array_itemsize = array_itemsize
indices_per_axis = None
if is_indices(indices_or_sections):
num_subarrays = None
indices_per_axis = indices_or_sections
if (
((axis is None) or is_scalar(axis))
and
(not _np.any([is_sequence(_e) for _e in indices_or_sections]))
):
if axis is None:
axis = 0
# Make indices_per_axis a list of lists, so that
# element 0 is a list of indices for axis 0
indices_per_axis = [None, ] * len(array_shape)
indices_per_axis[axis] = indices_or_sections
else:
indices_per_axis = None
num_subarrays = indices_or_sections
self.indices_per_axis = indices_per_axis
self.split_size = num_subarrays
split_num_slices_per_axis = None
if (self.split_size is not None) or (axis is not None):
if axis is None:
axis = 0
if is_sequence(axis):
split_num_slices_per_axis = pad_with_object(axis, len(self.array_shape), 1)
elif self.split_size is not None:
split_num_slices_per_axis = pad_with_object([], len(self.array_shape), 1)
split_num_slices_per_axis[axis] = self.split_size
self.split_num_slices_per_axis = split_num_slices_per_axis
self.tile_shape = tile_shape
self.max_tile_bytes = max_tile_bytes
self.max_tile_shape = max_tile_shape
self.sub_tile_shape = sub_tile_shape
halo = self.convert_halo_to_array_form(halo)
self.halo = halo
if tile_bounds_policy is None:
tile_bounds_policy = ARRAY_BOUNDS
self.tile_bounds_policy = tile_bounds_policy
self.tile_beg_min = self.array_start
self.tile_end_max = self.array_start + self.array_shape
self.split_shape = None
self.split_begs = None
self.split_ends = None
def convert_halo_to_array_form(self, halo):
"""
Converts the :samp:`{halo}` argument to a :samp:`({self}.array_shape.size, 2)`
shaped array.
:type halo: :samp:`None`, :obj:`int`, :samp:`self.array_shape.size` length sequence
of :samp:`int` or :samp:`(self.array_shape.size, 2)` shaped array
of :samp:`int`
:param halo: Halo to be converted to :samp:`(len(self.array_shape), 2)` shaped array form.
:rtype: :obj:`numpy.ndarray`
:return: A :samp:`(len(self.array_shape), 2)` shaped array of :obj:`numpy.int64` elements.
"""
return convert_halo_to_array_form(halo=halo, ndim=len(self.array_shape))
@property
def array_shape(self):
"""
The shape of the array which is to be split. A sequence of :obj:`int` indicating the
per-axis sizes which are to be split.
"""
return self.__array_shape
@array_shape.setter
def array_shape(self, array_shape):
self.__array_shape = array_shape
@property
def array_start(self):
"""
The start index. A sequence of :obj:`int` indicating the start of indexing for
the tile slices. Defaults to :samp:`numpy.zeros_like({self}.array_shape)`.
"""
return self.__array_start
@array_start.setter
def array_start(self, array_start):
self.__array_start = array_start
@property
def array_itemsize(self):
"""
The number of bytes per array element, see :attr:`max_tile_bytes`.
"""
return self.__array_itemsize
@array_itemsize.setter
def array_itemsize(self, array_itemsize):
self.__array_itemsize = array_itemsize
@property
def indices_per_axis(self):
"""
The per-axis indices indicating the cuts for the split.
A :obj:`list` of 1D :obj:`numpy.ndarray` objects such
that :samp:`{self}.indices_per_axis[i]` indicates the
cut positions for axis :samp:`i`.
"""
return self.__indices_per_axis
@indices_per_axis.setter
def indices_per_axis(self, indices_per_axis):
self.__indices_per_axis = indices_per_axis
@property
def split_size(self):
"""
An :obj:`int` indicating the number of tiles in the calculated split.
"""
return self.__split_size
@split_size.setter
def split_size(self, split_size):
self.__split_size = split_size
@property
def split_num_slices_per_axis(self):
"""
Number of slices per axis.
A 1D :obj:`numpy.ndarray` of :obj:`int` indicating the number of slices (sections)
per axis, so that :samp:`{self}.split_num_slices_per_axis[i]` is an integer
indicating the number of sections along axis :samp:`i` in the calculated split.
"""
return self.__split_num_slices_per_axis
@split_num_slices_per_axis.setter
def split_num_slices_per_axis(self, split_num_slices_per_axis):
self.__split_num_slices_per_axis = split_num_slices_per_axis
@property
def tile_shape(self):
"""
The shape of all tiles in the calculated split.
A 1D :samp:`numpy.ndarray` of :obj:`int` indicating the per-axis
number of elements for tiles in the calculated split.
"""
return self.__tile_shape
@tile_shape.setter
def tile_shape(self, tile_shape):
self.__tile_shape = tile_shape
@property
def max_tile_bytes(self):
    """
    The maximum number of bytes for any tile (including :attr:`halo`)
    in the returned split.

    An :obj:`int` which constrains the tile shape such that any tile
    from the computed split is no bigger than :samp:`{max_tile_bytes}`.
    """
    return self.__max_tile_bytes

@max_tile_bytes.setter
def max_tile_bytes(self, max_tile_bytes):
    # Plain assignment; used by set_split_extents_by_tile_max_bytes.
    self.__max_tile_bytes = max_tile_bytes
@property
def max_tile_shape(self):
    """
    Per-axis maximum sizes for calculated tiles.

    A 1D :samp:`numpy.ndarray` of :obj:`int` indicating the per-axis
    maximum number of elements for tiles in the calculated split.
    """
    return self.__max_tile_shape

@max_tile_shape.setter
def max_tile_shape(self, max_tile_shape):
    # Plain assignment; only meaningful in the max_tile_bytes
    # parameter group.
    self.__max_tile_shape = max_tile_shape
@property
def sub_tile_shape(self):
    """
    Calculated tile shape will be an integer multiple of this sub-tile shape.

    i.e. :samp:`(self.tile_shape[i] % self.sub_tile_shape[i]) == 0`,
    for :samp:`i in range(0, len(self.tile_shape))`.
    A 1D :samp:`numpy.ndarray` of :obj:`int` indicating sub-tile shape.
    """
    return self.__sub_tile_shape

@sub_tile_shape.setter
def sub_tile_shape(self, sub_tile_shape):
    # Plain assignment; only meaningful in the max_tile_bytes
    # parameter group.
    self.__sub_tile_shape = sub_tile_shape
@property
def halo(self):
    """
    Per-axis -ve and +ve halo sizes for extending tiles to overlap
    with neighbouring tiles.

    A :samp:`(N, 2)` shaped array where element :samp:`[i, 0]` is the
    halo size in the negative direction of axis :samp:`i` and
    element :samp:`[i, 1]` is the halo size in the positive direction.
    """
    return self.__halo

@halo.setter
def halo(self, halo):
    # Normalise scalar / 1D / None halo specifications to the
    # canonical (ndim, 2) array form before storing.
    self.__halo = convert_halo_to_array_form(halo, ndim=self.array_shape.size)
@property
def tile_bounds_policy(self):
    """
    A string indicating whether tile halo extents can extend beyond
    the array domain.

    Valid values are indicated by :attr:`valid_tile_bounds_policies`.
    """
    return self.__tile_bounds_policy

@tile_bounds_policy.setter
def tile_bounds_policy(self, tile_bounds_policy):
    # Plain assignment; validity is enforced by check_tile_bounds_policy.
    self.__tile_bounds_policy = tile_bounds_policy
@property
def tile_beg_min(self):
    """
    The per-axis minimum index for :attr:`slice.start`. The per-axis
    lower bound for tile start indices. A 1D :obj:`numpy.ndarray`.
    """
    return self.__tile_beg_min

@tile_beg_min.setter
def tile_beg_min(self, tile_beg_min):
    # Plain assignment; recomputed by update_tile_extent_bounds.
    self.__tile_beg_min = tile_beg_min
@property
def tile_end_max(self):
    """
    The per-axis maximum index for :attr:`slice.stop`. The per-axis
    upper bound for tile stop indices. A 1D :obj:`numpy.ndarray`.
    """
    return self.__tile_end_max

@tile_end_max.setter
def tile_end_max(self, tile_end_max):
    # Plain assignment; recomputed by update_tile_extent_bounds.
    self.__tile_end_max = tile_end_max
@property
def split_shape(self):
    """
    The shape of the calculated split array. Indicates the per-axis
    number of sections in the calculated split. A 1D :obj:`numpy.ndarray`.
    """
    return self.__split_shape

@split_shape.setter
def split_shape(self, split_shape):
    # Plain assignment; set by the set_split_extents_* methods.
    self.__split_shape = split_shape
@property
def split_begs(self):
    """
    The list of per-axis start indices for :obj:`slice` objects.

    A :obj:`list` of 1D :obj:`numpy.ndarray` objects indicating
    the :attr:`slice.start` index for tiles.
    """
    return self.__split_begs

@split_begs.setter
def split_begs(self, split_begs):
    # Plain assignment; set by the set_split_extents_* methods.
    self.__split_begs = split_begs
@property
def split_ends(self):
    """
    The list of per-axis stop indices for :obj:`slice` objects.

    A :obj:`list` of 1D :obj:`numpy.ndarray` objects indicating
    the :attr:`slice.stop` index for tiles.
    """
    return self.__split_ends

@split_ends.setter
def split_ends(self, split_ends):
    # Plain assignment; set by the set_split_extents_* methods.
    self.__split_ends = split_ends
def check_tile_bounds_policy(self):
    """
    Raises :obj:`ValueError` if :attr:`tile_bounds_policy`
    is not one of :attr:`valid_tile_bounds_policies`.
    """
    policy = self.tile_bounds_policy
    if policy in self.valid_tile_bounds_policies:
        return
    raise ValueError(
        "Got self.tile_bounds_policy=%s, which is not in %s."
        % (policy, self.valid_tile_bounds_policies)
    )
def check_consistent_parameter_dimensions(self):
    """
    Ensure that all parameter dimensions are consistent with
    the :attr:`array_shape` dimension.

    :raises ValueError: For inconsistent parameter dimensions.
    """
    # Fix over the original: the error messages were copy-pasted and
    # inaccurate -- the exact-length checks printed "> ... should be
    # equal" and the longer-than checks (which deliberately allow
    # shorter sequences, padded later) also claimed "should be equal".
    ndim = len(self.array_shape)
    # These may be *shorter* than array_shape (they are padded later),
    # but must never be longer.
    for name, value in (
        ("self.indices_per_axis", self.indices_per_axis),
        ("self.split_num_slices_per_axis", self.split_num_slices_per_axis),
    ):
        if (value is not None) and (len(value) > ndim):
            raise ValueError(
                "Got len(%s)=%s > len(self.array_shape)=%s, should not be greater."
                % (name, len(value), ndim)
            )
    # These must match the array dimension exactly.
    for name, value in (
        ("self.tile_shape", self.tile_shape),
        ("self.sub_tile_shape", self.sub_tile_shape),
        ("self.max_tile_shape", self.max_tile_shape),
        ("self.array_start", self.array_start),
    ):
        if (value is not None) and (len(value) != ndim):
            raise ValueError(
                "Got len(%s)=%s != len(self.array_shape)=%s, should be equal."
                % (name, len(value), ndim)
            )
def check_consistent_parameter_grouping(self):
    """
    Ensures this object does not have conflicting groups of parameters.

    Exactly one of the parameter groups 'indices_per_axis',
    'split_size', 'tile_shape' or 'max_tile_bytes' may be specified;
    anything else (zero groups, or two or more) raises.

    :raises ValueError: For conflicting or absent parameters.
    """
    # Collect every group that has at least one parameter set, keyed by
    # group name, with a {parameter-name: value} dict for error reporting.
    parameter_groups = {}
    if self.indices_per_axis is not None:
        parameter_groups["indices_per_axis"] = \
            {"self.indices_per_axis": self.indices_per_axis}
    if (self.split_size is not None) or (self.split_num_slices_per_axis is not None):
        parameter_groups["split_size"] = \
            {
                "self.split_size": self.split_size,
                "self.split_num_slices_per_axis": self.split_num_slices_per_axis,
            }
    if self.tile_shape is not None:
        parameter_groups["tile_shape"] = \
            {"self.tile_shape": self.tile_shape}
    if self.max_tile_bytes is not None:
        parameter_groups["max_tile_bytes"] = \
            {"self.max_tile_bytes": self.max_tile_bytes}
    # max_tile_shape and sub_tile_shape belong to the "max_tile_bytes"
    # group; create the group entry if max_tile_bytes itself was not set.
    if self.max_tile_shape is not None:
        if "max_tile_bytes" not in parameter_groups.keys():
            parameter_groups["max_tile_bytes"] = {}
        parameter_groups["max_tile_bytes"]["self.max_tile_shape"] = self.max_tile_shape
    if self.sub_tile_shape is not None:
        if "max_tile_bytes" not in parameter_groups.keys():
            parameter_groups["max_tile_bytes"] = {}
        parameter_groups["max_tile_bytes"]["self.sub_tile_shape"] = self.sub_tile_shape
    self.logger.debug("parameter_groups=%s", parameter_groups)
    # More than one group set: report every offending group, sorted by name.
    if len(parameter_groups.keys()) > 1:
        group_keys = sorted(parameter_groups.keys())
        raise ValueError(
            "Got conflicting parameter groups specified, "
            +
            "should only specify one group to define the split:\n"
            +
            (
                "\n".join(
                    [
                        (
                            ("Group %18s: " % ("'%s'" % group_key))
                            +
                            str(parameter_groups[group_key])
                        )
                        for group_key in group_keys
                    ]
                )
            )
        )
    # No group set at all: the split is undefined.
    if len(parameter_groups.keys()) <= 0:
        raise ValueError(
            "No split parameters specified, need parameters from one of the groups: "
            +
            "'indices_per_axis', 'split_size', 'tile_shape' or 'max_tile_bytes'"
        )
def check_split_parameters(self):
    """
    Ensures this object has a state consistent with evaluating a split.

    Runs all parameter validations; any failure propagates.

    :raises ValueError: For conflicting or absent parameters.
    """
    for validate in (
        self.check_tile_bounds_policy,
        self.check_consistent_parameter_dimensions,
        self.check_consistent_parameter_grouping,
    ):
        validate()
def update_tile_extent_bounds(self):
    """
    Updates the :attr:`tile_beg_min` and :attr:`tile_end_max`
    data members according to :attr:`tile_bounds_policy`.
    """
    policy = self.tile_bounds_policy
    if policy == NO_BOUNDS:
        # Tiles (including halo) may extend beyond the array extent.
        self.tile_beg_min = self.array_start - self.halo[:, 0]
        self.tile_end_max = self.array_start + self.array_shape + self.halo[:, 1]
    elif policy == ARRAY_BOUNDS:
        # Tiles are clamped to the array extent; halos get trimmed
        # at the boundary.
        self.tile_beg_min = self.array_start
        self.tile_end_max = self.array_start + self.array_shape
def calculate_split_from_extents(self):
    """
    Returns the split calculated from the extents
    in :attr:`split_begs` and :attr:`split_ends`.

    All calls to calculate the split end up here to produce
    the :mod:`numpy` structured array of
    :obj:`tuple`-of-:obj:`slice` elements.

    :rtype: :obj:`numpy.ndarray`
    :return:
        A :mod:`numpy` structured array where each element is
        a :obj:`tuple` of :obj:`slice` objects.
    """
    # Fix over the original: _np.product is deprecated and removed in
    # NumPy 2.0; _np.ndindex produces the same C-order multi-indices as
    # unravel_index(arange(product(shape)), shape) without it.
    self.logger.debug("self.split_shape=%s", self.split_shape)
    self.logger.debug("self.split_begs=%s", self.split_begs)
    self.logger.debug("self.split_ends=%s", self.split_ends)

    ndim = len(self.split_shape)

    def tile_slice(d, i):
        # Empty tiles (end <= beg) get no halo, so they stay empty;
        # non-empty tiles are extended by the halo and then clamped
        # to [tile_beg_min, tile_end_max].
        has_extent = self.split_ends[d][i] > self.split_begs[d][i]
        beg = max(
            self.split_begs[d][i] + self.array_start[d]
            - self.halo[d, 0] * has_extent,
            self.tile_beg_min[d]
        )
        end = min(
            self.split_ends[d][i] + self.array_start[d]
            + self.halo[d, 1] * has_extent,
            self.tile_end_max[d]
        )
        return slice(beg, end)

    ret = \
        _np.array(
            [
                tuple(tile_slice(d, idx[d]) for d in range(ndim))
                for idx in _np.ndindex(*self.split_shape)
            ],
            dtype=[("%d" % d, "object") for d in range(ndim)]
        ).reshape(self.split_shape)
    return ret
def calculate_split_halos_from_extents(self):
    """
    Returns :samp:`(self.ndim, 2)` shaped halo array elements indicating
    the halo for each tile of the split. Tiles on the boundary may have
    the halo trimmed to account for the :attr:`tile_bounds_policy`.

    :rtype: :obj:`numpy.ndarray`
    :return:
        A :mod:`numpy` structured array where each element is
        a :samp:`(self.ndim, 2)` shaped :obj:`numpy.ndarray`
        indicating the per-axis and per-direction number of halo
        elements for each tile in the split.
    """
    # Fix over the original: _np.product is deprecated and removed in
    # NumPy 2.0; _np.ndindex produces the same C-order multi-indices as
    # unravel_index(arange(product(shape)), shape) without it.
    self.logger.debug("self.split_shape=%s", self.split_shape)
    self.logger.debug("self.split_begs=%s", self.split_begs)
    self.logger.debug("self.split_ends=%s", self.split_ends)

    ndim = len(self.split_shape)

    def tile_halo(d, i):
        # Empty tiles get a zero halo; otherwise the halo is trimmed so
        # it never extends past [tile_beg_min, tile_end_max].
        has_extent = self.split_ends[d][i] > self.split_begs[d][i]
        lo = min(
            self.split_begs[d][i] - self.tile_beg_min[d],
            self.halo[d, 0] * has_extent
        )
        hi = min(
            self.tile_end_max[d] - self.split_ends[d][i],
            self.halo[d, 1] * has_extent
        )
        return (lo, hi)

    ret = \
        _np.array(
            [
                tuple(tile_halo(d, idx[d]) for d in range(ndim))
                for idx in _np.ndindex(*self.split_shape)
            ],
            dtype=[("%d" % d, "2int64") for d in range(ndim)]
        ).reshape(self.split_shape)
    return ret
def calculate_split_by_indices_per_axis(self):
    """
    Returns the split computed from the explicit per-axis cut
    indices in :attr:`indices_per_axis`.

    :rtype: :obj:`numpy.ndarray`
    :return:
        A :mod:`numpy` structured array where each element is
        a :obj:`tuple` of :obj:`slice` objects.
    """
    # First convert the cut indices into per-axis extents, then turn
    # those extents into slice tuples.
    self.set_split_extents_by_indices_per_axis()
    split = self.calculate_split_from_extents()
    return split
def calculate_axis_split_extents(self, num_sections, size):
    """
    Divides :samp:`range(0, {size})` into (approximately) equal sized
    intervals. Returns :samp:`(begs, ends)` where
    :samp:`slice(begs[i], ends[i])` defines the :samp:`i`-th interval
    for :samp:`i in range(0, {num_sections})`.

    :type num_sections: :obj:`int`
    :param num_sections: Number of (approximately) equal sized intervals.
    :type size: :obj:`int`
    :param size: Range for the subdivision.
    :rtype: :obj:`tuple`
    :return: Two element tuple :samp:`(begs, ends)`.
    """
    quotient, remainder = divmod(size, num_sections)
    if quotient >= 1:
        # Evenly spaced starts; the first `remainder` sections each
        # absorb one extra element, shifting section j's start up
        # by min(j, remainder).
        begs = _np.arange(0, quotient * num_sections, quotient)
        if remainder > 0:
            begs += _np.minimum(_np.arange(num_sections), remainder)
        ends = _np.zeros_like(begs)
        ends[0:-1] = begs[1:]
        ends[-1] = size
    else:
        # More sections than elements: the trailing sections are empty
        # (beg == end == size).
        begs = _np.arange(0, num_sections)
        begs[size:] = size
        ends = begs.copy()
        ends[0:-1] = begs[1:]
    return begs, ends
def set_split_extents_by_split_size(self):
    """
    Sets split shape :attr:`split_shape` and
    split extents (:attr:`split_begs` and :attr:`split_ends`)
    from values in :attr:`split_size` and :attr:`split_num_slices_per_axis`.

    :raises ValueError: If :attr:`split_size` is :samp:`None` and
        :attr:`split_num_slices_per_axis` contains :samp:`None` or
        non-positive entries.
    """
    # Fix over the original: _np.product is deprecated and removed in
    # NumPy 2.0; _np.prod is the supported spelling.
    if self.split_size is None:
        # Derive the total split size from the per-axis slice counts;
        # this is only possible when every axis count is a positive int.
        if (
            _np.all([s is not None for s in self.split_num_slices_per_axis])
            and
            _np.all([s > 0 for s in self.split_num_slices_per_axis])
        ):
            self.split_size = _np.prod(self.split_num_slices_per_axis)
        else:
            raise ValueError(
                (
                    "Got invalid self.split_num_slices_per_axis=%s, all elements "
                    +
                    "need to be integers greater than zero when self.split_size is None."
                )
                %
                self.split_num_slices_per_axis
            )
    self.logger.debug(
        "Pre cannonicalise: self.split_num_slices_per_axis=%s",
        self.split_num_slices_per_axis)
    # Fill in unconstrained axes so the per-axis counts multiply out
    # to split_size (module-level helper).
    self.split_num_slices_per_axis = \
        calculate_num_slices_per_axis(
            self.split_num_slices_per_axis,
            self.split_size,
            self.array_shape
        )
    self.logger.debug(
        "Post cannonicalise: self.split_num_slices_per_axis=%s",
        self.split_num_slices_per_axis)
    # Define the start and stop indices (extents) for each axis slice.
    self.split_shape = self.split_num_slices_per_axis.copy()
    # The shared-[] initial value is safe: every element is reassigned
    # (never mutated) in the loop below.
    self.split_begs = [[], ] * len(self.array_shape)
    self.split_ends = [[], ] * len(self.array_shape)
    for i in range(len(self.array_shape)):
        self.split_begs[i], self.split_ends[i] = \
            self.calculate_axis_split_extents(
                self.split_shape[i],
                self.array_shape[i]
            )
def calculate_split_by_split_size(self):
    """
    Returns the split computed from :attr:`split_size`
    and :attr:`split_num_slices_per_axis`.

    :rtype: :obj:`numpy.ndarray`
    :return:
        A :mod:`numpy` structured array where each element is
        a :obj:`tuple` of :obj:`slice` objects.
    """
    # Derive per-axis extents from the requested number of sections,
    # then turn those extents into slice tuples.
    self.set_split_extents_by_split_size()
    split = self.calculate_split_from_extents()
    return split
def set_split_extents_by_tile_shape(self):
    """
    Sets split shape :attr:`split_shape` and
    split extents (:attr:`split_begs` and :attr:`split_ends`)
    from the value of :attr:`tile_shape`.
    """
    # Ceiling division: the last tile on an axis may be smaller.
    self.split_shape = ((self.array_shape - 1) // self.tile_shape) + 1
    begs = []
    ends = []
    for axis in range(len(self.array_shape)):
        axis_begs = _np.arange(0, self.array_shape[axis], self.tile_shape[axis])
        axis_ends = _np.zeros_like(axis_begs)
        # Each tile ends where the next begins; the final tile ends at
        # the axis size.
        axis_ends[0:-1] = axis_begs[1:]
        axis_ends[-1] = self.array_shape[axis]
        begs.append(axis_begs)
        ends.append(axis_ends)
    self.split_begs = begs
    self.split_ends = ends
def calculate_split_by_tile_shape(self):
    """
    Returns the split computed from :attr:`tile_shape`.

    :rtype: :obj:`numpy.ndarray`
    :return:
        A :mod:`numpy` structured array where each element is
        a :obj:`tuple` of :obj:`slice` objects.
    """
    # Derive per-axis extents from the fixed tile shape, then turn
    # those extents into slice tuples.
    self.set_split_extents_by_tile_shape()
    split = self.calculate_split_from_extents()
    return split
def set_split_extents_by_tile_max_bytes(self):
    """
    Sets split extents (:attr:`split_begs` and :attr:`split_ends`)
    from :attr:`max_tile_bytes`
    (and :attr:`max_tile_shape`, :attr:`sub_tile_shape`, :attr:`halo`).
    """
    # Compute the largest tile shape satisfying the byte budget
    # (module-level helper), then split as if that shape was given.
    shape = calculate_tile_shape_for_max_bytes(
        array_shape=self.array_shape,
        array_itemsize=self.array_itemsize,
        max_tile_bytes=self.max_tile_bytes,
        max_tile_shape=self.max_tile_shape,
        sub_tile_shape=self.sub_tile_shape,
        halo=self.halo
    )
    self.tile_shape = shape
    self.set_split_extents_by_tile_shape()
def calculate_split_by_tile_max_bytes(self):
    """
    Returns the split computed from :attr:`max_tile_bytes`
    (and :attr:`max_tile_shape`, :attr:`sub_tile_shape`, :attr:`halo`).

    :rtype: :obj:`numpy.ndarray`
    :return:
        A :mod:`numpy` structured array where each element is
        a :obj:`tuple` of :obj:`slice` objects.
    """
    # Derive per-axis extents from the byte-budget constraint, then
    # turn those extents into slice tuples.
    self.set_split_extents_by_tile_max_bytes()
    split = self.calculate_split_from_extents()
    return split
def set_split_extents(self):
    """
    Sets split extents (:attr:`split_begs` and :attr:`split_ends`)
    using whichever parameter group was supplied at construction.
    """
    self.check_split_parameters()
    self.update_tile_extent_bounds()
    # Dispatch to the extent calculation for the (single, validated)
    # parameter group; only the first matching entry runs.
    dispatch = (
        (self.indices_per_axis is not None,
         self.set_split_extents_by_indices_per_axis),
        ((self.split_size is not None) or (self.split_num_slices_per_axis is not None),
         self.set_split_extents_by_split_size),
        (self.tile_shape is not None,
         self.set_split_extents_by_tile_shape),
        (self.max_tile_bytes is not None,
         self.set_split_extents_by_tile_max_bytes),
    )
    for is_specified, set_extents in dispatch:
        if is_specified:
            set_extents()
            break
def calculate_split(self):
    """
    Computes the split.

    :rtype: :obj:`numpy.ndarray`
    :return:
        A :mod:`numpy` structured array of
        dimension :samp:`len({self}.array_shape)`. Each element is
        a :obj:`tuple` of :samp:`len({self}.array_shape)` :obj:`slice`
        objects, defining a slice within the
        bounds :samp:`{self}.array_start - {self}.halo[:, 0]`
        to :samp:`{self}.array_start + {self}.array_shape + {self}.halo[:, 1]`.
    """
    # Compute the per-axis extents for the configured parameter group,
    # then convert them into the structured array of slice tuples.
    self.set_split_extents()
    split = self.calculate_split_from_extents()
    return split
|
array-split/array_split | array_split/split.py | ShapeSplitter.calculate_split_from_extents | python | def calculate_split_from_extents(self):
self.logger.debug("self.split_shape=%s", self.split_shape)
self.logger.debug("self.split_begs=%s", self.split_begs)
self.logger.debug("self.split_ends=%s", self.split_ends)
ret = \
_np.array(
[
tuple(
[
slice(
max([
self.split_begs[d][idx[d]]
+ self.array_start[d]
- self.halo[d, 0]
* (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]]),
self.tile_beg_min[d]
]),
min([
self.split_ends[d][idx[d]]
+ self.array_start[d]
+ self.halo[d, 1]
* (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]]),
self.tile_end_max[d]
])
)
for d in range(len(self.split_shape))
]
)
for idx in
_np.array(
_np.unravel_index(
_np.arange(0, _np.product(self.split_shape)),
self.split_shape
)
).T
],
dtype=[("%d" % d, "object") for d in range(len(self.split_shape))]
).reshape(self.split_shape)
return ret | Returns split calculated using extents obtained
from :attr:`split_begs` and :attr:`split_ends`.
All calls to calculate the split end up here to produce
the :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
of :obj:`tuple`-of-:obj:`slice` elements.
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
where each element is a :obj:`tuple` of :obj:`slice` objects. | train | https://github.com/array-split/array_split/blob/e07abe3001209394dde809f7e6f505f9f49a1c26/array_split/split.py#L1209-L1261 | null | class ShapeSplitter(object):
"""
Implements array shape splitting. There are three main (top-level) methods:
:meth:`__init__`
Initialisation of parameters which define the split.
:meth:`set_split_extents`
Calculates the per-axis indices for the cuts. Sets
the :attr:`split_shape`, :attr:`split_begs`
and :attr:`split_ends` attributes.
:meth:`calculate_split`
Calls :meth:`set_split_extents` followed
by :meth:`calculate_split_from_extents` to
return the :obj:`numpy.ndarray` of :obj:`tuple` elements (slices).
Example::
>>> import numpy as np
>>> ary = np.arange(0, 10)
>>> splitter = ShapeSplitter(ary.shape, 3)
>>> split = splitter.calculate_split()
>>> split.shape
(3,)
>>> split
array([(slice(0, 4, None),), (slice(4, 7, None),), (slice(7, 10, None),)],
dtype=[('0', 'O')])
>>> [ary[slyce] for slyce in split.flatten()]
[array([0, 1, 2, 3]), array([4, 5, 6]), array([7, 8, 9])]
>>>
>>> splitter.split_shape # equivalent to split.shape above
array([3])
>>> splitter.split_begs # start indices for tile extents
[array([0, 4, 7])]
>>> splitter.split_ends # stop indices for tile extents
[array([ 4, 7, 10])]
"""
#: Class attribute for :obj:`logging.Logger` logging.
logger = _logging.getLogger(__name__ + ".ShapeSplitter")
#: Class attribute indicating list of valid values for :attr:`tile_bound_policy`.
#: See :data:`ARRAY_BOUNDS` and :data:`NO_BOUNDS`.
valid_tile_bounds_policies = [ARRAY_BOUNDS, NO_BOUNDS]
def __init__(
self,
array_shape,
indices_or_sections=None,
axis=None,
array_start=None,
array_itemsize=1,
tile_shape=None,
max_tile_bytes=None,
max_tile_shape=None,
sub_tile_shape=None,
halo=None,
tile_bounds_policy=ARRAY_BOUNDS
):
# Initialise *private* attributes.
self.__array_shape = None
self.__array_start = None
self.__array_itemsize = None
self.__indices_per_axis = None
self.__split_size = None
self.__split_num_slices_per_axis = None
self.__tile_shape = None
self.__max_tile_bytes = None
self.__max_tile_shape = None
self.__sub_tile_shape = None
self.__halo = None
self.__tile_bounds_policy = None
self.__tile_beg_min = None
self.__tile_end_max = None
self.__split_shape = None
self.__split_begs = None
self.__split_ends = None
# Now set properties from arguments
self.array_shape = _np.array(array_shape)
if array_start is None:
array_start = _np.zeros_like(self.array_shape)
self.array_start = array_start
self.array_itemsize = array_itemsize
indices_per_axis = None
if is_indices(indices_or_sections):
num_subarrays = None
indices_per_axis = indices_or_sections
if (
((axis is None) or is_scalar(axis))
and
(not _np.any([is_sequence(_e) for _e in indices_or_sections]))
):
if axis is None:
axis = 0
# Make indices_per_axis a list of lists, so that
# element 0 is a list of indices for axis 0
indices_per_axis = [None, ] * len(array_shape)
indices_per_axis[axis] = indices_or_sections
else:
indices_per_axis = None
num_subarrays = indices_or_sections
self.indices_per_axis = indices_per_axis
self.split_size = num_subarrays
split_num_slices_per_axis = None
if (self.split_size is not None) or (axis is not None):
if axis is None:
axis = 0
if is_sequence(axis):
split_num_slices_per_axis = pad_with_object(axis, len(self.array_shape), 1)
elif self.split_size is not None:
split_num_slices_per_axis = pad_with_object([], len(self.array_shape), 1)
split_num_slices_per_axis[axis] = self.split_size
self.split_num_slices_per_axis = split_num_slices_per_axis
self.tile_shape = tile_shape
self.max_tile_bytes = max_tile_bytes
self.max_tile_shape = max_tile_shape
self.sub_tile_shape = sub_tile_shape
halo = self.convert_halo_to_array_form(halo)
self.halo = halo
if tile_bounds_policy is None:
tile_bounds_policy = ARRAY_BOUNDS
self.tile_bounds_policy = tile_bounds_policy
self.tile_beg_min = self.array_start
self.tile_end_max = self.array_start + self.array_shape
self.split_shape = None
self.split_begs = None
self.split_ends = None
def convert_halo_to_array_form(self, halo):
"""
Converts the :samp:`{halo}` argument to a :samp:`({self}.array_shape.size, 2)`
shaped array.
:type halo: :samp:`None`, :obj:`int`, :samp:`self.array_shape.size` length sequence
of :samp:`int` or :samp:`(self.array_shape.size, 2)` shaped array
of :samp:`int`
:param halo: Halo to be converted to :samp:`(len(self.array_shape), 2)` shaped array form.
:rtype: :obj:`numpy.ndarray`
:return: A :samp:`(len(self.array_shape), 2)` shaped array of :obj:`numpy.int64` elements.
"""
return convert_halo_to_array_form(halo=halo, ndim=len(self.array_shape))
@property
def array_shape(self):
"""
The shape of the array which is to be split. A sequence of :obj:`int` indicating the
per-axis sizes which are to be split.
"""
return self.__array_shape
@array_shape.setter
def array_shape(self, array_shape):
self.__array_shape = array_shape
@property
def array_start(self):
"""
The start index. A sequence of :obj:`int` indicating the start of indexing for
the tile slices. Defaults to :samp:`numpy.zeros_like({self}.array_shape)`.
"""
return self.__array_start
@array_start.setter
def array_start(self, array_start):
self.__array_start = array_start
@property
def array_itemsize(self):
"""
The number of bytes per array element, see :attr:`max_tile_bytes`.
"""
return self.__array_itemsize
@array_itemsize.setter
def array_itemsize(self, array_itemsize):
self.__array_itemsize = array_itemsize
@property
def indices_per_axis(self):
"""
The per-axis indices indicating the cuts for the split.
A :obj:`list` of 1D :obj:`numpy.ndarray` objects such
that :samp:`{self}.indices_per_axis[i]` indicates the
cut positions for axis :samp:`i`.
"""
return self.__indices_per_axis
@indices_per_axis.setter
def indices_per_axis(self, indices_per_axis):
self.__indices_per_axis = indices_per_axis
@property
def split_size(self):
"""
An :obj:`int` indicating the number of tiles in the calculated split.
"""
return self.__split_size
@split_size.setter
def split_size(self, split_size):
self.__split_size = split_size
@property
def split_num_slices_per_axis(self):
"""
Number of slices per axis.
A 1D :obj:`numpy.ndarray` of :obj:`int` indicating the number of slices (sections)
per axis, so that :samp:`{self}.split_num_slices_per_axis[i]` is an integer
indicating the number of sections along axis :samp:`i` in the calculated split.
"""
return self.__split_num_slices_per_axis
@split_num_slices_per_axis.setter
def split_num_slices_per_axis(self, split_num_slices_per_axis):
self.__split_num_slices_per_axis = split_num_slices_per_axis
@property
def tile_shape(self):
"""
The shape of all tiles in the calculated split.
A 1D :samp:`numpy.ndarray` of :obj:`int` indicating the per-axis
number of elements for tiles in the calculated split.
"""
return self.__tile_shape
@tile_shape.setter
def tile_shape(self, tile_shape):
self.__tile_shape = tile_shape
@property
def max_tile_bytes(self):
"""
The maximum number of bytes for any tile (including :attr:`halo`) in the returned split.
An :obj:`int` which constrains the tile shape such that any tile
from the computed split is no bigger than :samp:`{max_tile_bytes}`.
"""
return self.__max_tile_bytes
@max_tile_bytes.setter
def max_tile_bytes(self, max_tile_bytes):
self.__max_tile_bytes = max_tile_bytes
@property
def max_tile_shape(self):
"""
Per-axis maximum sizes for calculated tiles.
A 1D :samp:`numpy.ndarray` of :obj:`int` indicating the per-axis
maximum number of elements for tiles in the calculated split.
"""
return self.__max_tile_shape
@max_tile_shape.setter
def max_tile_shape(self, max_tile_shape):
self.__max_tile_shape = max_tile_shape
@property
def sub_tile_shape(self):
"""
Calculated tile shape will be an integer multiple of this sub-tile shape.
i.e. :samp:`(self.tile_shape[i] % self.sub_tile_shape[i]) == 0`,
for :samp:`i in range(0, len(self.tile_shape))`.
A 1D :samp:`numpy.ndarray` of :obj:`int` indicating sub-tile shape.
"""
return self.__sub_tile_shape
@sub_tile_shape.setter
def sub_tile_shape(self, sub_tile_shape):
self.__sub_tile_shape = sub_tile_shape
@property
def halo(self):
"""
Per-axis -ve and +ve halo sizes for extending tiles to overlap with neighbouring tiles.
A :samp:`(N, 2)` shaped array indicating the
"""
return self.__halo
@halo.setter
def halo(self, halo):
self.__halo = convert_halo_to_array_form(halo, ndim=self.array_shape.size)
@property
def tile_bounds_policy(self):
"""
A string indicating whether tile halo extents can extend beyond the array domain.
Valid values are indicated by :attr:`valid_tile_bounds_policies`.
"""
return self.__tile_bounds_policy
@tile_bounds_policy.setter
def tile_bounds_policy(self, tile_bounds_policy):
self.__tile_bounds_policy = tile_bounds_policy
@property
def tile_beg_min(self):
"""
The per-axis minimum index for :attr:`slice.start`. The per-axis lower bound for
tile start indices. A 1D :obj:`numpy.ndarray`.
"""
return self.__tile_beg_min
@tile_beg_min.setter
def tile_beg_min(self, tile_beg_min):
self.__tile_beg_min = tile_beg_min
@property
def tile_end_max(self):
"""
The per-axis maximum index for :attr:`slice.stop`. The per-axis upper bound for
tile stop indices. A 1D :obj:`numpy.ndarray`.
"""
return self.__tile_end_max
@tile_end_max.setter
def tile_end_max(self, tile_end_max):
self.__tile_end_max = tile_end_max
@property
def split_shape(self):
"""
The shape of the calculated split array. Indicates the per-axis number
of sections in the calculated split. A 1D :obj:`numpy.ndarray`.
"""
return self.__split_shape
@split_shape.setter
def split_shape(self, split_shape):
self.__split_shape = split_shape
@property
def split_begs(self):
"""
The list of per-axis start indices for :obj:`slice` objects.
A :obj:`list` of 1D :obj:`numpy.ndarray` objects indicating
the :attr:`slice.start` index for for tiles.
"""
return self.__split_begs
@split_begs.setter
def split_begs(self, split_begs):
self.__split_begs = split_begs
@property
def split_ends(self):
"""
The list of per-axis stop indices for :obj:`slice` objects.
A :obj:`list` of 1D :obj:`numpy.ndarray` objects indicating
the :attr:`slice.stop` index for for tiles.
"""
return self.__split_ends
@split_ends.setter
def split_ends(self, split_ends):
self.__split_ends = split_ends
def check_tile_bounds_policy(self):
"""
Raises :obj:`ValueError` if :attr:`tile_bounds_policy`
is not in :samp:`[{self}.ARRAY_BOUNDS, {self}.NO_BOUNDS]`.
"""
if self.tile_bounds_policy not in self.valid_tile_bounds_policies:
raise ValueError(
"Got self.tile_bounds_policy=%s, which is not in %s."
%
(self.tile_bounds_policy, self.valid_tile_bounds_policies)
)
def check_consistent_parameter_dimensions(self):
"""
Ensure that all parameter dimensions are consistent with
the :attr:`array_shape` dimension.
:raises ValueError: For inconsistent parameter dimensions.
"""
if self.indices_per_axis is not None:
if len(self.indices_per_axis) > len(self.array_shape):
raise ValueError(
"Got len(self.indices_per_axis)=%s > len(self.array_shape)=%s, should be equal."
%
(len(self.indices_per_axis), len(self.array_shape))
)
if self.split_num_slices_per_axis is not None:
if len(self.split_num_slices_per_axis) > len(self.array_shape):
raise ValueError(
(
"Got len(self.split_num_slices_per_axis)=%s > len(self.array_shape)=%s,"
+
" should be equal."
)
%
(len(self.split_num_slices_per_axis), len(self.array_shape))
)
if self.tile_shape is not None:
if len(self.tile_shape) != len(self.array_shape):
raise ValueError(
"Got len(self.tile_shape)=%s > len(self.array_shape)=%s, should be equal."
%
(len(self.tile_shape), len(self.array_shape))
)
if self.sub_tile_shape is not None:
if len(self.sub_tile_shape) != len(self.array_shape):
raise ValueError(
"Got len(self.sub_tile_shape)=%s > len(self.array_shape)=%s, should be equal."
%
(len(self.sub_tile_shape), len(self.array_shape))
)
if self.max_tile_shape is not None:
if len(self.max_tile_shape) != len(self.array_shape):
raise ValueError(
"Got len(self.max_tile_shape)=%s > len(self.array_shape)=%s, should be equal."
%
(len(self.max_tile_shape), len(self.array_shape))
)
if self.array_start is not None:
if len(self.array_start) != len(self.array_shape):
raise ValueError(
"Got len(self.array_start)=%s > len(self.array_shape)=%s, should be equal."
%
(len(self.array_start), len(self.array_shape))
)
def check_consistent_parameter_grouping(self):
"""
Ensures this object does not have conflicting groups of parameters.
:raises ValueError: For conflicting or absent parameters.
"""
parameter_groups = {}
if self.indices_per_axis is not None:
parameter_groups["indices_per_axis"] = \
{"self.indices_per_axis": self.indices_per_axis}
if (self.split_size is not None) or (self.split_num_slices_per_axis is not None):
parameter_groups["split_size"] = \
{
"self.split_size": self.split_size,
"self.split_num_slices_per_axis": self.split_num_slices_per_axis,
}
if self.tile_shape is not None:
parameter_groups["tile_shape"] = \
{"self.tile_shape": self.tile_shape}
if self.max_tile_bytes is not None:
parameter_groups["max_tile_bytes"] = \
{"self.max_tile_bytes": self.max_tile_bytes}
if self.max_tile_shape is not None:
if "max_tile_bytes" not in parameter_groups.keys():
parameter_groups["max_tile_bytes"] = {}
parameter_groups["max_tile_bytes"]["self.max_tile_shape"] = self.max_tile_shape
if self.sub_tile_shape is not None:
if "max_tile_bytes" not in parameter_groups.keys():
parameter_groups["max_tile_bytes"] = {}
parameter_groups["max_tile_bytes"]["self.sub_tile_shape"] = self.sub_tile_shape
self.logger.debug("parameter_groups=%s", parameter_groups)
if len(parameter_groups.keys()) > 1:
group_keys = sorted(parameter_groups.keys())
raise ValueError(
"Got conflicting parameter groups specified, "
+
"should only specify one group to define the split:\n"
+
(
"\n".join(
[
(
("Group %18s: " % ("'%s'" % group_key))
+
str(parameter_groups[group_key])
)
for group_key in group_keys
]
)
)
)
if len(parameter_groups.keys()) <= 0:
raise ValueError(
"No split parameters specified, need parameters from one of the groups: "
+
"'indices_per_axis', 'split_size', 'tile_shape' or 'max_tile_bytes'"
)
def check_split_parameters(self):
    """
    Ensures this object has a state consistent with evaluating a split.

    :raises ValueError: For conflicting or absent parameters.
    """
    # Run each validation step in turn; any of them may raise ValueError.
    for validate in (
            self.check_tile_bounds_policy,
            self.check_consistent_parameter_dimensions,
            self.check_consistent_parameter_grouping,
    ):
        validate()
def update_tile_extent_bounds(self):
    """
    Updates the :attr:`tile_beg_min` and :attr:`tile_end_max`
    data members according to :attr:`tile_bounds_policy`.
    """
    if self.tile_bounds_policy not in (NO_BOUNDS, ARRAY_BOUNDS):
        # Unknown policy: leave the current bounds untouched
        # (matches the original if/elif fall-through behaviour).
        return
    beg = self.array_start
    end = self.array_start + self.array_shape
    if self.tile_bounds_policy == NO_BOUNDS:
        # Halo regions may extend beyond the array extent.
        beg = beg - self.halo[:, 0]
        end = end + self.halo[:, 1]
    self.tile_beg_min = beg
    self.tile_end_max = end
def set_split_extents_by_indices_per_axis(self):
    """
    Sets split shape :attr:`split_shape` and
    split extents (:attr:`split_begs` and :attr:`split_ends`)
    from values in :attr:`indices_per_axis`.

    :raises ValueError: If :attr:`indices_per_axis` is :samp:`None`.
    """
    if self.indices_per_axis is None:
        raise ValueError("Got None for self.indices_per_axis")
    self.logger.debug("self.array_shape=%s", self.array_shape)
    self.logger.debug("self.indices_per_axis=%s", self.indices_per_axis)
    # Pad with None entries so there is one (possibly empty) entry per axis.
    self.indices_per_axis = \
        pad_with_none(self.indices_per_axis, len(self.array_shape))
    # Define the start and stop indices (extents) for each axis slice
    self.split_shape = _np.ones(len(self.array_shape), dtype="int64")
    self.split_begs = [[], ] * len(self.array_shape)
    self.split_ends = [[], ] * len(self.array_shape)
    for i in range(len(self.indices_per_axis)):
        indices = self.indices_per_axis[i]
        if (indices is not None) and (len(indices) > 0):
            # k cut positions produce k + 1 sections on this axis:
            # each section starts at a cut (or 0) and ends at the
            # next cut (or the axis length).
            self.split_shape[i] = len(indices) + 1
            self.split_begs[i] = _np.zeros((len(indices) + 1,), dtype="int64")
            self.split_begs[i][1:] = indices
            self.split_ends[i] = _np.zeros((len(self.split_begs[i]),), dtype="int64")
            self.split_ends[i][0:-1] = self.split_begs[i][1:]
            self.split_ends[i][-1] = self.array_shape[i]
        else:
            # start and stop is the full width of the axis
            self.split_begs[i] = [0, ]
            self.split_ends[i] = [self.array_shape[i], ]
    self.logger.debug("self.indices_per_axis=%s", self.indices_per_axis)
def calculate_split_halos_from_extents(self):
    """
    Returns :samp:`(self.ndim, 2)` shaped halo array elements indicating
    the halo for each split. Tiles on the boundary may have the halo trimmed
    to account for the :attr:`tile_bounds_policy`.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
       where each element is a :samp:`(self.ndim, 2)` shaped :obj:`numpy.ndarray`
       indicating the per-axis and per-direction number of halo elements for each tile
       in the split.
    """
    self.logger.debug("self.split_shape=%s", self.split_shape)
    self.logger.debug("self.split_begs=%s", self.split_begs)
    self.logger.debug("self.split_ends=%s", self.split_ends)
    # Enumerate all tile indices in C order; for each tile and axis d the
    # (negative, positive) halo is the nominal self.halo value, clipped to
    # the room available before tile_beg_min / after tile_end_max, and
    # forced to zero for empty tiles via the (end > beg) factor.
    # NOTE(review): split_begs/split_ends here are not offset by
    # array_start while tile_beg_min/tile_end_max are — verify the
    # clipping for non-zero array_start.
    ret = \
        _np.array(
            [
                (
                    tuple(
                        (
                            min([
                                self.split_begs[d][idx[d]] - self.tile_beg_min[d],
                                self.halo[d, 0]
                                *
                                (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]])
                            ]),
                            min([
                                self.tile_end_max[d] - self.split_ends[d][idx[d]],
                                self.halo[d, 1]
                                *
                                (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]])
                            ])
                        )
                        for d in range(len(self.split_shape))
                    )
                )
                for idx in
                _np.array(
                    _np.unravel_index(
                        _np.arange(0, _np.product(self.split_shape)),
                        self.split_shape
                    )
                ).T
            ],
            # One "2int64" field per axis: each element is an (ndim, 2) record.
            dtype=[("%d" % d, "2int64") for d in range(len(self.split_shape))]
        ).reshape(self.split_shape)
    return ret
def calculate_split_by_indices_per_axis(self):
    """
    Computes the split from the :attr:`indices_per_axis` cut positions.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` structured array where each element is
       a :obj:`tuple` of :obj:`slice` objects.
    """
    # First fix the per-axis extents, then materialise the slice tuples.
    self.set_split_extents_by_indices_per_axis()
    split_arr = self.calculate_split_from_extents()
    return split_arr
def calculate_axis_split_extents(self, num_sections, size):
    """
    Divides :samp:`range(0, {size})` into (approximately) equal sized
    intervals. Returns :samp:`(begs, ends)` where :samp:`slice(begs[i], ends[i])`
    define the intervals for :samp:`i in range(0, {num_sections})`.

    :type num_sections: :obj:`int`
    :param num_sections: Divide :samp:`range(0, {size})` into this
       many (approximately) equal sized intervals.
    :type size: :obj:`int`
    :param size: Range for the subdivision.
    :rtype: :obj:`tuple`
    :return: Two element tuple :samp:`(begs, ends)`
       such that :samp:`slice(begs[i], ends[i])` define the
       intervals for :samp:`i in range(0, {num_sections})`.
    """
    section_size = size // num_sections
    if section_size >= 1:
        # Evenly spaced starts, then distribute the `rem` left-over
        # elements one each to the leading sections.  This is the
        # vectorized equivalent of incrementing begs[i + 1:] once for
        # each i in range(rem) (begs[j] gains min(j, rem)), replacing
        # the original O(rem * num_sections) python loop.
        begs = _np.arange(0, section_size * num_sections, section_size)
        rem = size - section_size * num_sections
        if rem > 0:
            begs += _np.minimum(_np.arange(0, num_sections), rem)
        ends = _np.zeros_like(begs)
        ends[0:-1] = begs[1:]
        ends[-1] = size
    else:
        # More sections than elements: the first `size` sections get one
        # element each, the remainder are empty (beg == end == size).
        begs = _np.arange(0, num_sections)
        begs[size:] = size
        ends = begs.copy()
        ends[0:-1] = begs[1:]
    return begs, ends
def set_split_extents_by_split_size(self):
    """
    Sets split shape :attr:`split_shape` and
    split extents (:attr:`split_begs` and :attr:`split_ends`)
    from values in :attr:`split_size` and :attr:`split_num_slices_per_axis`.

    :raises ValueError: If :attr:`split_size` is :samp:`None` and
       :attr:`split_num_slices_per_axis` has :samp:`None` or non-positive
       entries.
    """
    if self.split_size is None:
        # No overall split size given: derive it as the product of the
        # (fully specified, positive) per-axis slice counts.
        if (
            _np.all([s is not None for s in self.split_num_slices_per_axis])
            and
            _np.all([s > 0 for s in self.split_num_slices_per_axis])
        ):
            self.split_size = _np.product(self.split_num_slices_per_axis)
        else:
            raise ValueError(
                (
                    "Got invalid self.split_num_slices_per_axis=%s, all elements "
                    +
                    "need to be integers greater than zero when self.split_size is None."
                )
                %
                self.split_num_slices_per_axis
            )
    self.logger.debug(
        "Pre cannonicalise: self.split_num_slices_per_axis=%s",
        self.split_num_slices_per_axis)
    # Canonicalise the per-axis slice counts (module-level helper;
    # presumably fills in unspecified counts to match split_size — defined
    # elsewhere in this file).
    self.split_num_slices_per_axis = \
        calculate_num_slices_per_axis(
            self.split_num_slices_per_axis,
            self.split_size,
            self.array_shape
        )
    self.logger.debug(
        "Post cannonicalise: self.split_num_slices_per_axis=%s",
        self.split_num_slices_per_axis)
    # Define the start and stop indices (extents) for each axis slice
    self.split_shape = self.split_num_slices_per_axis.copy()
    self.split_begs = [[], ] * len(self.array_shape)
    self.split_ends = [[], ] * len(self.array_shape)
    for i in range(len(self.array_shape)):
        self.split_begs[i], self.split_ends[i] = \
            self.calculate_axis_split_extents(
                self.split_shape[i],
                self.array_shape[i]
            )
def calculate_split_by_split_size(self):
    """
    Computes the split from :attr:`split_size`
    and :attr:`split_num_slices_per_axis`.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` structured array where each element is
       a :obj:`tuple` of :obj:`slice` objects.
    """
    # First fix the per-axis extents, then materialise the slice tuples.
    self.set_split_extents_by_split_size()
    split_arr = self.calculate_split_from_extents()
    return split_arr
def set_split_extents_by_tile_shape(self):
    """
    Sets split shape :attr:`split_shape` and
    split extents (:attr:`split_begs` and :attr:`split_ends`)
    from the value of :attr:`tile_shape`.
    """
    # Ceiling division: number of tiles needed to cover each axis.
    self.split_shape = ((self.array_shape - 1) // self.tile_shape) + 1
    begs_per_axis = []
    ends_per_axis = []
    for axis_len, tile_len in zip(self.array_shape, self.tile_shape):
        begs = _np.arange(0, axis_len, tile_len)
        # Each tile ends where the next begins; the final tile is
        # clipped to the axis length.
        ends = _np.zeros_like(begs)
        ends[0:-1] = begs[1:]
        ends[-1] = axis_len
        begs_per_axis.append(begs)
        ends_per_axis.append(ends)
    self.split_begs = begs_per_axis
    self.split_ends = ends_per_axis
def calculate_split_by_tile_shape(self):
    """
    Computes the split from :attr:`tile_shape`.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` structured array where each element is
       a :obj:`tuple` of :obj:`slice` objects.
    """
    # First fix the per-axis extents, then materialise the slice tuples.
    self.set_split_extents_by_tile_shape()
    split_arr = self.calculate_split_from_extents()
    return split_arr
def set_split_extents_by_tile_max_bytes(self):
    """
    Sets split extents (:attr:`split_begs` and :attr:`split_ends`)
    from :attr:`max_tile_bytes`
    (and :attr:`max_tile_shape`, :attr:`sub_tile_shape`, :attr:`halo`).
    """
    # Derive a tile shape satisfying the byte budget, then reuse the
    # tile-shape based extent computation.
    derived_tile_shape = \
        calculate_tile_shape_for_max_bytes(
            array_shape=self.array_shape,
            array_itemsize=self.array_itemsize,
            max_tile_bytes=self.max_tile_bytes,
            max_tile_shape=self.max_tile_shape,
            sub_tile_shape=self.sub_tile_shape,
            halo=self.halo
        )
    self.tile_shape = derived_tile_shape
    self.set_split_extents_by_tile_shape()
def calculate_split_by_tile_max_bytes(self):
    """
    Computes the split from :attr:`max_tile_bytes`
    (and :attr:`max_tile_shape`, :attr:`sub_tile_shape`, :attr:`halo`).

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` structured array where each element is
       a :obj:`tuple` of :obj:`slice` objects.
    """
    # First fix the per-axis extents, then materialise the slice tuples.
    self.set_split_extents_by_tile_max_bytes()
    split_arr = self.calculate_split_from_extents()
    return split_arr
def set_split_extents(self):
    """
    Sets split extents (:attr:`split_begs` and :attr:`split_ends`)
    from whichever parameter group was supplied at construction.
    """
    self.check_split_parameters()
    self.update_tile_extent_bounds()
    # Dispatch on the (already validated, mutually exclusive) parameter
    # group; group priority matches the original if/elif ordering.
    if self.indices_per_axis is not None:
        extent_setter = self.set_split_extents_by_indices_per_axis
    elif (self.split_size is not None) or (self.split_num_slices_per_axis is not None):
        extent_setter = self.set_split_extents_by_split_size
    elif self.tile_shape is not None:
        extent_setter = self.set_split_extents_by_tile_shape
    elif self.max_tile_bytes is not None:
        extent_setter = self.set_split_extents_by_tile_max_bytes
    else:
        # check_split_parameters guarantees one group is present.
        return
    extent_setter()
def calculate_split(self):
    """
    Computes the split.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` structured array of
       dimension :samp:`len({self}.array_shape)`; each element is
       a :obj:`tuple` of :obj:`slice` objects defining a tile within
       the bounds :samp:`{self}.array_start - {self}.halo[:, 0]`
       to :samp:`{self}.array_start + {self}.array_shape + {self}.halo[:, 1]`.
    """
    # Determine the per-axis extents, then build the slice tuples.
    self.set_split_extents()
    split_arr = self.calculate_split_from_extents()
    return split_arr
|
array-split/array_split | array_split/split.py | ShapeSplitter.calculate_axis_split_extents | python | def calculate_axis_split_extents(self, num_sections, size):
section_size = size // num_sections
if section_size >= 1:
begs = _np.arange(0, section_size * num_sections, section_size)
rem = size - section_size * num_sections
if rem > 0:
for i in range(rem):
begs[i + 1:] += 1
ends = _np.zeros_like(begs)
ends[0:-1] = begs[1:]
ends[-1] = size
else:
begs = _np.arange(0, num_sections)
begs[size:] = size
ends = begs.copy()
ends[0:-1] = begs[1:]
return begs, ends | Divides :samp:`range(0, {size})` into (approximately) equal sized
intervals. Returns :samp:`(begs, ends)` where :samp:`slice(begs[i], ends[i])`
define the intervals for :samp:`i in range(0, {num_sections})`.
:type num_sections: :obj:`int`
:param num_sections: Divide :samp:`range(0, {size})` into this
many intervals (approximately) equal sized intervals.
:type size: :obj:`int`
:param size: Range for the subdivision.
:rtype: :obj:`tuple`
:return: Two element tuple :samp:`(begs, ends)`
such that :samp:`slice(begs[i], ends[i])` define the
intervals for :samp:`i in range(0, {num_sections})`. | train | https://github.com/array-split/array_split/blob/e07abe3001209394dde809f7e6f505f9f49a1c26/array_split/split.py#L1328-L1361 | null | class ShapeSplitter(object):
"""
Implements array shape splitting. There are three main (top-level) methods:
:meth:`__init__`
Initialisation of parameters which define the split.
:meth:`set_split_extents`
Calculates the per-axis indices for the cuts. Sets
the :attr:`split_shape`, :attr:`split_begs`
and :attr:`split_ends` attributes.
:meth:`calculate_split`
Calls :meth:`set_split_extents` followed
by :meth:`calculate_split_from_extents` to
return the :obj:`numpy.ndarray` of :obj:`tuple` elements (slices).
Example::
>>> import numpy as np
>>> ary = np.arange(0, 10)
>>> splitter = ShapeSplitter(ary.shape, 3)
>>> split = splitter.calculate_split()
>>> split.shape
(3,)
>>> split
array([(slice(0, 4, None),), (slice(4, 7, None),), (slice(7, 10, None),)],
dtype=[('0', 'O')])
>>> [ary[slyce] for slyce in split.flatten()]
[array([0, 1, 2, 3]), array([4, 5, 6]), array([7, 8, 9])]
>>>
>>> splitter.split_shape # equivalent to split.shape above
array([3])
>>> splitter.split_begs # start indices for tile extents
[array([0, 4, 7])]
>>> splitter.split_ends # stop indices for tile extents
[array([ 4, 7, 10])]
"""
#: Class attribute for :obj:`logging.Logger` logging.
logger = _logging.getLogger(__name__ + ".ShapeSplitter")
#: Class attribute indicating list of valid values for :attr:`tile_bound_policy`.
#: See :data:`ARRAY_BOUNDS` and :data:`NO_BOUNDS`.
valid_tile_bounds_policies = [ARRAY_BOUNDS, NO_BOUNDS]
def __init__(
        self,
        array_shape,
        indices_or_sections=None,
        axis=None,
        array_start=None,
        array_itemsize=1,
        tile_shape=None,
        max_tile_bytes=None,
        max_tile_shape=None,
        sub_tile_shape=None,
        halo=None,
        tile_bounds_policy=ARRAY_BOUNDS
):
    """
    Initialises the parameters which define the split.

    :type array_shape: sequence of :obj:`int`
    :param array_shape: Shape of the array to be split.
    :param indices_or_sections: If index-like, the cut positions
       (see :attr:`indices_per_axis`); otherwise the number of
       sections (see :attr:`split_size`).
    :param axis: Either the single axis to cut, or a per-axis sequence
       of section counts (see :attr:`split_num_slices_per_axis`).
    :param array_start: See :attr:`array_start`; defaults to zeros.
    :param array_itemsize: See :attr:`array_itemsize`.
    :param tile_shape: See :attr:`tile_shape`.
    :param max_tile_bytes: See :attr:`max_tile_bytes`.
    :param max_tile_shape: See :attr:`max_tile_shape`.
    :param sub_tile_shape: See :attr:`sub_tile_shape`.
    :param halo: See :attr:`halo`; any accepted form is canonicalised.
    :param tile_bounds_policy: See :attr:`tile_bounds_policy`;
       :samp:`None` falls back to :data:`ARRAY_BOUNDS`.
    """
    # Initialise *private* attributes.
    self.__array_shape = None
    self.__array_start = None
    self.__array_itemsize = None
    self.__indices_per_axis = None
    self.__split_size = None
    self.__split_num_slices_per_axis = None
    self.__tile_shape = None
    self.__max_tile_bytes = None
    self.__max_tile_shape = None
    self.__sub_tile_shape = None
    self.__halo = None
    self.__tile_bounds_policy = None
    self.__tile_beg_min = None
    self.__tile_end_max = None
    self.__split_shape = None
    self.__split_begs = None
    self.__split_ends = None
    # Now set properties from arguments
    self.array_shape = _np.array(array_shape)
    if array_start is None:
        array_start = _np.zeros_like(self.array_shape)
    self.array_start = array_start
    self.array_itemsize = array_itemsize
    # Disambiguate indices_or_sections: index-like -> cut positions,
    # otherwise it is the requested number of sub-arrays.
    indices_per_axis = None
    if is_indices(indices_or_sections):
        num_subarrays = None
        indices_per_axis = indices_or_sections
        if (
            ((axis is None) or is_scalar(axis))
            and
            (not _np.any([is_sequence(_e) for _e in indices_or_sections]))
        ):
            if axis is None:
                axis = 0
            # Make indices_per_axis a list of lists, so that
            # element 0 is a list of indices for axis 0
            indices_per_axis = [None, ] * len(array_shape)
            indices_per_axis[axis] = indices_or_sections
    else:
        indices_per_axis = None
        num_subarrays = indices_or_sections
    self.indices_per_axis = indices_per_axis
    self.split_size = num_subarrays
    # Derive per-axis section counts when a split size and/or axis
    # specification was given.
    split_num_slices_per_axis = None
    if (self.split_size is not None) or (axis is not None):
        if axis is None:
            axis = 0
        if is_sequence(axis):
            # axis is itself the per-axis section-count sequence.
            split_num_slices_per_axis = pad_with_object(axis, len(self.array_shape), 1)
        elif self.split_size is not None:
            # Single axis: all sections are cut along that axis.
            split_num_slices_per_axis = pad_with_object([], len(self.array_shape), 1)
            split_num_slices_per_axis[axis] = self.split_size
    self.split_num_slices_per_axis = split_num_slices_per_axis
    self.tile_shape = tile_shape
    self.max_tile_bytes = max_tile_bytes
    self.max_tile_shape = max_tile_shape
    self.sub_tile_shape = sub_tile_shape
    halo = self.convert_halo_to_array_form(halo)
    self.halo = halo
    if tile_bounds_policy is None:
        tile_bounds_policy = ARRAY_BOUNDS
    self.tile_bounds_policy = tile_bounds_policy
    # Default bounds; recomputed by update_tile_extent_bounds().
    self.tile_beg_min = self.array_start
    self.tile_end_max = self.array_start + self.array_shape
    self.split_shape = None
    self.split_begs = None
    self.split_ends = None
def convert_halo_to_array_form(self, halo):
    """
    Converts the :samp:`{halo}` argument to
    a :samp:`(len(self.array_shape), 2)` shaped array.

    :type halo: :samp:`None`, :obj:`int`, :samp:`self.array_shape.size` length
       sequence of :samp:`int` or :samp:`(self.array_shape.size, 2)` shaped
       array of :samp:`int`
    :param halo: Halo to be converted to the canonical 2D array form.
    :rtype: :obj:`numpy.ndarray`
    :return: A :samp:`(len(self.array_shape), 2)` shaped array
       of :obj:`numpy.int64` elements.
    """
    # Delegate to the module-level helper of the same name.
    num_dims = len(self.array_shape)
    return convert_halo_to_array_form(halo=halo, ndim=num_dims)
@property
def array_shape(self):
    """
    The shape of the array which is to be split. A sequence of :obj:`int` indicating the
    per-axis sizes which are to be split.
    """
    return self.__array_shape

@array_shape.setter
def array_shape(self, array_shape):
    # Plain assignment; canonicalised to a numpy array in __init__.
    self.__array_shape = array_shape

@property
def array_start(self):
    """
    The start index. A sequence of :obj:`int` indicating the start of indexing for
    the tile slices. Defaults to :samp:`numpy.zeros_like({self}.array_shape)`.
    """
    return self.__array_start

@array_start.setter
def array_start(self, array_start):
    self.__array_start = array_start

@property
def array_itemsize(self):
    """
    The number of bytes per array element, see :attr:`max_tile_bytes`.
    """
    return self.__array_itemsize

@array_itemsize.setter
def array_itemsize(self, array_itemsize):
    self.__array_itemsize = array_itemsize

@property
def indices_per_axis(self):
    """
    The per-axis indices indicating the cuts for the split.
    A :obj:`list` of 1D :obj:`numpy.ndarray` objects such
    that :samp:`{self}.indices_per_axis[i]` indicates the
    cut positions for axis :samp:`i`.
    """
    return self.__indices_per_axis

@indices_per_axis.setter
def indices_per_axis(self, indices_per_axis):
    self.__indices_per_axis = indices_per_axis

@property
def split_size(self):
    """
    An :obj:`int` indicating the number of tiles in the calculated split.
    """
    return self.__split_size

@split_size.setter
def split_size(self, split_size):
    self.__split_size = split_size

@property
def split_num_slices_per_axis(self):
    """
    Number of slices per axis.
    A 1D :obj:`numpy.ndarray` of :obj:`int` indicating the number of slices (sections)
    per axis, so that :samp:`{self}.split_num_slices_per_axis[i]` is an integer
    indicating the number of sections along axis :samp:`i` in the calculated split.
    """
    return self.__split_num_slices_per_axis

@split_num_slices_per_axis.setter
def split_num_slices_per_axis(self, split_num_slices_per_axis):
    self.__split_num_slices_per_axis = split_num_slices_per_axis

@property
def tile_shape(self):
    """
    The shape of all tiles in the calculated split.
    A 1D :samp:`numpy.ndarray` of :obj:`int` indicating the per-axis
    number of elements for tiles in the calculated split.
    """
    return self.__tile_shape

@tile_shape.setter
def tile_shape(self, tile_shape):
    self.__tile_shape = tile_shape

@property
def max_tile_bytes(self):
    """
    The maximum number of bytes for any tile (including :attr:`halo`) in the returned split.
    An :obj:`int` which constrains the tile shape such that any tile
    from the computed split is no bigger than :samp:`{max_tile_bytes}`.
    """
    return self.__max_tile_bytes

@max_tile_bytes.setter
def max_tile_bytes(self, max_tile_bytes):
    self.__max_tile_bytes = max_tile_bytes

@property
def max_tile_shape(self):
    """
    Per-axis maximum sizes for calculated tiles.
    A 1D :samp:`numpy.ndarray` of :obj:`int` indicating the per-axis
    maximum number of elements for tiles in the calculated split.
    """
    return self.__max_tile_shape

@max_tile_shape.setter
def max_tile_shape(self, max_tile_shape):
    self.__max_tile_shape = max_tile_shape

@property
def sub_tile_shape(self):
    """
    Calculated tile shape will be an integer multiple of this sub-tile shape.
    i.e. :samp:`(self.tile_shape[i] % self.sub_tile_shape[i]) == 0`,
    for :samp:`i in range(0, len(self.tile_shape))`.
    A 1D :samp:`numpy.ndarray` of :obj:`int` indicating sub-tile shape.
    """
    return self.__sub_tile_shape

@sub_tile_shape.setter
def sub_tile_shape(self, sub_tile_shape):
    self.__sub_tile_shape = sub_tile_shape
@property
def halo(self):
    """
    Per-axis -ve and +ve halo sizes for extending tiles to overlap with neighbouring tiles.
    A :samp:`(N, 2)` shaped array where element :samp:`[i, 0]` is the halo
    size in the negative direction and :samp:`[i, 1]` the halo size in the
    positive direction of axis :samp:`i`.
    """
    return self.__halo

@halo.setter
def halo(self, halo):
    # Canonicalise any accepted halo form (None, scalar, 1D sequence
    # or (N, 2) array) to the (N, 2) int64 array form.
    self.__halo = convert_halo_to_array_form(halo, ndim=self.array_shape.size)

@property
def tile_bounds_policy(self):
    """
    A string indicating whether tile halo extents can extend beyond the array domain.
    Valid values are indicated by :attr:`valid_tile_bounds_policies`.
    """
    return self.__tile_bounds_policy

@tile_bounds_policy.setter
def tile_bounds_policy(self, tile_bounds_policy):
    self.__tile_bounds_policy = tile_bounds_policy

@property
def tile_beg_min(self):
    """
    The per-axis minimum index for :attr:`slice.start`. The per-axis lower bound for
    tile start indices. A 1D :obj:`numpy.ndarray`.
    """
    return self.__tile_beg_min

@tile_beg_min.setter
def tile_beg_min(self, tile_beg_min):
    self.__tile_beg_min = tile_beg_min

@property
def tile_end_max(self):
    """
    The per-axis maximum index for :attr:`slice.stop`. The per-axis upper bound for
    tile stop indices. A 1D :obj:`numpy.ndarray`.
    """
    return self.__tile_end_max

@tile_end_max.setter
def tile_end_max(self, tile_end_max):
    self.__tile_end_max = tile_end_max

@property
def split_shape(self):
    """
    The shape of the calculated split array. Indicates the per-axis number
    of sections in the calculated split. A 1D :obj:`numpy.ndarray`.
    """
    return self.__split_shape

@split_shape.setter
def split_shape(self, split_shape):
    self.__split_shape = split_shape

@property
def split_begs(self):
    """
    The list of per-axis start indices for :obj:`slice` objects.
    A :obj:`list` of 1D :obj:`numpy.ndarray` objects indicating
    the :attr:`slice.start` index for tiles.
    """
    return self.__split_begs

@split_begs.setter
def split_begs(self, split_begs):
    self.__split_begs = split_begs

@property
def split_ends(self):
    """
    The list of per-axis stop indices for :obj:`slice` objects.
    A :obj:`list` of 1D :obj:`numpy.ndarray` objects indicating
    the :attr:`slice.stop` index for tiles.
    """
    return self.__split_ends

@split_ends.setter
def split_ends(self, split_ends):
    self.__split_ends = split_ends
def check_tile_bounds_policy(self):
    """
    Raises :obj:`ValueError` if :attr:`tile_bounds_policy`
    is not one of :attr:`valid_tile_bounds_policies`.
    """
    policy = self.tile_bounds_policy
    if policy not in self.valid_tile_bounds_policies:
        raise ValueError(
            "Got self.tile_bounds_policy=%s, which is not in %s."
            %
            (policy, self.valid_tile_bounds_policies)
        )
def check_consistent_parameter_dimensions(self):
    """
    Ensure that all parameter dimensions are consistent with
    the :attr:`array_shape` dimension.

    :raises ValueError: For inconsistent parameter dimensions.
    """
    ndim = len(self.array_shape)
    # These sequences may be *shorter* than array_shape (they get padded
    # to full length later) but must never be longer.
    for attr_name in ("indices_per_axis", "split_num_slices_per_axis"):
        value = getattr(self, attr_name)
        if (value is not None) and (len(value) > ndim):
            raise ValueError(
                "Got len(self.%s)=%s > len(self.array_shape)=%s, should be equal."
                %
                (attr_name, len(value), ndim)
            )
    # These must match the array dimensionality exactly.  Note: the
    # original code printed ">" in the message even though the check
    # is "!=" — the message now matches the actual comparison.
    for attr_name in ("tile_shape", "sub_tile_shape", "max_tile_shape", "array_start"):
        value = getattr(self, attr_name)
        if (value is not None) and (len(value) != ndim):
            raise ValueError(
                "Got len(self.%s)=%s != len(self.array_shape)=%s, should be equal."
                %
                (attr_name, len(value), ndim)
            )
def check_consistent_parameter_grouping(self):
    """
    Ensures this object does not have conflicting groups of parameters.

    Exactly one parameter *group* ('indices_per_axis', 'split_size',
    'tile_shape' or 'max_tile_bytes') may be used to define the split.

    :raises ValueError: For conflicting or absent parameters.
    """
    # Map group-name -> {attribute-name: attribute-value} for every group
    # that has at least one non-None parameter.
    parameter_groups = {}
    if self.indices_per_axis is not None:
        parameter_groups["indices_per_axis"] = \
            {"self.indices_per_axis": self.indices_per_axis}
    if (self.split_size is not None) or (self.split_num_slices_per_axis is not None):
        parameter_groups["split_size"] = \
            {
                "self.split_size": self.split_size,
                "self.split_num_slices_per_axis": self.split_num_slices_per_axis,
            }
    if self.tile_shape is not None:
        parameter_groups["tile_shape"] = \
            {"self.tile_shape": self.tile_shape}
    if self.max_tile_bytes is not None:
        parameter_groups["max_tile_bytes"] = \
            {"self.max_tile_bytes": self.max_tile_bytes}
    # max_tile_shape and sub_tile_shape belong to the 'max_tile_bytes' group.
    if self.max_tile_shape is not None:
        if "max_tile_bytes" not in parameter_groups.keys():
            parameter_groups["max_tile_bytes"] = {}
        parameter_groups["max_tile_bytes"]["self.max_tile_shape"] = self.max_tile_shape
    if self.sub_tile_shape is not None:
        if "max_tile_bytes" not in parameter_groups.keys():
            parameter_groups["max_tile_bytes"] = {}
        parameter_groups["max_tile_bytes"]["self.sub_tile_shape"] = self.sub_tile_shape
    self.logger.debug("parameter_groups=%s", parameter_groups)
    if len(parameter_groups.keys()) > 1:
        # More than one group specified: list each offending group
        # (sorted for deterministic messages) with its parameter values.
        group_keys = sorted(parameter_groups.keys())
        raise ValueError(
            "Got conflicting parameter groups specified, "
            +
            "should only specify one group to define the split:\n"
            +
            (
                "\n".join(
                    [
                        (
                            ("Group %18s: " % ("'%s'" % group_key))
                            +
                            str(parameter_groups[group_key])
                        )
                        for group_key in group_keys
                    ]
                )
            )
        )
    if len(parameter_groups.keys()) <= 0:
        raise ValueError(
            "No split parameters specified, need parameters from one of the groups: "
            +
            "'indices_per_axis', 'split_size', 'tile_shape' or 'max_tile_bytes'"
        )
def check_split_parameters(self):
    """
    Ensures this object has a state consistent with evaluating a split.

    :raises ValueError: For conflicting or absent parameters.
    """
    # Run each validation step in turn; any of them may raise ValueError.
    for validate in (
            self.check_tile_bounds_policy,
            self.check_consistent_parameter_dimensions,
            self.check_consistent_parameter_grouping,
    ):
        validate()
def update_tile_extent_bounds(self):
    """
    Updates the :attr:`tile_beg_min` and :attr:`tile_end_max`
    data members according to :attr:`tile_bounds_policy`.
    """
    if self.tile_bounds_policy not in (NO_BOUNDS, ARRAY_BOUNDS):
        # Unknown policy: leave the current bounds untouched
        # (matches the original if/elif fall-through behaviour).
        return
    beg = self.array_start
    end = self.array_start + self.array_shape
    if self.tile_bounds_policy == NO_BOUNDS:
        # Halo regions may extend beyond the array extent.
        beg = beg - self.halo[:, 0]
        end = end + self.halo[:, 1]
    self.tile_beg_min = beg
    self.tile_end_max = end
def set_split_extents_by_indices_per_axis(self):
    """
    Sets split shape :attr:`split_shape` and
    split extents (:attr:`split_begs` and :attr:`split_ends`)
    from values in :attr:`indices_per_axis`.

    :raises ValueError: If :attr:`indices_per_axis` is :samp:`None`.
    """
    if self.indices_per_axis is None:
        raise ValueError("Got None for self.indices_per_axis")
    self.logger.debug("self.array_shape=%s", self.array_shape)
    self.logger.debug("self.indices_per_axis=%s", self.indices_per_axis)
    # Pad with None entries so there is one (possibly empty) entry per axis.
    self.indices_per_axis = \
        pad_with_none(self.indices_per_axis, len(self.array_shape))
    # Define the start and stop indices (extents) for each axis slice
    self.split_shape = _np.ones(len(self.array_shape), dtype="int64")
    self.split_begs = [[], ] * len(self.array_shape)
    self.split_ends = [[], ] * len(self.array_shape)
    for i in range(len(self.indices_per_axis)):
        indices = self.indices_per_axis[i]
        if (indices is not None) and (len(indices) > 0):
            # k cut positions produce k + 1 sections on this axis.
            self.split_shape[i] = len(indices) + 1
            self.split_begs[i] = _np.zeros((len(indices) + 1,), dtype="int64")
            self.split_begs[i][1:] = indices
            self.split_ends[i] = _np.zeros((len(self.split_begs[i]),), dtype="int64")
            self.split_ends[i][0:-1] = self.split_begs[i][1:]
            self.split_ends[i][-1] = self.array_shape[i]
        else:
            # start and stop is the full width of the axis
            self.split_begs[i] = [0, ]
            self.split_ends[i] = [self.array_shape[i], ]
    self.logger.debug("self.indices_per_axis=%s", self.indices_per_axis)
def calculate_split_from_extents(self):
    """
    Returns split calculated using extents obtained
    from :attr:`split_begs` and :attr:`split_ends`.
    All calls to calculate the split end up here to produce
    the :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
    of :obj:`tuple`-of-:obj:`slice` elements.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
       where each element is a :obj:`tuple` of :obj:`slice` objects.
    """
    self.logger.debug("self.split_shape=%s", self.split_shape)
    self.logger.debug("self.split_begs=%s", self.split_begs)
    self.logger.debug("self.split_ends=%s", self.split_ends)
    # Enumerate all tile indices in C order; for each tile and axis d build
    # slice(beg, end) where beg/end are offset by array_start, widened by
    # the halo (zeroed for empty tiles via the (end > beg) factor) and then
    # clamped to [tile_beg_min, tile_end_max].
    ret = \
        _np.array(
            [
                tuple(
                    [
                        slice(
                            max([
                                self.split_begs[d][idx[d]]
                                + self.array_start[d]
                                - self.halo[d, 0]
                                * (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]]),
                                self.tile_beg_min[d]
                            ]),
                            min([
                                self.split_ends[d][idx[d]]
                                + self.array_start[d]
                                + self.halo[d, 1]
                                * (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]]),
                                self.tile_end_max[d]
                            ])
                        )
                        for d in range(len(self.split_shape))
                    ]
                )
                for idx in
                _np.array(
                    _np.unravel_index(
                        _np.arange(0, _np.product(self.split_shape)),
                        self.split_shape
                    )
                ).T
            ],
            # One object field per axis so each element is a tuple of slices.
            dtype=[("%d" % d, "object") for d in range(len(self.split_shape))]
        ).reshape(self.split_shape)
    return ret
def calculate_split_halos_from_extents(self):
    """
    Returns :samp:`(self.ndim, 2)` shaped halo array elements indicating
    the halo for each split. Tiles on the boundary may have the halo trimmed
    to account for the :attr:`tile_bounds_policy`.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
       where each element is a :samp:`(self.ndim, 2)` shaped :obj:`numpy.ndarray`
       indicating the per-axis and per-direction number of halo elements for each tile
       in the split.
    """
    self.logger.debug("self.split_shape=%s", self.split_shape)
    self.logger.debug("self.split_begs=%s", self.split_begs)
    self.logger.debug("self.split_ends=%s", self.split_ends)
    # For each tile (enumerated in C order) and axis d, the
    # (negative, positive) halo is the nominal self.halo value clipped to
    # the room available before tile_beg_min / after tile_end_max, and
    # forced to zero for empty tiles via the (end > beg) factor.
    # NOTE(review): split_begs/split_ends here are not offset by
    # array_start while tile_beg_min/tile_end_max are — verify the
    # clipping for non-zero array_start.
    ret = \
        _np.array(
            [
                (
                    tuple(
                        (
                            min([
                                self.split_begs[d][idx[d]] - self.tile_beg_min[d],
                                self.halo[d, 0]
                                *
                                (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]])
                            ]),
                            min([
                                self.tile_end_max[d] - self.split_ends[d][idx[d]],
                                self.halo[d, 1]
                                *
                                (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]])
                            ])
                        )
                        for d in range(len(self.split_shape))
                    )
                )
                for idx in
                _np.array(
                    _np.unravel_index(
                        _np.arange(0, _np.product(self.split_shape)),
                        self.split_shape
                    )
                ).T
            ],
            # One "2int64" field per axis: each element is an (ndim, 2) record.
            dtype=[("%d" % d, "2int64") for d in range(len(self.split_shape))]
        ).reshape(self.split_shape)
    return ret
def calculate_split_by_indices_per_axis(self):
    """
    Computes the split from the :attr:`indices_per_axis` cut positions.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` structured array where each element is
       a :obj:`tuple` of :obj:`slice` objects.
    """
    # First fix the per-axis extents, then materialise the slice tuples.
    self.set_split_extents_by_indices_per_axis()
    split_arr = self.calculate_split_from_extents()
    return split_arr
def set_split_extents_by_split_size(self):
    """
    Sets split shape :attr:`split_shape` and
    split extents (:attr:`split_begs` and :attr:`split_ends`)
    from values in :attr:`split_size` and :attr:`split_num_slices_per_axis`.

    :raises ValueError: If :attr:`split_size` is :samp:`None` and
       :attr:`split_num_slices_per_axis` has :samp:`None` or non-positive
       entries.
    """
    if self.split_size is None:
        # No overall split size given: derive it as the product of the
        # (fully specified, positive) per-axis slice counts.
        if (
            _np.all([s is not None for s in self.split_num_slices_per_axis])
            and
            _np.all([s > 0 for s in self.split_num_slices_per_axis])
        ):
            self.split_size = _np.product(self.split_num_slices_per_axis)
        else:
            raise ValueError(
                (
                    "Got invalid self.split_num_slices_per_axis=%s, all elements "
                    +
                    "need to be integers greater than zero when self.split_size is None."
                )
                %
                self.split_num_slices_per_axis
            )
    self.logger.debug(
        "Pre cannonicalise: self.split_num_slices_per_axis=%s",
        self.split_num_slices_per_axis)
    # Canonicalise the per-axis slice counts (module-level helper;
    # presumably fills in unspecified counts to match split_size — defined
    # elsewhere in this file).
    self.split_num_slices_per_axis = \
        calculate_num_slices_per_axis(
            self.split_num_slices_per_axis,
            self.split_size,
            self.array_shape
        )
    self.logger.debug(
        "Post cannonicalise: self.split_num_slices_per_axis=%s",
        self.split_num_slices_per_axis)
    # Define the start and stop indices (extents) for each axis slice
    self.split_shape = self.split_num_slices_per_axis.copy()
    self.split_begs = [[], ] * len(self.array_shape)
    self.split_ends = [[], ] * len(self.array_shape)
    for i in range(len(self.array_shape)):
        self.split_begs[i], self.split_ends[i] = \
            self.calculate_axis_split_extents(
                self.split_shape[i],
                self.array_shape[i]
            )
def calculate_split_by_split_size(self):
    """
    Computes the split from :attr:`split_size`
    and :attr:`split_num_slices_per_axis`.

    :rtype: :obj:`numpy.ndarray`
    :return:
       A :mod:`numpy` structured array where each element is
       a :obj:`tuple` of :obj:`slice` objects.
    """
    # First fix the per-axis extents, then materialise the slice tuples.
    self.set_split_extents_by_split_size()
    split_arr = self.calculate_split_from_extents()
    return split_arr
def set_split_extents_by_tile_shape(self):
"""
Sets split shape :attr:`split_shape` and
split extents (:attr:`split_begs` and :attr:`split_ends`)
from value of :attr:`tile_shape`.
"""
self.split_shape = ((self.array_shape - 1) // self.tile_shape) + 1
self.split_begs = [[], ] * len(self.array_shape)
self.split_ends = [[], ] * len(self.array_shape)
for i in range(len(self.array_shape)):
self.split_begs[i] = _np.arange(0, self.array_shape[i], self.tile_shape[i])
self.split_ends[i] = _np.zeros_like(self.split_begs[i])
self.split_ends[i][0:-1] = self.split_begs[i][1:]
self.split_ends[i][-1] = self.array_shape[i]
def calculate_split_by_tile_shape(self):
"""
Returns split calculated using extents obtained
from :attr:`tile_shape`.
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
where each element is a :obj:`tuple` of :obj:`slice` objects.
"""
self.set_split_extents_by_tile_shape()
return self.calculate_split_from_extents()
def set_split_extents_by_tile_max_bytes(self):
"""
Sets split extents (:attr:`split_begs`
and :attr:`split_ends`) calculated using
from :attr:`max_tile_bytes`
(and :attr:`max_tile_shape`, :attr:`sub_tile_shape`, :attr:`halo`).
"""
self.tile_shape = \
calculate_tile_shape_for_max_bytes(
array_shape=self.array_shape,
array_itemsize=self.array_itemsize,
max_tile_bytes=self.max_tile_bytes,
max_tile_shape=self.max_tile_shape,
sub_tile_shape=self.sub_tile_shape,
halo=self.halo
)
self.set_split_extents_by_tile_shape()
def calculate_split_by_tile_max_bytes(self):
"""
Returns split calculated using extents obtained
from :attr:`max_tile_bytes`
(and :attr:`max_tile_shape`, :attr:`sub_tile_shape`, :attr:`halo`).
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
where each element is a :obj:`tuple` of :obj:`slice` objects.
"""
self.set_split_extents_by_tile_max_bytes()
return self.calculate_split_from_extents()
def set_split_extents(self):
"""
Sets split extents (:attr:`split_begs`
and :attr:`split_ends`) calculated using
selected attributes set from :meth:`__init__`.
"""
self.check_split_parameters()
self.update_tile_extent_bounds()
if self.indices_per_axis is not None:
self.set_split_extents_by_indices_per_axis()
elif (self.split_size is not None) or (self.split_num_slices_per_axis is not None):
self.set_split_extents_by_split_size()
elif self.tile_shape is not None:
self.set_split_extents_by_tile_shape()
elif self.max_tile_bytes is not None:
self.set_split_extents_by_tile_max_bytes()
def calculate_split(self):
"""
Computes the split.
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
of dimension :samp:`len({self}.array_shape)`.
Each element of the returned array is a :obj:`tuple`
containing :samp:`len({self}.array_shape)` elements, with each element
being a :obj:`slice` object. Each :obj:`tuple` defines a slice within
the bounds :samp:`{self}.array_start - {self}.halo[:, 0]`
to :samp:`{self}.array_start + {self}.array_shape + {self}.halo[:, 1]`.
"""
self.set_split_extents()
return self.calculate_split_from_extents()
|
array-split/array_split | array_split/split.py | ShapeSplitter.set_split_extents_by_split_size | python | def set_split_extents_by_split_size(self):
if self.split_size is None:
if (
_np.all([s is not None for s in self.split_num_slices_per_axis])
and
_np.all([s > 0 for s in self.split_num_slices_per_axis])
):
self.split_size = _np.product(self.split_num_slices_per_axis)
else:
raise ValueError(
(
"Got invalid self.split_num_slices_per_axis=%s, all elements "
+
"need to be integers greater than zero when self.split_size is None."
)
%
self.split_num_slices_per_axis
)
self.logger.debug(
"Pre cannonicalise: self.split_num_slices_per_axis=%s",
self.split_num_slices_per_axis)
self.split_num_slices_per_axis = \
calculate_num_slices_per_axis(
self.split_num_slices_per_axis,
self.split_size,
self.array_shape
)
self.logger.debug(
"Post cannonicalise: self.split_num_slices_per_axis=%s",
self.split_num_slices_per_axis)
# Define the start and stop indices (extents) for each axis slice
self.split_shape = self.split_num_slices_per_axis.copy()
self.split_begs = [[], ] * len(self.array_shape)
self.split_ends = [[], ] * len(self.array_shape)
for i in range(len(self.array_shape)):
self.split_begs[i], self.split_ends[i] = \
self.calculate_axis_split_extents(
self.split_shape[i],
self.array_shape[i]
) | Sets split shape :attr:`split_shape` and
split extents (:attr:`split_begs` and :attr:`split_ends`)
from values in :attr:`split_size` and :attr:`split_num_slices_per_axis`. | train | https://github.com/array-split/array_split/blob/e07abe3001209394dde809f7e6f505f9f49a1c26/array_split/split.py#L1363-L1408 | [
"def calculate_num_slices_per_axis(num_slices_per_axis, num_slices, max_slices_per_axis=None):\n \"\"\"\n Returns a :obj:`numpy.ndarray` (:samp:`return_array` say) where non-positive elements of\n the :samp:`{num_slices_per_axis}` sequence have been replaced with\n positive integer values such that :sam... | class ShapeSplitter(object):
"""
Implements array shape splitting. There are three main (top-level) methods:
:meth:`__init__`
Initialisation of parameters which define the split.
:meth:`set_split_extents`
Calculates the per-axis indices for the cuts. Sets
the :attr:`split_shape`, :attr:`split_begs`
and :attr:`split_ends` attributes.
:meth:`calculate_split`
Calls :meth:`set_split_extents` followed
by :meth:`calculate_split_from_extents` to
return the :obj:`numpy.ndarray` of :obj:`tuple` elements (slices).
Example::
>>> import numpy as np
>>> ary = np.arange(0, 10)
>>> splitter = ShapeSplitter(ary.shape, 3)
>>> split = splitter.calculate_split()
>>> split.shape
(3,)
>>> split
array([(slice(0, 4, None),), (slice(4, 7, None),), (slice(7, 10, None),)],
dtype=[('0', 'O')])
>>> [ary[slyce] for slyce in split.flatten()]
[array([0, 1, 2, 3]), array([4, 5, 6]), array([7, 8, 9])]
>>>
>>> splitter.split_shape # equivalent to split.shape above
array([3])
>>> splitter.split_begs # start indices for tile extents
[array([0, 4, 7])]
>>> splitter.split_ends # stop indices for tile extents
[array([ 4, 7, 10])]
"""
#: Class attribute for :obj:`logging.Logger` logging.
logger = _logging.getLogger(__name__ + ".ShapeSplitter")
#: Class attribute indicating list of valid values for :attr:`tile_bound_policy`.
#: See :data:`ARRAY_BOUNDS` and :data:`NO_BOUNDS`.
valid_tile_bounds_policies = [ARRAY_BOUNDS, NO_BOUNDS]
def __init__(
self,
array_shape,
indices_or_sections=None,
axis=None,
array_start=None,
array_itemsize=1,
tile_shape=None,
max_tile_bytes=None,
max_tile_shape=None,
sub_tile_shape=None,
halo=None,
tile_bounds_policy=ARRAY_BOUNDS
):
# Initialise *private* attributes.
self.__array_shape = None
self.__array_start = None
self.__array_itemsize = None
self.__indices_per_axis = None
self.__split_size = None
self.__split_num_slices_per_axis = None
self.__tile_shape = None
self.__max_tile_bytes = None
self.__max_tile_shape = None
self.__sub_tile_shape = None
self.__halo = None
self.__tile_bounds_policy = None
self.__tile_beg_min = None
self.__tile_end_max = None
self.__split_shape = None
self.__split_begs = None
self.__split_ends = None
# Now set properties from arguments
self.array_shape = _np.array(array_shape)
if array_start is None:
array_start = _np.zeros_like(self.array_shape)
self.array_start = array_start
self.array_itemsize = array_itemsize
indices_per_axis = None
if is_indices(indices_or_sections):
num_subarrays = None
indices_per_axis = indices_or_sections
if (
((axis is None) or is_scalar(axis))
and
(not _np.any([is_sequence(_e) for _e in indices_or_sections]))
):
if axis is None:
axis = 0
# Make indices_per_axis a list of lists, so that
# element 0 is a list of indices for axis 0
indices_per_axis = [None, ] * len(array_shape)
indices_per_axis[axis] = indices_or_sections
else:
indices_per_axis = None
num_subarrays = indices_or_sections
self.indices_per_axis = indices_per_axis
self.split_size = num_subarrays
split_num_slices_per_axis = None
if (self.split_size is not None) or (axis is not None):
if axis is None:
axis = 0
if is_sequence(axis):
split_num_slices_per_axis = pad_with_object(axis, len(self.array_shape), 1)
elif self.split_size is not None:
split_num_slices_per_axis = pad_with_object([], len(self.array_shape), 1)
split_num_slices_per_axis[axis] = self.split_size
self.split_num_slices_per_axis = split_num_slices_per_axis
self.tile_shape = tile_shape
self.max_tile_bytes = max_tile_bytes
self.max_tile_shape = max_tile_shape
self.sub_tile_shape = sub_tile_shape
halo = self.convert_halo_to_array_form(halo)
self.halo = halo
if tile_bounds_policy is None:
tile_bounds_policy = ARRAY_BOUNDS
self.tile_bounds_policy = tile_bounds_policy
self.tile_beg_min = self.array_start
self.tile_end_max = self.array_start + self.array_shape
self.split_shape = None
self.split_begs = None
self.split_ends = None
def convert_halo_to_array_form(self, halo):
"""
Converts the :samp:`{halo}` argument to a :samp:`({self}.array_shape.size, 2)`
shaped array.
:type halo: :samp:`None`, :obj:`int`, :samp:`self.array_shape.size` length sequence
of :samp:`int` or :samp:`(self.array_shape.size, 2)` shaped array
of :samp:`int`
:param halo: Halo to be converted to :samp:`(len(self.array_shape), 2)` shaped array form.
:rtype: :obj:`numpy.ndarray`
:return: A :samp:`(len(self.array_shape), 2)` shaped array of :obj:`numpy.int64` elements.
"""
return convert_halo_to_array_form(halo=halo, ndim=len(self.array_shape))
@property
def array_shape(self):
"""
The shape of the array which is to be split. A sequence of :obj:`int` indicating the
per-axis sizes which are to be split.
"""
return self.__array_shape
@array_shape.setter
def array_shape(self, array_shape):
self.__array_shape = array_shape
@property
def array_start(self):
"""
The start index. A sequence of :obj:`int` indicating the start of indexing for
the tile slices. Defaults to :samp:`numpy.zeros_like({self}.array_shape)`.
"""
return self.__array_start
@array_start.setter
def array_start(self, array_start):
self.__array_start = array_start
@property
def array_itemsize(self):
"""
The number of bytes per array element, see :attr:`max_tile_bytes`.
"""
return self.__array_itemsize
@array_itemsize.setter
def array_itemsize(self, array_itemsize):
self.__array_itemsize = array_itemsize
@property
def indices_per_axis(self):
"""
The per-axis indices indicating the cuts for the split.
A :obj:`list` of 1D :obj:`numpy.ndarray` objects such
that :samp:`{self}.indices_per_axis[i]` indicates the
cut positions for axis :samp:`i`.
"""
return self.__indices_per_axis
@indices_per_axis.setter
def indices_per_axis(self, indices_per_axis):
self.__indices_per_axis = indices_per_axis
@property
def split_size(self):
"""
An :obj:`int` indicating the number of tiles in the calculated split.
"""
return self.__split_size
@split_size.setter
def split_size(self, split_size):
self.__split_size = split_size
@property
def split_num_slices_per_axis(self):
"""
Number of slices per axis.
A 1D :obj:`numpy.ndarray` of :obj:`int` indicating the number of slices (sections)
per axis, so that :samp:`{self}.split_num_slices_per_axis[i]` is an integer
indicating the number of sections along axis :samp:`i` in the calculated split.
"""
return self.__split_num_slices_per_axis
@split_num_slices_per_axis.setter
def split_num_slices_per_axis(self, split_num_slices_per_axis):
self.__split_num_slices_per_axis = split_num_slices_per_axis
@property
def tile_shape(self):
"""
The shape of all tiles in the calculated split.
A 1D :samp:`numpy.ndarray` of :obj:`int` indicating the per-axis
number of elements for tiles in the calculated split.
"""
return self.__tile_shape
@tile_shape.setter
def tile_shape(self, tile_shape):
self.__tile_shape = tile_shape
@property
def max_tile_bytes(self):
"""
The maximum number of bytes for any tile (including :attr:`halo`) in the returned split.
An :obj:`int` which constrains the tile shape such that any tile
from the computed split is no bigger than :samp:`{max_tile_bytes}`.
"""
return self.__max_tile_bytes
@max_tile_bytes.setter
def max_tile_bytes(self, max_tile_bytes):
self.__max_tile_bytes = max_tile_bytes
@property
def max_tile_shape(self):
"""
Per-axis maximum sizes for calculated tiles.
A 1D :samp:`numpy.ndarray` of :obj:`int` indicating the per-axis
maximum number of elements for tiles in the calculated split.
"""
return self.__max_tile_shape
@max_tile_shape.setter
def max_tile_shape(self, max_tile_shape):
self.__max_tile_shape = max_tile_shape
@property
def sub_tile_shape(self):
"""
Calculated tile shape will be an integer multiple of this sub-tile shape.
i.e. :samp:`(self.tile_shape[i] % self.sub_tile_shape[i]) == 0`,
for :samp:`i in range(0, len(self.tile_shape))`.
A 1D :samp:`numpy.ndarray` of :obj:`int` indicating sub-tile shape.
"""
return self.__sub_tile_shape
@sub_tile_shape.setter
def sub_tile_shape(self, sub_tile_shape):
self.__sub_tile_shape = sub_tile_shape
@property
def halo(self):
"""
Per-axis -ve and +ve halo sizes for extending tiles to overlap with neighbouring tiles.
A :samp:`(N, 2)` shaped array indicating the
"""
return self.__halo
@halo.setter
def halo(self, halo):
self.__halo = convert_halo_to_array_form(halo, ndim=self.array_shape.size)
@property
def tile_bounds_policy(self):
"""
A string indicating whether tile halo extents can extend beyond the array domain.
Valid values are indicated by :attr:`valid_tile_bounds_policies`.
"""
return self.__tile_bounds_policy
@tile_bounds_policy.setter
def tile_bounds_policy(self, tile_bounds_policy):
self.__tile_bounds_policy = tile_bounds_policy
@property
def tile_beg_min(self):
"""
The per-axis minimum index for :attr:`slice.start`. The per-axis lower bound for
tile start indices. A 1D :obj:`numpy.ndarray`.
"""
return self.__tile_beg_min
@tile_beg_min.setter
def tile_beg_min(self, tile_beg_min):
self.__tile_beg_min = tile_beg_min
@property
def tile_end_max(self):
"""
The per-axis maximum index for :attr:`slice.stop`. The per-axis upper bound for
tile stop indices. A 1D :obj:`numpy.ndarray`.
"""
return self.__tile_end_max
@tile_end_max.setter
def tile_end_max(self, tile_end_max):
self.__tile_end_max = tile_end_max
@property
def split_shape(self):
"""
The shape of the calculated split array. Indicates the per-axis number
of sections in the calculated split. A 1D :obj:`numpy.ndarray`.
"""
return self.__split_shape
@split_shape.setter
def split_shape(self, split_shape):
self.__split_shape = split_shape
@property
def split_begs(self):
"""
The list of per-axis start indices for :obj:`slice` objects.
A :obj:`list` of 1D :obj:`numpy.ndarray` objects indicating
the :attr:`slice.start` index for for tiles.
"""
return self.__split_begs
@split_begs.setter
def split_begs(self, split_begs):
self.__split_begs = split_begs
@property
def split_ends(self):
"""
The list of per-axis stop indices for :obj:`slice` objects.
A :obj:`list` of 1D :obj:`numpy.ndarray` objects indicating
the :attr:`slice.stop` index for for tiles.
"""
return self.__split_ends
@split_ends.setter
def split_ends(self, split_ends):
self.__split_ends = split_ends
def check_tile_bounds_policy(self):
"""
Raises :obj:`ValueError` if :attr:`tile_bounds_policy`
is not in :samp:`[{self}.ARRAY_BOUNDS, {self}.NO_BOUNDS]`.
"""
if self.tile_bounds_policy not in self.valid_tile_bounds_policies:
raise ValueError(
"Got self.tile_bounds_policy=%s, which is not in %s."
%
(self.tile_bounds_policy, self.valid_tile_bounds_policies)
)
def check_consistent_parameter_dimensions(self):
"""
Ensure that all parameter dimensions are consistent with
the :attr:`array_shape` dimension.
:raises ValueError: For inconsistent parameter dimensions.
"""
if self.indices_per_axis is not None:
if len(self.indices_per_axis) > len(self.array_shape):
raise ValueError(
"Got len(self.indices_per_axis)=%s > len(self.array_shape)=%s, should be equal."
%
(len(self.indices_per_axis), len(self.array_shape))
)
if self.split_num_slices_per_axis is not None:
if len(self.split_num_slices_per_axis) > len(self.array_shape):
raise ValueError(
(
"Got len(self.split_num_slices_per_axis)=%s > len(self.array_shape)=%s,"
+
" should be equal."
)
%
(len(self.split_num_slices_per_axis), len(self.array_shape))
)
if self.tile_shape is not None:
if len(self.tile_shape) != len(self.array_shape):
raise ValueError(
"Got len(self.tile_shape)=%s > len(self.array_shape)=%s, should be equal."
%
(len(self.tile_shape), len(self.array_shape))
)
if self.sub_tile_shape is not None:
if len(self.sub_tile_shape) != len(self.array_shape):
raise ValueError(
"Got len(self.sub_tile_shape)=%s > len(self.array_shape)=%s, should be equal."
%
(len(self.sub_tile_shape), len(self.array_shape))
)
if self.max_tile_shape is not None:
if len(self.max_tile_shape) != len(self.array_shape):
raise ValueError(
"Got len(self.max_tile_shape)=%s > len(self.array_shape)=%s, should be equal."
%
(len(self.max_tile_shape), len(self.array_shape))
)
if self.array_start is not None:
if len(self.array_start) != len(self.array_shape):
raise ValueError(
"Got len(self.array_start)=%s > len(self.array_shape)=%s, should be equal."
%
(len(self.array_start), len(self.array_shape))
)
def check_consistent_parameter_grouping(self):
"""
Ensures this object does not have conflicting groups of parameters.
:raises ValueError: For conflicting or absent parameters.
"""
parameter_groups = {}
if self.indices_per_axis is not None:
parameter_groups["indices_per_axis"] = \
{"self.indices_per_axis": self.indices_per_axis}
if (self.split_size is not None) or (self.split_num_slices_per_axis is not None):
parameter_groups["split_size"] = \
{
"self.split_size": self.split_size,
"self.split_num_slices_per_axis": self.split_num_slices_per_axis,
}
if self.tile_shape is not None:
parameter_groups["tile_shape"] = \
{"self.tile_shape": self.tile_shape}
if self.max_tile_bytes is not None:
parameter_groups["max_tile_bytes"] = \
{"self.max_tile_bytes": self.max_tile_bytes}
if self.max_tile_shape is not None:
if "max_tile_bytes" not in parameter_groups.keys():
parameter_groups["max_tile_bytes"] = {}
parameter_groups["max_tile_bytes"]["self.max_tile_shape"] = self.max_tile_shape
if self.sub_tile_shape is not None:
if "max_tile_bytes" not in parameter_groups.keys():
parameter_groups["max_tile_bytes"] = {}
parameter_groups["max_tile_bytes"]["self.sub_tile_shape"] = self.sub_tile_shape
self.logger.debug("parameter_groups=%s", parameter_groups)
if len(parameter_groups.keys()) > 1:
group_keys = sorted(parameter_groups.keys())
raise ValueError(
"Got conflicting parameter groups specified, "
+
"should only specify one group to define the split:\n"
+
(
"\n".join(
[
(
("Group %18s: " % ("'%s'" % group_key))
+
str(parameter_groups[group_key])
)
for group_key in group_keys
]
)
)
)
if len(parameter_groups.keys()) <= 0:
raise ValueError(
"No split parameters specified, need parameters from one of the groups: "
+
"'indices_per_axis', 'split_size', 'tile_shape' or 'max_tile_bytes'"
)
def check_split_parameters(self):
"""
Ensures this object has a state consistent with evaluating a split.
:raises ValueError: For conflicting or absent parameters.
"""
self.check_tile_bounds_policy()
self.check_consistent_parameter_dimensions()
self.check_consistent_parameter_grouping()
def update_tile_extent_bounds(self):
"""
Updates the :attr:`tile_beg_min` and :attr:`tile_end_max`
data members according to :attr:`tile_bounds_policy`.
"""
if self.tile_bounds_policy == NO_BOUNDS:
self.tile_beg_min = self.array_start - self.halo[:, 0]
self.tile_end_max = self.array_start + self.array_shape + self.halo[:, 1]
elif self.tile_bounds_policy == ARRAY_BOUNDS:
self.tile_beg_min = self.array_start
self.tile_end_max = self.array_start + self.array_shape
def set_split_extents_by_indices_per_axis(self):
"""
Sets split shape :attr:`split_shape` and
split extents (:attr:`split_begs` and :attr:`split_ends`)
from values in :attr:`indices_per_axis`.
"""
if self.indices_per_axis is None:
raise ValueError("Got None for self.indices_per_axis")
self.logger.debug("self.array_shape=%s", self.array_shape)
self.logger.debug("self.indices_per_axis=%s", self.indices_per_axis)
self.indices_per_axis = \
pad_with_none(self.indices_per_axis, len(self.array_shape))
# Define the start and stop indices (extents) for each axis slice
self.split_shape = _np.ones(len(self.array_shape), dtype="int64")
self.split_begs = [[], ] * len(self.array_shape)
self.split_ends = [[], ] * len(self.array_shape)
for i in range(len(self.indices_per_axis)):
indices = self.indices_per_axis[i]
if (indices is not None) and (len(indices) > 0):
self.split_shape[i] = len(indices) + 1
self.split_begs[i] = _np.zeros((len(indices) + 1,), dtype="int64")
self.split_begs[i][1:] = indices
self.split_ends[i] = _np.zeros((len(self.split_begs[i]),), dtype="int64")
self.split_ends[i][0:-1] = self.split_begs[i][1:]
self.split_ends[i][-1] = self.array_shape[i]
else:
# start and stop is the full width of the axis
self.split_begs[i] = [0, ]
self.split_ends[i] = [self.array_shape[i], ]
self.logger.debug("self.indices_per_axis=%s", self.indices_per_axis)
def calculate_split_from_extents(self):
"""
Returns split calculated using extents obtained
from :attr:`split_begs` and :attr:`split_ends`.
All calls to calculate the split end up here to produce
the :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
of :obj:`tuple`-of-:obj:`slice` elements.
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
where each element is a :obj:`tuple` of :obj:`slice` objects.
"""
self.logger.debug("self.split_shape=%s", self.split_shape)
self.logger.debug("self.split_begs=%s", self.split_begs)
self.logger.debug("self.split_ends=%s", self.split_ends)
ret = \
_np.array(
[
tuple(
[
slice(
max([
self.split_begs[d][idx[d]]
+ self.array_start[d]
- self.halo[d, 0]
* (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]]),
self.tile_beg_min[d]
]),
min([
self.split_ends[d][idx[d]]
+ self.array_start[d]
+ self.halo[d, 1]
* (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]]),
self.tile_end_max[d]
])
)
for d in range(len(self.split_shape))
]
)
for idx in
_np.array(
_np.unravel_index(
_np.arange(0, _np.product(self.split_shape)),
self.split_shape
)
).T
],
dtype=[("%d" % d, "object") for d in range(len(self.split_shape))]
).reshape(self.split_shape)
return ret
def calculate_split_halos_from_extents(self):
"""
Returns :samp:`(self.ndim, 2)` shaped halo array elements indicating
the halo for each split. Tiles on the boundary may have the halo trimmed
to account for the :attr:`tile_bounds_policy`.
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
where each element is a :samp:`(self.ndim, 2)` shaped :obj:`numpy.ndarray`
indicating the per-axis and per-direction number of halo elements for each tile
in the split.
"""
self.logger.debug("self.split_shape=%s", self.split_shape)
self.logger.debug("self.split_begs=%s", self.split_begs)
self.logger.debug("self.split_ends=%s", self.split_ends)
ret = \
_np.array(
[
(
tuple(
(
min([
self.split_begs[d][idx[d]] - self.tile_beg_min[d],
self.halo[d, 0]
*
(self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]])
]),
min([
self.tile_end_max[d] - self.split_ends[d][idx[d]],
self.halo[d, 1]
*
(self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]])
])
)
for d in range(len(self.split_shape))
)
)
for idx in
_np.array(
_np.unravel_index(
_np.arange(0, _np.product(self.split_shape)),
self.split_shape
)
).T
],
dtype=[("%d" % d, "2int64") for d in range(len(self.split_shape))]
).reshape(self.split_shape)
return ret
def calculate_split_by_indices_per_axis(self):
"""
Returns split calculated using extents obtained
from :attr:`indices_per_axis`.
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
where each element is a :obj:`tuple` of :obj:`slice` objects.
"""
self.set_split_extents_by_indices_per_axis()
return self.calculate_split_from_extents()
def calculate_axis_split_extents(self, num_sections, size):
"""
Divides :samp:`range(0, {size})` into (approximately) equal sized
intervals. Returns :samp:`(begs, ends)` where :samp:`slice(begs[i], ends[i])`
define the intervals for :samp:`i in range(0, {num_sections})`.
:type num_sections: :obj:`int`
:param num_sections: Divide :samp:`range(0, {size})` into this
many intervals (approximately) equal sized intervals.
:type size: :obj:`int`
:param size: Range for the subdivision.
:rtype: :obj:`tuple`
:return: Two element tuple :samp:`(begs, ends)`
such that :samp:`slice(begs[i], ends[i])` define the
intervals for :samp:`i in range(0, {num_sections})`.
"""
section_size = size // num_sections
if section_size >= 1:
begs = _np.arange(0, section_size * num_sections, section_size)
rem = size - section_size * num_sections
if rem > 0:
for i in range(rem):
begs[i + 1:] += 1
ends = _np.zeros_like(begs)
ends[0:-1] = begs[1:]
ends[-1] = size
else:
begs = _np.arange(0, num_sections)
begs[size:] = size
ends = begs.copy()
ends[0:-1] = begs[1:]
return begs, ends
def calculate_split_by_split_size(self):
"""
Returns split calculated using extents obtained
from :attr:`split_size` and :attr:`split_num_slices_per_axis`.
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
where each element is a :obj:`tuple` of :obj:`slice` objects.
"""
self.set_split_extents_by_split_size()
return self.calculate_split_from_extents()
def set_split_extents_by_tile_shape(self):
"""
Sets split shape :attr:`split_shape` and
split extents (:attr:`split_begs` and :attr:`split_ends`)
from value of :attr:`tile_shape`.
"""
self.split_shape = ((self.array_shape - 1) // self.tile_shape) + 1
self.split_begs = [[], ] * len(self.array_shape)
self.split_ends = [[], ] * len(self.array_shape)
for i in range(len(self.array_shape)):
self.split_begs[i] = _np.arange(0, self.array_shape[i], self.tile_shape[i])
self.split_ends[i] = _np.zeros_like(self.split_begs[i])
self.split_ends[i][0:-1] = self.split_begs[i][1:]
self.split_ends[i][-1] = self.array_shape[i]
def calculate_split_by_tile_shape(self):
"""
Returns split calculated using extents obtained
from :attr:`tile_shape`.
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
where each element is a :obj:`tuple` of :obj:`slice` objects.
"""
self.set_split_extents_by_tile_shape()
return self.calculate_split_from_extents()
def set_split_extents_by_tile_max_bytes(self):
"""
Sets split extents (:attr:`split_begs`
and :attr:`split_ends`) calculated using
from :attr:`max_tile_bytes`
(and :attr:`max_tile_shape`, :attr:`sub_tile_shape`, :attr:`halo`).
"""
self.tile_shape = \
calculate_tile_shape_for_max_bytes(
array_shape=self.array_shape,
array_itemsize=self.array_itemsize,
max_tile_bytes=self.max_tile_bytes,
max_tile_shape=self.max_tile_shape,
sub_tile_shape=self.sub_tile_shape,
halo=self.halo
)
self.set_split_extents_by_tile_shape()
def calculate_split_by_tile_max_bytes(self):
"""
Returns split calculated using extents obtained
from :attr:`max_tile_bytes`
(and :attr:`max_tile_shape`, :attr:`sub_tile_shape`, :attr:`halo`).
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
where each element is a :obj:`tuple` of :obj:`slice` objects.
"""
self.set_split_extents_by_tile_max_bytes()
return self.calculate_split_from_extents()
def set_split_extents(self):
"""
Sets split extents (:attr:`split_begs`
and :attr:`split_ends`) calculated using
selected attributes set from :meth:`__init__`.
"""
self.check_split_parameters()
self.update_tile_extent_bounds()
if self.indices_per_axis is not None:
self.set_split_extents_by_indices_per_axis()
elif (self.split_size is not None) or (self.split_num_slices_per_axis is not None):
self.set_split_extents_by_split_size()
elif self.tile_shape is not None:
self.set_split_extents_by_tile_shape()
elif self.max_tile_bytes is not None:
self.set_split_extents_by_tile_max_bytes()
def calculate_split(self):
"""
Computes the split.
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
of dimension :samp:`len({self}.array_shape)`.
Each element of the returned array is a :obj:`tuple`
containing :samp:`len({self}.array_shape)` elements, with each element
being a :obj:`slice` object. Each :obj:`tuple` defines a slice within
the bounds :samp:`{self}.array_start - {self}.halo[:, 0]`
to :samp:`{self}.array_start + {self}.array_shape + {self}.halo[:, 1]`.
"""
self.set_split_extents()
return self.calculate_split_from_extents()
|
array-split/array_split | array_split/split.py | ShapeSplitter.set_split_extents_by_tile_shape | python | def set_split_extents_by_tile_shape(self):
self.split_shape = ((self.array_shape - 1) // self.tile_shape) + 1
self.split_begs = [[], ] * len(self.array_shape)
self.split_ends = [[], ] * len(self.array_shape)
for i in range(len(self.array_shape)):
self.split_begs[i] = _np.arange(0, self.array_shape[i], self.tile_shape[i])
self.split_ends[i] = _np.zeros_like(self.split_begs[i])
self.split_ends[i][0:-1] = self.split_begs[i][1:]
self.split_ends[i][-1] = self.array_shape[i] | Sets split shape :attr:`split_shape` and
split extents (:attr:`split_begs` and :attr:`split_ends`)
from value of :attr:`tile_shape`. | train | https://github.com/array-split/array_split/blob/e07abe3001209394dde809f7e6f505f9f49a1c26/array_split/split.py#L1423-L1436 | null | class ShapeSplitter(object):
"""
Implements array shape splitting. There are three main (top-level) methods:
:meth:`__init__`
Initialisation of parameters which define the split.
:meth:`set_split_extents`
Calculates the per-axis indices for the cuts. Sets
the :attr:`split_shape`, :attr:`split_begs`
and :attr:`split_ends` attributes.
:meth:`calculate_split`
Calls :meth:`set_split_extents` followed
by :meth:`calculate_split_from_extents` to
return the :obj:`numpy.ndarray` of :obj:`tuple` elements (slices).
Example::
>>> import numpy as np
>>> ary = np.arange(0, 10)
>>> splitter = ShapeSplitter(ary.shape, 3)
>>> split = splitter.calculate_split()
>>> split.shape
(3,)
>>> split
array([(slice(0, 4, None),), (slice(4, 7, None),), (slice(7, 10, None),)],
dtype=[('0', 'O')])
>>> [ary[slyce] for slyce in split.flatten()]
[array([0, 1, 2, 3]), array([4, 5, 6]), array([7, 8, 9])]
>>>
>>> splitter.split_shape # equivalent to split.shape above
array([3])
>>> splitter.split_begs # start indices for tile extents
[array([0, 4, 7])]
>>> splitter.split_ends # stop indices for tile extents
[array([ 4, 7, 10])]
"""
#: Class attribute for :obj:`logging.Logger` logging.
logger = _logging.getLogger(__name__ + ".ShapeSplitter")
#: Class attribute indicating list of valid values for :attr:`tile_bound_policy`.
#: See :data:`ARRAY_BOUNDS` and :data:`NO_BOUNDS`.
valid_tile_bounds_policies = [ARRAY_BOUNDS, NO_BOUNDS]
    def __init__(
        self,
        array_shape,
        indices_or_sections=None,
        axis=None,
        array_start=None,
        array_itemsize=1,
        tile_shape=None,
        max_tile_bytes=None,
        max_tile_shape=None,
        sub_tile_shape=None,
        halo=None,
        tile_bounds_policy=ARRAY_BOUNDS
    ):
        """
        Initialises the parameters which define the split.

        The split may be specified by exactly one of several *parameter
        groups* (see :meth:`check_consistent_parameter_grouping`):
        per-axis cut indices, a number of sections (plus optional per-axis
        section counts), an explicit tile shape, or a maximum tile byte-size.

        :param array_shape: Shape of the array to be split; converted
           to a :obj:`numpy.ndarray`.
        :param indices_or_sections: If it passes :func:`is_indices`, the
           per-axis cut positions; otherwise the total number of sections.
        :param axis: Scalar axis to cut, or a sequence giving the per-axis
           number of sections.
        :param array_start: Start index for tile slices; defaults
           to :samp:`numpy.zeros_like(array_shape)`.
        :param array_itemsize: Bytes per array element (used
           with :samp:`max_tile_bytes`).
        :param tile_shape: Explicit per-axis tile shape.
        :param max_tile_bytes: Upper bound on per-tile bytes.
        :param max_tile_shape: Per-axis upper bound on tile shape.
        :param sub_tile_shape: Tile shape is constrained to an integer
           multiple of this shape.
        :param halo: Per-axis halo sizes; converted
           via :meth:`convert_halo_to_array_form`.
        :param tile_bounds_policy: One of :data:`ARRAY_BOUNDS`
           or :data:`NO_BOUNDS`; defaults to :data:`ARRAY_BOUNDS`
           when :samp:`None`.
        """
        # Initialise *private* attributes.
        self.__array_shape = None
        self.__array_start = None
        self.__array_itemsize = None
        self.__indices_per_axis = None
        self.__split_size = None
        self.__split_num_slices_per_axis = None
        self.__tile_shape = None
        self.__max_tile_bytes = None
        self.__max_tile_shape = None
        self.__sub_tile_shape = None
        self.__halo = None
        self.__tile_bounds_policy = None
        self.__tile_beg_min = None
        self.__tile_end_max = None
        self.__split_shape = None
        self.__split_begs = None
        self.__split_ends = None
        # Now set properties from arguments
        self.array_shape = _np.array(array_shape)
        if array_start is None:
            array_start = _np.zeros_like(self.array_shape)
        self.array_start = array_start
        self.array_itemsize = array_itemsize
        indices_per_axis = None
        # Disambiguate indices_or_sections: either explicit cut indices
        # or a total number of sub-arrays.
        if is_indices(indices_or_sections):
            num_subarrays = None
            indices_per_axis = indices_or_sections
            if (
                ((axis is None) or is_scalar(axis))
                and
                (not _np.any([is_sequence(_e) for _e in indices_or_sections]))
            ):
                if axis is None:
                    axis = 0
                # Make indices_per_axis a list of lists, so that
                # element 0 is a list of indices for axis 0
                indices_per_axis = [None, ] * len(array_shape)
                indices_per_axis[axis] = indices_or_sections
        else:
            indices_per_axis = None
            num_subarrays = indices_or_sections
        self.indices_per_axis = indices_per_axis
        self.split_size = num_subarrays
        split_num_slices_per_axis = None
        # When splitting by number of sections, turn the axis argument into
        # a per-axis section-count array (1 meaning "unconstrained-by-user").
        if (self.split_size is not None) or (axis is not None):
            if axis is None:
                axis = 0
            if is_sequence(axis):
                split_num_slices_per_axis = pad_with_object(axis, len(self.array_shape), 1)
            elif self.split_size is not None:
                split_num_slices_per_axis = pad_with_object([], len(self.array_shape), 1)
                split_num_slices_per_axis[axis] = self.split_size
        self.split_num_slices_per_axis = split_num_slices_per_axis
        self.tile_shape = tile_shape
        self.max_tile_bytes = max_tile_bytes
        self.max_tile_shape = max_tile_shape
        self.sub_tile_shape = sub_tile_shape
        halo = self.convert_halo_to_array_form(halo)
        self.halo = halo
        if tile_bounds_policy is None:
            tile_bounds_policy = ARRAY_BOUNDS
        self.tile_bounds_policy = tile_bounds_policy
        # Default tile index bounds; refined later by update_tile_extent_bounds().
        self.tile_beg_min = self.array_start
        self.tile_end_max = self.array_start + self.array_shape
        # Computed by set_split_extents().
        self.split_shape = None
        self.split_begs = None
        self.split_ends = None
def convert_halo_to_array_form(self, halo):
"""
Converts the :samp:`{halo}` argument to a :samp:`({self}.array_shape.size, 2)`
shaped array.
:type halo: :samp:`None`, :obj:`int`, :samp:`self.array_shape.size` length sequence
of :samp:`int` or :samp:`(self.array_shape.size, 2)` shaped array
of :samp:`int`
:param halo: Halo to be converted to :samp:`(len(self.array_shape), 2)` shaped array form.
:rtype: :obj:`numpy.ndarray`
:return: A :samp:`(len(self.array_shape), 2)` shaped array of :obj:`numpy.int64` elements.
"""
return convert_halo_to_array_form(halo=halo, ndim=len(self.array_shape))
    @property
    def array_shape(self):
        """
        The shape of the array which is to be split. A sequence of :obj:`int` indicating the
        per-axis sizes which are to be split.
        """
        return self.__array_shape

    @array_shape.setter
    def array_shape(self, array_shape):
        self.__array_shape = array_shape

    @property
    def array_start(self):
        """
        The start index. A sequence of :obj:`int` indicating the start of indexing for
        the tile slices. Defaults to :samp:`numpy.zeros_like({self}.array_shape)`.
        """
        return self.__array_start

    @array_start.setter
    def array_start(self, array_start):
        self.__array_start = array_start

    @property
    def array_itemsize(self):
        """
        The number of bytes per array element, see :attr:`max_tile_bytes`.
        """
        return self.__array_itemsize

    @array_itemsize.setter
    def array_itemsize(self, array_itemsize):
        self.__array_itemsize = array_itemsize

    @property
    def indices_per_axis(self):
        """
        The per-axis indices indicating the cuts for the split.
        A :obj:`list` of 1D :obj:`numpy.ndarray` objects such
        that :samp:`{self}.indices_per_axis[i]` indicates the
        cut positions for axis :samp:`i`.
        """
        return self.__indices_per_axis

    @indices_per_axis.setter
    def indices_per_axis(self, indices_per_axis):
        self.__indices_per_axis = indices_per_axis

    @property
    def split_size(self):
        """
        An :obj:`int` indicating the number of tiles in the calculated split.
        """
        return self.__split_size

    @split_size.setter
    def split_size(self, split_size):
        self.__split_size = split_size

    @property
    def split_num_slices_per_axis(self):
        """
        Number of slices per axis.
        A 1D :obj:`numpy.ndarray` of :obj:`int` indicating the number of slices (sections)
        per axis, so that :samp:`{self}.split_num_slices_per_axis[i]` is an integer
        indicating the number of sections along axis :samp:`i` in the calculated split.
        """
        return self.__split_num_slices_per_axis

    @split_num_slices_per_axis.setter
    def split_num_slices_per_axis(self, split_num_slices_per_axis):
        self.__split_num_slices_per_axis = split_num_slices_per_axis

    @property
    def tile_shape(self):
        """
        The shape of all tiles in the calculated split.
        A 1D :samp:`numpy.ndarray` of :obj:`int` indicating the per-axis
        number of elements for tiles in the calculated split.
        """
        return self.__tile_shape

    @tile_shape.setter
    def tile_shape(self, tile_shape):
        self.__tile_shape = tile_shape

    @property
    def max_tile_bytes(self):
        """
        The maximum number of bytes for any tile (including :attr:`halo`) in the returned split.
        An :obj:`int` which constrains the tile shape such that any tile
        from the computed split is no bigger than :samp:`{max_tile_bytes}`.
        """
        return self.__max_tile_bytes

    @max_tile_bytes.setter
    def max_tile_bytes(self, max_tile_bytes):
        self.__max_tile_bytes = max_tile_bytes

    @property
    def max_tile_shape(self):
        """
        Per-axis maximum sizes for calculated tiles.
        A 1D :samp:`numpy.ndarray` of :obj:`int` indicating the per-axis
        maximum number of elements for tiles in the calculated split.
        """
        return self.__max_tile_shape

    @max_tile_shape.setter
    def max_tile_shape(self, max_tile_shape):
        self.__max_tile_shape = max_tile_shape

    @property
    def sub_tile_shape(self):
        """
        Calculated tile shape will be an integer multiple of this sub-tile shape.
        i.e. :samp:`(self.tile_shape[i] % self.sub_tile_shape[i]) == 0`,
        for :samp:`i in range(0, len(self.tile_shape))`.
        A 1D :samp:`numpy.ndarray` of :obj:`int` indicating sub-tile shape.
        """
        return self.__sub_tile_shape

    @sub_tile_shape.setter
    def sub_tile_shape(self, sub_tile_shape):
        self.__sub_tile_shape = sub_tile_shape

    @property
    def halo(self):
        """
        Per-axis -ve and +ve halo sizes for extending tiles to overlap with neighbouring tiles.
        A :samp:`(N, 2)` shaped array indicating the per-axis negative-direction
        (column 0) and positive-direction (column 1) halo sizes.
        """
        return self.__halo

    @halo.setter
    def halo(self, halo):
        # Canonicalise to (ndim, 2) shaped array form on assignment.
        self.__halo = convert_halo_to_array_form(halo, ndim=self.array_shape.size)

    @property
    def tile_bounds_policy(self):
        """
        A string indicating whether tile halo extents can extend beyond the array domain.
        Valid values are indicated by :attr:`valid_tile_bounds_policies`.
        """
        return self.__tile_bounds_policy

    @tile_bounds_policy.setter
    def tile_bounds_policy(self, tile_bounds_policy):
        self.__tile_bounds_policy = tile_bounds_policy

    @property
    def tile_beg_min(self):
        """
        The per-axis minimum index for :attr:`slice.start`. The per-axis lower bound for
        tile start indices. A 1D :obj:`numpy.ndarray`.
        """
        return self.__tile_beg_min

    @tile_beg_min.setter
    def tile_beg_min(self, tile_beg_min):
        self.__tile_beg_min = tile_beg_min

    @property
    def tile_end_max(self):
        """
        The per-axis maximum index for :attr:`slice.stop`. The per-axis upper bound for
        tile stop indices. A 1D :obj:`numpy.ndarray`.
        """
        return self.__tile_end_max

    @tile_end_max.setter
    def tile_end_max(self, tile_end_max):
        self.__tile_end_max = tile_end_max

    @property
    def split_shape(self):
        """
        The shape of the calculated split array. Indicates the per-axis number
        of sections in the calculated split. A 1D :obj:`numpy.ndarray`.
        """
        return self.__split_shape

    @split_shape.setter
    def split_shape(self, split_shape):
        self.__split_shape = split_shape

    @property
    def split_begs(self):
        """
        The list of per-axis start indices for :obj:`slice` objects.
        A :obj:`list` of 1D :obj:`numpy.ndarray` objects indicating
        the :attr:`slice.start` index for tiles.
        """
        return self.__split_begs

    @split_begs.setter
    def split_begs(self, split_begs):
        self.__split_begs = split_begs

    @property
    def split_ends(self):
        """
        The list of per-axis stop indices for :obj:`slice` objects.
        A :obj:`list` of 1D :obj:`numpy.ndarray` objects indicating
        the :attr:`slice.stop` index for tiles.
        """
        return self.__split_ends

    @split_ends.setter
    def split_ends(self, split_ends):
        self.__split_ends = split_ends
def check_tile_bounds_policy(self):
"""
Raises :obj:`ValueError` if :attr:`tile_bounds_policy`
is not in :samp:`[{self}.ARRAY_BOUNDS, {self}.NO_BOUNDS]`.
"""
if self.tile_bounds_policy not in self.valid_tile_bounds_policies:
raise ValueError(
"Got self.tile_bounds_policy=%s, which is not in %s."
%
(self.tile_bounds_policy, self.valid_tile_bounds_policies)
)
    def check_consistent_parameter_dimensions(self):
        """
        Ensure that all parameter dimensions are consistent with
        the :attr:`array_shape` dimension.

        :raises ValueError: For inconsistent parameter dimensions.
        """
        # NOTE(review): the messages all read "should be equal", but the
        # first two checks only reject the *greater-than* case (shorter
        # sequences are padded elsewhere), while the later checks use "!=".
        # Confirm this asymmetry is intentional.
        if self.indices_per_axis is not None:
            if len(self.indices_per_axis) > len(self.array_shape):
                raise ValueError(
                    "Got len(self.indices_per_axis)=%s > len(self.array_shape)=%s, should be equal."
                    %
                    (len(self.indices_per_axis), len(self.array_shape))
                )
        if self.split_num_slices_per_axis is not None:
            if len(self.split_num_slices_per_axis) > len(self.array_shape):
                raise ValueError(
                    (
                        "Got len(self.split_num_slices_per_axis)=%s > len(self.array_shape)=%s,"
                        +
                        " should be equal."
                    )
                    %
                    (len(self.split_num_slices_per_axis), len(self.array_shape))
                )
        if self.tile_shape is not None:
            if len(self.tile_shape) != len(self.array_shape):
                raise ValueError(
                    "Got len(self.tile_shape)=%s > len(self.array_shape)=%s, should be equal."
                    %
                    (len(self.tile_shape), len(self.array_shape))
                )
        if self.sub_tile_shape is not None:
            if len(self.sub_tile_shape) != len(self.array_shape):
                raise ValueError(
                    "Got len(self.sub_tile_shape)=%s > len(self.array_shape)=%s, should be equal."
                    %
                    (len(self.sub_tile_shape), len(self.array_shape))
                )
        if self.max_tile_shape is not None:
            if len(self.max_tile_shape) != len(self.array_shape):
                raise ValueError(
                    "Got len(self.max_tile_shape)=%s > len(self.array_shape)=%s, should be equal."
                    %
                    (len(self.max_tile_shape), len(self.array_shape))
                )
        if self.array_start is not None:
            if len(self.array_start) != len(self.array_shape):
                raise ValueError(
                    "Got len(self.array_start)=%s > len(self.array_shape)=%s, should be equal."
                    %
                    (len(self.array_start), len(self.array_shape))
                )
    def check_consistent_parameter_grouping(self):
        """
        Ensures this object does not have conflicting groups of parameters.

        Exactly one of the parameter groups 'indices_per_axis',
        'split_size', 'tile_shape' or 'max_tile_bytes' may be specified;
        'max_tile_shape' and 'sub_tile_shape' belong to
        the 'max_tile_bytes' group.

        :raises ValueError: For conflicting or absent parameters.
        """
        # Collect the groups which have at least one parameter set; each
        # entry maps a group name to the parameters belonging to it.
        parameter_groups = {}
        if self.indices_per_axis is not None:
            parameter_groups["indices_per_axis"] = \
                {"self.indices_per_axis": self.indices_per_axis}
        if (self.split_size is not None) or (self.split_num_slices_per_axis is not None):
            parameter_groups["split_size"] = \
                {
                    "self.split_size": self.split_size,
                    "self.split_num_slices_per_axis": self.split_num_slices_per_axis,
                }
        if self.tile_shape is not None:
            parameter_groups["tile_shape"] = \
                {"self.tile_shape": self.tile_shape}
        if self.max_tile_bytes is not None:
            parameter_groups["max_tile_bytes"] = \
                {"self.max_tile_bytes": self.max_tile_bytes}
        # max_tile_shape and sub_tile_shape are modifiers of the
        # max_tile_bytes group.
        if self.max_tile_shape is not None:
            if "max_tile_bytes" not in parameter_groups.keys():
                parameter_groups["max_tile_bytes"] = {}
            parameter_groups["max_tile_bytes"]["self.max_tile_shape"] = self.max_tile_shape
        if self.sub_tile_shape is not None:
            if "max_tile_bytes" not in parameter_groups.keys():
                parameter_groups["max_tile_bytes"] = {}
            parameter_groups["max_tile_bytes"]["self.sub_tile_shape"] = self.sub_tile_shape
        self.logger.debug("parameter_groups=%s", parameter_groups)
        # More than one group specified: ambiguous split definition.
        if len(parameter_groups.keys()) > 1:
            group_keys = sorted(parameter_groups.keys())
            raise ValueError(
                "Got conflicting parameter groups specified, "
                +
                "should only specify one group to define the split:\n"
                +
                (
                    "\n".join(
                        [
                            (
                                ("Group %18s: " % ("'%s'" % group_key))
                                +
                                str(parameter_groups[group_key])
                            )
                            for group_key in group_keys
                        ]
                    )
                )
            )
        # No group specified at all: nothing defines the split.
        if len(parameter_groups.keys()) <= 0:
            raise ValueError(
                "No split parameters specified, need parameters from one of the groups: "
                +
                "'indices_per_axis', 'split_size', 'tile_shape' or 'max_tile_bytes'"
            )
def check_split_parameters(self):
"""
Ensures this object has a state consistent with evaluating a split.
:raises ValueError: For conflicting or absent parameters.
"""
self.check_tile_bounds_policy()
self.check_consistent_parameter_dimensions()
self.check_consistent_parameter_grouping()
def update_tile_extent_bounds(self):
"""
Updates the :attr:`tile_beg_min` and :attr:`tile_end_max`
data members according to :attr:`tile_bounds_policy`.
"""
if self.tile_bounds_policy == NO_BOUNDS:
self.tile_beg_min = self.array_start - self.halo[:, 0]
self.tile_end_max = self.array_start + self.array_shape + self.halo[:, 1]
elif self.tile_bounds_policy == ARRAY_BOUNDS:
self.tile_beg_min = self.array_start
self.tile_end_max = self.array_start + self.array_shape
    def set_split_extents_by_indices_per_axis(self):
        """
        Sets split shape :attr:`split_shape` and
        split extents (:attr:`split_begs` and :attr:`split_ends`)
        from values in :attr:`indices_per_axis`.

        :raises ValueError: If :attr:`indices_per_axis` is :samp:`None`.
        """
        if self.indices_per_axis is None:
            raise ValueError("Got None for self.indices_per_axis")
        self.logger.debug("self.array_shape=%s", self.array_shape)
        self.logger.debug("self.indices_per_axis=%s", self.indices_per_axis)
        # Pad with None so there is one (possibly empty) entry per axis.
        self.indices_per_axis = \
            pad_with_none(self.indices_per_axis, len(self.array_shape))
        # Define the start and stop indices (extents) for each axis slice
        self.split_shape = _np.ones(len(self.array_shape), dtype="int64")
        self.split_begs = [[], ] * len(self.array_shape)
        self.split_ends = [[], ] * len(self.array_shape)
        for i in range(len(self.indices_per_axis)):
            indices = self.indices_per_axis[i]
            if (indices is not None) and (len(indices) > 0):
                # k cut indices produce k+1 sections; begs = [0] + indices,
                # ends = indices + [axis length].
                self.split_shape[i] = len(indices) + 1
                self.split_begs[i] = _np.zeros((len(indices) + 1,), dtype="int64")
                self.split_begs[i][1:] = indices
                self.split_ends[i] = _np.zeros((len(self.split_begs[i]),), dtype="int64")
                self.split_ends[i][0:-1] = self.split_begs[i][1:]
                self.split_ends[i][-1] = self.array_shape[i]
            else:
                # start and stop is the full width of the axis
                self.split_begs[i] = [0, ]
                self.split_ends[i] = [self.array_shape[i], ]
        self.logger.debug("self.indices_per_axis=%s", self.indices_per_axis)
def calculate_split_from_extents(self):
"""
Returns split calculated using extents obtained
from :attr:`split_begs` and :attr:`split_ends`.
All calls to calculate the split end up here to produce
the :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
of :obj:`tuple`-of-:obj:`slice` elements.
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
where each element is a :obj:`tuple` of :obj:`slice` objects.
"""
self.logger.debug("self.split_shape=%s", self.split_shape)
self.logger.debug("self.split_begs=%s", self.split_begs)
self.logger.debug("self.split_ends=%s", self.split_ends)
ret = \
_np.array(
[
tuple(
[
slice(
max([
self.split_begs[d][idx[d]]
+ self.array_start[d]
- self.halo[d, 0]
* (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]]),
self.tile_beg_min[d]
]),
min([
self.split_ends[d][idx[d]]
+ self.array_start[d]
+ self.halo[d, 1]
* (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]]),
self.tile_end_max[d]
])
)
for d in range(len(self.split_shape))
]
)
for idx in
_np.array(
_np.unravel_index(
_np.arange(0, _np.product(self.split_shape)),
self.split_shape
)
).T
],
dtype=[("%d" % d, "object") for d in range(len(self.split_shape))]
).reshape(self.split_shape)
return ret
def calculate_split_halos_from_extents(self):
"""
Returns :samp:`(self.ndim, 2)` shaped halo array elements indicating
the halo for each split. Tiles on the boundary may have the halo trimmed
to account for the :attr:`tile_bounds_policy`.
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
where each element is a :samp:`(self.ndim, 2)` shaped :obj:`numpy.ndarray`
indicating the per-axis and per-direction number of halo elements for each tile
in the split.
"""
self.logger.debug("self.split_shape=%s", self.split_shape)
self.logger.debug("self.split_begs=%s", self.split_begs)
self.logger.debug("self.split_ends=%s", self.split_ends)
ret = \
_np.array(
[
(
tuple(
(
min([
self.split_begs[d][idx[d]] - self.tile_beg_min[d],
self.halo[d, 0]
*
(self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]])
]),
min([
self.tile_end_max[d] - self.split_ends[d][idx[d]],
self.halo[d, 1]
*
(self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]])
])
)
for d in range(len(self.split_shape))
)
)
for idx in
_np.array(
_np.unravel_index(
_np.arange(0, _np.product(self.split_shape)),
self.split_shape
)
).T
],
dtype=[("%d" % d, "2int64") for d in range(len(self.split_shape))]
).reshape(self.split_shape)
return ret
def calculate_split_by_indices_per_axis(self):
"""
Returns split calculated using extents obtained
from :attr:`indices_per_axis`.
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
where each element is a :obj:`tuple` of :obj:`slice` objects.
"""
self.set_split_extents_by_indices_per_axis()
return self.calculate_split_from_extents()
def calculate_axis_split_extents(self, num_sections, size):
"""
Divides :samp:`range(0, {size})` into (approximately) equal sized
intervals. Returns :samp:`(begs, ends)` where :samp:`slice(begs[i], ends[i])`
define the intervals for :samp:`i in range(0, {num_sections})`.
:type num_sections: :obj:`int`
:param num_sections: Divide :samp:`range(0, {size})` into this
many intervals (approximately) equal sized intervals.
:type size: :obj:`int`
:param size: Range for the subdivision.
:rtype: :obj:`tuple`
:return: Two element tuple :samp:`(begs, ends)`
such that :samp:`slice(begs[i], ends[i])` define the
intervals for :samp:`i in range(0, {num_sections})`.
"""
section_size = size // num_sections
if section_size >= 1:
begs = _np.arange(0, section_size * num_sections, section_size)
rem = size - section_size * num_sections
if rem > 0:
for i in range(rem):
begs[i + 1:] += 1
ends = _np.zeros_like(begs)
ends[0:-1] = begs[1:]
ends[-1] = size
else:
begs = _np.arange(0, num_sections)
begs[size:] = size
ends = begs.copy()
ends[0:-1] = begs[1:]
return begs, ends
def set_split_extents_by_split_size(self):
"""
Sets split shape :attr:`split_shape` and
split extents (:attr:`split_begs` and :attr:`split_ends`)
from values in :attr:`split_size` and :attr:`split_num_slices_per_axis`.
"""
if self.split_size is None:
if (
_np.all([s is not None for s in self.split_num_slices_per_axis])
and
_np.all([s > 0 for s in self.split_num_slices_per_axis])
):
self.split_size = _np.product(self.split_num_slices_per_axis)
else:
raise ValueError(
(
"Got invalid self.split_num_slices_per_axis=%s, all elements "
+
"need to be integers greater than zero when self.split_size is None."
)
%
self.split_num_slices_per_axis
)
self.logger.debug(
"Pre cannonicalise: self.split_num_slices_per_axis=%s",
self.split_num_slices_per_axis)
self.split_num_slices_per_axis = \
calculate_num_slices_per_axis(
self.split_num_slices_per_axis,
self.split_size,
self.array_shape
)
self.logger.debug(
"Post cannonicalise: self.split_num_slices_per_axis=%s",
self.split_num_slices_per_axis)
# Define the start and stop indices (extents) for each axis slice
self.split_shape = self.split_num_slices_per_axis.copy()
self.split_begs = [[], ] * len(self.array_shape)
self.split_ends = [[], ] * len(self.array_shape)
for i in range(len(self.array_shape)):
self.split_begs[i], self.split_ends[i] = \
self.calculate_axis_split_extents(
self.split_shape[i],
self.array_shape[i]
)
def calculate_split_by_split_size(self):
"""
Returns split calculated using extents obtained
from :attr:`split_size` and :attr:`split_num_slices_per_axis`.
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
where each element is a :obj:`tuple` of :obj:`slice` objects.
"""
self.set_split_extents_by_split_size()
return self.calculate_split_from_extents()
def calculate_split_by_tile_shape(self):
"""
Returns split calculated using extents obtained
from :attr:`tile_shape`.
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
where each element is a :obj:`tuple` of :obj:`slice` objects.
"""
self.set_split_extents_by_tile_shape()
return self.calculate_split_from_extents()
def set_split_extents_by_tile_max_bytes(self):
"""
Sets split extents (:attr:`split_begs`
and :attr:`split_ends`) calculated using
from :attr:`max_tile_bytes`
(and :attr:`max_tile_shape`, :attr:`sub_tile_shape`, :attr:`halo`).
"""
self.tile_shape = \
calculate_tile_shape_for_max_bytes(
array_shape=self.array_shape,
array_itemsize=self.array_itemsize,
max_tile_bytes=self.max_tile_bytes,
max_tile_shape=self.max_tile_shape,
sub_tile_shape=self.sub_tile_shape,
halo=self.halo
)
self.set_split_extents_by_tile_shape()
def calculate_split_by_tile_max_bytes(self):
"""
Returns split calculated using extents obtained
from :attr:`max_tile_bytes`
(and :attr:`max_tile_shape`, :attr:`sub_tile_shape`, :attr:`halo`).
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
where each element is a :obj:`tuple` of :obj:`slice` objects.
"""
self.set_split_extents_by_tile_max_bytes()
return self.calculate_split_from_extents()
    def set_split_extents(self):
        """
        Sets split extents (:attr:`split_begs`
        and :attr:`split_ends`) calculated using
        selected attributes set from :meth:`__init__`.

        Dispatches to the extent calculation for whichever parameter
        group was specified; :meth:`check_split_parameters` guarantees
        at most one group is set, so the branch order only decides
        precedence if that invariant were ever violated.
        """
        self.check_split_parameters()
        self.update_tile_extent_bounds()
        if self.indices_per_axis is not None:
            self.set_split_extents_by_indices_per_axis()
        elif (self.split_size is not None) or (self.split_num_slices_per_axis is not None):
            self.set_split_extents_by_split_size()
        elif self.tile_shape is not None:
            self.set_split_extents_by_tile_shape()
        elif self.max_tile_bytes is not None:
            self.set_split_extents_by_tile_max_bytes()
def calculate_split(self):
"""
Computes the split.
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
of dimension :samp:`len({self}.array_shape)`.
Each element of the returned array is a :obj:`tuple`
containing :samp:`len({self}.array_shape)` elements, with each element
being a :obj:`slice` object. Each :obj:`tuple` defines a slice within
the bounds :samp:`{self}.array_start - {self}.halo[:, 0]`
to :samp:`{self}.array_start + {self}.array_shape + {self}.halo[:, 1]`.
"""
self.set_split_extents()
return self.calculate_split_from_extents()
|
array-split/array_split | array_split/split.py | ShapeSplitter.set_split_extents_by_tile_max_bytes | python | def set_split_extents_by_tile_max_bytes(self):
self.tile_shape = \
calculate_tile_shape_for_max_bytes(
array_shape=self.array_shape,
array_itemsize=self.array_itemsize,
max_tile_bytes=self.max_tile_bytes,
max_tile_shape=self.max_tile_shape,
sub_tile_shape=self.sub_tile_shape,
halo=self.halo
)
self.set_split_extents_by_tile_shape() | Sets split extents (:attr:`split_begs`
and :attr:`split_ends`) calculated using
from :attr:`max_tile_bytes`
(and :attr:`max_tile_shape`, :attr:`sub_tile_shape`, :attr:`halo`). | train | https://github.com/array-split/array_split/blob/e07abe3001209394dde809f7e6f505f9f49a1c26/array_split/split.py#L1451-L1468 | [
"def calculate_tile_shape_for_max_bytes(\n array_shape,\n array_itemsize,\n max_tile_bytes,\n max_tile_shape=None,\n sub_tile_shape=None,\n halo=None\n):\n \"\"\"\n Returns a tile shape :samp:`tile_shape`\n such that :samp:`numpy.product(tile_shape)*numpy.sum({array_itemsize}) <= {max_til... | class ShapeSplitter(object):
"""
Implements array shape splitting. There are three main (top-level) methods:
:meth:`__init__`
Initialisation of parameters which define the split.
:meth:`set_split_extents`
Calculates the per-axis indices for the cuts. Sets
the :attr:`split_shape`, :attr:`split_begs`
and :attr:`split_ends` attributes.
:meth:`calculate_split`
Calls :meth:`set_split_extents` followed
by :meth:`calculate_split_from_extents` to
return the :obj:`numpy.ndarray` of :obj:`tuple` elements (slices).
Example::
>>> import numpy as np
>>> ary = np.arange(0, 10)
>>> splitter = ShapeSplitter(ary.shape, 3)
>>> split = splitter.calculate_split()
>>> split.shape
(3,)
>>> split
array([(slice(0, 4, None),), (slice(4, 7, None),), (slice(7, 10, None),)],
dtype=[('0', 'O')])
>>> [ary[slyce] for slyce in split.flatten()]
[array([0, 1, 2, 3]), array([4, 5, 6]), array([7, 8, 9])]
>>>
>>> splitter.split_shape # equivalent to split.shape above
array([3])
>>> splitter.split_begs # start indices for tile extents
[array([0, 4, 7])]
>>> splitter.split_ends # stop indices for tile extents
[array([ 4, 7, 10])]
"""
#: Class attribute for :obj:`logging.Logger` logging.
logger = _logging.getLogger(__name__ + ".ShapeSplitter")
#: Class attribute indicating list of valid values for :attr:`tile_bound_policy`.
#: See :data:`ARRAY_BOUNDS` and :data:`NO_BOUNDS`.
valid_tile_bounds_policies = [ARRAY_BOUNDS, NO_BOUNDS]
def __init__(
self,
array_shape,
indices_or_sections=None,
axis=None,
array_start=None,
array_itemsize=1,
tile_shape=None,
max_tile_bytes=None,
max_tile_shape=None,
sub_tile_shape=None,
halo=None,
tile_bounds_policy=ARRAY_BOUNDS
):
# Initialise *private* attributes.
self.__array_shape = None
self.__array_start = None
self.__array_itemsize = None
self.__indices_per_axis = None
self.__split_size = None
self.__split_num_slices_per_axis = None
self.__tile_shape = None
self.__max_tile_bytes = None
self.__max_tile_shape = None
self.__sub_tile_shape = None
self.__halo = None
self.__tile_bounds_policy = None
self.__tile_beg_min = None
self.__tile_end_max = None
self.__split_shape = None
self.__split_begs = None
self.__split_ends = None
# Now set properties from arguments
self.array_shape = _np.array(array_shape)
if array_start is None:
array_start = _np.zeros_like(self.array_shape)
self.array_start = array_start
self.array_itemsize = array_itemsize
indices_per_axis = None
if is_indices(indices_or_sections):
num_subarrays = None
indices_per_axis = indices_or_sections
if (
((axis is None) or is_scalar(axis))
and
(not _np.any([is_sequence(_e) for _e in indices_or_sections]))
):
if axis is None:
axis = 0
# Make indices_per_axis a list of lists, so that
# element 0 is a list of indices for axis 0
indices_per_axis = [None, ] * len(array_shape)
indices_per_axis[axis] = indices_or_sections
else:
indices_per_axis = None
num_subarrays = indices_or_sections
self.indices_per_axis = indices_per_axis
self.split_size = num_subarrays
split_num_slices_per_axis = None
if (self.split_size is not None) or (axis is not None):
if axis is None:
axis = 0
if is_sequence(axis):
split_num_slices_per_axis = pad_with_object(axis, len(self.array_shape), 1)
elif self.split_size is not None:
split_num_slices_per_axis = pad_with_object([], len(self.array_shape), 1)
split_num_slices_per_axis[axis] = self.split_size
self.split_num_slices_per_axis = split_num_slices_per_axis
self.tile_shape = tile_shape
self.max_tile_bytes = max_tile_bytes
self.max_tile_shape = max_tile_shape
self.sub_tile_shape = sub_tile_shape
halo = self.convert_halo_to_array_form(halo)
self.halo = halo
if tile_bounds_policy is None:
tile_bounds_policy = ARRAY_BOUNDS
self.tile_bounds_policy = tile_bounds_policy
self.tile_beg_min = self.array_start
self.tile_end_max = self.array_start + self.array_shape
self.split_shape = None
self.split_begs = None
self.split_ends = None
def convert_halo_to_array_form(self, halo):
"""
Converts the :samp:`{halo}` argument to a :samp:`({self}.array_shape.size, 2)`
shaped array.
:type halo: :samp:`None`, :obj:`int`, :samp:`self.array_shape.size` length sequence
of :samp:`int` or :samp:`(self.array_shape.size, 2)` shaped array
of :samp:`int`
:param halo: Halo to be converted to :samp:`(len(self.array_shape), 2)` shaped array form.
:rtype: :obj:`numpy.ndarray`
:return: A :samp:`(len(self.array_shape), 2)` shaped array of :obj:`numpy.int64` elements.
"""
return convert_halo_to_array_form(halo=halo, ndim=len(self.array_shape))
@property
def array_shape(self):
"""
The shape of the array which is to be split. A sequence of :obj:`int` indicating the
per-axis sizes which are to be split.
"""
return self.__array_shape
@array_shape.setter
def array_shape(self, array_shape):
self.__array_shape = array_shape
@property
def array_start(self):
"""
The start index. A sequence of :obj:`int` indicating the start of indexing for
the tile slices. Defaults to :samp:`numpy.zeros_like({self}.array_shape)`.
"""
return self.__array_start
@array_start.setter
def array_start(self, array_start):
self.__array_start = array_start
@property
def array_itemsize(self):
"""
The number of bytes per array element, see :attr:`max_tile_bytes`.
"""
return self.__array_itemsize
@array_itemsize.setter
def array_itemsize(self, array_itemsize):
self.__array_itemsize = array_itemsize
@property
def indices_per_axis(self):
"""
The per-axis indices indicating the cuts for the split.
A :obj:`list` of 1D :obj:`numpy.ndarray` objects such
that :samp:`{self}.indices_per_axis[i]` indicates the
cut positions for axis :samp:`i`.
"""
return self.__indices_per_axis
@indices_per_axis.setter
def indices_per_axis(self, indices_per_axis):
self.__indices_per_axis = indices_per_axis
@property
def split_size(self):
"""
An :obj:`int` indicating the number of tiles in the calculated split.
"""
return self.__split_size
@split_size.setter
def split_size(self, split_size):
self.__split_size = split_size
@property
def split_num_slices_per_axis(self):
"""
Number of slices per axis.
A 1D :obj:`numpy.ndarray` of :obj:`int` indicating the number of slices (sections)
per axis, so that :samp:`{self}.split_num_slices_per_axis[i]` is an integer
indicating the number of sections along axis :samp:`i` in the calculated split.
"""
return self.__split_num_slices_per_axis
@split_num_slices_per_axis.setter
def split_num_slices_per_axis(self, split_num_slices_per_axis):
    self.__split_num_slices_per_axis = split_num_slices_per_axis

@property
def tile_shape(self):
    """
    The shape of all tiles in the calculated split.
    A 1D :samp:`numpy.ndarray` of :obj:`int` indicating the per-axis
    number of elements for tiles in the calculated split.
    """
    return self.__tile_shape

@tile_shape.setter
def tile_shape(self, tile_shape):
    self.__tile_shape = tile_shape

@property
def max_tile_bytes(self):
    """
    The maximum number of bytes for any tile (including :attr:`halo`) in the returned split.
    An :obj:`int` which constrains the tile shape such that any tile
    from the computed split is no bigger than :samp:`{max_tile_bytes}`.
    """
    return self.__max_tile_bytes

@max_tile_bytes.setter
def max_tile_bytes(self, max_tile_bytes):
    self.__max_tile_bytes = max_tile_bytes

@property
def max_tile_shape(self):
    """
    Per-axis maximum sizes for calculated tiles.
    A 1D :samp:`numpy.ndarray` of :obj:`int` indicating the per-axis
    maximum number of elements for tiles in the calculated split.
    """
    return self.__max_tile_shape

@max_tile_shape.setter
def max_tile_shape(self, max_tile_shape):
    self.__max_tile_shape = max_tile_shape

@property
def sub_tile_shape(self):
    """
    Calculated tile shape will be an integer multiple of this sub-tile shape.
    i.e. :samp:`(self.tile_shape[i] % self.sub_tile_shape[i]) == 0`,
    for :samp:`i in range(0, len(self.tile_shape))`.
    A 1D :samp:`numpy.ndarray` of :obj:`int` indicating sub-tile shape.
    """
    return self.__sub_tile_shape

@sub_tile_shape.setter
def sub_tile_shape(self, sub_tile_shape):
    self.__sub_tile_shape = sub_tile_shape

@property
def halo(self):
    """
    Per-axis -ve and +ve halo sizes for extending tiles to overlap with neighbouring tiles.
    A :samp:`(N, 2)` shaped array where :samp:`halo[:, 0]` is the
    negative-direction (start-side) extension and :samp:`halo[:, 1]` the
    positive-direction (stop-side) extension for each of the :samp:`N` axes.
    """
    return self.__halo

@halo.setter
def halo(self, halo):
    # Normalise None/scalar/sequence input to canonical (ndim, 2) array form.
    self.__halo = convert_halo_to_array_form(halo, ndim=self.array_shape.size)

@property
def tile_bounds_policy(self):
    """
    A string indicating whether tile halo extents can extend beyond the array domain.
    Valid values are indicated by :attr:`valid_tile_bounds_policies`.
    """
    return self.__tile_bounds_policy

@tile_bounds_policy.setter
def tile_bounds_policy(self, tile_bounds_policy):
    self.__tile_bounds_policy = tile_bounds_policy

@property
def tile_beg_min(self):
    """
    The per-axis minimum index for :attr:`slice.start`. The per-axis lower bound for
    tile start indices. A 1D :obj:`numpy.ndarray`.
    """
    return self.__tile_beg_min

@tile_beg_min.setter
def tile_beg_min(self, tile_beg_min):
    self.__tile_beg_min = tile_beg_min

@property
def tile_end_max(self):
    """
    The per-axis maximum index for :attr:`slice.stop`. The per-axis upper bound for
    tile stop indices. A 1D :obj:`numpy.ndarray`.
    """
    return self.__tile_end_max

@tile_end_max.setter
def tile_end_max(self, tile_end_max):
    self.__tile_end_max = tile_end_max

@property
def split_shape(self):
    """
    The shape of the calculated split array. Indicates the per-axis number
    of sections in the calculated split. A 1D :obj:`numpy.ndarray`.
    """
    return self.__split_shape

@split_shape.setter
def split_shape(self, split_shape):
    self.__split_shape = split_shape

@property
def split_begs(self):
    """
    The list of per-axis start indices for :obj:`slice` objects.
    A :obj:`list` of 1D :obj:`numpy.ndarray` objects indicating
    the :attr:`slice.start` index for tiles.
    """
    return self.__split_begs

@split_begs.setter
def split_begs(self, split_begs):
    self.__split_begs = split_begs

@property
def split_ends(self):
    """
    The list of per-axis stop indices for :obj:`slice` objects.
    A :obj:`list` of 1D :obj:`numpy.ndarray` objects indicating
    the :attr:`slice.stop` index for tiles.
    """
    return self.__split_ends

@split_ends.setter
def split_ends(self, split_ends):
    self.__split_ends = split_ends
def check_tile_bounds_policy(self):
    """
    Raises :obj:`ValueError` if :attr:`tile_bounds_policy`
    is not in :samp:`[{self}.ARRAY_BOUNDS, {self}.NO_BOUNDS]`.
    """
    policy = self.tile_bounds_policy
    valid_policies = self.valid_tile_bounds_policies
    if policy in valid_policies:
        return
    raise ValueError(
        "Got self.tile_bounds_policy=%s, which is not in %s."
        %
        (policy, valid_policies)
    )
def check_consistent_parameter_dimensions(self):
    """
    Ensure that all parameter dimensions are consistent with
    the :attr:`array_shape` dimension.

    :raises ValueError: For inconsistent parameter dimensions.
    """
    ndim = len(self.array_shape)
    # These parameters may legitimately be *shorter* than ndim (they get
    # padded to full length later), but must never be longer.
    for name, value in (
        ("indices_per_axis", self.indices_per_axis),
        ("split_num_slices_per_axis", self.split_num_slices_per_axis),
    ):
        if (value is not None) and (len(value) > ndim):
            raise ValueError(
                "Got len(self.%s)=%s > len(self.array_shape)=%s, should not exceed."
                %
                (name, len(value), ndim)
            )
    # These parameters must match the array dimension exactly.
    for name, value in (
        ("tile_shape", self.tile_shape),
        ("sub_tile_shape", self.sub_tile_shape),
        ("max_tile_shape", self.max_tile_shape),
        ("array_start", self.array_start),
    ):
        if (value is not None) and (len(value) != ndim):
            raise ValueError(
                "Got len(self.%s)=%s != len(self.array_shape)=%s, should be equal."
                %
                (name, len(value), ndim)
            )
def check_consistent_parameter_grouping(self):
    """
    Ensures this object does not have conflicting groups of parameters.

    :raises ValueError: For conflicting or absent parameters.
    """
    # Collect the mutually-exclusive parameter groups which were supplied;
    # exactly one group is allowed to define the split.
    parameter_groups = {}
    if self.indices_per_axis is not None:
        parameter_groups["indices_per_axis"] = \
            {"self.indices_per_axis": self.indices_per_axis}
    if (self.split_size is not None) or (self.split_num_slices_per_axis is not None):
        parameter_groups["split_size"] = \
            {
                "self.split_size": self.split_size,
                "self.split_num_slices_per_axis": self.split_num_slices_per_axis,
            }
    if self.tile_shape is not None:
        parameter_groups["tile_shape"] = \
            {"self.tile_shape": self.tile_shape}
    if self.max_tile_bytes is not None:
        parameter_groups["max_tile_bytes"] = \
            {"self.max_tile_bytes": self.max_tile_bytes}
    # max_tile_shape and sub_tile_shape are refinements of the
    # "max_tile_bytes" group, so they are filed under that group key.
    if self.max_tile_shape is not None:
        if "max_tile_bytes" not in parameter_groups.keys():
            parameter_groups["max_tile_bytes"] = {}
        parameter_groups["max_tile_bytes"]["self.max_tile_shape"] = self.max_tile_shape
    if self.sub_tile_shape is not None:
        if "max_tile_bytes" not in parameter_groups.keys():
            parameter_groups["max_tile_bytes"] = {}
        parameter_groups["max_tile_bytes"]["self.sub_tile_shape"] = self.sub_tile_shape
    self.logger.debug("parameter_groups=%s", parameter_groups)
    # More than one group supplied is ambiguous: report all of them, sorted
    # for deterministic message content.
    if len(parameter_groups.keys()) > 1:
        group_keys = sorted(parameter_groups.keys())
        raise ValueError(
            "Got conflicting parameter groups specified, "
            +
            "should only specify one group to define the split:\n"
            +
            (
                "\n".join(
                    [
                        (
                            ("Group %18s: " % ("'%s'" % group_key))
                            +
                            str(parameter_groups[group_key])
                        )
                        for group_key in group_keys
                    ]
                )
            )
        )
    # No group at all means the split is undefined.
    if len(parameter_groups.keys()) <= 0:
        raise ValueError(
            "No split parameters specified, need parameters from one of the groups: "
            +
            "'indices_per_axis', 'split_size', 'tile_shape' or 'max_tile_bytes'"
        )
def check_split_parameters(self):
    """
    Ensures this object has a state consistent with evaluating a split.

    :raises ValueError: For conflicting or absent parameters.
    """
    # Validate the bounds policy, then parameter dimensions, then the
    # parameter grouping, in that order.
    for validate in (
        self.check_tile_bounds_policy,
        self.check_consistent_parameter_dimensions,
        self.check_consistent_parameter_grouping,
    ):
        validate()
def update_tile_extent_bounds(self):
    """
    Updates the :attr:`tile_beg_min` and :attr:`tile_end_max`
    data members according to :attr:`tile_bounds_policy`.
    """
    policy = self.tile_bounds_policy
    domain_beg = self.array_start
    domain_end = self.array_start + self.array_shape
    if policy == ARRAY_BOUNDS:
        # Tiles (including halo) are clamped to the array domain.
        self.tile_beg_min = domain_beg
        self.tile_end_max = domain_end
    elif policy == NO_BOUNDS:
        # Tiles may extend past the array domain by the halo amount.
        self.tile_beg_min = domain_beg - self.halo[:, 0]
        self.tile_end_max = domain_end + self.halo[:, 1]
def set_split_extents_by_indices_per_axis(self):
    """
    Sets split shape :attr:`split_shape` and
    split extents (:attr:`split_begs` and :attr:`split_ends`)
    from values in :attr:`indices_per_axis`.

    :raises ValueError: If :attr:`indices_per_axis` is :samp:`None`.
    """
    if self.indices_per_axis is None:
        raise ValueError("Got None for self.indices_per_axis")
    self.logger.debug("self.array_shape=%s", self.array_shape)
    self.logger.debug("self.indices_per_axis=%s", self.indices_per_axis)
    # Pad with None entries so there is one (possibly None) cut-index
    # sequence per axis.
    self.indices_per_axis = \
        pad_with_none(self.indices_per_axis, len(self.array_shape))
    # Define the start and stop indices (extents) for each axis slice
    self.split_shape = _np.ones(len(self.array_shape), dtype="int64")
    self.split_begs = [[], ] * len(self.array_shape)
    self.split_ends = [[], ] * len(self.array_shape)
    for i in range(len(self.indices_per_axis)):
        indices = self.indices_per_axis[i]
        if (indices is not None) and (len(indices) > 0):
            # N cut positions yield N + 1 sections along this axis:
            # begs = [0, *indices], ends = [*indices, axis_size].
            self.split_shape[i] = len(indices) + 1
            self.split_begs[i] = _np.zeros((len(indices) + 1,), dtype="int64")
            self.split_begs[i][1:] = indices
            self.split_ends[i] = _np.zeros((len(self.split_begs[i]),), dtype="int64")
            self.split_ends[i][0:-1] = self.split_begs[i][1:]
            self.split_ends[i][-1] = self.array_shape[i]
        else:
            # start and stop is the full width of the axis
            self.split_begs[i] = [0, ]
            self.split_ends[i] = [self.array_shape[i], ]
    self.logger.debug("self.indices_per_axis=%s", self.indices_per_axis)
def calculate_split_from_extents(self):
    """
    Returns split calculated using extents obtained
    from :attr:`split_begs` and :attr:`split_ends`.
    All calls to calculate the split end up here to produce
    the :mod:`numpy` structured array
    of :obj:`tuple`-of-:obj:`slice` elements.

    :rtype: :obj:`numpy.ndarray`
    :return:
        A :mod:`numpy` structured array (shape :attr:`split_shape`)
        where each element is a :obj:`tuple` of :obj:`slice` objects.
    """
    self.logger.debug("self.split_shape=%s", self.split_shape)
    self.logger.debug("self.split_begs=%s", self.split_begs)
    self.logger.debug("self.split_ends=%s", self.split_ends)

    ndim = len(self.split_shape)

    def tile_slice(d, i):
        # Slice for section i of axis d: the extent is shifted by
        # array_start, widened by the halo (only for non-empty sections)
        # and clamped to [tile_beg_min[d], tile_end_max[d]].
        beg = self.split_begs[d][i]
        end = self.split_ends[d][i]
        non_empty = end > beg
        start = max([
            beg + self.array_start[d] - self.halo[d, 0] * non_empty,
            self.tile_beg_min[d]
        ])
        stop = min([
            end + self.array_start[d] + self.halo[d, 1] * non_empty,
            self.tile_end_max[d]
        ])
        return slice(start, stop)

    # All multi-dimensional split indices, in C (row-major) order.
    # NOTE: numpy.product was removed in NumPy 2.0; numpy.prod is the
    # long-standing equivalent.
    all_idx = \
        _np.array(
            _np.unravel_index(
                _np.arange(0, _np.prod(self.split_shape)),
                self.split_shape
            )
        ).T
    ret = \
        _np.array(
            [
                tuple(tile_slice(d, idx[d]) for d in range(ndim))
                for idx in all_idx
            ],
            dtype=[("%d" % d, "object") for d in range(ndim)]
        ).reshape(self.split_shape)
    return ret
def calculate_split_halos_from_extents(self):
    """
    Returns :samp:`(self.ndim, 2)` shaped halo array elements indicating
    the halo for each split. Tiles on the boundary may have the halo trimmed
    to account for the :attr:`tile_bounds_policy`.

    :rtype: :obj:`numpy.ndarray`
    :return:
        A :mod:`numpy` structured array (shape :attr:`split_shape`)
        where each element is a :samp:`(self.ndim, 2)` shaped :obj:`numpy.ndarray`
        indicating the per-axis and per-direction number of halo elements for each tile
        in the split.
    """
    self.logger.debug("self.split_shape=%s", self.split_shape)
    self.logger.debug("self.split_begs=%s", self.split_begs)
    self.logger.debug("self.split_ends=%s", self.split_ends)

    ndim = len(self.split_shape)

    def tile_halo(d, i):
        # Achievable (-ve, +ve) halo for section i of axis d: the nominal
        # halo, trimmed so the extended tile stays within
        # [tile_beg_min[d], tile_end_max[d]]; empty sections get no halo.
        beg = self.split_begs[d][i]
        end = self.split_ends[d][i]
        non_empty = end > beg
        lo = min([beg - self.tile_beg_min[d], self.halo[d, 0] * non_empty])
        hi = min([self.tile_end_max[d] - end, self.halo[d, 1] * non_empty])
        return (lo, hi)

    # All multi-dimensional split indices, in C (row-major) order.
    # NOTE: numpy.product was removed in NumPy 2.0; numpy.prod is the
    # long-standing equivalent.
    all_idx = \
        _np.array(
            _np.unravel_index(
                _np.arange(0, _np.prod(self.split_shape)),
                self.split_shape
            )
        ).T
    ret = \
        _np.array(
            [
                tuple(tile_halo(d, idx[d]) for d in range(ndim))
                for idx in all_idx
            ],
            dtype=[("%d" % d, "2int64") for d in range(ndim)]
        ).reshape(self.split_shape)
    return ret
def calculate_split_by_indices_per_axis(self):
    """
    Returns split calculated using extents obtained
    from :attr:`indices_per_axis`.

    :rtype: :obj:`numpy.ndarray`
    :return:
        A structured array where each element is
        a :obj:`tuple` of :obj:`slice` objects.
    """
    # Compute the per-axis cut extents, then turn them into slice tuples.
    self.set_split_extents_by_indices_per_axis()
    split = self.calculate_split_from_extents()
    return split
def calculate_axis_split_extents(self, num_sections, size):
    """
    Divides :samp:`range(0, {size})` into (approximately) equal sized
    intervals. Returns :samp:`(begs, ends)` where :samp:`slice(begs[i], ends[i])`
    define the intervals for :samp:`i in range(0, {num_sections})`.

    :type num_sections: :obj:`int`
    :param num_sections: Divide :samp:`range(0, {size})` into this
       many intervals (approximately) equal sized intervals.
    :type size: :obj:`int`
    :param size: Range for the subdivision.
    :rtype: :obj:`tuple`
    :return: Two element tuple :samp:`(begs, ends)`
       such that :samp:`slice(begs[i], ends[i])` define the
       intervals for :samp:`i in range(0, {num_sections})`.
    """
    section_size = size // num_sections
    if section_size >= 1:
        # Equal-sized sections; the first (size % num_sections) sections
        # each absorb one extra element.
        begs = _np.arange(0, section_size * num_sections, section_size)
        rem = size - section_size * num_sections
        if rem > 0:
            # Section i starts min(i, rem) elements later -- vectorised
            # equivalent of incrementing begs[i + 1:] once per remainder
            # element (O(n) instead of O(rem * n)).
            begs += _np.minimum(_np.arange(0, num_sections), rem)
        ends = _np.zeros_like(begs)
        ends[0:-1] = begs[1:]
        ends[-1] = size
    else:
        # More sections than elements: the first `size` sections hold one
        # element each, the remaining sections are empty.
        begs = _np.arange(0, num_sections)
        begs[size:] = size
        ends = begs.copy()
        ends[0:-1] = begs[1:]
    return begs, ends
def set_split_extents_by_split_size(self):
    """
    Sets split shape :attr:`split_shape` and
    split extents (:attr:`split_begs` and :attr:`split_ends`)
    from values in :attr:`split_size` and :attr:`split_num_slices_per_axis`.

    :raises ValueError: If :attr:`split_size` is :samp:`None` and the
       per-axis slice counts are not all positive integers.
    """
    if self.split_size is None:
        # No explicit split size: derive it as the product of the per-axis
        # slice counts, which must then all be specified and positive.
        if (
            _np.all([s is not None for s in self.split_num_slices_per_axis])
            and
            _np.all([s > 0 for s in self.split_num_slices_per_axis])
        ):
            # NOTE: numpy.product was removed in NumPy 2.0; numpy.prod is
            # the long-standing equivalent.
            self.split_size = _np.prod(self.split_num_slices_per_axis)
        else:
            raise ValueError(
                (
                    "Got invalid self.split_num_slices_per_axis=%s, all elements "
                    +
                    "need to be integers greater than zero when self.split_size is None."
                )
                %
                self.split_num_slices_per_axis
            )
    self.logger.debug(
        "Pre cannonicalise: self.split_num_slices_per_axis=%s",
        self.split_num_slices_per_axis)
    # Presumably fills in unspecified (None) per-axis counts so the counts
    # multiply out to split_size -- see calculate_num_slices_per_axis.
    self.split_num_slices_per_axis = \
        calculate_num_slices_per_axis(
            self.split_num_slices_per_axis,
            self.split_size,
            self.array_shape
        )
    self.logger.debug(
        "Post cannonicalise: self.split_num_slices_per_axis=%s",
        self.split_num_slices_per_axis)
    # Define the start and stop indices (extents) for each axis slice
    self.split_shape = self.split_num_slices_per_axis.copy()
    self.split_begs = [[], ] * len(self.array_shape)
    self.split_ends = [[], ] * len(self.array_shape)
    for i in range(len(self.array_shape)):
        self.split_begs[i], self.split_ends[i] = \
            self.calculate_axis_split_extents(
                self.split_shape[i],
                self.array_shape[i]
            )
def calculate_split_by_split_size(self):
    """
    Returns split calculated using extents obtained
    from :attr:`split_size` and :attr:`split_num_slices_per_axis`.

    :rtype: :obj:`numpy.ndarray`
    :return:
        A structured array where each element is
        a :obj:`tuple` of :obj:`slice` objects.
    """
    # Compute the per-axis extents first, then convert them to slices.
    self.set_split_extents_by_split_size()
    split = self.calculate_split_from_extents()
    return split
def set_split_extents_by_tile_shape(self):
    """
    Sets split shape :attr:`split_shape` and
    split extents (:attr:`split_begs` and :attr:`split_ends`)
    from value of :attr:`tile_shape`.
    """
    # Ceiling division: number of whole-or-partial tiles per axis.
    self.split_shape = ((self.array_shape - 1) // self.tile_shape) + 1
    num_axes = len(self.array_shape)
    self.split_begs = [[], ] * num_axes
    self.split_ends = [[], ] * num_axes
    for axis in range(num_axes):
        axis_size = self.array_shape[axis]
        # Tiles start every tile_shape[axis] elements; the last tile is
        # truncated to the axis size.
        begs = _np.arange(0, axis_size, self.tile_shape[axis])
        ends = _np.zeros_like(begs)
        ends[0:-1] = begs[1:]
        ends[-1] = axis_size
        self.split_begs[axis] = begs
        self.split_ends[axis] = ends
def calculate_split_by_tile_shape(self):
    """
    Returns split calculated using extents obtained
    from :attr:`tile_shape`.

    :rtype: :obj:`numpy.ndarray`
    :return:
        A structured array where each element is
        a :obj:`tuple` of :obj:`slice` objects.
    """
    # Compute the per-axis extents first, then convert them to slices.
    self.set_split_extents_by_tile_shape()
    split = self.calculate_split_from_extents()
    return split
def calculate_split_by_tile_max_bytes(self):
    """
    Returns split calculated using extents obtained
    from :attr:`max_tile_bytes`
    (and :attr:`max_tile_shape`, :attr:`sub_tile_shape`, :attr:`halo`).

    :rtype: :obj:`numpy.ndarray`
    :return:
        A structured array where each element is
        a :obj:`tuple` of :obj:`slice` objects.
    """
    # Derive the per-axis extents from the byte limit, then build slices.
    self.set_split_extents_by_tile_max_bytes()
    split = self.calculate_split_from_extents()
    return split
def set_split_extents(self):
    """
    Sets split extents (:attr:`split_begs`
    and :attr:`split_ends`) calculated using
    selected attributes set from :meth:`__init__`.
    """
    self.check_split_parameters()
    self.update_tile_extent_bounds()
    # Dispatch to the extent calculation matching whichever parameter
    # group was supplied to __init__ (first match wins).
    if self.indices_per_axis is not None:
        self.set_split_extents_by_indices_per_axis()
        return
    if (self.split_size is not None) or (self.split_num_slices_per_axis is not None):
        self.set_split_extents_by_split_size()
        return
    if self.tile_shape is not None:
        self.set_split_extents_by_tile_shape()
        return
    if self.max_tile_bytes is not None:
        self.set_split_extents_by_tile_max_bytes()
def calculate_split(self):
    """
    Computes the split.

    :rtype: :obj:`numpy.ndarray`
    :return:
        A structured array of dimension :samp:`len({self}.array_shape)`
        whose elements are :obj:`tuple` objects
        of :samp:`len({self}.array_shape)` :obj:`slice` objects. Each
        :obj:`tuple` defines a slice within the
        bounds :samp:`{self}.array_start - {self}.halo[:, 0]`
        to :samp:`{self}.array_start + {self}.array_shape + {self}.halo[:, 1]`.
    """
    # Compute per-axis cut extents, then materialise them as slice tuples.
    self.set_split_extents()
    split = self.calculate_split_from_extents()
    return split
|
array-split/array_split | array_split/split.py | ShapeSplitter.set_split_extents | python | def set_split_extents(self):
self.check_split_parameters()
self.update_tile_extent_bounds()
if self.indices_per_axis is not None:
self.set_split_extents_by_indices_per_axis()
elif (self.split_size is not None) or (self.split_num_slices_per_axis is not None):
self.set_split_extents_by_split_size()
elif self.tile_shape is not None:
self.set_split_extents_by_tile_shape()
elif self.max_tile_bytes is not None:
self.set_split_extents_by_tile_max_bytes() | Sets split extents (:attr:`split_begs`
and :attr:`split_ends`) calculated using
selected attributes set from :meth:`__init__`. | train | https://github.com/array-split/array_split/blob/e07abe3001209394dde809f7e6f505f9f49a1c26/array_split/split.py#L1485-L1502 | [
"def check_split_parameters(self):\n \"\"\"\n Ensures this object has a state consistent with evaluating a split.\n\n :raises ValueError: For conflicting or absent parameters.\n \"\"\"\n\n self.check_tile_bounds_policy()\n self.check_consistent_parameter_dimensions()\n self.check_consistent_par... | class ShapeSplitter(object):
"""
Implements array shape splitting. There are three main (top-level) methods:
:meth:`__init__`
Initialisation of parameters which define the split.
:meth:`set_split_extents`
Calculates the per-axis indices for the cuts. Sets
the :attr:`split_shape`, :attr:`split_begs`
and :attr:`split_ends` attributes.
:meth:`calculate_split`
Calls :meth:`set_split_extents` followed
by :meth:`calculate_split_from_extents` to
return the :obj:`numpy.ndarray` of :obj:`tuple` elements (slices).
Example::
>>> import numpy as np
>>> ary = np.arange(0, 10)
>>> splitter = ShapeSplitter(ary.shape, 3)
>>> split = splitter.calculate_split()
>>> split.shape
(3,)
>>> split
array([(slice(0, 4, None),), (slice(4, 7, None),), (slice(7, 10, None),)],
dtype=[('0', 'O')])
>>> [ary[slyce] for slyce in split.flatten()]
[array([0, 1, 2, 3]), array([4, 5, 6]), array([7, 8, 9])]
>>>
>>> splitter.split_shape # equivalent to split.shape above
array([3])
>>> splitter.split_begs # start indices for tile extents
[array([0, 4, 7])]
>>> splitter.split_ends # stop indices for tile extents
[array([ 4, 7, 10])]
"""
#: Class attribute for :obj:`logging.Logger` logging.
logger = _logging.getLogger(__name__ + ".ShapeSplitter")

#: Class attribute indicating list of valid values for :attr:`tile_bounds_policy`.
#: See :data:`ARRAY_BOUNDS` and :data:`NO_BOUNDS`.
valid_tile_bounds_policies = [ARRAY_BOUNDS, NO_BOUNDS]
def __init__(
    self,
    array_shape,
    indices_or_sections=None,
    axis=None,
    array_start=None,
    array_itemsize=1,
    tile_shape=None,
    max_tile_bytes=None,
    max_tile_shape=None,
    sub_tile_shape=None,
    halo=None,
    tile_bounds_policy=ARRAY_BOUNDS
):
    """
    Initialises the parameters which define the split.

    :param array_shape: Shape of the array to be split.
    :param indices_or_sections: If :samp:`is_indices(...)` holds, treated
       as per-axis cut indices; otherwise treated as the number of
       sections (:attr:`split_size`).
    :param axis: Axis for the cut indices (scalar), or a sequence of
       per-axis section counts.
    :param array_start: Start index per axis for the tile slices;
       defaults to zeros.
    :param array_itemsize: Bytes per array element (used with
       :attr:`max_tile_bytes`).
    :param tile_shape: Explicit per-axis tile shape.
    :param max_tile_bytes: Upper bound on per-tile bytes.
    :param max_tile_shape: Per-axis upper bound on tile shape.
    :param sub_tile_shape: Tile shape is forced to a multiple of this.
    :param halo: Per-axis tile overlap; normalised
       via :meth:`convert_halo_to_array_form`.
    :param tile_bounds_policy: One of :data:`ARRAY_BOUNDS`
       or :data:`NO_BOUNDS`; :samp:`None` falls back
       to :data:`ARRAY_BOUNDS`.
    """
    # Initialise *private* attributes.
    self.__array_shape = None
    self.__array_start = None
    self.__array_itemsize = None
    self.__indices_per_axis = None
    self.__split_size = None
    self.__split_num_slices_per_axis = None
    self.__tile_shape = None
    self.__max_tile_bytes = None
    self.__max_tile_shape = None
    self.__sub_tile_shape = None
    self.__halo = None
    self.__tile_bounds_policy = None
    self.__tile_beg_min = None
    self.__tile_end_max = None
    self.__split_shape = None
    self.__split_begs = None
    self.__split_ends = None
    # Now set properties from arguments
    self.array_shape = _np.array(array_shape)
    if array_start is None:
        array_start = _np.zeros_like(self.array_shape)
    self.array_start = array_start
    self.array_itemsize = array_itemsize
    indices_per_axis = None
    # Disambiguate indices_or_sections: a sequence of indices selects the
    # "indices_per_axis" parameter group, a scalar selects "split_size".
    if is_indices(indices_or_sections):
        num_subarrays = None
        indices_per_axis = indices_or_sections
        if (
            ((axis is None) or is_scalar(axis))
            and
            (not _np.any([is_sequence(_e) for _e in indices_or_sections]))
        ):
            if axis is None:
                axis = 0
            # Make indices_per_axis a list of lists, so that
            # element 0 is a list of indices for axis 0
            indices_per_axis = [None, ] * len(array_shape)
            indices_per_axis[axis] = indices_or_sections
    else:
        indices_per_axis = None
        num_subarrays = indices_or_sections
    self.indices_per_axis = indices_per_axis
    self.split_size = num_subarrays
    split_num_slices_per_axis = None
    # A sequence `axis` gives per-axis section counts directly; a scalar
    # `axis` plus split_size puts all sections on that single axis.
    if (self.split_size is not None) or (axis is not None):
        if axis is None:
            axis = 0
        if is_sequence(axis):
            split_num_slices_per_axis = pad_with_object(axis, len(self.array_shape), 1)
        elif self.split_size is not None:
            split_num_slices_per_axis = pad_with_object([], len(self.array_shape), 1)
            split_num_slices_per_axis[axis] = self.split_size
    self.split_num_slices_per_axis = split_num_slices_per_axis
    self.tile_shape = tile_shape
    self.max_tile_bytes = max_tile_bytes
    self.max_tile_shape = max_tile_shape
    self.sub_tile_shape = sub_tile_shape
    # Normalise halo to (ndim, 2) array form.
    halo = self.convert_halo_to_array_form(halo)
    self.halo = halo
    if tile_bounds_policy is None:
        tile_bounds_policy = ARRAY_BOUNDS
    self.tile_bounds_policy = tile_bounds_policy
    # Provisional tile-extent bounds; update_tile_extent_bounds() revises
    # these according to tile_bounds_policy before a split is computed.
    self.tile_beg_min = self.array_start
    self.tile_end_max = self.array_start + self.array_shape
    self.split_shape = None
    self.split_begs = None
    self.split_ends = None
def convert_halo_to_array_form(self, halo):
    """
    Converts the :samp:`{halo}` argument to a :samp:`({self}.array_shape.size, 2)`
    shaped array.

    :type halo: :samp:`None`, :obj:`int`, :samp:`self.array_shape.size` length sequence
       of :samp:`int` or :samp:`(self.array_shape.size, 2)` shaped array
       of :samp:`int`
    :param halo: Halo to be converted to :samp:`(len(self.array_shape), 2)` shaped array form.
    :rtype: :obj:`numpy.ndarray`
    :return: A :samp:`(len(self.array_shape), 2)` shaped array of :obj:`numpy.int64` elements.
    """
    # Delegate to the module-level helper, supplying this splitter's
    # dimensionality.
    num_dims = len(self.array_shape)
    return convert_halo_to_array_form(halo=halo, ndim=num_dims)
@property
def array_shape(self):
    """
    The shape of the array which is to be split. A sequence of :obj:`int` indicating the
    per-axis sizes which are to be split.
    """
    return self.__array_shape

@array_shape.setter
def array_shape(self, array_shape):
    self.__array_shape = array_shape

@property
def array_start(self):
    """
    The start index. A sequence of :obj:`int` indicating the start of indexing for
    the tile slices. Defaults to :samp:`numpy.zeros_like({self}.array_shape)`.
    """
    return self.__array_start

@array_start.setter
def array_start(self, array_start):
    self.__array_start = array_start

@property
def array_itemsize(self):
    """
    The number of bytes per array element, see :attr:`max_tile_bytes`.
    """
    return self.__array_itemsize

@array_itemsize.setter
def array_itemsize(self, array_itemsize):
    self.__array_itemsize = array_itemsize

@property
def indices_per_axis(self):
    """
    The per-axis indices indicating the cuts for the split.
    A :obj:`list` of 1D :obj:`numpy.ndarray` objects such
    that :samp:`{self}.indices_per_axis[i]` indicates the
    cut positions for axis :samp:`i`.
    """
    return self.__indices_per_axis

@indices_per_axis.setter
def indices_per_axis(self, indices_per_axis):
    self.__indices_per_axis = indices_per_axis

@property
def split_size(self):
    """
    An :obj:`int` indicating the number of tiles in the calculated split.
    """
    return self.__split_size

@split_size.setter
def split_size(self, split_size):
    self.__split_size = split_size

@property
def split_num_slices_per_axis(self):
    """
    Number of slices per axis.
    A 1D :obj:`numpy.ndarray` of :obj:`int` indicating the number of slices (sections)
    per axis, so that :samp:`{self}.split_num_slices_per_axis[i]` is an integer
    indicating the number of sections along axis :samp:`i` in the calculated split.
    """
    return self.__split_num_slices_per_axis

@split_num_slices_per_axis.setter
def split_num_slices_per_axis(self, split_num_slices_per_axis):
    self.__split_num_slices_per_axis = split_num_slices_per_axis

@property
def tile_shape(self):
    """
    The shape of all tiles in the calculated split.
    A 1D :samp:`numpy.ndarray` of :obj:`int` indicating the per-axis
    number of elements for tiles in the calculated split.
    """
    return self.__tile_shape

@tile_shape.setter
def tile_shape(self, tile_shape):
    self.__tile_shape = tile_shape

@property
def max_tile_bytes(self):
    """
    The maximum number of bytes for any tile (including :attr:`halo`) in the returned split.
    An :obj:`int` which constrains the tile shape such that any tile
    from the computed split is no bigger than :samp:`{max_tile_bytes}`.
    """
    return self.__max_tile_bytes

@max_tile_bytes.setter
def max_tile_bytes(self, max_tile_bytes):
    self.__max_tile_bytes = max_tile_bytes

@property
def max_tile_shape(self):
    """
    Per-axis maximum sizes for calculated tiles.
    A 1D :samp:`numpy.ndarray` of :obj:`int` indicating the per-axis
    maximum number of elements for tiles in the calculated split.
    """
    return self.__max_tile_shape

@max_tile_shape.setter
def max_tile_shape(self, max_tile_shape):
    self.__max_tile_shape = max_tile_shape

@property
def sub_tile_shape(self):
    """
    Calculated tile shape will be an integer multiple of this sub-tile shape.
    i.e. :samp:`(self.tile_shape[i] % self.sub_tile_shape[i]) == 0`,
    for :samp:`i in range(0, len(self.tile_shape))`.
    A 1D :samp:`numpy.ndarray` of :obj:`int` indicating sub-tile shape.
    """
    return self.__sub_tile_shape

@sub_tile_shape.setter
def sub_tile_shape(self, sub_tile_shape):
    self.__sub_tile_shape = sub_tile_shape

@property
def halo(self):
    """
    Per-axis -ve and +ve halo sizes for extending tiles to overlap with neighbouring tiles.
    A :samp:`(N, 2)` shaped array where :samp:`halo[:, 0]` is the
    negative-direction (start-side) extension and :samp:`halo[:, 1]` the
    positive-direction (stop-side) extension for each of the :samp:`N` axes.
    """
    return self.__halo

@halo.setter
def halo(self, halo):
    # Normalise None/scalar/sequence input to canonical (ndim, 2) array form.
    self.__halo = convert_halo_to_array_form(halo, ndim=self.array_shape.size)

@property
def tile_bounds_policy(self):
    """
    A string indicating whether tile halo extents can extend beyond the array domain.
    Valid values are indicated by :attr:`valid_tile_bounds_policies`.
    """
    return self.__tile_bounds_policy

@tile_bounds_policy.setter
def tile_bounds_policy(self, tile_bounds_policy):
    self.__tile_bounds_policy = tile_bounds_policy

@property
def tile_beg_min(self):
    """
    The per-axis minimum index for :attr:`slice.start`. The per-axis lower bound for
    tile start indices. A 1D :obj:`numpy.ndarray`.
    """
    return self.__tile_beg_min

@tile_beg_min.setter
def tile_beg_min(self, tile_beg_min):
    self.__tile_beg_min = tile_beg_min

@property
def tile_end_max(self):
    """
    The per-axis maximum index for :attr:`slice.stop`. The per-axis upper bound for
    tile stop indices. A 1D :obj:`numpy.ndarray`.
    """
    return self.__tile_end_max

@tile_end_max.setter
def tile_end_max(self, tile_end_max):
    self.__tile_end_max = tile_end_max

@property
def split_shape(self):
    """
    The shape of the calculated split array. Indicates the per-axis number
    of sections in the calculated split. A 1D :obj:`numpy.ndarray`.
    """
    return self.__split_shape

@split_shape.setter
def split_shape(self, split_shape):
    self.__split_shape = split_shape

@property
def split_begs(self):
    """
    The list of per-axis start indices for :obj:`slice` objects.
    A :obj:`list` of 1D :obj:`numpy.ndarray` objects indicating
    the :attr:`slice.start` index for tiles.
    """
    return self.__split_begs

@split_begs.setter
def split_begs(self, split_begs):
    self.__split_begs = split_begs

@property
def split_ends(self):
    """
    The list of per-axis stop indices for :obj:`slice` objects.
    A :obj:`list` of 1D :obj:`numpy.ndarray` objects indicating
    the :attr:`slice.stop` index for tiles.
    """
    return self.__split_ends

@split_ends.setter
def split_ends(self, split_ends):
    self.__split_ends = split_ends
def check_tile_bounds_policy(self):
    """
    Raises :obj:`ValueError` if :attr:`tile_bounds_policy`
    is not in :samp:`[{self}.ARRAY_BOUNDS, {self}.NO_BOUNDS]`.
    """
    if self.tile_bounds_policy not in self.valid_tile_bounds_policies:
        raise ValueError(
            "Got self.tile_bounds_policy=%s, which is not in %s."
            %
            (self.tile_bounds_policy, self.valid_tile_bounds_policies)
        )

def check_consistent_parameter_dimensions(self):
    """
    Ensure that all parameter dimensions are consistent with
    the :attr:`array_shape` dimension.

    :raises ValueError: For inconsistent parameter dimensions.
    """
    # NOTE(review): the first two checks use ">" (indices_per_axis and
    # split_num_slices_per_axis may be shorter -- they get padded later),
    # yet the messages say "should be equal"; the remaining "!=" checks
    # print ">" in their messages. The checks look correct, the message
    # wording is misleading.
    if self.indices_per_axis is not None:
        if len(self.indices_per_axis) > len(self.array_shape):
            raise ValueError(
                "Got len(self.indices_per_axis)=%s > len(self.array_shape)=%s, should be equal."
                %
                (len(self.indices_per_axis), len(self.array_shape))
            )
    if self.split_num_slices_per_axis is not None:
        if len(self.split_num_slices_per_axis) > len(self.array_shape):
            raise ValueError(
                (
                    "Got len(self.split_num_slices_per_axis)=%s > len(self.array_shape)=%s,"
                    +
                    " should be equal."
                )
                %
                (len(self.split_num_slices_per_axis), len(self.array_shape))
            )
    if self.tile_shape is not None:
        if len(self.tile_shape) != len(self.array_shape):
            raise ValueError(
                "Got len(self.tile_shape)=%s > len(self.array_shape)=%s, should be equal."
                %
                (len(self.tile_shape), len(self.array_shape))
            )
    if self.sub_tile_shape is not None:
        if len(self.sub_tile_shape) != len(self.array_shape):
            raise ValueError(
                "Got len(self.sub_tile_shape)=%s > len(self.array_shape)=%s, should be equal."
                %
                (len(self.sub_tile_shape), len(self.array_shape))
            )
    if self.max_tile_shape is not None:
        if len(self.max_tile_shape) != len(self.array_shape):
            raise ValueError(
                "Got len(self.max_tile_shape)=%s > len(self.array_shape)=%s, should be equal."
                %
                (len(self.max_tile_shape), len(self.array_shape))
            )
    if self.array_start is not None:
        if len(self.array_start) != len(self.array_shape):
            raise ValueError(
                "Got len(self.array_start)=%s > len(self.array_shape)=%s, should be equal."
                %
                (len(self.array_start), len(self.array_shape))
            )

def check_consistent_parameter_grouping(self):
    """
    Ensures this object does not have conflicting groups of parameters.

    :raises ValueError: For conflicting or absent parameters.
    """
    # Collect the mutually-exclusive parameter groups which were supplied;
    # exactly one group is allowed to define the split.
    parameter_groups = {}
    if self.indices_per_axis is not None:
        parameter_groups["indices_per_axis"] = \
            {"self.indices_per_axis": self.indices_per_axis}
    if (self.split_size is not None) or (self.split_num_slices_per_axis is not None):
        parameter_groups["split_size"] = \
            {
                "self.split_size": self.split_size,
                "self.split_num_slices_per_axis": self.split_num_slices_per_axis,
            }
    if self.tile_shape is not None:
        parameter_groups["tile_shape"] = \
            {"self.tile_shape": self.tile_shape}
    if self.max_tile_bytes is not None:
        parameter_groups["max_tile_bytes"] = \
            {"self.max_tile_bytes": self.max_tile_bytes}
    # max_tile_shape and sub_tile_shape are refinements of the
    # "max_tile_bytes" group, so they are filed under that group key.
    if self.max_tile_shape is not None:
        if "max_tile_bytes" not in parameter_groups.keys():
            parameter_groups["max_tile_bytes"] = {}
        parameter_groups["max_tile_bytes"]["self.max_tile_shape"] = self.max_tile_shape
    if self.sub_tile_shape is not None:
        if "max_tile_bytes" not in parameter_groups.keys():
            parameter_groups["max_tile_bytes"] = {}
        parameter_groups["max_tile_bytes"]["self.sub_tile_shape"] = self.sub_tile_shape
    self.logger.debug("parameter_groups=%s", parameter_groups)
    if len(parameter_groups.keys()) > 1:
        group_keys = sorted(parameter_groups.keys())
        raise ValueError(
            "Got conflicting parameter groups specified, "
            +
            "should only specify one group to define the split:\n"
            +
            (
                "\n".join(
                    [
                        (
                            ("Group %18s: " % ("'%s'" % group_key))
                            +
                            str(parameter_groups[group_key])
                        )
                        for group_key in group_keys
                    ]
                )
            )
        )
    if len(parameter_groups.keys()) <= 0:
        raise ValueError(
            "No split parameters specified, need parameters from one of the groups: "
            +
            "'indices_per_axis', 'split_size', 'tile_shape' or 'max_tile_bytes'"
        )

def check_split_parameters(self):
    """
    Ensures this object has a state consistent with evaluating a split.

    :raises ValueError: For conflicting or absent parameters.
    """
    self.check_tile_bounds_policy()
    self.check_consistent_parameter_dimensions()
    self.check_consistent_parameter_grouping()

def update_tile_extent_bounds(self):
    """
    Updates the :attr:`tile_beg_min` and :attr:`tile_end_max`
    data members according to :attr:`tile_bounds_policy`.
    """
    if self.tile_bounds_policy == NO_BOUNDS:
        # Tiles (plus halo) may extend beyond the array domain.
        self.tile_beg_min = self.array_start - self.halo[:, 0]
        self.tile_end_max = self.array_start + self.array_shape + self.halo[:, 1]
    elif self.tile_bounds_policy == ARRAY_BOUNDS:
        # Tiles (plus halo) are clamped to the array domain.
        self.tile_beg_min = self.array_start
        self.tile_end_max = self.array_start + self.array_shape

def set_split_extents_by_indices_per_axis(self):
    """
    Sets split shape :attr:`split_shape` and
    split extents (:attr:`split_begs` and :attr:`split_ends`)
    from values in :attr:`indices_per_axis`.

    :raises ValueError: If :attr:`indices_per_axis` is :samp:`None`.
    """
    if self.indices_per_axis is None:
        raise ValueError("Got None for self.indices_per_axis")
    self.logger.debug("self.array_shape=%s", self.array_shape)
    self.logger.debug("self.indices_per_axis=%s", self.indices_per_axis)
    # Pad with None entries so there is one (possibly None) cut-index
    # sequence per axis.
    self.indices_per_axis = \
        pad_with_none(self.indices_per_axis, len(self.array_shape))
    # Define the start and stop indices (extents) for each axis slice
    self.split_shape = _np.ones(len(self.array_shape), dtype="int64")
    self.split_begs = [[], ] * len(self.array_shape)
    self.split_ends = [[], ] * len(self.array_shape)
    for i in range(len(self.indices_per_axis)):
        indices = self.indices_per_axis[i]
        if (indices is not None) and (len(indices) > 0):
            # N cut positions yield N + 1 sections along this axis:
            # begs = [0, *indices], ends = [*indices, axis_size].
            self.split_shape[i] = len(indices) + 1
            self.split_begs[i] = _np.zeros((len(indices) + 1,), dtype="int64")
            self.split_begs[i][1:] = indices
            self.split_ends[i] = _np.zeros((len(self.split_begs[i]),), dtype="int64")
            self.split_ends[i][0:-1] = self.split_begs[i][1:]
            self.split_ends[i][-1] = self.array_shape[i]
        else:
            # start and stop is the full width of the axis
            self.split_begs[i] = [0, ]
            self.split_ends[i] = [self.array_shape[i], ]
    self.logger.debug("self.indices_per_axis=%s", self.indices_per_axis)
def calculate_split_from_extents(self):
"""
Returns split calculated using extents obtained
from :attr:`split_begs` and :attr:`split_ends`.
All calls to calculate the split end up here to produce
the :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
of :obj:`tuple`-of-:obj:`slice` elements.
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
where each element is a :obj:`tuple` of :obj:`slice` objects.
"""
self.logger.debug("self.split_shape=%s", self.split_shape)
self.logger.debug("self.split_begs=%s", self.split_begs)
self.logger.debug("self.split_ends=%s", self.split_ends)
ret = \
_np.array(
[
tuple(
[
slice(
max([
self.split_begs[d][idx[d]]
+ self.array_start[d]
- self.halo[d, 0]
* (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]]),
self.tile_beg_min[d]
]),
min([
self.split_ends[d][idx[d]]
+ self.array_start[d]
+ self.halo[d, 1]
* (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]]),
self.tile_end_max[d]
])
)
for d in range(len(self.split_shape))
]
)
for idx in
_np.array(
_np.unravel_index(
_np.arange(0, _np.product(self.split_shape)),
self.split_shape
)
).T
],
dtype=[("%d" % d, "object") for d in range(len(self.split_shape))]
).reshape(self.split_shape)
return ret
def calculate_split_halos_from_extents(self):
"""
Returns :samp:`(self.ndim, 2)` shaped halo array elements indicating
the halo for each split. Tiles on the boundary may have the halo trimmed
to account for the :attr:`tile_bounds_policy`.
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
where each element is a :samp:`(self.ndim, 2)` shaped :obj:`numpy.ndarray`
indicating the per-axis and per-direction number of halo elements for each tile
in the split.
"""
self.logger.debug("self.split_shape=%s", self.split_shape)
self.logger.debug("self.split_begs=%s", self.split_begs)
self.logger.debug("self.split_ends=%s", self.split_ends)
ret = \
_np.array(
[
(
tuple(
(
min([
self.split_begs[d][idx[d]] - self.tile_beg_min[d],
self.halo[d, 0]
*
(self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]])
]),
min([
self.tile_end_max[d] - self.split_ends[d][idx[d]],
self.halo[d, 1]
*
(self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]])
])
)
for d in range(len(self.split_shape))
)
)
for idx in
_np.array(
_np.unravel_index(
_np.arange(0, _np.product(self.split_shape)),
self.split_shape
)
).T
],
dtype=[("%d" % d, "2int64") for d in range(len(self.split_shape))]
).reshape(self.split_shape)
return ret
def calculate_split_by_indices_per_axis(self):
"""
Returns split calculated using extents obtained
from :attr:`indices_per_axis`.
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
where each element is a :obj:`tuple` of :obj:`slice` objects.
"""
self.set_split_extents_by_indices_per_axis()
return self.calculate_split_from_extents()
def calculate_axis_split_extents(self, num_sections, size):
"""
Divides :samp:`range(0, {size})` into (approximately) equal sized
intervals. Returns :samp:`(begs, ends)` where :samp:`slice(begs[i], ends[i])`
define the intervals for :samp:`i in range(0, {num_sections})`.
:type num_sections: :obj:`int`
:param num_sections: Divide :samp:`range(0, {size})` into this
many intervals (approximately) equal sized intervals.
:type size: :obj:`int`
:param size: Range for the subdivision.
:rtype: :obj:`tuple`
:return: Two element tuple :samp:`(begs, ends)`
such that :samp:`slice(begs[i], ends[i])` define the
intervals for :samp:`i in range(0, {num_sections})`.
"""
section_size = size // num_sections
if section_size >= 1:
begs = _np.arange(0, section_size * num_sections, section_size)
rem = size - section_size * num_sections
if rem > 0:
for i in range(rem):
begs[i + 1:] += 1
ends = _np.zeros_like(begs)
ends[0:-1] = begs[1:]
ends[-1] = size
else:
begs = _np.arange(0, num_sections)
begs[size:] = size
ends = begs.copy()
ends[0:-1] = begs[1:]
return begs, ends
def set_split_extents_by_split_size(self):
"""
Sets split shape :attr:`split_shape` and
split extents (:attr:`split_begs` and :attr:`split_ends`)
from values in :attr:`split_size` and :attr:`split_num_slices_per_axis`.
"""
if self.split_size is None:
if (
_np.all([s is not None for s in self.split_num_slices_per_axis])
and
_np.all([s > 0 for s in self.split_num_slices_per_axis])
):
self.split_size = _np.product(self.split_num_slices_per_axis)
else:
raise ValueError(
(
"Got invalid self.split_num_slices_per_axis=%s, all elements "
+
"need to be integers greater than zero when self.split_size is None."
)
%
self.split_num_slices_per_axis
)
self.logger.debug(
"Pre cannonicalise: self.split_num_slices_per_axis=%s",
self.split_num_slices_per_axis)
self.split_num_slices_per_axis = \
calculate_num_slices_per_axis(
self.split_num_slices_per_axis,
self.split_size,
self.array_shape
)
self.logger.debug(
"Post cannonicalise: self.split_num_slices_per_axis=%s",
self.split_num_slices_per_axis)
# Define the start and stop indices (extents) for each axis slice
self.split_shape = self.split_num_slices_per_axis.copy()
self.split_begs = [[], ] * len(self.array_shape)
self.split_ends = [[], ] * len(self.array_shape)
for i in range(len(self.array_shape)):
self.split_begs[i], self.split_ends[i] = \
self.calculate_axis_split_extents(
self.split_shape[i],
self.array_shape[i]
)
def calculate_split_by_split_size(self):
"""
Returns split calculated using extents obtained
from :attr:`split_size` and :attr:`split_num_slices_per_axis`.
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
where each element is a :obj:`tuple` of :obj:`slice` objects.
"""
self.set_split_extents_by_split_size()
return self.calculate_split_from_extents()
def set_split_extents_by_tile_shape(self):
"""
Sets split shape :attr:`split_shape` and
split extents (:attr:`split_begs` and :attr:`split_ends`)
from value of :attr:`tile_shape`.
"""
self.split_shape = ((self.array_shape - 1) // self.tile_shape) + 1
self.split_begs = [[], ] * len(self.array_shape)
self.split_ends = [[], ] * len(self.array_shape)
for i in range(len(self.array_shape)):
self.split_begs[i] = _np.arange(0, self.array_shape[i], self.tile_shape[i])
self.split_ends[i] = _np.zeros_like(self.split_begs[i])
self.split_ends[i][0:-1] = self.split_begs[i][1:]
self.split_ends[i][-1] = self.array_shape[i]
def calculate_split_by_tile_shape(self):
"""
Returns split calculated using extents obtained
from :attr:`tile_shape`.
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
where each element is a :obj:`tuple` of :obj:`slice` objects.
"""
self.set_split_extents_by_tile_shape()
return self.calculate_split_from_extents()
def set_split_extents_by_tile_max_bytes(self):
"""
Sets split extents (:attr:`split_begs`
and :attr:`split_ends`) calculated using
from :attr:`max_tile_bytes`
(and :attr:`max_tile_shape`, :attr:`sub_tile_shape`, :attr:`halo`).
"""
self.tile_shape = \
calculate_tile_shape_for_max_bytes(
array_shape=self.array_shape,
array_itemsize=self.array_itemsize,
max_tile_bytes=self.max_tile_bytes,
max_tile_shape=self.max_tile_shape,
sub_tile_shape=self.sub_tile_shape,
halo=self.halo
)
self.set_split_extents_by_tile_shape()
def calculate_split_by_tile_max_bytes(self):
"""
Returns split calculated using extents obtained
from :attr:`max_tile_bytes`
(and :attr:`max_tile_shape`, :attr:`sub_tile_shape`, :attr:`halo`).
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
where each element is a :obj:`tuple` of :obj:`slice` objects.
"""
self.set_split_extents_by_tile_max_bytes()
return self.calculate_split_from_extents()
def calculate_split(self):
"""
Computes the split.
:rtype: :obj:`numpy.ndarray`
:return:
A :mod:`numpy` `structured array <http://docs.scipy.org/doc/numpy/user/basics.rec.html>`_
of dimension :samp:`len({self}.array_shape)`.
Each element of the returned array is a :obj:`tuple`
containing :samp:`len({self}.array_shape)` elements, with each element
being a :obj:`slice` object. Each :obj:`tuple` defines a slice within
the bounds :samp:`{self}.array_start - {self}.halo[:, 0]`
to :samp:`{self}.array_start + {self}.array_shape + {self}.halo[:, 1]`.
"""
self.set_split_extents()
return self.calculate_split_from_extents()
|
array-split/array_split | array_split/logging.py | initialise_loggers | python | def initialise_loggers(names, log_level=_builtin_logging.WARNING, handler_class=SplitStreamHandler):
frmttr = get_formatter()
for name in names:
logr = _builtin_logging.getLogger(name)
handler = handler_class()
handler.setFormatter(frmttr)
logr.addHandler(handler)
logr.setLevel(log_level) | Initialises specified loggers to generate output at the
specified logging level. If the specified named loggers do not exist,
they are created.
:type names: :obj:`list` of :obj:`str`
:param names: List of logger names.
:type log_level: :obj:`int`
:param log_level: Log level for messages, typically
one of :obj:`logging.DEBUG`, :obj:`logging.INFO`, :obj:`logging.WARN`, :obj:`logging.ERROR`
or :obj:`logging.CRITICAL`.
See :ref:`levels`.
:type handler_class: One of the :obj:`logging.handlers` classes.
:param handler_class: The handler class for output of log messages,
for example :obj:`SplitStreamHandler` or :obj:`logging.StreamHandler`.
Example::
>>> from array_split import logging
>>> logging.initialise_loggers(["my_logger",], log_level=logging.INFO)
>>> logger = logging.getLogger("my_logger")
>>> logger.info("This is info logging.")
16:35:09|ARRSPLT| This is info logging.
>>> logger.debug("Not logged at logging.INFO level.")
>>> | train | https://github.com/array-split/array_split/blob/e07abe3001209394dde809f7e6f505f9f49a1c26/array_split/logging.py#L208-L242 | [
"def get_formatter(prefix_string=\"ARRSPLT| \"):\n \"\"\"\n Returns :obj:`logging.Formatter` object which produces messages\n with *time* and :samp:`prefix_string` prefix.\n\n :type prefix_string: :obj:`str` or :samp:`None`\n :param prefix_string: Prefix for all logging messages.\n :rtype: :obj:`l... | """
=====================================
The :mod:`array_split.logging` Module
=====================================
Default initialisation of python logging.
Some simple wrappers of python built-in :mod:`logging` module
for :mod:`array_split` logging.
.. currentmodule:: array_split.logging
Classes and Functions
=====================
.. autosummary::
:toctree: generated/
SplitStreamHandler - A :obj:`logging.StreamHandler` which splits errors and warnings to *stderr*.
initialise_loggers - Initialises handlers and formatters for loggers.
get_formatter - "Returns :obj:`logging.Formatter` with time prefix string.
"""
# pylint: disable=unused-wildcard-import
# pylint: disable=wildcard-import
# pylint: disable=invalid-name
from __future__ import absolute_import
import sys
import logging as _builtin_logging
from logging import * # noqa: F401,F403
from .license import license as _license, copyright as _copyright, version as _version
__author__ = "Shane J. Latham"
__license__ = _license()
__copyright__ = _copyright()
__version__ = _version()
class _Python2SplitStreamHandler(_builtin_logging.Handler):
"""
A python :obj:`logging.handlers` :samp:`Handler` class for
splitting logging messages to different streams depending on
the logging-level.
"""
def __init__(self, outstr=sys.stdout, errstr=sys.stderr, splitlevel=_builtin_logging.WARNING):
"""
Initialise with a pair of streams and a threshold level which determines
the stream where the messages are writting.
:type outstr: file-like
:param outstr: Logging messages are written to this stream if
the message level is less than :samp:`self.splitLevel`.
:type errstr: stream
:param errstr: Logging messages are written to this stream if
the message level is greater-than-or-equal-to :samp:`self.splitLevel`.
:type splitlevel: int
:param splitlevel: Logging level threshold determining split streams for log messages.
"""
self.outStream = outstr
self.errStream = errstr
self.splitLevel = splitlevel
_builtin_logging.Handler.__init__(self)
def emit(self, record):
"""
Mostly copy-paste from :obj:`logging.StreamHandler`.
"""
try:
msg = self.format(record)
if record.levelno < self.splitLevel:
stream = self.outStream
else:
stream = self.errStream
fs = "%s\n"
try:
if (isinstance(msg, unicode) and # noqa: F405
getattr(stream, 'encoding', None)):
ufs = fs.decode(stream.encoding)
try:
stream.write(ufs % msg)
except UnicodeEncodeError:
stream.write((ufs % msg).encode(stream.encoding))
else:
stream.write(fs % msg)
except UnicodeError:
stream.write(fs % msg.encode("UTF-8"))
stream.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
class _Python3SplitStreamHandler(_builtin_logging.Handler):
"""
A python :obj:`logging.handlers` :samp:`Handler` class for
splitting logging messages to different streams depending on
the logging-level.
"""
terminator = '\n'
def __init__(self, outstr=sys.stdout, errstr=sys.stderr, splitlevel=_builtin_logging.WARNING):
"""
Initialise with a pair of streams and a threshold level which determines
the stream where the messages are writting.
:type outstr: file-like
:param outstr: Logging messages are written to this stream if
the message level is less than :samp:`self.splitLevel`.
:type errstr: stream
:param errstr: Logging messages are written to this stream if
the message level is greater-than-or-equal-to :samp:`self.splitLevel`.
:type splitlevel: int
:param splitlevel: Logging level threshold determining split streams for log messages.
"""
self.outStream = outstr
self.errStream = errstr
self.splitLevel = splitlevel
_builtin_logging.Handler.__init__(self)
def flush(self):
"""
Flushes the stream.
"""
self.acquire()
try:
if self.outStream and hasattr(self.outStream, "flush"):
self.outStream.flush()
if self.errStream and hasattr(self.errStream, "flush"):
self.errStream.flush()
finally:
self.release()
def emit(self, record):
"""
Emit a record.
If a formatter is specified, it is used to format the record.
The record is then written to the stream with a trailing newline. If
exception information is present, it is formatted using
traceback.print_exception and appended to the stream. If the stream
has an 'encoding' attribute, it is used to determine how to do the
output to the stream.
"""
try:
msg = self.format(record)
if record.levelno < self.splitLevel:
stream = self.outStream
else:
stream = self.errStream
stream.write(msg)
stream.write(self.terminator)
self.flush()
except (KeyboardInterrupt, SystemExit): # pragma: no cover
raise
except:
self.handleError(record)
if sys.version_info[0] <= 2:
class SplitStreamHandler(_Python2SplitStreamHandler):
"""
To be replaced.
"""
__doc__ = _Python2SplitStreamHandler.__doc__
else:
class SplitStreamHandler(_Python3SplitStreamHandler):
"""
To be replaced.
"""
__doc__ = _Python3SplitStreamHandler.__doc__
def get_formatter(prefix_string="ARRSPLT| "):
"""
Returns :obj:`logging.Formatter` object which produces messages
with *time* and :samp:`prefix_string` prefix.
:type prefix_string: :obj:`str` or :samp:`None`
:param prefix_string: Prefix for all logging messages.
:rtype: :obj:`logging.Formatter`
:return: Regular formatter for logging.
"""
if prefix_string is None:
prefix_string = ""
formatter = \
_builtin_logging.Formatter(
"%(asctime)s|" + prefix_string + "%(message)s",
"%H:%M:%S"
)
return formatter
|
array-split/array_split | array_split/logging.py | _Python2SplitStreamHandler.emit | python | def emit(self, record):
try:
msg = self.format(record)
if record.levelno < self.splitLevel:
stream = self.outStream
else:
stream = self.errStream
fs = "%s\n"
try:
if (isinstance(msg, unicode) and # noqa: F405
getattr(stream, 'encoding', None)):
ufs = fs.decode(stream.encoding)
try:
stream.write(ufs % msg)
except UnicodeEncodeError:
stream.write((ufs % msg).encode(stream.encoding))
else:
stream.write(fs % msg)
except UnicodeError:
stream.write(fs % msg.encode("UTF-8"))
stream.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record) | Mostly copy-paste from :obj:`logging.StreamHandler`. | train | https://github.com/array-split/array_split/blob/e07abe3001209394dde809f7e6f505f9f49a1c26/array_split/logging.py#L69-L98 | null | class _Python2SplitStreamHandler(_builtin_logging.Handler):
"""
A python :obj:`logging.handlers` :samp:`Handler` class for
splitting logging messages to different streams depending on
the logging-level.
"""
def __init__(self, outstr=sys.stdout, errstr=sys.stderr, splitlevel=_builtin_logging.WARNING):
"""
Initialise with a pair of streams and a threshold level which determines
the stream where the messages are writting.
:type outstr: file-like
:param outstr: Logging messages are written to this stream if
the message level is less than :samp:`self.splitLevel`.
:type errstr: stream
:param errstr: Logging messages are written to this stream if
the message level is greater-than-or-equal-to :samp:`self.splitLevel`.
:type splitlevel: int
:param splitlevel: Logging level threshold determining split streams for log messages.
"""
self.outStream = outstr
self.errStream = errstr
self.splitLevel = splitlevel
_builtin_logging.Handler.__init__(self)
|
array-split/array_split | array_split/logging.py | _Python3SplitStreamHandler.flush | python | def flush(self):
self.acquire()
try:
if self.outStream and hasattr(self.outStream, "flush"):
self.outStream.flush()
if self.errStream and hasattr(self.errStream, "flush"):
self.errStream.flush()
finally:
self.release() | Flushes the stream. | train | https://github.com/array-split/array_split/blob/e07abe3001209394dde809f7e6f505f9f49a1c26/array_split/logging.py#L130-L141 | null | class _Python3SplitStreamHandler(_builtin_logging.Handler):
"""
A python :obj:`logging.handlers` :samp:`Handler` class for
splitting logging messages to different streams depending on
the logging-level.
"""
terminator = '\n'
def __init__(self, outstr=sys.stdout, errstr=sys.stderr, splitlevel=_builtin_logging.WARNING):
"""
Initialise with a pair of streams and a threshold level which determines
the stream where the messages are writting.
:type outstr: file-like
:param outstr: Logging messages are written to this stream if
the message level is less than :samp:`self.splitLevel`.
:type errstr: stream
:param errstr: Logging messages are written to this stream if
the message level is greater-than-or-equal-to :samp:`self.splitLevel`.
:type splitlevel: int
:param splitlevel: Logging level threshold determining split streams for log messages.
"""
self.outStream = outstr
self.errStream = errstr
self.splitLevel = splitlevel
_builtin_logging.Handler.__init__(self)
def emit(self, record):
"""
Emit a record.
If a formatter is specified, it is used to format the record.
The record is then written to the stream with a trailing newline. If
exception information is present, it is formatted using
traceback.print_exception and appended to the stream. If the stream
has an 'encoding' attribute, it is used to determine how to do the
output to the stream.
"""
try:
msg = self.format(record)
if record.levelno < self.splitLevel:
stream = self.outStream
else:
stream = self.errStream
stream.write(msg)
stream.write(self.terminator)
self.flush()
except (KeyboardInterrupt, SystemExit): # pragma: no cover
raise
except:
self.handleError(record)
|
array-split/array_split | array_split/logging.py | _Python3SplitStreamHandler.emit | python | def emit(self, record):
try:
msg = self.format(record)
if record.levelno < self.splitLevel:
stream = self.outStream
else:
stream = self.errStream
stream.write(msg)
stream.write(self.terminator)
self.flush()
except (KeyboardInterrupt, SystemExit): # pragma: no cover
raise
except:
self.handleError(record) | Emit a record.
If a formatter is specified, it is used to format the record.
The record is then written to the stream with a trailing newline. If
exception information is present, it is formatted using
traceback.print_exception and appended to the stream. If the stream
has an 'encoding' attribute, it is used to determine how to do the
output to the stream. | train | https://github.com/array-split/array_split/blob/e07abe3001209394dde809f7e6f505f9f49a1c26/array_split/logging.py#L143-L166 | [
"def flush(self):\n \"\"\"\n Flushes the stream.\n \"\"\"\n self.acquire()\n try:\n if self.outStream and hasattr(self.outStream, \"flush\"):\n self.outStream.flush()\n if self.errStream and hasattr(self.errStream, \"flush\"):\n self.errStream.flush()\n finally:... | class _Python3SplitStreamHandler(_builtin_logging.Handler):
"""
A python :obj:`logging.handlers` :samp:`Handler` class for
splitting logging messages to different streams depending on
the logging-level.
"""
terminator = '\n'
def __init__(self, outstr=sys.stdout, errstr=sys.stderr, splitlevel=_builtin_logging.WARNING):
"""
Initialise with a pair of streams and a threshold level which determines
the stream where the messages are writting.
:type outstr: file-like
:param outstr: Logging messages are written to this stream if
the message level is less than :samp:`self.splitLevel`.
:type errstr: stream
:param errstr: Logging messages are written to this stream if
the message level is greater-than-or-equal-to :samp:`self.splitLevel`.
:type splitlevel: int
:param splitlevel: Logging level threshold determining split streams for log messages.
"""
self.outStream = outstr
self.errStream = errstr
self.splitLevel = splitlevel
_builtin_logging.Handler.__init__(self)
def flush(self):
"""
Flushes the stream.
"""
self.acquire()
try:
if self.outStream and hasattr(self.outStream, "flush"):
self.outStream.flush()
if self.errStream and hasattr(self.errStream, "flush"):
self.errStream.flush()
finally:
self.release()
|
ibm-watson-data-lab/ibmseti | ibmseti/compamp.py | Compamp.header | python | def header(self):
'''
This returns the first header in the data file
'''
if self._header is None:
self._header = self._read_half_frame_header(self.data)
return self._header | This returns the first header in the data file | train | https://github.com/ibm-watson-data-lab/ibmseti/blob/3361bc0adb4770dc7a554ed7cda292503892acee/ibmseti/compamp.py#L71-L79 | [
"def _read_half_frame_header(self, data):\n rf_center_frequency, half_frame_number, activity_id, subband_spacing_hz, start_subband_id, \\\n number_of_subbands, over_sampling, polarization = struct.unpack('>diidiifi', data[:constants.header_offset]) \n\n #rf_center_frequency is the center frequency of the first ... | class Compamp(object):
'''
Class to read and unpack data from SETI archive-compamp and compamp files.
'''
def __init__(self, data):
'''
data is the raw data read from a SETI compamp or archive-compamp file.
Usually this is done like
aca = ibmseti.compamp.Compamp(open('path/to/file.archive-compamp','rb').read())
This object does not automatically read and unpack the data upon instantiation. Instead, it does
so only when called upon. This allows the object to be instantiated on a Spark driver,
and the work to read/unpack the data may be performed on an executor.
It also does not cache the unpacked data in order to reduce the memory footprint.
'''
self.data = data
self._header = None
def _read_half_frame_header(self, data):
rf_center_frequency, half_frame_number, activity_id, subband_spacing_hz, start_subband_id, \
number_of_subbands, over_sampling, polarization = struct.unpack('>diidiifi', data[:constants.header_offset])
#rf_center_frequency is the center frequency of the first subband in the file. If this is a compamp file,
#there is only one subband. If this is an archive-compamp, there are typically 16 subbands.
#This information must be used to properly calculate the
#todo -- add ability for somebody to use N*__bins_per_half_frame bins instead. Will
#need to allow for N to be passed into this function, or set in the Contsants object
half_frame_bytes = number_of_subbands * constants.bins_per_half_frame + constants.header_offset
number_of_half_frames = int(len(data) / half_frame_bytes)
return {'rf_center_frequency':rf_center_frequency,
'half_frame_number':half_frame_number,
'activity_id':activity_id,
'subband_spacing_hz':subband_spacing_hz,
'start_subband_id':start_subband_id,
'number_of_subbands':number_of_subbands,
'over_sampling':over_sampling,
'polarization':polarization,
'half_frame_bytes':half_frame_bytes,
'number_of_half_frames':number_of_half_frames}
def headers(self):
'''
This returns all headers in the data file. There should be one for each
half_frame in the file (typically 129).
'''
first_header = self.header()
single_compamp_data = np.frombuffer(self.data, dtype=np.int8)\
.reshape((first_header['number_of_half_frames'], first_header['half_frame_bytes']))
return [self._read_half_frame_header(row) for row in single_compamp_data]
def _packed_data(self):
'''
Returns the bit-packed data extracted from the data file. This is not so useful to analyze.
Use the complex_data method instead.
'''
header = self.header()
packed_data = np.frombuffer(self.data, dtype=np.int8)\
.reshape((header['number_of_half_frames'], header['half_frame_bytes'])) # create array of half frames
packed_data = packed_data[::-1, constants.header_offset:] # slice out header and flip half frame order to reverse time ordering
packed_data = packed_data.reshape((header['number_of_half_frames']*(header['half_frame_bytes']- constants.header_offset))) # compact into vector
return packed_data
def complex_data(self):
'''
This will cast each byte to an int8 and interpret each byte
as 4 bits real values and 4 bits imag values (RRRRIIII). The data are then
used to create a 3D numpy array of dtype=complex, which is returned.
The shape of the numpy array is N half frames, M subbands, K data points per half frame,
where K = constants.bins_per_half_frame, N is typically 129 and M is typically 1 for
compamp files and 16 for archive-compamp files.
Note that this returns a Numpy array of type complex64. This data is not retained within Compamp objects.
'''
#note that since we can only pack into int8 types, we must pad each 4-bit value with 4, 0 bits
#this effectively multiplies each 4-bit value by 16 when that value is represented as an 8-bit signed integer.
packed_data = self._packed_data()
header = self.header()
real_val = np.bitwise_and(packed_data, 0xf0).astype(np.int8) # coef's are: RRRRIIII (4 bits real,
imag_val = np.left_shift(np.bitwise_and(packed_data, 0x0f), 4).astype(np.int8) # 4 bits imaginary in 2's complement)
cdata = np.empty(len(real_val), np.complex64)
#"Normalize" by making appropriate bit-shift. Otherwise, values for real and imaginary coefficients are
#inflated by 16x.
cdata.real = np.right_shift(real_val, 4)
cdata.imag = np.right_shift(imag_val, 4)
# expose compamp measurement blocks
cdata = cdata.reshape((header['number_of_half_frames'], header['number_of_subbands'], constants.bins_per_half_frame))
return cdata
|
ibm-watson-data-lab/ibmseti | ibmseti/compamp.py | Compamp.headers | python | def headers(self):
'''
This returns all headers in the data file. There should be one for each
half_frame in the file (typically 129).
'''
first_header = self.header()
single_compamp_data = np.frombuffer(self.data, dtype=np.int8)\
.reshape((first_header['number_of_half_frames'], first_header['half_frame_bytes']))
return [self._read_half_frame_header(row) for row in single_compamp_data] | This returns all headers in the data file. There should be one for each
half_frame in the file (typically 129). | train | https://github.com/ibm-watson-data-lab/ibmseti/blob/3361bc0adb4770dc7a554ed7cda292503892acee/ibmseti/compamp.py#L81-L91 | [
"def header(self):\n '''\n This returns the first header in the data file\n\n '''\n if self._header is None:\n self._header = self._read_half_frame_header(self.data)\n\n return self._header\n"
] | class Compamp(object):
'''
Class to read and unpack data from SETI archive-compamp and compamp files.
'''
def __init__(self, data):
'''
data is the raw data read from a SETI compamp or archive-compamp file.
Usually this is done like
aca = ibmseti.compamp.Compamp(open('path/to/file.archive-compamp','rb').read())
This object does not automatically read and unpack the data upon instantiation. Instead, it does
so only when called upon. This allows the object to be instantiated on a Spark driver,
and the work to read/unpack the data may be performed on an executor.
It also does not cache the unpacked data in order to reduce the memory footprint.
'''
self.data = data
self._header = None
def _read_half_frame_header(self, data):
rf_center_frequency, half_frame_number, activity_id, subband_spacing_hz, start_subband_id, \
number_of_subbands, over_sampling, polarization = struct.unpack('>diidiifi', data[:constants.header_offset])
#rf_center_frequency is the center frequency of the first subband in the file. If this is a compamp file,
#there is only one subband. If this is an archive-compamp, there are typically 16 subbands.
#This information must be used to properly calculate the
#todo -- add ability for somebody to use N*__bins_per_half_frame bins instead. Will
#need to allow for N to be passed into this function, or set in the Contsants object
half_frame_bytes = number_of_subbands * constants.bins_per_half_frame + constants.header_offset
number_of_half_frames = int(len(data) / half_frame_bytes)
return {'rf_center_frequency':rf_center_frequency,
'half_frame_number':half_frame_number,
'activity_id':activity_id,
'subband_spacing_hz':subband_spacing_hz,
'start_subband_id':start_subband_id,
'number_of_subbands':number_of_subbands,
'over_sampling':over_sampling,
'polarization':polarization,
'half_frame_bytes':half_frame_bytes,
'number_of_half_frames':number_of_half_frames}
def header(self):
'''
This returns the first header in the data file
'''
if self._header is None:
self._header = self._read_half_frame_header(self.data)
return self._header
def _packed_data(self):
'''
Returns the bit-packed data extracted from the data file. This is not so useful to analyze.
Use the complex_data method instead.
'''
header = self.header()
packed_data = np.frombuffer(self.data, dtype=np.int8)\
.reshape((header['number_of_half_frames'], header['half_frame_bytes'])) # create array of half frames
packed_data = packed_data[::-1, constants.header_offset:] # slice out header and flip half frame order to reverse time ordering
packed_data = packed_data.reshape((header['number_of_half_frames']*(header['half_frame_bytes']- constants.header_offset))) # compact into vector
return packed_data
def complex_data(self):
    '''
    This will cast each byte to an int8 and interpret each byte
    as 4 bits real values and 4 bits imag values (RRRRIIII). The data are then
    used to create a 3D numpy array of dtype=complex, which is returned.

    The shape of the numpy array is N half frames, M subbands, K data points per half frame,
    where K = constants.bins_per_half_frame, N is typically 129 and M is typically 1 for
    compamp files and 16 for archive-compamp files.

    Note that this returns a Numpy array of type complex64. This data is not retained within Compamp objects.
    '''
    # note that since we can only pack into int8 types, we must pad each 4-bit value with 4, 0 bits
    # this effectively multiplies each 4-bit value by 16 when that value is represented as an 8-bit signed integer.
    packed_data = self._packed_data()
    header = self.header()

    # The 0xf0 mask keeps the high nibble in place; the low nibble is shifted up so
    # both nibbles occupy the top 4 bits of an int8 (sign-extended 2's complement).
    real_val = np.bitwise_and(packed_data, 0xf0).astype(np.int8)  # coef's are: RRRRIIII (4 bits real,
    imag_val = np.left_shift(np.bitwise_and(packed_data, 0x0f), 4).astype(np.int8)  # 4 bits imaginary in 2's complement)

    cdata = np.empty(len(real_val), np.complex64)

    # "Normalize" by making appropriate bit-shift. Otherwise, values for real and imaginary coefficients are
    # inflated by 16x. (Arithmetic right shift by 4 undoes the x16 scaling and preserves sign.)
    cdata.real = np.right_shift(real_val, 4)
    cdata.imag = np.right_shift(imag_val, 4)

    # expose compamp measurement blocks
    cdata = cdata.reshape((header['number_of_half_frames'], header['number_of_subbands'], constants.bins_per_half_frame))

    return cdata
|
ibm-watson-data-lab/ibmseti | ibmseti/compamp.py | Compamp._packed_data | python | def _packed_data(self):
'''
Returns the bit-packed data extracted from the data file. This is not so useful to analyze.
Use the complex_data method instead.
'''
header = self.header()
packed_data = np.frombuffer(self.data, dtype=np.int8)\
.reshape((header['number_of_half_frames'], header['half_frame_bytes'])) # create array of half frames
packed_data = packed_data[::-1, constants.header_offset:] # slice out header and flip half frame order to reverse time ordering
packed_data = packed_data.reshape((header['number_of_half_frames']*(header['half_frame_bytes']- constants.header_offset))) # compact into vector
return packed_data | Returns the bit-packed data extracted from the data file. This is not so useful to analyze.
Use the complex_data method instead. | train | https://github.com/ibm-watson-data-lab/ibmseti/blob/3361bc0adb4770dc7a554ed7cda292503892acee/ibmseti/compamp.py#L93-L105 | [
"def header(self):\n '''\n This returns the first header in the data file\n\n '''\n if self._header is None:\n self._header = self._read_half_frame_header(self.data)\n\n return self._header\n"
] | class Compamp(object):
'''
Class to read and unpack data from SETI archive-compamp and compamp files.
'''
def __init__(self, data):
'''
data is the raw data read from a SETI compamp or archive-compamp file.
Usually this is done like
aca = ibmseti.compamp.Compamp(open('path/to/file.archive-compamp','rb').read())
This object does not automatically read and unpack the data upon instantiation. Instead, it does
so only when called upon. This allows the object to be instantiated on a Spark driver,
and the work to read/unpack the data may be performed on an executor.
It also does not cache the unpacked data in order to reduce the memory footprint.
'''
self.data = data
self._header = None
def _read_half_frame_header(self, data):
rf_center_frequency, half_frame_number, activity_id, subband_spacing_hz, start_subband_id, \
number_of_subbands, over_sampling, polarization = struct.unpack('>diidiifi', data[:constants.header_offset])
#rf_center_frequency is the center frequency of the first subband in the file. If this is a compamp file,
#there is only one subband. If this is an archive-compamp, there are typically 16 subbands.
#This information must be used to properly calculate the
#todo -- add ability for somebody to use N*__bins_per_half_frame bins instead. Will
#need to allow for N to be passed into this function, or set in the Contsants object
half_frame_bytes = number_of_subbands * constants.bins_per_half_frame + constants.header_offset
number_of_half_frames = int(len(data) / half_frame_bytes)
return {'rf_center_frequency':rf_center_frequency,
'half_frame_number':half_frame_number,
'activity_id':activity_id,
'subband_spacing_hz':subband_spacing_hz,
'start_subband_id':start_subband_id,
'number_of_subbands':number_of_subbands,
'over_sampling':over_sampling,
'polarization':polarization,
'half_frame_bytes':half_frame_bytes,
'number_of_half_frames':number_of_half_frames}
def header(self):
'''
This returns the first header in the data file
'''
if self._header is None:
self._header = self._read_half_frame_header(self.data)
return self._header
def headers(self):
'''
This returns all headers in the data file. There should be one for each
half_frame in the file (typically 129).
'''
first_header = self.header()
single_compamp_data = np.frombuffer(self.data, dtype=np.int8)\
.reshape((first_header['number_of_half_frames'], first_header['half_frame_bytes']))
return [self._read_half_frame_header(row) for row in single_compamp_data]
def complex_data(self):
'''
This will cast each byte to an int8 and interpret each byte
as 4 bits real values and 4 bits imag values (RRRRIIII). The data are then
used to create a 3D numpy array of dtype=complex, which is returned.
The shape of the numpy array is N half frames, M subbands, K data points per half frame,
where K = constants.bins_per_half_frame, N is typically 129 and M is typically 1 for
compamp files and 16 for archive-compamp files.
Note that this returns a Numpy array of type complex64. This data is not retained within Compamp objects.
'''
#note that since we can only pack into int8 types, we must pad each 4-bit value with 4, 0 bits
#this effectively multiplies each 4-bit value by 16 when that value is represented as an 8-bit signed integer.
packed_data = self._packed_data()
header = self.header()
real_val = np.bitwise_and(packed_data, 0xf0).astype(np.int8) # coef's are: RRRRIIII (4 bits real,
imag_val = np.left_shift(np.bitwise_and(packed_data, 0x0f), 4).astype(np.int8) # 4 bits imaginary in 2's complement)
cdata = np.empty(len(real_val), np.complex64)
#"Normalize" by making appropriate bit-shift. Otherwise, values for real and imaginary coefficients are
#inflated by 16x.
cdata.real = np.right_shift(real_val, 4)
cdata.imag = np.right_shift(imag_val, 4)
# expose compamp measurement blocks
cdata = cdata.reshape((header['number_of_half_frames'], header['number_of_subbands'], constants.bins_per_half_frame))
return cdata
|
ibm-watson-data-lab/ibmseti | ibmseti/compamp.py | Compamp.complex_data | python | def complex_data(self):
'''
This will cast each byte to an int8 and interpret each byte
as 4 bits real values and 4 bits imag values (RRRRIIII). The data are then
used to create a 3D numpy array of dtype=complex, which is returned.
The shape of the numpy array is N half frames, M subbands, K data points per half frame,
where K = constants.bins_per_half_frame, N is typically 129 and M is typically 1 for
compamp files and 16 for archive-compamp files.
Note that this returns a Numpy array of type complex64. This data is not retained within Compamp objects.
'''
#note that since we can only pack into int8 types, we must pad each 4-bit value with 4, 0 bits
#this effectively multiplies each 4-bit value by 16 when that value is represented as an 8-bit signed integer.
packed_data = self._packed_data()
header = self.header()
real_val = np.bitwise_and(packed_data, 0xf0).astype(np.int8) # coef's are: RRRRIIII (4 bits real,
imag_val = np.left_shift(np.bitwise_and(packed_data, 0x0f), 4).astype(np.int8) # 4 bits imaginary in 2's complement)
cdata = np.empty(len(real_val), np.complex64)
#"Normalize" by making appropriate bit-shift. Otherwise, values for real and imaginary coefficients are
#inflated by 16x.
cdata.real = np.right_shift(real_val, 4)
cdata.imag = np.right_shift(imag_val, 4)
# expose compamp measurement blocks
cdata = cdata.reshape((header['number_of_half_frames'], header['number_of_subbands'], constants.bins_per_half_frame))
return cdata | This will cast each byte to an int8 and interpret each byte
as 4 bits real values and 4 bits imag values (RRRRIIII). The data are then
used to create a 3D numpy array of dtype=complex, which is returned.
The shape of the numpy array is N half frames, M subbands, K data points per half frame,
where K = constants.bins_per_half_frame, N is typically 129 and M is typically 1 for
compamp files and 16 for archive-compamp files.
Note that this returns a Numpy array of type complex64. This data is not retained within Compamp objects. | train | https://github.com/ibm-watson-data-lab/ibmseti/blob/3361bc0adb4770dc7a554ed7cda292503892acee/ibmseti/compamp.py#L107-L138 | [
"def header(self):\n '''\n This returns the first header in the data file\n\n '''\n if self._header is None:\n self._header = self._read_half_frame_header(self.data)\n\n return self._header\n",
"def _packed_data(self):\n '''\n Returns the bit-packed data extracted from the data file. This is not so usef... | class Compamp(object):
'''
Class to read and unpack data from SETI archive-compamp and compamp files.
'''
def __init__(self, data):
'''
data is the raw data read from a SETI compamp or archive-compamp file.
Usually this is done like
aca = ibmseti.compamp.Compamp(open('path/to/file.archive-compamp','rb').read())
This object does not automatically read and unpack the data upon instantiation. Instead, it does
so only when called upon. This allows the object to be instantiated on a Spark driver,
and the work to read/unpack the data may be performed on an executor.
It also does not cache the unpacked data in order to reduce the memory footprint.
'''
self.data = data
self._header = None
def _read_half_frame_header(self, data):
rf_center_frequency, half_frame_number, activity_id, subband_spacing_hz, start_subband_id, \
number_of_subbands, over_sampling, polarization = struct.unpack('>diidiifi', data[:constants.header_offset])
#rf_center_frequency is the center frequency of the first subband in the file. If this is a compamp file,
#there is only one subband. If this is an archive-compamp, there are typically 16 subbands.
#This information must be used to properly calculate the
#todo -- add ability for somebody to use N*__bins_per_half_frame bins instead. Will
#need to allow for N to be passed into this function, or set in the Contsants object
half_frame_bytes = number_of_subbands * constants.bins_per_half_frame + constants.header_offset
number_of_half_frames = int(len(data) / half_frame_bytes)
return {'rf_center_frequency':rf_center_frequency,
'half_frame_number':half_frame_number,
'activity_id':activity_id,
'subband_spacing_hz':subband_spacing_hz,
'start_subband_id':start_subband_id,
'number_of_subbands':number_of_subbands,
'over_sampling':over_sampling,
'polarization':polarization,
'half_frame_bytes':half_frame_bytes,
'number_of_half_frames':number_of_half_frames}
def header(self):
'''
This returns the first header in the data file
'''
if self._header is None:
self._header = self._read_half_frame_header(self.data)
return self._header
def headers(self):
'''
This returns all headers in the data file. There should be one for each
half_frame in the file (typically 129).
'''
first_header = self.header()
single_compamp_data = np.frombuffer(self.data, dtype=np.int8)\
.reshape((first_header['number_of_half_frames'], first_header['half_frame_bytes']))
return [self._read_half_frame_header(row) for row in single_compamp_data]
def _packed_data(self):
'''
Returns the bit-packed data extracted from the data file. This is not so useful to analyze.
Use the complex_data method instead.
'''
header = self.header()
packed_data = np.frombuffer(self.data, dtype=np.int8)\
.reshape((header['number_of_half_frames'], header['half_frame_bytes'])) # create array of half frames
packed_data = packed_data[::-1, constants.header_offset:] # slice out header and flip half frame order to reverse time ordering
packed_data = packed_data.reshape((header['number_of_half_frames']*(header['half_frame_bytes']- constants.header_offset))) # compact into vector
return packed_data
|
ibm-watson-data-lab/ibmseti | ibmseti/compamp.py | SimCompamp.complex_data | python | def complex_data(self):
'''
This unpacks the data into a time-series data, of complex values.
Also, any DC offset from the time-series is removed.
This is a 1D complex-valued numpy array.
'''
cp = np.frombuffer(self.data, dtype='i1').astype(np.float32).view(np.complex64)
cp = cp - cp.mean()
return cp | This unpacks the data into a time-series data, of complex values.
Also, any DC offset from the time-series is removed.
This is a 1D complex-valued numpy array. | train | https://github.com/ibm-watson-data-lab/ibmseti/blob/3361bc0adb4770dc7a554ed7cda292503892acee/ibmseti/compamp.py#L248-L258 | null | class SimCompamp(object):
'''
Class to unpack data simulated SETI archive-compamp and compamp files.
Also has functions to compute the DFT and spectrograms.
'''
def __init__(self, data, shape=(int(32*12),int(6144/12))):
'''
data is the raw data read from a simulated SETI compamp or archive-compamp file.
Usually this is done like
aca = ibmseti.compamp.SimCompamp(open('path/to/file.archive-compamp','rb').read())
This object does not automatically read and unpack the data upon instantiation. Instead, it does
so only when called upon. This allows the object to be instantiated on a Spark driver,
and the work to read/unpack the data may be performed on an executor.
It also does not cache the unpacked data in order to reduce the memory footprint.
Standard usage:
raw_data = open('data_file.dat', r).read()
#or
r = requests.get('https://url/to/data_file.dat')
raw_data = r.content
import ibmseti
aca = ibmseti.SimCompamp(raw_data)
spectrogram = aca.get_spectrogram()
The shape can be changed with
aca.shape = (32,6144)
'''
header, self.data = data.split(b'\n',1)
private_header = None
if six.PY3:
header = header.decode('utf-8')
header = json.loads(header)
if header.get('simulator_software_version',0) > 0:
#this is the private header and we need to remove one more line
#to get the public header
private_header = header
header, self.data = self.data.split(b'\n',1)
if six.PY3:
header = header.decode('utf-8')
header = json.loads(header)
self._header = header
self.shape = shape
self._private_header = private_header
self.sigProc(None)
def header(self):
'''
Returns the header
NB: added to match the header() call from the Compamp object.
'''
return self._header
def private_header(self):
'''
Returns the private header
'''
return self._private_header
def sigProc(self, function=None):
'''
Set a function to peform signal processing before converting the time-series data into
a spectrogram.
Your function should expect a single input, a 2D complex-valued time-series numpy array. It will
have the shape you set with the self.shape attribute. Your function should return a
2D numpy array. Note: the returned array can be any shape and size.
If the function is None, there will be no effect on the time-series data.
For example:
import numpy as np
aca = ibmseti.compamp.SimCompamp(data)
def mySigProc(compdata):
return compdata * np.hanning(compdata.shape[1])
aca.sigProc(mySigProc)
#the hanning window will be applied to the 2D complex time-series data
spectrogram = aca.get_spectrogram()
'''
self._sigProc = function if function else lambda x : x
def _reshape(self, complex_data):
'''
Reshapes the input complex_data in a 2D array of size, shape. Standard is 384 x 512 for simulation files.
This is not the same size as standard SETI archive-compamp files, which are typically 129 x 6144. One can
also shape the data as 32 x 6144 in order to create spectrogram in a shape that is more similar to the
typical shape of spectrogram. Reshaping changes the time-resolution and frequency-resolution of the resulting
spectrogram. The optimal shape for signal class recognition may vary for each class.
However, you can play around with shape size as much as you want.
'''
return complex_data.reshape(*self.shape)
def _spec_fft(self, complex_data):
'''
Calculates the DFT of the complex_data along axis = 1. This assumes complex_data is a 2D array.
This uses numpy and the code is straight forward
np.fft.fftshift( np.fft.fft(complex_data), 1)
Note that we automatically shift the FFT frequency bins so that along the frequency axis,
"negative" frequencies are first, then the central frequency, followed by "positive" frequencies.
'''
return np.fft.fftshift( np.fft.fft(complex_data), 1)
def _spec_power(self, complex_data_fft):
'''
Computes the |v|^2 of input. Assuming a 2D array in the frequency domain (output of spec_fft), this
produces a spectrogram
'''
return np.abs(complex_data_fft)**2
def get_spectrogram(self):
'''
Transforms the input simulated data and computes a standard-sized spectrogram.
If self.sigProc function is not None, the 2D complex-valued time-series data will
be processed with that function before the FFT and spectrogram are calculated.
'''
return self._spec_power(self._spec_fft( self._sigProc( self._reshape( self.complex_data() )) ))
|
ibm-watson-data-lab/ibmseti | ibmseti/compamp.py | SimCompamp._spec_fft | python | def _spec_fft(self, complex_data):
'''
Calculates the DFT of the complex_data along axis = 1. This assumes complex_data is a 2D array.
This uses numpy and the code is straight forward
np.fft.fftshift( np.fft.fft(complex_data), 1)
Note that we automatically shift the FFT frequency bins so that along the frequency axis,
"negative" frequencies are first, then the central frequency, followed by "positive" frequencies.
'''
return np.fft.fftshift( np.fft.fft(complex_data), 1) | Calculates the DFT of the complex_data along axis = 1. This assumes complex_data is a 2D array.
This uses numpy and the code is straight forward
np.fft.fftshift( np.fft.fft(complex_data), 1)
Note that we automatically shift the FFT frequency bins so that along the frequency axis,
"negative" frequencies are first, then the central frequency, followed by "positive" frequencies. | train | https://github.com/ibm-watson-data-lab/ibmseti/blob/3361bc0adb4770dc7a554ed7cda292503892acee/ibmseti/compamp.py#L272-L282 | null | class SimCompamp(object):
'''
Class to unpack data simulated SETI archive-compamp and compamp files.
Also has functions to compute the DFT and spectrograms.
'''
def __init__(self, data, shape=(int(32*12),int(6144/12))):
'''
data is the raw data read from a simulated SETI compamp or archive-compamp file.
Usually this is done like
aca = ibmseti.compamp.SimCompamp(open('path/to/file.archive-compamp','rb').read())
This object does not automatically read and unpack the data upon instantiation. Instead, it does
so only when called upon. This allows the object to be instantiated on a Spark driver,
and the work to read/unpack the data may be performed on an executor.
It also does not cache the unpacked data in order to reduce the memory footprint.
Standard usage:
raw_data = open('data_file.dat', r).read()
#or
r = requests.get('https://url/to/data_file.dat')
raw_data = r.content
import ibmseti
aca = ibmseti.SimCompamp(raw_data)
spectrogram = aca.get_spectrogram()
The shape can be changed with
aca.shape = (32,6144)
'''
header, self.data = data.split(b'\n',1)
private_header = None
if six.PY3:
header = header.decode('utf-8')
header = json.loads(header)
if header.get('simulator_software_version',0) > 0:
#this is the private header and we need to remove one more line
#to get the public header
private_header = header
header, self.data = self.data.split(b'\n',1)
if six.PY3:
header = header.decode('utf-8')
header = json.loads(header)
self._header = header
self.shape = shape
self._private_header = private_header
self.sigProc(None)
def header(self):
'''
Returns the header
NB: added to match the header() call from the Compamp object.
'''
return self._header
def private_header(self):
'''
Returns the private header
'''
return self._private_header
def sigProc(self, function=None):
'''
Set a function to peform signal processing before converting the time-series data into
a spectrogram.
Your function should expect a single input, a 2D complex-valued time-series numpy array. It will
have the shape you set with the self.shape attribute. Your function should return a
2D numpy array. Note: the returned array can be any shape and size.
If the function is None, there will be no effect on the time-series data.
For example:
import numpy as np
aca = ibmseti.compamp.SimCompamp(data)
def mySigProc(compdata):
return compdata * np.hanning(compdata.shape[1])
aca.sigProc(mySigProc)
#the hanning window will be applied to the 2D complex time-series data
spectrogram = aca.get_spectrogram()
'''
self._sigProc = function if function else lambda x : x
def complex_data(self):
'''
This unpacks the data into a time-series data, of complex values.
Also, any DC offset from the time-series is removed.
This is a 1D complex-valued numpy array.
'''
cp = np.frombuffer(self.data, dtype='i1').astype(np.float32).view(np.complex64)
cp = cp - cp.mean()
return cp
def _reshape(self, complex_data):
'''
Reshapes the input complex_data in a 2D array of size, shape. Standard is 384 x 512 for simulation files.
This is not the same size as standard SETI archive-compamp files, which are typically 129 x 6144. One can
also shape the data as 32 x 6144 in order to create spectrogram in a shape that is more similar to the
typical shape of spectrogram. Reshaping changes the time-resolution and frequency-resolution of the resulting
spectrogram. The optimal shape for signal class recognition may vary for each class.
However, you can play around with shape size as much as you want.
'''
return complex_data.reshape(*self.shape)
def _spec_power(self, complex_data_fft):
'''
Computes the |v|^2 of input. Assuming a 2D array in the frequency domain (output of spec_fft), this
produces a spectrogram
'''
return np.abs(complex_data_fft)**2
def get_spectrogram(self):
'''
Transforms the input simulated data and computes a standard-sized spectrogram.
If self.sigProc function is not None, the 2D complex-valued time-series data will
be processed with that function before the FFT and spectrogram are calculated.
'''
return self._spec_power(self._spec_fft( self._sigProc( self._reshape( self.complex_data() )) ))
|
ibm-watson-data-lab/ibmseti | ibmseti/compamp.py | SimCompamp.get_spectrogram | python | def get_spectrogram(self):
'''
Transforms the input simulated data and computes a standard-sized spectrogram.
If self.sigProc function is not None, the 2D complex-valued time-series data will
be processed with that function before the FFT and spectrogram are calculated.
'''
return self._spec_power(self._spec_fft( self._sigProc( self._reshape( self.complex_data() )) )) | Transforms the input simulated data and computes a standard-sized spectrogram.
If self.sigProc function is not None, the 2D complex-valued time-series data will
be processed with that function before the FFT and spectrogram are calculated. | train | https://github.com/ibm-watson-data-lab/ibmseti/blob/3361bc0adb4770dc7a554ed7cda292503892acee/ibmseti/compamp.py#L291-L299 | [
"def complex_data(self):\n '''\n This unpacks the data into a time-series data, of complex values.\n\n Also, any DC offset from the time-series is removed.\n\n This is a 1D complex-valued numpy array.\n '''\n cp = np.frombuffer(self.data, dtype='i1').astype(np.float32).view(np.complex64)\n cp = cp - cp.mean(... | class SimCompamp(object):
'''
Class to unpack data simulated SETI archive-compamp and compamp files.
Also has functions to compute the DFT and spectrograms.
'''
def __init__(self, data, shape=(int(32*12),int(6144/12))):
'''
data is the raw data read from a simulated SETI compamp or archive-compamp file.
Usually this is done like
aca = ibmseti.compamp.SimCompamp(open('path/to/file.archive-compamp','rb').read())
This object does not automatically read and unpack the data upon instantiation. Instead, it does
so only when called upon. This allows the object to be instantiated on a Spark driver,
and the work to read/unpack the data may be performed on an executor.
It also does not cache the unpacked data in order to reduce the memory footprint.
Standard usage:
raw_data = open('data_file.dat', r).read()
#or
r = requests.get('https://url/to/data_file.dat')
raw_data = r.content
import ibmseti
aca = ibmseti.SimCompamp(raw_data)
spectrogram = aca.get_spectrogram()
The shape can be changed with
aca.shape = (32,6144)
'''
header, self.data = data.split(b'\n',1)
private_header = None
if six.PY3:
header = header.decode('utf-8')
header = json.loads(header)
if header.get('simulator_software_version',0) > 0:
#this is the private header and we need to remove one more line
#to get the public header
private_header = header
header, self.data = self.data.split(b'\n',1)
if six.PY3:
header = header.decode('utf-8')
header = json.loads(header)
self._header = header
self.shape = shape
self._private_header = private_header
self.sigProc(None)
def header(self):
'''
Returns the header
NB: added to match the header() call from the Compamp object.
'''
return self._header
def private_header(self):
'''
Returns the private header
'''
return self._private_header
def sigProc(self, function=None):
'''
Set a function to peform signal processing before converting the time-series data into
a spectrogram.
Your function should expect a single input, a 2D complex-valued time-series numpy array. It will
have the shape you set with the self.shape attribute. Your function should return a
2D numpy array. Note: the returned array can be any shape and size.
If the function is None, there will be no effect on the time-series data.
For example:
import numpy as np
aca = ibmseti.compamp.SimCompamp(data)
def mySigProc(compdata):
return compdata * np.hanning(compdata.shape[1])
aca.sigProc(mySigProc)
#the hanning window will be applied to the 2D complex time-series data
spectrogram = aca.get_spectrogram()
'''
self._sigProc = function if function else lambda x : x
def complex_data(self):
'''
This unpacks the data into a time-series data, of complex values.
Also, any DC offset from the time-series is removed.
This is a 1D complex-valued numpy array.
'''
cp = np.frombuffer(self.data, dtype='i1').astype(np.float32).view(np.complex64)
cp = cp - cp.mean()
return cp
def _reshape(self, complex_data):
'''
Reshapes the input complex_data in a 2D array of size, shape. Standard is 384 x 512 for simulation files.
This is not the same size as standard SETI archive-compamp files, which are typically 129 x 6144. One can
also shape the data as 32 x 6144 in order to create spectrogram in a shape that is more similar to the
typical shape of spectrogram. Reshaping changes the time-resolution and frequency-resolution of the resulting
spectrogram. The optimal shape for signal class recognition may vary for each class.
However, you can play around with shape size as much as you want.
'''
return complex_data.reshape(*self.shape)
def _spec_fft(self, complex_data):
'''
Calculates the DFT of the complex_data along axis = 1. This assumes complex_data is a 2D array.
This uses numpy and the code is straight forward
np.fft.fftshift( np.fft.fft(complex_data), 1)
Note that we automatically shift the FFT frequency bins so that along the frequency axis,
"negative" frequencies are first, then the central frequency, followed by "positive" frequencies.
'''
return np.fft.fftshift( np.fft.fft(complex_data), 1)
def _spec_power(self, complex_data_fft):
'''
Computes the |v|^2 of input. Assuming a 2D array in the frequency domain (output of spec_fft), this
produces a spectrogram
'''
return np.abs(complex_data_fft)**2
|
def difference(arr, n=1, axis=0, **kwargs):
    '''
    Assuming that `arr` is a 2D spectrogram returned by
    ibmseti.dsp.raw_to_spectrogram(data), this function
    uses the Numpy.diff function to calculate the nth
    difference along either time or frequency.

    If axis = 0 and n=1, then the first difference is taken
    between subsequent time samples

    If axis = 1 and n=1, then the first difference is taken
    between frequency bins.

    Extra keyword arguments are forwarded to numpy.diff.

    For example:

    //each column is a frequency bin
    x = np.array([
      [ 1,  3,  6, 10],   //each row is a time sample
      [ 0,  5,  6,  8],
      [ 2,  6,  9, 12]])

    ibmseti.features.difference(x, axis=1)
    >>> array([[2, 3, 4],
               [5, 1, 2],
               [4, 3, 3]])

    ibmseti.features.difference(x, axis=0)
    >>> array([[-1,  2,  0, -2],
               [ 2,  1,  3,  4]])
    '''
    return np.diff(arr, n=n, axis=axis, **kwargs)
ibmseti.dsp.raw_to_spectrogram(data), this function
uses the Numpy.diff function to calculate the nth
difference along either time or frequency.
If axis = 0 and n=1, then the first difference is taken
between subsequent time samples
If axis = 1 and n=1, then the first difference is taken
between frequency bins.
For example:
//each column is a frequency bin
x = np.array([
[ 1, 3, 6, 10], //each row is a time sample
[ 0, 5, 6, 8],
[ 2, 6, 9, 12]])
ibmseti.features.difference(x, axis=1)
>>> array([[2, 3, 4],
[5, 1, 2],
[4, 3, 3]])
ibmseti.features.difference(x, axis=0)
>>> array([[-1, 2, 0, -2],
[ 2, 1, 3, 4]]) | train | https://github.com/ibm-watson-data-lab/ibmseti/blob/3361bc0adb4770dc7a554ed7cda292503892acee/ibmseti/features.py#L29-L60 | null | # Copyright (c) 2017 IBM. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This module contains some of the standard features that are extracted from
spectrograms and auto-correlation calculations from the raw SETI data.
Some functions are merely wrappers around Numpy-based operations, but
contain documentation that explicitly show how they are used with SETI data.
'''
import numpy as np
import scipy.stats
import math
def projection(arr, axis=0, **kwargs):
    '''
    Project a 2D spectrogram onto a single axis by summing.

    With the ibmseti convention (rows = time samples, columns =
    frequency bins):

      * axis=0 sums along time, projecting onto the frequency axis;
      * axis=1 sums along frequency, projecting onto the time axis.

    For example:

      x = np.array([[ 1,  3,  6, 10],
                    [ 0,  5,  6,  8],
                    [ 2,  6,  9, 12]])

      projection(x, axis=1)  ->  array([20, 19, 29])
      projection(x, axis=0)  ->  array([ 3, 14, 21, 30])

    Extra keyword arguments (such as `keepdims`) are forwarded to
    numpy.sum.
    '''
    collapsed = np.sum(arr, axis=axis, **kwargs)
    return collapsed
def moment(arr, moment=1, axis=0, **kwargs):
    '''
    Nth central moment about the mean, via scipy.stats.moment.

    For a 2D spectrogram (rows = time samples, columns = frequency
    bins):

      * axis=0 computes the moment of each frequency bin over time,
        returning one value per column;
      * axis=1 computes the moment of each time sample over frequency,
        returning one value per row.

    For a 1D array (e.g. a projection onto one axis), use axis=0.

    Extra keyword arguments are forwarded to scipy.stats.moment.
    '''
    central = scipy.stats.moment(arr, moment=moment, axis=axis, **kwargs)
    return central
def first_order_gradient(arr, axis=0):
    '''
    First-order forward-difference gradient of `arr` along `axis`.

    The forward difference shortens the array by one element along `axis`,
    so a trailing slice of zeros is appended to keep the output the same
    shape as the input.
    '''
    # np.diff with n=1 is exactly the first-order forward difference.
    forward_diff = np.diff(arr, n=1, axis=axis)
    # Pad with a zero slice at the end of `axis` to restore the input shape.
    pad_position = forward_diff.shape[axis]
    return np.insert(forward_diff, pad_position, 0, axis=axis)
def total_variation(arr):
    '''
    Total variation of `arr` along axis 0.

    For a 2D (N x M) spectrogram with time along axis 0, returns a
    length-M array: the 1D total variation in time of each frequency bin,

        Sum_i |arr[i+1, j] - arr[i, j]|

    For a 1D array, a scalar is returned. With 2D input it is common to
    take the mean of the returned array as a single scalar feature.
    '''
    step_changes = np.diff(arr, axis=0)      # successive differences in time
    return np.abs(step_changes).sum(axis=0)  # accumulate their magnitudes
def maximum_variation(arr):
    '''
    Range (max - min) of `arr` taken along axis 0.

    For a 1D array a scalar is returned; for a 2D (N x M) array an array
    of length M is returned, one range per column.
    '''
    column_max = np.max(arr, axis=0)
    column_min = np.min(arr, axis=0)
    return column_max - column_min
def tv_2d_isotropic(grad_0_arr, grad_1_arr):
    '''
    Isotropic 2D total variation from per-axis gradient arrays.

    `grad_0_arr` and `grad_1_arr` are the gradients of a 2D array along
    its 0th and 1st axes (first-order forward differences, or the
    second-order central gradient). Returns the scalar sum over all
    elements of sqrt(g0^2 + g1^2).
    '''
    gradient_magnitude = np.sqrt(grad_0_arr ** 2 + grad_1_arr ** 2)
    return gradient_magnitude.sum()
def entropy(p, w):
    '''
    Entropy of a discrete PDF given as a histogram `p` with bin widths `w`.

    Returns the tuple (h_p, h_max), both in natural units (nats):

        h_p   = Sum_i  -p_i * ln(p_i / w_i)
        h_max = ln( Sum_i w_i )

    Both `p` and `w` must be numpy arrays. If `p` is density-normalized
    (Sum p_i * w_i = 1), then h_p / h_max lies in [0, 1]: a completely
    flat (uniform) distribution gives 1, indicating maximum disorder,
    while a single fully-occupied bin gives 0, indicating no disorder.

    Typical SETI use: histogram a spectrogram's power values and look for
    a normalized entropy below 1, which suggests a non-noise signal:

        bin_edges = range(0, int(spectrogram.max()) + 2)
        p, _ = np.histogram(spectrogram.flatten(), bins=bin_edges, density=True)
        w = np.diff(bin_edges)
        h_p, h_max = ibmseti.features.entropy(p, w)

    If you fix the histogram range instead, clip the data first with
    np.clip so values beyond the last bin edge are counted in the final
    bin rather than silently dropped. The result depends on the binning
    choice, so keep it consistent across spectrograms; the bin range, the
    spectrogram min/max, and the (log) number of bins are themselves
    useful companion features. Automatic binning (numpy's 'rice'/'fd',
    astroML Bayesian Blocks) tends to smooth away disorder and has given
    hard-to-interpret normalized entropies in testing — use with caution.

    If `p` is NOT a PDF, you may pass w=None; unit bin widths are then
    assumed and interpretation of the result is up to the caller, e.g.

        h_p, _ = ibmseti.features.entropy(spectrogram.flatten(), None)
    '''
    if w is None:
        # Without bin widths, treat every bin as having unit width.
        w = np.ones(len(p))
    terms = []
    for p_i, w_i in zip(p, w):
        # Empty bins contribute nothing (lim p->0 of -p*ln(p) = 0).
        terms.append(-p_i * math.log(p_i / w_i) if p_i else 0)
    h_p = np.sum(terms)
    h_max = math.log(np.sum(w))
    return h_p, h_max
def asymmetry(spectrogram_L, spectrogram_R):
    '''
    Normalized polarization asymmetry of two spectrograms.

    Returns (L - R) / (L + R) element-wise for the left- and
    right-polarization spectrograms of one observation. Integrating the
    result shows whether signal power is balanced between polarizations
    (values near 0) or concentrated in one (near +1 for L, -1 for R).

    Both inputs must be numpy arrays. NOTE(review): bins where
    L + R == 0 divide by zero and follow numpy's usual nan/inf rules.
    '''
    numerator = spectrogram_L - spectrogram_R
    denominator = spectrogram_L + spectrogram_R
    return numerator / denominator
|
ibm-watson-data-lab/ibmseti | ibmseti/features.py | projection | python | def projection(arr, axis=0, **kwargs):
'''
Assuming that `arr` is a 2D spectrogram returned by
ibmseti.dsp.raw_to_spectrogram(data), where each row
of the `arr` is a power spectrum at a particular time,
this function uses the numpy.sum function to project the
data onto the time or frequency axis into a 1D array.
If axis = 0, then the projection is onto the frequency axis
(the sum is along the time axis)
If axis = 1, then the projection is onto the time axis.
(the sum is along the frequency axis)
For example:
//each column is a frequency bin
x = np.array([
[ 1, 3, 6, 10], //each row is a time sample
[ 0, 5, 6, 8],
[ 2, 6, 9, 12]])
ibmseti.features.projection(x, axis=1)
>>> array([20, 19, 29])
ibmseti.features.projection(x, axis=0)
>>> array([ 3, 14, 21, 30])
One interesting kwarg that you may wish to use is `keepdims`.
See the documentation on numpy.sum for more information.
'''
return np.sum(arr, axis=axis, **kwargs) | Assuming that `arr` is a 2D spectrogram returned by
ibmseti.dsp.raw_to_spectrogram(data), where each row
of the `arr` is a power spectrum at a particular time,
this function uses the numpy.sum function to project the
data onto the time or frequency axis into a 1D array.
If axis = 0, then the projection is onto the frequency axis
(the sum is along the time axis)
If axis = 1, then the projection is onto the time axis.
(the sum is along the frequency axis)
For example:
//each column is a frequency bin
x = np.array([
[ 1, 3, 6, 10], //each row is a time sample
[ 0, 5, 6, 8],
[ 2, 6, 9, 12]])
ibmseti.features.projection(x, axis=1)
>>> array([20, 19, 29])
ibmseti.features.projection(x, axis=0)
>>> array([ 3, 14, 21, 30])
One interesting kwarg that you may wish to use is `keepdims`.
See the documentation on numpy.sum for more information. | train | https://github.com/ibm-watson-data-lab/ibmseti/blob/3361bc0adb4770dc7a554ed7cda292503892acee/ibmseti/features.py#L63-L95 | null | # Copyright (c) 2017 IBM. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This module contains some of the standard features that are extracted from
spectrograms and auto-correlation calculations from the raw SETI data.
Some functions are merely wrappers around Numpy-based operations, but
contain documentation that explicitly show how they are used with SETI data.
'''
import numpy as np
import scipy.stats
import math
def difference(arr, n=1, axis=0, **kwargs):
    '''
    Nth discrete difference of `arr` along `axis` (wraps numpy.diff).

    For a 2D spectrogram from ibmseti.dsp.raw_to_spectrogram(data):

    axis=0, n=1: first difference between subsequent time samples.
    axis=1, n=1: first difference between adjacent frequency bins.

    Example:
        x = np.array([[1, 3, 6, 10],
                      [0, 5, 6, 8],
                      [2, 6, 9, 12]])
        difference(x, axis=1) -> [[2, 3, 4], [5, 1, 2], [4, 3, 3]]
        difference(x, axis=0) -> [[-1, 2, 0, -2], [2, 1, 3, 4]]
    '''
    # Everything, including any extra keyword arguments, is forwarded
    # straight to numpy.diff.
    return np.diff(arr, n=n, axis=axis, **kwargs)
def moment(arr, moment=1, axis=0, **kwargs):
    '''
    Nth central moment about the mean, computed along `axis`.

    Thin wrapper around scipy.stats.moment. For a 2D spectrogram with time
    along axis 0 and frequency along axis 1:

    axis=0: the Nth moment of each frequency bin (computed along time).
    axis=1: the Nth moment of each time sample (computed along frequency).

    Example (2nd moment):
        x = np.array([[ 1.,  3.,  6., 10.],
                      [ 0.,  5.,  6.,  8.],
                      [ 2.,  6.,  9., 12.]])
        moment(x, moment=2, axis=0)
            -> array([0.66666667, 1.55555556, 2.0, 2.66666667])
        moment(x, moment=2, axis=1)
            -> array([11.5, 8.6875, 13.6875])

    For a 1D array (e.g. a projection of the spectrogram onto one axis),
    axis must be 0.
    '''
    # The `moment` parameter shadows this function's own name locally,
    # so bind it to a clearer local before delegating to scipy.
    order = moment
    return scipy.stats.moment(arr, moment=order, axis=axis, **kwargs)
def first_order_gradient(arr, axis=0):
    '''
    First-order forward-difference gradient of `arr` along `axis`.

    The forward difference shortens the array by one element along `axis`,
    so a trailing slice of zeros is appended to keep the output the same
    shape as the input.
    '''
    # np.diff with n=1 is exactly the first-order forward difference.
    forward_diff = np.diff(arr, n=1, axis=axis)
    # Pad with a zero slice at the end of `axis` to restore the input shape.
    pad_position = forward_diff.shape[axis]
    return np.insert(forward_diff, pad_position, 0, axis=axis)
def total_variation(arr):
    '''
    Total variation of `arr` along axis 0.

    For a 2D (N x M) spectrogram with time along axis 0, returns a
    length-M array: the 1D total variation in time of each frequency bin,

        Sum_i |arr[i+1, j] - arr[i, j]|

    For a 1D array, a scalar is returned. With 2D input it is common to
    take the mean of the returned array as a single scalar feature.
    '''
    step_changes = np.diff(arr, axis=0)      # successive differences in time
    return np.abs(step_changes).sum(axis=0)  # accumulate their magnitudes
def maximum_variation(arr):
    '''
    Range (max - min) of `arr` taken along axis 0.

    For a 1D array a scalar is returned; for a 2D (N x M) array an array
    of length M is returned, one range per column.
    '''
    column_max = np.max(arr, axis=0)
    column_min = np.min(arr, axis=0)
    return column_max - column_min
def tv_2d_isotropic(grad_0_arr, grad_1_arr):
    '''
    Isotropic 2D total variation from per-axis gradient arrays.

    `grad_0_arr` and `grad_1_arr` are the gradients of a 2D array along
    its 0th and 1st axes (first-order forward differences, or the
    second-order central gradient). Returns the scalar sum over all
    elements of sqrt(g0^2 + g1^2).
    '''
    gradient_magnitude = np.sqrt(grad_0_arr ** 2 + grad_1_arr ** 2)
    return gradient_magnitude.sum()
def entropy(p, w):
    '''
    Entropy of a discrete PDF given as a histogram `p` with bin widths `w`.

    Returns the tuple (h_p, h_max), both in natural units (nats):

        h_p   = Sum_i  -p_i * ln(p_i / w_i)
        h_max = ln( Sum_i w_i )

    Both `p` and `w` must be numpy arrays. If `p` is density-normalized
    (Sum p_i * w_i = 1), then h_p / h_max lies in [0, 1]: a completely
    flat (uniform) distribution gives 1, indicating maximum disorder,
    while a single fully-occupied bin gives 0, indicating no disorder.

    Typical SETI use: histogram a spectrogram's power values and look for
    a normalized entropy below 1, which suggests a non-noise signal:

        bin_edges = range(0, int(spectrogram.max()) + 2)
        p, _ = np.histogram(spectrogram.flatten(), bins=bin_edges, density=True)
        w = np.diff(bin_edges)
        h_p, h_max = ibmseti.features.entropy(p, w)

    If you fix the histogram range instead, clip the data first with
    np.clip so values beyond the last bin edge are counted in the final
    bin rather than silently dropped. The result depends on the binning
    choice, so keep it consistent across spectrograms; the bin range, the
    spectrogram min/max, and the (log) number of bins are themselves
    useful companion features. Automatic binning (numpy's 'rice'/'fd',
    astroML Bayesian Blocks) tends to smooth away disorder and has given
    hard-to-interpret normalized entropies in testing — use with caution.

    If `p` is NOT a PDF, you may pass w=None; unit bin widths are then
    assumed and interpretation of the result is up to the caller, e.g.

        h_p, _ = ibmseti.features.entropy(spectrogram.flatten(), None)
    '''
    if w is None:
        # Without bin widths, treat every bin as having unit width.
        w = np.ones(len(p))
    terms = []
    for p_i, w_i in zip(p, w):
        # Empty bins contribute nothing (lim p->0 of -p*ln(p) = 0).
        terms.append(-p_i * math.log(p_i / w_i) if p_i else 0)
    h_p = np.sum(terms)
    h_max = math.log(np.sum(w))
    return h_p, h_max
def asymmetry(spectrogram_L, spectrogram_R):
    '''
    Normalized polarization asymmetry of two spectrograms.

    Returns (L - R) / (L + R) element-wise for the left- and
    right-polarization spectrograms of one observation. Integrating the
    result shows whether signal power is balanced between polarizations
    (values near 0) or concentrated in one (near +1 for L, -1 for R).

    Both inputs must be numpy arrays. NOTE(review): bins where
    L + R == 0 divide by zero and follow numpy's usual nan/inf rules.
    '''
    numerator = spectrogram_L - spectrogram_R
    denominator = spectrogram_L + spectrogram_R
    return numerator / denominator
|
ibm-watson-data-lab/ibmseti | ibmseti/features.py | moment | python | def moment(arr, moment=1, axis=0, **kwargs):
'''
Uses the scipy.stats.moment to calculate the Nth central
moment about the mean.
If `arr` is a 2D spectrogram returned by
ibmseti.dsp.raw_to_spectrogram(data), where each row
of the `arr` is a power spectrum at a particular time,
this function, then the Nth moment along each axis
will be computed.
If axis = 0, then Nth moment for the data in each
frequency bin will be computed. (The calculation is done
*along* the 0th axis, which is the time axis.)
If axis = 1, then Nth moment for the data in each
time bin will be computed. (The calculation is done
*along* the 1st axis, which is the frequency axis.)
For example, consider the 2nd moment:
//each column is a frequency bin
x = array([[ 1., 3., 6., 10.], //each row is a time sample
[ 0., 5., 6., 8.],
[ 2., 6., 9., 12.]])
ibmseti.features.mement(x, moment=2, axis=0) //the returned array is of size 4, the number of columns / frequency bins.
>>> array([ 0.66666667, 1.55555556, 2., 2.66666667])
ibmseti.features.mement(x, moment=2, axis=1) //the returned array is of size 3, the number of rows / time bins.
>>> array([ 11.5 , 8.6875, 13.6875])
If `arr` is a 1D array, such as what you'd get if you projected
the spectrogram onto the time or frequency axis, then you must
use axis=0.
'''
return scipy.stats.moment(arr, moment=moment, axis=axis, **kwargs) | Uses the scipy.stats.moment to calculate the Nth central
moment about the mean.
If `arr` is a 2D spectrogram returned by
ibmseti.dsp.raw_to_spectrogram(data), where each row
of the `arr` is a power spectrum at a particular time,
this function, then the Nth moment along each axis
will be computed.
If axis = 0, then Nth moment for the data in each
frequency bin will be computed. (The calculation is done
*along* the 0th axis, which is the time axis.)
If axis = 1, then Nth moment for the data in each
time bin will be computed. (The calculation is done
*along* the 1st axis, which is the frequency axis.)
For example, consider the 2nd moment:
//each column is a frequency bin
x = array([[ 1., 3., 6., 10.], //each row is a time sample
[ 0., 5., 6., 8.],
[ 2., 6., 9., 12.]])
ibmseti.features.mement(x, moment=2, axis=0) //the returned array is of size 4, the number of columns / frequency bins.
>>> array([ 0.66666667, 1.55555556, 2., 2.66666667])
ibmseti.features.mement(x, moment=2, axis=1) //the returned array is of size 3, the number of rows / time bins.
>>> array([ 11.5 , 8.6875, 13.6875])
If `arr` is a 1D array, such as what you'd get if you projected
the spectrogram onto the time or frequency axis, then you must
use axis=0. | train | https://github.com/ibm-watson-data-lab/ibmseti/blob/3361bc0adb4770dc7a554ed7cda292503892acee/ibmseti/features.py#L98-L135 | null | # Copyright (c) 2017 IBM. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This module contains some of the standard features that are extracted from
spectrograms and auto-correlation calculations from the raw SETI data.
Some functions are merely wrappers around Numpy-based operations, but
contain documentation that explicitly show how they are used with SETI data.
'''
import numpy as np
import scipy.stats
import math
def difference(arr, n=1, axis=0, **kwargs):
    '''
    Nth discrete difference of `arr` along `axis` (wraps numpy.diff).

    For a 2D spectrogram from ibmseti.dsp.raw_to_spectrogram(data):

    axis=0, n=1: first difference between subsequent time samples.
    axis=1, n=1: first difference between adjacent frequency bins.

    Example:
        x = np.array([[1, 3, 6, 10],
                      [0, 5, 6, 8],
                      [2, 6, 9, 12]])
        difference(x, axis=1) -> [[2, 3, 4], [5, 1, 2], [4, 3, 3]]
        difference(x, axis=0) -> [[-1, 2, 0, -2], [2, 1, 3, 4]]
    '''
    # Everything, including any extra keyword arguments, is forwarded
    # straight to numpy.diff.
    return np.diff(arr, n=n, axis=axis, **kwargs)
def projection(arr, axis=0, **kwargs):
    '''
    Project a 2D spectrogram onto one axis by summing along the other.

    `arr` is typically a 2D spectrogram from ibmseti.dsp.raw_to_spectrogram(data),
    where each row is a power spectrum at a particular time.

    axis=0: project onto the frequency axis (the sum runs along time).
    axis=1: project onto the time axis (the sum runs along frequency).

    Example:
        x = np.array([[1, 3, 6, 10],
                      [0, 5, 6, 8],
                      [2, 6, 9, 12]])
        projection(x, axis=1)  ->  array([20, 19, 29])
        projection(x, axis=0)  ->  array([ 3, 14, 21, 30])

    Extra keyword arguments (for example `keepdims`) are forwarded
    unchanged to numpy.sum.
    '''
    # A "projection" here is simply a sum over the complementary axis.
    summed = np.sum(arr, axis=axis, **kwargs)
    return summed
def first_order_gradient(arr, axis=0):
    '''
    First-order forward-difference gradient of `arr` along `axis`.

    The forward difference shortens the array by one element along `axis`,
    so a trailing slice of zeros is appended to keep the output the same
    shape as the input.
    '''
    # np.diff with n=1 is exactly the first-order forward difference.
    forward_diff = np.diff(arr, n=1, axis=axis)
    # Pad with a zero slice at the end of `axis` to restore the input shape.
    pad_position = forward_diff.shape[axis]
    return np.insert(forward_diff, pad_position, 0, axis=axis)
def total_variation(arr):
    '''
    Total variation of `arr` along axis 0.

    For a 2D (N x M) spectrogram with time along axis 0, returns a
    length-M array: the 1D total variation in time of each frequency bin,

        Sum_i |arr[i+1, j] - arr[i, j]|

    For a 1D array, a scalar is returned. With 2D input it is common to
    take the mean of the returned array as a single scalar feature.
    '''
    step_changes = np.diff(arr, axis=0)      # successive differences in time
    return np.abs(step_changes).sum(axis=0)  # accumulate their magnitudes
def maximum_variation(arr):
    '''
    Range (max - min) of `arr` taken along axis 0.

    For a 1D array a scalar is returned; for a 2D (N x M) array an array
    of length M is returned, one range per column.
    '''
    column_max = np.max(arr, axis=0)
    column_min = np.min(arr, axis=0)
    return column_max - column_min
def tv_2d_isotropic(grad_0_arr, grad_1_arr):
    '''
    Isotropic 2D total variation from per-axis gradient arrays.

    `grad_0_arr` and `grad_1_arr` are the gradients of a 2D array along
    its 0th and 1st axes (first-order forward differences, or the
    second-order central gradient). Returns the scalar sum over all
    elements of sqrt(g0^2 + g1^2).
    '''
    gradient_magnitude = np.sqrt(grad_0_arr ** 2 + grad_1_arr ** 2)
    return gradient_magnitude.sum()
def entropy(p, w):
    '''
    Entropy of a discrete PDF given as a histogram `p` with bin widths `w`.

    Returns the tuple (h_p, h_max), both in natural units (nats):

        h_p   = Sum_i  -p_i * ln(p_i / w_i)
        h_max = ln( Sum_i w_i )

    Both `p` and `w` must be numpy arrays. If `p` is density-normalized
    (Sum p_i * w_i = 1), then h_p / h_max lies in [0, 1]: a completely
    flat (uniform) distribution gives 1, indicating maximum disorder,
    while a single fully-occupied bin gives 0, indicating no disorder.

    Typical SETI use: histogram a spectrogram's power values and look for
    a normalized entropy below 1, which suggests a non-noise signal:

        bin_edges = range(0, int(spectrogram.max()) + 2)
        p, _ = np.histogram(spectrogram.flatten(), bins=bin_edges, density=True)
        w = np.diff(bin_edges)
        h_p, h_max = ibmseti.features.entropy(p, w)

    If you fix the histogram range instead, clip the data first with
    np.clip so values beyond the last bin edge are counted in the final
    bin rather than silently dropped. The result depends on the binning
    choice, so keep it consistent across spectrograms; the bin range, the
    spectrogram min/max, and the (log) number of bins are themselves
    useful companion features. Automatic binning (numpy's 'rice'/'fd',
    astroML Bayesian Blocks) tends to smooth away disorder and has given
    hard-to-interpret normalized entropies in testing — use with caution.

    If `p` is NOT a PDF, you may pass w=None; unit bin widths are then
    assumed and interpretation of the result is up to the caller, e.g.

        h_p, _ = ibmseti.features.entropy(spectrogram.flatten(), None)
    '''
    if w is None:
        # Without bin widths, treat every bin as having unit width.
        w = np.ones(len(p))
    terms = []
    for p_i, w_i in zip(p, w):
        # Empty bins contribute nothing (lim p->0 of -p*ln(p) = 0).
        terms.append(-p_i * math.log(p_i / w_i) if p_i else 0)
    h_p = np.sum(terms)
    h_max = math.log(np.sum(w))
    return h_p, h_max
def asymmetry(spectrogram_L, spectrogram_R):
'''
returns (spectogram_L - spectrogram_R) / (spectogram_L + spectrogram_R)
The asymmetry measure should be used for both polarizations recorded for a particular observation.
On can then perform analysis such as integrating the returned spectrogram to determine if the
asymmetry of the signals are close to zero, indicating equal signal in both polarizations, or
close to +-1, indicating a strong asymmetry in the L or R polarization.
spectrogram_L and spectrogram_R should be Numpy arrays.
'''
return (spectrogram_L - spectrogram_R) / (spectrogram_L + spectrogram_R)
|
ibm-watson-data-lab/ibmseti | ibmseti/features.py | first_order_gradient | python | def first_order_gradient(arr, axis=0):
'''
Returns the gradient of arr along a particular axis using
the first order forward-difference.
Additionally, the result is padded with 0 so that the
returned array is the same shape as in input array.
'''
grad_arr = difference(arr, n=1, axis=axis)
return np.insert(grad_arr, grad_arr.shape[axis], 0, axis=axis) | Returns the gradient of arr along a particular axis using
the first order forward-difference.
Additionally, the result is padded with 0 so that the
returned array is the same shape as in input array. | train | https://github.com/ibm-watson-data-lab/ibmseti/blob/3361bc0adb4770dc7a554ed7cda292503892acee/ibmseti/features.py#L138-L146 | [
"def difference(arr, n=1, axis=0, **kwargs):\n '''\n Assuming that `arr` is a 2D spectrogram returned by\n ibmseti.dsp.raw_to_spectrogram(data), this function\n uses the Numpy.diff function to calculate the nth\n difference along either time or frequency.\n\n If axis = 0 and n=1, then the first difference is ... | # Copyright (c) 2017 IBM. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This module contains some of the standard features that are extracted from
spectrograms and auto-correlation calculations from the raw SETI data.
Some functions are merely wrappers around Numpy-based operations, but
contain documentation that explicitly show how they are used with SETI data.
'''
import numpy as np
import scipy.stats
import math
def difference(arr, n=1, axis=0, **kwargs):
    '''
    Thin wrapper around numpy.diff for SETI spectrograms.

    Given a 2D spectrogram returned by ibmseti.dsp.raw_to_spectrogram(data)
    (time along axis 0, frequency along axis 1), computes the n-th discrete
    difference of `arr` along `axis`:

      * axis=0, n=1 -> first difference between consecutive time samples
      * axis=1, n=1 -> first difference between neighbouring frequency bins

    Example:

        x = np.array([[1, 3, 6, 10],
                      [0, 5, 6, 8],
                      [2, 6, 9, 12]])
        difference(x, axis=1)  # -> [[2, 3, 4], [5, 1, 2], [4, 3, 3]]
        difference(x, axis=0)  # -> [[-1, 2, 0, -2], [2, 1, 3, 4]]

    Any extra keyword arguments are forwarded to numpy.diff unchanged.
    '''
    # Fold n/axis into kwargs so the numpy call stays a single expression.
    kwargs.update(n=n, axis=axis)
    return np.diff(arr, **kwargs)
def projection(arr, axis=0, **kwargs):
    '''
    Collapse a 2D spectrogram onto one axis by summing along the other.

    Assuming `arr` is a spectrogram from ibmseti.dsp.raw_to_spectrogram(data)
    with each row a power spectrum at one time step:

      * axis=0 sums over time, projecting onto the frequency axis
      * axis=1 sums over frequency, projecting onto the time axis

    Example:

        x = np.array([[1, 3, 6, 10],
                      [0, 5, 6, 8],
                      [2, 6, 9, 12]])
        projection(x, axis=1)  # -> [20, 19, 29]
        projection(x, axis=0)  # -> [3, 14, 21, 30]

    Extra keyword arguments (e.g. `keepdims`) are passed straight to
    numpy.sum.
    '''
    collapsed = np.sum(arr, axis=axis, **kwargs)
    return collapsed
def moment(arr, moment=1, axis=0, **kwargs):
    '''
    Nth central moment about the mean, computed with scipy.stats.moment.

    For a 2D spectrogram (rows = power spectra at successive times):

      * axis=0 computes the moment of each frequency bin (along time)
      * axis=1 computes the moment of each time bin (along frequency)

    Example (2nd moment):

        x = np.array([[1., 3., 6., 10.],
                      [0., 5., 6., 8.],
                      [2., 6., 9., 12.]])
        moment(x, moment=2, axis=0)  # -> [0.667, 1.556, 2.0, 2.667] (one per column)
        moment(x, moment=2, axis=1)  # -> [11.5, 8.6875, 13.6875]    (one per row)

    For a 1D input (e.g. a projection of the spectrogram) use axis=0.
    Extra keyword arguments go straight to scipy.stats.moment.
    '''
    central = scipy.stats.moment(arr, moment=moment, axis=axis, **kwargs)
    return central
def total_variation(arr):
    '''
    Total variation along the time axis (axis 0):

        Sum_i |arr[i+1, j] - arr[i, j]|

    For a 2D (N x M) spectrogram with time along axis 0, returns a
    length-M array (one value per frequency bin); taking the mean of that
    array is a common way to obtain a scalar feature.  For a 1D input a
    single scalar is returned.
    '''
    steps = np.diff(arr, axis=0)       # consecutive differences in time
    return np.abs(steps).sum(axis=0)   # accumulate their magnitudes
def maximum_variation(arr):
    '''
    Peak-to-peak range along axis 0:

        np.max(arr, axis=0) - np.min(arr, axis=0)

    Returns a scalar for a 1D input, or a length-M array for an N x M input.
    '''
    hi = np.max(arr, axis=0)
    lo = np.min(arr, axis=0)
    return hi - lo
def tv_2d_isotropic(grad_0_arr, grad_1_arr):
    '''
    Isotropic 2D total variation computed from the two gradient fields.

    `grad_0_arr` and `grad_1_arr` are the gradients of the same 2D array
    along axes 0 and 1 respectively (either the first-order forward
    difference or a second-order central gradient).  Returns the scalar

        Sum sqrt(g0**2 + g1**2)

    taken over all elements.
    '''
    squared_magnitude = grad_0_arr ** 2 + grad_1_arr ** 2
    return np.sum(np.sqrt(squared_magnitude))
def entropy(p, w):
    '''
    Entropy of a histogram `p` with bin widths `w`, in natural units.

    Computes

        h_p   = Sum_i  -p_i * ln(p_i / w_i)    (bins with p_i == 0 contribute 0)
        h_max = ln( Sum_i w_i )

    and returns the tuple (h_p, h_max).  Both `p` and `w` must be numpy
    arrays.  When `p` is a normalized PDF (Sum p_i * w_i == 1), the ratio
    h_p / h_max lies in [0, 1]: it is 1 for a perfectly flat (uniform)
    distribution -- maximum disorder -- and 0 when all probability sits in
    a single bin.

    Typical SETI use: histogram the power values of a spectrogram, e.g.

        p, bin_edges = np.histogram(spectrogram.flatten(), bins=50, density=True)
        w = np.diff(bin_edges)
        h_p, h_max = entropy(p, w)

    A pure-noise spectrogram yields a nearly flat histogram and a
    normalized entropy close to 1; a significant signal pulls it below 1.
    The result depends on the binning, so keep the bin scheme fixed when
    comparing spectrograms, and use numpy.clip first if you histogram over
    a fixed value range (so out-of-range values are counted, not dropped).
    Suggested companion features: bin range, spectrogram min/max, number of
    bins, log(number of bins), entropy, max entropy, normalized entropy.
    Automatic/Bayesian-Block binning is possible but tends to smooth away
    disorder and is slow; interpret those results with care.

    Pass `w = None` to treat every bin width as 1.  If `p` is then not a
    PDF (e.g. raw spectrogram values), interpreting the result is up to
    the caller.
    '''
    if w is None:
        # Unit bin widths: h_p reduces to Sum -p_i * ln(p_i).
        w = np.ones(len(p))
    # Skip empty bins entirely; the limit p->0 of -p*ln(p/w) is 0.
    terms = (-p_i * math.log(p_i / w_i) for p_i, w_i in zip(p, w) if p_i)
    h_p = sum(terms)
    h_max = math.log(np.sum(w))
    return h_p, h_max
def asymmetry(spectrogram_L, spectrogram_R):
    '''
    Element-wise polarization asymmetry:

        (spectrogram_L - spectrogram_R) / (spectrogram_L + spectrogram_R)

    Apply to the two polarizations recorded for an observation.  One can
    then, for example, integrate the returned spectrogram: values near 0
    indicate equal signal in both polarizations, while values near +-1
    indicate a strong asymmetry toward the L or R polarization.

    Both inputs must be numpy arrays.
    '''
    numerator = spectrogram_L - spectrogram_R
    denominator = spectrogram_L + spectrogram_R
    return numerator / denominator
|
ibm-watson-data-lab/ibmseti | ibmseti/features.py | total_variation | python | def total_variation(arr):
'''
If arr is a 2D array (N X M), assumes that arr is a spectrogram with time along axis=0.
Calculates the 1D total variation in time for each frequency and returns an array
of size M.
If arr is a 1D array, calculates total variation and returns a scalar.
Sum ( Abs(arr_i+1,j - arr_ij) )
If arr is a 2D array, it's common to take the mean of the resulting M-sized array
to calculate a scalar feature.
'''
return np.sum(np.abs(np.diff(arr, axis=0)), axis=0) | If arr is a 2D array (N X M), assumes that arr is a spectrogram with time along axis=0.
Calculates the 1D total variation in time for each frequency and returns an array
of size M.
If arr is a 1D array, calculates total variation and returns a scalar.
Sum ( Abs(arr_i+1,j - arr_ij) )
If arr is a 2D array, it's common to take the mean of the resulting M-sized array
to calculate a scalar feature. | train | https://github.com/ibm-watson-data-lab/ibmseti/blob/3361bc0adb4770dc7a554ed7cda292503892acee/ibmseti/features.py#L149-L163 | null | # Copyright (c) 2017 IBM. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This module contains some of the standard features that are extracted from
spectrograms and auto-correlation calculations from the raw SETI data.
Some functions are merely wrappers around Numpy-based operations, but
contain documentation that explicitly show how they are used with SETI data.
'''
import numpy as np
import scipy.stats
import math
def difference(arr, n=1, axis=0, **kwargs):
'''
Assuming that `arr` is a 2D spectrogram returned by
ibmseti.dsp.raw_to_spectrogram(data), this function
uses the Numpy.diff function to calculate the nth
difference along either time or frequency.
If axis = 0 and n=1, then the first difference is taken
between subsequent time samples
If axis = 1 and n=1, then the first difference is taken
between frequency bins.
For example:
//each column is a frequency bin
x = np.array([
[ 1, 3, 6, 10], //each row is a time sample
[ 0, 5, 6, 8],
[ 2, 6, 9, 12]])
ibmseti.features.first_difference(x, axis=1)
>>> array([[2, 3, 4],
[5, 1, 2],
[4, 3, 3]])
ibmseti.features.first_difference(x, axis=0)
>>> array([[-1, 2, 0, -2],
[ 2, 1, 3, 4]])
'''
return np.diff(arr, n=n, axis=axis, **kwargs)
def projection(arr, axis=0, **kwargs):
'''
Assuming that `arr` is a 2D spectrogram returned by
ibmseti.dsp.raw_to_spectrogram(data), where each row
of the `arr` is a power spectrum at a particular time,
this function uses the numpy.sum function to project the
data onto the time or frequency axis into a 1D array.
If axis = 0, then the projection is onto the frequency axis
(the sum is along the time axis)
If axis = 1, then the projection is onto the time axis.
(the sum is along the frequency axis)
For example:
//each column is a frequency bin
x = np.array([
[ 1, 3, 6, 10], //each row is a time sample
[ 0, 5, 6, 8],
[ 2, 6, 9, 12]])
ibmseti.features.projection(x, axis=1)
>>> array([20, 19, 29])
ibmseti.features.projection(x, axis=0)
>>> array([ 3, 14, 21, 30])
One interesting kwarg that you may wish to use is `keepdims`.
See the documentation on numpy.sum for more information.
'''
return np.sum(arr, axis=axis, **kwargs)
def moment(arr, moment=1, axis=0, **kwargs):
'''
Uses the scipy.stats.moment to calculate the Nth central
moment about the mean.
If `arr` is a 2D spectrogram returned by
ibmseti.dsp.raw_to_spectrogram(data), where each row
of the `arr` is a power spectrum at a particular time,
this function, then the Nth moment along each axis
will be computed.
If axis = 0, then Nth moment for the data in each
frequency bin will be computed. (The calculation is done
*along* the 0th axis, which is the time axis.)
If axis = 1, then Nth moment for the data in each
time bin will be computed. (The calculation is done
*along* the 1st axis, which is the frequency axis.)
For example, consider the 2nd moment:
//each column is a frequency bin
x = array([[ 1., 3., 6., 10.], //each row is a time sample
[ 0., 5., 6., 8.],
[ 2., 6., 9., 12.]])
ibmseti.features.mement(x, moment=2, axis=0) //the returned array is of size 4, the number of columns / frequency bins.
>>> array([ 0.66666667, 1.55555556, 2., 2.66666667])
ibmseti.features.mement(x, moment=2, axis=1) //the returned array is of size 3, the number of rows / time bins.
>>> array([ 11.5 , 8.6875, 13.6875])
If `arr` is a 1D array, such as what you'd get if you projected
the spectrogram onto the time or frequency axis, then you must
use axis=0.
'''
return scipy.stats.moment(arr, moment=moment, axis=axis, **kwargs)
def first_order_gradient(arr, axis=0):
    '''
    First-order forward-difference gradient of `arr` along `axis`.

    The result is padded with a trailing zero slice along `axis` so the
    returned array has the same shape as the input.
    '''
    # np.diff with n=1 is exactly what difference(arr, n=1, axis=axis)
    # forwards to; inlined here so the block is self-contained.
    forward_diff = np.diff(arr, n=1, axis=axis)
    # Append a zero slice at the end of `axis` to restore the input shape.
    return np.insert(forward_diff, forward_diff.shape[axis], 0, axis=axis)
def maximum_variation(arr):
'''
return np.max(arr, axis=0) - np.min(arr, axis=0)
If `arr` is a 1D array, a scalar is returned.
If `arr` is a 2D array (N x M), an array of length M is returned.
'''
return np.max(arr, axis=0) - np.min(arr, axis=0)
def tv_2d_isotropic(grad_0_arr, grad_1_arr):
'''
Calculates the Total Variation
Assumes a 2D array.
grad_0_arr is the gradient along the 0th axis of arr.
grad_1_arr is the gradient along the 1st axis of arr.
You can use the 1st order forward-difference measure
of the gradient (the standard calculation). Or you
can use the second_order central gradient.
'''
return np.sqrt(grad_0_arr**2 + grad_1_arr**2).sum()
def entropy(p, w):
'''
Computes the entropy for a discrete probability distribution function, as
represented by a histogram, `p`, with bin sizes `w`,
h_p = Sum -1 * p_i * ln(p_i / w_i)
Also computes the maximum allowed entropy for a histogram with bin sizes `w`.
h_max = ln( Sum w_i )
and returns both as a tuple (h_p , h_max ). The entropy is in 'natural' units.
Both `p` and `w` must be Numpy arrays.
If `p` is normalized to 1 ( Sum p_i * w_i = 1), then
the normalized entropy is equal toh_p / h_max and will
be in the range [0, 1].
For example, if `p` is a completely flat PDF (a uniform distribution), then
the normalized entropy will equal 1, indicating maximum amount of disorder.
(This is easily shown for the case where w_i = 1.)
If the `p_i` is zero for all i except j and p_j = 1, then the entropy will be 0,
indicating no disorder.
One can use this entropy measurement to search for signals in the spectrogram.
First we need to build a histogram of the measured power values in the spectrogram.
This histogram represents an estimate of the probability distribution function of the
observed power in the spectrogram.
If the spectrogram is entirely noise, the resulting histogram should be quite flat and
the normalized entropy ( h_p / h_max ) will approach 1. If there is a significant signal
in the spectrogram, then the histogram will not be flat and the normalized entropy will
be less than 1.
The decision that needs to be made is the number of bins and the bin size. And unfortunately,
the resulting entropy calculated will depend on the binning.
Based on testing and interpretibility, we recommend to use a fixed number of bins that either
span the full range of the power values in the spectrogram (0 to spectrogram.max()),
or span a fixed range (for example, from 0 to 500).
For example, you may set the range equal to the range of the values in the spectrogram.
bin_edges = range(0,int(spectrogram.max()) + 2) #add 1 to round up, and one to set the right bin edge.
p, _ = np.histogram(spectrogram.flatten(), bins=bin_edges, density=True)
w = np.diff(bin_edges)
h_p, h_max = ibmseti.features.entropy(p,w)
If you choose to fix the range of the histogram, it is highly recommended that you use
`numpy.clip` to ensure that any of the values in the spectrogram that are greater than
your largest bin are not thrown away!
For example, if you decide on a fixed range between 0 and 500, and your spectrogram
contains a value of 777, the following code would produce a histogram where that 777 value
is not present in the count.
bin_edges = range(0,501)
p, _ = np.histogram(spectrogram.flatten(), bins=bin_edges, density=True)
w = np.diff(bin_edges)
h_p, h_max = ibmseti.features.entropy(p,w)
But if you clip the spectrogram, you can interpret the last bin as being "the number
of spectrogram values equal to or greater than the lower bin edge".
bin_edges = range(0,501)
p, _ = np.histogram(np.clip(spectrogram.flatten(), 0, 500), bins=bin_edges, density=True)
w = np.diff(bin_edges)
h_p, h_max = ibmseti.features.entropy(p,w)
You can also choose to fix the number of bins
bins = 50
p, bin_edges = np.histogram(spectrogram.flatten(), bins=bins, density=True)
w = np.diff(bin_edges)
h_p, h_max = ibmseti.features.entropy(p,w)
It is suggested to use any of the following measures as features:
bin range, spectrogram.min, spectrogram.max, number_of_bins, log(number_of_bins)
entropy, max_entropy, normalized_entropy.
Automatic Binning:
While Numpy and AstroML offer ways of automatically binning the data, it is unclear if this
is a good approach for entropy calculation -- especially when wishing to compare the value
across different spectrogram. The automatic binning tends to remove disorder in
the set of values, making the histogram smoother and more ordered than the data actually are.
This is true of automatic binning with fixed sizes (such as with the 'rice', and 'fd' options in
numpy.histogram), or with the variable sized arrays as can be calculated with Bayesian Blocks
with astroML. However, nothing is ruled out. In preliminary testing,
the calculated entropy from a histogram calculated with Bayesian Block binning seemed to be more
sensitive to a simulated signal than using fixed binning. However, it's unclear how to
interpret the results because "h_p/h_max" *increased* with the presence of a signal and exceeded 1.
**It is likely that the calculation of h_max is done incorrectly. Please check my work!**
It may even be that the total number of bins created by the Bayesian Block method would
be a suitable feature. For a completely flat distribution, there will only be one bin. If the
data contains significant variation in power levels, the Bayesian Block method will produce more
bins. More testing is required and your mileage may vary.
import astroML.plotting
bin_edges = astroML.density_estimation.bayesian_blocks(spectrogram.flatten())
p, _ = np.histogram(spectrogram.flatten(), bins=bin_edges, density=True)
w = np.diff(bin_edges)
h_p, h_max = ibmseti.features.entropy(p,w)
Also to note: Using astroML.density_estimation.bayesian_blocks takes prohibitively long!
"Entropy" of raw data.
If `p` is NOT a PDF, then you're on your own to interpret the results. In this case, you
may set `w` = None and the calculation will assume w_i = 1 for all i.
For example,
h_p, _ = ibmseti.features.entropy(spectrogram.flatten(), None)
'''
if w is None:
w = np.ones(len(p))
h_p = np.sum([-x[0]*math.log(x[0]/x[1]) if x[0] else 0 for x in zip(p, w)])
h_max = math.log(np.sum(w))
return h_p, h_max
def asymmetry(spectrogram_L, spectrogram_R):
'''
returns (spectogram_L - spectrogram_R) / (spectogram_L + spectrogram_R)
The asymmetry measure should be used for both polarizations recorded for a particular observation.
On can then perform analysis such as integrating the returned spectrogram to determine if the
asymmetry of the signals are close to zero, indicating equal signal in both polarizations, or
close to +-1, indicating a strong asymmetry in the L or R polarization.
spectrogram_L and spectrogram_R should be Numpy arrays.
'''
return (spectrogram_L - spectrogram_R) / (spectrogram_L + spectrogram_R)
|
ibm-watson-data-lab/ibmseti | ibmseti/features.py | maximum_variation | python | def maximum_variation(arr):
'''
return np.max(arr, axis=0) - np.min(arr, axis=0)
If `arr` is a 1D array, a scalar is returned.
If `arr` is a 2D array (N x M), an array of length M is returned.
'''
return np.max(arr, axis=0) - np.min(arr, axis=0) | return np.max(arr, axis=0) - np.min(arr, axis=0)
If `arr` is a 1D array, a scalar is returned.
If `arr` is a 2D array (N x M), an array of length M is returned. | train | https://github.com/ibm-watson-data-lab/ibmseti/blob/3361bc0adb4770dc7a554ed7cda292503892acee/ibmseti/features.py#L166-L174 | null | # Copyright (c) 2017 IBM. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This module contains some of the standard features that are extracted from
spectrograms and auto-correlation calculations from the raw SETI data.
Some functions are merely wrappers around Numpy-based operations, but
contain documentation that explicitly show how they are used with SETI data.
'''
import numpy as np
import scipy.stats
import math
def difference(arr, n=1, axis=0, **kwargs):
'''
Assuming that `arr` is a 2D spectrogram returned by
ibmseti.dsp.raw_to_spectrogram(data), this function
uses the Numpy.diff function to calculate the nth
difference along either time or frequency.
If axis = 0 and n=1, then the first difference is taken
between subsequent time samples
If axis = 1 and n=1, then the first difference is taken
between frequency bins.
For example:
//each column is a frequency bin
x = np.array([
[ 1, 3, 6, 10], //each row is a time sample
[ 0, 5, 6, 8],
[ 2, 6, 9, 12]])
ibmseti.features.first_difference(x, axis=1)
>>> array([[2, 3, 4],
[5, 1, 2],
[4, 3, 3]])
ibmseti.features.first_difference(x, axis=0)
>>> array([[-1, 2, 0, -2],
[ 2, 1, 3, 4]])
'''
return np.diff(arr, n=n, axis=axis, **kwargs)
def projection(arr, axis=0, **kwargs):
'''
Assuming that `arr` is a 2D spectrogram returned by
ibmseti.dsp.raw_to_spectrogram(data), where each row
of the `arr` is a power spectrum at a particular time,
this function uses the numpy.sum function to project the
data onto the time or frequency axis into a 1D array.
If axis = 0, then the projection is onto the frequency axis
(the sum is along the time axis)
If axis = 1, then the projection is onto the time axis.
(the sum is along the frequency axis)
For example:
//each column is a frequency bin
x = np.array([
[ 1, 3, 6, 10], //each row is a time sample
[ 0, 5, 6, 8],
[ 2, 6, 9, 12]])
ibmseti.features.projection(x, axis=1)
>>> array([20, 19, 29])
ibmseti.features.projection(x, axis=0)
>>> array([ 3, 14, 21, 30])
One interesting kwarg that you may wish to use is `keepdims`.
See the documentation on numpy.sum for more information.
'''
return np.sum(arr, axis=axis, **kwargs)
def moment(arr, moment=1, axis=0, **kwargs):
'''
Uses the scipy.stats.moment to calculate the Nth central
moment about the mean.
If `arr` is a 2D spectrogram returned by
ibmseti.dsp.raw_to_spectrogram(data), where each row
of the `arr` is a power spectrum at a particular time,
this function, then the Nth moment along each axis
will be computed.
If axis = 0, then Nth moment for the data in each
frequency bin will be computed. (The calculation is done
*along* the 0th axis, which is the time axis.)
If axis = 1, then Nth moment for the data in each
time bin will be computed. (The calculation is done
*along* the 1st axis, which is the frequency axis.)
For example, consider the 2nd moment:
//each column is a frequency bin
x = array([[ 1., 3., 6., 10.], //each row is a time sample
[ 0., 5., 6., 8.],
[ 2., 6., 9., 12.]])
ibmseti.features.mement(x, moment=2, axis=0) //the returned array is of size 4, the number of columns / frequency bins.
>>> array([ 0.66666667, 1.55555556, 2., 2.66666667])
ibmseti.features.mement(x, moment=2, axis=1) //the returned array is of size 3, the number of rows / time bins.
>>> array([ 11.5 , 8.6875, 13.6875])
If `arr` is a 1D array, such as what you'd get if you projected
the spectrogram onto the time or frequency axis, then you must
use axis=0.
'''
return scipy.stats.moment(arr, moment=moment, axis=axis, **kwargs)
def first_order_gradient(arr, axis=0):
'''
Returns the gradient of arr along a particular axis using
the first order forward-difference.
Additionally, the result is padded with 0 so that the
returned array is the same shape as in input array.
'''
grad_arr = difference(arr, n=1, axis=axis)
return np.insert(grad_arr, grad_arr.shape[axis], 0, axis=axis)
def total_variation(arr):
'''
If arr is a 2D array (N X M), assumes that arr is a spectrogram with time along axis=0.
Calculates the 1D total variation in time for each frequency and returns an array
of size M.
If arr is a 1D array, calculates total variation and returns a scalar.
Sum ( Abs(arr_i+1,j - arr_ij) )
If arr is a 2D array, it's common to take the mean of the resulting M-sized array
to calculate a scalar feature.
'''
return np.sum(np.abs(np.diff(arr, axis=0)), axis=0)
def tv_2d_isotropic(grad_0_arr, grad_1_arr):
'''
Calculates the Total Variation
Assumes a 2D array.
grad_0_arr is the gradient along the 0th axis of arr.
grad_1_arr is the gradient along the 1st axis of arr.
You can use the 1st order forward-difference measure
of the gradient (the standard calculation). Or you
can use the second_order central gradient.
'''
return np.sqrt(grad_0_arr**2 + grad_1_arr**2).sum()
def entropy(p, w):
    """Entropy of a discrete PDF `p` given histogram bin widths `w`.

    Computes
        h_p   = Sum_i -p_i * ln(p_i / w_i)
    together with the maximum entropy attainable for bins of widths `w`,
        h_max = ln(Sum_i w_i),
    and returns the tuple (h_p, h_max) in natural units.

    `p` and `w` should be Numpy arrays.  If `p` is normalized to 1
    (Sum p_i * w_i = 1), then h_p / h_max lies in [0, 1]: 1 for a
    uniform distribution (maximum disorder), 0 when a single bin holds
    all the probability.

    This can be used to search for signals in a spectrogram: build a
    histogram of the spectrogram's power values (e.g. np.histogram with
    density=True; `w = np.diff(bin_edges)`) and inspect the normalized
    entropy.  Pure noise gives a nearly flat histogram and a value close
    to 1; a significant signal pulls it below 1.  The result depends on
    the binning, so fixed bin ranges are recommended; use np.clip so
    out-of-range values land in the last bin rather than being dropped.

    If `w` is None, unit bin widths are assumed; in that case `p` need
    not be a PDF and the interpretation is left to the caller.
    """
    if w is None:
        w = np.ones(len(p))
    # Bins with p_i == 0 contribute nothing (lim x->0 of x*ln(x) = 0).
    terms = [-p_i * math.log(p_i / w_i) if p_i else 0
             for p_i, w_i in zip(p, w)]
    h_p = np.sum(terms)
    h_max = math.log(np.sum(w))
    return h_p, h_max
def asymmetry(spectrogram_L, spectrogram_R):
    """Per-element polarization asymmetry of two spectrograms.

    Returns (L - R) / (L + R) elementwise for the two Numpy arrays,
    one per polarization of the same observation.  Analysing (e.g.
    integrating) the result shows whether the signal power is balanced
    between polarizations (values near 0) or concentrated in the L
    (near +1) or R (near -1) polarization.
    """
    numerator = spectrogram_L - spectrogram_R
    denominator = spectrogram_L + spectrogram_R
    return numerator / denominator
|
ibm-watson-data-lab/ibmseti | ibmseti/features.py | entropy | python | def entropy(p, w):
'''
Computes the entropy for a discrete probability distribution function, as
represented by a histogram, `p`, with bin sizes `w`,
h_p = Sum -1 * p_i * ln(p_i / w_i)
Also computes the maximum allowed entropy for a histogram with bin sizes `w`.
h_max = ln( Sum w_i )
and returns both as a tuple (h_p , h_max ). The entropy is in 'natural' units.
Both `p` and `w` must be Numpy arrays.
If `p` is normalized to 1 ( Sum p_i * w_i = 1), then
the normalized entropy is equal to h_p / h_max and will
be in the range [0, 1].
For example, if `p` is a completely flat PDF (a uniform distribution), then
the normalized entropy will equal 1, indicating maximum amount of disorder.
(This is easily shown for the case where w_i = 1.)
If the `p_i` is zero for all i except j and p_j = 1, then the entropy will be 0,
indicating no disorder.
One can use this entropy measurement to search for signals in the spectrogram.
First we need to build a histogram of the measured power values in the spectrogram.
This histogram represents an estimate of the probability distribution function of the
observed power in the spectrogram.
If the spectrogram is entirely noise, the resulting histogram should be quite flat and
the normalized entropy ( h_p / h_max ) will approach 1. If there is a significant signal
in the spectrogram, then the histogram will not be flat and the normalized entropy will
be less than 1.
The decision that needs to be made is the number of bins and the bin size. And unfortunately,
the resulting entropy calculated will depend on the binning.
Based on testing and interpretibility, we recommend to use a fixed number of bins that either
span the full range of the power values in the spectrogram (0 to spectrogram.max()),
or span a fixed range (for example, from 0 to 500).
For example, you may set the range equal to the range of the values in the spectrogram.
bin_edges = range(0,int(spectrogram.max()) + 2) #add 1 to round up, and one to set the right bin edge.
p, _ = np.histogram(spectrogram.flatten(), bins=bin_edges, density=True)
w = np.diff(bin_edges)
h_p, h_max = ibmseti.features.entropy(p,w)
If you choose to fix the range of the histogram, it is highly recommended that you use
`numpy.clip` to ensure that any of the values in the spectrogram that are greater than
your largest bin are not thrown away!
For example, if you decide on a fixed range between 0 and 500, and your spectrogram
contains a value of 777, the following code would produce a histogram where that 777 value
is not present in the count.
bin_edges = range(0,501)
p, _ = np.histogram(spectrogram.flatten(), bins=bin_edges, density=True)
w = np.diff(bin_edges)
h_p, h_max = ibmseti.features.entropy(p,w)
But if you clip the spectrogram, you can interpret the last bin as being "the number
of spectrogram values equal to or greater than the lower bin edge".
bin_edges = range(0,501)
p, _ = np.histogram(np.clip(spectrogram.flatten(), 0, 500), bins=bin_edges, density=True)
w = np.diff(bin_edges)
h_p, h_max = ibmseti.features.entropy(p,w)
You can also choose to fix the number of bins
bins = 50
p, bin_edges = np.histogram(spectrogram.flatten(), bins=bins, density=True)
w = np.diff(bin_edges)
h_p, h_max = ibmseti.features.entropy(p,w)
It is suggested to use any of the following measures as features:
bin range, spectrogram.min, spectrogram.max, number_of_bins, log(number_of_bins)
entropy, max_entropy, normalized_entropy.
Automatic Binning:
While Numpy and AstroML offer ways of automatically binning the data, it is unclear if this
is a good approach for entropy calculation -- especially when wishing to compare the value
across different spectrogram. The automatic binning tends to remove disorder in
the set of values, making the histogram smoother and more ordered than the data actually are.
This is true of automatic binning with fixed sizes (such as with the 'rice', and 'fd' options in
numpy.histogram), or with the variable sized arrays as can be calculated with Bayesian Blocks
with astroML. However, nothing is ruled out. In preliminary testing,
the calculated entropy from a histogram calculated with Bayesian Block binning seemed to be more
sensitive to a simulated signal than using fixed binning. However, it's unclear how to
interpret the results because "h_p/h_max" *increased* with the presence of a signal and exceeded 1.
**It is likely that the calculation of h_max is done incorrectly. Please check my work!**
It may even be that the total number of bins created by the Bayesian Block method would
be a suitable feature. For a completely flat distribution, there will only be one bin. If the
data contains significant variation in power levels, the Bayesian Block method will produce more
bins. More testing is required and your mileage may vary.
import astroML.plotting
bin_edges = astroML.density_estimation.bayesian_blocks(spectrogram.flatten())
p, _ = np.histogram(spectrogram.flatten(), bins=bin_edges, density=True)
w = np.diff(bin_edges)
h_p, h_max = ibmseti.features.entropy(p,w)
Also to note: Using astroML.density_estimation.bayesian_blocks takes prohibitively long!
"Entropy" of raw data.
If `p` is NOT a PDF, then you're on your own to interpret the results. In this case, you
may set `w` = None and the calculation will assume w_i = 1 for all i.
For example,
h_p, _ = ibmseti.features.entropy(spectrogram.flatten(), None)
'''
if w is None:
w = np.ones(len(p))
h_p = np.sum([-x[0]*math.log(x[0]/x[1]) if x[0] else 0 for x in zip(p, w)])
h_max = math.log(np.sum(w))
return h_p, h_max | Computes the entropy for a discrete probability distribution function, as
represented by a histogram, `p`, with bin sizes `w`,
h_p = Sum -1 * p_i * ln(p_i / w_i)
Also computes the maximum allowed entropy for a histogram with bin sizes `w`.
h_max = ln( Sum w_i )
and returns both as a tuple (h_p , h_max ). The entropy is in 'natural' units.
Both `p` and `w` must be Numpy arrays.
If `p` is normalized to 1 ( Sum p_i * w_i = 1), then
the normalized entropy is equal to h_p / h_max and will
be in the range [0, 1].
For example, if `p` is a completely flat PDF (a uniform distribution), then
the normalized entropy will equal 1, indicating maximum amount of disorder.
(This is easily shown for the case where w_i = 1.)
If the `p_i` is zero for all i except j and p_j = 1, then the entropy will be 0,
indicating no disorder.
One can use this entropy measurement to search for signals in the spectrogram.
First we need to build a histogram of the measured power values in the spectrogram.
This histogram represents an estimate of the probability distribution function of the
observed power in the spectrogram.
If the spectrogram is entirely noise, the resulting histogram should be quite flat and
the normalized entropy ( h_p / h_max ) will approach 1. If there is a significant signal
in the spectrogram, then the histogram will not be flat and the normalized entropy will
be less than 1.
The decision that needs to be made is the number of bins and the bin size. And unfortunately,
the resulting entropy calculated will depend on the binning.
Based on testing and interpretibility, we recommend to use a fixed number of bins that either
span the full range of the power values in the spectrogram (0 to spectrogram.max()),
or span a fixed range (for example, from 0 to 500).
For example, you may set the range equal to the range of the values in the spectrogram.
bin_edges = range(0,int(spectrogram.max()) + 2) #add 1 to round up, and one to set the right bin edge.
p, _ = np.histogram(spectrogram.flatten(), bins=bin_edges, density=True)
w = np.diff(bin_edges)
h_p, h_max = ibmseti.features.entropy(p,w)
If you choose to fix the range of the histogram, it is highly recommended that you use
`numpy.clip` to ensure that any of the values in the spectrogram that are greater than
your largest bin are not thrown away!
For example, if you decide on a fixed range between 0 and 500, and your spectrogram
contains a value of 777, the following code would produce a histogram where that 777 value
is not present in the count.
bin_edges = range(0,501)
p, _ = np.histogram(spectrogram.flatten(), bins=bin_edges, density=True)
w = np.diff(bin_edges)
h_p, h_max = ibmseti.features.entropy(p,w)
But if you clip the spectrogram, you can interpret the last bin as being "the number
of spectrogram values equal to or greater than the lower bin edge".
bin_edges = range(0,501)
p, _ = np.histogram(np.clip(spectrogram.flatten(), 0, 500), bins=bin_edges, density=True)
w = np.diff(bin_edges)
h_p, h_max = ibmseti.features.entropy(p,w)
You can also choose to fix the number of bins
bins = 50
p, bin_edges = np.histogram(spectrogram.flatten(), bins=bins, density=True)
w = np.diff(bin_edges)
h_p, h_max = ibmseti.features.entropy(p,w)
It is suggested to use any of the following measures as features:
bin range, spectrogram.min, spectrogram.max, number_of_bins, log(number_of_bins)
entropy, max_entropy, normalized_entropy.
Automatic Binning:
While Numpy and AstroML offer ways of automatically binning the data, it is unclear if this
is a good approach for entropy calculation -- especially when wishing to compare the value
across different spectrogram. The automatic binning tends to remove disorder in
the set of values, making the histogram smoother and more ordered than the data actually are.
This is true of automatic binning with fixed sizes (such as with the 'rice', and 'fd' options in
numpy.histogram), or with the variable sized arrays as can be calculated with Bayesian Blocks
with astroML. However, nothing is ruled out. In preliminary testing,
the calculated entropy from a histogram calculated with Bayesian Block binning seemed to be more
sensitive to a simulated signal than using fixed binning. However, it's unclear how to
interpret the results because "h_p/h_max" *increased* with the presence of a signal and exceeded 1.
**It is likely that the calculation of h_max is done incorrectly. Please check my work!**
It may even be that the total number of bins created by the Bayesian Block method would
be a suitable feature. For a completely flat distribution, there will only be one bin. If the
data contains significant variation in power levels, the Bayesian Block method will produce more
bins. More testing is required and your mileage may vary.
import astroML.plotting
bin_edges = astroML.density_estimation.bayesian_blocks(spectrogram.flatten())
p, _ = np.histogram(spectrogram.flatten(), bins=bin_edges, density=True)
w = np.diff(bin_edges)
h_p, h_max = ibmseti.features.entropy(p,w)
Also to note: Using astroML.density_estimation.bayesian_blocks takes prohibitively long!
"Entropy" of raw data.
If `p` is NOT a PDF, then you're on your own to interpret the results. In this case, you
may set `w` = None and the calculation will assume w_i = 1 for all i.
For example,
h_p, _ = ibmseti.features.entropy(spectrogram.flatten(), None) | train | https://github.com/ibm-watson-data-lab/ibmseti/blob/3361bc0adb4770dc7a554ed7cda292503892acee/ibmseti/features.py#L194-L323 | null | # Copyright (c) 2017 IBM. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This module contains some of the standard features that are extracted from
spectrograms and auto-correlation calculations from the raw SETI data.
Some functions are merely wrappers around Numpy-based operations, but
contain documentation that explicitly show how they are used with SETI data.
'''
import numpy as np
import scipy.stats
import math
def difference(arr, n=1, axis=0, **kwargs):
    """Nth discrete difference of a spectrogram along one axis.

    `arr` is typically a 2D spectrogram (one power spectrum per row, as
    produced by ibmseti.dsp.raw_to_spectrogram).  With axis=0 the
    difference runs between subsequent time samples; with axis=1 it
    runs between adjacent frequency bins.  Extra keyword arguments are
    forwarded to numpy.diff.

    Example (n=1):
        x = np.array([[1, 3, 6, 10],
                      [0, 5, 6, 8],
                      [2, 6, 9, 12]])
        difference(x, axis=1) -> [[2, 3, 4], [5, 1, 2], [4, 3, 3]]
        difference(x, axis=0) -> [[-1, 2, 0, -2], [2, 1, 3, 4]]
    """
    diffed = np.diff(arr, n=n, axis=axis, **kwargs)
    return diffed
def projection(arr, axis=0, **kwargs):
    """Project a 2D spectrogram onto the time or frequency axis.

    `arr` is a 2D spectrogram with one power spectrum per row.  Summing
    along axis=0 projects onto the frequency axis; summing along axis=1
    projects onto the time axis.  Either way a 1D array results.
    Keyword arguments (e.g. `keepdims`) are forwarded to numpy.sum.

    Example:
        x = np.array([[1, 3, 6, 10],
                      [0, 5, 6, 8],
                      [2, 6, 9, 12]])
        projection(x, axis=1) -> [20, 19, 29]
        projection(x, axis=0) -> [3, 14, 21, 30]
    """
    collapsed = np.sum(arr, axis=axis, **kwargs)
    return collapsed
def moment(arr, moment=1, axis=0, **kwargs):
    """Nth central moment (about the mean) along `axis`, via scipy.

    For a 2D spectrogram with time along axis 0: axis=0 computes the
    moment of each frequency bin's values over time (one result per
    column); axis=1 computes the moment of each time sample's values
    over frequency (one result per row).  For a 1D input use axis=0.
    Keyword arguments are forwarded to scipy.stats.moment.
    """
    return scipy.stats.moment(arr, moment=moment, axis=axis, **kwargs)
def first_order_gradient(arr, axis=0):
    '''
    Returns the gradient of arr along a particular axis using
    the first order forward-difference.
    Additionally, the result is padded with 0 so that the
    returned array is the same shape as in input array.
    '''
    # Forward difference shortens `arr` by one element along `axis`.
    grad_arr = difference(arr, n=1, axis=axis)
    # Append a trailing zero along `axis` to restore the input shape.
    return np.insert(grad_arr, grad_arr.shape[axis], 0, axis=axis)
def total_variation(arr):
    '''
    If arr is a 2D array (N X M), assumes that arr is a spectrogram with time along axis=0.
    Calculates the 1D total variation in time for each frequency and returns an array
    of size M.
    If arr is a 1D array, calculates total variation and returns a scalar.
    Sum ( Abs(arr_i+1,j - arr_ij) )
    If arr is a 2D array, it's common to take the mean of the resulting M-sized array
    to calculate a scalar feature.
    '''
    # Sum of absolute first differences along the time axis (axis 0).
    return np.sum(np.abs(np.diff(arr, axis=0)), axis=0)
def maximum_variation(arr):
    """Peak-to-peak range of `arr` along axis 0.

    Equivalent to np.max(arr, axis=0) - np.min(arr, axis=0):
    a scalar for a 1D input, an M-length array for an (N x M) input.
    """
    highest = np.max(arr, axis=0)
    lowest = np.min(arr, axis=0)
    return highest - lowest
def tv_2d_isotropic(grad_0_arr, grad_1_arr):
    '''
    Calculates the Total Variation
    Assumes a 2D array.
    grad_0_arr is the gradient along the 0th axis of arr.
    grad_1_arr is the gradient along the 1st axis of arr.
    You can use the 1st order forward-difference measure
    of the gradient (the standard calculation). Or you
    can use the second_order central gradient.
    '''
    # Scalar sum of pointwise gradient magnitudes sqrt(g0^2 + g1^2).
    return np.sqrt(grad_0_arr**2 + grad_1_arr**2).sum()
def asymmetry(spectrogram_L, spectrogram_R):
    '''
    returns (spectogram_L - spectrogram_R) / (spectogram_L + spectrogram_R)
    The asymmetry measure should be used for both polarizations recorded for a particular observation.
    On can then perform analysis such as integrating the returned spectrogram to determine if the
    asymmetry of the signals are close to zero, indicating equal signal in both polarizations, or
    close to +-1, indicating a strong asymmetry in the L or R polarization.
    spectrogram_L and spectrogram_R should be Numpy arrays.
    '''
    # Elementwise normalized difference.  NOTE(review): a zero denominator
    # (both polarizations zero at a pixel) divides by zero -- callers
    # presumably pass strictly positive power spectrograms; confirm.
    return (spectrogram_L - spectrogram_R) / (spectrogram_L + spectrogram_R)
|
ibm-watson-data-lab/ibmseti | ibmseti/dsp.py | time_bins | python | def time_bins(header):
'''
Returns the time-axis lower bin edge values for the spectrogram.
'''
return np.arange(header['number_of_half_frames'], dtype=np.float64)*constants.bins_per_half_frame\
*(1.0 - header['over_sampling']) / header['subband_spacing_hz'] | Returns the time-axis lower bin edge values for the spectrogram. | train | https://github.com/ibm-watson-data-lab/ibmseti/blob/3361bc0adb4770dc7a554ed7cda292503892acee/ibmseti/dsp.py#L23-L28 | null | # Copyright (c) 2017 IBM. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Utilities to work with Spectrograms -- Power spectra v time (aka "waterfall" plots)
'''
import numpy as np
from . import constants
def frequency_bins(header):
    '''
    Returns the frequency-axis lower bin edge values for the spectrogram.
    '''
    # rf_center_frequency appears to be stored in MHz (x 1e6 -> Hz) -- TODO confirm.
    center_frequency = 1.0e6*header['rf_center_frequency']
    if header["number_of_subbands"] > 1:
        # Shift from one subband's center to the center of the full multi-subband span.
        center_frequency += header["subband_spacing_hz"]*(header["number_of_subbands"]/2.0 - 0.5)
    # fftfreq over the usable (non-oversampled) bins across all subbands,
    # fftshifted so frequencies ascend, then offset by the center frequency.
    return np.fft.fftshift(\
        np.fft.fftfreq( int(header["number_of_subbands"] * constants.bins_per_half_frame*(1.0 - header['over_sampling'])), \
            1.0/(header["number_of_subbands"]*header["subband_spacing_hz"])) + center_frequency
        )
def complex_to_fourier(cdata, over_sampling, norm=None):
    """Transform 3D complex time-domain data into fourier space.

    cdata: 3D complex array (shaped by subbands and half frames, as
        returned from Compamp.complex_data()).
    over_sampling: fraction of oversampling across subbands (typically
        0.25).  The oversampled edge frequencies are trimmed so that
        all frequency bins can be arranged next to each other.
    norm: None or "ortho"; forwarded to numpy.fft.fft.

    Returns the complex fourier data, fftshifted along the last axis so
    the central frequency sits at the center of the values.
    """
    spectrum = np.fft.fftshift(np.fft.fft(cdata, norm=norm), 2)
    if over_sampling > 0:
        # Drop the oversampled band edges symmetrically.
        trim = int(cdata.shape[2] * over_sampling / 2)
        spectrum = spectrum[:, :, trim:-trim]
    return spectrum
def fourier_to_time(fcdata, norm=None):
    """Convert a 2D fourier-space signal into a single 1D time series.

    fcdata: 2D complex fourier spectrum; axis 0 indexes "half frames"
        (typically 129) and axis 1 holds each half frame's spectrum,
        already fftshifted so the central frequency is at the center.
    norm: None or "ortho"; forwarded to numpy.fft.ifft.

    The inverse shift and inverse FFT are applied per half frame, then
    the rows are concatenated into one complex time series of length
    fcdata.shape[0] * fcdata.shape[1].
    """
    unshifted = np.fft.ifftshift(fcdata, 1)
    time_rows = np.fft.ifft(unshifted, norm=norm)
    return time_rows.reshape(fcdata.shape[0] * fcdata.shape[1])
def complex_to_power(cdata, over_sampling):
    """3D power spectrogram from 3D complex time-domain data.

    cdata: 3D complex array as returned from Compamp.complex_data()
        (shaped by subbands and half frames).
    over_sampling: fraction of oversampling across subbands (typically
        0.25); the oversampled band edges are discarded.

    Returns |FFT|^2 per half frame and subband, normalized by the
    number of samples and amplified by a factor of 15 (historical
    scaling; the origin of the 15 is unclear in the source).  The
    result is usually flattened with reshape_to_2d for display.
    """
    spectrum = complex_to_fourier(cdata, over_sampling)
    scale = 15.0 / cdata.shape[2]
    return (spectrum.real ** 2 + spectrum.imag ** 2) * scale
def reshape_to_2d(arr):
    """Flatten the last two axes of a 3D array into one.

    Equivalent to arr.reshape((arr.shape[0], arr.shape[1]*arr.shape[2])).
    Useful for turning the 3D output of `complex_to_power` or the
    autocorrelation routines into a 2D array for display and analysis.
    """
    frames, subbands, bins = arr.shape
    return arr.reshape((frames, subbands * bins))
def compamp_to_spectrogram(compamp):
    '''
    Returns spectrogram, with each row containing the measured power spectrum for a XX second time sample.
    Using this function is shorthand for:
        aca = ibmseti.compamp.Compamp(raw_data)
        power = ibmseti.dsp.complex_to_power(aca.complex_data(), aca.header()['over_sampling'])
        spectrogram = ibmseti.dsp.reshape_to_2d(power)
    Example Usage:
        import ibmseti
        import matplotlib.pyplot as plt
        plt.ion()
        aca = ibmseti.compamp.Compamp(raw_data)
        spectrogram = ibmseti.dsp.compamp_to_spectrogram(aca)
        time_bins = ibmseti.dsp.time_bins( aca.header() )
        freq_bins = ibmseti.dsp.frequency_bins( aca.header() )
        fig, ax = plt.subplots()
        ax.pcolormesh(freq_bins, time_bins, spectrogram)
        #Time is on the horizontal axis and frequency is along the vertical.
    '''
    # |FFT|^2 per half frame and subband, then flatten the subbands side
    # by side into a 2D (time x frequency) spectrogram.
    power = complex_to_power(compamp.complex_data(), compamp.header()['over_sampling'])
    return reshape_to_2d(power)
def scale_to_png(arr):
    """Linearly rescale a numeric array to uint8 [0, 255] for PNG output.

    If the array contains negative values it is first shifted so its
    minimum becomes 0; values are then scaled so the maximum maps to
    255 and the result is cast to uint8.

    A flat input (all values equal after the shift) is returned as all
    zeros instead of producing NaN from a 0/0 division, which the
    original implementation did.
    """
    shifted = arr - arr.min() if arr.min() < 0 else arr
    peak = shifted.max()
    if peak == 0:
        # Avoid 0/0 -> NaN; an all-black image is the sensible output.
        return np.zeros(arr.shape, dtype=np.uint8)
    return np.clip(shifted * 255.0 / peak, 0, 255).astype(np.uint8)
def compamp_to_ac(compamp, window=np.hanning):
    '''
    Convert single or multi-subband compamps into an autocorrelation waterfall.

    compamp: object exposing complex_data() returning a 3D complex array
        (half frames x subbands x samples), e.g. ibmseti.compamp.Compamp.
    window: callable returning an N-point window (default np.hanning),
        applied to each half frame before transforming.

    Returns a 3D array of row-normalized autocorrelation magnitudes.
    Adapted from Gerry Harp at SETI.
    '''
    cdata = compamp.complex_data()

    # Window each half frame to smooth the sharp time-series start/end in
    # the frequency domain, then remove each row's mean.
    cdata = np.multiply(cdata, window(cdata.shape[2]))
    cdata_normal = cdata - cdata.mean(axis=2)[:, :, np.newaxis]

    # Zero-pad to 2N along the sample axis; the data occupies the upper half.
    cdata = np.zeros((cdata_normal.shape[0], cdata_normal.shape[1], 2 * cdata_normal.shape[2]), complex)
    # Bug fix: use integer (floor) division for the slice bounds -- the
    # original "/" yields floats under Python 3 and raises TypeError.
    cdata[:, :, cdata.shape[2]//2:cdata.shape[2] + cdata.shape[2]//2] = cdata_normal

    # Wiener-Khinchin: AC(x) = iFFT(|FFT(x)|^2), arranged via fft shifts.
    cdata = np.fft.fftshift(np.fft.fft(cdata), 2)
    cdata = cdata.real**2 + cdata.imag**2   # FFT(AC(x)) = |FFT(x)|^2
    cdata = np.fft.ifftshift(np.fft.ifft(cdata), 2)
    cdata = np.abs(cdata)                   # magnitude of AC

    # Normalize each row to the sqrt of its AC triangle.
    cdata = np.divide(cdata, np.sqrt(np.sum(cdata, axis=2))[:, :, np.newaxis])
    return cdata
def ac_viz(acdata):
    '''
    Adapted from Gerry Harp at SETI.

    Slightly massages an autocorrelation result (3D array) for better
    visualization and returns a new array (the input is not modified):

    - Takes the natural log of the data; a small constant (1e-6) is
      added first to avoid log(0).
    - The zero-delay column (center of the last axis), which carries no
      information, is overwritten with its t=-1 neighbor's values.
    - The subband-edge column is set to the array maximum so the edges
      stand out in the plot.
    '''
    acdata = np.log(acdata + 0.000001)  # log reduces darkening from the AC triangle
    # Bug fix: integer division -- "/" produces a float index under Python 3.
    mid = acdata.shape[2] // 2
    acdata[:, :, mid] = acdata[:, :, mid - 1]          # zero-delay -> symmetric neighbor
    acdata[:, :, acdata.shape[2] - 1] = np.max(acdata) # visualize subband edges
    return acdata
|
ibm-watson-data-lab/ibmseti | ibmseti/dsp.py | frequency_bins | python | def frequency_bins(header):
'''
Returns the frequency-axis lower bin edge values for the spectrogram.
'''
center_frequency = 1.0e6*header['rf_center_frequency']
if header["number_of_subbands"] > 1:
center_frequency += header["subband_spacing_hz"]*(header["number_of_subbands"]/2.0 - 0.5)
return np.fft.fftshift(\
np.fft.fftfreq( int(header["number_of_subbands"] * constants.bins_per_half_frame*(1.0 - header['over_sampling'])), \
1.0/(header["number_of_subbands"]*header["subband_spacing_hz"])) + center_frequency
) | Returnes the frequency-axis lower bin edge values for the spectrogram. | train | https://github.com/ibm-watson-data-lab/ibmseti/blob/3361bc0adb4770dc7a554ed7cda292503892acee/ibmseti/dsp.py#L30-L42 | null | # Copyright (c) 2017 IBM. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Utilities to work with Spectrograms -- Power spectra v time (aka "waterfall" plots)
'''
import numpy as np
from . import constants
def time_bins(header):
    """Return the time-axis lower bin edge values for the spectrogram.

    header: dict of compamp metadata; reads 'number_of_half_frames',
    'over_sampling', and 'subband_spacing_hz'.

    Returns a 1D float64 numpy array with one lower bin edge (seconds)
    per half frame.
    """
    # One entry per half frame, scaled step by step (same evaluation order
    # as the single-expression form) into a time offset in seconds.
    frame_index = np.arange(header['number_of_half_frames'], dtype=np.float64)
    scaled = frame_index * constants.bins_per_half_frame
    scaled = scaled * (1.0 - header['over_sampling'])
    return scaled / header['subband_spacing_hz']
def complex_to_fourier(cdata, over_sampling, norm=None):
    '''
    Transform time-domain complex samples into shifted Fourier space.

    cdata: 3D complex array (half frames x subbands x samples), as returned
        by Compamp.complex_data().
    over_sampling: fraction of oversampling across subbands (typically 0.25).
    norm: None or "ortho" -- forwarded to numpy.fft.fft (see NumPy FFT
        normalization documentation).

    Returns the complex spectrum with the central frequency shifted to the
    middle of the last axis and all oversampled edge bins sliced away, so
    adjacent subbands can be placed directly next to each other.
    '''
    # FFT each block independently, then rotate DC to the center of the axis.
    spectrum = np.fft.fft(cdata, norm=norm)
    spectrum = np.fft.fftshift(spectrum, 2)
    # Drop the oversampled bins at both edges of every subband.
    if over_sampling > 0:
        trim = int(cdata.shape[2] * over_sampling / 2)
        spectrum = spectrum[:, :, trim:-trim]
    return spectrum
def fourier_to_time(fcdata, norm=None):
    '''
    Convert a 2D Fourier-space signal back into a single 1D complex time series.

    fcdata: 2D complex Fourier spectrum. Axis 0 indexes the "half frames"
        (typically 129) and axis 1 holds each half frame's spectrum, with the
        central frequency assumed to sit at the center of axis 1 (i.e. an
        fftshift has already been applied).
    norm: None or "ortho" -- forwarded to numpy.fft.ifft.

    Returns a 1D complex array of length fcdata.shape[0] * fcdata.shape[1].

    The original Fourier spectrum can be recovered by reshaping the result
    back to 2D and applying np.fft.fftshift(np.fft.fft(...), 1).
    '''
    # Undo the center-frequency shift, invert the FFT per half frame, then
    # concatenate all half frames into one continuous time series.
    unshifted = np.fft.ifftshift(fcdata, 1)
    per_frame = np.fft.ifft(unshifted, norm=norm)
    total_samples = fcdata.shape[0] * fcdata.shape[1]
    return per_frame.reshape(total_samples)
def complex_to_power(cdata, over_sampling):
    '''
    Compute a 3D power spectrogram from time-domain complex samples.

    cdata: 3D complex array (half frames x subbands x samples), as returned
        by Compamp.complex_data(). Windowing (e.g. np.hanning) may be applied
        by the caller beforehand for smoother FFT results.
    over_sampling: fraction of oversampling across subbands (typically 0.25).

    Returns a real 3D array of per-bin power values. Typically followed by
    reshape_to_2d() to lay the subbands side by side in a 2D spectrogram:

        spectrogram = ibmseti.dsp.reshape_to_2d(
            ibmseti.dsp.complex_to_power(cdata, header['over_sampling']))
    '''
    spectrum = complex_to_fourier(cdata, over_sampling)
    # Power = |X|^2, scaled by 15/N. The factor of 15 is a normalization
    # inherited from the original pipeline; its provenance is unclear here.
    scale = 15.0 / cdata.shape[2]
    return (spectrum.real ** 2 + spectrum.imag ** 2) * scale
def reshape_to_2d(arr):
    '''
    Collapse the last two axes of a 3D numpy array into one, yielding 2D.

    Useful for flattening the per-subband output of `complex_to_power` or
    the autocorrelation routines into an image-like 2D array for analysis
    and display.
    '''
    n_rows = arr.shape[0]
    n_cols = arr.shape[1] * arr.shape[2]
    return arr.reshape((n_rows, n_cols))
def compamp_to_spectrogram(compamp):
    '''
    Build a 2D spectrogram from a compamp object.

    Each row holds the measured power spectrum for one half-frame time
    sample. Shorthand for:

        power = ibmseti.dsp.complex_to_power(
            compamp.complex_data(), compamp.header()['over_sampling'])
        spectrogram = ibmseti.dsp.reshape_to_2d(power)

    compamp: an ibmseti.compamp.Compamp instance.

    Example plotting (time on one axis, frequency on the other):

        time_bins = ibmseti.dsp.time_bins(compamp.header())
        freq_bins = ibmseti.dsp.frequency_bins(compamp.header())
        fig, ax = plt.subplots()
        ax.pcolormesh(freq_bins, time_bins, spectrogram)
    '''
    header = compamp.header()
    power_3d = complex_to_power(compamp.complex_data(), header['over_sampling'])
    return reshape_to_2d(power_3d)
def scale_to_png(arr):
    '''
    Map an array onto 8-bit pixel values (0..255, dtype uint8) for PNG export.

    If the array contains negative values it is first shifted so its minimum
    becomes zero; the result is then scaled so its maximum maps to 255 and
    clipped into the valid byte range.
    '''
    lowest = arr.min()
    # Shift only when needed; preserves the original's exact float arithmetic.
    shifted = arr + -1.0 * lowest if lowest < 0 else arr
    return np.clip(shifted * 255.0 / shifted.max(), 0, 255).astype(np.uint8)
def compamp_to_ac(compamp, window=np.hanning):  # convert single or multi-subband compamps into autocorrelation waterfall
    '''
    Convert a single- or multi-subband compamp into an autocorrelation waterfall.

    compamp: an ibmseti.compamp.Compamp instance.
    window: callable producing a length-N taper (default numpy.hanning),
        applied to each half frame's time series before transforming.

    Returns a real 3D array (half frames x subbands x 2N delays) holding the
    magnitude of each half frame's autocorrelation, with every row divided by
    the square root of its total (the "AC triangle").

    Adapted from Gerry Harp at SETI.
    '''
    header = compamp.header()  # NOTE(review): currently unused; retained in case header() has needed side effects -- confirm
    cdata = compamp.complex_data()
    # --- windowing and zero padding ---
    # Window smooths the sharp start/end of each time series in the frequency domain.
    cdata = np.multiply(cdata, window(cdata.shape[2]))
    # Zero-mean each row; does influence a minority of lines in some plots.
    cdata_normal = cdata - cdata.mean(axis=2)[:, :, np.newaxis]
    # Zero-pad each half frame to length 2N.
    cdata = np.zeros((cdata.shape[0], cdata.shape[1], 2 * cdata.shape[2]), complex)
    # BUG FIX: use floor division (//) for the slice bounds. In Python 3,
    # "/" yields a float, and float slice indices raise TypeError.
    cdata[:, :, cdata.shape[2]//2:cdata.shape[2] + cdata.shape[2]//2] = cdata_normal
    # --- autocorrelation via the Wiener-Khinchin theorem ---
    cdata = np.fft.fftshift(np.fft.fft(cdata), 2)    # FFT each block, center DC
    cdata = cdata.real**2 + cdata.imag**2            # FFT(AC(x)) = FFT(x)FFT*(x) = |FFT(x)|^2
    cdata = np.fft.ifftshift(np.fft.ifft(cdata), 2)  # AC(x) = iFFT(|FFT(x)|^2), rearranged
    cdata = np.abs(cdata)                            # magnitude of the AC
    # Normalize each row to the square root of its AC triangle.
    cdata = np.divide(cdata, np.sqrt(np.sum(cdata, axis=2))[:, :, np.newaxis])
    return cdata
def ac_viz(acdata):
    '''
    Massage autocorrelation data for better visualization.

    Takes the natural log of the data (adding 1e-6 first to avoid log(0)),
    replaces the zero-delay column with its symmetric neighbor, and sets the
    subband-edge column to the array maximum. The t=0 delay and subband edges
    carry no information, so overwriting them is safe.

    Adapted from Gerry Harp at SETI. Returns a new array; the input is not
    modified in place (np.log allocates a fresh array).
    '''
    # Log to reduce darkening on the sides of the spectrum due to AC triangling.
    acdata = np.log(acdata + 0.000001)
    # BUG FIX: floor division (//) -- "/" produces a float index in Python 3,
    # which raises TypeError when used to index an array.
    mid = acdata.shape[2] // 2
    acdata[:, :, mid] = acdata[:, :, mid - 1]            # zero delay <- symmetric neighbor
    acdata[:, :, acdata.shape[2] - 1] = np.max(acdata)   # visualize subband edges
    return acdata
|
ibm-watson-data-lab/ibmseti | ibmseti/dsp.py | complex_to_fourier | python | def complex_to_fourier(cdata, over_sampling, norm=None):
'''
cdata: 3D complex data (shaped by subbands and half_frames, as returned from Compamp.complex_data())
over_sampling: The fraction of oversampling across subbands (typically 0.25)
norm: None or "ortho" -- see Numpy FFT Normalization documentation. Default is None.
returns the signal in complex fourier space. The output fourier data are shifted so the central frequency
is at the center of the values. All over-sampled frequencies have been removed so that all frequency bins
can be properly arranged next to each other.
'''
# FFT all blocks separately and rearrange output
fftcdata = np.fft.fftshift(np.fft.fft(cdata, norm=norm), 2)
# slice out oversampled frequencies
if over_sampling > 0:
fftcdata = fftcdata[:, :, int(cdata.shape[2]*over_sampling/2):-int(cdata.shape[2]*over_sampling/2)]
return fftcdata | cdata: 3D complex data (shaped by subbands and half_frames, as returned from Compamp.complex_data())
over_sampling: The fraction of oversampling across subbands (typically 0.25)
norm: None or "ortho" -- see Numpy FFT Normalization documentation. Default is None.
returns the signal in complex fourier space. The output fourier data are shifted so the central frequency
is at the center of the values. All over-sampled frequencies have been removed so that all frequency bins
can be properly arranged next to each other. | train | https://github.com/ibm-watson-data-lab/ibmseti/blob/3361bc0adb4770dc7a554ed7cda292503892acee/ibmseti/dsp.py#L44-L62 | null | # Copyright (c) 2017 IBM. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Utilities to work with Spectrograms -- Power spectra v time (aka "waterfall" plots)
'''
import numpy as np
from . import constants
def time_bins(header):
'''
Returns the time-axis lower bin edge values for the spectrogram.
'''
return np.arange(header['number_of_half_frames'], dtype=np.float64)*constants.bins_per_half_frame\
*(1.0 - header['over_sampling']) / header['subband_spacing_hz']
def frequency_bins(header):
    '''
    Returns the frequency-axis lower bin edge values for the spectrogram.

    header: dict of compamp metadata; reads 'rf_center_frequency',
    'number_of_subbands', 'subband_spacing_hz', and 'over_sampling'.

    Returns a 1D numpy array of bin frequencies in Hz, centered on the
    band's center frequency.
    '''
    # rf_center_frequency is scaled by 1e6, presumably stored in MHz -- TODO confirm
    center_frequency = 1.0e6*header['rf_center_frequency']
    # Multi-subband data: shift the center to the midpoint of the full band.
    if header["number_of_subbands"] > 1:
        center_frequency += header["subband_spacing_hz"]*(header["number_of_subbands"]/2.0 - 0.5)
    return np.fft.fftshift(\
        np.fft.fftfreq( int(header["number_of_subbands"] * constants.bins_per_half_frame*(1.0 - header['over_sampling'])), \
        1.0/(header["number_of_subbands"]*header["subband_spacing_hz"])) + center_frequency
        )
def complex_to_fourier(cdata, over_sampling, norm=None):
'''
cdata: 3D complex data (shaped by subbands and half_frames, as returned from Compamp.complex_data())
over_sampling: The fraction of oversampling across subbands (typically 0.25)
norm: None or "ortho" -- see Numpy FFT Normalization documentation. Default is None.
returns the signal in complex fourier space. The output fourier data are shifted so the central frequency
is at the center of the values. All over-sampled frequencies have been removed so that all frequency bins
can be properly arranged next to each other.
'''
# FFT all blocks separately and rearrange output
fftcdata = np.fft.fftshift(np.fft.fft(cdata, norm=norm), 2)
# slice out oversampled frequencies
if over_sampling > 0:
fftcdata = fftcdata[:, :, int(cdata.shape[2]*over_sampling/2):-int(cdata.shape[2]*over_sampling/2)]
return fftcdata
def fourier_to_time(fcdata, norm=None):
'''
Converts the data from 2D fourier space signal to a 1D time-series.
fcdata: Complex fourier spectrum as a 2D array, The axis=0 is for each "half frame", and axis=1 contains the
fourier-space data for that half frame. Typically there are 129 "half frames" in the data.
Furthermore, it's assumed that fftshift has placed the central frequency at the center of axis=1.
norm: None or "ortho" -- see Numpy FFT Normalization documentation. Default is None.
Usage:
aca = ibmseti.compamp.Compamp(raw_data)
cdata = aca.complex_data() #cdata is a 3D numpy array in the time domain.
#can manipulate cdata in time-space if desired (use various windowing functions, for example)
fcdata = ibmseti.dsp.complex_to_fourier(cdata, aca.header()['over_sampling'])
fcdata_2d = ibmseti.dsp.reshape_to_2d(fcdata)
tcdata_1d = ibmseti.dsp.fourier_to_time(fcdata_2d)
One can recover the Fourier Spectrum of cdata_1d by:
cdata_2d = cdata_1d.reshape(aca.header()['number_of_half_frames'], int(aca.header()['number_of_subbands'] * ibmseti.constants.bins_per_half_frame*(1 - aca.header()['over_sampling'])))
fcdata_2d_v2 = np.fft.fftshift(np.fft.fft(cdata_2d), 1)
#fcdata_2d_v2 and fcdata_2d should be the same
np.sum(np.sum(fcdata_2d - fcdata_2d_v2)) # should equal to approximately 0
'''
return np.fft.ifft(np.fft.ifftshift(fcdata, 1),norm=norm).reshape(fcdata.shape[0] * fcdata.shape[1]) # single complex time series
def complex_to_power(cdata, over_sampling):
'''
cdata: 3D complex data (shaped by subbands and half_frames, as returned from Compamp.complex_data())
over_sampling: The fraction of oversampling across subbands (typically 0.25).
returns a 3D spectrogram
Example:
aca = ibmseti.compamp.Compamp(raw_data)
cdata = aca.complex_data()
#can perform any transformations on cdata here, such as applying hanning windows for smoother FFT results.
#cdata = np.multiply(cdata, np.hanning(constants.bins_per_half_frame))
power = ibmseti.dsp.complex_to_power(cdata, aca.header()['over_sampling'])
Typically, this 3D spectrogram is reshaped so that the subbands are aligned next to each other
in a 2D spectrogram
spectrogram = ibmseti.dsp.reshape_to_2d(power)
'''
fftcdata = complex_to_fourier(cdata, over_sampling)
# calculate power, normalize and amplify by factor 15 (what is the factor of 15 for?)
fftcdata = np.multiply(fftcdata.real**2 + fftcdata.imag**2, 15.0/cdata.shape[2])
return fftcdata
def reshape_to_2d(arr):
'''
Assumes a 3D Numpy array, and reshapes like
arr.reshape((arr.shape[0], arr.shape[1]*arr.shape[2]))
This is useful for converting processed data from `complex_to_power`
and from `autocorrelation` into a 2D array for image analysis and display.
'''
return arr.reshape((arr.shape[0], arr.shape[1]*arr.shape[2]))
def compamp_to_spectrogram(compamp):
'''
Returns spectrogram, with each row containing the measured power spectrum for a XX second time sample.
Using this function is shorthand for:
aca = ibmseti.compamp.Compamp(raw_data)
power = ibmseti.dsp.complex_to_power(aca.complex_data(), aca.header()['over_sampling'])
spectrogram = ibmseti.dsp.reshape_to_2d(power)
Example Usage:
import ibmseti
import matplotlib.pyplot as plt
plt.ion()
aca = ibmseti.compamp.Compamp(raw_data)
spectrogram = ibmseti.dsp.compamp_to_spectrogram(aca)
time_bins = ibmseti.dsp.time_bins( aca.header() )
freq_bins = ibmseti.dsp.frequency_bins( aca.header() )
fig, ax = plt.subplots()
ax.pcolormesh(freq_bins, time_bins, spectrogram)
#Time is on the horizontal axis and frequency is along the vertical.
'''
power = complex_to_power(compamp.complex_data(), compamp.header()['over_sampling'])
return reshape_to_2d(power)
def scale_to_png(arr):
if arr.min() < 0:
sh_arr = arr + -1.0*arr.min()
else:
sh_arr = arr
return np.clip(sh_arr * 255.0/sh_arr.max(), 0, 255).astype(np.uint8)
def compamp_to_ac(compamp, window=np.hanning):  # convert single or multi-subband compamps into autocorrelation waterfall
    '''
    Convert a single- or multi-subband compamp into an autocorrelation waterfall.

    compamp: an ibmseti.compamp.Compamp instance.
    window: callable producing a length-N taper (default numpy.hanning),
        applied to each half frame's time series before transforming.

    Returns a real 3D array (half frames x subbands x 2N delays) holding the
    magnitude of each half frame's autocorrelation, with every row divided by
    the square root of its total (the "AC triangle").

    Adapted from Gerry Harp at SETI.
    '''
    header = compamp.header()  # NOTE(review): currently unused; retained in case header() has needed side effects -- confirm
    cdata = compamp.complex_data()
    # --- windowing and zero padding ---
    # Window smooths the sharp start/end of each time series in the frequency domain.
    cdata = np.multiply(cdata, window(cdata.shape[2]))
    # Zero-mean each row; does influence a minority of lines in some plots.
    cdata_normal = cdata - cdata.mean(axis=2)[:, :, np.newaxis]
    # Zero-pad each half frame to length 2N.
    cdata = np.zeros((cdata.shape[0], cdata.shape[1], 2 * cdata.shape[2]), complex)
    # BUG FIX: use floor division (//) for the slice bounds. In Python 3,
    # "/" yields a float, and float slice indices raise TypeError.
    cdata[:, :, cdata.shape[2]//2:cdata.shape[2] + cdata.shape[2]//2] = cdata_normal
    # --- autocorrelation via the Wiener-Khinchin theorem ---
    cdata = np.fft.fftshift(np.fft.fft(cdata), 2)    # FFT each block, center DC
    cdata = cdata.real**2 + cdata.imag**2            # FFT(AC(x)) = FFT(x)FFT*(x) = |FFT(x)|^2
    cdata = np.fft.ifftshift(np.fft.ifft(cdata), 2)  # AC(x) = iFFT(|FFT(x)|^2), rearranged
    cdata = np.abs(cdata)                            # magnitude of the AC
    # Normalize each row to the square root of its AC triangle.
    cdata = np.divide(cdata, np.sqrt(np.sum(cdata, axis=2))[:, :, np.newaxis])
    return cdata
def ac_viz(acdata):
    '''
    Massage autocorrelation data for better visualization.

    Takes the natural log of the data (adding 1e-6 first to avoid log(0)),
    replaces the zero-delay column with its symmetric neighbor, and sets the
    subband-edge column to the array maximum. The t=0 delay and subband edges
    carry no information, so overwriting them is safe.

    Adapted from Gerry Harp at SETI. Returns a new array; the input is not
    modified in place (np.log allocates a fresh array).
    '''
    # Log to reduce darkening on the sides of the spectrum due to AC triangling.
    acdata = np.log(acdata + 0.000001)
    # BUG FIX: floor division (//) -- "/" produces a float index in Python 3,
    # which raises TypeError when used to index an array.
    mid = acdata.shape[2] // 2
    acdata[:, :, mid] = acdata[:, :, mid - 1]            # zero delay <- symmetric neighbor
    acdata[:, :, acdata.shape[2] - 1] = np.max(acdata)   # visualize subband edges
    return acdata
|
ibm-watson-data-lab/ibmseti | ibmseti/dsp.py | fourier_to_time | python | def fourier_to_time(fcdata, norm=None):
'''
Converts the data from 2D fourier space signal to a 1D time-series.
fcdata: Complex fourier spectrum as a 2D array, The axis=0 is for each "half frame", and axis=1 contains the
fourier-space data for that half frame. Typically there are 129 "half frames" in the data.
Furthermore, it's assumed that fftshift has placed the central frequency at the center of axis=1.
norm: None or "ortho" -- see Numpy FFT Normalization documentation. Default is None.
Usage:
aca = ibmseti.compamp.Compamp(raw_data)
cdata = aca.complex_data() #cdata is a 3D numpy array in the time domain.
#can manipulate cdata in time-space if desired (use various windowing functions, for example)
fcdata = ibmseti.dsp.complex_to_fourier(cdata, aca.header()['over_sampling'])
fcdata_2d = ibmseti.dsp.reshape_to_2d(fcdata)
tcdata_1d = ibmseti.dsp.fourier_to_time(fcdata_2d)
One can recover the Fourier Spectrum of cdata_1d by:
cdata_2d = cdata_1d.reshape(aca.header()['number_of_half_frames'], int(aca.header()['number_of_subbands'] * ibmseti.constants.bins_per_half_frame*(1 - aca.header()['over_sampling'])))
fcdata_2d_v2 = np.fft.fftshift(np.fft.fft(cdata_2d), 1)
#fcdata_2d_v2 and fcdata_2d should be the same
np.sum(np.sum(fcdata_2d - fcdata_2d_v2)) # should equal to approximately 0
'''
return np.fft.ifft(np.fft.ifftshift(fcdata, 1),norm=norm).reshape(fcdata.shape[0] * fcdata.shape[1]) | Converts the data from 2D fourier space signal to a 1D time-series.
fcdata: Complex fourier spectrum as a 2D array, The axis=0 is for each "half frame", and axis=1 contains the
fourier-space data for that half frame. Typically there are 129 "half frames" in the data.
Furthermore, it's assumed that fftshift has placed the central frequency at the center of axis=1.
norm: None or "ortho" -- see Numpy FFT Normalization documentation. Default is None.
Usage:
aca = ibmseti.compamp.Compamp(raw_data)
cdata = aca.complex_data() #cdata is a 3D numpy array in the time domain.
#can manipulate cdata in time-space if desired (use various windowing functions, for example)
fcdata = ibmseti.dsp.complex_to_fourier(cdata, aca.header()['over_sampling'])
fcdata_2d = ibmseti.dsp.reshape_to_2d(fcdata)
tcdata_1d = ibmseti.dsp.fourier_to_time(fcdata_2d)
One can recover the Fourier Spectrum of cdata_1d by:
cdata_2d = cdata_1d.reshape(aca.header()['number_of_half_frames'], int(aca.header()['number_of_subbands'] * ibmseti.constants.bins_per_half_frame*(1 - aca.header()['over_sampling'])))
fcdata_2d_v2 = np.fft.fftshift(np.fft.fft(cdata_2d), 1)
#fcdata_2d_v2 and fcdata_2d should be the same
np.sum(np.sum(fcdata_2d - fcdata_2d_v2)) # should equal to approximately 0 | train | https://github.com/ibm-watson-data-lab/ibmseti/blob/3361bc0adb4770dc7a554ed7cda292503892acee/ibmseti/dsp.py#L64-L94 | null | # Copyright (c) 2017 IBM. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Utilities to work with Spectrograms -- Power spectra v time (aka "waterfall" plots)
'''
import numpy as np
from . import constants
def time_bins(header):
'''
Returns the time-axis lower bin edge values for the spectrogram.
'''
return np.arange(header['number_of_half_frames'], dtype=np.float64)*constants.bins_per_half_frame\
*(1.0 - header['over_sampling']) / header['subband_spacing_hz']
def frequency_bins(header):
    '''
    Returns the frequency-axis lower bin edge values for the spectrogram.

    header: dict of compamp metadata; reads 'rf_center_frequency',
    'number_of_subbands', 'subband_spacing_hz', and 'over_sampling'.

    Returns a 1D numpy array of bin frequencies in Hz, centered on the
    band's center frequency.
    '''
    # rf_center_frequency is scaled by 1e6, presumably stored in MHz -- TODO confirm
    center_frequency = 1.0e6*header['rf_center_frequency']
    # Multi-subband data: shift the center to the midpoint of the full band.
    if header["number_of_subbands"] > 1:
        center_frequency += header["subband_spacing_hz"]*(header["number_of_subbands"]/2.0 - 0.5)
    return np.fft.fftshift(\
        np.fft.fftfreq( int(header["number_of_subbands"] * constants.bins_per_half_frame*(1.0 - header['over_sampling'])), \
        1.0/(header["number_of_subbands"]*header["subband_spacing_hz"])) + center_frequency
        )
def complex_to_fourier(cdata, over_sampling, norm=None):
'''
cdata: 3D complex data (shaped by subbands and half_frames, as returned from Compamp.complex_data())
over_sampling: The fraction of oversampling across subbands (typically 0.25)
norm: None or "ortho" -- see Numpy FFT Normalization documentation. Default is None.
returns the signal in complex fourier space. The output fourier data are shifted so the central frequency
is at the center of the values. All over-sampled frequencies have been removed so that all frequency bins
can be properly arranged next to each other.
'''
# FFT all blocks separately and rearrange output
fftcdata = np.fft.fftshift(np.fft.fft(cdata, norm=norm), 2)
# slice out oversampled frequencies
if over_sampling > 0:
fftcdata = fftcdata[:, :, int(cdata.shape[2]*over_sampling/2):-int(cdata.shape[2]*over_sampling/2)]
return fftcdata
# single complex time series
def complex_to_power(cdata, over_sampling):
'''
cdata: 3D complex data (shaped by subbands and half_frames, as returned from Compamp.complex_data())
over_sampling: The fraction of oversampling across subbands (typically 0.25).
returns a 3D spectrogram
Example:
aca = ibmseti.compamp.Compamp(raw_data)
cdata = aca.complex_data()
#can perform any transformations on cdata here, such as applying hanning windows for smoother FFT results.
#cdata = np.multiply(cdata, np.hanning(constants.bins_per_half_frame))
power = ibmseti.dsp.complex_to_power(cdata, aca.header()['over_sampling'])
Typically, this 3D spectrogram is reshaped so that the subbands are aligned next to each other
in a 2D spectrogram
spectrogram = ibmseti.dsp.reshape_to_2d(power)
'''
fftcdata = complex_to_fourier(cdata, over_sampling)
# calculate power, normalize and amplify by factor 15 (what is the factor of 15 for?)
fftcdata = np.multiply(fftcdata.real**2 + fftcdata.imag**2, 15.0/cdata.shape[2])
return fftcdata
def reshape_to_2d(arr):
'''
Assumes a 3D Numpy array, and reshapes like
arr.reshape((arr.shape[0], arr.shape[1]*arr.shape[2]))
This is useful for converting processed data from `complex_to_power`
and from `autocorrelation` into a 2D array for image analysis and display.
'''
return arr.reshape((arr.shape[0], arr.shape[1]*arr.shape[2]))
def compamp_to_spectrogram(compamp):
'''
Returns spectrogram, with each row containing the measured power spectrum for a XX second time sample.
Using this function is shorthand for:
aca = ibmseti.compamp.Compamp(raw_data)
power = ibmseti.dsp.complex_to_power(aca.complex_data(), aca.header()['over_sampling'])
spectrogram = ibmseti.dsp.reshape_to_2d(power)
Example Usage:
import ibmseti
import matplotlib.pyplot as plt
plt.ion()
aca = ibmseti.compamp.Compamp(raw_data)
spectrogram = ibmseti.dsp.compamp_to_spectrogram(aca)
time_bins = ibmseti.dsp.time_bins( aca.header() )
freq_bins = ibmseti.dsp.frequency_bins( aca.header() )
fig, ax = plt.subplots()
ax.pcolormesh(freq_bins, time_bins, spectrogram)
#Time is on the horizontal axis and frequency is along the vertical.
'''
power = complex_to_power(compamp.complex_data(), compamp.header()['over_sampling'])
return reshape_to_2d(power)
def scale_to_png(arr):
if arr.min() < 0:
sh_arr = arr + -1.0*arr.min()
else:
sh_arr = arr
return np.clip(sh_arr * 255.0/sh_arr.max(), 0, 255).astype(np.uint8)
def compamp_to_ac(compamp, window=np.hanning):  # convert single or multi-subband compamps into autocorrelation waterfall
    '''
    Convert a single- or multi-subband compamp into an autocorrelation waterfall.

    compamp: an ibmseti.compamp.Compamp instance.
    window: callable producing a length-N taper (default numpy.hanning),
        applied to each half frame's time series before transforming.

    Returns a real 3D array (half frames x subbands x 2N delays) holding the
    magnitude of each half frame's autocorrelation, with every row divided by
    the square root of its total (the "AC triangle").

    Adapted from Gerry Harp at SETI.
    '''
    header = compamp.header()  # NOTE(review): currently unused; retained in case header() has needed side effects -- confirm
    cdata = compamp.complex_data()
    # --- windowing and zero padding ---
    # Window smooths the sharp start/end of each time series in the frequency domain.
    cdata = np.multiply(cdata, window(cdata.shape[2]))
    # Zero-mean each row; does influence a minority of lines in some plots.
    cdata_normal = cdata - cdata.mean(axis=2)[:, :, np.newaxis]
    # Zero-pad each half frame to length 2N.
    cdata = np.zeros((cdata.shape[0], cdata.shape[1], 2 * cdata.shape[2]), complex)
    # BUG FIX: use floor division (//) for the slice bounds. In Python 3,
    # "/" yields a float, and float slice indices raise TypeError.
    cdata[:, :, cdata.shape[2]//2:cdata.shape[2] + cdata.shape[2]//2] = cdata_normal
    # --- autocorrelation via the Wiener-Khinchin theorem ---
    cdata = np.fft.fftshift(np.fft.fft(cdata), 2)    # FFT each block, center DC
    cdata = cdata.real**2 + cdata.imag**2            # FFT(AC(x)) = FFT(x)FFT*(x) = |FFT(x)|^2
    cdata = np.fft.ifftshift(np.fft.ifft(cdata), 2)  # AC(x) = iFFT(|FFT(x)|^2), rearranged
    cdata = np.abs(cdata)                            # magnitude of the AC
    # Normalize each row to the square root of its AC triangle.
    cdata = np.divide(cdata, np.sqrt(np.sum(cdata, axis=2))[:, :, np.newaxis])
    return cdata
def ac_viz(acdata):
    '''
    Massage autocorrelation data for better visualization.

    Takes the natural log of the data (adding 1e-6 first to avoid log(0)),
    replaces the zero-delay column with its symmetric neighbor, and sets the
    subband-edge column to the array maximum. The t=0 delay and subband edges
    carry no information, so overwriting them is safe.

    Adapted from Gerry Harp at SETI. Returns a new array; the input is not
    modified in place (np.log allocates a fresh array).
    '''
    # Log to reduce darkening on the sides of the spectrum due to AC triangling.
    acdata = np.log(acdata + 0.000001)
    # BUG FIX: floor division (//) -- "/" produces a float index in Python 3,
    # which raises TypeError when used to index an array.
    mid = acdata.shape[2] // 2
    acdata[:, :, mid] = acdata[:, :, mid - 1]            # zero delay <- symmetric neighbor
    acdata[:, :, acdata.shape[2] - 1] = np.max(acdata)   # visualize subband edges
    return acdata
|
ibm-watson-data-lab/ibmseti | ibmseti/dsp.py | complex_to_power | python | def complex_to_power(cdata, over_sampling):
'''
cdata: 3D complex data (shaped by subbands and half_frames, as returned from Compamp.complex_data())
over_sampling: The fraction of oversampling across subbands (typically 0.25).
returns a 3D spectrogram
Example:
aca = ibmseti.compamp.Compamp(raw_data)
cdata = aca.complex_data()
#can perform any transformations on cdata here, such as applying hanning windows for smoother FFT results.
#cdata = np.multiply(cdata, np.hanning(constants.bins_per_half_frame))
power = ibmseti.dsp.complex_to_power(, aca.header()['over_sampling'])
Typically, this 3D spectrogram is rehaped so that the subbands are aligned next to each other
in a 2D spectrogram
spectrogram = ibmseti.dsp.reshape_to_2d(power)
'''
fftcdata = complex_to_fourier(cdata, over_sampling)
# calculate power, normalize and amplify by factor 15 (what is the factor of 15 for?)
fftcdata = np.multiply(fftcdata.real**2 + fftcdata.imag**2, 15.0/cdata.shape[2])
return fftcdata | cdata: 3D complex data (shaped by subbands and half_frames, as returned from Compamp.complex_data())
over_sampling: The fraction of oversampling across subbands (typically 0.25).
returns a 3D spectrogram
Example:
aca = ibmseti.compamp.Compamp(raw_data)
cdata = aca.complex_data()
#can perform any transformations on cdata here, such as applying hanning windows for smoother FFT results.
#cdata = np.multiply(cdata, np.hanning(constants.bins_per_half_frame))
power = ibmseti.dsp.complex_to_power(, aca.header()['over_sampling'])
Typically, this 3D spectrogram is rehaped so that the subbands are aligned next to each other
in a 2D spectrogram
spectrogram = ibmseti.dsp.reshape_to_2d(power) | train | https://github.com/ibm-watson-data-lab/ibmseti/blob/3361bc0adb4770dc7a554ed7cda292503892acee/ibmseti/dsp.py#L97-L122 | [
"def complex_to_fourier(cdata, over_sampling, norm=None):\n '''\n cdata: 3D complex data (shaped by subbands and half_frames, as returned from Compamp.complex_data())\n over_sampling: The fraction of oversampling across subbands (typically 0.25)\n norm: None or \"ortho\" -- see Numpy FFT Normalization documenta... | # Copyright (c) 2017 IBM. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Utilities to work with Spectrograms -- Power spectra v time (aka "waterfall" plots)
'''
import numpy as np
from . import constants
def time_bins(header):
'''
Returns the time-axis lower bin edge values for the spectrogram.
'''
return np.arange(header['number_of_half_frames'], dtype=np.float64)*constants.bins_per_half_frame\
*(1.0 - header['over_sampling']) / header['subband_spacing_hz']
def frequency_bins(header):
    '''
    Returns the frequency-axis lower bin edge values for the spectrogram.

    header: dict of compamp metadata; reads 'rf_center_frequency',
    'number_of_subbands', 'subband_spacing_hz', and 'over_sampling'.

    Returns a 1D numpy array of bin frequencies in Hz, centered on the
    band's center frequency.
    '''
    # rf_center_frequency is scaled by 1e6, presumably stored in MHz -- TODO confirm
    center_frequency = 1.0e6*header['rf_center_frequency']
    # Multi-subband data: shift the center to the midpoint of the full band.
    if header["number_of_subbands"] > 1:
        center_frequency += header["subband_spacing_hz"]*(header["number_of_subbands"]/2.0 - 0.5)
    return np.fft.fftshift(\
        np.fft.fftfreq( int(header["number_of_subbands"] * constants.bins_per_half_frame*(1.0 - header['over_sampling'])), \
        1.0/(header["number_of_subbands"]*header["subband_spacing_hz"])) + center_frequency
        )
def complex_to_fourier(cdata, over_sampling, norm=None):
'''
cdata: 3D complex data (shaped by subbands and half_frames, as returned from Compamp.complex_data())
over_sampling: The fraction of oversampling across subbands (typically 0.25)
norm: None or "ortho" -- see Numpy FFT Normalization documentation. Default is None.
returns the signal in complex fourier space. The output fourier data are shifted so the central frequency
is at the center of the values. All over-sampled frequencies have been removed so that all frequency bins
can be properly arranged next to each other.
'''
# FFT all blocks separately and rearrange output
fftcdata = np.fft.fftshift(np.fft.fft(cdata, norm=norm), 2)
# slice out oversampled frequencies
if over_sampling > 0:
fftcdata = fftcdata[:, :, int(cdata.shape[2]*over_sampling/2):-int(cdata.shape[2]*over_sampling/2)]
return fftcdata
def fourier_to_time(fcdata, norm=None):
    """Convert a 2D fourier-space signal into a single 1D time series.

    fcdata: 2D complex fourier spectrum. Axis 0 indexes the half frames
        (typically 129) and axis 1 holds the fourier data for each half
        frame, with the central frequency fftshifted to the middle of the
        axis (as produced by complex_to_fourier + reshape_to_2d).
    norm: None or "ortho" -- forwarded to numpy.fft.ifft (see the NumPy FFT
        normalization documentation). Default is None.

    Returns a 1D complex array of length shape[0] * shape[1]: the
    time-domain samples of every half frame concatenated in order.

    The fourier spectrum can be recovered by reshaping the result back to
    2D and applying np.fft.fftshift(np.fft.fft(...), 1).
    """
    # Undo the center-frequency shift before the inverse transform.
    unshifted = np.fft.ifftshift(fcdata, 1)
    series = np.fft.ifft(unshifted, norm=norm)
    # Concatenate the per-half-frame series into one long time series.
    return series.reshape(series.shape[0] * series.shape[1])
def complex_to_power(cdata, over_sampling):
    """Compute a 3D power spectrogram from time-domain complex data.

    cdata: 3D complex array (half_frames x subbands x bins), as returned
        by Compamp.complex_data(). Windowing (e.g. np.hanning) may be
        applied to cdata beforehand for smoother FFT results.
    over_sampling: fraction of oversampling across subbands (typically 0.25).

    Returns a 3D real array of spectral powers. Typically this is reshaped
    so the subbands sit next to each other in a 2D spectrogram:
        spectrogram = ibmseti.dsp.reshape_to_2d(power)
    """
    spectrum = complex_to_fourier(cdata, over_sampling)
    # Power = |FFT|^2, normalized by the bin count and amplified by a
    # legacy factor of 15 (the origin of that factor is undocumented).
    scale = 15.0 / cdata.shape[2]
    return (spectrum.real ** 2 + spectrum.imag ** 2) * scale
def reshape_to_2d(arr):
    """Collapse a 3D array into 2D by merging its last two axes.

    Equivalent to arr.reshape((arr.shape[0], arr.shape[1] * arr.shape[2])).
    Useful for turning the 3D output of `complex_to_power` or the
    autocorrelation routines into a 2D array for image analysis/display.
    """
    rows, a, b = arr.shape
    return arr.reshape((rows, a * b))
def compamp_to_spectrogram(compamp):
    """Return the 2D power spectrogram for a compamp object.

    Each row contains the measured power spectrum for one time sample.
    This is shorthand for:

        power = ibmseti.dsp.complex_to_power(compamp.complex_data(),
                                             compamp.header()['over_sampling'])
        spectrogram = ibmseti.dsp.reshape_to_2d(power)

    Example:
        import ibmseti
        import matplotlib.pyplot as plt
        plt.ion()
        aca = ibmseti.compamp.Compamp(raw_data)
        spectrogram = ibmseti.dsp.compamp_to_spectrogram(aca)
        fig, ax = plt.subplots()
        ax.pcolormesh(ibmseti.dsp.frequency_bins(aca.header()),
                      ibmseti.dsp.time_bins(aca.header()),
                      spectrogram)
        # Frequency runs along the horizontal axis, time along the vertical.
    """
    over_sampling = compamp.header()['over_sampling']
    power = complex_to_power(compamp.complex_data(), over_sampling)
    return reshape_to_2d(power)
def scale_to_png(arr):
    """Linearly rescale an array to uint8 [0, 255] for PNG output.

    Arrays containing negative values are first shifted so their minimum
    maps to 0; the (shifted) maximum then maps to 255. Values are clipped
    to [0, 255] and truncated to uint8.

    A constant-zero (or constant non-positive) input would otherwise hit a
    0/0 division producing NaN; such inputs are returned as all-zero uint8.
    """
    if arr.min() < 0:
        # Shift so the most negative value becomes 0.
        shifted = arr - arr.min()
    else:
        shifted = arr
    peak = shifted.max()
    if peak == 0:
        # Degenerate constant input: map to black instead of NaN.
        return np.zeros(arr.shape, dtype=np.uint8)
    return np.clip(shifted * 255.0 / peak, 0, 255).astype(np.uint8)
def compamp_to_ac(compamp, window=np.hanning):
    """Convert a (single or multi-subband) compamp into an autocorrelation waterfall.

    compamp: object exposing complex_data() -> 3D complex array
        (half_frames x subbands x bins).
    window: callable mapping a length to a window array (default
        np.hanning), applied along the time-sample axis before the FFT.

    Returns a 3D real array (half_frames x subbands x 2*bins) holding the
    magnitude of the autocorrelation of each block, with each row
    normalized by the square root of its autocorrelation-triangle sum.

    Adapted from Gerry Harp at SETI.
    """
    cdata = compamp.complex_data()
    nbins = cdata.shape[2]

    # Window to soften sharp time-series start/end effects in the
    # frequency domain, then remove each block's mean.
    cdata = np.multiply(cdata, window(nbins))
    centered = cdata - cdata.mean(axis=2)[:, :, np.newaxis]

    # Zero-pad each block to 2N (centered) so the circular FFT-based
    # autocorrelation equals the linear autocorrelation.
    padded = np.zeros((cdata.shape[0], cdata.shape[1], 2 * nbins), complex)
    # NOTE: floor division is required -- nbins/2 is a float index in Py3.
    padded[:, :, nbins // 2:nbins + nbins // 2] = centered

    # Wiener-Khinchin: AC(x) = iFFT(|FFT(x)|^2), with fftshift bookkeeping
    # to keep delays arranged correctly.
    spec = np.fft.fftshift(np.fft.fft(padded), 2)
    power = spec.real ** 2 + spec.imag ** 2
    ac = np.abs(np.fft.ifftshift(np.fft.ifft(power), 2))

    # Normalize each row to the square root of its AC-triangle sum.
    return np.divide(ac, np.sqrt(np.sum(ac, axis=2))[:, :, np.newaxis])
def ac_viz(acdata):
    """Massage an autocorrelation waterfall for nicer visualization.

    Takes the natural log of the data (reducing the darkening toward the
    sides of the spectrum caused by the AC triangle), replaces the
    zero-delay column with its symmetric neighbor, and sets the last
    (subband-edge) column to the data maximum so subband boundaries stand
    out. The zero-delay column and the subband edges carry no information,
    so this is harmless for analysis.

    To avoid log(0), 0.000001 is added to every element before the log.
    Returns a new array; the input array is not modified.

    Adapted from Gerry Harp at SETI.
    """
    acdata = np.log(acdata + 0.000001)
    # Floor division is required: shape[2]/2 is a float index in Py3.
    mid = acdata.shape[2] // 2
    # Zero-delay values carry no information; copy the symmetric neighbor.
    acdata[:, :, mid] = acdata[:, :, mid - 1]
    # Highlight the subband edges with the global maximum.
    acdata[:, :, -1] = np.max(acdata)
    return acdata
|
ibm-watson-data-lab/ibmseti | ibmseti/dsp.py | reshape_to_2d | python | def reshape_to_2d(arr):
'''
Assumes a 3D Numpy array, and reshapes like
arr.reshape((arr.shape[0], arr.shape[1]*arr.shape[2]))
This is useful for converting processed data from `complex_to_power`
and from `autocorrelation` into a 2D array for image analysis and display.
'''
return arr.reshape((arr.shape[0], arr.shape[1]*arr.shape[2])) | Assumes a 3D Numpy array, and reshapes like
arr.reshape((arr.shape[0], arr.shape[1]*arr.shape[2]))
This is useful for converting processed data from `complex_to_power`
and from `autocorrelation` into a 2D array for image analysis and display. | train | https://github.com/ibm-watson-data-lab/ibmseti/blob/3361bc0adb4770dc7a554ed7cda292503892acee/ibmseti/dsp.py#L124-L134 | null | # Copyright (c) 2017 IBM. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Utilities to work with Spectrograms -- Power spectra v time (aka "waterfall" plots)
'''
import numpy as np
from . import constants
def time_bins(header):
'''
Returns the time-axis lower bin edge values for the spectrogram.
'''
return np.arange(header['number_of_half_frames'], dtype=np.float64)*constants.bins_per_half_frame\
*(1.0 - header['over_sampling']) / header['subband_spacing_hz']
def frequency_bins(header):
'''
Returns the frequency-axis lower bin edge values for the spectrogram.
'''
center_frequency = 1.0e6*header['rf_center_frequency']
if header["number_of_subbands"] > 1:
center_frequency += header["subband_spacing_hz"]*(header["number_of_subbands"]/2.0 - 0.5)
return np.fft.fftshift(\
np.fft.fftfreq( int(header["number_of_subbands"] * constants.bins_per_half_frame*(1.0 - header['over_sampling'])), \
1.0/(header["number_of_subbands"]*header["subband_spacing_hz"])) + center_frequency
)
def complex_to_fourier(cdata, over_sampling, norm=None):
'''
cdata: 3D complex data (shaped by subbands and half_frames, as returned from Compamp.complex_data())
over_sampling: The fraction of oversampling across subbands (typically 0.25)
norm: None or "ortho" -- see Numpy FFT Normalization documentation. Default is None.
returns the signal in complex fourier space. The output fourier data are shifted so the central frequency
is at the center of the values. All over-sampled frequencies have been removed so that all frequency bins
can be properly arranged next to each other.
'''
# FFT all blocks separately and rearrange output
fftcdata = np.fft.fftshift(np.fft.fft(cdata, norm=norm), 2)
# slice out oversampled frequencies
if over_sampling > 0:
fftcdata = fftcdata[:, :, int(cdata.shape[2]*over_sampling/2):-int(cdata.shape[2]*over_sampling/2)]
return fftcdata
def fourier_to_time(fcdata, norm=None):
'''
Converts the data from 2D fourier space signal to a 1D time-series.
fcdata: Complex fourier spectrum as a 2D array, The axis=0 is for each "half frame", and axis=1 contains the
fourier-space data for that half frame. Typically there are 129 "half frames" in the data.
Furthermore, it's assumed that fftshift has placed the central frequency at the center of axis=1.
norm: None or "ortho" -- see Numpy FFT Normalization documentation. Default is None.
Usage:
aca = ibmseti.compamp.Compamp(raw_data)
cdata = aca.complex_data() #cdata is a 3D numpy array in the time domain.
#can manipulate cdata in time-space if desired (use various windowing functions, for example)
fcdata = ibmseti.dsp.complex_to_fourier(cdata, aca.header()['over_sampling'])
fcdata_2d = ibmseti.dsp.reshape_to_2d(fcdata)
tcdata_1d = ibmseti.dsp.fourier_to_time(fcdata_2d)
One can recover the Fourier Spectrum of cdata_1d by:
cdata_2d = cdata_1d.reshape(aca.header()['number_of_half_frames'], int(aca.header()['number_of_subbands'] * ibmseti.constants.bins_per_half_frame*(1 - aca.header()['over_sampling'])))
fcdata_2d_v2 = np.fft.fftshift(np.fft.fft(cdata_2d), 1)
#fcdata_2d_v2 and fcdata_2d should be the same
np.sum(np.sum(fcdata_2d - fcdata_2d_v2)) # should equal to approximately 0
'''
return np.fft.ifft(np.fft.ifftshift(fcdata, 1),norm=norm).reshape(fcdata.shape[0] * fcdata.shape[1]) # single complex time series
def complex_to_power(cdata, over_sampling):
'''
cdata: 3D complex data (shaped by subbands and half_frames, as returned from Compamp.complex_data())
over_sampling: The fraction of oversampling across subbands (typically 0.25).
returns a 3D spectrogram
Example:
aca = ibmseti.compamp.Compamp(raw_data)
cdata = aca.complex_data()
#can perform any transformations on cdata here, such as applying hanning windows for smoother FFT results.
#cdata = np.multiply(cdata, np.hanning(constants.bins_per_half_frame))
power = ibmseti.dsp.complex_to_power(, aca.header()['over_sampling'])
Typically, this 3D spectrogram is rehaped so that the subbands are aligned next to each other
in a 2D spectrogram
spectrogram = ibmseti.dsp.reshape_to_2d(power)
'''
fftcdata = complex_to_fourier(cdata, over_sampling)
# calculate power, normalize and amplify by factor 15 (what is the factor of 15 for?)
fftcdata = np.multiply(fftcdata.real**2 + fftcdata.imag**2, 15.0/cdata.shape[2])
return fftcdata
def reshape_to_2d(arr):
'''
Assumes a 3D Numpy array, and reshapes like
arr.reshape((arr.shape[0], arr.shape[1]*arr.shape[2]))
This is useful for converting processed data from `complex_to_power`
and from `autocorrelation` into a 2D array for image analysis and display.
'''
return arr.reshape((arr.shape[0], arr.shape[1]*arr.shape[2]))
def compamp_to_spectrogram(compamp):
'''
Returns spectrogram, with each row containing the measured power spectrum for a XX second time sample.
Using this function is shorthand for:
aca = ibmseti.compamp.Compamp(raw_data)
power = ibmseti.dsp.complex_to_power(aca.complex_data(), aca.header()['over_sampling'])
spectrogram = ibmseti.dsp.reshape_to_2d(power)
Example Usage:
import ibmseti
import matplotlib.pyplot as plt
plt.ion()
aca = ibmseti.compamp.Compamp(raw_data)
spectrogram = ibmseti.dsp.compamp_to_spectrogram(aca)
time_bins = ibmseti.dsp.time_bins( aca.header() )
freq_bins = ibmseti.dsp.frequency_bins( aca.header() )
fig, ax = plt.subplots()
ax.pcolormesh(freq_bins, time_bins, spectrogram)
#Time is on the horizontal axis and frequency is along the vertical.
'''
power = complex_to_power(compamp.complex_data(), compamp.header()['over_sampling'])
return reshape_to_2d(power)
def scale_to_png(arr):
if arr.min() < 0:
sh_arr = arr + -1.0*arr.min()
else:
sh_arr = arr
return np.clip(sh_arr * 255.0/sh_arr.max(), 0, 255).astype(np.uint8)
def compamp_to_ac(compamp, window=np.hanning): # convert single or multi-subband compamps into autocorrelation waterfall
'''
Adapted from Gerry Harp at SETI.
'''
header = compamp.header()
cdata = compamp.complex_data()
#Apply Windowing and Padding
cdata = np.multiply(cdata, window(cdata.shape[2])) # window for smoothing sharp time series start/end in freq. dom.
cdata_normal = cdata - cdata.mean(axis=2)[:, :, np.newaxis] # zero mean, does influence a minority of lines in some plots
cdata = np.zeros((cdata.shape[0], cdata.shape[1], 2 * cdata.shape[2]), complex)
cdata[:, :, cdata.shape[2]/2:cdata.shape[2] + cdata.shape[2]/2] = cdata_normal # zero-pad to 2N
#Perform Autocorrelation
cdata = np.fft.fftshift(np.fft.fft(cdata), 2) # FFT all blocks separately and arrange correctly
cdata = cdata.real**2 + cdata.imag**2 # FFT(AC(x)) = FFT(x)FFT*(x) = abs(x)^2
cdata = np.fft.ifftshift(np.fft.ifft(cdata), 2) # AC(x) = iFFT(abs(x)^2) and arrange correctly
cdata = np.abs(cdata) # magnitude of AC
# normalize each row to sqrt of AC triangle
cdata = np.divide(cdata, np.sqrt(np.sum(cdata, axis=2))[:, :, np.newaxis])
return cdata
def ac_viz(acdata):
'''
Adapted from Gerry Harp at SETI.
Slightly massages the autocorrelated calculation result for better visualization.
In particular, the natural log of the data are calculated and the
values along the subband edges are set to the maximum value of the data,
and the t=0 delay of the autocorrelation result are set to the value of the t=-1 delay.
This is allowed because the t=0, and subband edges do not carry any information.
To avoid log(0), a value of 0.000001 is added to all array elements before being logged.
'''
acdata = np.log(acdata+0.000001) # log to reduce darkening on sides of spectrum, due to AC triangling
acdata[:, :, acdata.shape[2]/2] = acdata[:, :, acdata.shape[2]/2 - 1] # vals at zero delay set to symmetric neighbor vals
acdata[:, :, acdata.shape[2] - 1] = np.max(acdata) # visualize subband edges
return acdata
|
ibm-watson-data-lab/ibmseti | ibmseti/dsp.py | compamp_to_spectrogram | python | def compamp_to_spectrogram(compamp):
'''
Returns spectrogram, with each row containing the measured power spectrum for a XX second time sample.
Using this function is shorthand for:
aca = ibmseti.compamp.Compamp(raw_data)
power = ibmseti.dsp.complex_to_power(aca.complex_data(), aca.header()['over_sampling'])
spectrogram = ibmseti.dsp.reshape_to_2d(power)
Example Usage:
import ibmseti
import matplotlib.pyplot as plt
plt.ion()
aca = ibmseti.compamp.Compamp(raw_data)
spectrogram = ibmseti.dsp.compamp_to_spectrogram(aca)
time_bins = ibmseti.dsp.time_bins( aca.header() )
freq_bins = ibmseti.dsp.frequency_bins( aca.header() )
fig, ax = plt.subplots()
ax.pcolormesh(freq_bins, time_bins, spectrogram)
#Time is on the horizontal axis and frequency is along the vertical.
'''
power = complex_to_power(compamp.complex_data(), compamp.header()['over_sampling'])
return reshape_to_2d(power) | Returns spectrogram, with each row containing the measured power spectrum for a XX second time sample.
Using this function is shorthand for:
aca = ibmseti.compamp.Compamp(raw_data)
power = ibmseti.dsp.complex_to_power(aca.complex_data(), aca.header()['over_sampling'])
spectrogram = ibmseti.dsp.reshape_to_2d(power)
Example Usage:
import ibmseti
import matplotlib.pyplot as plt
plt.ion()
aca = ibmseti.compamp.Compamp(raw_data)
spectrogram = ibmseti.dsp.compamp_to_spectrogram(aca)
time_bins = ibmseti.dsp.time_bins( aca.header() )
freq_bins = ibmseti.dsp.frequency_bins( aca.header() )
fig, ax = plt.subplots()
ax.pcolormesh(freq_bins, time_bins, spectrogram)
#Time is on the horizontal axis and frequency is along the vertical. | train | https://github.com/ibm-watson-data-lab/ibmseti/blob/3361bc0adb4770dc7a554ed7cda292503892acee/ibmseti/dsp.py#L137-L166 | [
"def complex_to_power(cdata, over_sampling): \n '''\n cdata: 3D complex data (shaped by subbands and half_frames, as returned from Compamp.complex_data())\n over_sampling: The fraction of oversampling across subbands (typically 0.25). \n\n returns a 3D spectrogram\n\n Example:\n aca = ibmseti.compamp.Com... | # Copyright (c) 2017 IBM. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Utilities to work with Spectrograms -- Power spectra v time (aka "waterfall" plots)
'''
import numpy as np
from . import constants
def time_bins(header):
'''
Returns the time-axis lower bin edge values for the spectrogram.
'''
return np.arange(header['number_of_half_frames'], dtype=np.float64)*constants.bins_per_half_frame\
*(1.0 - header['over_sampling']) / header['subband_spacing_hz']
def frequency_bins(header):
'''
Returns the frequency-axis lower bin edge values for the spectrogram.
'''
center_frequency = 1.0e6*header['rf_center_frequency']
if header["number_of_subbands"] > 1:
center_frequency += header["subband_spacing_hz"]*(header["number_of_subbands"]/2.0 - 0.5)
return np.fft.fftshift(\
np.fft.fftfreq( int(header["number_of_subbands"] * constants.bins_per_half_frame*(1.0 - header['over_sampling'])), \
1.0/(header["number_of_subbands"]*header["subband_spacing_hz"])) + center_frequency
)
def complex_to_fourier(cdata, over_sampling, norm=None):
'''
cdata: 3D complex data (shaped by subbands and half_frames, as returned from Compamp.complex_data())
over_sampling: The fraction of oversampling across subbands (typically 0.25)
norm: None or "ortho" -- see Numpy FFT Normalization documentation. Default is None.
returns the signal in complex fourier space. The output fourier data are shifted so the central frequency
is at the center of the values. All over-sampled frequencies have been removed so that all frequency bins
can be properly arranged next to each other.
'''
# FFT all blocks separately and rearrange output
fftcdata = np.fft.fftshift(np.fft.fft(cdata, norm=norm), 2)
# slice out oversampled frequencies
if over_sampling > 0:
fftcdata = fftcdata[:, :, int(cdata.shape[2]*over_sampling/2):-int(cdata.shape[2]*over_sampling/2)]
return fftcdata
def fourier_to_time(fcdata, norm=None):
'''
Converts the data from 2D fourier space signal to a 1D time-series.
fcdata: Complex fourier spectrum as a 2D array, The axis=0 is for each "half frame", and axis=1 contains the
fourier-space data for that half frame. Typically there are 129 "half frames" in the data.
Furthermore, it's assumed that fftshift has placed the central frequency at the center of axis=1.
norm: None or "ortho" -- see Numpy FFT Normalization documentation. Default is None.
Usage:
aca = ibmseti.compamp.Compamp(raw_data)
cdata = aca.complex_data() #cdata is a 3D numpy array in the time domain.
#can manipulate cdata in time-space if desired (use various windowing functions, for example)
fcdata = ibmseti.dsp.complex_to_fourier(cdata, aca.header()['over_sampling'])
fcdata_2d = ibmseti.dsp.reshape_to_2d(fcdata)
tcdata_1d = ibmseti.dsp.fourier_to_time(fcdata_2d)
One can recover the Fourier Spectrum of cdata_1d by:
cdata_2d = cdata_1d.reshape(aca.header()['number_of_half_frames'], int(aca.header()['number_of_subbands'] * ibmseti.constants.bins_per_half_frame*(1 - aca.header()['over_sampling'])))
fcdata_2d_v2 = np.fft.fftshift(np.fft.fft(cdata_2d), 1)
#fcdata_2d_v2 and fcdata_2d should be the same
np.sum(np.sum(fcdata_2d - fcdata_2d_v2)) # should equal to approximately 0
'''
return np.fft.ifft(np.fft.ifftshift(fcdata, 1),norm=norm).reshape(fcdata.shape[0] * fcdata.shape[1]) # single complex time series
def complex_to_power(cdata, over_sampling):
'''
cdata: 3D complex data (shaped by subbands and half_frames, as returned from Compamp.complex_data())
over_sampling: The fraction of oversampling across subbands (typically 0.25).
returns a 3D spectrogram
Example:
aca = ibmseti.compamp.Compamp(raw_data)
cdata = aca.complex_data()
#can perform any transformations on cdata here, such as applying hanning windows for smoother FFT results.
#cdata = np.multiply(cdata, np.hanning(constants.bins_per_half_frame))
power = ibmseti.dsp.complex_to_power(, aca.header()['over_sampling'])
Typically, this 3D spectrogram is rehaped so that the subbands are aligned next to each other
in a 2D spectrogram
spectrogram = ibmseti.dsp.reshape_to_2d(power)
'''
fftcdata = complex_to_fourier(cdata, over_sampling)
# calculate power, normalize and amplify by factor 15 (what is the factor of 15 for?)
fftcdata = np.multiply(fftcdata.real**2 + fftcdata.imag**2, 15.0/cdata.shape[2])
return fftcdata
def reshape_to_2d(arr):
'''
Assumes a 3D Numpy array, and reshapes like
arr.reshape((arr.shape[0], arr.shape[1]*arr.shape[2]))
This is useful for converting processed data from `complex_to_power`
and from `autocorrelation` into a 2D array for image analysis and display.
'''
return arr.reshape((arr.shape[0], arr.shape[1]*arr.shape[2]))
def compamp_to_spectrogram(compamp):
'''
Returns spectrogram, with each row containing the measured power spectrum for a XX second time sample.
Using this function is shorthand for:
aca = ibmseti.compamp.Compamp(raw_data)
power = ibmseti.dsp.complex_to_power(aca.complex_data(), aca.header()['over_sampling'])
spectrogram = ibmseti.dsp.reshape_to_2d(power)
Example Usage:
import ibmseti
import matplotlib.pyplot as plt
plt.ion()
aca = ibmseti.compamp.Compamp(raw_data)
spectrogram = ibmseti.dsp.compamp_to_spectrogram(aca)
time_bins = ibmseti.dsp.time_bins( aca.header() )
freq_bins = ibmseti.dsp.frequency_bins( aca.header() )
fig, ax = plt.subplots()
ax.pcolormesh(freq_bins, time_bins, spectrogram)
#Time is on the horizontal axis and frequency is along the vertical.
'''
power = complex_to_power(compamp.complex_data(), compamp.header()['over_sampling'])
return reshape_to_2d(power)
def scale_to_png(arr):
if arr.min() < 0:
sh_arr = arr + -1.0*arr.min()
else:
sh_arr = arr
return np.clip(sh_arr * 255.0/sh_arr.max(), 0, 255).astype(np.uint8)
def compamp_to_ac(compamp, window=np.hanning): # convert single or multi-subband compamps into autocorrelation waterfall
'''
Adapted from Gerry Harp at SETI.
'''
header = compamp.header()
cdata = compamp.complex_data()
#Apply Windowing and Padding
cdata = np.multiply(cdata, window(cdata.shape[2])) # window for smoothing sharp time series start/end in freq. dom.
cdata_normal = cdata - cdata.mean(axis=2)[:, :, np.newaxis] # zero mean, does influence a minority of lines in some plots
cdata = np.zeros((cdata.shape[0], cdata.shape[1], 2 * cdata.shape[2]), complex)
cdata[:, :, cdata.shape[2]/2:cdata.shape[2] + cdata.shape[2]/2] = cdata_normal # zero-pad to 2N
#Perform Autocorrelation
cdata = np.fft.fftshift(np.fft.fft(cdata), 2) # FFT all blocks separately and arrange correctly
cdata = cdata.real**2 + cdata.imag**2 # FFT(AC(x)) = FFT(x)FFT*(x) = abs(x)^2
cdata = np.fft.ifftshift(np.fft.ifft(cdata), 2) # AC(x) = iFFT(abs(x)^2) and arrange correctly
cdata = np.abs(cdata) # magnitude of AC
# normalize each row to sqrt of AC triangle
cdata = np.divide(cdata, np.sqrt(np.sum(cdata, axis=2))[:, :, np.newaxis])
return cdata
def ac_viz(acdata):
'''
Adapted from Gerry Harp at SETI.
Slightly massages the autocorrelated calculation result for better visualization.
In particular, the natural log of the data are calculated and the
values along the subband edges are set to the maximum value of the data,
and the t=0 delay of the autocorrelation result are set to the value of the t=-1 delay.
This is allowed because the t=0, and subband edges do not carry any information.
To avoid log(0), a value of 0.000001 is added to all array elements before being logged.
'''
acdata = np.log(acdata+0.000001) # log to reduce darkening on sides of spectrum, due to AC triangling
acdata[:, :, acdata.shape[2]/2] = acdata[:, :, acdata.shape[2]/2 - 1] # vals at zero delay set to symmetric neighbor vals
acdata[:, :, acdata.shape[2] - 1] = np.max(acdata) # visualize subband edges
return acdata
|
ibm-watson-data-lab/ibmseti | ibmseti/dsp.py | compamp_to_ac | python | def compamp_to_ac(compamp, window=np.hanning): # convert single or multi-subband compamps into autocorrelation waterfall
'''
Adapted from Gerry Harp at SETI.
'''
header = compamp.header()
cdata = compamp.complex_data()
#Apply Windowing and Padding
cdata = np.multiply(cdata, window(cdata.shape[2])) # window for smoothing sharp time series start/end in freq. dom.
cdata_normal = cdata - cdata.mean(axis=2)[:, :, np.newaxis] # zero mean, does influence a minority of lines in some plots
cdata = np.zeros((cdata.shape[0], cdata.shape[1], 2 * cdata.shape[2]), complex)
cdata[:, :, cdata.shape[2]/2:cdata.shape[2] + cdata.shape[2]/2] = cdata_normal # zero-pad to 2N
#Perform Autocorrelation
cdata = np.fft.fftshift(np.fft.fft(cdata), 2) # FFT all blocks separately and arrange correctly
cdata = cdata.real**2 + cdata.imag**2 # FFT(AC(x)) = FFT(x)FFT*(x) = abs(x)^2
cdata = np.fft.ifftshift(np.fft.ifft(cdata), 2) # AC(x) = iFFT(abs(x)^2) and arrange correctly
cdata = np.abs(cdata) # magnitude of AC
# normalize each row to sqrt of AC triangle
cdata = np.divide(cdata, np.sqrt(np.sum(cdata, axis=2))[:, :, np.newaxis])
return cdata | Adapted from Gerry Harp at SETI. | train | https://github.com/ibm-watson-data-lab/ibmseti/blob/3361bc0adb4770dc7a554ed7cda292503892acee/ibmseti/dsp.py#L176-L202 | null | # Copyright (c) 2017 IBM. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Utilities to work with Spectrograms -- Power spectra v time (aka "waterfall" plots)
'''
import numpy as np
from . import constants
def time_bins(header):
'''
Returns the time-axis lower bin edge values for the spectrogram.
'''
return np.arange(header['number_of_half_frames'], dtype=np.float64)*constants.bins_per_half_frame\
*(1.0 - header['over_sampling']) / header['subband_spacing_hz']
def frequency_bins(header):
'''
Returns the frequency-axis lower bin edge values for the spectrogram.
'''
center_frequency = 1.0e6*header['rf_center_frequency']
if header["number_of_subbands"] > 1:
center_frequency += header["subband_spacing_hz"]*(header["number_of_subbands"]/2.0 - 0.5)
return np.fft.fftshift(\
np.fft.fftfreq( int(header["number_of_subbands"] * constants.bins_per_half_frame*(1.0 - header['over_sampling'])), \
1.0/(header["number_of_subbands"]*header["subband_spacing_hz"])) + center_frequency
)
def complex_to_fourier(cdata, over_sampling, norm=None):
'''
cdata: 3D complex data (shaped by subbands and half_frames, as returned from Compamp.complex_data())
over_sampling: The fraction of oversampling across subbands (typically 0.25)
norm: None or "ortho" -- see Numpy FFT Normalization documentation. Default is None.
returns the signal in complex fourier space. The output fourier data are shifted so the central frequency
is at the center of the values. All over-sampled frequencies have been removed so that all frequency bins
can be properly arranged next to each other.
'''
# FFT all blocks separately and rearrange output
fftcdata = np.fft.fftshift(np.fft.fft(cdata, norm=norm), 2)
# slice out oversampled frequencies
if over_sampling > 0:
fftcdata = fftcdata[:, :, int(cdata.shape[2]*over_sampling/2):-int(cdata.shape[2]*over_sampling/2)]
return fftcdata
def fourier_to_time(fcdata, norm=None):
'''
Converts the data from 2D fourier space signal to a 1D time-series.
fcdata: Complex fourier spectrum as a 2D array, The axis=0 is for each "half frame", and axis=1 contains the
fourier-space data for that half frame. Typically there are 129 "half frames" in the data.
Furthermore, it's assumed that fftshift has placed the central frequency at the center of axis=1.
norm: None or "ortho" -- see Numpy FFT Normalization documentation. Default is None.
Usage:
aca = ibmseti.compamp.Compamp(raw_data)
cdata = aca.complex_data() #cdata is a 3D numpy array in the time domain.
#can manipulate cdata in time-space if desired (use various windowing functions, for example)
fcdata = ibmseti.dsp.complex_to_fourier(cdata, aca.header()['over_sampling'])
fcdata_2d = ibmseti.dsp.reshape_to_2d(fcdata)
tcdata_1d = ibmseti.dsp.fourier_to_time(fcdata_2d)
One can recover the Fourier Spectrum of cdata_1d by:
cdata_2d = cdata_1d.reshape(aca.header()['number_of_half_frames'], int(aca.header()['number_of_subbands'] * ibmseti.constants.bins_per_half_frame*(1 - aca.header()['over_sampling'])))
fcdata_2d_v2 = np.fft.fftshift(np.fft.fft(cdata_2d), 1)
#fcdata_2d_v2 and fcdata_2d should be the same
np.sum(np.sum(fcdata_2d - fcdata_2d_v2)) # should equal to approximately 0
'''
return np.fft.ifft(np.fft.ifftshift(fcdata, 1),norm=norm).reshape(fcdata.shape[0] * fcdata.shape[1]) # single complex time series
def complex_to_power(cdata, over_sampling):
'''
cdata: 3D complex data (shaped by subbands and half_frames, as returned from Compamp.complex_data())
over_sampling: The fraction of oversampling across subbands (typically 0.25).
returns a 3D spectrogram
Example:
aca = ibmseti.compamp.Compamp(raw_data)
cdata = aca.complex_data()
#can perform any transformations on cdata here, such as applying hanning windows for smoother FFT results.
#cdata = np.multiply(cdata, np.hanning(constants.bins_per_half_frame))
power = ibmseti.dsp.complex_to_power(, aca.header()['over_sampling'])
Typically, this 3D spectrogram is rehaped so that the subbands are aligned next to each other
in a 2D spectrogram
spectrogram = ibmseti.dsp.reshape_to_2d(power)
'''
fftcdata = complex_to_fourier(cdata, over_sampling)
# calculate power, normalize and amplify by factor 15 (what is the factor of 15 for?)
fftcdata = np.multiply(fftcdata.real**2 + fftcdata.imag**2, 15.0/cdata.shape[2])
return fftcdata
def reshape_to_2d(arr):
'''
Assumes a 3D Numpy array, and reshapes like
arr.reshape((arr.shape[0], arr.shape[1]*arr.shape[2]))
This is useful for converting processed data from `complex_to_power`
and from `autocorrelation` into a 2D array for image analysis and display.
'''
return arr.reshape((arr.shape[0], arr.shape[1]*arr.shape[2]))
def compamp_to_spectrogram(compamp):
'''
Returns spectrogram, with each row containing the measured power spectrum for a XX second time sample.
Using this function is shorthand for:
aca = ibmseti.compamp.Compamp(raw_data)
power = ibmseti.dsp.complex_to_power(aca.complex_data(), aca.header()['over_sampling'])
spectrogram = ibmseti.dsp.reshape_to_2d(power)
Example Usage:
import ibmseti
import matplotlib.pyplot as plt
plt.ion()
aca = ibmseti.compamp.Compamp(raw_data)
spectrogram = ibmseti.dsp.compamp_to_spectrogram(aca)
time_bins = ibmseti.dsp.time_bins( aca.header() )
freq_bins = ibmseti.dsp.frequency_bins( aca.header() )
fig, ax = plt.subplots()
ax.pcolormesh(freq_bins, time_bins, spectrogram)
#Time is on the horizontal axis and frequency is along the vertical.
'''
power = complex_to_power(compamp.complex_data(), compamp.header()['over_sampling'])
return reshape_to_2d(power)
def scale_to_png(arr):
if arr.min() < 0:
sh_arr = arr + -1.0*arr.min()
else:
sh_arr = arr
return np.clip(sh_arr * 255.0/sh_arr.max(), 0, 255).astype(np.uint8)
def compamp_to_ac(compamp, window=np.hanning):
    '''
    Adapted from Gerry Harp at SETI.

    Convert single- or multi-subband compamp data into an autocorrelation
    "waterfall": one autocorrelation magnitude spectrum per
    (subband, half frame) block.

    Each block's complex time series is windowed (default: Hanning) to
    soften the sharp start/end of the block, mean-subtracted, zero-padded
    to twice its length, and autocorrelated via the Wiener-Khinchin
    identity AC(x) = iFFT(|FFT(x)|^2).  Each row is then normalized by
    the square root of its autocorrelation "triangle" sum.

    Args:
        compamp: object providing complex_data() -> 3D complex ndarray
            and header() -> dict.
        window: callable taking a length and returning that many window
            coefficients (default np.hanning).

    Returns:
        3D float ndarray of autocorrelation magnitudes; the last axis is
        twice the length of the input's last axis.
    '''
    header = compamp.header()  # NOTE(review): kept for parity with the original; currently unused
    cdata = compamp.complex_data()
    n = cdata.shape[2]

    # Window for smoothing the sharp time-series start/end in the
    # frequency domain, then zero the mean of each block.
    cdata = np.multiply(cdata, window(n))
    cdata_normal = cdata - cdata.mean(axis=2)[:, :, np.newaxis]

    # Zero-pad each block to 2N.  Integer indices are required here:
    # the original's "/" slice indices fail under Python 3.  (The
    # original's clipped slice placed the data in the upper half, which
    # this reproduces; placement does not change |FFT|^2.)
    padded = np.zeros((cdata.shape[0], cdata.shape[1], 2 * n), complex)
    padded[:, :, n:2 * n] = cdata_normal

    # Autocorrelation: FFT all blocks separately, take the power spectrum
    # (FFT(AC(x)) = |FFT(x)|^2), inverse-FFT, and keep the magnitude.
    spectrum = np.fft.fftshift(np.fft.fft(padded), 2)
    power = spectrum.real ** 2 + spectrum.imag ** 2
    ac = np.abs(np.fft.ifftshift(np.fft.ifft(power), 2))

    # Normalize each row to the sqrt of its AC triangle.
    return np.divide(ac, np.sqrt(np.sum(ac, axis=2))[:, :, np.newaxis])
def ac_viz(acdata):
    '''
    Adapted from Gerry Harp at SETI.

    Slightly massage an autocorrelation result for better visualization
    and return it.

    The natural log of the data is taken first (a small constant,
    0.000001, is added to every element to avoid log(0); the log also
    reduces the darkening toward the sides of the spectrum caused by the
    AC triangle).  The t=0 delay bin of each row is then replaced by its
    symmetric (t=-1) neighbor's value, and the subband-edge bin is set to
    the data maximum.  Neither the t=0 bin nor the subband edges carry
    information, so overwriting them is safe and makes the edges visible.
    '''
    acdata = np.log(acdata + 0.000001)
    mid = acdata.shape[2] // 2  # integer index required; "/" broke this under Python 3
    # Values at zero delay carry no information: copy the symmetric neighbor.
    acdata[:, :, mid] = acdata[:, :, mid - 1]
    # Highlight the subband edges with the maximum value.
    acdata[:, :, acdata.shape[2] - 1] = np.max(acdata)
    return acdata
|
ibm-watson-data-lab/ibmseti | ibmseti/dsp.py | ac_viz | python | def ac_viz(acdata):
'''
Adapted from Gerry Harp at SETI.
Slightly massages the autocorrelated calculation result for better visualization.
In particular, the natural log of the data are calculated and the
values along the subband edges are set to the maximum value of the data,
and the t=0 delay of the autocorrelation result are set to the value of the t=-1 delay.
This is allowed because the t=0, and subband edges do not carry any information.
To avoid log(0), a value of 0.000001 is added to all array elements before being logged.
'''
acdata = np.log(acdata+0.000001) # log to reduce darkening on sides of spectrum, due to AC triangling
acdata[:, :, acdata.shape[2]/2] = acdata[:, :, acdata.shape[2]/2 - 1] # vals at zero delay set to symmetric neighbor vals
acdata[:, :, acdata.shape[2] - 1] = np.max(acdata) # visualize subband edges
return acdata | Adapted from Gerry Harp at SETI.
Slightly massages the autocorrelated calculation result for better visualization.
In particular, the natural log of the data are calculated and the
values along the subband edges are set to the maximum value of the data,
and the t=0 delay of the autocorrelation result are set to the value of the t=-1 delay.
This is allowed because the t=0, and subband edges do not carry any information.
To avoid log(0), a value of 0.000001 is added to all array elements before being logged. | train | https://github.com/ibm-watson-data-lab/ibmseti/blob/3361bc0adb4770dc7a554ed7cda292503892acee/ibmseti/dsp.py#L204-L223 | null | # Copyright (c) 2017 IBM. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Utilities to work with Spectrograms -- Power spectra v time (aka "waterfall" plots)
'''
import numpy as np
from . import constants
def time_bins(header):
    '''
    Return the time-axis lower bin edge values for the spectrogram.

    One value per half frame, spaced by the half-frame duration derived
    from the header's bin count, oversampling fraction, and subband
    spacing.
    '''
    frame_indices = np.arange(header['number_of_half_frames'], dtype=np.float64)
    return frame_indices * constants.bins_per_half_frame \
        * (1.0 - header['over_sampling']) / header['subband_spacing_hz']
def frequency_bins(header):
    '''
    Returns the frequency-axis lower bin edge values for the spectrogram.
    '''
    # Header stores the RF center frequency in MHz; convert to Hz.
    center_frequency = 1.0e6*header['rf_center_frequency']
    if header["number_of_subbands"] > 1:
        # With multiple subbands, shift to the middle of the full band.
        center_frequency += header["subband_spacing_hz"]*(header["number_of_subbands"]/2.0 - 0.5)
    # fftfreq yields the bin frequencies for the oversampling-trimmed FFT
    # length; add the center frequency, then fftshift so bins ascend.
    return np.fft.fftshift(
        np.fft.fftfreq( int(header["number_of_subbands"] * constants.bins_per_half_frame*(1.0 - header['over_sampling'])),
            1.0/(header["number_of_subbands"]*header["subband_spacing_hz"])) + center_frequency
        )
def complex_to_fourier(cdata, over_sampling, norm=None):
    '''
    Transform complex time-domain compamp data into complex Fourier space.

    cdata: 3D complex array (shaped by subbands and half frames, as
        returned from Compamp.complex_data()).
    over_sampling: fraction of oversampling across subbands (typically 0.25).
    norm: None or "ortho" -- see the Numpy FFT normalization docs.

    Each block is FFT'd independently and fftshift'd so the central
    frequency sits at the center of the last axis.  The oversampled
    frequencies on both edges are then sliced away so that neighboring
    subbands can be arranged directly next to each other.
    '''
    spectrum = np.fft.fftshift(np.fft.fft(cdata, norm=norm), 2)
    if over_sampling > 0:
        trim = int(cdata.shape[2] * over_sampling / 2)
        spectrum = spectrum[:, :, trim:-trim]
    return spectrum
def fourier_to_time(fcdata, norm=None):
    '''
    Convert a 2D Fourier-space signal back into a single 1D complex
    time series.

    fcdata: 2D complex Fourier spectrum; axis 0 indexes half frames and
        axis 1 holds each half frame's fourier-space data, with the
        central frequency fftshift'd to the center of axis 1.
    norm: None or "ortho" -- see the Numpy FFT normalization docs.

    The shift is undone, each row is inverse-FFT'd, and the resulting
    per-half-frame time series are concatenated into one array.  The
    Fourier spectrum can be recovered by reshaping the result back to
    2D and applying np.fft.fftshift(np.fft.fft(...), 1).
    '''
    half_frames, samples_per_frame = fcdata.shape
    time_blocks = np.fft.ifft(np.fft.ifftshift(fcdata, 1), norm=norm)
    return time_blocks.reshape(half_frames * samples_per_frame)
def complex_to_power(cdata, over_sampling):
    '''
    Compute a 3D power spectrogram from complex compamp data.

    cdata: 3D complex array (shaped by subbands and half frames, as
        returned from Compamp.complex_data()).
    over_sampling: fraction of oversampling across subbands (typically 0.25).

    The data are transformed with complex_to_fourier(), squared to power,
    and scaled by 15/N where N is the number of time samples per block
    (normalization factor retained from the original SETI code).

    Typically the result is flattened with reshape_to_2d() so the
    subbands sit next to each other in a 2D spectrogram.
    '''
    spectrum = complex_to_fourier(cdata, over_sampling)
    scale = 15.0 / cdata.shape[2]
    return np.multiply(spectrum.real**2 + spectrum.imag**2, scale)
def reshape_to_2d(arr):
    '''
    Collapse a 3D Numpy array into 2D by merging its last two axes,
    i.e. arr.reshape((arr.shape[0], arr.shape[1]*arr.shape[2])).

    Useful for turning processed data from `complex_to_power` or the
    autocorrelation routines into a 2D array for image analysis and
    display.
    '''
    rows, inner, bins = arr.shape
    return arr.reshape((rows, inner * bins))
def compamp_to_spectrogram(compamp):
    '''
    Build a 2D spectrogram from a compamp object.

    Each row of the returned array holds the measured power spectrum for
    one half-frame time sample.  This is shorthand for chaining
    complex_to_power() and reshape_to_2d() on the object's complex data:

        aca = ibmseti.compamp.Compamp(raw_data)
        power = ibmseti.dsp.complex_to_power(aca.complex_data(), aca.header()['over_sampling'])
        spectrogram = ibmseti.dsp.reshape_to_2d(power)

    Example Usage:

        import ibmseti
        import matplotlib.pyplot as plt
        plt.ion()

        aca = ibmseti.compamp.Compamp(raw_data)
        spectrogram = ibmseti.dsp.compamp_to_spectrogram(aca)

        time_bins = ibmseti.dsp.time_bins( aca.header() )
        freq_bins = ibmseti.dsp.frequency_bins( aca.header() )

        fig, ax = plt.subplots()
        ax.pcolormesh(freq_bins, time_bins, spectrogram)
        # Time is on the horizontal axis and frequency is along the vertical.
    '''
    over_sampling = compamp.header()['over_sampling']
    power_cube = complex_to_power(compamp.complex_data(), over_sampling)
    return reshape_to_2d(power_cube)
def scale_to_png(arr):
    '''
    Linearly rescale the values of `arr` into the displayable 0-255 range
    and return them as an 8-bit unsigned integer array (suitable for
    writing out as a grayscale PNG).

    Arrays containing negative values are first shifted up so that their
    minimum becomes zero.  A constant-zero array (zero peak after the
    shift) is returned as all zeros instead of dividing by zero, which in
    the original code produced NaNs cast to uint8.
    '''
    minimum = arr.min()
    # Shift so the data are non-negative before scaling.
    shifted = arr - minimum if minimum < 0 else arr
    peak = shifted.max()
    if peak == 0:
        # Avoid 0/0 -> NaN for degenerate input; map it to black.
        return np.zeros(arr.shape, dtype=np.uint8)
    return np.clip(shifted * 255.0 / peak, 0, 255).astype(np.uint8)
def compamp_to_ac(compamp, window=np.hanning):
    '''
    Adapted from Gerry Harp at SETI.

    Convert single- or multi-subband compamp data into an autocorrelation
    "waterfall": one autocorrelation magnitude spectrum per
    (subband, half frame) block.

    Each block's complex time series is windowed (default: Hanning) to
    soften the sharp start/end of the block, mean-subtracted, zero-padded
    to twice its length, and autocorrelated via the Wiener-Khinchin
    identity AC(x) = iFFT(|FFT(x)|^2).  Each row is then normalized by
    the square root of its autocorrelation "triangle" sum.

    Args:
        compamp: object providing complex_data() -> 3D complex ndarray
            and header() -> dict.
        window: callable taking a length and returning that many window
            coefficients (default np.hanning).

    Returns:
        3D float ndarray of autocorrelation magnitudes; the last axis is
        twice the length of the input's last axis.
    '''
    header = compamp.header()  # NOTE(review): kept for parity with the original; currently unused
    cdata = compamp.complex_data()
    n = cdata.shape[2]

    # Window for smoothing the sharp time-series start/end in the
    # frequency domain, then zero the mean of each block.
    cdata = np.multiply(cdata, window(n))
    cdata_normal = cdata - cdata.mean(axis=2)[:, :, np.newaxis]

    # Zero-pad each block to 2N.  Integer indices are required here:
    # the original's "/" slice indices fail under Python 3.  (The
    # original's clipped slice placed the data in the upper half, which
    # this reproduces; placement does not change |FFT|^2.)
    padded = np.zeros((cdata.shape[0], cdata.shape[1], 2 * n), complex)
    padded[:, :, n:2 * n] = cdata_normal

    # Autocorrelation: FFT all blocks separately, take the power spectrum
    # (FFT(AC(x)) = |FFT(x)|^2), inverse-FFT, and keep the magnitude.
    spectrum = np.fft.fftshift(np.fft.fft(padded), 2)
    power = spectrum.real ** 2 + spectrum.imag ** 2
    ac = np.abs(np.fft.ifftshift(np.fft.ifft(power), 2))

    # Normalize each row to the sqrt of its AC triangle.
    return np.divide(ac, np.sqrt(np.sum(ac, axis=2))[:, :, np.newaxis])
def ac_viz(acdata):
    '''
    Adapted from Gerry Harp at SETI.

    Slightly massage an autocorrelation result for better visualization
    and return it.

    The natural log of the data is taken first (a small constant,
    0.000001, is added to every element to avoid log(0); the log also
    reduces the darkening toward the sides of the spectrum caused by the
    AC triangle).  The t=0 delay bin of each row is then replaced by its
    symmetric (t=-1) neighbor's value, and the subband-edge bin is set to
    the data maximum.  Neither the t=0 bin nor the subband edges carry
    information, so overwriting them is safe and makes the edges visible.
    '''
    acdata = np.log(acdata + 0.000001)
    mid = acdata.shape[2] // 2  # integer index required; "/" broke this under Python 3
    # Values at zero delay carry no information: copy the symmetric neighbor.
    acdata[:, :, mid] = acdata[:, :, mid - 1]
    # Highlight the subband edges with the maximum value.
    acdata[:, :, acdata.shape[2] - 1] = np.max(acdata)
    return acdata
|
whyscream/dspam-milter | dspam/milter.py | DspamMilter.connect | python | def connect(self, hostname, family, hostaddr):
self.client_ip = hostaddr[0]
self.client_port = hostaddr[1]
self.time_start = time.time()
logger.debug('<{}> Connect from {}[{}]:{}'.format(
self.id, hostname, self.client_ip, self.client_port))
return Milter.CONTINUE | Log new connections. | train | https://github.com/whyscream/dspam-milter/blob/f9939b718eed02cb7e56f8c5b921db4cfe1cd85a/dspam/milter.py#L74-L84 | null | class DspamMilter(Milter.Base):
"""
A milter interface to the DSPAM daemon.
This milter can be added to an MTA setup so messages can be inspected
by a DSPAM server, and optionally rejected or quarantined based on the
classification results.
"""
# Constants defining possible return codes for compute_verdict()
VERDICT_ACCEPT = 1
VERDICT_QUARANTINE = 2
VERDICT_REJECT = 3
# Default configuration
static_user = None
headers = {
'Processed': 0,
'Confidence': 0,
'Probability': 0,
'Result': 0,
'Signature': 0
}
header_prefix = 'X-DSPAM-'
reject_classes = {'Blacklisted': 0, 'Blocklisted': 0, 'Spam': 0.9}
quarantine_classes = {'Virus': 0}
accept_classes = {'Innocent': 0, 'Whitelisted': 0}
recipient_delimiter = '+'
def __init__(self):
"""
Create a new milter instance.
"""
self.id = Milter.uniqueID()
self.message = ''
self.recipients = []
self.dspam = None
self.remove_headers = []
if self.recipient_delimiter:
self.recipient_delimiter_re = re.compile('[{}][^@]*'.format(
re.escape(self.recipient_delimiter)))
else:
self.recipient_delimiter_re = None
def envrcpt(self, rcpt, *params):
"""
Send all recipients to DSPAM.
"""
if rcpt.startswith('<'):
rcpt = rcpt[1:]
if rcpt.endswith('>'):
rcpt = rcpt[:-1]
if self.recipient_delimiter_re:
rcpt = self.recipient_delimiter_re.sub('', rcpt)
if rcpt not in self.recipients:
self.recipients.append(rcpt)
logger.debug('<{}> Received RCPT {}'.format(self.id, rcpt))
return Milter.CONTINUE
@Milter.noreply
def header(self, name, value):
"""
Store all message headers, optionally clean them up.
This simply stores all message headers so we can send them to DSPAM.
Additionally, headers that have the same prefix as the ones we're
about to add are deleted.
"""
self.message += "{}: {}\r\n".format(name, value)
logger.debug('<{}> Received {} header'.format(self.id, name))
if name.lower().startswith(self.header_prefix.lower()):
self.remove_headers.append(name)
logger.debug('<{}> Going to remove {} header'.format(
self.id, name))
return Milter.CONTINUE
@Milter.noreply
def eoh(self):
"""
Store end of message headers.
"""
self.message += "\r\n"
return Milter.CONTINUE
@Milter.noreply
def body(self, block):
"""
Store message body.
"""
self.message += block
logger.debug('<{}> Received {} bytes of message body'.format(
self.id, len(block)))
return Milter.CONTINUE
def eom(self):
"""
Send the message to DSPAM for classification and a return a milter
response based on the results.
If <DspamMilter>.static_user is set, that single DSPAM user account
will be used for processing the message. If it is unset, all envelope
recipients will be passed to DSPAM, and the final decision is based on
the least invasive result in all their classification results.
"""
for header in self.remove_headers:
self.chgheader(header, 1, '')
logger.info('<{}> Removing existing {} header'.format(
self.id, header))
queue_id = self.getsymval('i')
logger.debug(
'<{}> Sending message with MTA queue id {} to DSPAM'.format(
self.id, queue_id))
try:
if not self.dspam:
self.dspam = DspamClient()
self.dspam.connect()
self.dspam.lhlo()
if not self.dspam.dlmtp:
logger.warning(
'<{}> Connection to DSPAM is established, but DLMTP '
'seems unavailable'.format(self.id))
else:
self.dspam.rset()
except DspamClientError as err:
logger.error(
'<{}> An error ocurred while connecting to DSPAM: {}'.format(
self.id, err))
return Milter.TEMPFAIL
try:
self.dspam.mailfrom(client_args='--process --deliver=summary')
if self.static_user:
self.dspam.rcptto((self.static_user,))
else:
self.dspam.rcptto(self.recipients)
self.dspam.data(self.message)
except DspamClientError as err:
logger.error(
'<{}> An error ocurred while talking to DSPAM: {}'.format(
self.id, err))
return Milter.TEMPFAIL
# Clear caches
self.message = ''
self.recipients = []
# With multiple recipients, if different verdicts were returned, always
# use the 'lowest' verdict as final, so mail is not lost unexpected.
final_verdict = None
for rcpt in self.dspam.results:
results = self.dspam.results[rcpt]
logger.info(
'<{0}> DSPAM returned results for message with queue id {1} '
'and RCPT {2}: {3}'.format(
self.id, queue_id, rcpt,
' '.join('{}={}'.format(k, v) for k, v in results.iteritems())))
verdict = self.compute_verdict(results)
if final_verdict is None or verdict < final_verdict:
final_verdict = verdict
final_results = results
if final_verdict == self.VERDICT_REJECT:
logger.info(
'<{0}> Rejecting message with queue id {1} based on DSPAM '
'results: user={2[user]} class={2[class]} '
'confidence={2[confidence]}'.format(
self.id, queue_id, final_results))
self.setreply('550', '5.7.1', 'Message is {0[class]}'.format(
final_results))
return Milter.REJECT
elif final_verdict == self.VERDICT_QUARANTINE:
logger.info(
'<{0}> Quarantining message with queue id {1} based on DSPAM '
'results: user={2[user]} class={2[class]} '
'confidence={2[confidence]}'.format(
self.id, queue_id, final_results))
self.add_dspam_headers(final_results)
self.quarantine('Message is {0[class]} according to DSPAM'.format(
final_results))
return Milter.ACCEPT
else:
logger.info(
'<{0}> Accepting message with queue id {1} based on DSPAM '
'results: user={2[user]} class={2[class]} '
'confidence={2[confidence]}'.format(
self.id, queue_id, final_results))
self.add_dspam_headers(final_results)
return Milter.ACCEPT
def close(self):
"""
Log disconnects.
"""
time_spent = time.time() - self.time_start
logger.debug(
'<{}> Disconnect from [{}]:{}, time spent {:.3f} seconds'.format(
self.id, self.client_ip, self.client_port, time_spent))
return Milter.CONTINUE
def compute_verdict(self, results):
"""
Match results to the configured reject, quarantine and accept classes,
and return a verdict based on that.
The verdict classes are matched in the order: reject_classes,
quarantine_classes, accept_classes. This means that you can configure
different verdicts for different confidence results, for instance:
reject_classes= Spam:0.99 # Reject obvious spam
quarantine_classes = Spam:0.7 # Quarantine spam with confidence
# between 0.7 and 0.99
accept_classes = Spam # Accept low confidence spam (good
# for FP and retraining)
Args:
results -- A results dictionary from DspamClient.
"""
if results['class'] in self.reject_classes:
threshold = self.reject_classes[results['class']]
if float(results['confidence']) >= threshold:
logger.debug(
'<{0}> Suggesting to reject the message based on DSPAM '
'results: user={1[user]}, class={1[class]}, '
'confidence={1[confidence]}'.format(self.id, results))
return self.VERDICT_REJECT
if results['class'] in self.quarantine_classes:
threshold = self.quarantine_classes[results['class']]
if float(results['confidence']) >= threshold:
logger.debug(
'<{0}> Suggesting to quarantine the message based on '
'DSPAM results: user={1[user]}, class={1[class]}, '
'confidence={1[confidence]}'.format(self.id, results))
return self.VERDICT_QUARANTINE
if results['class'] in self.accept_classes:
threshold = self.accept_classes[results['class']]
if float(results['confidence']) >= threshold:
logger.debug(
'<{0}> Suggesting to accept the message based on DSPAM '
'results: user={1[user]}, class={1[class]}, '
'confidence={1[confidence]}'.format(self.id, results))
return self.VERDICT_ACCEPT
logger.debug(
'<{0}> Suggesting to accept the message, no verdict class matched '
'DSPAM results: user={1[user]}, class={1[class]}, '
'confidence={1[confidence]}'.format(self.id, results))
return self.VERDICT_ACCEPT
def add_dspam_headers(self, results):
"""
Format DSPAM headers with passed results, and add them to the message.
Args:
results -- A results dictionary from DspamClient.
"""
for header in self.headers:
hname = self.header_prefix + header
if header.lower() in results:
hvalue = results[header.lower()]
logger.debug(
'<{}> Adding header {}: {}'.format(self.id, hname, hvalue))
self.addheader(hname, hvalue)
elif header == 'Processed':
# X-DSPAM-Processed: Wed Dec 12 02:19:23 2012
hvalue = datetime.datetime.now().strftime(
'%a %b %d %H:%M:%S %Y')
logger.debug(
'<{}> Adding header {}: {}'.format(self.id, hname, hvalue))
self.addheader(hname, hvalue)
else:
logger.warning(
'<{}> Not adding header {}, no data available in '
'DSPAM results'.format(self.id, hname))
|
whyscream/dspam-milter | dspam/milter.py | DspamMilter.envrcpt | python | def envrcpt(self, rcpt, *params):
if rcpt.startswith('<'):
rcpt = rcpt[1:]
if rcpt.endswith('>'):
rcpt = rcpt[:-1]
if self.recipient_delimiter_re:
rcpt = self.recipient_delimiter_re.sub('', rcpt)
if rcpt not in self.recipients:
self.recipients.append(rcpt)
logger.debug('<{}> Received RCPT {}'.format(self.id, rcpt))
return Milter.CONTINUE | Send all recipients to DSPAM. | train | https://github.com/whyscream/dspam-milter/blob/f9939b718eed02cb7e56f8c5b921db4cfe1cd85a/dspam/milter.py#L86-L100 | null | class DspamMilter(Milter.Base):
"""
A milter interface to the DSPAM daemon.
This milter can be added to an MTA setup so messages can be inspected
by a DSPAM server, and optionally rejected or quarantined based on the
classification results.
"""
# Constants defining possible return codes for compute_verdict()
VERDICT_ACCEPT = 1
VERDICT_QUARANTINE = 2
VERDICT_REJECT = 3
# Default configuration
static_user = None
headers = {
'Processed': 0,
'Confidence': 0,
'Probability': 0,
'Result': 0,
'Signature': 0
}
header_prefix = 'X-DSPAM-'
reject_classes = {'Blacklisted': 0, 'Blocklisted': 0, 'Spam': 0.9}
quarantine_classes = {'Virus': 0}
accept_classes = {'Innocent': 0, 'Whitelisted': 0}
recipient_delimiter = '+'
def __init__(self):
"""
Create a new milter instance.
"""
self.id = Milter.uniqueID()
self.message = ''
self.recipients = []
self.dspam = None
self.remove_headers = []
if self.recipient_delimiter:
self.recipient_delimiter_re = re.compile('[{}][^@]*'.format(
re.escape(self.recipient_delimiter)))
else:
self.recipient_delimiter_re = None
def connect(self, hostname, family, hostaddr):
"""
Log new connections.
"""
self.client_ip = hostaddr[0]
self.client_port = hostaddr[1]
self.time_start = time.time()
logger.debug('<{}> Connect from {}[{}]:{}'.format(
self.id, hostname, self.client_ip, self.client_port))
return Milter.CONTINUE
@Milter.noreply
def header(self, name, value):
"""
Store all message headers, optionally clean them up.
This simply stores all message headers so we can send them to DSPAM.
Additionally, headers that have the same prefix as the ones we're
about to add are deleted.
"""
self.message += "{}: {}\r\n".format(name, value)
logger.debug('<{}> Received {} header'.format(self.id, name))
if name.lower().startswith(self.header_prefix.lower()):
self.remove_headers.append(name)
logger.debug('<{}> Going to remove {} header'.format(
self.id, name))
return Milter.CONTINUE
@Milter.noreply
def eoh(self):
"""
Store end of message headers.
"""
self.message += "\r\n"
return Milter.CONTINUE
@Milter.noreply
def body(self, block):
"""
Store message body.
"""
self.message += block
logger.debug('<{}> Received {} bytes of message body'.format(
self.id, len(block)))
return Milter.CONTINUE
def eom(self):
"""
Send the message to DSPAM for classification and a return a milter
response based on the results.
If <DspamMilter>.static_user is set, that single DSPAM user account
will be used for processing the message. If it is unset, all envelope
recipients will be passed to DSPAM, and the final decision is based on
the least invasive result in all their classification results.
"""
for header in self.remove_headers:
self.chgheader(header, 1, '')
logger.info('<{}> Removing existing {} header'.format(
self.id, header))
queue_id = self.getsymval('i')
logger.debug(
'<{}> Sending message with MTA queue id {} to DSPAM'.format(
self.id, queue_id))
try:
if not self.dspam:
self.dspam = DspamClient()
self.dspam.connect()
self.dspam.lhlo()
if not self.dspam.dlmtp:
logger.warning(
'<{}> Connection to DSPAM is established, but DLMTP '
'seems unavailable'.format(self.id))
else:
self.dspam.rset()
except DspamClientError as err:
logger.error(
'<{}> An error ocurred while connecting to DSPAM: {}'.format(
self.id, err))
return Milter.TEMPFAIL
try:
self.dspam.mailfrom(client_args='--process --deliver=summary')
if self.static_user:
self.dspam.rcptto((self.static_user,))
else:
self.dspam.rcptto(self.recipients)
self.dspam.data(self.message)
except DspamClientError as err:
logger.error(
'<{}> An error ocurred while talking to DSPAM: {}'.format(
self.id, err))
return Milter.TEMPFAIL
# Clear caches
self.message = ''
self.recipients = []
# With multiple recipients, if different verdicts were returned, always
# use the 'lowest' verdict as final, so mail is not lost unexpected.
final_verdict = None
for rcpt in self.dspam.results:
results = self.dspam.results[rcpt]
logger.info(
'<{0}> DSPAM returned results for message with queue id {1} '
'and RCPT {2}: {3}'.format(
self.id, queue_id, rcpt,
' '.join('{}={}'.format(k, v) for k, v in results.iteritems())))
verdict = self.compute_verdict(results)
if final_verdict is None or verdict < final_verdict:
final_verdict = verdict
final_results = results
if final_verdict == self.VERDICT_REJECT:
logger.info(
'<{0}> Rejecting message with queue id {1} based on DSPAM '
'results: user={2[user]} class={2[class]} '
'confidence={2[confidence]}'.format(
self.id, queue_id, final_results))
self.setreply('550', '5.7.1', 'Message is {0[class]}'.format(
final_results))
return Milter.REJECT
elif final_verdict == self.VERDICT_QUARANTINE:
logger.info(
'<{0}> Quarantining message with queue id {1} based on DSPAM '
'results: user={2[user]} class={2[class]} '
'confidence={2[confidence]}'.format(
self.id, queue_id, final_results))
self.add_dspam_headers(final_results)
self.quarantine('Message is {0[class]} according to DSPAM'.format(
final_results))
return Milter.ACCEPT
else:
logger.info(
'<{0}> Accepting message with queue id {1} based on DSPAM '
'results: user={2[user]} class={2[class]} '
'confidence={2[confidence]}'.format(
self.id, queue_id, final_results))
self.add_dspam_headers(final_results)
return Milter.ACCEPT
def close(self):
"""
Log disconnects.
"""
time_spent = time.time() - self.time_start
logger.debug(
'<{}> Disconnect from [{}]:{}, time spent {:.3f} seconds'.format(
self.id, self.client_ip, self.client_port, time_spent))
return Milter.CONTINUE
def compute_verdict(self, results):
"""
Match results to the configured reject, quarantine and accept classes,
and return a verdict based on that.
The verdict classes are matched in the order: reject_classes,
quarantine_classes, accept_classes. This means that you can configure
different verdicts for different confidence results, for instance:
reject_classes= Spam:0.99 # Reject obvious spam
quarantine_classes = Spam:0.7 # Quarantine spam with confidence
# between 0.7 and 0.99
accept_classes = Spam # Accept low confidence spam (good
# for FP and retraining)
Args:
results -- A results dictionary from DspamClient.
"""
if results['class'] in self.reject_classes:
threshold = self.reject_classes[results['class']]
if float(results['confidence']) >= threshold:
logger.debug(
'<{0}> Suggesting to reject the message based on DSPAM '
'results: user={1[user]}, class={1[class]}, '
'confidence={1[confidence]}'.format(self.id, results))
return self.VERDICT_REJECT
if results['class'] in self.quarantine_classes:
threshold = self.quarantine_classes[results['class']]
if float(results['confidence']) >= threshold:
logger.debug(
'<{0}> Suggesting to quarantine the message based on '
'DSPAM results: user={1[user]}, class={1[class]}, '
'confidence={1[confidence]}'.format(self.id, results))
return self.VERDICT_QUARANTINE
if results['class'] in self.accept_classes:
threshold = self.accept_classes[results['class']]
if float(results['confidence']) >= threshold:
logger.debug(
'<{0}> Suggesting to accept the message based on DSPAM '
'results: user={1[user]}, class={1[class]}, '
'confidence={1[confidence]}'.format(self.id, results))
return self.VERDICT_ACCEPT
logger.debug(
'<{0}> Suggesting to accept the message, no verdict class matched '
'DSPAM results: user={1[user]}, class={1[class]}, '
'confidence={1[confidence]}'.format(self.id, results))
return self.VERDICT_ACCEPT
def add_dspam_headers(self, results):
"""
Format DSPAM headers with passed results, and add them to the message.
Args:
results -- A results dictionary from DspamClient.
"""
for header in self.headers:
hname = self.header_prefix + header
if header.lower() in results:
hvalue = results[header.lower()]
logger.debug(
'<{}> Adding header {}: {}'.format(self.id, hname, hvalue))
self.addheader(hname, hvalue)
elif header == 'Processed':
# X-DSPAM-Processed: Wed Dec 12 02:19:23 2012
hvalue = datetime.datetime.now().strftime(
'%a %b %d %H:%M:%S %Y')
logger.debug(
'<{}> Adding header {}: {}'.format(self.id, hname, hvalue))
self.addheader(hname, hvalue)
else:
logger.warning(
'<{}> Not adding header {}, no data available in '
'DSPAM results'.format(self.id, hname))
|
whyscream/dspam-milter | dspam/milter.py | DspamMilter.header | python | def header(self, name, value):
self.message += "{}: {}\r\n".format(name, value)
logger.debug('<{}> Received {} header'.format(self.id, name))
if name.lower().startswith(self.header_prefix.lower()):
self.remove_headers.append(name)
logger.debug('<{}> Going to remove {} header'.format(
self.id, name))
return Milter.CONTINUE | Store all message headers, optionally clean them up.
This simply stores all message headers so we can send them to DSPAM.
Additionally, headers that have the same prefix as the ones we're
about to add are deleted. | train | https://github.com/whyscream/dspam-milter/blob/f9939b718eed02cb7e56f8c5b921db4cfe1cd85a/dspam/milter.py#L103-L118 | null | class DspamMilter(Milter.Base):
"""
A milter interface to the DSPAM daemon.
This milter can be added to an MTA setup so messages can be inspected
by a DSPAM server, and optionally rejected or quarantined based on the
classification results.
"""
# Constants defining possible return codes for compute_verdict()
VERDICT_ACCEPT = 1
VERDICT_QUARANTINE = 2
VERDICT_REJECT = 3
# Default configuration
static_user = None
headers = {
'Processed': 0,
'Confidence': 0,
'Probability': 0,
'Result': 0,
'Signature': 0
}
header_prefix = 'X-DSPAM-'
reject_classes = {'Blacklisted': 0, 'Blocklisted': 0, 'Spam': 0.9}
quarantine_classes = {'Virus': 0}
accept_classes = {'Innocent': 0, 'Whitelisted': 0}
recipient_delimiter = '+'
def __init__(self):
"""
Create a new milter instance.
"""
self.id = Milter.uniqueID()
self.message = ''
self.recipients = []
self.dspam = None
self.remove_headers = []
if self.recipient_delimiter:
self.recipient_delimiter_re = re.compile('[{}][^@]*'.format(
re.escape(self.recipient_delimiter)))
else:
self.recipient_delimiter_re = None
def connect(self, hostname, family, hostaddr):
"""
Log new connections.
"""
self.client_ip = hostaddr[0]
self.client_port = hostaddr[1]
self.time_start = time.time()
logger.debug('<{}> Connect from {}[{}]:{}'.format(
self.id, hostname, self.client_ip, self.client_port))
return Milter.CONTINUE
def envrcpt(self, rcpt, *params):
"""
Send all recipients to DSPAM.
"""
if rcpt.startswith('<'):
rcpt = rcpt[1:]
if rcpt.endswith('>'):
rcpt = rcpt[:-1]
if self.recipient_delimiter_re:
rcpt = self.recipient_delimiter_re.sub('', rcpt)
if rcpt not in self.recipients:
self.recipients.append(rcpt)
logger.debug('<{}> Received RCPT {}'.format(self.id, rcpt))
return Milter.CONTINUE
@Milter.noreply
@Milter.noreply
def eoh(self):
"""
Store end of message headers.
"""
self.message += "\r\n"
return Milter.CONTINUE
@Milter.noreply
def body(self, block):
"""
Store message body.
"""
self.message += block
logger.debug('<{}> Received {} bytes of message body'.format(
self.id, len(block)))
return Milter.CONTINUE
def eom(self):
    """
    Send the message to DSPAM for classification and return a milter
    response based on the results.

    If <DspamMilter>.static_user is set, that single DSPAM user account
    will be used for processing the message. If it is unset, all envelope
    recipients will be passed to DSPAM, and the final decision is based on
    the least invasive result in all their classification results.
    """
    # Drop any pre-existing X-DSPAM-* headers noted while reading headers,
    # so the ones we add below are authoritative.
    for header in self.remove_headers:
        self.chgheader(header, 1, '')
        logger.info('<{}> Removing existing {} header'.format(
            self.id, header))
    queue_id = self.getsymval('i')
    logger.debug(
        '<{}> Sending message with MTA queue id {} to DSPAM'.format(
            self.id, queue_id))
    # Lazily create the DSPAM client connection, or reset a cached one.
    try:
        if not self.dspam:
            self.dspam = DspamClient()
            self.dspam.connect()
            self.dspam.lhlo()
            if not self.dspam.dlmtp:
                logger.warning(
                    '<{}> Connection to DSPAM is established, but DLMTP '
                    'seems unavailable'.format(self.id))
        else:
            self.dspam.rset()
    except DspamClientError as err:
        # Fix: "ocurred" -> "occurred" in the log message.
        logger.error(
            '<{}> An error occurred while connecting to DSPAM: {}'.format(
                self.id, err))
        return Milter.TEMPFAIL
    # Run the LMTP transaction that hands the message to DSPAM.
    try:
        self.dspam.mailfrom(client_args='--process --deliver=summary')
        if self.static_user:
            self.dspam.rcptto((self.static_user,))
        else:
            self.dspam.rcptto(self.recipients)
        self.dspam.data(self.message)
    except DspamClientError as err:
        logger.error(
            '<{}> An error occurred while talking to DSPAM: {}'.format(
                self.id, err))
        return Milter.TEMPFAIL
    # Clear the per-message caches now that DSPAM has a copy.
    self.message = ''
    self.recipients = []
    # With multiple recipients, if different verdicts were returned, always
    # use the 'lowest' (least invasive) verdict as final, so mail is not
    # lost unexpectedly.
    final_verdict = None
    final_results = None
    for rcpt in self.dspam.results:
        results = self.dspam.results[rcpt]
        logger.info(
            '<{0}> DSPAM returned results for message with queue id {1} '
            'and RCPT {2}: {3}'.format(
                self.id, queue_id, rcpt,
                # Fix: .items() instead of the Python-2-only .iteritems().
                ' '.join('{}={}'.format(k, v) for k, v in results.items())))
        verdict = self.compute_verdict(results)
        if final_verdict is None or verdict < final_verdict:
            final_verdict = verdict
            final_results = results
    if final_results is None:
        # Fix: previously an empty results set fell through to the accept
        # branch and raised NameError on final_results. Tempfail instead
        # so the MTA retries later.
        logger.error(
            '<{}> DSPAM returned no results for message with queue id '
            '{}'.format(self.id, queue_id))
        return Milter.TEMPFAIL
    if final_verdict == self.VERDICT_REJECT:
        logger.info(
            '<{0}> Rejecting message with queue id {1} based on DSPAM '
            'results: user={2[user]} class={2[class]} '
            'confidence={2[confidence]}'.format(
                self.id, queue_id, final_results))
        self.setreply('550', '5.7.1', 'Message is {0[class]}'.format(
            final_results))
        return Milter.REJECT
    elif final_verdict == self.VERDICT_QUARANTINE:
        logger.info(
            '<{0}> Quarantining message with queue id {1} based on DSPAM '
            'results: user={2[user]} class={2[class]} '
            'confidence={2[confidence]}'.format(
                self.id, queue_id, final_results))
        self.add_dspam_headers(final_results)
        self.quarantine('Message is {0[class]} according to DSPAM'.format(
            final_results))
        return Milter.ACCEPT
    else:
        logger.info(
            '<{0}> Accepting message with queue id {1} based on DSPAM '
            'results: user={2[user]} class={2[class]} '
            'confidence={2[confidence]}'.format(
                self.id, queue_id, final_results))
        self.add_dspam_headers(final_results)
        return Milter.ACCEPT
def close(self):
    """
    Log the connection teardown, including total time spent.
    """
    # NOTE(review): assumes connect() ran and set time_start — confirm the
    # milter framework guarantees that ordering.
    elapsed = time.time() - self.time_start
    logger.debug(
        '<{}> Disconnect from [{}]:{}, time spent {:.3f} seconds'.format(
            self.id, self.client_ip, self.client_port, elapsed))
    return Milter.CONTINUE
def compute_verdict(self, results):
    """
    Map DSPAM classification results onto a milter verdict.

    The configured verdict classes are consulted in a fixed order
    (reject_classes, then quarantine_classes, then accept_classes); the
    first class whose confidence threshold is met decides. This ordering
    allows different verdicts for different confidence results, e.g.:

        reject_classes= Spam:0.99 # Reject obvious spam
        quarantine_classes = Spam:0.7 # Quarantine spam with confidence
        # between 0.7 and 0.99
        accept_classes = Spam # Accept low confidence spam (good
        # for FP and retraining)

    Args:
        results -- A results dictionary from DspamClient.
    """
    msg_class = results['class']
    checks = (
        ('reject', self.reject_classes, self.VERDICT_REJECT),
        ('quarantine', self.quarantine_classes, self.VERDICT_QUARANTINE),
        ('accept', self.accept_classes, self.VERDICT_ACCEPT),
    )
    for action, classes, verdict in checks:
        if msg_class not in classes:
            continue
        if float(results['confidence']) < classes[msg_class]:
            continue
        logger.debug(
            '<{0}> Suggesting to {1} the message based on DSPAM '
            'results: user={2[user]}, class={2[class]}, '
            'confidence={2[confidence]}'.format(self.id, action, results))
        return verdict
    # Nothing matched (or matched below threshold): default to accept.
    logger.debug(
        '<{0}> Suggesting to accept the message, no verdict class matched '
        'DSPAM results: user={1[user]}, class={1[class]}, '
        'confidence={1[confidence]}'.format(self.id, results))
    return self.VERDICT_ACCEPT
def add_dspam_headers(self, results):
    """
    Format DSPAM headers from the passed results and add them to the
    message.

    Args:
        results -- A results dictionary from DspamClient.
    """
    for header in self.headers:
        hname = self.header_prefix + header
        key = header.lower()
        if key in results:
            hvalue = results[key]
        elif header == 'Processed':
            # Not reported by DSPAM; generate the timestamp locally, e.g.
            # X-DSPAM-Processed: Wed Dec 12 02:19:23 2012
            hvalue = datetime.datetime.now().strftime(
                '%a %b %d %H:%M:%S %Y')
        else:
            logger.warning(
                '<{}> Not adding header {}, no data available in '
                'DSPAM results'.format(self.id, hname))
            continue
        logger.debug(
            '<{}> Adding header {}: {}'.format(self.id, hname, hvalue))
        self.addheader(hname, hvalue)
|
whyscream/dspam-milter | dspam/milter.py | DspamMilter.body | python | def body(self, block):
self.message += block
logger.debug('<{}> Received {} bytes of message body'.format(
self.id, len(block)))
return Milter.CONTINUE | Store message body. | train | https://github.com/whyscream/dspam-milter/blob/f9939b718eed02cb7e56f8c5b921db4cfe1cd85a/dspam/milter.py#L130-L138 | null | class DspamMilter(Milter.Base):
"""
A milter interface to the DSPAM daemon.
This milter can be added to an MTA setup so messages can be inspected
by a DSPAM server, and optionally rejected or quarantined based on the
classification results.
"""
# Constants defining possible return codes for compute_verdict()
VERDICT_ACCEPT = 1
VERDICT_QUARANTINE = 2
VERDICT_REJECT = 3
# Default configuration
static_user = None
headers = {
'Processed': 0,
'Confidence': 0,
'Probability': 0,
'Result': 0,
'Signature': 0
}
header_prefix = 'X-DSPAM-'
reject_classes = {'Blacklisted': 0, 'Blocklisted': 0, 'Spam': 0.9}
quarantine_classes = {'Virus': 0}
accept_classes = {'Innocent': 0, 'Whitelisted': 0}
recipient_delimiter = '+'
def __init__(self):
"""
Create a new milter instance.
"""
self.id = Milter.uniqueID()
self.message = ''
self.recipients = []
self.dspam = None
self.remove_headers = []
if self.recipient_delimiter:
self.recipient_delimiter_re = re.compile('[{}][^@]*'.format(
re.escape(self.recipient_delimiter)))
else:
self.recipient_delimiter_re = None
def connect(self, hostname, family, hostaddr):
"""
Log new connections.
"""
self.client_ip = hostaddr[0]
self.client_port = hostaddr[1]
self.time_start = time.time()
logger.debug('<{}> Connect from {}[{}]:{}'.format(
self.id, hostname, self.client_ip, self.client_port))
return Milter.CONTINUE
def envrcpt(self, rcpt, *params):
"""
Send all recipients to DSPAM.
"""
if rcpt.startswith('<'):
rcpt = rcpt[1:]
if rcpt.endswith('>'):
rcpt = rcpt[:-1]
if self.recipient_delimiter_re:
rcpt = self.recipient_delimiter_re.sub('', rcpt)
if rcpt not in self.recipients:
self.recipients.append(rcpt)
logger.debug('<{}> Received RCPT {}'.format(self.id, rcpt))
return Milter.CONTINUE
@Milter.noreply
def header(self, name, value):
"""
Store all message headers, optionally clean them up.
This simply stores all message headers so we can send them to DSPAM.
Additionally, headers that have the same prefix as the ones we're
about to add are deleted.
"""
self.message += "{}: {}\r\n".format(name, value)
logger.debug('<{}> Received {} header'.format(self.id, name))
if name.lower().startswith(self.header_prefix.lower()):
self.remove_headers.append(name)
logger.debug('<{}> Going to remove {} header'.format(
self.id, name))
return Milter.CONTINUE
@Milter.noreply
def eoh(self):
"""
Store end of message headers.
"""
self.message += "\r\n"
return Milter.CONTINUE
@Milter.noreply
def eom(self):
"""
Send the message to DSPAM for classification and a return a milter
response based on the results.
If <DspamMilter>.static_user is set, that single DSPAM user account
will be used for processing the message. If it is unset, all envelope
recipients will be passed to DSPAM, and the final decision is based on
the least invasive result in all their classification results.
"""
for header in self.remove_headers:
self.chgheader(header, 1, '')
logger.info('<{}> Removing existing {} header'.format(
self.id, header))
queue_id = self.getsymval('i')
logger.debug(
'<{}> Sending message with MTA queue id {} to DSPAM'.format(
self.id, queue_id))
try:
if not self.dspam:
self.dspam = DspamClient()
self.dspam.connect()
self.dspam.lhlo()
if not self.dspam.dlmtp:
logger.warning(
'<{}> Connection to DSPAM is established, but DLMTP '
'seems unavailable'.format(self.id))
else:
self.dspam.rset()
except DspamClientError as err:
logger.error(
'<{}> An error ocurred while connecting to DSPAM: {}'.format(
self.id, err))
return Milter.TEMPFAIL
try:
self.dspam.mailfrom(client_args='--process --deliver=summary')
if self.static_user:
self.dspam.rcptto((self.static_user,))
else:
self.dspam.rcptto(self.recipients)
self.dspam.data(self.message)
except DspamClientError as err:
logger.error(
'<{}> An error ocurred while talking to DSPAM: {}'.format(
self.id, err))
return Milter.TEMPFAIL
# Clear caches
self.message = ''
self.recipients = []
# With multiple recipients, if different verdicts were returned, always
# use the 'lowest' verdict as final, so mail is not lost unexpected.
final_verdict = None
for rcpt in self.dspam.results:
results = self.dspam.results[rcpt]
logger.info(
'<{0}> DSPAM returned results for message with queue id {1} '
'and RCPT {2}: {3}'.format(
self.id, queue_id, rcpt,
' '.join('{}={}'.format(k, v) for k, v in results.iteritems())))
verdict = self.compute_verdict(results)
if final_verdict is None or verdict < final_verdict:
final_verdict = verdict
final_results = results
if final_verdict == self.VERDICT_REJECT:
logger.info(
'<{0}> Rejecting message with queue id {1} based on DSPAM '
'results: user={2[user]} class={2[class]} '
'confidence={2[confidence]}'.format(
self.id, queue_id, final_results))
self.setreply('550', '5.7.1', 'Message is {0[class]}'.format(
final_results))
return Milter.REJECT
elif final_verdict == self.VERDICT_QUARANTINE:
logger.info(
'<{0}> Quarantining message with queue id {1} based on DSPAM '
'results: user={2[user]} class={2[class]} '
'confidence={2[confidence]}'.format(
self.id, queue_id, final_results))
self.add_dspam_headers(final_results)
self.quarantine('Message is {0[class]} according to DSPAM'.format(
final_results))
return Milter.ACCEPT
else:
logger.info(
'<{0}> Accepting message with queue id {1} based on DSPAM '
'results: user={2[user]} class={2[class]} '
'confidence={2[confidence]}'.format(
self.id, queue_id, final_results))
self.add_dspam_headers(final_results)
return Milter.ACCEPT
def close(self):
"""
Log disconnects.
"""
time_spent = time.time() - self.time_start
logger.debug(
'<{}> Disconnect from [{}]:{}, time spent {:.3f} seconds'.format(
self.id, self.client_ip, self.client_port, time_spent))
return Milter.CONTINUE
def compute_verdict(self, results):
"""
Match results to the configured reject, quarantine and accept classes,
and return a verdict based on that.
The verdict classes are matched in the order: reject_classes,
quarantine_classes, accept_classes. This means that you can configure
different verdicts for different confidence results, for instance:
reject_classes= Spam:0.99 # Reject obvious spam
quarantine_classes = Spam:0.7 # Quarantine spam with confidence
# between 0.7 and 0.99
accept_classes = Spam # Accept low confidence spam (good
# for FP and retraining)
Args:
results -- A results dictionary from DspamClient.
"""
if results['class'] in self.reject_classes:
threshold = self.reject_classes[results['class']]
if float(results['confidence']) >= threshold:
logger.debug(
'<{0}> Suggesting to reject the message based on DSPAM '
'results: user={1[user]}, class={1[class]}, '
'confidence={1[confidence]}'.format(self.id, results))
return self.VERDICT_REJECT
if results['class'] in self.quarantine_classes:
threshold = self.quarantine_classes[results['class']]
if float(results['confidence']) >= threshold:
logger.debug(
'<{0}> Suggesting to quarantine the message based on '
'DSPAM results: user={1[user]}, class={1[class]}, '
'confidence={1[confidence]}'.format(self.id, results))
return self.VERDICT_QUARANTINE
if results['class'] in self.accept_classes:
threshold = self.accept_classes[results['class']]
if float(results['confidence']) >= threshold:
logger.debug(
'<{0}> Suggesting to accept the message based on DSPAM '
'results: user={1[user]}, class={1[class]}, '
'confidence={1[confidence]}'.format(self.id, results))
return self.VERDICT_ACCEPT
logger.debug(
'<{0}> Suggesting to accept the message, no verdict class matched '
'DSPAM results: user={1[user]}, class={1[class]}, '
'confidence={1[confidence]}'.format(self.id, results))
return self.VERDICT_ACCEPT
def add_dspam_headers(self, results):
"""
Format DSPAM headers with passed results, and add them to the message.
Args:
results -- A results dictionary from DspamClient.
"""
for header in self.headers:
hname = self.header_prefix + header
if header.lower() in results:
hvalue = results[header.lower()]
logger.debug(
'<{}> Adding header {}: {}'.format(self.id, hname, hvalue))
self.addheader(hname, hvalue)
elif header == 'Processed':
# X-DSPAM-Processed: Wed Dec 12 02:19:23 2012
hvalue = datetime.datetime.now().strftime(
'%a %b %d %H:%M:%S %Y')
logger.debug(
'<{}> Adding header {}: {}'.format(self.id, hname, hvalue))
self.addheader(hname, hvalue)
else:
logger.warning(
'<{}> Not adding header {}, no data available in '
'DSPAM results'.format(self.id, hname))
|
whyscream/dspam-milter | dspam/milter.py | DspamMilter.eom | python | def eom(self):
for header in self.remove_headers:
self.chgheader(header, 1, '')
logger.info('<{}> Removing existing {} header'.format(
self.id, header))
queue_id = self.getsymval('i')
logger.debug(
'<{}> Sending message with MTA queue id {} to DSPAM'.format(
self.id, queue_id))
try:
if not self.dspam:
self.dspam = DspamClient()
self.dspam.connect()
self.dspam.lhlo()
if not self.dspam.dlmtp:
logger.warning(
'<{}> Connection to DSPAM is established, but DLMTP '
'seems unavailable'.format(self.id))
else:
self.dspam.rset()
except DspamClientError as err:
logger.error(
'<{}> An error ocurred while connecting to DSPAM: {}'.format(
self.id, err))
return Milter.TEMPFAIL
try:
self.dspam.mailfrom(client_args='--process --deliver=summary')
if self.static_user:
self.dspam.rcptto((self.static_user,))
else:
self.dspam.rcptto(self.recipients)
self.dspam.data(self.message)
except DspamClientError as err:
logger.error(
'<{}> An error ocurred while talking to DSPAM: {}'.format(
self.id, err))
return Milter.TEMPFAIL
# Clear caches
self.message = ''
self.recipients = []
# With multiple recipients, if different verdicts were returned, always
# use the 'lowest' verdict as final, so mail is not lost unexpected.
final_verdict = None
for rcpt in self.dspam.results:
results = self.dspam.results[rcpt]
logger.info(
'<{0}> DSPAM returned results for message with queue id {1} '
'and RCPT {2}: {3}'.format(
self.id, queue_id, rcpt,
' '.join('{}={}'.format(k, v) for k, v in results.iteritems())))
verdict = self.compute_verdict(results)
if final_verdict is None or verdict < final_verdict:
final_verdict = verdict
final_results = results
if final_verdict == self.VERDICT_REJECT:
logger.info(
'<{0}> Rejecting message with queue id {1} based on DSPAM '
'results: user={2[user]} class={2[class]} '
'confidence={2[confidence]}'.format(
self.id, queue_id, final_results))
self.setreply('550', '5.7.1', 'Message is {0[class]}'.format(
final_results))
return Milter.REJECT
elif final_verdict == self.VERDICT_QUARANTINE:
logger.info(
'<{0}> Quarantining message with queue id {1} based on DSPAM '
'results: user={2[user]} class={2[class]} '
'confidence={2[confidence]}'.format(
self.id, queue_id, final_results))
self.add_dspam_headers(final_results)
self.quarantine('Message is {0[class]} according to DSPAM'.format(
final_results))
return Milter.ACCEPT
else:
logger.info(
'<{0}> Accepting message with queue id {1} based on DSPAM '
'results: user={2[user]} class={2[class]} '
'confidence={2[confidence]}'.format(
self.id, queue_id, final_results))
self.add_dspam_headers(final_results)
return Milter.ACCEPT | Send the message to DSPAM for classification and return a milter
response based on the results.
If <DspamMilter>.static_user is set, that single DSPAM user account
will be used for processing the message. If it is unset, all envelope
recipients will be passed to DSPAM, and the final decision is based on
the least invasive result in all their classification results. | train | https://github.com/whyscream/dspam-milter/blob/f9939b718eed02cb7e56f8c5b921db4cfe1cd85a/dspam/milter.py#L140-L236 | [
"def connect(self):\n \"\"\"\n Connect to TCP or domain socket, and process the server LMTP greeting.\n\n \"\"\"\n # extract proto from socket setting\n try:\n (proto, spec) = self.socket.split(':')\n except ValueError:\n raise DspamClientError(\n 'Failed to parse DSPAM so... | class DspamMilter(Milter.Base):
"""
A milter interface to the DSPAM daemon.
This milter can be added to an MTA setup so messages can be inspected
by a DSPAM server, and optionally rejected or quarantined based on the
classification results.
"""
# Constants defining possible return codes for compute_verdict()
VERDICT_ACCEPT = 1
VERDICT_QUARANTINE = 2
VERDICT_REJECT = 3
# Default configuration
static_user = None
headers = {
'Processed': 0,
'Confidence': 0,
'Probability': 0,
'Result': 0,
'Signature': 0
}
header_prefix = 'X-DSPAM-'
reject_classes = {'Blacklisted': 0, 'Blocklisted': 0, 'Spam': 0.9}
quarantine_classes = {'Virus': 0}
accept_classes = {'Innocent': 0, 'Whitelisted': 0}
recipient_delimiter = '+'
def __init__(self):
"""
Create a new milter instance.
"""
self.id = Milter.uniqueID()
self.message = ''
self.recipients = []
self.dspam = None
self.remove_headers = []
if self.recipient_delimiter:
self.recipient_delimiter_re = re.compile('[{}][^@]*'.format(
re.escape(self.recipient_delimiter)))
else:
self.recipient_delimiter_re = None
def connect(self, hostname, family, hostaddr):
"""
Log new connections.
"""
self.client_ip = hostaddr[0]
self.client_port = hostaddr[1]
self.time_start = time.time()
logger.debug('<{}> Connect from {}[{}]:{}'.format(
self.id, hostname, self.client_ip, self.client_port))
return Milter.CONTINUE
def envrcpt(self, rcpt, *params):
"""
Send all recipients to DSPAM.
"""
if rcpt.startswith('<'):
rcpt = rcpt[1:]
if rcpt.endswith('>'):
rcpt = rcpt[:-1]
if self.recipient_delimiter_re:
rcpt = self.recipient_delimiter_re.sub('', rcpt)
if rcpt not in self.recipients:
self.recipients.append(rcpt)
logger.debug('<{}> Received RCPT {}'.format(self.id, rcpt))
return Milter.CONTINUE
@Milter.noreply
def header(self, name, value):
"""
Store all message headers, optionally clean them up.
This simply stores all message headers so we can send them to DSPAM.
Additionally, headers that have the same prefix as the ones we're
about to add are deleted.
"""
self.message += "{}: {}\r\n".format(name, value)
logger.debug('<{}> Received {} header'.format(self.id, name))
if name.lower().startswith(self.header_prefix.lower()):
self.remove_headers.append(name)
logger.debug('<{}> Going to remove {} header'.format(
self.id, name))
return Milter.CONTINUE
@Milter.noreply
def eoh(self):
"""
Store end of message headers.
"""
self.message += "\r\n"
return Milter.CONTINUE
@Milter.noreply
def body(self, block):
"""
Store message body.
"""
self.message += block
logger.debug('<{}> Received {} bytes of message body'.format(
self.id, len(block)))
return Milter.CONTINUE
def close(self):
"""
Log disconnects.
"""
time_spent = time.time() - self.time_start
logger.debug(
'<{}> Disconnect from [{}]:{}, time spent {:.3f} seconds'.format(
self.id, self.client_ip, self.client_port, time_spent))
return Milter.CONTINUE
def compute_verdict(self, results):
"""
Match results to the configured reject, quarantine and accept classes,
and return a verdict based on that.
The verdict classes are matched in the order: reject_classes,
quarantine_classes, accept_classes. This means that you can configure
different verdicts for different confidence results, for instance:
reject_classes= Spam:0.99 # Reject obvious spam
quarantine_classes = Spam:0.7 # Quarantine spam with confidence
# between 0.7 and 0.99
accept_classes = Spam # Accept low confidence spam (good
# for FP and retraining)
Args:
results -- A results dictionary from DspamClient.
"""
if results['class'] in self.reject_classes:
threshold = self.reject_classes[results['class']]
if float(results['confidence']) >= threshold:
logger.debug(
'<{0}> Suggesting to reject the message based on DSPAM '
'results: user={1[user]}, class={1[class]}, '
'confidence={1[confidence]}'.format(self.id, results))
return self.VERDICT_REJECT
if results['class'] in self.quarantine_classes:
threshold = self.quarantine_classes[results['class']]
if float(results['confidence']) >= threshold:
logger.debug(
'<{0}> Suggesting to quarantine the message based on '
'DSPAM results: user={1[user]}, class={1[class]}, '
'confidence={1[confidence]}'.format(self.id, results))
return self.VERDICT_QUARANTINE
if results['class'] in self.accept_classes:
threshold = self.accept_classes[results['class']]
if float(results['confidence']) >= threshold:
logger.debug(
'<{0}> Suggesting to accept the message based on DSPAM '
'results: user={1[user]}, class={1[class]}, '
'confidence={1[confidence]}'.format(self.id, results))
return self.VERDICT_ACCEPT
logger.debug(
'<{0}> Suggesting to accept the message, no verdict class matched '
'DSPAM results: user={1[user]}, class={1[class]}, '
'confidence={1[confidence]}'.format(self.id, results))
return self.VERDICT_ACCEPT
def add_dspam_headers(self, results):
"""
Format DSPAM headers with passed results, and add them to the message.
Args:
results -- A results dictionary from DspamClient.
"""
for header in self.headers:
hname = self.header_prefix + header
if header.lower() in results:
hvalue = results[header.lower()]
logger.debug(
'<{}> Adding header {}: {}'.format(self.id, hname, hvalue))
self.addheader(hname, hvalue)
elif header == 'Processed':
# X-DSPAM-Processed: Wed Dec 12 02:19:23 2012
hvalue = datetime.datetime.now().strftime(
'%a %b %d %H:%M:%S %Y')
logger.debug(
'<{}> Adding header {}: {}'.format(self.id, hname, hvalue))
self.addheader(hname, hvalue)
else:
logger.warning(
'<{}> Not adding header {}, no data available in '
'DSPAM results'.format(self.id, hname))
|
whyscream/dspam-milter | dspam/milter.py | DspamMilter.close | python | def close(self):
time_spent = time.time() - self.time_start
logger.debug(
'<{}> Disconnect from [{}]:{}, time spent {:.3f} seconds'.format(
self.id, self.client_ip, self.client_port, time_spent))
return Milter.CONTINUE | Log disconnects. | train | https://github.com/whyscream/dspam-milter/blob/f9939b718eed02cb7e56f8c5b921db4cfe1cd85a/dspam/milter.py#L238-L247 | null | class DspamMilter(Milter.Base):
"""
A milter interface to the DSPAM daemon.
This milter can be added to an MTA setup so messages can be inspected
by a DSPAM server, and optionally rejected or quarantined based on the
classification results.
"""
# Constants defining possible return codes for compute_verdict()
VERDICT_ACCEPT = 1
VERDICT_QUARANTINE = 2
VERDICT_REJECT = 3
# Default configuration
static_user = None
headers = {
'Processed': 0,
'Confidence': 0,
'Probability': 0,
'Result': 0,
'Signature': 0
}
header_prefix = 'X-DSPAM-'
reject_classes = {'Blacklisted': 0, 'Blocklisted': 0, 'Spam': 0.9}
quarantine_classes = {'Virus': 0}
accept_classes = {'Innocent': 0, 'Whitelisted': 0}
recipient_delimiter = '+'
def __init__(self):
"""
Create a new milter instance.
"""
self.id = Milter.uniqueID()
self.message = ''
self.recipients = []
self.dspam = None
self.remove_headers = []
if self.recipient_delimiter:
self.recipient_delimiter_re = re.compile('[{}][^@]*'.format(
re.escape(self.recipient_delimiter)))
else:
self.recipient_delimiter_re = None
def connect(self, hostname, family, hostaddr):
"""
Log new connections.
"""
self.client_ip = hostaddr[0]
self.client_port = hostaddr[1]
self.time_start = time.time()
logger.debug('<{}> Connect from {}[{}]:{}'.format(
self.id, hostname, self.client_ip, self.client_port))
return Milter.CONTINUE
def envrcpt(self, rcpt, *params):
"""
Send all recipients to DSPAM.
"""
if rcpt.startswith('<'):
rcpt = rcpt[1:]
if rcpt.endswith('>'):
rcpt = rcpt[:-1]
if self.recipient_delimiter_re:
rcpt = self.recipient_delimiter_re.sub('', rcpt)
if rcpt not in self.recipients:
self.recipients.append(rcpt)
logger.debug('<{}> Received RCPT {}'.format(self.id, rcpt))
return Milter.CONTINUE
@Milter.noreply
def header(self, name, value):
"""
Store all message headers, optionally clean them up.
This simply stores all message headers so we can send them to DSPAM.
Additionally, headers that have the same prefix as the ones we're
about to add are deleted.
"""
self.message += "{}: {}\r\n".format(name, value)
logger.debug('<{}> Received {} header'.format(self.id, name))
if name.lower().startswith(self.header_prefix.lower()):
self.remove_headers.append(name)
logger.debug('<{}> Going to remove {} header'.format(
self.id, name))
return Milter.CONTINUE
@Milter.noreply
def eoh(self):
"""
Store end of message headers.
"""
self.message += "\r\n"
return Milter.CONTINUE
@Milter.noreply
def body(self, block):
"""
Store message body.
"""
self.message += block
logger.debug('<{}> Received {} bytes of message body'.format(
self.id, len(block)))
return Milter.CONTINUE
def eom(self):
"""
Send the message to DSPAM for classification and a return a milter
response based on the results.
If <DspamMilter>.static_user is set, that single DSPAM user account
will be used for processing the message. If it is unset, all envelope
recipients will be passed to DSPAM, and the final decision is based on
the least invasive result in all their classification results.
"""
for header in self.remove_headers:
self.chgheader(header, 1, '')
logger.info('<{}> Removing existing {} header'.format(
self.id, header))
queue_id = self.getsymval('i')
logger.debug(
'<{}> Sending message with MTA queue id {} to DSPAM'.format(
self.id, queue_id))
try:
if not self.dspam:
self.dspam = DspamClient()
self.dspam.connect()
self.dspam.lhlo()
if not self.dspam.dlmtp:
logger.warning(
'<{}> Connection to DSPAM is established, but DLMTP '
'seems unavailable'.format(self.id))
else:
self.dspam.rset()
except DspamClientError as err:
logger.error(
'<{}> An error ocurred while connecting to DSPAM: {}'.format(
self.id, err))
return Milter.TEMPFAIL
try:
self.dspam.mailfrom(client_args='--process --deliver=summary')
if self.static_user:
self.dspam.rcptto((self.static_user,))
else:
self.dspam.rcptto(self.recipients)
self.dspam.data(self.message)
except DspamClientError as err:
logger.error(
'<{}> An error ocurred while talking to DSPAM: {}'.format(
self.id, err))
return Milter.TEMPFAIL
# Clear caches
self.message = ''
self.recipients = []
# With multiple recipients, if different verdicts were returned, always
# use the 'lowest' verdict as final, so mail is not lost unexpected.
final_verdict = None
for rcpt in self.dspam.results:
results = self.dspam.results[rcpt]
logger.info(
'<{0}> DSPAM returned results for message with queue id {1} '
'and RCPT {2}: {3}'.format(
self.id, queue_id, rcpt,
' '.join('{}={}'.format(k, v) for k, v in results.iteritems())))
verdict = self.compute_verdict(results)
if final_verdict is None or verdict < final_verdict:
final_verdict = verdict
final_results = results
if final_verdict == self.VERDICT_REJECT:
logger.info(
'<{0}> Rejecting message with queue id {1} based on DSPAM '
'results: user={2[user]} class={2[class]} '
'confidence={2[confidence]}'.format(
self.id, queue_id, final_results))
self.setreply('550', '5.7.1', 'Message is {0[class]}'.format(
final_results))
return Milter.REJECT
elif final_verdict == self.VERDICT_QUARANTINE:
logger.info(
'<{0}> Quarantining message with queue id {1} based on DSPAM '
'results: user={2[user]} class={2[class]} '
'confidence={2[confidence]}'.format(
self.id, queue_id, final_results))
self.add_dspam_headers(final_results)
self.quarantine('Message is {0[class]} according to DSPAM'.format(
final_results))
return Milter.ACCEPT
else:
logger.info(
'<{0}> Accepting message with queue id {1} based on DSPAM '
'results: user={2[user]} class={2[class]} '
'confidence={2[confidence]}'.format(
self.id, queue_id, final_results))
self.add_dspam_headers(final_results)
return Milter.ACCEPT
def compute_verdict(self, results):
"""
Match results to the configured reject, quarantine and accept classes,
and return a verdict based on that.
The verdict classes are matched in the order: reject_classes,
quarantine_classes, accept_classes. This means that you can configure
different verdicts for different confidence results, for instance:
reject_classes= Spam:0.99 # Reject obvious spam
quarantine_classes = Spam:0.7 # Quarantine spam with confidence
# between 0.7 and 0.99
accept_classes = Spam # Accept low confidence spam (good
# for FP and retraining)
Args:
results -- A results dictionary from DspamClient.
"""
if results['class'] in self.reject_classes:
threshold = self.reject_classes[results['class']]
if float(results['confidence']) >= threshold:
logger.debug(
'<{0}> Suggesting to reject the message based on DSPAM '
'results: user={1[user]}, class={1[class]}, '
'confidence={1[confidence]}'.format(self.id, results))
return self.VERDICT_REJECT
if results['class'] in self.quarantine_classes:
threshold = self.quarantine_classes[results['class']]
if float(results['confidence']) >= threshold:
logger.debug(
'<{0}> Suggesting to quarantine the message based on '
'DSPAM results: user={1[user]}, class={1[class]}, '
'confidence={1[confidence]}'.format(self.id, results))
return self.VERDICT_QUARANTINE
if results['class'] in self.accept_classes:
threshold = self.accept_classes[results['class']]
if float(results['confidence']) >= threshold:
logger.debug(
'<{0}> Suggesting to accept the message based on DSPAM '
'results: user={1[user]}, class={1[class]}, '
'confidence={1[confidence]}'.format(self.id, results))
return self.VERDICT_ACCEPT
logger.debug(
'<{0}> Suggesting to accept the message, no verdict class matched '
'DSPAM results: user={1[user]}, class={1[class]}, '
'confidence={1[confidence]}'.format(self.id, results))
return self.VERDICT_ACCEPT
def add_dspam_headers(self, results):
"""
Format DSPAM headers with passed results, and add them to the message.
Args:
results -- A results dictionary from DspamClient.
"""
for header in self.headers:
hname = self.header_prefix + header
if header.lower() in results:
hvalue = results[header.lower()]
logger.debug(
'<{}> Adding header {}: {}'.format(self.id, hname, hvalue))
self.addheader(hname, hvalue)
elif header == 'Processed':
# X-DSPAM-Processed: Wed Dec 12 02:19:23 2012
hvalue = datetime.datetime.now().strftime(
'%a %b %d %H:%M:%S %Y')
logger.debug(
'<{}> Adding header {}: {}'.format(self.id, hname, hvalue))
self.addheader(hname, hvalue)
else:
logger.warning(
'<{}> Not adding header {}, no data available in '
'DSPAM results'.format(self.id, hname))
|
whyscream/dspam-milter | dspam/milter.py | DspamMilter.compute_verdict | python | def compute_verdict(self, results):
if results['class'] in self.reject_classes:
threshold = self.reject_classes[results['class']]
if float(results['confidence']) >= threshold:
logger.debug(
'<{0}> Suggesting to reject the message based on DSPAM '
'results: user={1[user]}, class={1[class]}, '
'confidence={1[confidence]}'.format(self.id, results))
return self.VERDICT_REJECT
if results['class'] in self.quarantine_classes:
threshold = self.quarantine_classes[results['class']]
if float(results['confidence']) >= threshold:
logger.debug(
'<{0}> Suggesting to quarantine the message based on '
'DSPAM results: user={1[user]}, class={1[class]}, '
'confidence={1[confidence]}'.format(self.id, results))
return self.VERDICT_QUARANTINE
if results['class'] in self.accept_classes:
threshold = self.accept_classes[results['class']]
if float(results['confidence']) >= threshold:
logger.debug(
'<{0}> Suggesting to accept the message based on DSPAM '
'results: user={1[user]}, class={1[class]}, '
'confidence={1[confidence]}'.format(self.id, results))
return self.VERDICT_ACCEPT
logger.debug(
'<{0}> Suggesting to accept the message, no verdict class matched '
'DSPAM results: user={1[user]}, class={1[class]}, '
'confidence={1[confidence]}'.format(self.id, results))
return self.VERDICT_ACCEPT | Match results to the configured reject, quarantine and accept classes,
and return a verdict based on that.
The verdict classes are matched in the order: reject_classes,
quarantine_classes, accept_classes. This means that you can configure
different verdicts for different confidence results, for instance:
reject_classes= Spam:0.99 # Reject obvious spam
quarantine_classes = Spam:0.7 # Quarantine spam with confidence
# between 0.7 and 0.99
accept_classes = Spam # Accept low confidence spam (good
# for FP and retraining)
Args:
results -- A results dictionary from DspamClient. | train | https://github.com/whyscream/dspam-milter/blob/f9939b718eed02cb7e56f8c5b921db4cfe1cd85a/dspam/milter.py#L249-L298 | null | class DspamMilter(Milter.Base):
"""
A milter interface to the DSPAM daemon.
This milter can be added to an MTA setup so messages can be inspected
by a DSPAM server, and optionally rejected or quarantined based on the
classification results.
"""
# Constants defining possible return codes for compute_verdict()
VERDICT_ACCEPT = 1
VERDICT_QUARANTINE = 2
VERDICT_REJECT = 3
# Default configuration
static_user = None
headers = {
'Processed': 0,
'Confidence': 0,
'Probability': 0,
'Result': 0,
'Signature': 0
}
header_prefix = 'X-DSPAM-'
reject_classes = {'Blacklisted': 0, 'Blocklisted': 0, 'Spam': 0.9}
quarantine_classes = {'Virus': 0}
accept_classes = {'Innocent': 0, 'Whitelisted': 0}
recipient_delimiter = '+'
def __init__(self):
"""
Create a new milter instance.
"""
self.id = Milter.uniqueID()
self.message = ''
self.recipients = []
self.dspam = None
self.remove_headers = []
if self.recipient_delimiter:
self.recipient_delimiter_re = re.compile('[{}][^@]*'.format(
re.escape(self.recipient_delimiter)))
else:
self.recipient_delimiter_re = None
def connect(self, hostname, family, hostaddr):
"""
Log new connections.
"""
self.client_ip = hostaddr[0]
self.client_port = hostaddr[1]
self.time_start = time.time()
logger.debug('<{}> Connect from {}[{}]:{}'.format(
self.id, hostname, self.client_ip, self.client_port))
return Milter.CONTINUE
def envrcpt(self, rcpt, *params):
"""
Send all recipients to DSPAM.
"""
if rcpt.startswith('<'):
rcpt = rcpt[1:]
if rcpt.endswith('>'):
rcpt = rcpt[:-1]
if self.recipient_delimiter_re:
rcpt = self.recipient_delimiter_re.sub('', rcpt)
if rcpt not in self.recipients:
self.recipients.append(rcpt)
logger.debug('<{}> Received RCPT {}'.format(self.id, rcpt))
return Milter.CONTINUE
@Milter.noreply
def header(self, name, value):
"""
Store all message headers, optionally clean them up.
This simply stores all message headers so we can send them to DSPAM.
Additionally, headers that have the same prefix as the ones we're
about to add are deleted.
"""
self.message += "{}: {}\r\n".format(name, value)
logger.debug('<{}> Received {} header'.format(self.id, name))
if name.lower().startswith(self.header_prefix.lower()):
self.remove_headers.append(name)
logger.debug('<{}> Going to remove {} header'.format(
self.id, name))
return Milter.CONTINUE
@Milter.noreply
def eoh(self):
"""
Store end of message headers.
"""
self.message += "\r\n"
return Milter.CONTINUE
@Milter.noreply
def body(self, block):
"""
Store message body.
"""
self.message += block
logger.debug('<{}> Received {} bytes of message body'.format(
self.id, len(block)))
return Milter.CONTINUE
def eom(self):
"""
Send the message to DSPAM for classification and a return a milter
response based on the results.
If <DspamMilter>.static_user is set, that single DSPAM user account
will be used for processing the message. If it is unset, all envelope
recipients will be passed to DSPAM, and the final decision is based on
the least invasive result in all their classification results.
"""
for header in self.remove_headers:
self.chgheader(header, 1, '')
logger.info('<{}> Removing existing {} header'.format(
self.id, header))
queue_id = self.getsymval('i')
logger.debug(
'<{}> Sending message with MTA queue id {} to DSPAM'.format(
self.id, queue_id))
try:
if not self.dspam:
self.dspam = DspamClient()
self.dspam.connect()
self.dspam.lhlo()
if not self.dspam.dlmtp:
logger.warning(
'<{}> Connection to DSPAM is established, but DLMTP '
'seems unavailable'.format(self.id))
else:
self.dspam.rset()
except DspamClientError as err:
logger.error(
'<{}> An error ocurred while connecting to DSPAM: {}'.format(
self.id, err))
return Milter.TEMPFAIL
try:
self.dspam.mailfrom(client_args='--process --deliver=summary')
if self.static_user:
self.dspam.rcptto((self.static_user,))
else:
self.dspam.rcptto(self.recipients)
self.dspam.data(self.message)
except DspamClientError as err:
logger.error(
'<{}> An error ocurred while talking to DSPAM: {}'.format(
self.id, err))
return Milter.TEMPFAIL
# Clear caches
self.message = ''
self.recipients = []
# With multiple recipients, if different verdicts were returned, always
# use the 'lowest' verdict as final, so mail is not lost unexpected.
final_verdict = None
for rcpt in self.dspam.results:
results = self.dspam.results[rcpt]
logger.info(
'<{0}> DSPAM returned results for message with queue id {1} '
'and RCPT {2}: {3}'.format(
self.id, queue_id, rcpt,
' '.join('{}={}'.format(k, v) for k, v in results.iteritems())))
verdict = self.compute_verdict(results)
if final_verdict is None or verdict < final_verdict:
final_verdict = verdict
final_results = results
if final_verdict == self.VERDICT_REJECT:
logger.info(
'<{0}> Rejecting message with queue id {1} based on DSPAM '
'results: user={2[user]} class={2[class]} '
'confidence={2[confidence]}'.format(
self.id, queue_id, final_results))
self.setreply('550', '5.7.1', 'Message is {0[class]}'.format(
final_results))
return Milter.REJECT
elif final_verdict == self.VERDICT_QUARANTINE:
logger.info(
'<{0}> Quarantining message with queue id {1} based on DSPAM '
'results: user={2[user]} class={2[class]} '
'confidence={2[confidence]}'.format(
self.id, queue_id, final_results))
self.add_dspam_headers(final_results)
self.quarantine('Message is {0[class]} according to DSPAM'.format(
final_results))
return Milter.ACCEPT
else:
logger.info(
'<{0}> Accepting message with queue id {1} based on DSPAM '
'results: user={2[user]} class={2[class]} '
'confidence={2[confidence]}'.format(
self.id, queue_id, final_results))
self.add_dspam_headers(final_results)
return Milter.ACCEPT
def close(self):
"""
Log disconnects.
"""
time_spent = time.time() - self.time_start
logger.debug(
'<{}> Disconnect from [{}]:{}, time spent {:.3f} seconds'.format(
self.id, self.client_ip, self.client_port, time_spent))
return Milter.CONTINUE
def add_dspam_headers(self, results):
"""
Format DSPAM headers with passed results, and add them to the message.
Args:
results -- A results dictionary from DspamClient.
"""
for header in self.headers:
hname = self.header_prefix + header
if header.lower() in results:
hvalue = results[header.lower()]
logger.debug(
'<{}> Adding header {}: {}'.format(self.id, hname, hvalue))
self.addheader(hname, hvalue)
elif header == 'Processed':
# X-DSPAM-Processed: Wed Dec 12 02:19:23 2012
hvalue = datetime.datetime.now().strftime(
'%a %b %d %H:%M:%S %Y')
logger.debug(
'<{}> Adding header {}: {}'.format(self.id, hname, hvalue))
self.addheader(hname, hvalue)
else:
logger.warning(
'<{}> Not adding header {}, no data available in '
'DSPAM results'.format(self.id, hname))
|
whyscream/dspam-milter | dspam/milter.py | DspamMilter.add_dspam_headers | python | def add_dspam_headers(self, results):
for header in self.headers:
hname = self.header_prefix + header
if header.lower() in results:
hvalue = results[header.lower()]
logger.debug(
'<{}> Adding header {}: {}'.format(self.id, hname, hvalue))
self.addheader(hname, hvalue)
elif header == 'Processed':
# X-DSPAM-Processed: Wed Dec 12 02:19:23 2012
hvalue = datetime.datetime.now().strftime(
'%a %b %d %H:%M:%S %Y')
logger.debug(
'<{}> Adding header {}: {}'.format(self.id, hname, hvalue))
self.addheader(hname, hvalue)
else:
logger.warning(
'<{}> Not adding header {}, no data available in '
'DSPAM results'.format(self.id, hname)) | Format DSPAM headers with passed results, and add them to the message.
Args:
results -- A results dictionary from DspamClient. | train | https://github.com/whyscream/dspam-milter/blob/f9939b718eed02cb7e56f8c5b921db4cfe1cd85a/dspam/milter.py#L300-L324 | null | class DspamMilter(Milter.Base):
"""
A milter interface to the DSPAM daemon.
This milter can be added to an MTA setup so messages can be inspected
by a DSPAM server, and optionally rejected or quarantined based on the
classification results.
"""
# Constants defining possible return codes for compute_verdict()
VERDICT_ACCEPT = 1
VERDICT_QUARANTINE = 2
VERDICT_REJECT = 3
# Default configuration
static_user = None
headers = {
'Processed': 0,
'Confidence': 0,
'Probability': 0,
'Result': 0,
'Signature': 0
}
header_prefix = 'X-DSPAM-'
reject_classes = {'Blacklisted': 0, 'Blocklisted': 0, 'Spam': 0.9}
quarantine_classes = {'Virus': 0}
accept_classes = {'Innocent': 0, 'Whitelisted': 0}
recipient_delimiter = '+'
def __init__(self):
"""
Create a new milter instance.
"""
self.id = Milter.uniqueID()
self.message = ''
self.recipients = []
self.dspam = None
self.remove_headers = []
if self.recipient_delimiter:
self.recipient_delimiter_re = re.compile('[{}][^@]*'.format(
re.escape(self.recipient_delimiter)))
else:
self.recipient_delimiter_re = None
def connect(self, hostname, family, hostaddr):
"""
Log new connections.
"""
self.client_ip = hostaddr[0]
self.client_port = hostaddr[1]
self.time_start = time.time()
logger.debug('<{}> Connect from {}[{}]:{}'.format(
self.id, hostname, self.client_ip, self.client_port))
return Milter.CONTINUE
def envrcpt(self, rcpt, *params):
"""
Send all recipients to DSPAM.
"""
if rcpt.startswith('<'):
rcpt = rcpt[1:]
if rcpt.endswith('>'):
rcpt = rcpt[:-1]
if self.recipient_delimiter_re:
rcpt = self.recipient_delimiter_re.sub('', rcpt)
if rcpt not in self.recipients:
self.recipients.append(rcpt)
logger.debug('<{}> Received RCPT {}'.format(self.id, rcpt))
return Milter.CONTINUE
@Milter.noreply
def header(self, name, value):
"""
Store all message headers, optionally clean them up.
This simply stores all message headers so we can send them to DSPAM.
Additionally, headers that have the same prefix as the ones we're
about to add are deleted.
"""
self.message += "{}: {}\r\n".format(name, value)
logger.debug('<{}> Received {} header'.format(self.id, name))
if name.lower().startswith(self.header_prefix.lower()):
self.remove_headers.append(name)
logger.debug('<{}> Going to remove {} header'.format(
self.id, name))
return Milter.CONTINUE
@Milter.noreply
def eoh(self):
"""
Store end of message headers.
"""
self.message += "\r\n"
return Milter.CONTINUE
@Milter.noreply
def body(self, block):
"""
Store message body.
"""
self.message += block
logger.debug('<{}> Received {} bytes of message body'.format(
self.id, len(block)))
return Milter.CONTINUE
def eom(self):
"""
Send the message to DSPAM for classification and a return a milter
response based on the results.
If <DspamMilter>.static_user is set, that single DSPAM user account
will be used for processing the message. If it is unset, all envelope
recipients will be passed to DSPAM, and the final decision is based on
the least invasive result in all their classification results.
"""
for header in self.remove_headers:
self.chgheader(header, 1, '')
logger.info('<{}> Removing existing {} header'.format(
self.id, header))
queue_id = self.getsymval('i')
logger.debug(
'<{}> Sending message with MTA queue id {} to DSPAM'.format(
self.id, queue_id))
try:
if not self.dspam:
self.dspam = DspamClient()
self.dspam.connect()
self.dspam.lhlo()
if not self.dspam.dlmtp:
logger.warning(
'<{}> Connection to DSPAM is established, but DLMTP '
'seems unavailable'.format(self.id))
else:
self.dspam.rset()
except DspamClientError as err:
logger.error(
'<{}> An error ocurred while connecting to DSPAM: {}'.format(
self.id, err))
return Milter.TEMPFAIL
try:
self.dspam.mailfrom(client_args='--process --deliver=summary')
if self.static_user:
self.dspam.rcptto((self.static_user,))
else:
self.dspam.rcptto(self.recipients)
self.dspam.data(self.message)
except DspamClientError as err:
logger.error(
'<{}> An error ocurred while talking to DSPAM: {}'.format(
self.id, err))
return Milter.TEMPFAIL
# Clear caches
self.message = ''
self.recipients = []
# With multiple recipients, if different verdicts were returned, always
# use the 'lowest' verdict as final, so mail is not lost unexpected.
final_verdict = None
for rcpt in self.dspam.results:
results = self.dspam.results[rcpt]
logger.info(
'<{0}> DSPAM returned results for message with queue id {1} '
'and RCPT {2}: {3}'.format(
self.id, queue_id, rcpt,
' '.join('{}={}'.format(k, v) for k, v in results.iteritems())))
verdict = self.compute_verdict(results)
if final_verdict is None or verdict < final_verdict:
final_verdict = verdict
final_results = results
if final_verdict == self.VERDICT_REJECT:
logger.info(
'<{0}> Rejecting message with queue id {1} based on DSPAM '
'results: user={2[user]} class={2[class]} '
'confidence={2[confidence]}'.format(
self.id, queue_id, final_results))
self.setreply('550', '5.7.1', 'Message is {0[class]}'.format(
final_results))
return Milter.REJECT
elif final_verdict == self.VERDICT_QUARANTINE:
logger.info(
'<{0}> Quarantining message with queue id {1} based on DSPAM '
'results: user={2[user]} class={2[class]} '
'confidence={2[confidence]}'.format(
self.id, queue_id, final_results))
self.add_dspam_headers(final_results)
self.quarantine('Message is {0[class]} according to DSPAM'.format(
final_results))
return Milter.ACCEPT
else:
logger.info(
'<{0}> Accepting message with queue id {1} based on DSPAM '
'results: user={2[user]} class={2[class]} '
'confidence={2[confidence]}'.format(
self.id, queue_id, final_results))
self.add_dspam_headers(final_results)
return Milter.ACCEPT
def close(self):
"""
Log disconnects.
"""
time_spent = time.time() - self.time_start
logger.debug(
'<{}> Disconnect from [{}]:{}, time spent {:.3f} seconds'.format(
self.id, self.client_ip, self.client_port, time_spent))
return Milter.CONTINUE
def compute_verdict(self, results):
"""
Match results to the configured reject, quarantine and accept classes,
and return a verdict based on that.
The verdict classes are matched in the order: reject_classes,
quarantine_classes, accept_classes. This means that you can configure
different verdicts for different confidence results, for instance:
reject_classes= Spam:0.99 # Reject obvious spam
quarantine_classes = Spam:0.7 # Quarantine spam with confidence
# between 0.7 and 0.99
accept_classes = Spam # Accept low confidence spam (good
# for FP and retraining)
Args:
results -- A results dictionary from DspamClient.
"""
if results['class'] in self.reject_classes:
threshold = self.reject_classes[results['class']]
if float(results['confidence']) >= threshold:
logger.debug(
'<{0}> Suggesting to reject the message based on DSPAM '
'results: user={1[user]}, class={1[class]}, '
'confidence={1[confidence]}'.format(self.id, results))
return self.VERDICT_REJECT
if results['class'] in self.quarantine_classes:
threshold = self.quarantine_classes[results['class']]
if float(results['confidence']) >= threshold:
logger.debug(
'<{0}> Suggesting to quarantine the message based on '
'DSPAM results: user={1[user]}, class={1[class]}, '
'confidence={1[confidence]}'.format(self.id, results))
return self.VERDICT_QUARANTINE
if results['class'] in self.accept_classes:
threshold = self.accept_classes[results['class']]
if float(results['confidence']) >= threshold:
logger.debug(
'<{0}> Suggesting to accept the message based on DSPAM '
'results: user={1[user]}, class={1[class]}, '
'confidence={1[confidence]}'.format(self.id, results))
return self.VERDICT_ACCEPT
logger.debug(
'<{0}> Suggesting to accept the message, no verdict class matched '
'DSPAM results: user={1[user]}, class={1[class]}, '
'confidence={1[confidence]}'.format(self.id, results))
return self.VERDICT_ACCEPT
|
whyscream/dspam-milter | dspam/milter.py | DspamMilterDaemon.configure | python | def configure(self, config_file):
cfg = configparser.RawConfigParser()
try:
cfg.readfp(open(config_file))
except IOError as err:
logger.critical(
'Error while reading config file {}: {}'.format(
config_file, err.strerror))
sys.exit(1)
logger.info('Parsed config file ' + config_file)
# Extract user-defined log level from configuration
if cfg.has_option('milter', 'loglevel'):
loglevel = cfg.get('milter', 'loglevel')
loglevel_numeric = getattr(logging, loglevel.upper(), None)
if not isinstance(loglevel_numeric, int):
logger.critical(
'Config contains unsupported loglevel: ' + loglevel)
exit(1)
rl = logging.getLogger()
rl.setLevel(loglevel_numeric)
logger.debug(
'Config option applied: milter->loglevel: {}'.format(loglevel))
# Apply all config options to their respective classes
section_class_map = {
'milter': self,
'dspam': DspamClient,
'classification': DspamMilter,
}
for section in cfg.sections():
try:
class_ = section_class_map[section]
except KeyError:
logger.warning('Config contains unknown section: ' + section)
continue
logger.debug('Handling config section: ' + section)
dict_options = [
'headers',
'reject_classes',
'quarantine_classes',
'accept_classes'
]
for option in cfg.options(section):
# Kludge: static_user needs to be set on the milter,
# not on the client
if section == 'dspam' and option == 'static_user':
value = cfg.get('dspam', 'static_user')
DspamMilter.static_user = value
logger.debug(
'Config option applied: dspam->static_user: {}'.format(
value))
continue
if not hasattr(class_, option):
logger.warning(
'Config contains unknown option: {}->{}'.format(
section, option))
continue
value = cfg.get(section, option)
if option in dict_options:
value = utils.config_str2dict(value)
elif value.lower() in ['false', 'no']:
value = False
elif value.lower() in ['true', 'yes']:
value = True
setattr(class_, option, value)
logger.debug(
'Config option applied: {}->{}: {}'.format(
section, option, value))
logger.debug('Configuration completed') | Parse configuration, and setup objects to use it. | train | https://github.com/whyscream/dspam-milter/blob/f9939b718eed02cb7e56f8c5b921db4cfe1cd85a/dspam/milter.py#L352-L429 | null | class DspamMilterDaemon(object):
"""
Run the Milter as a UNIX daemon process.
"""
# Default configuration
socket = 'inet:2425@localhost'
timeout = 300
loglevel = 'INFO'
pidfile = '/var/run/dspam/dspam-milter.pid'
daemonize = True
def run(self, config_file=None):
utils.log_to_syslog()
logger.info('DSPAM Milter startup (v{})'.format(VERSION))
if config_file is not None:
self.configure(config_file)
if self.daemonize:
utils.daemonize(self.pidfile)
Milter.factory = DspamMilter
Milter.runmilter('DspamMilter', self.socket, self.timeout)
logger.info('DSPAM Milter shutdown (v{})'.format(VERSION))
logging.shutdown()
|
whyscream/dspam-milter | dspam/utils.py | daemonize | python | def daemonize(pidfile=None):
# Prevent core dumps
resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
# Change working directory
os.chdir("/")
# Change file creation mask
os.umask(0)
# Detach process context: do double fork
pid = os.fork()
if pid > 0:
os._exit(0)
os.setsid()
pid = os.fork()
if pid > 0:
os._exit(0)
# Create signal handler for SIGTERM
def terminate(signal, stack_frame):
msg = 'Terminating on signal {}'.format(signal)
logger.info(msg)
raise SystemExit(msg)
signal.signal(signal.SIGTERM, terminate)
# Redirect input/output streams
streams = [sys.stdin, sys.stdout, sys.stderr]
for stream in streams:
devnull = os.open(os.devnull, os.O_RDWR)
os.dup2(devnull, stream.fileno())
# Close file descriptors
for fd in [stream.fileno() for stream in streams]:
try:
os.close(fd)
except OSError as err:
if err.errno == errno.EBADF:
# File descriptor was not open
pass
# Create pidfile
if pidfile is None or pidfile.strip() == '':
logger.debug('Empty pidfile set')
else:
pid = os.getpid()
try:
with open(pidfile, 'w') as f:
f.write('{}\n'.format(pid))
f.close()
except EnvironmentError:
logger.error('Failed to create pidfile at {}'.format(pidfile))
def remove_pid_file():
os.remove(pidfile)
atexit.register(remove_pid_file)
logger.debug('Process daemonized') | Turn the running process into a proper daemon according to PEP3143.
Args:
pidfile --The pidfile to create. | train | https://github.com/whyscream/dspam-milter/blob/f9939b718eed02cb7e56f8c5b921db4cfe1cd85a/dspam/utils.py#L18-L84 | null | # Copyright (c) 2013, Tom Hendrikx
# All rights reserved.
#
# See LICENSE for the license.
import atexit
import errno
import logging
from logging.handlers import SysLogHandler
import os
import resource
import signal
import sys
logger = logging.getLogger(__name__)
def config_str2dict(option_value):
"""
Parse the value of a config option and convert it to a dictionary.
The configuration allows lines formatted like:
foo = Bar:1,Baz,Flub:0.75
This gets converted to a dictionary:
foo = { 'Bar': 1, 'Baz': 0, 'Flub': 0.75 }
Args:
option_value -- The config string to parse.
"""
dict = {}
for key in option_value.split(','):
if ':' in key:
key, value = pair.split(':')
value = float(value)
else:
value = 0
dict[key] = value
return dict
def log_to_syslog():
"""
Configure logging to syslog.
"""
# Get root logger
rl = logging.getLogger()
rl.setLevel('INFO')
# Stderr gets critical messages (mostly config/setup issues)
# only when not daemonized
stderr = logging.StreamHandler(stream=sys.stderr)
stderr.setLevel(logging.CRITICAL)
stderr.setFormatter(logging.Formatter(
'%(asctime)s %(name)s: %(levelname)s %(message)s'))
rl.addHandler(stderr)
# All interesting data goes to syslog, using root logger's loglevel
syslog = SysLogHandler(address='/dev/log', facility=SysLogHandler.LOG_MAIL)
syslog.setFormatter(logging.Formatter(
'%(name)s[%(process)d]: %(levelname)s %(message)s'))
rl.addHandler(syslog)
|
whyscream/dspam-milter | dspam/utils.py | config_str2dict | python | def config_str2dict(option_value):
dict = {}
for key in option_value.split(','):
if ':' in key:
key, value = pair.split(':')
value = float(value)
else:
value = 0
dict[key] = value
return dict | Parse the value of a config option and convert it to a dictionary.
The configuration allows lines formatted like:
foo = Bar:1,Baz,Flub:0.75
This gets converted to a dictionary:
foo = { 'Bar': 1, 'Baz': 0, 'Flub': 0.75 }
Args:
option_value -- The config string to parse. | train | https://github.com/whyscream/dspam-milter/blob/f9939b718eed02cb7e56f8c5b921db4cfe1cd85a/dspam/utils.py#L87-L108 | null | # Copyright (c) 2013, Tom Hendrikx
# All rights reserved.
#
# See LICENSE for the license.
import atexit
import errno
import logging
from logging.handlers import SysLogHandler
import os
import resource
import signal
import sys
logger = logging.getLogger(__name__)
def daemonize(pidfile=None):
"""
Turn the running process into a proper daemon according to PEP3143.
Args:
pidfile --The pidfile to create.
"""
# Prevent core dumps
resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
# Change working directory
os.chdir("/")
# Change file creation mask
os.umask(0)
# Detach process context: do double fork
pid = os.fork()
if pid > 0:
os._exit(0)
os.setsid()
pid = os.fork()
if pid > 0:
os._exit(0)
# Create signal handler for SIGTERM
def terminate(signal, stack_frame):
msg = 'Terminating on signal {}'.format(signal)
logger.info(msg)
raise SystemExit(msg)
signal.signal(signal.SIGTERM, terminate)
# Redirect input/output streams
streams = [sys.stdin, sys.stdout, sys.stderr]
for stream in streams:
devnull = os.open(os.devnull, os.O_RDWR)
os.dup2(devnull, stream.fileno())
# Close file descriptors
for fd in [stream.fileno() for stream in streams]:
try:
os.close(fd)
except OSError as err:
if err.errno == errno.EBADF:
# File descriptor was not open
pass
# Create pidfile
if pidfile is None or pidfile.strip() == '':
logger.debug('Empty pidfile set')
else:
pid = os.getpid()
try:
with open(pidfile, 'w') as f:
f.write('{}\n'.format(pid))
f.close()
except EnvironmentError:
logger.error('Failed to create pidfile at {}'.format(pidfile))
def remove_pid_file():
os.remove(pidfile)
atexit.register(remove_pid_file)
logger.debug('Process daemonized')
def log_to_syslog():
"""
Configure logging to syslog.
"""
# Get root logger
rl = logging.getLogger()
rl.setLevel('INFO')
# Stderr gets critical messages (mostly config/setup issues)
# only when not daemonized
stderr = logging.StreamHandler(stream=sys.stderr)
stderr.setLevel(logging.CRITICAL)
stderr.setFormatter(logging.Formatter(
'%(asctime)s %(name)s: %(levelname)s %(message)s'))
rl.addHandler(stderr)
# All interesting data goes to syslog, using root logger's loglevel
syslog = SysLogHandler(address='/dev/log', facility=SysLogHandler.LOG_MAIL)
syslog.setFormatter(logging.Formatter(
'%(name)s[%(process)d]: %(levelname)s %(message)s'))
rl.addHandler(syslog)
|
whyscream/dspam-milter | dspam/utils.py | log_to_syslog | python | def log_to_syslog():
# Get root logger
rl = logging.getLogger()
rl.setLevel('INFO')
# Stderr gets critical messages (mostly config/setup issues)
# only when not daemonized
stderr = logging.StreamHandler(stream=sys.stderr)
stderr.setLevel(logging.CRITICAL)
stderr.setFormatter(logging.Formatter(
'%(asctime)s %(name)s: %(levelname)s %(message)s'))
rl.addHandler(stderr)
# All interesting data goes to syslog, using root logger's loglevel
syslog = SysLogHandler(address='/dev/log', facility=SysLogHandler.LOG_MAIL)
syslog.setFormatter(logging.Formatter(
'%(name)s[%(process)d]: %(levelname)s %(message)s'))
rl.addHandler(syslog) | Configure logging to syslog. | train | https://github.com/whyscream/dspam-milter/blob/f9939b718eed02cb7e56f8c5b921db4cfe1cd85a/dspam/utils.py#L111-L132 | null | # Copyright (c) 2013, Tom Hendrikx
# All rights reserved.
#
# See LICENSE for the license.
import atexit
import errno
import logging
from logging.handlers import SysLogHandler
import os
import resource
import signal
import sys
logger = logging.getLogger(__name__)
def daemonize(pidfile=None):
"""
Turn the running process into a proper daemon according to PEP3143.
Args:
pidfile --The pidfile to create.
"""
# Prevent core dumps
resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
# Change working directory
os.chdir("/")
# Change file creation mask
os.umask(0)
# Detach process context: do double fork
pid = os.fork()
if pid > 0:
os._exit(0)
os.setsid()
pid = os.fork()
if pid > 0:
os._exit(0)
# Create signal handler for SIGTERM
def terminate(signal, stack_frame):
msg = 'Terminating on signal {}'.format(signal)
logger.info(msg)
raise SystemExit(msg)
signal.signal(signal.SIGTERM, terminate)
# Redirect input/output streams
streams = [sys.stdin, sys.stdout, sys.stderr]
for stream in streams:
devnull = os.open(os.devnull, os.O_RDWR)
os.dup2(devnull, stream.fileno())
# Close file descriptors
for fd in [stream.fileno() for stream in streams]:
try:
os.close(fd)
except OSError as err:
if err.errno == errno.EBADF:
# File descriptor was not open
pass
# Create pidfile
if pidfile is None or pidfile.strip() == '':
logger.debug('Empty pidfile set')
else:
pid = os.getpid()
try:
with open(pidfile, 'w') as f:
f.write('{}\n'.format(pid))
f.close()
except EnvironmentError:
logger.error('Failed to create pidfile at {}'.format(pidfile))
def remove_pid_file():
os.remove(pidfile)
atexit.register(remove_pid_file)
logger.debug('Process daemonized')
def config_str2dict(option_value):
"""
Parse the value of a config option and convert it to a dictionary.
The configuration allows lines formatted like:
foo = Bar:1,Baz,Flub:0.75
This gets converted to a dictionary:
foo = { 'Bar': 1, 'Baz': 0, 'Flub': 0.75 }
Args:
option_value -- The config string to parse.
"""
dict = {}
for key in option_value.split(','):
if ':' in key:
key, value = pair.split(':')
value = float(value)
else:
value = 0
dict[key] = value
return dict
|
whyscream/dspam-milter | dspam/client.py | DspamClient._send | python | def _send(self, line):
if not line.endswith('\r\n'):
if line.endswith('\n'):
logger.debug('Fixing bare LF before sending data to socket')
line = line[0:-1] + '\r\n'
else:
logger.debug(
'Fixing missing CRLF before sending data to socket')
line = line + '\r\n'
logger.debug('Client sent: ' + line.rstrip())
self._socket.send(line) | Write a line of data to the server.
Args:
line -- A single line of data to write to the socket. | train | https://github.com/whyscream/dspam-milter/blob/f9939b718eed02cb7e56f8c5b921db4cfe1cd85a/dspam/client.py#L92-L109 | null | class DspamClient(object):
"""
A DSPAM client can be used to interact with a DSPAM server.
The client is able to speak to a DSPAM server over both a TCP or UNIX
domain socket exposed by a running DSPAM server, and interact with it
through its supported protocols: LMTP and DLMTP. The latter is an
enhanced version of LMTP to facilitate some options that are not possible
when using strict LMTP.
Some common DSPAM operations are included in this class, custom
operations can be built by creating a new LMTP dialog with the
low-level LMTP commands.
DSPAM server setup
==================
To use the client to speak with a DSPAM server, the server must be
configured to expose a TCP (dspam.conf: ServerHost, ServerPort) or
UNIX domain socket (dspam.conf: ServerDomainSocketPath).
The server can support mulitple modes (dspam.conf: ServerMode) for
interaction with connecting clients. Which mode you need, depends on
the operations you need to perform. Most of the time you'll want to
use DLMTP though, which means that you'll also need to setup
authentication (dspam.conf: ServerPass.<ident>).
Python DspamClient setup
========================
Each DspamClient instance needs to talk to a DSPAM server.
You need to specify the socket where DSPAM is listening when creating
a new instance. If you need to use DLMTP features (probably most of the
time), you also need to pass the ident and password.
"""
# Default configuration
socket = 'inet:24@localhost'
dlmtp_ident = None
dlmtp_pass = None
def __init__(self, socket=None, dlmtp_ident=None, dlmtp_pass=None):
"""
Initialize new DSPAM client.
The socket specifies where DSPAM is listening. Specify it in the form:
unix:PATH or inet:PORT[@HOST]. For example, the default UNIX domain
socket in dspam.conf would look like: unix:/var/run/dspam/dspam.sock,
and the default TCP socket: inet:24@localhost.
Args:
socket -- The socket on which DSPAM is listening.
dlmtp_ident -- The authentication identifier.
dlmtp_pass -- The authentication password.
"""
if socket is not None:
self.socket = socket
if dlmtp_ident is not None:
self.dlmtp_ident = dlmtp_ident
if dlmtp_pass is not None:
self.dlmtp_pass = dlmtp_pass
self.dlmtp = False
self.results = {}
# Some internal structures
self._socket = None
self._recipients = []
def __del__(self):
"""
Destroy the DSPAM client object.
"""
if self._socket:
self.quit()
def _read(self):
"""
Read a single response line from the server.
"""
line = ''
finished = False
while not finished:
char = self._socket.recv(1)
if char == '':
return ''
elif char == '\r':
continue
elif char == '\n':
finished = True
continue
else:
line = line + char
logger.debug('Server sent: ' + line.rstrip())
return line
def _peek(self, chars=1):
"""
Peek at the data in the server response.
Peeking should only be done when the response can be predicted.
Make sure that the socket will not block by requesting too
much data from it while peeking.
Args:
chars -- the number of characters to peek.
"""
line = self._socket.recv(chars, socket.MSG_PEEK)
logger.debug('Server sent (peek): ' + line.rstrip())
return line
def connect(self):
"""
Connect to TCP or domain socket, and process the server LMTP greeting.
"""
# extract proto from socket setting
try:
(proto, spec) = self.socket.split(':')
except ValueError:
raise DspamClientError(
'Failed to parse DSPAM socket specification, '
'no proto found: ' + self.socket)
if proto == 'unix':
# connect to UNIX domain socket
try:
self._socket = socket.socket(
socket.AF_UNIX, socket.SOCK_STREAM)
self._socket.connect(spec)
except socket.error as err:
self._socket = None
raise DspamClientError(
'Failed to connect to DSPAM server '
'at socket {}: {}'.format(spec, err))
logger.debug('Connected to DSPAM server at socket {}'.format(spec))
elif proto == 'inet' or proto == 'inet6':
# connect to TCP socket
try:
(port, host) = spec.split('@')
port = int(port)
if host == '':
host = 'localhost'
except ValueError:
port = int(spec)
host = 'localhost'
try:
self._socket = socket.socket(
socket.AF_INET, socket.SOCK_STREAM)
self._socket.connect((host, port))
except socket.error as err:
self._socket = None
raise DspamClientError(
'Failed to connect to DSPAM server at host {} '
'port {}: {}'.format(host, port, err))
logger.debug(
'Connected to DSPAM server at host {}, port {}'.format(
host, port))
else:
raise DspamClientError(
'Failed to parse DSPAM socket specification, '
'unknown proto ' + proto)
resp = self._read()
if not resp.startswith('220'):
raise DspamClientError(
'Unexpected server response at connect: ' + resp)
def lhlo(self):
"""
Send LMTP LHLO greeting, and process the server response.
A regular LMTP greeting is sent, and if accepted by the server, the
capabilities it returns are parsed.
DLMTP authentication starts here by announcing the dlmtp_ident in
the LHLO as our hostname. When the ident is accepted and DLMTP
mode is enabled (dspam.conf: ServerMode=dspam|auto), the
DSPAMPROCESSMODE capability is announced by the server.
When this capability is detected, the <DspamClient>.dlmtp flag
will be enabled.
"""
if self.dlmtp_ident is not None:
host = self.dlmtp_ident
else:
host = socket.getfqdn()
self._send('LHLO ' + host + '\r\n')
finished = False
while not finished:
resp = self._read()
if not resp.startswith('250'):
raise DspamClientError(
'Unexpected server response at LHLO: ' + resp)
if resp[4:20] == 'DSPAMPROCESSMODE':
self.dlmtp = True
logger.debug('Detected DLMTP extension in LHLO response')
if resp[3] == ' ':
# difference between "250-8BITMIME" and "250 SIZE"
finished = True
def mailfrom(self, sender=None, client_args=None):
"""
Send LMTP MAIL FROM command, and process the server response.
In DLMTP mode, the server expects the client to identify itself.
Because the envelope sender is of no importance to DSPAM, the client
is expected to send an identity and a password (dspam.conf:
ServerPass.<ident>="<password>") in stead of the actual sender.
When you need want DSPAM to deliver the message itself and need to
pass the server an actual envelope sender for that, add the
--mail-from parameter in client_args.
When the server is setup in LMTP mode only (dspam.conf:
ServerMode=standard), the envelope sender is a regular envelope
sender, and is re-used when delivering the message after processing.
Client args
===========
When in DLMTP mode (and with proper auth credentials), the server
accepts parameters specified by the client. These are in the form
as they are passed to the command-line 'dspam' program.
See man dspam(1) for details, and the process() or classify() methods
in this class for simple examples.
Args:
sender -- The envelope sender to use in LMTP mode.
client_args -- DSPAM parameters to pass to the server in DLMTP mode.
"""
if sender and client_args:
raise DspamClientError('Arguments are mutually exclusive')
if client_args and not self.dlmtp:
raise DspamClientError(
'Cannot send client args, server does not support DLMTP')
command = 'MAIL FROM:'
if not sender:
if self.dlmtp_ident and self.dlmtp_pass:
sender = self.dlmtp_pass + '@' + self.dlmtp_ident
else:
sender = ''
command = command + '<' + sender + '>'
if client_args:
command = command + ' DSPAMPROCESSMODE="{}"'.format(client_args)
self._send(command + '\r\n')
resp = self._read()
if not resp.startswith('250'):
raise DspamClientError(
'Unexpected server response at MAIL FROM: ' + resp)
def rcptto(self, recipients):
"""
Send LMTP RCPT TO command, and process the server response.
The DSPAM server expects to find one or more valid DSPAM users as
envelope recipients. The set recipient will be the user DSPAM
processes mail for.
When you need want DSPAM to deliver the message itself, and need to
pass the server an envelope recipient for this that differs from the
DSPAM user account name, use the --rcpt-to parameter in client_args
at mailfrom().
args:
recipients -- A list of recipients
"""
for rcpt in recipients:
self._send('RCPT TO:<{}>\r\n'.format(rcpt))
resp = self._read()
if not resp.startswith('250'):
raise DspamClientError(
'Unexpected server response at RCPT TO for '
'recipient {}: {}'.format(rcpt, resp))
self._recipients.append(rcpt)
def data(self, message):
"""
Send LMTP DATA command and process the server response.
The server response is stored as a list of dicts in
<DspamClient>.results, keyed on the recipient name(s). Depending
on the server return data, different formats are available:
* LMTP mode -- Dict containing 'accepted', a bool indicating
that the message was handed to the server.
* Summary mode -- Dict containing 'username', 'result',
'classification', 'probability', 'confidence'
and 'signature'.
* Stdout mode -- Dict containing 'result' and 'message', the
complete message payload including added headers.
The return data is always parsed and stored, independent of its format.
If you requested a regular LMTP response, but the server
responded with an DLMTP summary, the summary is still stored in
<DspamClient>.results, and you will need to check the result format
yourself and decide whether that was acceptable for your use case.
This is due to the fact that it's possible to configure the server to
return non LMTP responses, even when in LMTP mode (see dspam.conf:
ServerParameters).
Note: while processing response data in stdout mode, it's not possible
to relate the returned messages to a specific recipient, when multiple
recipients were specified in rcptto(). There is no guarantee
that the message stored in <DspamClient>.results['foo'] actually
belongs to the recipient 'foo'. If this relationship needs to be
guaranteed, send each message with a single recipient in rcptto().
args:
message -- The full message payload to pass to the server.
"""
self._send('DATA\r\n')
resp = self._read()
if not resp.startswith('354'):
raise DspamClientError(
'Unexpected server response at DATA: ' + resp)
# Send message payload
for line in message.split('\n'):
if line == '.':
# Dot stuffing
line = '..'
self._send(line)
# Send end-of-data
self._send('.\r\n')
# Depending on server configuration, several responses are possible:
# * Standard LMTP response code, once for each recipient:
# 250 2.6.0 <bar> Message accepted for delivery
# * Summary response (--deliver=summary), once for each recipient:
# X-DSPAM-Result: bar; result="Spam"; class="Spam"; \
# probability=1.0000; confidence=0.85; \
# signature=50c50c0f315636261418125
# (after the last summary line, a single dot is sent)
# * Stdout response (--delivery=stdout), once for each recipient:
# X-Daemon-Classification: INNOCENT
# <complete mail body>
#
# Note that when an unknown recipient is passed in, DSPAM will simply
# deliver the message (dspam.conf: (Un)TrustedDeliveryAgent,
# DeliveryHost) unaltered and unfiltered. The response for unknown
# recipients will still be something indicating 'accepted'.
peek = self._peek(24)
if peek.startswith('250'):
# Response is LTMP formatted
regex = re.compile('250 \d\.\d\.\d <([^>]+)>')
finished = False
while not finished:
resp = self._read()
match = regex.match(resp)
if not match:
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
rcpt = match.group(1)
self.results[rcpt] = {'accepted': True}
logger.debug(
'Message accepted for recipient {} in LMTP mode'.format(
rcpt))
if len(self.results) == len(self._recipients):
finished = True
elif peek.startswith('X-DSPAM-Result:'):
# Response is in summary format
regex = re.compile('X-DSPAM-Result: ([^;]+); result="(\w+)"; '
'class="(\w+)"; probability=([\d\.]+); '
'confidence=([\d\.]+); signature=([\w,/]+)')
finished = False
while not finished:
resp = self._read()
match = regex.match(resp)
if not match:
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
rcpt = match.group(1)
# map results to their DSPAM classification result names
fields = ('user', 'result', 'class',
'probability', 'confidence', 'signature')
self.results[rcpt] = dict(zip(fields, match.groups()))
if self.results[rcpt]['signature'] == 'N/A':
del(self.results[rcpt]['signature'])
logger.debug(
'Message handled for recipient {} in DLMTP summary mode, '
'result is {}'.format(rcpt, match.group(2)))
if len(self.results) == len(self._recipients):
# we received responses for all accepted recipients
finished = True
# read final dot
resp = self._read()
if resp != '.':
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
elif peek.startswith('X-Daemon-Classification:'):
# Response is in stdout format
finished = False
message = ''
while not finished:
resp = self._read()
if resp.startswith('X-Daemon-Classification:'):
if message != '':
# A new message body starts, store the previous one
rcpt = self._recipients.pop(0)
self.results[rcpt] = {
'result': result,
'message': message
}
logger.debug(
'Message handled for recipient {} in DLMTP '
'stdout mode, result is {}, message body '
'is {} chars'.format(rcpt, result, len(message)))
message = ''
# Remember next result
result = resp[25:]
elif resp == '.':
# A single dot can signal end-of-data, or might be just
# regular mail data.
self._socket.setblocking(False)
try:
# If _peek() succeeds, we did not reach end-of-data yet
# so it was message content.
peek = self._peek(1)
message = message + '\r\n' + resp
except socket.error:
# reached end-of-data, store message and finish
finished = True
rcpt = self._recipients.pop(0)
# strip final newline
message = message[0:-2]
self.results[rcpt] = {
'result': result,
'message': message
}
logger.debug(
'Message accepted for recipient {} in DLMTP '
'stdout mode, result is {}, message body '
'is {} chars'.format(rcpt, result, len(message)))
self._socket.setblocking(True)
else:
# regular message data
if message == '':
message = resp
else:
message = message + '\r\n' + resp
else:
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
def rset(self):
"""
Send LMTP RSET command and process the server response.
"""
self._send('RSET\r\n')
resp = self._read()
if not resp.startswith('250'):
logger.warn('Unexpected server response at RSET: ' + resp)
self._recipients = []
self.results = {}
def quit(self):
"""
Send LMTP QUIT command, read the server response and disconnect.
"""
self._send('QUIT\r\n')
resp = self._read()
if not resp.startswith('221'):
logger.warning('Unexpected server response at QUIT: ' + resp)
self._socket.close()
self._socket = None
self._recipients = []
self.results = {}
def process(self, message, user):
"""
Process a message.
"""
if not self._socket:
self.connect()
self.lhlo()
else:
self.rset()
if not self.dlmtp:
raise DspamClientError('DLMTP mode not available')
self.mailfrom(client_args='--process --deliver=summary')
self.rcptto((user,))
self.data(message)
# check for valid result format
if 'class' not in self.results[user]:
raise DspamClientError(
'Unexpected response format from server at END-OF-DATA, '
'an error occured')
return self.results[user]
def classify(self, message, user):
"""
Classify a message.
"""
if not self._socket:
self.connect()
self.lhlo()
else:
self.rset()
if not self.dlmtp:
raise DspamClientError('DLMTP mode not available')
self.mailfrom(client_args='--classify --deliver=summary')
self.rcptto((user,))
self.data(message)
# check for valid result format
if 'class' not in self.results[user]:
raise DspamClientError(
'Unexpected response format from server at END-OF-DATA, '
'an error occured')
return self.results[user]
def train(self, message, user, class_):
"""
Train DSPAM with a message.
"""
raise NotImplementedError
def retrain_message(self, message, class_, source='error'):
"""
Correct an invalid classification.
"""
raise NotImplementedError
def retrain_signature(self, signature, class_, source='error'):
"""
Correct an invalid classification.
"""
raise NotImplementedError
|
whyscream/dspam-milter | dspam/client.py | DspamClient._read | python | def _read(self):
line = ''
finished = False
while not finished:
char = self._socket.recv(1)
if char == '':
return ''
elif char == '\r':
continue
elif char == '\n':
finished = True
continue
else:
line = line + char
logger.debug('Server sent: ' + line.rstrip())
return line | Read a single response line from the server. | train | https://github.com/whyscream/dspam-milter/blob/f9939b718eed02cb7e56f8c5b921db4cfe1cd85a/dspam/client.py#L111-L130 | null | class DspamClient(object):
"""
A DSPAM client can be used to interact with a DSPAM server.
The client is able to speak to a DSPAM server over both a TCP or UNIX
domain socket exposed by a running DSPAM server, and interact with it
through its supported protocols: LMTP and DLMTP. The latter is an
enhanced version of LMTP to facilitate some options that are not possible
when using strict LMTP.
Some common DSPAM operations are included in this class, custom
operations can be built by creating a new LMTP dialog with the
low-level LMTP commands.
DSPAM server setup
==================
To use the client to speak with a DSPAM server, the server must be
configured to expose a TCP (dspam.conf: ServerHost, ServerPort) or
UNIX domain socket (dspam.conf: ServerDomainSocketPath).
The server can support mulitple modes (dspam.conf: ServerMode) for
interaction with connecting clients. Which mode you need, depends on
the operations you need to perform. Most of the time you'll want to
use DLMTP though, which means that you'll also need to setup
authentication (dspam.conf: ServerPass.<ident>).
Python DspamClient setup
========================
Each DspamClient instance needs to talk to a DSPAM server.
You need to specify the socket where DSPAM is listening when creating
a new instance. If you need to use DLMTP features (probably most of the
time), you also need to pass the ident and password.
"""
# Default configuration
socket = 'inet:24@localhost'
dlmtp_ident = None
dlmtp_pass = None
def __init__(self, socket=None, dlmtp_ident=None, dlmtp_pass=None):
"""
Initialize new DSPAM client.
The socket specifies where DSPAM is listening. Specify it in the form:
unix:PATH or inet:PORT[@HOST]. For example, the default UNIX domain
socket in dspam.conf would look like: unix:/var/run/dspam/dspam.sock,
and the default TCP socket: inet:24@localhost.
Args:
socket -- The socket on which DSPAM is listening.
dlmtp_ident -- The authentication identifier.
dlmtp_pass -- The authentication password.
"""
if socket is not None:
self.socket = socket
if dlmtp_ident is not None:
self.dlmtp_ident = dlmtp_ident
if dlmtp_pass is not None:
self.dlmtp_pass = dlmtp_pass
self.dlmtp = False
self.results = {}
# Some internal structures
self._socket = None
self._recipients = []
def __del__(self):
"""
Destroy the DSPAM client object.
"""
if self._socket:
self.quit()
def _send(self, line):
"""
Write a line of data to the server.
Args:
line -- A single line of data to write to the socket.
"""
if not line.endswith('\r\n'):
if line.endswith('\n'):
logger.debug('Fixing bare LF before sending data to socket')
line = line[0:-1] + '\r\n'
else:
logger.debug(
'Fixing missing CRLF before sending data to socket')
line = line + '\r\n'
logger.debug('Client sent: ' + line.rstrip())
self._socket.send(line)
def _peek(self, chars=1):
"""
Peek at the data in the server response.
Peeking should only be done when the response can be predicted.
Make sure that the socket will not block by requesting too
much data from it while peeking.
Args:
chars -- the number of characters to peek.
"""
line = self._socket.recv(chars, socket.MSG_PEEK)
logger.debug('Server sent (peek): ' + line.rstrip())
return line
def connect(self):
"""
Connect to TCP or domain socket, and process the server LMTP greeting.
"""
# extract proto from socket setting
try:
(proto, spec) = self.socket.split(':')
except ValueError:
raise DspamClientError(
'Failed to parse DSPAM socket specification, '
'no proto found: ' + self.socket)
if proto == 'unix':
# connect to UNIX domain socket
try:
self._socket = socket.socket(
socket.AF_UNIX, socket.SOCK_STREAM)
self._socket.connect(spec)
except socket.error as err:
self._socket = None
raise DspamClientError(
'Failed to connect to DSPAM server '
'at socket {}: {}'.format(spec, err))
logger.debug('Connected to DSPAM server at socket {}'.format(spec))
elif proto == 'inet' or proto == 'inet6':
# connect to TCP socket
try:
(port, host) = spec.split('@')
port = int(port)
if host == '':
host = 'localhost'
except ValueError:
port = int(spec)
host = 'localhost'
try:
self._socket = socket.socket(
socket.AF_INET, socket.SOCK_STREAM)
self._socket.connect((host, port))
except socket.error as err:
self._socket = None
raise DspamClientError(
'Failed to connect to DSPAM server at host {} '
'port {}: {}'.format(host, port, err))
logger.debug(
'Connected to DSPAM server at host {}, port {}'.format(
host, port))
else:
raise DspamClientError(
'Failed to parse DSPAM socket specification, '
'unknown proto ' + proto)
resp = self._read()
if not resp.startswith('220'):
raise DspamClientError(
'Unexpected server response at connect: ' + resp)
def lhlo(self):
"""
Send LMTP LHLO greeting, and process the server response.
A regular LMTP greeting is sent, and if accepted by the server, the
capabilities it returns are parsed.
DLMTP authentication starts here by announcing the dlmtp_ident in
the LHLO as our hostname. When the ident is accepted and DLMTP
mode is enabled (dspam.conf: ServerMode=dspam|auto), the
DSPAMPROCESSMODE capability is announced by the server.
When this capability is detected, the <DspamClient>.dlmtp flag
will be enabled.
"""
if self.dlmtp_ident is not None:
host = self.dlmtp_ident
else:
host = socket.getfqdn()
self._send('LHLO ' + host + '\r\n')
finished = False
while not finished:
resp = self._read()
if not resp.startswith('250'):
raise DspamClientError(
'Unexpected server response at LHLO: ' + resp)
if resp[4:20] == 'DSPAMPROCESSMODE':
self.dlmtp = True
logger.debug('Detected DLMTP extension in LHLO response')
if resp[3] == ' ':
# difference between "250-8BITMIME" and "250 SIZE"
finished = True
def mailfrom(self, sender=None, client_args=None):
"""
Send LMTP MAIL FROM command, and process the server response.
In DLMTP mode, the server expects the client to identify itself.
Because the envelope sender is of no importance to DSPAM, the client
is expected to send an identity and a password (dspam.conf:
ServerPass.<ident>="<password>") in stead of the actual sender.
When you need want DSPAM to deliver the message itself and need to
pass the server an actual envelope sender for that, add the
--mail-from parameter in client_args.
When the server is setup in LMTP mode only (dspam.conf:
ServerMode=standard), the envelope sender is a regular envelope
sender, and is re-used when delivering the message after processing.
Client args
===========
When in DLMTP mode (and with proper auth credentials), the server
accepts parameters specified by the client. These are in the form
as they are passed to the command-line 'dspam' program.
See man dspam(1) for details, and the process() or classify() methods
in this class for simple examples.
Args:
sender -- The envelope sender to use in LMTP mode.
client_args -- DSPAM parameters to pass to the server in DLMTP mode.
"""
if sender and client_args:
raise DspamClientError('Arguments are mutually exclusive')
if client_args and not self.dlmtp:
raise DspamClientError(
'Cannot send client args, server does not support DLMTP')
command = 'MAIL FROM:'
if not sender:
if self.dlmtp_ident and self.dlmtp_pass:
sender = self.dlmtp_pass + '@' + self.dlmtp_ident
else:
sender = ''
command = command + '<' + sender + '>'
if client_args:
command = command + ' DSPAMPROCESSMODE="{}"'.format(client_args)
self._send(command + '\r\n')
resp = self._read()
if not resp.startswith('250'):
raise DspamClientError(
'Unexpected server response at MAIL FROM: ' + resp)
def rcptto(self, recipients):
"""
Send LMTP RCPT TO command, and process the server response.
The DSPAM server expects to find one or more valid DSPAM users as
envelope recipients. The set recipient will be the user DSPAM
processes mail for.
When you need want DSPAM to deliver the message itself, and need to
pass the server an envelope recipient for this that differs from the
DSPAM user account name, use the --rcpt-to parameter in client_args
at mailfrom().
args:
recipients -- A list of recipients
"""
for rcpt in recipients:
self._send('RCPT TO:<{}>\r\n'.format(rcpt))
resp = self._read()
if not resp.startswith('250'):
raise DspamClientError(
'Unexpected server response at RCPT TO for '
'recipient {}: {}'.format(rcpt, resp))
self._recipients.append(rcpt)
def data(self, message):
"""
Send LMTP DATA command and process the server response.
The server response is stored as a list of dicts in
<DspamClient>.results, keyed on the recipient name(s). Depending
on the server return data, different formats are available:
* LMTP mode -- Dict containing 'accepted', a bool indicating
that the message was handed to the server.
* Summary mode -- Dict containing 'username', 'result',
'classification', 'probability', 'confidence'
and 'signature'.
* Stdout mode -- Dict containing 'result' and 'message', the
complete message payload including added headers.
The return data is always parsed and stored, independent of its format.
If you requested a regular LMTP response, but the server
responded with an DLMTP summary, the summary is still stored in
<DspamClient>.results, and you will need to check the result format
yourself and decide whether that was acceptable for your use case.
This is due to the fact that it's possible to configure the server to
return non LMTP responses, even when in LMTP mode (see dspam.conf:
ServerParameters).
Note: while processing response data in stdout mode, it's not possible
to relate the returned messages to a specific recipient, when multiple
recipients were specified in rcptto(). There is no guarantee
that the message stored in <DspamClient>.results['foo'] actually
belongs to the recipient 'foo'. If this relationship needs to be
guaranteed, send each message with a single recipient in rcptto().
args:
message -- The full message payload to pass to the server.
"""
self._send('DATA\r\n')
resp = self._read()
if not resp.startswith('354'):
raise DspamClientError(
'Unexpected server response at DATA: ' + resp)
# Send message payload
for line in message.split('\n'):
if line == '.':
# Dot stuffing
line = '..'
self._send(line)
# Send end-of-data
self._send('.\r\n')
# Depending on server configuration, several responses are possible:
# * Standard LMTP response code, once for each recipient:
# 250 2.6.0 <bar> Message accepted for delivery
# * Summary response (--deliver=summary), once for each recipient:
# X-DSPAM-Result: bar; result="Spam"; class="Spam"; \
# probability=1.0000; confidence=0.85; \
# signature=50c50c0f315636261418125
# (after the last summary line, a single dot is sent)
# * Stdout response (--delivery=stdout), once for each recipient:
# X-Daemon-Classification: INNOCENT
# <complete mail body>
#
# Note that when an unknown recipient is passed in, DSPAM will simply
# deliver the message (dspam.conf: (Un)TrustedDeliveryAgent,
# DeliveryHost) unaltered and unfiltered. The response for unknown
# recipients will still be something indicating 'accepted'.
peek = self._peek(24)
if peek.startswith('250'):
# Response is LTMP formatted
regex = re.compile('250 \d\.\d\.\d <([^>]+)>')
finished = False
while not finished:
resp = self._read()
match = regex.match(resp)
if not match:
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
rcpt = match.group(1)
self.results[rcpt] = {'accepted': True}
logger.debug(
'Message accepted for recipient {} in LMTP mode'.format(
rcpt))
if len(self.results) == len(self._recipients):
finished = True
elif peek.startswith('X-DSPAM-Result:'):
# Response is in summary format
regex = re.compile('X-DSPAM-Result: ([^;]+); result="(\w+)"; '
'class="(\w+)"; probability=([\d\.]+); '
'confidence=([\d\.]+); signature=([\w,/]+)')
finished = False
while not finished:
resp = self._read()
match = regex.match(resp)
if not match:
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
rcpt = match.group(1)
# map results to their DSPAM classification result names
fields = ('user', 'result', 'class',
'probability', 'confidence', 'signature')
self.results[rcpt] = dict(zip(fields, match.groups()))
if self.results[rcpt]['signature'] == 'N/A':
del(self.results[rcpt]['signature'])
logger.debug(
'Message handled for recipient {} in DLMTP summary mode, '
'result is {}'.format(rcpt, match.group(2)))
if len(self.results) == len(self._recipients):
# we received responses for all accepted recipients
finished = True
# read final dot
resp = self._read()
if resp != '.':
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
elif peek.startswith('X-Daemon-Classification:'):
# Response is in stdout format
finished = False
message = ''
while not finished:
resp = self._read()
if resp.startswith('X-Daemon-Classification:'):
if message != '':
# A new message body starts, store the previous one
rcpt = self._recipients.pop(0)
self.results[rcpt] = {
'result': result,
'message': message
}
logger.debug(
'Message handled for recipient {} in DLMTP '
'stdout mode, result is {}, message body '
'is {} chars'.format(rcpt, result, len(message)))
message = ''
# Remember next result
result = resp[25:]
elif resp == '.':
# A single dot can signal end-of-data, or might be just
# regular mail data.
self._socket.setblocking(False)
try:
# If _peek() succeeds, we did not reach end-of-data yet
# so it was message content.
peek = self._peek(1)
message = message + '\r\n' + resp
except socket.error:
# reached end-of-data, store message and finish
finished = True
rcpt = self._recipients.pop(0)
# strip final newline
message = message[0:-2]
self.results[rcpt] = {
'result': result,
'message': message
}
logger.debug(
'Message accepted for recipient {} in DLMTP '
'stdout mode, result is {}, message body '
'is {} chars'.format(rcpt, result, len(message)))
self._socket.setblocking(True)
else:
# regular message data
if message == '':
message = resp
else:
message = message + '\r\n' + resp
else:
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
def rset(self):
"""
Send LMTP RSET command and process the server response.
"""
self._send('RSET\r\n')
resp = self._read()
if not resp.startswith('250'):
logger.warn('Unexpected server response at RSET: ' + resp)
self._recipients = []
self.results = {}
def quit(self):
"""
Send LMTP QUIT command, read the server response and disconnect.
"""
self._send('QUIT\r\n')
resp = self._read()
if not resp.startswith('221'):
logger.warning('Unexpected server response at QUIT: ' + resp)
self._socket.close()
self._socket = None
self._recipients = []
self.results = {}
def process(self, message, user):
"""
Process a message.
"""
if not self._socket:
self.connect()
self.lhlo()
else:
self.rset()
if not self.dlmtp:
raise DspamClientError('DLMTP mode not available')
self.mailfrom(client_args='--process --deliver=summary')
self.rcptto((user,))
self.data(message)
# check for valid result format
if 'class' not in self.results[user]:
raise DspamClientError(
'Unexpected response format from server at END-OF-DATA, '
'an error occured')
return self.results[user]
def classify(self, message, user):
"""
Classify a message.
"""
if not self._socket:
self.connect()
self.lhlo()
else:
self.rset()
if not self.dlmtp:
raise DspamClientError('DLMTP mode not available')
self.mailfrom(client_args='--classify --deliver=summary')
self.rcptto((user,))
self.data(message)
# check for valid result format
if 'class' not in self.results[user]:
raise DspamClientError(
'Unexpected response format from server at END-OF-DATA, '
'an error occured')
return self.results[user]
def train(self, message, user, class_):
"""
Train DSPAM with a message.
"""
raise NotImplementedError
def retrain_message(self, message, class_, source='error'):
"""
Correct an invalid classification.
"""
raise NotImplementedError
def retrain_signature(self, signature, class_, source='error'):
"""
Correct an invalid classification.
"""
raise NotImplementedError
|
whyscream/dspam-milter | dspam/client.py | DspamClient._peek | python | def _peek(self, chars=1):
line = self._socket.recv(chars, socket.MSG_PEEK)
logger.debug('Server sent (peek): ' + line.rstrip())
return line | Peek at the data in the server response.
Peeking should only be done when the response can be predicted.
Make sure that the socket will not block by requesting too
much data from it while peeking.
Args:
chars -- the number of characters to peek. | train | https://github.com/whyscream/dspam-milter/blob/f9939b718eed02cb7e56f8c5b921db4cfe1cd85a/dspam/client.py#L132-L146 | null | class DspamClient(object):
"""
A DSPAM client can be used to interact with a DSPAM server.
The client is able to speak to a DSPAM server over both a TCP or UNIX
domain socket exposed by a running DSPAM server, and interact with it
through its supported protocols: LMTP and DLMTP. The latter is an
enhanced version of LMTP to facilitate some options that are not possible
when using strict LMTP.
Some common DSPAM operations are included in this class, custom
operations can be built by creating a new LMTP dialog with the
low-level LMTP commands.
DSPAM server setup
==================
To use the client to speak with a DSPAM server, the server must be
configured to expose a TCP (dspam.conf: ServerHost, ServerPort) or
UNIX domain socket (dspam.conf: ServerDomainSocketPath).
The server can support mulitple modes (dspam.conf: ServerMode) for
interaction with connecting clients. Which mode you need, depends on
the operations you need to perform. Most of the time you'll want to
use DLMTP though, which means that you'll also need to setup
authentication (dspam.conf: ServerPass.<ident>).
Python DspamClient setup
========================
Each DspamClient instance needs to talk to a DSPAM server.
You need to specify the socket where DSPAM is listening when creating
a new instance. If you need to use DLMTP features (probably most of the
time), you also need to pass the ident and password.
"""
# Default configuration
socket = 'inet:24@localhost'
dlmtp_ident = None
dlmtp_pass = None
def __init__(self, socket=None, dlmtp_ident=None, dlmtp_pass=None):
"""
Initialize new DSPAM client.
The socket specifies where DSPAM is listening. Specify it in the form:
unix:PATH or inet:PORT[@HOST]. For example, the default UNIX domain
socket in dspam.conf would look like: unix:/var/run/dspam/dspam.sock,
and the default TCP socket: inet:24@localhost.
Args:
socket -- The socket on which DSPAM is listening.
dlmtp_ident -- The authentication identifier.
dlmtp_pass -- The authentication password.
"""
if socket is not None:
self.socket = socket
if dlmtp_ident is not None:
self.dlmtp_ident = dlmtp_ident
if dlmtp_pass is not None:
self.dlmtp_pass = dlmtp_pass
self.dlmtp = False
self.results = {}
# Some internal structures
self._socket = None
self._recipients = []
def __del__(self):
"""
Destroy the DSPAM client object.
"""
if self._socket:
self.quit()
def _send(self, line):
"""
Write a line of data to the server.
Args:
line -- A single line of data to write to the socket.
"""
if not line.endswith('\r\n'):
if line.endswith('\n'):
logger.debug('Fixing bare LF before sending data to socket')
line = line[0:-1] + '\r\n'
else:
logger.debug(
'Fixing missing CRLF before sending data to socket')
line = line + '\r\n'
logger.debug('Client sent: ' + line.rstrip())
self._socket.send(line)
def _read(self):
"""
Read a single response line from the server.
"""
line = ''
finished = False
while not finished:
char = self._socket.recv(1)
if char == '':
return ''
elif char == '\r':
continue
elif char == '\n':
finished = True
continue
else:
line = line + char
logger.debug('Server sent: ' + line.rstrip())
return line
def connect(self):
"""
Connect to TCP or domain socket, and process the server LMTP greeting.
"""
# extract proto from socket setting
try:
(proto, spec) = self.socket.split(':')
except ValueError:
raise DspamClientError(
'Failed to parse DSPAM socket specification, '
'no proto found: ' + self.socket)
if proto == 'unix':
# connect to UNIX domain socket
try:
self._socket = socket.socket(
socket.AF_UNIX, socket.SOCK_STREAM)
self._socket.connect(spec)
except socket.error as err:
self._socket = None
raise DspamClientError(
'Failed to connect to DSPAM server '
'at socket {}: {}'.format(spec, err))
logger.debug('Connected to DSPAM server at socket {}'.format(spec))
elif proto == 'inet' or proto == 'inet6':
# connect to TCP socket
try:
(port, host) = spec.split('@')
port = int(port)
if host == '':
host = 'localhost'
except ValueError:
port = int(spec)
host = 'localhost'
try:
self._socket = socket.socket(
socket.AF_INET, socket.SOCK_STREAM)
self._socket.connect((host, port))
except socket.error as err:
self._socket = None
raise DspamClientError(
'Failed to connect to DSPAM server at host {} '
'port {}: {}'.format(host, port, err))
logger.debug(
'Connected to DSPAM server at host {}, port {}'.format(
host, port))
else:
raise DspamClientError(
'Failed to parse DSPAM socket specification, '
'unknown proto ' + proto)
resp = self._read()
if not resp.startswith('220'):
raise DspamClientError(
'Unexpected server response at connect: ' + resp)
def lhlo(self):
"""
Send LMTP LHLO greeting, and process the server response.
A regular LMTP greeting is sent, and if accepted by the server, the
capabilities it returns are parsed.
DLMTP authentication starts here by announcing the dlmtp_ident in
the LHLO as our hostname. When the ident is accepted and DLMTP
mode is enabled (dspam.conf: ServerMode=dspam|auto), the
DSPAMPROCESSMODE capability is announced by the server.
When this capability is detected, the <DspamClient>.dlmtp flag
will be enabled.
"""
if self.dlmtp_ident is not None:
host = self.dlmtp_ident
else:
host = socket.getfqdn()
self._send('LHLO ' + host + '\r\n')
finished = False
while not finished:
resp = self._read()
if not resp.startswith('250'):
raise DspamClientError(
'Unexpected server response at LHLO: ' + resp)
if resp[4:20] == 'DSPAMPROCESSMODE':
self.dlmtp = True
logger.debug('Detected DLMTP extension in LHLO response')
if resp[3] == ' ':
# difference between "250-8BITMIME" and "250 SIZE"
finished = True
def mailfrom(self, sender=None, client_args=None):
"""
Send LMTP MAIL FROM command, and process the server response.
In DLMTP mode, the server expects the client to identify itself.
Because the envelope sender is of no importance to DSPAM, the client
is expected to send an identity and a password (dspam.conf:
ServerPass.<ident>="<password>") in stead of the actual sender.
When you need want DSPAM to deliver the message itself and need to
pass the server an actual envelope sender for that, add the
--mail-from parameter in client_args.
When the server is setup in LMTP mode only (dspam.conf:
ServerMode=standard), the envelope sender is a regular envelope
sender, and is re-used when delivering the message after processing.
Client args
===========
When in DLMTP mode (and with proper auth credentials), the server
accepts parameters specified by the client. These are in the form
as they are passed to the command-line 'dspam' program.
See man dspam(1) for details, and the process() or classify() methods
in this class for simple examples.
Args:
sender -- The envelope sender to use in LMTP mode.
client_args -- DSPAM parameters to pass to the server in DLMTP mode.
"""
if sender and client_args:
raise DspamClientError('Arguments are mutually exclusive')
if client_args and not self.dlmtp:
raise DspamClientError(
'Cannot send client args, server does not support DLMTP')
command = 'MAIL FROM:'
if not sender:
if self.dlmtp_ident and self.dlmtp_pass:
sender = self.dlmtp_pass + '@' + self.dlmtp_ident
else:
sender = ''
command = command + '<' + sender + '>'
if client_args:
command = command + ' DSPAMPROCESSMODE="{}"'.format(client_args)
self._send(command + '\r\n')
resp = self._read()
if not resp.startswith('250'):
raise DspamClientError(
'Unexpected server response at MAIL FROM: ' + resp)
def rcptto(self, recipients):
"""
Send LMTP RCPT TO command, and process the server response.
The DSPAM server expects to find one or more valid DSPAM users as
envelope recipients. The set recipient will be the user DSPAM
processes mail for.
When you need want DSPAM to deliver the message itself, and need to
pass the server an envelope recipient for this that differs from the
DSPAM user account name, use the --rcpt-to parameter in client_args
at mailfrom().
args:
recipients -- A list of recipients
"""
for rcpt in recipients:
self._send('RCPT TO:<{}>\r\n'.format(rcpt))
resp = self._read()
if not resp.startswith('250'):
raise DspamClientError(
'Unexpected server response at RCPT TO for '
'recipient {}: {}'.format(rcpt, resp))
self._recipients.append(rcpt)
def data(self, message):
"""
Send LMTP DATA command and process the server response.
The server response is stored as a list of dicts in
<DspamClient>.results, keyed on the recipient name(s). Depending
on the server return data, different formats are available:
* LMTP mode -- Dict containing 'accepted', a bool indicating
that the message was handed to the server.
* Summary mode -- Dict containing 'username', 'result',
'classification', 'probability', 'confidence'
and 'signature'.
* Stdout mode -- Dict containing 'result' and 'message', the
complete message payload including added headers.
The return data is always parsed and stored, independent of its format.
If you requested a regular LMTP response, but the server
responded with an DLMTP summary, the summary is still stored in
<DspamClient>.results, and you will need to check the result format
yourself and decide whether that was acceptable for your use case.
This is due to the fact that it's possible to configure the server to
return non LMTP responses, even when in LMTP mode (see dspam.conf:
ServerParameters).
Note: while processing response data in stdout mode, it's not possible
to relate the returned messages to a specific recipient, when multiple
recipients were specified in rcptto(). There is no guarantee
that the message stored in <DspamClient>.results['foo'] actually
belongs to the recipient 'foo'. If this relationship needs to be
guaranteed, send each message with a single recipient in rcptto().
args:
message -- The full message payload to pass to the server.
"""
self._send('DATA\r\n')
resp = self._read()
if not resp.startswith('354'):
raise DspamClientError(
'Unexpected server response at DATA: ' + resp)
# Send message payload
for line in message.split('\n'):
if line == '.':
# Dot stuffing
line = '..'
self._send(line)
# Send end-of-data
self._send('.\r\n')
# Depending on server configuration, several responses are possible:
# * Standard LMTP response code, once for each recipient:
# 250 2.6.0 <bar> Message accepted for delivery
# * Summary response (--deliver=summary), once for each recipient:
# X-DSPAM-Result: bar; result="Spam"; class="Spam"; \
# probability=1.0000; confidence=0.85; \
# signature=50c50c0f315636261418125
# (after the last summary line, a single dot is sent)
# * Stdout response (--delivery=stdout), once for each recipient:
# X-Daemon-Classification: INNOCENT
# <complete mail body>
#
# Note that when an unknown recipient is passed in, DSPAM will simply
# deliver the message (dspam.conf: (Un)TrustedDeliveryAgent,
# DeliveryHost) unaltered and unfiltered. The response for unknown
# recipients will still be something indicating 'accepted'.
peek = self._peek(24)
if peek.startswith('250'):
# Response is LTMP formatted
regex = re.compile('250 \d\.\d\.\d <([^>]+)>')
finished = False
while not finished:
resp = self._read()
match = regex.match(resp)
if not match:
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
rcpt = match.group(1)
self.results[rcpt] = {'accepted': True}
logger.debug(
'Message accepted for recipient {} in LMTP mode'.format(
rcpt))
if len(self.results) == len(self._recipients):
finished = True
elif peek.startswith('X-DSPAM-Result:'):
# Response is in summary format
regex = re.compile('X-DSPAM-Result: ([^;]+); result="(\w+)"; '
'class="(\w+)"; probability=([\d\.]+); '
'confidence=([\d\.]+); signature=([\w,/]+)')
finished = False
while not finished:
resp = self._read()
match = regex.match(resp)
if not match:
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
rcpt = match.group(1)
# map results to their DSPAM classification result names
fields = ('user', 'result', 'class',
'probability', 'confidence', 'signature')
self.results[rcpt] = dict(zip(fields, match.groups()))
if self.results[rcpt]['signature'] == 'N/A':
del(self.results[rcpt]['signature'])
logger.debug(
'Message handled for recipient {} in DLMTP summary mode, '
'result is {}'.format(rcpt, match.group(2)))
if len(self.results) == len(self._recipients):
# we received responses for all accepted recipients
finished = True
# read final dot
resp = self._read()
if resp != '.':
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
elif peek.startswith('X-Daemon-Classification:'):
# Response is in stdout format
finished = False
message = ''
while not finished:
resp = self._read()
if resp.startswith('X-Daemon-Classification:'):
if message != '':
# A new message body starts, store the previous one
rcpt = self._recipients.pop(0)
self.results[rcpt] = {
'result': result,
'message': message
}
logger.debug(
'Message handled for recipient {} in DLMTP '
'stdout mode, result is {}, message body '
'is {} chars'.format(rcpt, result, len(message)))
message = ''
# Remember next result
result = resp[25:]
elif resp == '.':
# A single dot can signal end-of-data, or might be just
# regular mail data.
self._socket.setblocking(False)
try:
# If _peek() succeeds, we did not reach end-of-data yet
# so it was message content.
peek = self._peek(1)
message = message + '\r\n' + resp
except socket.error:
# reached end-of-data, store message and finish
finished = True
rcpt = self._recipients.pop(0)
# strip final newline
message = message[0:-2]
self.results[rcpt] = {
'result': result,
'message': message
}
logger.debug(
'Message accepted for recipient {} in DLMTP '
'stdout mode, result is {}, message body '
'is {} chars'.format(rcpt, result, len(message)))
self._socket.setblocking(True)
else:
# regular message data
if message == '':
message = resp
else:
message = message + '\r\n' + resp
else:
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
def rset(self):
"""
Send LMTP RSET command and process the server response.
"""
self._send('RSET\r\n')
resp = self._read()
if not resp.startswith('250'):
logger.warn('Unexpected server response at RSET: ' + resp)
self._recipients = []
self.results = {}
def quit(self):
"""
Send LMTP QUIT command, read the server response and disconnect.
"""
self._send('QUIT\r\n')
resp = self._read()
if not resp.startswith('221'):
logger.warning('Unexpected server response at QUIT: ' + resp)
self._socket.close()
self._socket = None
self._recipients = []
self.results = {}
def process(self, message, user):
"""
Process a message.
"""
if not self._socket:
self.connect()
self.lhlo()
else:
self.rset()
if not self.dlmtp:
raise DspamClientError('DLMTP mode not available')
self.mailfrom(client_args='--process --deliver=summary')
self.rcptto((user,))
self.data(message)
# check for valid result format
if 'class' not in self.results[user]:
raise DspamClientError(
'Unexpected response format from server at END-OF-DATA, '
'an error occured')
return self.results[user]
def classify(self, message, user):
"""
Classify a message.
"""
if not self._socket:
self.connect()
self.lhlo()
else:
self.rset()
if not self.dlmtp:
raise DspamClientError('DLMTP mode not available')
self.mailfrom(client_args='--classify --deliver=summary')
self.rcptto((user,))
self.data(message)
# check for valid result format
if 'class' not in self.results[user]:
raise DspamClientError(
'Unexpected response format from server at END-OF-DATA, '
'an error occured')
return self.results[user]
def train(self, message, user, class_):
"""
Train DSPAM with a message.
"""
raise NotImplementedError
def retrain_message(self, message, class_, source='error'):
"""
Correct an invalid classification.
"""
raise NotImplementedError
def retrain_signature(self, signature, class_, source='error'):
"""
Correct an invalid classification.
"""
raise NotImplementedError
|
whyscream/dspam-milter | dspam/client.py | DspamClient.connect | python | def connect(self):
# extract proto from socket setting
try:
(proto, spec) = self.socket.split(':')
except ValueError:
raise DspamClientError(
'Failed to parse DSPAM socket specification, '
'no proto found: ' + self.socket)
if proto == 'unix':
# connect to UNIX domain socket
try:
self._socket = socket.socket(
socket.AF_UNIX, socket.SOCK_STREAM)
self._socket.connect(spec)
except socket.error as err:
self._socket = None
raise DspamClientError(
'Failed to connect to DSPAM server '
'at socket {}: {}'.format(spec, err))
logger.debug('Connected to DSPAM server at socket {}'.format(spec))
elif proto == 'inet' or proto == 'inet6':
# connect to TCP socket
try:
(port, host) = spec.split('@')
port = int(port)
if host == '':
host = 'localhost'
except ValueError:
port = int(spec)
host = 'localhost'
try:
self._socket = socket.socket(
socket.AF_INET, socket.SOCK_STREAM)
self._socket.connect((host, port))
except socket.error as err:
self._socket = None
raise DspamClientError(
'Failed to connect to DSPAM server at host {} '
'port {}: {}'.format(host, port, err))
logger.debug(
'Connected to DSPAM server at host {}, port {}'.format(
host, port))
else:
raise DspamClientError(
'Failed to parse DSPAM socket specification, '
'unknown proto ' + proto)
resp = self._read()
if not resp.startswith('220'):
raise DspamClientError(
'Unexpected server response at connect: ' + resp) | Connect to TCP or domain socket, and process the server LMTP greeting. | train | https://github.com/whyscream/dspam-milter/blob/f9939b718eed02cb7e56f8c5b921db4cfe1cd85a/dspam/client.py#L148-L205 | [
"def _read(self):\n \"\"\"\n Read a single response line from the server.\n\n \"\"\"\n line = ''\n finished = False\n while not finished:\n char = self._socket.recv(1)\n if char == '':\n return ''\n elif char == '\\r':\n continue\n elif char == '\\... | class DspamClient(object):
"""
A DSPAM client can be used to interact with a DSPAM server.
The client is able to speak to a DSPAM server over both a TCP or UNIX
domain socket exposed by a running DSPAM server, and interact with it
through its supported protocols: LMTP and DLMTP. The latter is an
enhanced version of LMTP to facilitate some options that are not possible
when using strict LMTP.
Some common DSPAM operations are included in this class, custom
operations can be built by creating a new LMTP dialog with the
low-level LMTP commands.
DSPAM server setup
==================
To use the client to speak with a DSPAM server, the server must be
configured to expose a TCP (dspam.conf: ServerHost, ServerPort) or
UNIX domain socket (dspam.conf: ServerDomainSocketPath).
The server can support mulitple modes (dspam.conf: ServerMode) for
interaction with connecting clients. Which mode you need, depends on
the operations you need to perform. Most of the time you'll want to
use DLMTP though, which means that you'll also need to setup
authentication (dspam.conf: ServerPass.<ident>).
Python DspamClient setup
========================
Each DspamClient instance needs to talk to a DSPAM server.
You need to specify the socket where DSPAM is listening when creating
a new instance. If you need to use DLMTP features (probably most of the
time), you also need to pass the ident and password.
"""
# Default configuration
socket = 'inet:24@localhost'
dlmtp_ident = None
dlmtp_pass = None
def __init__(self, socket=None, dlmtp_ident=None, dlmtp_pass=None):
"""
Initialize new DSPAM client.
The socket specifies where DSPAM is listening. Specify it in the form:
unix:PATH or inet:PORT[@HOST]. For example, the default UNIX domain
socket in dspam.conf would look like: unix:/var/run/dspam/dspam.sock,
and the default TCP socket: inet:24@localhost.
Args:
socket -- The socket on which DSPAM is listening.
dlmtp_ident -- The authentication identifier.
dlmtp_pass -- The authentication password.
"""
if socket is not None:
self.socket = socket
if dlmtp_ident is not None:
self.dlmtp_ident = dlmtp_ident
if dlmtp_pass is not None:
self.dlmtp_pass = dlmtp_pass
self.dlmtp = False
self.results = {}
# Some internal structures
self._socket = None
self._recipients = []
def __del__(self):
"""
Destroy the DSPAM client object.
"""
if self._socket:
self.quit()
def _send(self, line):
"""
Write a line of data to the server.
Args:
line -- A single line of data to write to the socket.
"""
if not line.endswith('\r\n'):
if line.endswith('\n'):
logger.debug('Fixing bare LF before sending data to socket')
line = line[0:-1] + '\r\n'
else:
logger.debug(
'Fixing missing CRLF before sending data to socket')
line = line + '\r\n'
logger.debug('Client sent: ' + line.rstrip())
self._socket.send(line)
def _read(self):
"""
Read a single response line from the server.
"""
line = ''
finished = False
while not finished:
char = self._socket.recv(1)
if char == '':
return ''
elif char == '\r':
continue
elif char == '\n':
finished = True
continue
else:
line = line + char
logger.debug('Server sent: ' + line.rstrip())
return line
def _peek(self, chars=1):
"""
Peek at the data in the server response.
Peeking should only be done when the response can be predicted.
Make sure that the socket will not block by requesting too
much data from it while peeking.
Args:
chars -- the number of characters to peek.
"""
line = self._socket.recv(chars, socket.MSG_PEEK)
logger.debug('Server sent (peek): ' + line.rstrip())
return line
def lhlo(self):
"""
Send LMTP LHLO greeting, and process the server response.
A regular LMTP greeting is sent, and if accepted by the server, the
capabilities it returns are parsed.
DLMTP authentication starts here by announcing the dlmtp_ident in
the LHLO as our hostname. When the ident is accepted and DLMTP
mode is enabled (dspam.conf: ServerMode=dspam|auto), the
DSPAMPROCESSMODE capability is announced by the server.
When this capability is detected, the <DspamClient>.dlmtp flag
will be enabled.
"""
if self.dlmtp_ident is not None:
host = self.dlmtp_ident
else:
host = socket.getfqdn()
self._send('LHLO ' + host + '\r\n')
finished = False
while not finished:
resp = self._read()
if not resp.startswith('250'):
raise DspamClientError(
'Unexpected server response at LHLO: ' + resp)
if resp[4:20] == 'DSPAMPROCESSMODE':
self.dlmtp = True
logger.debug('Detected DLMTP extension in LHLO response')
if resp[3] == ' ':
# difference between "250-8BITMIME" and "250 SIZE"
finished = True
def mailfrom(self, sender=None, client_args=None):
"""
Send LMTP MAIL FROM command, and process the server response.
In DLMTP mode, the server expects the client to identify itself.
Because the envelope sender is of no importance to DSPAM, the client
is expected to send an identity and a password (dspam.conf:
ServerPass.<ident>="<password>") in stead of the actual sender.
When you need want DSPAM to deliver the message itself and need to
pass the server an actual envelope sender for that, add the
--mail-from parameter in client_args.
When the server is setup in LMTP mode only (dspam.conf:
ServerMode=standard), the envelope sender is a regular envelope
sender, and is re-used when delivering the message after processing.
Client args
===========
When in DLMTP mode (and with proper auth credentials), the server
accepts parameters specified by the client. These are in the form
as they are passed to the command-line 'dspam' program.
See man dspam(1) for details, and the process() or classify() methods
in this class for simple examples.
Args:
sender -- The envelope sender to use in LMTP mode.
client_args -- DSPAM parameters to pass to the server in DLMTP mode.
"""
if sender and client_args:
raise DspamClientError('Arguments are mutually exclusive')
if client_args and not self.dlmtp:
raise DspamClientError(
'Cannot send client args, server does not support DLMTP')
command = 'MAIL FROM:'
if not sender:
if self.dlmtp_ident and self.dlmtp_pass:
sender = self.dlmtp_pass + '@' + self.dlmtp_ident
else:
sender = ''
command = command + '<' + sender + '>'
if client_args:
command = command + ' DSPAMPROCESSMODE="{}"'.format(client_args)
self._send(command + '\r\n')
resp = self._read()
if not resp.startswith('250'):
raise DspamClientError(
'Unexpected server response at MAIL FROM: ' + resp)
def rcptto(self, recipients):
"""
Send LMTP RCPT TO command, and process the server response.
The DSPAM server expects to find one or more valid DSPAM users as
envelope recipients. The set recipient will be the user DSPAM
processes mail for.
When you need want DSPAM to deliver the message itself, and need to
pass the server an envelope recipient for this that differs from the
DSPAM user account name, use the --rcpt-to parameter in client_args
at mailfrom().
args:
recipients -- A list of recipients
"""
for rcpt in recipients:
self._send('RCPT TO:<{}>\r\n'.format(rcpt))
resp = self._read()
if not resp.startswith('250'):
raise DspamClientError(
'Unexpected server response at RCPT TO for '
'recipient {}: {}'.format(rcpt, resp))
self._recipients.append(rcpt)
def data(self, message):
"""
Send LMTP DATA command and process the server response.
The server response is stored as a list of dicts in
<DspamClient>.results, keyed on the recipient name(s). Depending
on the server return data, different formats are available:
* LMTP mode -- Dict containing 'accepted', a bool indicating
that the message was handed to the server.
* Summary mode -- Dict containing 'username', 'result',
'classification', 'probability', 'confidence'
and 'signature'.
* Stdout mode -- Dict containing 'result' and 'message', the
complete message payload including added headers.
The return data is always parsed and stored, independent of its format.
If you requested a regular LMTP response, but the server
responded with an DLMTP summary, the summary is still stored in
<DspamClient>.results, and you will need to check the result format
yourself and decide whether that was acceptable for your use case.
This is due to the fact that it's possible to configure the server to
return non LMTP responses, even when in LMTP mode (see dspam.conf:
ServerParameters).
Note: while processing response data in stdout mode, it's not possible
to relate the returned messages to a specific recipient, when multiple
recipients were specified in rcptto(). There is no guarantee
that the message stored in <DspamClient>.results['foo'] actually
belongs to the recipient 'foo'. If this relationship needs to be
guaranteed, send each message with a single recipient in rcptto().
args:
message -- The full message payload to pass to the server.
"""
self._send('DATA\r\n')
resp = self._read()
if not resp.startswith('354'):
raise DspamClientError(
'Unexpected server response at DATA: ' + resp)
# Send message payload
for line in message.split('\n'):
if line == '.':
# Dot stuffing
line = '..'
self._send(line)
# Send end-of-data
self._send('.\r\n')
# Depending on server configuration, several responses are possible:
# * Standard LMTP response code, once for each recipient:
# 250 2.6.0 <bar> Message accepted for delivery
# * Summary response (--deliver=summary), once for each recipient:
# X-DSPAM-Result: bar; result="Spam"; class="Spam"; \
# probability=1.0000; confidence=0.85; \
# signature=50c50c0f315636261418125
# (after the last summary line, a single dot is sent)
# * Stdout response (--delivery=stdout), once for each recipient:
# X-Daemon-Classification: INNOCENT
# <complete mail body>
#
# Note that when an unknown recipient is passed in, DSPAM will simply
# deliver the message (dspam.conf: (Un)TrustedDeliveryAgent,
# DeliveryHost) unaltered and unfiltered. The response for unknown
# recipients will still be something indicating 'accepted'.
peek = self._peek(24)
if peek.startswith('250'):
# Response is LTMP formatted
regex = re.compile('250 \d\.\d\.\d <([^>]+)>')
finished = False
while not finished:
resp = self._read()
match = regex.match(resp)
if not match:
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
rcpt = match.group(1)
self.results[rcpt] = {'accepted': True}
logger.debug(
'Message accepted for recipient {} in LMTP mode'.format(
rcpt))
if len(self.results) == len(self._recipients):
finished = True
elif peek.startswith('X-DSPAM-Result:'):
# Response is in summary format
regex = re.compile('X-DSPAM-Result: ([^;]+); result="(\w+)"; '
'class="(\w+)"; probability=([\d\.]+); '
'confidence=([\d\.]+); signature=([\w,/]+)')
finished = False
while not finished:
resp = self._read()
match = regex.match(resp)
if not match:
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
rcpt = match.group(1)
# map results to their DSPAM classification result names
fields = ('user', 'result', 'class',
'probability', 'confidence', 'signature')
self.results[rcpt] = dict(zip(fields, match.groups()))
if self.results[rcpt]['signature'] == 'N/A':
del(self.results[rcpt]['signature'])
logger.debug(
'Message handled for recipient {} in DLMTP summary mode, '
'result is {}'.format(rcpt, match.group(2)))
if len(self.results) == len(self._recipients):
# we received responses for all accepted recipients
finished = True
# read final dot
resp = self._read()
if resp != '.':
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
elif peek.startswith('X-Daemon-Classification:'):
# Response is in stdout format
finished = False
message = ''
while not finished:
resp = self._read()
if resp.startswith('X-Daemon-Classification:'):
if message != '':
# A new message body starts, store the previous one
rcpt = self._recipients.pop(0)
self.results[rcpt] = {
'result': result,
'message': message
}
logger.debug(
'Message handled for recipient {} in DLMTP '
'stdout mode, result is {}, message body '
'is {} chars'.format(rcpt, result, len(message)))
message = ''
# Remember next result
result = resp[25:]
elif resp == '.':
# A single dot can signal end-of-data, or might be just
# regular mail data.
self._socket.setblocking(False)
try:
# If _peek() succeeds, we did not reach end-of-data yet
# so it was message content.
peek = self._peek(1)
message = message + '\r\n' + resp
except socket.error:
# reached end-of-data, store message and finish
finished = True
rcpt = self._recipients.pop(0)
# strip final newline
message = message[0:-2]
self.results[rcpt] = {
'result': result,
'message': message
}
logger.debug(
'Message accepted for recipient {} in DLMTP '
'stdout mode, result is {}, message body '
'is {} chars'.format(rcpt, result, len(message)))
self._socket.setblocking(True)
else:
# regular message data
if message == '':
message = resp
else:
message = message + '\r\n' + resp
else:
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
def rset(self):
"""
Send LMTP RSET command and process the server response.
"""
self._send('RSET\r\n')
resp = self._read()
if not resp.startswith('250'):
logger.warn('Unexpected server response at RSET: ' + resp)
self._recipients = []
self.results = {}
def quit(self):
"""
Send LMTP QUIT command, read the server response and disconnect.
"""
self._send('QUIT\r\n')
resp = self._read()
if not resp.startswith('221'):
logger.warning('Unexpected server response at QUIT: ' + resp)
self._socket.close()
self._socket = None
self._recipients = []
self.results = {}
def process(self, message, user):
"""
Process a message.
"""
if not self._socket:
self.connect()
self.lhlo()
else:
self.rset()
if not self.dlmtp:
raise DspamClientError('DLMTP mode not available')
self.mailfrom(client_args='--process --deliver=summary')
self.rcptto((user,))
self.data(message)
# check for valid result format
if 'class' not in self.results[user]:
raise DspamClientError(
'Unexpected response format from server at END-OF-DATA, '
'an error occured')
return self.results[user]
def classify(self, message, user):
"""
Classify a message.
"""
if not self._socket:
self.connect()
self.lhlo()
else:
self.rset()
if not self.dlmtp:
raise DspamClientError('DLMTP mode not available')
self.mailfrom(client_args='--classify --deliver=summary')
self.rcptto((user,))
self.data(message)
# check for valid result format
if 'class' not in self.results[user]:
raise DspamClientError(
'Unexpected response format from server at END-OF-DATA, '
'an error occured')
return self.results[user]
def train(self, message, user, class_):
"""
Train DSPAM with a message.
"""
raise NotImplementedError
def retrain_message(self, message, class_, source='error'):
"""
Correct an invalid classification.
"""
raise NotImplementedError
def retrain_signature(self, signature, class_, source='error'):
"""
Correct an invalid classification.
"""
raise NotImplementedError
|
whyscream/dspam-milter | dspam/client.py | DspamClient.lhlo | python | def lhlo(self):
if self.dlmtp_ident is not None:
host = self.dlmtp_ident
else:
host = socket.getfqdn()
self._send('LHLO ' + host + '\r\n')
finished = False
while not finished:
resp = self._read()
if not resp.startswith('250'):
raise DspamClientError(
'Unexpected server response at LHLO: ' + resp)
if resp[4:20] == 'DSPAMPROCESSMODE':
self.dlmtp = True
logger.debug('Detected DLMTP extension in LHLO response')
if resp[3] == ' ':
# difference between "250-8BITMIME" and "250 SIZE"
finished = True | Send LMTP LHLO greeting, and process the server response.
A regular LMTP greeting is sent, and if accepted by the server, the
capabilities it returns are parsed.
DLMTP authentication starts here by announcing the dlmtp_ident in
the LHLO as our hostname. When the ident is accepted and DLMTP
mode is enabled (dspam.conf: ServerMode=dspam|auto), the
DSPAMPROCESSMODE capability is announced by the server.
When this capability is detected, the <DspamClient>.dlmtp flag
will be enabled. | train | https://github.com/whyscream/dspam-milter/blob/f9939b718eed02cb7e56f8c5b921db4cfe1cd85a/dspam/client.py#L207-L239 | [
"def _send(self, line):\n \"\"\"\n Write a line of data to the server.\n\n Args:\n line -- A single line of data to write to the socket.\n\n \"\"\"\n if not line.endswith('\\r\\n'):\n if line.endswith('\\n'):\n logger.debug('Fixing bare LF before sending data to socket')\n ... | class DspamClient(object):
"""
A DSPAM client can be used to interact with a DSPAM server.
The client is able to speak to a DSPAM server over both a TCP or UNIX
domain socket exposed by a running DSPAM server, and interact with it
through its supported protocols: LMTP and DLMTP. The latter is an
enhanced version of LMTP to facilitate some options that are not possible
when using strict LMTP.
Some common DSPAM operations are included in this class, custom
operations can be built by creating a new LMTP dialog with the
low-level LMTP commands.
DSPAM server setup
==================
To use the client to speak with a DSPAM server, the server must be
configured to expose a TCP (dspam.conf: ServerHost, ServerPort) or
UNIX domain socket (dspam.conf: ServerDomainSocketPath).
The server can support mulitple modes (dspam.conf: ServerMode) for
interaction with connecting clients. Which mode you need, depends on
the operations you need to perform. Most of the time you'll want to
use DLMTP though, which means that you'll also need to setup
authentication (dspam.conf: ServerPass.<ident>).
Python DspamClient setup
========================
Each DspamClient instance needs to talk to a DSPAM server.
You need to specify the socket where DSPAM is listening when creating
a new instance. If you need to use DLMTP features (probably most of the
time), you also need to pass the ident and password.
"""
# Default configuration
socket = 'inet:24@localhost'
dlmtp_ident = None
dlmtp_pass = None
def __init__(self, socket=None, dlmtp_ident=None, dlmtp_pass=None):
"""
Initialize new DSPAM client.
The socket specifies where DSPAM is listening. Specify it in the form:
unix:PATH or inet:PORT[@HOST]. For example, the default UNIX domain
socket in dspam.conf would look like: unix:/var/run/dspam/dspam.sock,
and the default TCP socket: inet:24@localhost.
Args:
socket -- The socket on which DSPAM is listening.
dlmtp_ident -- The authentication identifier.
dlmtp_pass -- The authentication password.
"""
if socket is not None:
self.socket = socket
if dlmtp_ident is not None:
self.dlmtp_ident = dlmtp_ident
if dlmtp_pass is not None:
self.dlmtp_pass = dlmtp_pass
self.dlmtp = False
self.results = {}
# Some internal structures
self._socket = None
self._recipients = []
def __del__(self):
"""
Destroy the DSPAM client object.
"""
if self._socket:
self.quit()
def _send(self, line):
"""
Write a line of data to the server.
Args:
line -- A single line of data to write to the socket.
"""
if not line.endswith('\r\n'):
if line.endswith('\n'):
logger.debug('Fixing bare LF before sending data to socket')
line = line[0:-1] + '\r\n'
else:
logger.debug(
'Fixing missing CRLF before sending data to socket')
line = line + '\r\n'
logger.debug('Client sent: ' + line.rstrip())
self._socket.send(line)
def _read(self):
"""
Read a single response line from the server.
"""
line = ''
finished = False
while not finished:
char = self._socket.recv(1)
if char == '':
return ''
elif char == '\r':
continue
elif char == '\n':
finished = True
continue
else:
line = line + char
logger.debug('Server sent: ' + line.rstrip())
return line
def _peek(self, chars=1):
"""
Peek at the data in the server response.
Peeking should only be done when the response can be predicted.
Make sure that the socket will not block by requesting too
much data from it while peeking.
Args:
chars -- the number of characters to peek.
"""
line = self._socket.recv(chars, socket.MSG_PEEK)
logger.debug('Server sent (peek): ' + line.rstrip())
return line
def connect(self):
"""
Connect to TCP or domain socket, and process the server LMTP greeting.
"""
# extract proto from socket setting
try:
(proto, spec) = self.socket.split(':')
except ValueError:
raise DspamClientError(
'Failed to parse DSPAM socket specification, '
'no proto found: ' + self.socket)
if proto == 'unix':
# connect to UNIX domain socket
try:
self._socket = socket.socket(
socket.AF_UNIX, socket.SOCK_STREAM)
self._socket.connect(spec)
except socket.error as err:
self._socket = None
raise DspamClientError(
'Failed to connect to DSPAM server '
'at socket {}: {}'.format(spec, err))
logger.debug('Connected to DSPAM server at socket {}'.format(spec))
elif proto == 'inet' or proto == 'inet6':
# connect to TCP socket
try:
(port, host) = spec.split('@')
port = int(port)
if host == '':
host = 'localhost'
except ValueError:
port = int(spec)
host = 'localhost'
try:
self._socket = socket.socket(
socket.AF_INET, socket.SOCK_STREAM)
self._socket.connect((host, port))
except socket.error as err:
self._socket = None
raise DspamClientError(
'Failed to connect to DSPAM server at host {} '
'port {}: {}'.format(host, port, err))
logger.debug(
'Connected to DSPAM server at host {}, port {}'.format(
host, port))
else:
raise DspamClientError(
'Failed to parse DSPAM socket specification, '
'unknown proto ' + proto)
resp = self._read()
if not resp.startswith('220'):
raise DspamClientError(
'Unexpected server response at connect: ' + resp)
def mailfrom(self, sender=None, client_args=None):
"""
Send LMTP MAIL FROM command, and process the server response.
In DLMTP mode, the server expects the client to identify itself.
Because the envelope sender is of no importance to DSPAM, the client
is expected to send an identity and a password (dspam.conf:
ServerPass.<ident>="<password>") in stead of the actual sender.
When you need want DSPAM to deliver the message itself and need to
pass the server an actual envelope sender for that, add the
--mail-from parameter in client_args.
When the server is setup in LMTP mode only (dspam.conf:
ServerMode=standard), the envelope sender is a regular envelope
sender, and is re-used when delivering the message after processing.
Client args
===========
When in DLMTP mode (and with proper auth credentials), the server
accepts parameters specified by the client. These are in the form
as they are passed to the command-line 'dspam' program.
See man dspam(1) for details, and the process() or classify() methods
in this class for simple examples.
Args:
sender -- The envelope sender to use in LMTP mode.
client_args -- DSPAM parameters to pass to the server in DLMTP mode.
"""
if sender and client_args:
raise DspamClientError('Arguments are mutually exclusive')
if client_args and not self.dlmtp:
raise DspamClientError(
'Cannot send client args, server does not support DLMTP')
command = 'MAIL FROM:'
if not sender:
if self.dlmtp_ident and self.dlmtp_pass:
sender = self.dlmtp_pass + '@' + self.dlmtp_ident
else:
sender = ''
command = command + '<' + sender + '>'
if client_args:
command = command + ' DSPAMPROCESSMODE="{}"'.format(client_args)
self._send(command + '\r\n')
resp = self._read()
if not resp.startswith('250'):
raise DspamClientError(
'Unexpected server response at MAIL FROM: ' + resp)
def rcptto(self, recipients):
"""
Send LMTP RCPT TO command, and process the server response.
The DSPAM server expects to find one or more valid DSPAM users as
envelope recipients. The set recipient will be the user DSPAM
processes mail for.
When you need want DSPAM to deliver the message itself, and need to
pass the server an envelope recipient for this that differs from the
DSPAM user account name, use the --rcpt-to parameter in client_args
at mailfrom().
args:
recipients -- A list of recipients
"""
for rcpt in recipients:
self._send('RCPT TO:<{}>\r\n'.format(rcpt))
resp = self._read()
if not resp.startswith('250'):
raise DspamClientError(
'Unexpected server response at RCPT TO for '
'recipient {}: {}'.format(rcpt, resp))
self._recipients.append(rcpt)
def data(self, message):
"""
Send LMTP DATA command and process the server response.
The server response is stored as a list of dicts in
<DspamClient>.results, keyed on the recipient name(s). Depending
on the server return data, different formats are available:
* LMTP mode -- Dict containing 'accepted', a bool indicating
that the message was handed to the server.
* Summary mode -- Dict containing 'username', 'result',
'classification', 'probability', 'confidence'
and 'signature'.
* Stdout mode -- Dict containing 'result' and 'message', the
complete message payload including added headers.
The return data is always parsed and stored, independent of its format.
If you requested a regular LMTP response, but the server
responded with an DLMTP summary, the summary is still stored in
<DspamClient>.results, and you will need to check the result format
yourself and decide whether that was acceptable for your use case.
This is due to the fact that it's possible to configure the server to
return non LMTP responses, even when in LMTP mode (see dspam.conf:
ServerParameters).
Note: while processing response data in stdout mode, it's not possible
to relate the returned messages to a specific recipient, when multiple
recipients were specified in rcptto(). There is no guarantee
that the message stored in <DspamClient>.results['foo'] actually
belongs to the recipient 'foo'. If this relationship needs to be
guaranteed, send each message with a single recipient in rcptto().
args:
message -- The full message payload to pass to the server.
"""
self._send('DATA\r\n')
resp = self._read()
if not resp.startswith('354'):
raise DspamClientError(
'Unexpected server response at DATA: ' + resp)
# Send message payload
for line in message.split('\n'):
if line == '.':
# Dot stuffing
line = '..'
self._send(line)
# Send end-of-data
self._send('.\r\n')
# Depending on server configuration, several responses are possible:
# * Standard LMTP response code, once for each recipient:
# 250 2.6.0 <bar> Message accepted for delivery
# * Summary response (--deliver=summary), once for each recipient:
# X-DSPAM-Result: bar; result="Spam"; class="Spam"; \
# probability=1.0000; confidence=0.85; \
# signature=50c50c0f315636261418125
# (after the last summary line, a single dot is sent)
# * Stdout response (--delivery=stdout), once for each recipient:
# X-Daemon-Classification: INNOCENT
# <complete mail body>
#
# Note that when an unknown recipient is passed in, DSPAM will simply
# deliver the message (dspam.conf: (Un)TrustedDeliveryAgent,
# DeliveryHost) unaltered and unfiltered. The response for unknown
# recipients will still be something indicating 'accepted'.
peek = self._peek(24)
if peek.startswith('250'):
# Response is LTMP formatted
regex = re.compile('250 \d\.\d\.\d <([^>]+)>')
finished = False
while not finished:
resp = self._read()
match = regex.match(resp)
if not match:
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
rcpt = match.group(1)
self.results[rcpt] = {'accepted': True}
logger.debug(
'Message accepted for recipient {} in LMTP mode'.format(
rcpt))
if len(self.results) == len(self._recipients):
finished = True
elif peek.startswith('X-DSPAM-Result:'):
# Response is in summary format
regex = re.compile('X-DSPAM-Result: ([^;]+); result="(\w+)"; '
'class="(\w+)"; probability=([\d\.]+); '
'confidence=([\d\.]+); signature=([\w,/]+)')
finished = False
while not finished:
resp = self._read()
match = regex.match(resp)
if not match:
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
rcpt = match.group(1)
# map results to their DSPAM classification result names
fields = ('user', 'result', 'class',
'probability', 'confidence', 'signature')
self.results[rcpt] = dict(zip(fields, match.groups()))
if self.results[rcpt]['signature'] == 'N/A':
del(self.results[rcpt]['signature'])
logger.debug(
'Message handled for recipient {} in DLMTP summary mode, '
'result is {}'.format(rcpt, match.group(2)))
if len(self.results) == len(self._recipients):
# we received responses for all accepted recipients
finished = True
# read final dot
resp = self._read()
if resp != '.':
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
elif peek.startswith('X-Daemon-Classification:'):
# Response is in stdout format
finished = False
message = ''
while not finished:
resp = self._read()
if resp.startswith('X-Daemon-Classification:'):
if message != '':
# A new message body starts, store the previous one
rcpt = self._recipients.pop(0)
self.results[rcpt] = {
'result': result,
'message': message
}
logger.debug(
'Message handled for recipient {} in DLMTP '
'stdout mode, result is {}, message body '
'is {} chars'.format(rcpt, result, len(message)))
message = ''
# Remember next result
result = resp[25:]
elif resp == '.':
# A single dot can signal end-of-data, or might be just
# regular mail data.
self._socket.setblocking(False)
try:
# If _peek() succeeds, we did not reach end-of-data yet
# so it was message content.
peek = self._peek(1)
message = message + '\r\n' + resp
except socket.error:
# reached end-of-data, store message and finish
finished = True
rcpt = self._recipients.pop(0)
# strip final newline
message = message[0:-2]
self.results[rcpt] = {
'result': result,
'message': message
}
logger.debug(
'Message accepted for recipient {} in DLMTP '
'stdout mode, result is {}, message body '
'is {} chars'.format(rcpt, result, len(message)))
self._socket.setblocking(True)
else:
# regular message data
if message == '':
message = resp
else:
message = message + '\r\n' + resp
else:
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
def rset(self):
"""
Send LMTP RSET command and process the server response.
"""
self._send('RSET\r\n')
resp = self._read()
if not resp.startswith('250'):
logger.warn('Unexpected server response at RSET: ' + resp)
self._recipients = []
self.results = {}
def quit(self):
"""
Send LMTP QUIT command, read the server response and disconnect.
"""
self._send('QUIT\r\n')
resp = self._read()
if not resp.startswith('221'):
logger.warning('Unexpected server response at QUIT: ' + resp)
self._socket.close()
self._socket = None
self._recipients = []
self.results = {}
def process(self, message, user):
"""
Process a message.
"""
if not self._socket:
self.connect()
self.lhlo()
else:
self.rset()
if not self.dlmtp:
raise DspamClientError('DLMTP mode not available')
self.mailfrom(client_args='--process --deliver=summary')
self.rcptto((user,))
self.data(message)
# check for valid result format
if 'class' not in self.results[user]:
raise DspamClientError(
'Unexpected response format from server at END-OF-DATA, '
'an error occured')
return self.results[user]
def classify(self, message, user):
"""
Classify a message.
"""
if not self._socket:
self.connect()
self.lhlo()
else:
self.rset()
if not self.dlmtp:
raise DspamClientError('DLMTP mode not available')
self.mailfrom(client_args='--classify --deliver=summary')
self.rcptto((user,))
self.data(message)
# check for valid result format
if 'class' not in self.results[user]:
raise DspamClientError(
'Unexpected response format from server at END-OF-DATA, '
'an error occured')
return self.results[user]
def train(self, message, user, class_):
"""
Train DSPAM with a message.
"""
raise NotImplementedError
def retrain_message(self, message, class_, source='error'):
"""
Correct an invalid classification.
"""
raise NotImplementedError
def retrain_signature(self, signature, class_, source='error'):
"""
Correct an invalid classification.
"""
raise NotImplementedError
|
whyscream/dspam-milter | dspam/client.py | DspamClient.mailfrom | python | def mailfrom(self, sender=None, client_args=None):
if sender and client_args:
raise DspamClientError('Arguments are mutually exclusive')
if client_args and not self.dlmtp:
raise DspamClientError(
'Cannot send client args, server does not support DLMTP')
command = 'MAIL FROM:'
if not sender:
if self.dlmtp_ident and self.dlmtp_pass:
sender = self.dlmtp_pass + '@' + self.dlmtp_ident
else:
sender = ''
command = command + '<' + sender + '>'
if client_args:
command = command + ' DSPAMPROCESSMODE="{}"'.format(client_args)
self._send(command + '\r\n')
resp = self._read()
if not resp.startswith('250'):
raise DspamClientError(
'Unexpected server response at MAIL FROM: ' + resp) | Send LMTP MAIL FROM command, and process the server response.
In DLMTP mode, the server expects the client to identify itself.
Because the envelope sender is of no importance to DSPAM, the client
is expected to send an identity and a password (dspam.conf:
ServerPass.<ident>="<password>") in stead of the actual sender.
When you need want DSPAM to deliver the message itself and need to
pass the server an actual envelope sender for that, add the
--mail-from parameter in client_args.
When the server is setup in LMTP mode only (dspam.conf:
ServerMode=standard), the envelope sender is a regular envelope
sender, and is re-used when delivering the message after processing.
Client args
===========
When in DLMTP mode (and with proper auth credentials), the server
accepts parameters specified by the client. These are in the form
as they are passed to the command-line 'dspam' program.
See man dspam(1) for details, and the process() or classify() methods
in this class for simple examples.
Args:
sender -- The envelope sender to use in LMTP mode.
client_args -- DSPAM parameters to pass to the server in DLMTP mode. | train | https://github.com/whyscream/dspam-milter/blob/f9939b718eed02cb7e56f8c5b921db4cfe1cd85a/dspam/client.py#L241-L293 | [
"def _send(self, line):\n \"\"\"\n Write a line of data to the server.\n\n Args:\n line -- A single line of data to write to the socket.\n\n \"\"\"\n if not line.endswith('\\r\\n'):\n if line.endswith('\\n'):\n logger.debug('Fixing bare LF before sending data to socket')\n ... | class DspamClient(object):
"""
A DSPAM client can be used to interact with a DSPAM server.
The client is able to speak to a DSPAM server over both a TCP or UNIX
domain socket exposed by a running DSPAM server, and interact with it
through its supported protocols: LMTP and DLMTP. The latter is an
enhanced version of LMTP to facilitate some options that are not possible
when using strict LMTP.
Some common DSPAM operations are included in this class, custom
operations can be built by creating a new LMTP dialog with the
low-level LMTP commands.
DSPAM server setup
==================
To use the client to speak with a DSPAM server, the server must be
configured to expose a TCP (dspam.conf: ServerHost, ServerPort) or
UNIX domain socket (dspam.conf: ServerDomainSocketPath).
The server can support mulitple modes (dspam.conf: ServerMode) for
interaction with connecting clients. Which mode you need, depends on
the operations you need to perform. Most of the time you'll want to
use DLMTP though, which means that you'll also need to setup
authentication (dspam.conf: ServerPass.<ident>).
Python DspamClient setup
========================
Each DspamClient instance needs to talk to a DSPAM server.
You need to specify the socket where DSPAM is listening when creating
a new instance. If you need to use DLMTP features (probably most of the
time), you also need to pass the ident and password.
"""
# Default configuration
socket = 'inet:24@localhost'
dlmtp_ident = None
dlmtp_pass = None
def __init__(self, socket=None, dlmtp_ident=None, dlmtp_pass=None):
"""
Initialize new DSPAM client.
The socket specifies where DSPAM is listening. Specify it in the form:
unix:PATH or inet:PORT[@HOST]. For example, the default UNIX domain
socket in dspam.conf would look like: unix:/var/run/dspam/dspam.sock,
and the default TCP socket: inet:24@localhost.
Args:
socket -- The socket on which DSPAM is listening.
dlmtp_ident -- The authentication identifier.
dlmtp_pass -- The authentication password.
"""
if socket is not None:
self.socket = socket
if dlmtp_ident is not None:
self.dlmtp_ident = dlmtp_ident
if dlmtp_pass is not None:
self.dlmtp_pass = dlmtp_pass
self.dlmtp = False
self.results = {}
# Some internal structures
self._socket = None
self._recipients = []
def __del__(self):
"""
Destroy the DSPAM client object.
"""
if self._socket:
self.quit()
def _send(self, line):
"""
Write a line of data to the server.
Args:
line -- A single line of data to write to the socket.
"""
if not line.endswith('\r\n'):
if line.endswith('\n'):
logger.debug('Fixing bare LF before sending data to socket')
line = line[0:-1] + '\r\n'
else:
logger.debug(
'Fixing missing CRLF before sending data to socket')
line = line + '\r\n'
logger.debug('Client sent: ' + line.rstrip())
self._socket.send(line)
def _read(self):
"""
Read a single response line from the server.
"""
line = ''
finished = False
while not finished:
char = self._socket.recv(1)
if char == '':
return ''
elif char == '\r':
continue
elif char == '\n':
finished = True
continue
else:
line = line + char
logger.debug('Server sent: ' + line.rstrip())
return line
def _peek(self, chars=1):
"""
Peek at the data in the server response.
Peeking should only be done when the response can be predicted.
Make sure that the socket will not block by requesting too
much data from it while peeking.
Args:
chars -- the number of characters to peek.
"""
line = self._socket.recv(chars, socket.MSG_PEEK)
logger.debug('Server sent (peek): ' + line.rstrip())
return line
def connect(self):
"""
Connect to TCP or domain socket, and process the server LMTP greeting.
"""
# extract proto from socket setting
try:
(proto, spec) = self.socket.split(':')
except ValueError:
raise DspamClientError(
'Failed to parse DSPAM socket specification, '
'no proto found: ' + self.socket)
if proto == 'unix':
# connect to UNIX domain socket
try:
self._socket = socket.socket(
socket.AF_UNIX, socket.SOCK_STREAM)
self._socket.connect(spec)
except socket.error as err:
self._socket = None
raise DspamClientError(
'Failed to connect to DSPAM server '
'at socket {}: {}'.format(spec, err))
logger.debug('Connected to DSPAM server at socket {}'.format(spec))
elif proto == 'inet' or proto == 'inet6':
# connect to TCP socket
try:
(port, host) = spec.split('@')
port = int(port)
if host == '':
host = 'localhost'
except ValueError:
port = int(spec)
host = 'localhost'
try:
self._socket = socket.socket(
socket.AF_INET, socket.SOCK_STREAM)
self._socket.connect((host, port))
except socket.error as err:
self._socket = None
raise DspamClientError(
'Failed to connect to DSPAM server at host {} '
'port {}: {}'.format(host, port, err))
logger.debug(
'Connected to DSPAM server at host {}, port {}'.format(
host, port))
else:
raise DspamClientError(
'Failed to parse DSPAM socket specification, '
'unknown proto ' + proto)
resp = self._read()
if not resp.startswith('220'):
raise DspamClientError(
'Unexpected server response at connect: ' + resp)
def lhlo(self):
"""
Send LMTP LHLO greeting, and process the server response.
A regular LMTP greeting is sent, and if accepted by the server, the
capabilities it returns are parsed.
DLMTP authentication starts here by announcing the dlmtp_ident in
the LHLO as our hostname. When the ident is accepted and DLMTP
mode is enabled (dspam.conf: ServerMode=dspam|auto), the
DSPAMPROCESSMODE capability is announced by the server.
When this capability is detected, the <DspamClient>.dlmtp flag
will be enabled.
"""
if self.dlmtp_ident is not None:
host = self.dlmtp_ident
else:
host = socket.getfqdn()
self._send('LHLO ' + host + '\r\n')
finished = False
while not finished:
resp = self._read()
if not resp.startswith('250'):
raise DspamClientError(
'Unexpected server response at LHLO: ' + resp)
if resp[4:20] == 'DSPAMPROCESSMODE':
self.dlmtp = True
logger.debug('Detected DLMTP extension in LHLO response')
if resp[3] == ' ':
# difference between "250-8BITMIME" and "250 SIZE"
finished = True
def rcptto(self, recipients):
"""
Send LMTP RCPT TO command, and process the server response.
The DSPAM server expects to find one or more valid DSPAM users as
envelope recipients. The set recipient will be the user DSPAM
processes mail for.
When you need want DSPAM to deliver the message itself, and need to
pass the server an envelope recipient for this that differs from the
DSPAM user account name, use the --rcpt-to parameter in client_args
at mailfrom().
args:
recipients -- A list of recipients
"""
for rcpt in recipients:
self._send('RCPT TO:<{}>\r\n'.format(rcpt))
resp = self._read()
if not resp.startswith('250'):
raise DspamClientError(
'Unexpected server response at RCPT TO for '
'recipient {}: {}'.format(rcpt, resp))
self._recipients.append(rcpt)
def data(self, message):
"""
Send LMTP DATA command and process the server response.
The server response is stored as a list of dicts in
<DspamClient>.results, keyed on the recipient name(s). Depending
on the server return data, different formats are available:
* LMTP mode -- Dict containing 'accepted', a bool indicating
that the message was handed to the server.
* Summary mode -- Dict containing 'username', 'result',
'classification', 'probability', 'confidence'
and 'signature'.
* Stdout mode -- Dict containing 'result' and 'message', the
complete message payload including added headers.
The return data is always parsed and stored, independent of its format.
If you requested a regular LMTP response, but the server
responded with an DLMTP summary, the summary is still stored in
<DspamClient>.results, and you will need to check the result format
yourself and decide whether that was acceptable for your use case.
This is due to the fact that it's possible to configure the server to
return non LMTP responses, even when in LMTP mode (see dspam.conf:
ServerParameters).
Note: while processing response data in stdout mode, it's not possible
to relate the returned messages to a specific recipient, when multiple
recipients were specified in rcptto(). There is no guarantee
that the message stored in <DspamClient>.results['foo'] actually
belongs to the recipient 'foo'. If this relationship needs to be
guaranteed, send each message with a single recipient in rcptto().
args:
message -- The full message payload to pass to the server.
"""
self._send('DATA\r\n')
resp = self._read()
if not resp.startswith('354'):
raise DspamClientError(
'Unexpected server response at DATA: ' + resp)
# Send message payload
for line in message.split('\n'):
if line == '.':
# Dot stuffing
line = '..'
self._send(line)
# Send end-of-data
self._send('.\r\n')
# Depending on server configuration, several responses are possible:
# * Standard LMTP response code, once for each recipient:
# 250 2.6.0 <bar> Message accepted for delivery
# * Summary response (--deliver=summary), once for each recipient:
# X-DSPAM-Result: bar; result="Spam"; class="Spam"; \
# probability=1.0000; confidence=0.85; \
# signature=50c50c0f315636261418125
# (after the last summary line, a single dot is sent)
# * Stdout response (--delivery=stdout), once for each recipient:
# X-Daemon-Classification: INNOCENT
# <complete mail body>
#
# Note that when an unknown recipient is passed in, DSPAM will simply
# deliver the message (dspam.conf: (Un)TrustedDeliveryAgent,
# DeliveryHost) unaltered and unfiltered. The response for unknown
# recipients will still be something indicating 'accepted'.
peek = self._peek(24)
if peek.startswith('250'):
# Response is LTMP formatted
regex = re.compile('250 \d\.\d\.\d <([^>]+)>')
finished = False
while not finished:
resp = self._read()
match = regex.match(resp)
if not match:
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
rcpt = match.group(1)
self.results[rcpt] = {'accepted': True}
logger.debug(
'Message accepted for recipient {} in LMTP mode'.format(
rcpt))
if len(self.results) == len(self._recipients):
finished = True
elif peek.startswith('X-DSPAM-Result:'):
# Response is in summary format
regex = re.compile('X-DSPAM-Result: ([^;]+); result="(\w+)"; '
'class="(\w+)"; probability=([\d\.]+); '
'confidence=([\d\.]+); signature=([\w,/]+)')
finished = False
while not finished:
resp = self._read()
match = regex.match(resp)
if not match:
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
rcpt = match.group(1)
# map results to their DSPAM classification result names
fields = ('user', 'result', 'class',
'probability', 'confidence', 'signature')
self.results[rcpt] = dict(zip(fields, match.groups()))
if self.results[rcpt]['signature'] == 'N/A':
del(self.results[rcpt]['signature'])
logger.debug(
'Message handled for recipient {} in DLMTP summary mode, '
'result is {}'.format(rcpt, match.group(2)))
if len(self.results) == len(self._recipients):
# we received responses for all accepted recipients
finished = True
# read final dot
resp = self._read()
if resp != '.':
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
elif peek.startswith('X-Daemon-Classification:'):
# Response is in stdout format
finished = False
message = ''
while not finished:
resp = self._read()
if resp.startswith('X-Daemon-Classification:'):
if message != '':
# A new message body starts, store the previous one
rcpt = self._recipients.pop(0)
self.results[rcpt] = {
'result': result,
'message': message
}
logger.debug(
'Message handled for recipient {} in DLMTP '
'stdout mode, result is {}, message body '
'is {} chars'.format(rcpt, result, len(message)))
message = ''
# Remember next result
result = resp[25:]
elif resp == '.':
# A single dot can signal end-of-data, or might be just
# regular mail data.
self._socket.setblocking(False)
try:
# If _peek() succeeds, we did not reach end-of-data yet
# so it was message content.
peek = self._peek(1)
message = message + '\r\n' + resp
except socket.error:
# reached end-of-data, store message and finish
finished = True
rcpt = self._recipients.pop(0)
# strip final newline
message = message[0:-2]
self.results[rcpt] = {
'result': result,
'message': message
}
logger.debug(
'Message accepted for recipient {} in DLMTP '
'stdout mode, result is {}, message body '
'is {} chars'.format(rcpt, result, len(message)))
self._socket.setblocking(True)
else:
# regular message data
if message == '':
message = resp
else:
message = message + '\r\n' + resp
else:
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
def rset(self):
"""
Send LMTP RSET command and process the server response.
"""
self._send('RSET\r\n')
resp = self._read()
if not resp.startswith('250'):
logger.warn('Unexpected server response at RSET: ' + resp)
self._recipients = []
self.results = {}
def quit(self):
"""
Send LMTP QUIT command, read the server response and disconnect.
"""
self._send('QUIT\r\n')
resp = self._read()
if not resp.startswith('221'):
logger.warning('Unexpected server response at QUIT: ' + resp)
self._socket.close()
self._socket = None
self._recipients = []
self.results = {}
def process(self, message, user):
"""
Process a message.
"""
if not self._socket:
self.connect()
self.lhlo()
else:
self.rset()
if not self.dlmtp:
raise DspamClientError('DLMTP mode not available')
self.mailfrom(client_args='--process --deliver=summary')
self.rcptto((user,))
self.data(message)
# check for valid result format
if 'class' not in self.results[user]:
raise DspamClientError(
'Unexpected response format from server at END-OF-DATA, '
'an error occured')
return self.results[user]
def classify(self, message, user):
"""
Classify a message.
"""
if not self._socket:
self.connect()
self.lhlo()
else:
self.rset()
if not self.dlmtp:
raise DspamClientError('DLMTP mode not available')
self.mailfrom(client_args='--classify --deliver=summary')
self.rcptto((user,))
self.data(message)
# check for valid result format
if 'class' not in self.results[user]:
raise DspamClientError(
'Unexpected response format from server at END-OF-DATA, '
'an error occured')
return self.results[user]
def train(self, message, user, class_):
"""
Train DSPAM with a message.
"""
raise NotImplementedError
def retrain_message(self, message, class_, source='error'):
"""
Correct an invalid classification.
"""
raise NotImplementedError
def retrain_signature(self, signature, class_, source='error'):
"""
Correct an invalid classification.
"""
raise NotImplementedError
|
whyscream/dspam-milter | dspam/client.py | DspamClient.rcptto | python | def rcptto(self, recipients):
for rcpt in recipients:
self._send('RCPT TO:<{}>\r\n'.format(rcpt))
resp = self._read()
if not resp.startswith('250'):
raise DspamClientError(
'Unexpected server response at RCPT TO for '
'recipient {}: {}'.format(rcpt, resp))
self._recipients.append(rcpt) | Send LMTP RCPT TO command, and process the server response.
The DSPAM server expects to find one or more valid DSPAM users as
envelope recipients. The set recipient will be the user DSPAM
processes mail for.
When you need want DSPAM to deliver the message itself, and need to
pass the server an envelope recipient for this that differs from the
DSPAM user account name, use the --rcpt-to parameter in client_args
at mailfrom().
args:
recipients -- A list of recipients | train | https://github.com/whyscream/dspam-milter/blob/f9939b718eed02cb7e56f8c5b921db4cfe1cd85a/dspam/client.py#L295-L319 | [
"def _send(self, line):\n \"\"\"\n Write a line of data to the server.\n\n Args:\n line -- A single line of data to write to the socket.\n\n \"\"\"\n if not line.endswith('\\r\\n'):\n if line.endswith('\\n'):\n logger.debug('Fixing bare LF before sending data to socket')\n ... | class DspamClient(object):
"""
A DSPAM client can be used to interact with a DSPAM server.
The client is able to speak to a DSPAM server over both a TCP or UNIX
domain socket exposed by a running DSPAM server, and interact with it
through its supported protocols: LMTP and DLMTP. The latter is an
enhanced version of LMTP to facilitate some options that are not possible
when using strict LMTP.
Some common DSPAM operations are included in this class, custom
operations can be built by creating a new LMTP dialog with the
low-level LMTP commands.
DSPAM server setup
==================
To use the client to speak with a DSPAM server, the server must be
configured to expose a TCP (dspam.conf: ServerHost, ServerPort) or
UNIX domain socket (dspam.conf: ServerDomainSocketPath).
The server can support mulitple modes (dspam.conf: ServerMode) for
interaction with connecting clients. Which mode you need, depends on
the operations you need to perform. Most of the time you'll want to
use DLMTP though, which means that you'll also need to setup
authentication (dspam.conf: ServerPass.<ident>).
Python DspamClient setup
========================
Each DspamClient instance needs to talk to a DSPAM server.
You need to specify the socket where DSPAM is listening when creating
a new instance. If you need to use DLMTP features (probably most of the
time), you also need to pass the ident and password.
"""
# Default configuration
socket = 'inet:24@localhost'
dlmtp_ident = None
dlmtp_pass = None
def __init__(self, socket=None, dlmtp_ident=None, dlmtp_pass=None):
"""
Initialize new DSPAM client.
The socket specifies where DSPAM is listening. Specify it in the form:
unix:PATH or inet:PORT[@HOST]. For example, the default UNIX domain
socket in dspam.conf would look like: unix:/var/run/dspam/dspam.sock,
and the default TCP socket: inet:24@localhost.
Args:
socket -- The socket on which DSPAM is listening.
dlmtp_ident -- The authentication identifier.
dlmtp_pass -- The authentication password.
"""
if socket is not None:
self.socket = socket
if dlmtp_ident is not None:
self.dlmtp_ident = dlmtp_ident
if dlmtp_pass is not None:
self.dlmtp_pass = dlmtp_pass
self.dlmtp = False
self.results = {}
# Some internal structures
self._socket = None
self._recipients = []
def __del__(self):
"""
Destroy the DSPAM client object.
"""
if self._socket:
self.quit()
def _send(self, line):
"""
Write a line of data to the server.
Args:
line -- A single line of data to write to the socket.
"""
if not line.endswith('\r\n'):
if line.endswith('\n'):
logger.debug('Fixing bare LF before sending data to socket')
line = line[0:-1] + '\r\n'
else:
logger.debug(
'Fixing missing CRLF before sending data to socket')
line = line + '\r\n'
logger.debug('Client sent: ' + line.rstrip())
self._socket.send(line)
def _read(self):
"""
Read a single response line from the server.
"""
line = ''
finished = False
while not finished:
char = self._socket.recv(1)
if char == '':
return ''
elif char == '\r':
continue
elif char == '\n':
finished = True
continue
else:
line = line + char
logger.debug('Server sent: ' + line.rstrip())
return line
def _peek(self, chars=1):
"""
Peek at the data in the server response.
Peeking should only be done when the response can be predicted.
Make sure that the socket will not block by requesting too
much data from it while peeking.
Args:
chars -- the number of characters to peek.
"""
line = self._socket.recv(chars, socket.MSG_PEEK)
logger.debug('Server sent (peek): ' + line.rstrip())
return line
def connect(self):
"""
Connect to TCP or domain socket, and process the server LMTP greeting.
"""
# extract proto from socket setting
try:
(proto, spec) = self.socket.split(':')
except ValueError:
raise DspamClientError(
'Failed to parse DSPAM socket specification, '
'no proto found: ' + self.socket)
if proto == 'unix':
# connect to UNIX domain socket
try:
self._socket = socket.socket(
socket.AF_UNIX, socket.SOCK_STREAM)
self._socket.connect(spec)
except socket.error as err:
self._socket = None
raise DspamClientError(
'Failed to connect to DSPAM server '
'at socket {}: {}'.format(spec, err))
logger.debug('Connected to DSPAM server at socket {}'.format(spec))
elif proto == 'inet' or proto == 'inet6':
# connect to TCP socket
try:
(port, host) = spec.split('@')
port = int(port)
if host == '':
host = 'localhost'
except ValueError:
port = int(spec)
host = 'localhost'
try:
self._socket = socket.socket(
socket.AF_INET, socket.SOCK_STREAM)
self._socket.connect((host, port))
except socket.error as err:
self._socket = None
raise DspamClientError(
'Failed to connect to DSPAM server at host {} '
'port {}: {}'.format(host, port, err))
logger.debug(
'Connected to DSPAM server at host {}, port {}'.format(
host, port))
else:
raise DspamClientError(
'Failed to parse DSPAM socket specification, '
'unknown proto ' + proto)
resp = self._read()
if not resp.startswith('220'):
raise DspamClientError(
'Unexpected server response at connect: ' + resp)
def lhlo(self):
"""
Send LMTP LHLO greeting, and process the server response.
A regular LMTP greeting is sent, and if accepted by the server, the
capabilities it returns are parsed.
DLMTP authentication starts here by announcing the dlmtp_ident in
the LHLO as our hostname. When the ident is accepted and DLMTP
mode is enabled (dspam.conf: ServerMode=dspam|auto), the
DSPAMPROCESSMODE capability is announced by the server.
When this capability is detected, the <DspamClient>.dlmtp flag
will be enabled.
"""
if self.dlmtp_ident is not None:
host = self.dlmtp_ident
else:
host = socket.getfqdn()
self._send('LHLO ' + host + '\r\n')
finished = False
while not finished:
resp = self._read()
if not resp.startswith('250'):
raise DspamClientError(
'Unexpected server response at LHLO: ' + resp)
if resp[4:20] == 'DSPAMPROCESSMODE':
self.dlmtp = True
logger.debug('Detected DLMTP extension in LHLO response')
if resp[3] == ' ':
# difference between "250-8BITMIME" and "250 SIZE"
finished = True
def mailfrom(self, sender=None, client_args=None):
"""
Send LMTP MAIL FROM command, and process the server response.
In DLMTP mode, the server expects the client to identify itself.
Because the envelope sender is of no importance to DSPAM, the client
is expected to send an identity and a password (dspam.conf:
ServerPass.<ident>="<password>") in stead of the actual sender.
When you need want DSPAM to deliver the message itself and need to
pass the server an actual envelope sender for that, add the
--mail-from parameter in client_args.
When the server is setup in LMTP mode only (dspam.conf:
ServerMode=standard), the envelope sender is a regular envelope
sender, and is re-used when delivering the message after processing.
Client args
===========
When in DLMTP mode (and with proper auth credentials), the server
accepts parameters specified by the client. These are in the form
as they are passed to the command-line 'dspam' program.
See man dspam(1) for details, and the process() or classify() methods
in this class for simple examples.
Args:
sender -- The envelope sender to use in LMTP mode.
client_args -- DSPAM parameters to pass to the server in DLMTP mode.
"""
if sender and client_args:
raise DspamClientError('Arguments are mutually exclusive')
if client_args and not self.dlmtp:
raise DspamClientError(
'Cannot send client args, server does not support DLMTP')
command = 'MAIL FROM:'
if not sender:
if self.dlmtp_ident and self.dlmtp_pass:
sender = self.dlmtp_pass + '@' + self.dlmtp_ident
else:
sender = ''
command = command + '<' + sender + '>'
if client_args:
command = command + ' DSPAMPROCESSMODE="{}"'.format(client_args)
self._send(command + '\r\n')
resp = self._read()
if not resp.startswith('250'):
raise DspamClientError(
'Unexpected server response at MAIL FROM: ' + resp)
def data(self, message):
"""
Send LMTP DATA command and process the server response.
The server response is stored as a list of dicts in
<DspamClient>.results, keyed on the recipient name(s). Depending
on the server return data, different formats are available:
* LMTP mode -- Dict containing 'accepted', a bool indicating
that the message was handed to the server.
* Summary mode -- Dict containing 'username', 'result',
'classification', 'probability', 'confidence'
and 'signature'.
* Stdout mode -- Dict containing 'result' and 'message', the
complete message payload including added headers.
The return data is always parsed and stored, independent of its format.
If you requested a regular LMTP response, but the server
responded with an DLMTP summary, the summary is still stored in
<DspamClient>.results, and you will need to check the result format
yourself and decide whether that was acceptable for your use case.
This is due to the fact that it's possible to configure the server to
return non LMTP responses, even when in LMTP mode (see dspam.conf:
ServerParameters).
Note: while processing response data in stdout mode, it's not possible
to relate the returned messages to a specific recipient, when multiple
recipients were specified in rcptto(). There is no guarantee
that the message stored in <DspamClient>.results['foo'] actually
belongs to the recipient 'foo'. If this relationship needs to be
guaranteed, send each message with a single recipient in rcptto().
args:
message -- The full message payload to pass to the server.
"""
self._send('DATA\r\n')
resp = self._read()
if not resp.startswith('354'):
raise DspamClientError(
'Unexpected server response at DATA: ' + resp)
# Send message payload
for line in message.split('\n'):
if line == '.':
# Dot stuffing
line = '..'
self._send(line)
# Send end-of-data
self._send('.\r\n')
# Depending on server configuration, several responses are possible:
# * Standard LMTP response code, once for each recipient:
# 250 2.6.0 <bar> Message accepted for delivery
# * Summary response (--deliver=summary), once for each recipient:
# X-DSPAM-Result: bar; result="Spam"; class="Spam"; \
# probability=1.0000; confidence=0.85; \
# signature=50c50c0f315636261418125
# (after the last summary line, a single dot is sent)
# * Stdout response (--delivery=stdout), once for each recipient:
# X-Daemon-Classification: INNOCENT
# <complete mail body>
#
# Note that when an unknown recipient is passed in, DSPAM will simply
# deliver the message (dspam.conf: (Un)TrustedDeliveryAgent,
# DeliveryHost) unaltered and unfiltered. The response for unknown
# recipients will still be something indicating 'accepted'.
peek = self._peek(24)
if peek.startswith('250'):
# Response is LTMP formatted
regex = re.compile('250 \d\.\d\.\d <([^>]+)>')
finished = False
while not finished:
resp = self._read()
match = regex.match(resp)
if not match:
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
rcpt = match.group(1)
self.results[rcpt] = {'accepted': True}
logger.debug(
'Message accepted for recipient {} in LMTP mode'.format(
rcpt))
if len(self.results) == len(self._recipients):
finished = True
elif peek.startswith('X-DSPAM-Result:'):
# Response is in summary format
regex = re.compile('X-DSPAM-Result: ([^;]+); result="(\w+)"; '
'class="(\w+)"; probability=([\d\.]+); '
'confidence=([\d\.]+); signature=([\w,/]+)')
finished = False
while not finished:
resp = self._read()
match = regex.match(resp)
if not match:
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
rcpt = match.group(1)
# map results to their DSPAM classification result names
fields = ('user', 'result', 'class',
'probability', 'confidence', 'signature')
self.results[rcpt] = dict(zip(fields, match.groups()))
if self.results[rcpt]['signature'] == 'N/A':
del(self.results[rcpt]['signature'])
logger.debug(
'Message handled for recipient {} in DLMTP summary mode, '
'result is {}'.format(rcpt, match.group(2)))
if len(self.results) == len(self._recipients):
# we received responses for all accepted recipients
finished = True
# read final dot
resp = self._read()
if resp != '.':
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
elif peek.startswith('X-Daemon-Classification:'):
# Response is in stdout format
finished = False
message = ''
while not finished:
resp = self._read()
if resp.startswith('X-Daemon-Classification:'):
if message != '':
# A new message body starts, store the previous one
rcpt = self._recipients.pop(0)
self.results[rcpt] = {
'result': result,
'message': message
}
logger.debug(
'Message handled for recipient {} in DLMTP '
'stdout mode, result is {}, message body '
'is {} chars'.format(rcpt, result, len(message)))
message = ''
# Remember next result
result = resp[25:]
elif resp == '.':
# A single dot can signal end-of-data, or might be just
# regular mail data.
self._socket.setblocking(False)
try:
# If _peek() succeeds, we did not reach end-of-data yet
# so it was message content.
peek = self._peek(1)
message = message + '\r\n' + resp
except socket.error:
# reached end-of-data, store message and finish
finished = True
rcpt = self._recipients.pop(0)
# strip final newline
message = message[0:-2]
self.results[rcpt] = {
'result': result,
'message': message
}
logger.debug(
'Message accepted for recipient {} in DLMTP '
'stdout mode, result is {}, message body '
'is {} chars'.format(rcpt, result, len(message)))
self._socket.setblocking(True)
else:
# regular message data
if message == '':
message = resp
else:
message = message + '\r\n' + resp
else:
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
def rset(self):
"""
Send LMTP RSET command and process the server response.
"""
self._send('RSET\r\n')
resp = self._read()
if not resp.startswith('250'):
logger.warn('Unexpected server response at RSET: ' + resp)
self._recipients = []
self.results = {}
def quit(self):
"""
Send LMTP QUIT command, read the server response and disconnect.
"""
self._send('QUIT\r\n')
resp = self._read()
if not resp.startswith('221'):
logger.warning('Unexpected server response at QUIT: ' + resp)
self._socket.close()
self._socket = None
self._recipients = []
self.results = {}
def process(self, message, user):
"""
Process a message.
"""
if not self._socket:
self.connect()
self.lhlo()
else:
self.rset()
if not self.dlmtp:
raise DspamClientError('DLMTP mode not available')
self.mailfrom(client_args='--process --deliver=summary')
self.rcptto((user,))
self.data(message)
# check for valid result format
if 'class' not in self.results[user]:
raise DspamClientError(
'Unexpected response format from server at END-OF-DATA, '
'an error occured')
return self.results[user]
def classify(self, message, user):
"""
Classify a message.
"""
if not self._socket:
self.connect()
self.lhlo()
else:
self.rset()
if not self.dlmtp:
raise DspamClientError('DLMTP mode not available')
self.mailfrom(client_args='--classify --deliver=summary')
self.rcptto((user,))
self.data(message)
# check for valid result format
if 'class' not in self.results[user]:
raise DspamClientError(
'Unexpected response format from server at END-OF-DATA, '
'an error occured')
return self.results[user]
def train(self, message, user, class_):
"""
Train DSPAM with a message.
"""
raise NotImplementedError
def retrain_message(self, message, class_, source='error'):
"""
Correct an invalid classification.
"""
raise NotImplementedError
def retrain_signature(self, signature, class_, source='error'):
"""
Correct an invalid classification.
"""
raise NotImplementedError
|
whyscream/dspam-milter | dspam/client.py | DspamClient.data | python | def data(self, message):
self._send('DATA\r\n')
resp = self._read()
if not resp.startswith('354'):
raise DspamClientError(
'Unexpected server response at DATA: ' + resp)
# Send message payload
for line in message.split('\n'):
if line == '.':
# Dot stuffing
line = '..'
self._send(line)
# Send end-of-data
self._send('.\r\n')
# Depending on server configuration, several responses are possible:
# * Standard LMTP response code, once for each recipient:
# 250 2.6.0 <bar> Message accepted for delivery
# * Summary response (--deliver=summary), once for each recipient:
# X-DSPAM-Result: bar; result="Spam"; class="Spam"; \
# probability=1.0000; confidence=0.85; \
# signature=50c50c0f315636261418125
# (after the last summary line, a single dot is sent)
# * Stdout response (--delivery=stdout), once for each recipient:
# X-Daemon-Classification: INNOCENT
# <complete mail body>
#
# Note that when an unknown recipient is passed in, DSPAM will simply
# deliver the message (dspam.conf: (Un)TrustedDeliveryAgent,
# DeliveryHost) unaltered and unfiltered. The response for unknown
# recipients will still be something indicating 'accepted'.
peek = self._peek(24)
if peek.startswith('250'):
# Response is LTMP formatted
regex = re.compile('250 \d\.\d\.\d <([^>]+)>')
finished = False
while not finished:
resp = self._read()
match = regex.match(resp)
if not match:
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
rcpt = match.group(1)
self.results[rcpt] = {'accepted': True}
logger.debug(
'Message accepted for recipient {} in LMTP mode'.format(
rcpt))
if len(self.results) == len(self._recipients):
finished = True
elif peek.startswith('X-DSPAM-Result:'):
# Response is in summary format
regex = re.compile('X-DSPAM-Result: ([^;]+); result="(\w+)"; '
'class="(\w+)"; probability=([\d\.]+); '
'confidence=([\d\.]+); signature=([\w,/]+)')
finished = False
while not finished:
resp = self._read()
match = regex.match(resp)
if not match:
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
rcpt = match.group(1)
# map results to their DSPAM classification result names
fields = ('user', 'result', 'class',
'probability', 'confidence', 'signature')
self.results[rcpt] = dict(zip(fields, match.groups()))
if self.results[rcpt]['signature'] == 'N/A':
del(self.results[rcpt]['signature'])
logger.debug(
'Message handled for recipient {} in DLMTP summary mode, '
'result is {}'.format(rcpt, match.group(2)))
if len(self.results) == len(self._recipients):
# we received responses for all accepted recipients
finished = True
# read final dot
resp = self._read()
if resp != '.':
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
elif peek.startswith('X-Daemon-Classification:'):
# Response is in stdout format
finished = False
message = ''
while not finished:
resp = self._read()
if resp.startswith('X-Daemon-Classification:'):
if message != '':
# A new message body starts, store the previous one
rcpt = self._recipients.pop(0)
self.results[rcpt] = {
'result': result,
'message': message
}
logger.debug(
'Message handled for recipient {} in DLMTP '
'stdout mode, result is {}, message body '
'is {} chars'.format(rcpt, result, len(message)))
message = ''
# Remember next result
result = resp[25:]
elif resp == '.':
# A single dot can signal end-of-data, or might be just
# regular mail data.
self._socket.setblocking(False)
try:
# If _peek() succeeds, we did not reach end-of-data yet
# so it was message content.
peek = self._peek(1)
message = message + '\r\n' + resp
except socket.error:
# reached end-of-data, store message and finish
finished = True
rcpt = self._recipients.pop(0)
# strip final newline
message = message[0:-2]
self.results[rcpt] = {
'result': result,
'message': message
}
logger.debug(
'Message accepted for recipient {} in DLMTP '
'stdout mode, result is {}, message body '
'is {} chars'.format(rcpt, result, len(message)))
self._socket.setblocking(True)
else:
# regular message data
if message == '':
message = resp
else:
message = message + '\r\n' + resp
else:
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp) | Send LMTP DATA command and process the server response.
The server response is stored as a list of dicts in
<DspamClient>.results, keyed on the recipient name(s). Depending
on the server return data, different formats are available:
* LMTP mode -- Dict containing 'accepted', a bool indicating
that the message was handed to the server.
* Summary mode -- Dict containing 'username', 'result',
'classification', 'probability', 'confidence'
and 'signature'.
* Stdout mode -- Dict containing 'result' and 'message', the
complete message payload including added headers.
The return data is always parsed and stored, independent of its format.
If you requested a regular LMTP response, but the server
responded with an DLMTP summary, the summary is still stored in
<DspamClient>.results, and you will need to check the result format
yourself and decide whether that was acceptable for your use case.
This is due to the fact that it's possible to configure the server to
return non LMTP responses, even when in LMTP mode (see dspam.conf:
ServerParameters).
Note: while processing response data in stdout mode, it's not possible
to relate the returned messages to a specific recipient, when multiple
recipients were specified in rcptto(). There is no guarantee
that the message stored in <DspamClient>.results['foo'] actually
belongs to the recipient 'foo'. If this relationship needs to be
guaranteed, send each message with a single recipient in rcptto().
args:
message -- The full message payload to pass to the server. | train | https://github.com/whyscream/dspam-milter/blob/f9939b718eed02cb7e56f8c5b921db4cfe1cd85a/dspam/client.py#L321-L498 | [
"def _send(self, line):\n \"\"\"\n Write a line of data to the server.\n\n Args:\n line -- A single line of data to write to the socket.\n\n \"\"\"\n if not line.endswith('\\r\\n'):\n if line.endswith('\\n'):\n logger.debug('Fixing bare LF before sending data to socket')\n ... | class DspamClient(object):
"""
A DSPAM client can be used to interact with a DSPAM server.
The client is able to speak to a DSPAM server over both a TCP or UNIX
domain socket exposed by a running DSPAM server, and interact with it
through its supported protocols: LMTP and DLMTP. The latter is an
enhanced version of LMTP to facilitate some options that are not possible
when using strict LMTP.
Some common DSPAM operations are included in this class, custom
operations can be built by creating a new LMTP dialog with the
low-level LMTP commands.
DSPAM server setup
==================
To use the client to speak with a DSPAM server, the server must be
configured to expose a TCP (dspam.conf: ServerHost, ServerPort) or
UNIX domain socket (dspam.conf: ServerDomainSocketPath).
The server can support mulitple modes (dspam.conf: ServerMode) for
interaction with connecting clients. Which mode you need, depends on
the operations you need to perform. Most of the time you'll want to
use DLMTP though, which means that you'll also need to setup
authentication (dspam.conf: ServerPass.<ident>).
Python DspamClient setup
========================
Each DspamClient instance needs to talk to a DSPAM server.
You need to specify the socket where DSPAM is listening when creating
a new instance. If you need to use DLMTP features (probably most of the
time), you also need to pass the ident and password.
"""
# Default configuration
socket = 'inet:24@localhost'
dlmtp_ident = None
dlmtp_pass = None
def __init__(self, socket=None, dlmtp_ident=None, dlmtp_pass=None):
"""
Initialize new DSPAM client.
The socket specifies where DSPAM is listening. Specify it in the form:
unix:PATH or inet:PORT[@HOST]. For example, the default UNIX domain
socket in dspam.conf would look like: unix:/var/run/dspam/dspam.sock,
and the default TCP socket: inet:24@localhost.
Args:
socket -- The socket on which DSPAM is listening.
dlmtp_ident -- The authentication identifier.
dlmtp_pass -- The authentication password.
"""
if socket is not None:
self.socket = socket
if dlmtp_ident is not None:
self.dlmtp_ident = dlmtp_ident
if dlmtp_pass is not None:
self.dlmtp_pass = dlmtp_pass
self.dlmtp = False
self.results = {}
# Some internal structures
self._socket = None
self._recipients = []
def __del__(self):
"""
Destroy the DSPAM client object.
"""
if self._socket:
self.quit()
def _send(self, line):
"""
Write a line of data to the server.
Args:
line -- A single line of data to write to the socket.
"""
if not line.endswith('\r\n'):
if line.endswith('\n'):
logger.debug('Fixing bare LF before sending data to socket')
line = line[0:-1] + '\r\n'
else:
logger.debug(
'Fixing missing CRLF before sending data to socket')
line = line + '\r\n'
logger.debug('Client sent: ' + line.rstrip())
self._socket.send(line)
def _read(self):
"""
Read a single response line from the server.
"""
line = ''
finished = False
while not finished:
char = self._socket.recv(1)
if char == '':
return ''
elif char == '\r':
continue
elif char == '\n':
finished = True
continue
else:
line = line + char
logger.debug('Server sent: ' + line.rstrip())
return line
def _peek(self, chars=1):
"""
Peek at the data in the server response.
Peeking should only be done when the response can be predicted.
Make sure that the socket will not block by requesting too
much data from it while peeking.
Args:
chars -- the number of characters to peek.
"""
line = self._socket.recv(chars, socket.MSG_PEEK)
logger.debug('Server sent (peek): ' + line.rstrip())
return line
def connect(self):
"""
Connect to TCP or domain socket, and process the server LMTP greeting.
"""
# extract proto from socket setting
try:
(proto, spec) = self.socket.split(':')
except ValueError:
raise DspamClientError(
'Failed to parse DSPAM socket specification, '
'no proto found: ' + self.socket)
if proto == 'unix':
# connect to UNIX domain socket
try:
self._socket = socket.socket(
socket.AF_UNIX, socket.SOCK_STREAM)
self._socket.connect(spec)
except socket.error as err:
self._socket = None
raise DspamClientError(
'Failed to connect to DSPAM server '
'at socket {}: {}'.format(spec, err))
logger.debug('Connected to DSPAM server at socket {}'.format(spec))
elif proto == 'inet' or proto == 'inet6':
# connect to TCP socket
try:
(port, host) = spec.split('@')
port = int(port)
if host == '':
host = 'localhost'
except ValueError:
port = int(spec)
host = 'localhost'
try:
self._socket = socket.socket(
socket.AF_INET, socket.SOCK_STREAM)
self._socket.connect((host, port))
except socket.error as err:
self._socket = None
raise DspamClientError(
'Failed to connect to DSPAM server at host {} '
'port {}: {}'.format(host, port, err))
logger.debug(
'Connected to DSPAM server at host {}, port {}'.format(
host, port))
else:
raise DspamClientError(
'Failed to parse DSPAM socket specification, '
'unknown proto ' + proto)
resp = self._read()
if not resp.startswith('220'):
raise DspamClientError(
'Unexpected server response at connect: ' + resp)
def lhlo(self):
"""
Send LMTP LHLO greeting, and process the server response.
A regular LMTP greeting is sent, and if accepted by the server, the
capabilities it returns are parsed.
DLMTP authentication starts here by announcing the dlmtp_ident in
the LHLO as our hostname. When the ident is accepted and DLMTP
mode is enabled (dspam.conf: ServerMode=dspam|auto), the
DSPAMPROCESSMODE capability is announced by the server.
When this capability is detected, the <DspamClient>.dlmtp flag
will be enabled.
"""
if self.dlmtp_ident is not None:
host = self.dlmtp_ident
else:
host = socket.getfqdn()
self._send('LHLO ' + host + '\r\n')
finished = False
while not finished:
resp = self._read()
if not resp.startswith('250'):
raise DspamClientError(
'Unexpected server response at LHLO: ' + resp)
if resp[4:20] == 'DSPAMPROCESSMODE':
self.dlmtp = True
logger.debug('Detected DLMTP extension in LHLO response')
if resp[3] == ' ':
# difference between "250-8BITMIME" and "250 SIZE"
finished = True
def mailfrom(self, sender=None, client_args=None):
"""
Send LMTP MAIL FROM command, and process the server response.
In DLMTP mode, the server expects the client to identify itself.
Because the envelope sender is of no importance to DSPAM, the client
is expected to send an identity and a password (dspam.conf:
ServerPass.<ident>="<password>") in stead of the actual sender.
When you need want DSPAM to deliver the message itself and need to
pass the server an actual envelope sender for that, add the
--mail-from parameter in client_args.
When the server is setup in LMTP mode only (dspam.conf:
ServerMode=standard), the envelope sender is a regular envelope
sender, and is re-used when delivering the message after processing.
Client args
===========
When in DLMTP mode (and with proper auth credentials), the server
accepts parameters specified by the client. These are in the form
as they are passed to the command-line 'dspam' program.
See man dspam(1) for details, and the process() or classify() methods
in this class for simple examples.
Args:
sender -- The envelope sender to use in LMTP mode.
client_args -- DSPAM parameters to pass to the server in DLMTP mode.
"""
if sender and client_args:
raise DspamClientError('Arguments are mutually exclusive')
if client_args and not self.dlmtp:
raise DspamClientError(
'Cannot send client args, server does not support DLMTP')
command = 'MAIL FROM:'
if not sender:
if self.dlmtp_ident and self.dlmtp_pass:
sender = self.dlmtp_pass + '@' + self.dlmtp_ident
else:
sender = ''
command = command + '<' + sender + '>'
if client_args:
command = command + ' DSPAMPROCESSMODE="{}"'.format(client_args)
self._send(command + '\r\n')
resp = self._read()
if not resp.startswith('250'):
raise DspamClientError(
'Unexpected server response at MAIL FROM: ' + resp)
def rcptto(self, recipients):
"""
Send LMTP RCPT TO command, and process the server response.
The DSPAM server expects to find one or more valid DSPAM users as
envelope recipients. The set recipient will be the user DSPAM
processes mail for.
When you need want DSPAM to deliver the message itself, and need to
pass the server an envelope recipient for this that differs from the
DSPAM user account name, use the --rcpt-to parameter in client_args
at mailfrom().
args:
recipients -- A list of recipients
"""
for rcpt in recipients:
self._send('RCPT TO:<{}>\r\n'.format(rcpt))
resp = self._read()
if not resp.startswith('250'):
raise DspamClientError(
'Unexpected server response at RCPT TO for '
'recipient {}: {}'.format(rcpt, resp))
self._recipients.append(rcpt)
def rset(self):
"""
Send LMTP RSET command and process the server response.
"""
self._send('RSET\r\n')
resp = self._read()
if not resp.startswith('250'):
logger.warn('Unexpected server response at RSET: ' + resp)
self._recipients = []
self.results = {}
def quit(self):
"""
Send LMTP QUIT command, read the server response and disconnect.
"""
self._send('QUIT\r\n')
resp = self._read()
if not resp.startswith('221'):
logger.warning('Unexpected server response at QUIT: ' + resp)
self._socket.close()
self._socket = None
self._recipients = []
self.results = {}
def process(self, message, user):
"""
Process a message.
"""
if not self._socket:
self.connect()
self.lhlo()
else:
self.rset()
if not self.dlmtp:
raise DspamClientError('DLMTP mode not available')
self.mailfrom(client_args='--process --deliver=summary')
self.rcptto((user,))
self.data(message)
# check for valid result format
if 'class' not in self.results[user]:
raise DspamClientError(
'Unexpected response format from server at END-OF-DATA, '
'an error occured')
return self.results[user]
def classify(self, message, user):
"""
Classify a message.
"""
if not self._socket:
self.connect()
self.lhlo()
else:
self.rset()
if not self.dlmtp:
raise DspamClientError('DLMTP mode not available')
self.mailfrom(client_args='--classify --deliver=summary')
self.rcptto((user,))
self.data(message)
# check for valid result format
if 'class' not in self.results[user]:
raise DspamClientError(
'Unexpected response format from server at END-OF-DATA, '
'an error occured')
return self.results[user]
def train(self, message, user, class_):
"""
Train DSPAM with a message.
"""
raise NotImplementedError
def retrain_message(self, message, class_, source='error'):
"""
Correct an invalid classification.
"""
raise NotImplementedError
def retrain_signature(self, signature, class_, source='error'):
"""
Correct an invalid classification.
"""
raise NotImplementedError
|
whyscream/dspam-milter | dspam/client.py | DspamClient.rset | python | def rset(self):
self._send('RSET\r\n')
resp = self._read()
if not resp.startswith('250'):
logger.warn('Unexpected server response at RSET: ' + resp)
self._recipients = []
self.results = {} | Send LMTP RSET command and process the server response. | train | https://github.com/whyscream/dspam-milter/blob/f9939b718eed02cb7e56f8c5b921db4cfe1cd85a/dspam/client.py#L500-L510 | [
"def _send(self, line):\n \"\"\"\n Write a line of data to the server.\n\n Args:\n line -- A single line of data to write to the socket.\n\n \"\"\"\n if not line.endswith('\\r\\n'):\n if line.endswith('\\n'):\n logger.debug('Fixing bare LF before sending data to socket')\n ... | class DspamClient(object):
"""
A DSPAM client can be used to interact with a DSPAM server.
The client is able to speak to a DSPAM server over both a TCP or UNIX
domain socket exposed by a running DSPAM server, and interact with it
through its supported protocols: LMTP and DLMTP. The latter is an
enhanced version of LMTP to facilitate some options that are not possible
when using strict LMTP.
Some common DSPAM operations are included in this class, custom
operations can be built by creating a new LMTP dialog with the
low-level LMTP commands.
DSPAM server setup
==================
To use the client to speak with a DSPAM server, the server must be
configured to expose a TCP (dspam.conf: ServerHost, ServerPort) or
UNIX domain socket (dspam.conf: ServerDomainSocketPath).
The server can support mulitple modes (dspam.conf: ServerMode) for
interaction with connecting clients. Which mode you need, depends on
the operations you need to perform. Most of the time you'll want to
use DLMTP though, which means that you'll also need to setup
authentication (dspam.conf: ServerPass.<ident>).
Python DspamClient setup
========================
Each DspamClient instance needs to talk to a DSPAM server.
You need to specify the socket where DSPAM is listening when creating
a new instance. If you need to use DLMTP features (probably most of the
time), you also need to pass the ident and password.
"""
# Default configuration
socket = 'inet:24@localhost'
dlmtp_ident = None
dlmtp_pass = None
def __init__(self, socket=None, dlmtp_ident=None, dlmtp_pass=None):
"""
Initialize new DSPAM client.
The socket specifies where DSPAM is listening. Specify it in the form:
unix:PATH or inet:PORT[@HOST]. For example, the default UNIX domain
socket in dspam.conf would look like: unix:/var/run/dspam/dspam.sock,
and the default TCP socket: inet:24@localhost.
Args:
socket -- The socket on which DSPAM is listening.
dlmtp_ident -- The authentication identifier.
dlmtp_pass -- The authentication password.
"""
if socket is not None:
self.socket = socket
if dlmtp_ident is not None:
self.dlmtp_ident = dlmtp_ident
if dlmtp_pass is not None:
self.dlmtp_pass = dlmtp_pass
self.dlmtp = False
self.results = {}
# Some internal structures
self._socket = None
self._recipients = []
def __del__(self):
"""
Destroy the DSPAM client object.
"""
if self._socket:
self.quit()
def _send(self, line):
"""
Write a line of data to the server.
Args:
line -- A single line of data to write to the socket.
"""
if not line.endswith('\r\n'):
if line.endswith('\n'):
logger.debug('Fixing bare LF before sending data to socket')
line = line[0:-1] + '\r\n'
else:
logger.debug(
'Fixing missing CRLF before sending data to socket')
line = line + '\r\n'
logger.debug('Client sent: ' + line.rstrip())
self._socket.send(line)
def _read(self):
"""
Read a single response line from the server.
"""
line = ''
finished = False
while not finished:
char = self._socket.recv(1)
if char == '':
return ''
elif char == '\r':
continue
elif char == '\n':
finished = True
continue
else:
line = line + char
logger.debug('Server sent: ' + line.rstrip())
return line
def _peek(self, chars=1):
"""
Peek at the data in the server response.
Peeking should only be done when the response can be predicted.
Make sure that the socket will not block by requesting too
much data from it while peeking.
Args:
chars -- the number of characters to peek.
"""
line = self._socket.recv(chars, socket.MSG_PEEK)
logger.debug('Server sent (peek): ' + line.rstrip())
return line
def connect(self):
"""
Connect to TCP or domain socket, and process the server LMTP greeting.
"""
# extract proto from socket setting
try:
(proto, spec) = self.socket.split(':')
except ValueError:
raise DspamClientError(
'Failed to parse DSPAM socket specification, '
'no proto found: ' + self.socket)
if proto == 'unix':
# connect to UNIX domain socket
try:
self._socket = socket.socket(
socket.AF_UNIX, socket.SOCK_STREAM)
self._socket.connect(spec)
except socket.error as err:
self._socket = None
raise DspamClientError(
'Failed to connect to DSPAM server '
'at socket {}: {}'.format(spec, err))
logger.debug('Connected to DSPAM server at socket {}'.format(spec))
elif proto == 'inet' or proto == 'inet6':
# connect to TCP socket
try:
(port, host) = spec.split('@')
port = int(port)
if host == '':
host = 'localhost'
except ValueError:
port = int(spec)
host = 'localhost'
try:
self._socket = socket.socket(
socket.AF_INET, socket.SOCK_STREAM)
self._socket.connect((host, port))
except socket.error as err:
self._socket = None
raise DspamClientError(
'Failed to connect to DSPAM server at host {} '
'port {}: {}'.format(host, port, err))
logger.debug(
'Connected to DSPAM server at host {}, port {}'.format(
host, port))
else:
raise DspamClientError(
'Failed to parse DSPAM socket specification, '
'unknown proto ' + proto)
resp = self._read()
if not resp.startswith('220'):
raise DspamClientError(
'Unexpected server response at connect: ' + resp)
def lhlo(self):
"""
Send LMTP LHLO greeting, and process the server response.
A regular LMTP greeting is sent, and if accepted by the server, the
capabilities it returns are parsed.
DLMTP authentication starts here by announcing the dlmtp_ident in
the LHLO as our hostname. When the ident is accepted and DLMTP
mode is enabled (dspam.conf: ServerMode=dspam|auto), the
DSPAMPROCESSMODE capability is announced by the server.
When this capability is detected, the <DspamClient>.dlmtp flag
will be enabled.
"""
if self.dlmtp_ident is not None:
host = self.dlmtp_ident
else:
host = socket.getfqdn()
self._send('LHLO ' + host + '\r\n')
finished = False
while not finished:
resp = self._read()
if not resp.startswith('250'):
raise DspamClientError(
'Unexpected server response at LHLO: ' + resp)
if resp[4:20] == 'DSPAMPROCESSMODE':
self.dlmtp = True
logger.debug('Detected DLMTP extension in LHLO response')
if resp[3] == ' ':
# difference between "250-8BITMIME" and "250 SIZE"
finished = True
def mailfrom(self, sender=None, client_args=None):
"""
Send LMTP MAIL FROM command, and process the server response.
In DLMTP mode, the server expects the client to identify itself.
Because the envelope sender is of no importance to DSPAM, the client
is expected to send an identity and a password (dspam.conf:
ServerPass.<ident>="<password>") in stead of the actual sender.
When you need want DSPAM to deliver the message itself and need to
pass the server an actual envelope sender for that, add the
--mail-from parameter in client_args.
When the server is setup in LMTP mode only (dspam.conf:
ServerMode=standard), the envelope sender is a regular envelope
sender, and is re-used when delivering the message after processing.
Client args
===========
When in DLMTP mode (and with proper auth credentials), the server
accepts parameters specified by the client. These are in the form
as they are passed to the command-line 'dspam' program.
See man dspam(1) for details, and the process() or classify() methods
in this class for simple examples.
Args:
sender -- The envelope sender to use in LMTP mode.
client_args -- DSPAM parameters to pass to the server in DLMTP mode.
"""
if sender and client_args:
raise DspamClientError('Arguments are mutually exclusive')
if client_args and not self.dlmtp:
raise DspamClientError(
'Cannot send client args, server does not support DLMTP')
command = 'MAIL FROM:'
if not sender:
if self.dlmtp_ident and self.dlmtp_pass:
sender = self.dlmtp_pass + '@' + self.dlmtp_ident
else:
sender = ''
command = command + '<' + sender + '>'
if client_args:
command = command + ' DSPAMPROCESSMODE="{}"'.format(client_args)
self._send(command + '\r\n')
resp = self._read()
if not resp.startswith('250'):
raise DspamClientError(
'Unexpected server response at MAIL FROM: ' + resp)
def rcptto(self, recipients):
"""
Send LMTP RCPT TO command, and process the server response.
The DSPAM server expects to find one or more valid DSPAM users as
envelope recipients. The set recipient will be the user DSPAM
processes mail for.
When you need want DSPAM to deliver the message itself, and need to
pass the server an envelope recipient for this that differs from the
DSPAM user account name, use the --rcpt-to parameter in client_args
at mailfrom().
args:
recipients -- A list of recipients
"""
for rcpt in recipients:
self._send('RCPT TO:<{}>\r\n'.format(rcpt))
resp = self._read()
if not resp.startswith('250'):
raise DspamClientError(
'Unexpected server response at RCPT TO for '
'recipient {}: {}'.format(rcpt, resp))
self._recipients.append(rcpt)
def data(self, message):
"""
Send LMTP DATA command and process the server response.
The server response is stored as a list of dicts in
<DspamClient>.results, keyed on the recipient name(s). Depending
on the server return data, different formats are available:
* LMTP mode -- Dict containing 'accepted', a bool indicating
that the message was handed to the server.
* Summary mode -- Dict containing 'username', 'result',
'classification', 'probability', 'confidence'
and 'signature'.
* Stdout mode -- Dict containing 'result' and 'message', the
complete message payload including added headers.
The return data is always parsed and stored, independent of its format.
If you requested a regular LMTP response, but the server
responded with an DLMTP summary, the summary is still stored in
<DspamClient>.results, and you will need to check the result format
yourself and decide whether that was acceptable for your use case.
This is due to the fact that it's possible to configure the server to
return non LMTP responses, even when in LMTP mode (see dspam.conf:
ServerParameters).
Note: while processing response data in stdout mode, it's not possible
to relate the returned messages to a specific recipient, when multiple
recipients were specified in rcptto(). There is no guarantee
that the message stored in <DspamClient>.results['foo'] actually
belongs to the recipient 'foo'. If this relationship needs to be
guaranteed, send each message with a single recipient in rcptto().
args:
message -- The full message payload to pass to the server.
"""
self._send('DATA\r\n')
resp = self._read()
if not resp.startswith('354'):
raise DspamClientError(
'Unexpected server response at DATA: ' + resp)
# Send message payload
for line in message.split('\n'):
if line == '.':
# Dot stuffing
line = '..'
self._send(line)
# Send end-of-data
self._send('.\r\n')
# Depending on server configuration, several responses are possible:
# * Standard LMTP response code, once for each recipient:
# 250 2.6.0 <bar> Message accepted for delivery
# * Summary response (--deliver=summary), once for each recipient:
# X-DSPAM-Result: bar; result="Spam"; class="Spam"; \
# probability=1.0000; confidence=0.85; \
# signature=50c50c0f315636261418125
# (after the last summary line, a single dot is sent)
# * Stdout response (--delivery=stdout), once for each recipient:
# X-Daemon-Classification: INNOCENT
# <complete mail body>
#
# Note that when an unknown recipient is passed in, DSPAM will simply
# deliver the message (dspam.conf: (Un)TrustedDeliveryAgent,
# DeliveryHost) unaltered and unfiltered. The response for unknown
# recipients will still be something indicating 'accepted'.
peek = self._peek(24)
if peek.startswith('250'):
# Response is LTMP formatted
regex = re.compile('250 \d\.\d\.\d <([^>]+)>')
finished = False
while not finished:
resp = self._read()
match = regex.match(resp)
if not match:
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
rcpt = match.group(1)
self.results[rcpt] = {'accepted': True}
logger.debug(
'Message accepted for recipient {} in LMTP mode'.format(
rcpt))
if len(self.results) == len(self._recipients):
finished = True
elif peek.startswith('X-DSPAM-Result:'):
# Response is in summary format
regex = re.compile('X-DSPAM-Result: ([^;]+); result="(\w+)"; '
'class="(\w+)"; probability=([\d\.]+); '
'confidence=([\d\.]+); signature=([\w,/]+)')
finished = False
while not finished:
resp = self._read()
match = regex.match(resp)
if not match:
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
rcpt = match.group(1)
# map results to their DSPAM classification result names
fields = ('user', 'result', 'class',
'probability', 'confidence', 'signature')
self.results[rcpt] = dict(zip(fields, match.groups()))
if self.results[rcpt]['signature'] == 'N/A':
del(self.results[rcpt]['signature'])
logger.debug(
'Message handled for recipient {} in DLMTP summary mode, '
'result is {}'.format(rcpt, match.group(2)))
if len(self.results) == len(self._recipients):
# we received responses for all accepted recipients
finished = True
# read final dot
resp = self._read()
if resp != '.':
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
elif peek.startswith('X-Daemon-Classification:'):
# Response is in stdout format
finished = False
message = ''
while not finished:
resp = self._read()
if resp.startswith('X-Daemon-Classification:'):
if message != '':
# A new message body starts, store the previous one
rcpt = self._recipients.pop(0)
self.results[rcpt] = {
'result': result,
'message': message
}
logger.debug(
'Message handled for recipient {} in DLMTP '
'stdout mode, result is {}, message body '
'is {} chars'.format(rcpt, result, len(message)))
message = ''
# Remember next result
result = resp[25:]
elif resp == '.':
# A single dot can signal end-of-data, or might be just
# regular mail data.
self._socket.setblocking(False)
try:
# If _peek() succeeds, we did not reach end-of-data yet
# so it was message content.
peek = self._peek(1)
message = message + '\r\n' + resp
except socket.error:
# reached end-of-data, store message and finish
finished = True
rcpt = self._recipients.pop(0)
# strip final newline
message = message[0:-2]
self.results[rcpt] = {
'result': result,
'message': message
}
logger.debug(
'Message accepted for recipient {} in DLMTP '
'stdout mode, result is {}, message body '
'is {} chars'.format(rcpt, result, len(message)))
self._socket.setblocking(True)
else:
# regular message data
if message == '':
message = resp
else:
message = message + '\r\n' + resp
else:
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
def quit(self):
"""
Send LMTP QUIT command, read the server response and disconnect.
"""
self._send('QUIT\r\n')
resp = self._read()
if not resp.startswith('221'):
logger.warning('Unexpected server response at QUIT: ' + resp)
self._socket.close()
self._socket = None
self._recipients = []
self.results = {}
def process(self, message, user):
"""
Process a message.
"""
if not self._socket:
self.connect()
self.lhlo()
else:
self.rset()
if not self.dlmtp:
raise DspamClientError('DLMTP mode not available')
self.mailfrom(client_args='--process --deliver=summary')
self.rcptto((user,))
self.data(message)
# check for valid result format
if 'class' not in self.results[user]:
raise DspamClientError(
'Unexpected response format from server at END-OF-DATA, '
'an error occured')
return self.results[user]
def classify(self, message, user):
"""
Classify a message.
"""
if not self._socket:
self.connect()
self.lhlo()
else:
self.rset()
if not self.dlmtp:
raise DspamClientError('DLMTP mode not available')
self.mailfrom(client_args='--classify --deliver=summary')
self.rcptto((user,))
self.data(message)
# check for valid result format
if 'class' not in self.results[user]:
raise DspamClientError(
'Unexpected response format from server at END-OF-DATA, '
'an error occured')
return self.results[user]
def train(self, message, user, class_):
"""
Train DSPAM with a message.
"""
raise NotImplementedError
def retrain_message(self, message, class_, source='error'):
"""
Correct an invalid classification.
"""
raise NotImplementedError
def retrain_signature(self, signature, class_, source='error'):
"""
Correct an invalid classification.
"""
raise NotImplementedError
|
whyscream/dspam-milter | dspam/client.py | DspamClient.quit | python | def quit(self):
self._send('QUIT\r\n')
resp = self._read()
if not resp.startswith('221'):
logger.warning('Unexpected server response at QUIT: ' + resp)
self._socket.close()
self._socket = None
self._recipients = []
self.results = {} | Send LMTP QUIT command, read the server response and disconnect. | train | https://github.com/whyscream/dspam-milter/blob/f9939b718eed02cb7e56f8c5b921db4cfe1cd85a/dspam/client.py#L512-L524 | [
"def _send(self, line):\n \"\"\"\n Write a line of data to the server.\n\n Args:\n line -- A single line of data to write to the socket.\n\n \"\"\"\n if not line.endswith('\\r\\n'):\n if line.endswith('\\n'):\n logger.debug('Fixing bare LF before sending data to socket')\n ... | class DspamClient(object):
"""
A DSPAM client can be used to interact with a DSPAM server.
The client is able to speak to a DSPAM server over both a TCP or UNIX
domain socket exposed by a running DSPAM server, and interact with it
through its supported protocols: LMTP and DLMTP. The latter is an
enhanced version of LMTP to facilitate some options that are not possible
when using strict LMTP.
Some common DSPAM operations are included in this class, custom
operations can be built by creating a new LMTP dialog with the
low-level LMTP commands.
DSPAM server setup
==================
To use the client to speak with a DSPAM server, the server must be
configured to expose a TCP (dspam.conf: ServerHost, ServerPort) or
UNIX domain socket (dspam.conf: ServerDomainSocketPath).
The server can support mulitple modes (dspam.conf: ServerMode) for
interaction with connecting clients. Which mode you need, depends on
the operations you need to perform. Most of the time you'll want to
use DLMTP though, which means that you'll also need to setup
authentication (dspam.conf: ServerPass.<ident>).
Python DspamClient setup
========================
Each DspamClient instance needs to talk to a DSPAM server.
You need to specify the socket where DSPAM is listening when creating
a new instance. If you need to use DLMTP features (probably most of the
time), you also need to pass the ident and password.
"""
# Default configuration
socket = 'inet:24@localhost'
dlmtp_ident = None
dlmtp_pass = None
def __init__(self, socket=None, dlmtp_ident=None, dlmtp_pass=None):
"""
Initialize new DSPAM client.
The socket specifies where DSPAM is listening. Specify it in the form:
unix:PATH or inet:PORT[@HOST]. For example, the default UNIX domain
socket in dspam.conf would look like: unix:/var/run/dspam/dspam.sock,
and the default TCP socket: inet:24@localhost.
Args:
socket -- The socket on which DSPAM is listening.
dlmtp_ident -- The authentication identifier.
dlmtp_pass -- The authentication password.
"""
if socket is not None:
self.socket = socket
if dlmtp_ident is not None:
self.dlmtp_ident = dlmtp_ident
if dlmtp_pass is not None:
self.dlmtp_pass = dlmtp_pass
self.dlmtp = False
self.results = {}
# Some internal structures
self._socket = None
self._recipients = []
def __del__(self):
"""
Destroy the DSPAM client object.
"""
if self._socket:
self.quit()
def _send(self, line):
"""
Write a line of data to the server.
Args:
line -- A single line of data to write to the socket.
"""
if not line.endswith('\r\n'):
if line.endswith('\n'):
logger.debug('Fixing bare LF before sending data to socket')
line = line[0:-1] + '\r\n'
else:
logger.debug(
'Fixing missing CRLF before sending data to socket')
line = line + '\r\n'
logger.debug('Client sent: ' + line.rstrip())
self._socket.send(line)
def _read(self):
"""
Read a single response line from the server.
"""
line = ''
finished = False
while not finished:
char = self._socket.recv(1)
if char == '':
return ''
elif char == '\r':
continue
elif char == '\n':
finished = True
continue
else:
line = line + char
logger.debug('Server sent: ' + line.rstrip())
return line
def _peek(self, chars=1):
"""
Peek at the data in the server response.
Peeking should only be done when the response can be predicted.
Make sure that the socket will not block by requesting too
much data from it while peeking.
Args:
chars -- the number of characters to peek.
"""
line = self._socket.recv(chars, socket.MSG_PEEK)
logger.debug('Server sent (peek): ' + line.rstrip())
return line
def connect(self):
"""
Connect to TCP or domain socket, and process the server LMTP greeting.
"""
# extract proto from socket setting
try:
(proto, spec) = self.socket.split(':')
except ValueError:
raise DspamClientError(
'Failed to parse DSPAM socket specification, '
'no proto found: ' + self.socket)
if proto == 'unix':
# connect to UNIX domain socket
try:
self._socket = socket.socket(
socket.AF_UNIX, socket.SOCK_STREAM)
self._socket.connect(spec)
except socket.error as err:
self._socket = None
raise DspamClientError(
'Failed to connect to DSPAM server '
'at socket {}: {}'.format(spec, err))
logger.debug('Connected to DSPAM server at socket {}'.format(spec))
elif proto == 'inet' or proto == 'inet6':
# connect to TCP socket
try:
(port, host) = spec.split('@')
port = int(port)
if host == '':
host = 'localhost'
except ValueError:
port = int(spec)
host = 'localhost'
try:
self._socket = socket.socket(
socket.AF_INET, socket.SOCK_STREAM)
self._socket.connect((host, port))
except socket.error as err:
self._socket = None
raise DspamClientError(
'Failed to connect to DSPAM server at host {} '
'port {}: {}'.format(host, port, err))
logger.debug(
'Connected to DSPAM server at host {}, port {}'.format(
host, port))
else:
raise DspamClientError(
'Failed to parse DSPAM socket specification, '
'unknown proto ' + proto)
resp = self._read()
if not resp.startswith('220'):
raise DspamClientError(
'Unexpected server response at connect: ' + resp)
def lhlo(self):
"""
Send LMTP LHLO greeting, and process the server response.
A regular LMTP greeting is sent, and if accepted by the server, the
capabilities it returns are parsed.
DLMTP authentication starts here by announcing the dlmtp_ident in
the LHLO as our hostname. When the ident is accepted and DLMTP
mode is enabled (dspam.conf: ServerMode=dspam|auto), the
DSPAMPROCESSMODE capability is announced by the server.
When this capability is detected, the <DspamClient>.dlmtp flag
will be enabled.
"""
if self.dlmtp_ident is not None:
host = self.dlmtp_ident
else:
host = socket.getfqdn()
self._send('LHLO ' + host + '\r\n')
finished = False
while not finished:
resp = self._read()
if not resp.startswith('250'):
raise DspamClientError(
'Unexpected server response at LHLO: ' + resp)
if resp[4:20] == 'DSPAMPROCESSMODE':
self.dlmtp = True
logger.debug('Detected DLMTP extension in LHLO response')
if resp[3] == ' ':
# difference between "250-8BITMIME" and "250 SIZE"
finished = True
def mailfrom(self, sender=None, client_args=None):
"""
Send LMTP MAIL FROM command, and process the server response.
In DLMTP mode, the server expects the client to identify itself.
Because the envelope sender is of no importance to DSPAM, the client
is expected to send an identity and a password (dspam.conf:
ServerPass.<ident>="<password>") in stead of the actual sender.
When you need want DSPAM to deliver the message itself and need to
pass the server an actual envelope sender for that, add the
--mail-from parameter in client_args.
When the server is setup in LMTP mode only (dspam.conf:
ServerMode=standard), the envelope sender is a regular envelope
sender, and is re-used when delivering the message after processing.
Client args
===========
When in DLMTP mode (and with proper auth credentials), the server
accepts parameters specified by the client. These are in the form
as they are passed to the command-line 'dspam' program.
See man dspam(1) for details, and the process() or classify() methods
in this class for simple examples.
Args:
sender -- The envelope sender to use in LMTP mode.
client_args -- DSPAM parameters to pass to the server in DLMTP mode.
"""
if sender and client_args:
raise DspamClientError('Arguments are mutually exclusive')
if client_args and not self.dlmtp:
raise DspamClientError(
'Cannot send client args, server does not support DLMTP')
command = 'MAIL FROM:'
if not sender:
if self.dlmtp_ident and self.dlmtp_pass:
sender = self.dlmtp_pass + '@' + self.dlmtp_ident
else:
sender = ''
command = command + '<' + sender + '>'
if client_args:
command = command + ' DSPAMPROCESSMODE="{}"'.format(client_args)
self._send(command + '\r\n')
resp = self._read()
if not resp.startswith('250'):
raise DspamClientError(
'Unexpected server response at MAIL FROM: ' + resp)
def rcptto(self, recipients):
"""
Send LMTP RCPT TO command, and process the server response.
The DSPAM server expects to find one or more valid DSPAM users as
envelope recipients. The set recipient will be the user DSPAM
processes mail for.
When you need want DSPAM to deliver the message itself, and need to
pass the server an envelope recipient for this that differs from the
DSPAM user account name, use the --rcpt-to parameter in client_args
at mailfrom().
args:
recipients -- A list of recipients
"""
for rcpt in recipients:
self._send('RCPT TO:<{}>\r\n'.format(rcpt))
resp = self._read()
if not resp.startswith('250'):
raise DspamClientError(
'Unexpected server response at RCPT TO for '
'recipient {}: {}'.format(rcpt, resp))
self._recipients.append(rcpt)
def data(self, message):
"""
Send LMTP DATA command and process the server response.
The server response is stored as a list of dicts in
<DspamClient>.results, keyed on the recipient name(s). Depending
on the server return data, different formats are available:
* LMTP mode -- Dict containing 'accepted', a bool indicating
that the message was handed to the server.
* Summary mode -- Dict containing 'username', 'result',
'classification', 'probability', 'confidence'
and 'signature'.
* Stdout mode -- Dict containing 'result' and 'message', the
complete message payload including added headers.
The return data is always parsed and stored, independent of its format.
If you requested a regular LMTP response, but the server
responded with an DLMTP summary, the summary is still stored in
<DspamClient>.results, and you will need to check the result format
yourself and decide whether that was acceptable for your use case.
This is due to the fact that it's possible to configure the server to
return non LMTP responses, even when in LMTP mode (see dspam.conf:
ServerParameters).
Note: while processing response data in stdout mode, it's not possible
to relate the returned messages to a specific recipient, when multiple
recipients were specified in rcptto(). There is no guarantee
that the message stored in <DspamClient>.results['foo'] actually
belongs to the recipient 'foo'. If this relationship needs to be
guaranteed, send each message with a single recipient in rcptto().
args:
message -- The full message payload to pass to the server.
"""
self._send('DATA\r\n')
resp = self._read()
if not resp.startswith('354'):
raise DspamClientError(
'Unexpected server response at DATA: ' + resp)
# Send message payload
for line in message.split('\n'):
if line == '.':
# Dot stuffing
line = '..'
self._send(line)
# Send end-of-data
self._send('.\r\n')
# Depending on server configuration, several responses are possible:
# * Standard LMTP response code, once for each recipient:
# 250 2.6.0 <bar> Message accepted for delivery
# * Summary response (--deliver=summary), once for each recipient:
# X-DSPAM-Result: bar; result="Spam"; class="Spam"; \
# probability=1.0000; confidence=0.85; \
# signature=50c50c0f315636261418125
# (after the last summary line, a single dot is sent)
# * Stdout response (--delivery=stdout), once for each recipient:
# X-Daemon-Classification: INNOCENT
# <complete mail body>
#
# Note that when an unknown recipient is passed in, DSPAM will simply
# deliver the message (dspam.conf: (Un)TrustedDeliveryAgent,
# DeliveryHost) unaltered and unfiltered. The response for unknown
# recipients will still be something indicating 'accepted'.
peek = self._peek(24)
if peek.startswith('250'):
# Response is LTMP formatted
regex = re.compile('250 \d\.\d\.\d <([^>]+)>')
finished = False
while not finished:
resp = self._read()
match = regex.match(resp)
if not match:
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
rcpt = match.group(1)
self.results[rcpt] = {'accepted': True}
logger.debug(
'Message accepted for recipient {} in LMTP mode'.format(
rcpt))
if len(self.results) == len(self._recipients):
finished = True
elif peek.startswith('X-DSPAM-Result:'):
# Response is in summary format
regex = re.compile('X-DSPAM-Result: ([^;]+); result="(\w+)"; '
'class="(\w+)"; probability=([\d\.]+); '
'confidence=([\d\.]+); signature=([\w,/]+)')
finished = False
while not finished:
resp = self._read()
match = regex.match(resp)
if not match:
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
rcpt = match.group(1)
# map results to their DSPAM classification result names
fields = ('user', 'result', 'class',
'probability', 'confidence', 'signature')
self.results[rcpt] = dict(zip(fields, match.groups()))
if self.results[rcpt]['signature'] == 'N/A':
del(self.results[rcpt]['signature'])
logger.debug(
'Message handled for recipient {} in DLMTP summary mode, '
'result is {}'.format(rcpt, match.group(2)))
if len(self.results) == len(self._recipients):
# we received responses for all accepted recipients
finished = True
# read final dot
resp = self._read()
if resp != '.':
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
elif peek.startswith('X-Daemon-Classification:'):
# Response is in stdout format
finished = False
message = ''
while not finished:
resp = self._read()
if resp.startswith('X-Daemon-Classification:'):
if message != '':
# A new message body starts, store the previous one
rcpt = self._recipients.pop(0)
self.results[rcpt] = {
'result': result,
'message': message
}
logger.debug(
'Message handled for recipient {} in DLMTP '
'stdout mode, result is {}, message body '
'is {} chars'.format(rcpt, result, len(message)))
message = ''
# Remember next result
result = resp[25:]
elif resp == '.':
# A single dot can signal end-of-data, or might be just
# regular mail data.
self._socket.setblocking(False)
try:
# If _peek() succeeds, we did not reach end-of-data yet
# so it was message content.
peek = self._peek(1)
message = message + '\r\n' + resp
except socket.error:
# reached end-of-data, store message and finish
finished = True
rcpt = self._recipients.pop(0)
# strip final newline
message = message[0:-2]
self.results[rcpt] = {
'result': result,
'message': message
}
logger.debug(
'Message accepted for recipient {} in DLMTP '
'stdout mode, result is {}, message body '
'is {} chars'.format(rcpt, result, len(message)))
self._socket.setblocking(True)
else:
# regular message data
if message == '':
message = resp
else:
message = message + '\r\n' + resp
else:
raise DspamClientError(
'Unexpected server response at END-OF-DATA: ' + resp)
def rset(self):
"""
Send LMTP RSET command and process the server response.
"""
self._send('RSET\r\n')
resp = self._read()
if not resp.startswith('250'):
logger.warn('Unexpected server response at RSET: ' + resp)
self._recipients = []
self.results = {}
def process(self, message, user):
"""
Process a message.
"""
if not self._socket:
self.connect()
self.lhlo()
else:
self.rset()
if not self.dlmtp:
raise DspamClientError('DLMTP mode not available')
self.mailfrom(client_args='--process --deliver=summary')
self.rcptto((user,))
self.data(message)
# check for valid result format
if 'class' not in self.results[user]:
raise DspamClientError(
'Unexpected response format from server at END-OF-DATA, '
'an error occured')
return self.results[user]
def classify(self, message, user):
"""
Classify a message.
"""
if not self._socket:
self.connect()
self.lhlo()
else:
self.rset()
if not self.dlmtp:
raise DspamClientError('DLMTP mode not available')
self.mailfrom(client_args='--classify --deliver=summary')
self.rcptto((user,))
self.data(message)
# check for valid result format
if 'class' not in self.results[user]:
raise DspamClientError(
'Unexpected response format from server at END-OF-DATA, '
'an error occured')
return self.results[user]
def train(self, message, user, class_):
"""
Train DSPAM with a message.
"""
raise NotImplementedError
def retrain_message(self, message, class_, source='error'):
"""
Correct an invalid classification.
"""
raise NotImplementedError
def retrain_signature(self, signature, class_, source='error'):
"""
Correct an invalid classification.
"""
raise NotImplementedError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.