text stringlengths 0 1.05M | meta dict |
|---|---|
#A place for code to be called from C-code
# that implements more complicated stuff.
import re
import sys
#from _mx_datetime_parser import *
if (sys.byteorder == 'little'):
_nbo = '<'
else:
_nbo = '>'
def _makenames_list(adict):
from multiarray import dtype
allfields = []
fnames = adict.keys()
for fname in fnames:
obj = adict[fname]
n = len(obj)
if not isinstance(obj, tuple) or n not in [2,3]:
raise ValueError, "entry not a 2- or 3- tuple"
if (n > 2) and (obj[2] == fname):
continue
num = int(obj[1])
if (num < 0):
raise ValueError, "invalid offset."
format = dtype(obj[0])
if (format.itemsize == 0):
raise ValueError, "all itemsizes must be fixed."
if (n > 2):
title = obj[2]
else:
title = None
allfields.append((fname, format, num, title))
# sort by offsets
allfields.sort(lambda x,y: cmp(x[2],y[2]))
names = [x[0] for x in allfields]
formats = [x[1] for x in allfields]
offsets = [x[2] for x in allfields]
titles = [x[3] for x in allfields]
return names, formats, offsets, titles
# Called in PyArray_DescrConverter function when
# a dictionary without "names" and "formats"
# fields is used as a data-type descriptor.
def _usefields(adict, align):
from multiarray import dtype
try:
names = adict[-1]
except KeyError:
names = None
if names is None:
names, formats, offsets, titles = _makenames_list(adict)
else:
formats = []
offsets = []
titles = []
for name in names:
res = adict[name]
formats.append(res[0])
offsets.append(res[1])
if (len(res) > 2):
titles.append(res[2])
else:
titles.append(None)
return dtype({"names" : names,
"formats" : formats,
"offsets" : offsets,
"titles" : titles}, align)
# construct an array_protocol descriptor list
# from the fields attribute of a descriptor
# This calls itself recursively but should eventually hit
# a descriptor that has no fields and then return
# a simple typestring
def _array_descr(descriptor):
from multiarray import METADATA_DTSTR
fields = descriptor.fields
if fields is None:
subdtype = descriptor.subdtype
if subdtype is None:
if descriptor.metadata is None:
return descriptor.str
else:
new = descriptor.metadata.copy()
# Eliminate any key related to internal implementation
_ = new.pop(METADATA_DTSTR, None)
return (descriptor.str, new)
else:
return (_array_descr(subdtype[0]), subdtype[1])
names = descriptor.names
ordered_fields = [fields[x] + (x,) for x in names]
result = []
offset = 0
for field in ordered_fields:
if field[1] > offset:
num = field[1] - offset
result.append(('','|V%d' % num))
offset += num
if len(field) > 3:
name = (field[2],field[3])
else:
name = field[2]
if field[0].subdtype:
tup = (name, _array_descr(field[0].subdtype[0]),
field[0].subdtype[1])
else:
tup = (name, _array_descr(field[0]))
offset += field[0].itemsize
result.append(tup)
return result
# Build a new array from the information in a pickle.
# Note that the name numpy.core._internal._reconstruct is embedded in
# pickles of ndarrays made with NumPy before release 1.0
# so don't remove the name here, or you'll
# break backward compatibilty.
def _reconstruct(subtype, shape, dtype):
from multiarray import ndarray
return ndarray.__new__(subtype, shape, dtype)
# format_re and _split were taken from numarray by J. Todd Miller
def _split(input):
"""Split the input formats string into field formats without splitting
the tuple used to specify multi-dimensional arrays."""
newlist = []
hold = ''
listinput = input.split(',')
for element in listinput:
if hold != '':
item = hold + ',' + element
else:
item = element
left = item.count('(')
right = item.count(')')
# if the parenthesis is not balanced, hold the string
if left > right :
hold = item
# when balanced, append to the output list and reset the hold
elif left == right:
newlist.append(item.strip())
hold = ''
# too many close parenthesis is unacceptable
else:
raise SyntaxError, item
# if there is string left over in hold
if hold != '':
raise SyntaxError, hold
return newlist
format_datetime = re.compile(r"""(?P<typecode>M8|m8|datetime64|timedelta64)
([[]
((?P<num>\d+)?
(?P<baseunit>Y|M|W|B|D|h|m|s|ms|us|ns|ps|fs|as)
(/(?P<den>\d+))?
[]])
(//(?P<events>\d+))?)?""", re.X)
# Return (baseunit, num, den, events), datetime
# from date-time string
def _datetimestring(astr):
res = format_datetime.match(astr)
if res is None:
raise ValueError, "Incorrect date-time string."
typecode = res.group('typecode')
datetime = (typecode == 'M8' or typecode == 'datetime64')
defaults = ['us', 1, 1, 1]
names = ['baseunit', 'num', 'den', 'events']
func = [str, int, int, int]
dt_tuple = []
for i, name in enumerate(names):
value = res.group(name)
if value:
dt_tuple.append(func[i](value))
else:
dt_tuple.append(defaults[i])
return tuple(dt_tuple), datetime
format_re = re.compile(r'(?P<order1>[<>|=]?)(?P<repeats> *[(]?[ ,0-9]*[)]? *)(?P<order2>[<>|=]?)(?P<dtype>[A-Za-z0-9.]*)')
# astr is a string (perhaps comma separated)
_convorder = {'=': _nbo,
'|': '|',
'>': '>',
'<': '<'}
def _commastring(astr):
res = _split(astr)
if (len(res)) < 1:
raise ValueError, "unrecognized formant"
result = []
for k,item in enumerate(res):
# convert item
try:
(order1, repeats, order2, dtype) = format_re.match(item).groups()
except (TypeError, AttributeError):
raise ValueError('format %s is not recognized' % item)
if order2 == '':
order = order1
elif order1 == '':
order = order2
else:
order1 = _convorder[order1]
order2 = _convorder[order2]
if (order1 != order2):
raise ValueError('in-consistent byte-order specification %s and %s' % (order1, order2))
order = order1
if order in ['|', '=', _nbo]:
order = ''
dtype = '%s%s' % (order, dtype)
if (repeats == ''):
newitem = dtype
else:
newitem = (dtype, eval(repeats))
result.append(newitem)
return result
def _getintp_ctype():
from multiarray import dtype
val = _getintp_ctype.cache
if val is not None:
return val
char = dtype('p').char
import ctypes
if (char == 'i'):
val = ctypes.c_int
elif char == 'l':
val = ctypes.c_long
elif char == 'q':
val = ctypes.c_longlong
else:
val = ctypes.c_long
_getintp_ctype.cache = val
return val
_getintp_ctype.cache = None
# Used for .ctypes attribute of ndarray
class _missing_ctypes(object):
def cast(self, num, obj):
return num
def c_void_p(self, num):
return num
class _ctypes(object):
def __init__(self, array, ptr=None):
try:
import ctypes
self._ctypes = ctypes
except ImportError:
self._ctypes = _missing_ctypes()
self._arr = array
self._data = ptr
if self._arr.ndim == 0:
self._zerod = True
else:
self._zerod = False
def data_as(self, obj):
return self._ctypes.cast(self._data, obj)
def shape_as(self, obj):
if self._zerod:
return None
return (obj*self._arr.ndim)(*self._arr.shape)
def strides_as(self, obj):
if self._zerod:
return None
return (obj*self._arr.ndim)(*self._arr.strides)
def get_data(self):
return self._data
def get_shape(self):
if self._zerod:
return None
return (_getintp_ctype()*self._arr.ndim)(*self._arr.shape)
def get_strides(self):
if self._zerod:
return None
return (_getintp_ctype()*self._arr.ndim)(*self._arr.strides)
def get_as_parameter(self):
return self._ctypes.c_void_p(self._data)
data = property(get_data, None, doc="c-types data")
shape = property(get_shape, None, doc="c-types shape")
strides = property(get_strides, None, doc="c-types strides")
_as_parameter_ = property(get_as_parameter, None, doc="_as parameter_")
# Given a datatype and an order object
# return a new names tuple
# with the order indicated
def _newnames(datatype, order):
oldnames = datatype.names
nameslist = list(oldnames)
if isinstance(order, str):
order = [order]
if isinstance(order, (list, tuple)):
for name in order:
try:
nameslist.remove(name)
except ValueError:
raise ValueError, "unknown field name: %s" % (name,)
return tuple(list(order) + nameslist)
raise ValueError, "unsupported order value: %s" % (order,)
# Given an array with fields and a sequence of field names
# construct a new array with just those fields copied over
def _index_fields(ary, fields):
from multiarray import empty, dtype
dt = ary.dtype
new_dtype = [(name, dt[name]) for name in dt.names if name in fields]
if ary.flags.f_contiguous:
order = 'F'
else:
order = 'C'
newarray = empty(ary.shape, dtype=new_dtype, order=order)
for name in fields:
newarray[name] = ary[name]
return newarray
| {
"repo_name": "chadnetzer/numpy-gaurdro",
"path": "numpy/core/_internal.py",
"copies": "3",
"size": "10368",
"license": "bsd-3-clause",
"hash": -2971847423534166500,
"line_mean": 28.7077363897,
"line_max": 122,
"alpha_frac": 0.5529513889,
"autogenerated": false,
"ratio": 3.8033749082905355,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5856326297190536,
"avg_score": null,
"num_lines": null
} |
#A place for code to be called from C-code
# that implements more complicated stuff.
import re
import sys
from _mx_datetime_parser import *
if (sys.byteorder == 'little'):
_nbo = '<'
else:
_nbo = '>'
def _makenames_list(adict):
from multiarray import dtype
allfields = []
fnames = adict.keys()
for fname in fnames:
obj = adict[fname]
n = len(obj)
if not isinstance(obj, tuple) or n not in [2,3]:
raise ValueError, "entry not a 2- or 3- tuple"
if (n > 2) and (obj[2] == fname):
continue
num = int(obj[1])
if (num < 0):
raise ValueError, "invalid offset."
format = dtype(obj[0])
if (format.itemsize == 0):
raise ValueError, "all itemsizes must be fixed."
if (n > 2):
title = obj[2]
else:
title = None
allfields.append((fname, format, num, title))
# sort by offsets
allfields.sort(lambda x,y: cmp(x[2],y[2]))
names = [x[0] for x in allfields]
formats = [x[1] for x in allfields]
offsets = [x[2] for x in allfields]
titles = [x[3] for x in allfields]
return names, formats, offsets, titles
# Called in PyArray_DescrConverter function when
# a dictionary without "names" and "formats"
# fields is used as a data-type descriptor.
def _usefields(adict, align):
from multiarray import dtype
try:
names = adict[-1]
except KeyError:
names = None
if names is None:
names, formats, offsets, titles = _makenames_list(adict)
else:
formats = []
offsets = []
titles = []
for name in names:
res = adict[name]
formats.append(res[0])
offsets.append(res[1])
if (len(res) > 2):
titles.append(res[2])
else:
titles.append(None)
return dtype({"names" : names,
"formats" : formats,
"offsets" : offsets,
"titles" : titles}, align)
# construct an array_protocol descriptor list
# from the fields attribute of a descriptor
# This calls itself recursively but should eventually hit
# a descriptor that has no fields and then return
# a simple typestring
def _array_descr(descriptor):
from multiarray import METADATA_DTSTR
fields = descriptor.fields
if fields is None:
subdtype = descriptor.subdtype
if subdtype is None:
if descriptor.metadata is None:
return descriptor.str
else:
new = descriptor.metadata.copy()
# Eliminate any key related to internal implementation
_ = new.pop(METADATA_DTSTR, None)
return (descriptor.str, new)
else:
return (_array_descr(subdtype[0]), subdtype[1])
names = descriptor.names
ordered_fields = [fields[x] + (x,) for x in names]
result = []
offset = 0
for field in ordered_fields:
if field[1] > offset:
num = field[1] - offset
result.append(('','|V%d' % num))
offset += num
if len(field) > 3:
name = (field[2],field[3])
else:
name = field[2]
if field[0].subdtype:
tup = (name, _array_descr(field[0].subdtype[0]),
field[0].subdtype[1])
else:
tup = (name, _array_descr(field[0]))
offset += field[0].itemsize
result.append(tup)
return result
# Build a new array from the information in a pickle.
# Note that the name numpy.core._internal._reconstruct is embedded in
# pickles of ndarrays made with NumPy before release 1.0
# so don't remove the name here, or you'll
# break backward compatibilty.
def _reconstruct(subtype, shape, dtype):
from multiarray import ndarray
return ndarray.__new__(subtype, shape, dtype)
# format_re and _split were taken from numarray by J. Todd Miller
def _split(input):
"""Split the input formats string into field formats without splitting
the tuple used to specify multi-dimensional arrays."""
newlist = []
hold = ''
listinput = input.split(',')
for element in listinput:
if hold != '':
item = hold + ',' + element
else:
item = element
left = item.count('(')
right = item.count(')')
# if the parenthesis is not balanced, hold the string
if left > right :
hold = item
# when balanced, append to the output list and reset the hold
elif left == right:
newlist.append(item.strip())
hold = ''
# too many close parenthesis is unacceptable
else:
raise SyntaxError, item
# if there is string left over in hold
if hold != '':
raise SyntaxError, hold
return newlist
format_datetime = re.compile(r"""(?P<typecode>M8|m8|datetime64|timedelta64)
([[]
((?P<num>\d+)?
(?P<baseunit>Y|M|W|B|D|h|m|s|ms|us|ns|ps|fs|as)
(/(?P<den>\d+))?
[]])
(//(?P<events>\d+))?)?""", re.X)
# Return (baseunit, num, den, events), datetime
# from date-time string
def _datetimestring(astr):
res = format_datetime.match(astr)
if res is None:
raise ValueError, "Incorrect date-time string."
typecode = res.group('typecode')
datetime = (typecode == 'M8' or typecode == 'datetime64')
defaults = ['us', 1, 1, 1]
names = ['baseunit', 'num', 'den', 'events']
func = [str, int, int, int]
dt_tuple = []
for i, name in enumerate(names):
value = res.group(name)
if value:
dt_tuple.append(func[i](value))
else:
dt_tuple.append(defaults[i])
return tuple(dt_tuple), datetime
format_re = re.compile(r'(?P<order1>[<>|=]?)(?P<repeats> *[(]?[ ,0-9]*[)]? *)(?P<order2>[<>|=]?)(?P<dtype>[A-Za-z0-9.]*)')
# astr is a string (perhaps comma separated)
_convorder = {'=': _nbo,
'|': '|',
'>': '>',
'<': '<'}
def _commastring(astr):
res = _split(astr)
if (len(res)) < 1:
raise ValueError, "unrecognized formant"
result = []
for k,item in enumerate(res):
# convert item
try:
(order1, repeats, order2, dtype) = format_re.match(item).groups()
except (TypeError, AttributeError):
raise ValueError('format %s is not recognized' % item)
if order2 == '':
order = order1
elif order1 == '':
order = order2
else:
order1 = _convorder[order1]
order2 = _convorder[order2]
if (order1 != order2):
raise ValueError('in-consistent byte-order specification %s and %s' % (order1, order2))
order = order1
if order in ['|', '=', _nbo]:
order = ''
dtype = '%s%s' % (order, dtype)
if (repeats == ''):
newitem = dtype
else:
newitem = (dtype, eval(repeats))
result.append(newitem)
return result
def _getintp_ctype():
from multiarray import dtype
val = _getintp_ctype.cache
if val is not None:
return val
char = dtype('p').char
import ctypes
if (char == 'i'):
val = ctypes.c_int
elif char == 'l':
val = ctypes.c_long
elif char == 'q':
val = ctypes.c_longlong
else:
val = ctypes.c_long
_getintp_ctype.cache = val
return val
_getintp_ctype.cache = None
# Used for .ctypes attribute of ndarray
class _missing_ctypes(object):
def cast(self, num, obj):
return num
def c_void_p(self, num):
return num
class _ctypes(object):
def __init__(self, array, ptr=None):
try:
import ctypes
self._ctypes = ctypes
except ImportError:
self._ctypes = _missing_ctypes()
self._arr = array
self._data = ptr
if self._arr.ndim == 0:
self._zerod = True
else:
self._zerod = False
def data_as(self, obj):
return self._ctypes.cast(self._data, obj)
def shape_as(self, obj):
if self._zerod:
return None
return (obj*self._arr.ndim)(*self._arr.shape)
def strides_as(self, obj):
if self._zerod:
return None
return (obj*self._arr.ndim)(*self._arr.strides)
def get_data(self):
return self._data
def get_shape(self):
if self._zerod:
return None
return (_getintp_ctype()*self._arr.ndim)(*self._arr.shape)
def get_strides(self):
if self._zerod:
return None
return (_getintp_ctype()*self._arr.ndim)(*self._arr.strides)
def get_as_parameter(self):
return self._ctypes.c_void_p(self._data)
data = property(get_data, None, doc="c-types data")
shape = property(get_shape, None, doc="c-types shape")
strides = property(get_strides, None, doc="c-types strides")
_as_parameter_ = property(get_as_parameter, None, doc="_as parameter_")
# Given a datatype and an order object
# return a new names tuple
# with the order indicated
def _newnames(datatype, order):
oldnames = datatype.names
nameslist = list(oldnames)
if isinstance(order, str):
order = [order]
if isinstance(order, (list, tuple)):
for name in order:
try:
nameslist.remove(name)
except ValueError:
raise ValueError, "unknown field name: %s" % (name,)
return tuple(list(order) + nameslist)
raise ValueError, "unsupported order value: %s" % (order,)
# Given an array with fields and a sequence of field names
# construct a new array with just those fields copied over
def _index_fields(ary, fields):
from multiarray import empty, dtype
dt = ary.dtype
new_dtype = [(name, dt[name]) for name in dt.names if name in fields]
if ary.flags.f_contiguous:
order = 'F'
else:
order = 'C'
newarray = empty(ary.shape, dtype=new_dtype, order=order)
for name in fields:
newarray[name] = ary[name]
return newarray
| {
"repo_name": "illume/numpy3k",
"path": "numpy/core/_internal.py",
"copies": "1",
"size": "10367",
"license": "bsd-3-clause",
"hash": 9003052224486059000,
"line_mean": 28.7048710602,
"line_max": 122,
"alpha_frac": 0.5530047265,
"autogenerated": false,
"ratio": 3.8044036697247705,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48574083962247705,
"avg_score": null,
"num_lines": null
} |
#A place for code to be called from C-code
# that implements more complicated stuff.
import re
import sys
import warnings
from numpy.compat import asbytes, bytes
if (sys.byteorder == 'little'):
_nbo = asbytes('<')
else:
_nbo = asbytes('>')
def _makenames_list(adict, align):
from multiarray import dtype
allfields = []
fnames = adict.keys()
for fname in fnames:
obj = adict[fname]
n = len(obj)
if not isinstance(obj, tuple) or n not in [2,3]:
raise ValueError("entry not a 2- or 3- tuple")
if (n > 2) and (obj[2] == fname):
continue
num = int(obj[1])
if (num < 0):
raise ValueError("invalid offset.")
format = dtype(obj[0], align=align)
if (format.itemsize == 0):
raise ValueError("all itemsizes must be fixed.")
if (n > 2):
title = obj[2]
else:
title = None
allfields.append((fname, format, num, title))
# sort by offsets
allfields.sort(key=lambda x: x[2])
names = [x[0] for x in allfields]
formats = [x[1] for x in allfields]
offsets = [x[2] for x in allfields]
titles = [x[3] for x in allfields]
return names, formats, offsets, titles
# Called in PyArray_DescrConverter function when
# a dictionary without "names" and "formats"
# fields is used as a data-type descriptor.
def _usefields(adict, align):
from multiarray import dtype
try:
names = adict[-1]
except KeyError:
names = None
if names is None:
names, formats, offsets, titles = _makenames_list(adict, align)
else:
formats = []
offsets = []
titles = []
for name in names:
res = adict[name]
formats.append(res[0])
offsets.append(res[1])
if (len(res) > 2):
titles.append(res[2])
else:
titles.append(None)
return dtype({"names" : names,
"formats" : formats,
"offsets" : offsets,
"titles" : titles}, align)
# construct an array_protocol descriptor list
# from the fields attribute of a descriptor
# This calls itself recursively but should eventually hit
# a descriptor that has no fields and then return
# a simple typestring
def _array_descr(descriptor):
fields = descriptor.fields
if fields is None:
subdtype = descriptor.subdtype
if subdtype is None:
if descriptor.metadata is None:
return descriptor.str
else:
new = descriptor.metadata.copy()
if new:
return (descriptor.str, new)
else:
return descriptor.str
else:
return (_array_descr(subdtype[0]), subdtype[1])
names = descriptor.names
ordered_fields = [fields[x] + (x,) for x in names]
result = []
offset = 0
for field in ordered_fields:
if field[1] > offset:
num = field[1] - offset
result.append(('','|V%d' % num))
offset += num
if len(field) > 3:
name = (field[2],field[3])
else:
name = field[2]
if field[0].subdtype:
tup = (name, _array_descr(field[0].subdtype[0]),
field[0].subdtype[1])
else:
tup = (name, _array_descr(field[0]))
offset += field[0].itemsize
result.append(tup)
return result
# Build a new array from the information in a pickle.
# Note that the name numpy.core._internal._reconstruct is embedded in
# pickles of ndarrays made with NumPy before release 1.0
# so don't remove the name here, or you'll
# break backward compatibilty.
def _reconstruct(subtype, shape, dtype):
from multiarray import ndarray
return ndarray.__new__(subtype, shape, dtype)
# format_re was originally from numarray by J. Todd Miller
format_re = re.compile(asbytes(
r'(?P<order1>[<>|=]?)'
r'(?P<repeats> *[(]?[ ,0-9]*[)]? *)'
r'(?P<order2>[<>|=]?)'
r'(?P<dtype>[A-Za-z0-9.]*(?:\[[a-zA-Z0-9,.]+\])?)'))
sep_re = re.compile(asbytes(r'\s*,\s*'))
space_re = re.compile(asbytes(r'\s+$'))
# astr is a string (perhaps comma separated)
_convorder = {asbytes('='): _nbo}
def _commastring(astr):
startindex = 0
result = []
while startindex < len(astr):
mo = format_re.match(astr, pos=startindex)
try:
(order1, repeats, order2, dtype) = mo.groups()
except (TypeError, AttributeError):
raise ValueError('format number %d of "%s" is not recognized' %
(len(result)+1, astr))
startindex = mo.end()
# Separator or ending padding
if startindex < len(astr):
if space_re.match(astr, pos=startindex):
startindex = len(astr)
else:
mo = sep_re.match(astr, pos=startindex)
if not mo:
raise ValueError(
'format number %d of "%s" is not recognized' %
(len(result)+1, astr))
startindex = mo.end()
if order2 == asbytes(''):
order = order1
elif order1 == asbytes(''):
order = order2
else:
order1 = _convorder.get(order1, order1)
order2 = _convorder.get(order2, order2)
if (order1 != order2):
raise ValueError('inconsistent byte-order specification %s and %s' % (order1, order2))
order = order1
if order in [asbytes('|'), asbytes('='), _nbo]:
order = asbytes('')
dtype = order + dtype
if (repeats == asbytes('')):
newitem = dtype
else:
newitem = (dtype, eval(repeats))
result.append(newitem)
return result
def _getintp_ctype():
from multiarray import dtype
val = _getintp_ctype.cache
if val is not None:
return val
char = dtype('p').char
import ctypes
if (char == 'i'):
val = ctypes.c_int
elif char == 'l':
val = ctypes.c_long
elif char == 'q':
val = ctypes.c_longlong
else:
val = ctypes.c_long
_getintp_ctype.cache = val
return val
_getintp_ctype.cache = None
# Used for .ctypes attribute of ndarray
class _missing_ctypes(object):
def cast(self, num, obj):
return num
def c_void_p(self, num):
return num
class _ctypes(object):
def __init__(self, array, ptr=None):
try:
import ctypes
self._ctypes = ctypes
except ImportError:
self._ctypes = _missing_ctypes()
self._arr = array
self._data = ptr
if self._arr.ndim == 0:
self._zerod = True
else:
self._zerod = False
def data_as(self, obj):
return self._ctypes.cast(self._data, obj)
def shape_as(self, obj):
if self._zerod:
return None
return (obj*self._arr.ndim)(*self._arr.shape)
def strides_as(self, obj):
if self._zerod:
return None
return (obj*self._arr.ndim)(*self._arr.strides)
def get_data(self):
return self._data
def get_shape(self):
if self._zerod:
return None
return (_getintp_ctype()*self._arr.ndim)(*self._arr.shape)
def get_strides(self):
if self._zerod:
return None
return (_getintp_ctype()*self._arr.ndim)(*self._arr.strides)
def get_as_parameter(self):
return self._ctypes.c_void_p(self._data)
data = property(get_data, None, doc="c-types data")
shape = property(get_shape, None, doc="c-types shape")
strides = property(get_strides, None, doc="c-types strides")
_as_parameter_ = property(get_as_parameter, None, doc="_as parameter_")
# Given a datatype and an order object
# return a new names tuple
# with the order indicated
def _newnames(datatype, order):
oldnames = datatype.names
nameslist = list(oldnames)
if isinstance(order, str):
order = [order]
if isinstance(order, (list, tuple)):
for name in order:
try:
nameslist.remove(name)
except ValueError:
raise ValueError("unknown field name: %s" % (name,))
return tuple(list(order) + nameslist)
raise ValueError("unsupported order value: %s" % (order,))
# Given an array with fields and a sequence of field names
# construct a new array with just those fields copied over
def _index_fields(ary, fields):
from multiarray import empty, dtype
dt = ary.dtype
names = [name for name in fields if name in dt.names]
formats = [dt.fields[name][0] for name in fields if name in dt.names]
offsets = [dt.fields[name][1] for name in fields if name in dt.names]
view_dtype = {'names':names, 'formats':formats, 'offsets':offsets, 'itemsize':dt.itemsize}
view = ary.view(dtype=view_dtype)
return view.copy()
# Given a string containing a PEP 3118 format specifier,
# construct a Numpy dtype
_pep3118_native_map = {
'?': '?',
'b': 'b',
'B': 'B',
'h': 'h',
'H': 'H',
'i': 'i',
'I': 'I',
'l': 'l',
'L': 'L',
'q': 'q',
'Q': 'Q',
'e': 'e',
'f': 'f',
'd': 'd',
'g': 'g',
'Zf': 'F',
'Zd': 'D',
'Zg': 'G',
's': 'S',
'w': 'U',
'O': 'O',
'x': 'V', # padding
}
_pep3118_native_typechars = ''.join(_pep3118_native_map.keys())
_pep3118_standard_map = {
'?': '?',
'b': 'b',
'B': 'B',
'h': 'i2',
'H': 'u2',
'i': 'i4',
'I': 'u4',
'l': 'i4',
'L': 'u4',
'q': 'i8',
'Q': 'u8',
'e': 'f2',
'f': 'f',
'd': 'd',
'Zf': 'F',
'Zd': 'D',
's': 'S',
'w': 'U',
'O': 'O',
'x': 'V', # padding
}
_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys())
def _dtype_from_pep3118(spec, byteorder='@', is_subdtype=False):
from numpy.core.multiarray import dtype
fields = {}
offset = 0
explicit_name = False
this_explicit_name = False
common_alignment = 1
is_padding = False
last_offset = 0
dummy_name_index = [0]
def next_dummy_name():
dummy_name_index[0] += 1
def get_dummy_name():
while True:
name = 'f%d' % dummy_name_index[0]
if name not in fields:
return name
next_dummy_name()
# Parse spec
while spec:
value = None
# End of structure, bail out to upper level
if spec[0] == '}':
spec = spec[1:]
break
# Sub-arrays (1)
shape = None
if spec[0] == '(':
j = spec.index(')')
shape = tuple(map(int, spec[1:j].split(',')))
spec = spec[j+1:]
# Byte order
if spec[0] in ('@', '=', '<', '>', '^', '!'):
byteorder = spec[0]
if byteorder == '!':
byteorder = '>'
spec = spec[1:]
# Byte order characters also control native vs. standard type sizes
if byteorder in ('@', '^'):
type_map = _pep3118_native_map
type_map_chars = _pep3118_native_typechars
else:
type_map = _pep3118_standard_map
type_map_chars = _pep3118_standard_typechars
# Item sizes
itemsize = 1
if spec[0].isdigit():
j = 1
for j in xrange(1, len(spec)):
if not spec[j].isdigit():
break
itemsize = int(spec[:j])
spec = spec[j:]
# Data types
is_padding = False
if spec[:2] == 'T{':
value, spec, align, next_byteorder = _dtype_from_pep3118(
spec[2:], byteorder=byteorder, is_subdtype=True)
elif spec[0] in type_map_chars:
next_byteorder = byteorder
if spec[0] == 'Z':
j = 2
else:
j = 1
typechar = spec[:j]
spec = spec[j:]
is_padding = (typechar == 'x')
dtypechar = type_map[typechar]
if dtypechar in 'USV':
dtypechar += '%d' % itemsize
itemsize = 1
numpy_byteorder = {'@': '=', '^': '='}.get(byteorder, byteorder)
value = dtype(numpy_byteorder + dtypechar)
align = value.alignment
else:
raise ValueError("Unknown PEP 3118 data type specifier %r" % spec)
#
# Native alignment may require padding
#
# Here we assume that the presence of a '@' character implicitly implies
# that the start of the array is *already* aligned.
#
extra_offset = 0
if byteorder == '@':
start_padding = (-offset) % align
intra_padding = (-value.itemsize) % align
offset += start_padding
if intra_padding != 0:
if itemsize > 1 or (shape is not None and _prod(shape) > 1):
# Inject internal padding to the end of the sub-item
value = _add_trailing_padding(value, intra_padding)
else:
# We can postpone the injection of internal padding,
# as the item appears at most once
extra_offset += intra_padding
# Update common alignment
common_alignment = (align*common_alignment
/ _gcd(align, common_alignment))
# Convert itemsize to sub-array
if itemsize != 1:
value = dtype((value, (itemsize,)))
# Sub-arrays (2)
if shape is not None:
value = dtype((value, shape))
# Field name
this_explicit_name = False
if spec and spec.startswith(':'):
i = spec[1:].index(':') + 1
name = spec[1:i]
spec = spec[i+1:]
explicit_name = True
this_explicit_name = True
else:
name = get_dummy_name()
if not is_padding or this_explicit_name:
if name in fields:
raise RuntimeError("Duplicate field name '%s' in PEP3118 format"
% name)
fields[name] = (value, offset)
last_offset = offset
if not this_explicit_name:
next_dummy_name()
byteorder = next_byteorder
offset += value.itemsize
offset += extra_offset
# Check if this was a simple 1-item type
if len(fields.keys()) == 1 and not explicit_name and fields['f0'][1] == 0 \
and not is_subdtype:
ret = fields['f0'][0]
else:
ret = dtype(fields)
# Trailing padding must be explicitly added
padding = offset - ret.itemsize
if byteorder == '@':
padding += (-offset) % common_alignment
if is_padding and not this_explicit_name:
ret = _add_trailing_padding(ret, padding)
# Finished
if is_subdtype:
return ret, spec, common_alignment, byteorder
else:
return ret
def _add_trailing_padding(value, padding):
"""Inject the specified number of padding bytes at the end of a dtype"""
from numpy.core.multiarray import dtype
if value.fields is None:
vfields = {'f0': (value, 0)}
else:
vfields = dict(value.fields)
if value.names and value.names[-1] == '' and \
value[''].char == 'V':
# A trailing padding field is already present
vfields[''] = ('V%d' % (vfields[''][0].itemsize + padding),
vfields[''][1])
value = dtype(vfields)
else:
# Get a free name for the padding field
j = 0
while True:
name = 'pad%d' % j
if name not in vfields:
vfields[name] = ('V%d' % padding, value.itemsize)
break
j += 1
value = dtype(vfields)
if '' not in vfields:
# Strip out the name of the padding field
names = list(value.names)
names[-1] = ''
value.names = tuple(names)
return value
def _prod(a):
p = 1
for x in a:
p *= x
return p
def _gcd(a, b):
"""Calculate the greatest common divisor of a and b"""
while b:
a, b = b, a%b
return a
| {
"repo_name": "pelson/numpy",
"path": "numpy/core/_internal.py",
"copies": "1",
"size": "16554",
"license": "bsd-3-clause",
"hash": 7702190861246649000,
"line_mean": 28.4555160142,
"line_max": 102,
"alpha_frac": 0.521444968,
"autogenerated": false,
"ratio": 3.7872340425531914,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4808679010553191,
"avg_score": null,
"num_lines": null
} |
#A place for code to be called from C-code
# that implements more complicated stuff.
import re
import sys
from numpy.compat import asbytes, bytes
if (sys.byteorder == 'little'):
_nbo = asbytes('<')
else:
_nbo = asbytes('>')
def _makenames_list(adict):
from multiarray import dtype
allfields = []
fnames = adict.keys()
for fname in fnames:
obj = adict[fname]
n = len(obj)
if not isinstance(obj, tuple) or n not in [2,3]:
raise ValueError("entry not a 2- or 3- tuple")
if (n > 2) and (obj[2] == fname):
continue
num = int(obj[1])
if (num < 0):
raise ValueError("invalid offset.")
format = dtype(obj[0])
if (format.itemsize == 0):
raise ValueError("all itemsizes must be fixed.")
if (n > 2):
title = obj[2]
else:
title = None
allfields.append((fname, format, num, title))
# sort by offsets
allfields.sort(key=lambda x: x[2])
names = [x[0] for x in allfields]
formats = [x[1] for x in allfields]
offsets = [x[2] for x in allfields]
titles = [x[3] for x in allfields]
return names, formats, offsets, titles
# Called in PyArray_DescrConverter function when
# a dictionary without "names" and "formats"
# fields is used as a data-type descriptor.
def _usefields(adict, align):
from multiarray import dtype
try:
names = adict[-1]
except KeyError:
names = None
if names is None:
names, formats, offsets, titles = _makenames_list(adict)
else:
formats = []
offsets = []
titles = []
for name in names:
res = adict[name]
formats.append(res[0])
offsets.append(res[1])
if (len(res) > 2):
titles.append(res[2])
else:
titles.append(None)
return dtype({"names" : names,
"formats" : formats,
"offsets" : offsets,
"titles" : titles}, align)
# construct an array_protocol descriptor list
# from the fields attribute of a descriptor
# This calls itself recursively but should eventually hit
# a descriptor that has no fields and then return
# a simple typestring
def _array_descr(descriptor):
    """Recursively build an __array_interface__ descriptor list for a dtype.

    Recurses through subdtypes and fields until it reaches a plain
    descriptor, which is returned as a simple typestring (optionally
    paired with its cleaned metadata dict).
    """
    from multiarray import METADATA_DTSTR
    fields = descriptor.fields
    if fields is None:
        subdtype = descriptor.subdtype
        if subdtype is None:
            if descriptor.metadata is None:
                return descriptor.str
            else:
                new = descriptor.metadata.copy()
                # Eliminate any key related to internal implementation
                _ = new.pop(METADATA_DTSTR, None)
                return (descriptor.str, new)
        else:
            # Sub-array dtype: (base descriptor, shape).
            return (_array_descr(subdtype[0]), subdtype[1])
    names = descriptor.names
    ordered_fields = [fields[x] + (x,) for x in names]
    result = []
    offset = 0
    for field in ordered_fields:
        # Gaps between fields become unnamed void ('|V%d') padding entries.
        if field[1] > offset:
            num = field[1] - offset
            result.append(('','|V%d' % num))
            offset += num
        # field is (dtype, byte-offset[, title], name); a title makes the
        # name entry a (name, title) pair.
        if len(field) > 3:
            name = (field[2],field[3])
        else:
            name = field[2]
        if field[0].subdtype:
            tup = (name, _array_descr(field[0].subdtype[0]),
                   field[0].subdtype[1])
        else:
            tup = (name, _array_descr(field[0]))
        offset += field[0].itemsize
        result.append(tup)
    return result
# Build a new array from the information in a pickle.
# Note that the name numpy.core._internal._reconstruct is embedded in
# pickles of ndarrays made with NumPy before release 1.0
# so don't remove the name here, or you'll
# break backward compatibility.
def _reconstruct(subtype, shape, dtype):
    """Build an empty ndarray while unpickling.

    The dotted name numpy.core._internal._reconstruct is embedded in
    pickles made with NumPy < 1.0, so neither the name nor the signature
    may change.
    """
    from multiarray import ndarray
    return ndarray.__new__(subtype, shape, dtype)
# format_re and _split were taken from numarray by J. Todd Miller
def _split(input):
    """Split the input formats string into field formats without splitting
    the tuple used to specify multi-dimensional arrays.

    Operates on bytes; raises SyntaxError on unbalanced parentheses.
    """
    comma = asbytes(',')
    empty = asbytes('')
    pieces = []
    pending = empty
    for fragment in input.split(comma):
        # Re-attach fragments belonging to a still-open parenthesis group.
        item = fragment if pending == empty else pending + comma + fragment
        opened = item.count(asbytes('('))
        closed = item.count(asbytes(')'))
        if opened > closed:
            # Parentheses not yet balanced: keep accumulating fragments.
            pending = item
        elif opened == closed:
            pieces.append(item.strip())
            pending = empty
        else:
            # More ')' than '(' can never balance out.
            raise SyntaxError(item)
    # Anything still pending means an unclosed parenthesis.
    if pending != empty:
        raise SyntaxError(pending)
    return pieces
format_datetime = re.compile(asbytes(r"""
(?P<typecode>M8|m8|datetime64|timedelta64)
([[]
((?P<num>\d+)?
(?P<baseunit>Y|M|W|B|D|h|m|s|ms|us|ns|ps|fs|as)
(/(?P<den>\d+))?
[]])
(//(?P<events>\d+))?)?"""), re.X)
# Return (baseunit, num, den, events), datetime
# from date-time string
def _datetimestring(astr):
    """Parse a bytes date-time type string.

    Returns ((baseunit, num, den, events), is_datetime) where is_datetime
    is True for 'M8'/'datetime64' typecodes and False for the timedelta
    variants.  Missing groups default to (b'us', 1, 1, 1).
    Raises ValueError when the string does not match format_datetime.
    """
    res = format_datetime.match(astr)
    if res is None:
        raise ValueError("Incorrect date-time string.")
    typecode = res.group('typecode')
    datetime = (typecode == asbytes('M8') or typecode == asbytes('datetime64'))
    defaults = [asbytes('us'), 1, 1, 1]
    names = ['baseunit', 'num', 'den', 'events']
    func = [bytes, int, int, int]
    dt_tuple = []
    for i, name in enumerate(names):
        value = res.group(name)
        if value:
            # Convert each captured group with its paired converter.
            dt_tuple.append(func[i](value))
        else:
            dt_tuple.append(defaults[i])
    return tuple(dt_tuple), datetime
# Matches one comma-separated format item: optional byte-order character
# before and/or after an optional repeat count, then the type string.
format_re = re.compile(asbytes(r'(?P<order1>[<>|=]?)(?P<repeats> *[(]?[ ,0-9]*[)]? *)(?P<order2>[<>|=]?)(?P<dtype>[A-Za-z0-9.]*)'))
# astr is a string (perhaps comma separated)
# '=' maps to the platform's native byte order; all others pass through.
_convorder = {asbytes('='): _nbo}
def _commastring(astr):
    """Parse a comma-separated format string (bytes) into dtype items.

    Each result item is a plain type string or a (typestring, shape)
    tuple.  '|', '=' and the native byte order collapse to no prefix.
    Raises ValueError for unrecognized or conflicting specifications.
    """
    res = _split(astr)
    if (len(res)) < 1:
        raise ValueError("unrecognized formant")
    result = []
    for k,item in enumerate(res):
        # convert item
        try:
            (order1, repeats, order2, dtype) = format_re.match(item).groups()
        except (TypeError, AttributeError):
            raise ValueError('format %s is not recognized' % item)
        if order2 == asbytes(''):
            order = order1
        elif order1 == asbytes(''):
            order = order2
        else:
            # Byte order given on both sides: both must agree after
            # normalising '=' to the native order.
            order1 = _convorder.get(order1, order1)
            order2 = _convorder.get(order2, order2)
            if (order1 != order2):
                raise ValueError('in-consistent byte-order specification %s and %s' % (order1, order2))
            order = order1
        if order in [asbytes('|'), asbytes('='), _nbo]:
            order = asbytes('')
        dtype = order + dtype
        if (repeats == asbytes('')):
            newitem = dtype
        else:
            # NOTE(review): eval() on the repeats group; the regex limits it
            # to digits/commas/parens, but ast.literal_eval would be safer
            # if this could ever see untrusted input.
            newitem = (dtype, eval(repeats))
        result.append(newitem)
    return result
def _getintp_ctype():
    """Return the ctypes integer type matching NumPy's intp ('p') dtype.

    The result is computed once and memoised on the function's `cache`
    attribute.
    """
    from multiarray import dtype
    val = _getintp_ctype.cache
    if val is not None:
        return val
    # Map the platform's intp type character onto the ctypes equivalent.
    char = dtype('p').char
    import ctypes
    if (char == 'i'):
        val = ctypes.c_int
    elif char == 'l':
        val = ctypes.c_long
    elif char == 'q':
        val = ctypes.c_longlong
    else:
        val = ctypes.c_long
    _getintp_ctype.cache = val
    return val
# Memoisation slot for _getintp_ctype; filled on first call.
_getintp_ctype.cache = None
# Used for .ctypes attribute of ndarray
class _missing_ctypes(object):
def cast(self, num, obj):
return num
def c_void_p(self, num):
return num
class _ctypes(object):
    """Backend for the .ctypes attribute of ndarray.

    Wraps an array and (optionally) its data pointer, exposing them as
    ctypes-friendly objects.  Falls back to _missing_ctypes when the
    ctypes module is unavailable, in which case raw integer addresses
    are returned instead of ctypes objects.
    """
    def __init__(self, array, ptr=None):
        try:
            import ctypes
            self._ctypes = ctypes
        except ImportError:
            self._ctypes = _missing_ctypes()
        self._arr = array
        self._data = ptr
        # Zero-dimensional arrays have no shape/strides to expose.
        if self._arr.ndim == 0:
            self._zerod = True
        else:
            self._zerod = False
    def data_as(self, obj):
        """Return the data pointer cast to the given ctypes type."""
        return self._ctypes.cast(self._data, obj)
    def shape_as(self, obj):
        """Return the shape as a ctypes array of the given element type."""
        if self._zerod:
            return None
        return (obj*self._arr.ndim)(*self._arr.shape)
    def strides_as(self, obj):
        """Return the strides as a ctypes array of the given element type."""
        if self._zerod:
            return None
        return (obj*self._arr.ndim)(*self._arr.strides)
    def get_data(self):
        return self._data
    def get_shape(self):
        if self._zerod:
            return None
        return (_getintp_ctype()*self._arr.ndim)(*self._arr.shape)
    def get_strides(self):
        if self._zerod:
            return None
        return (_getintp_ctype()*self._arr.ndim)(*self._arr.strides)
    def get_as_parameter(self):
        return self._ctypes.c_void_p(self._data)
    data = property(get_data, None, doc="c-types data")
    shape = property(get_shape, None, doc="c-types shape")
    strides = property(get_strides, None, doc="c-types strides")
    _as_parameter_ = property(get_as_parameter, None, doc="_as parameter_")
# Given a datatype and an order object
# return a new names tuple
# with the order indicated
def _newnames(datatype, order):
oldnames = datatype.names
nameslist = list(oldnames)
if isinstance(order, str):
order = [order]
if isinstance(order, (list, tuple)):
for name in order:
try:
nameslist.remove(name)
except ValueError:
raise ValueError("unknown field name: %s" % (name,))
return tuple(list(order) + nameslist)
raise ValueError("unsupported order value: %s" % (order,))
# Given an array with fields and a sequence of field names
# construct a new array with just those fields copied over
def _index_fields(ary, fields):
    """Copy the named fields of a structured array into a new array.

    Given an array with fields and a sequence of field names, construct
    a new array containing just those fields, preserving the memory
    order (C vs Fortran contiguity) of the source.
    """
    from multiarray import empty, dtype
    dt = ary.dtype
    new_dtype = [(name, dt[name]) for name in dt.names if name in fields]
    if ary.flags.f_contiguous:
        order = 'F'
    else:
        order = 'C'
    newarray = empty(ary.shape, dtype=new_dtype, order=order)
    for name in fields:
        newarray[name] = ary[name]
    return newarray
# Given a string containing a PEP 3118 format specifier,
# construct a Numpy dtype
# PEP 3118 format character -> NumPy dtype character, for native sizes.
# Selected when the buffer byte order is '@' or '^'.
_pep3118_native_map = {
    '?': '?',
    'b': 'b',
    'B': 'B',
    'h': 'h',
    'H': 'H',
    'i': 'i',
    'I': 'I',
    'l': 'l',
    'L': 'L',
    'q': 'q',
    'Q': 'Q',
    'f': 'f',
    'd': 'd',
    'g': 'g',
    'Zf': 'F',
    'Zd': 'D',
    'Zg': 'G',
    's': 'S',
    'w': 'U',
    'O': 'O',
    'x': 'V', # padding
    }
_pep3118_native_typechars = ''.join(_pep3118_native_map.keys())
# PEP 3118 format character -> NumPy dtype string, for standard
# (fixed, platform-independent) sizes; used for '<', '>', '=' and '!'.
_pep3118_standard_map = {
    '?': '?',
    'b': 'b',
    'B': 'B',
    'h': 'i2',
    'H': 'u2',
    'i': 'i4',
    'I': 'u4',
    'l': 'i4',
    'L': 'u4',
    'q': 'i8',
    'Q': 'u8',
    'f': 'f',
    'd': 'd',
    'Zf': 'F',
    'Zd': 'D',
    's': 'S',
    'w': 'U',
    'O': 'O',
    'x': 'V', # padding
    }
_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys())
def _dtype_from_pep3118(spec, byteorder='@', is_subdtype=False):
    """Construct a NumPy dtype from a PEP 3118 format string.

    `spec` is consumed left to right; nested 'T{...}' structures recurse
    with is_subdtype=True, in which case the return value is the tuple
    (dtype, remaining_spec, common_alignment, byteorder).  At the top
    level only the dtype is returned.  Native byte orders ('@') trigger
    alignment padding; standard orders use fixed-size type maps.
    """
    from numpy.core.multiarray import dtype
    fields = {}
    offset = 0
    explicit_name = False
    this_explicit_name = False
    common_alignment = 1
    is_padding = False
    last_offset = 0
    # Counter held in a list so the nested helpers can mutate it
    # (pre-'nonlocal' closure idiom).
    dummy_name_index = [0]
    def next_dummy_name():
        dummy_name_index[0] += 1
    def get_dummy_name():
        # First unused auto-generated field name 'f0', 'f1', ...
        while True:
            name = 'f%d' % dummy_name_index[0]
            if name not in fields:
                return name
            next_dummy_name()
    # Parse spec
    while spec:
        value = None
        # End of structure, bail out to upper level
        if spec[0] == '}':
            spec = spec[1:]
            break
        # Sub-arrays (1)
        shape = None
        if spec[0] == '(':
            j = spec.index(')')
            shape = tuple(map(int, spec[1:j].split(',')))
            spec = spec[j+1:]
        # Byte order
        if spec[0] in ('@', '=', '<', '>', '^', '!'):
            byteorder = spec[0]
            if byteorder == '!':
                # '!' is network order, i.e. big-endian.
                byteorder = '>'
            spec = spec[1:]
        # Byte order characters also control native vs. standard type sizes
        if byteorder in ('@', '^'):
            type_map = _pep3118_native_map
            type_map_chars = _pep3118_native_typechars
        else:
            type_map = _pep3118_standard_map
            type_map_chars = _pep3118_standard_typechars
        # Item sizes
        itemsize = 1
        if spec[0].isdigit():
            j = 1
            # NOTE: xrange is Python-2-only; this module predates Python 3.
            for j in xrange(1, len(spec)):
                if not spec[j].isdigit():
                    break
            itemsize = int(spec[:j])
            spec = spec[j:]
        # Data types
        is_padding = False
        if spec[:2] == 'T{':
            # Nested structure: recurse and resume after its closing '}'.
            value, spec, align, next_byteorder = _dtype_from_pep3118(
                spec[2:], byteorder=byteorder, is_subdtype=True)
        elif spec[0] in type_map_chars:
            next_byteorder = byteorder
            if spec[0] == 'Z':
                # 'Z' prefixes complex types, consuming two characters.
                j = 2
            else:
                j = 1
            typechar = spec[:j]
            spec = spec[j:]
            is_padding = (typechar == 'x')
            dtypechar = type_map[typechar]
            if dtypechar in 'USV':
                # For strings/void the count is the item length, not a repeat.
                dtypechar += '%d' % itemsize
                itemsize = 1
            numpy_byteorder = {'@': '=', '^': '='}.get(byteorder, byteorder)
            value = dtype(numpy_byteorder + dtypechar)
            align = value.alignment
        else:
            raise ValueError("Unknown PEP 3118 data type specifier %r" % spec)
        #
        # Native alignment may require padding
        #
        # Here we assume that the presence of a '@' character implicitly implies
        # that the start of the array is *already* aligned.
        #
        extra_offset = 0
        if byteorder == '@':
            start_padding = (-offset) % align
            intra_padding = (-value.itemsize) % align
            offset += start_padding
            if intra_padding != 0:
                if itemsize > 1 or (shape is not None and _prod(shape) > 1):
                    # Inject internal padding to the end of the sub-item
                    value = _add_trailing_padding(value, intra_padding)
                else:
                    # We can postpone the injection of internal padding,
                    # as the item appears at most once
                    extra_offset += intra_padding
        # Update common alignment
        common_alignment = (align*common_alignment
                            / _gcd(align, common_alignment))
        # Convert itemsize to sub-array
        if itemsize != 1:
            value = dtype((value, (itemsize,)))
        # Sub-arrays (2)
        if shape is not None:
            value = dtype((value, shape))
        # Field name
        this_explicit_name = False
        if spec and spec.startswith(':'):
            i = spec[1:].index(':') + 1
            name = spec[1:i]
            spec = spec[i+1:]
            explicit_name = True
            this_explicit_name = True
        else:
            name = get_dummy_name()
        if not is_padding or this_explicit_name:
            if name in fields:
                raise RuntimeError("Duplicate field name '%s' in PEP3118 format"
                                   % name)
            fields[name] = (value, offset)
            last_offset = offset
            if not this_explicit_name:
                next_dummy_name()
        byteorder = next_byteorder
        offset += value.itemsize
        offset += extra_offset
    # Check if this was a simple 1-item type
    if len(fields.keys()) == 1 and not explicit_name and fields['f0'][1] == 0 \
       and not is_subdtype:
        ret = fields['f0'][0]
    else:
        ret = dtype(fields)
    # Trailing padding must be explicitly added
    padding = offset - ret.itemsize
    if byteorder == '@':
        padding += (-offset) % common_alignment
    if is_padding and not this_explicit_name:
        ret = _add_trailing_padding(ret, padding)
    # Finished
    if is_subdtype:
        return ret, spec, common_alignment, byteorder
    else:
        return ret
def _add_trailing_padding(value, padding):
    """Inject the specified number of padding bytes at the end of a dtype.

    The padding becomes (or extends) a nameless void field at the end of
    the structure; a scalar dtype is first wrapped as a single field 'f0'.
    """
    from numpy.core.multiarray import dtype
    if value.fields is None:
        vfields = {'f0': (value, 0)}
    else:
        vfields = dict(value.fields)
    if value.names and value.names[-1] == '' and \
       value[''].char == 'V':
        # A trailing padding field is already present
        vfields[''] = ('V%d' % (vfields[''][0].itemsize + padding),
                       vfields[''][1])
        value = dtype(vfields)
    else:
        # Get a free name for the padding field
        j = 0
        while True:
            name = 'pad%d' % j
            if name not in vfields:
                vfields[name] = ('V%d' % padding, value.itemsize)
                break
            j += 1
        value = dtype(vfields)
        if '' not in vfields:
            # Strip out the name of the padding field
            names = list(value.names)
            names[-1] = ''
            value.names = tuple(names)
    return value
def _prod(a):
p = 1
for x in a:
p *= x
return p
def _gcd(a, b):
"""Calculate the greatest common divisor of a and b"""
while b:
a, b = b, a%b
return a
| {
"repo_name": "dagss/numpy_svn",
"path": "numpy/core/_internal.py",
"copies": "2",
"size": "17690",
"license": "bsd-3-clause",
"hash": 3303125416066148400,
"line_mean": 28,
"line_max": 131,
"alpha_frac": 0.5335217637,
"autogenerated": false,
"ratio": 3.765432098765432,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5298953862465432,
"avg_score": null,
"num_lines": null
} |
#A place for code to be called from C-code
# that implements more complicated stuff.
import re
import sys
# Native byte-order character for this platform
# ('<' = little-endian, '>' = big-endian); used to normalise '=' specs.
if (sys.byteorder == 'little'):
    _nbo = '<'
else:
    _nbo = '>'
def _makenames_list(adict):
    """Convert a {name: (format, offset[, title])} dict into parallel lists.

    Returns (names, formats, offsets, titles) sorted by byte offset.
    Entries whose title equals the field name are skipped (title aliases
    of an already-listed field).

    Raises ValueError if an entry is not a 2- or 3-tuple, has a negative
    offset, or has an unsized (itemsize == 0) format.
    """
    from multiarray import dtype
    allfields = []
    fnames = adict.keys()
    for fname in fnames:
        obj = adict[fname]
        # Check the type before calling len() so malformed (non-sized)
        # entries raise ValueError, not TypeError.  The instance-call
        # raise form replaces the Python-2-only "raise E, msg" syntax.
        if not isinstance(obj, tuple) or len(obj) not in (2, 3):
            raise ValueError("entry not a 2- or 3- tuple")
        n = len(obj)
        if (n > 2) and (obj[2] == fname):
            # Title alias of the field itself -- skip the duplicate.
            continue
        num = int(obj[1])
        if (num < 0):
            raise ValueError("invalid offset.")
        format = dtype(obj[0])
        if (format.itemsize == 0):
            raise ValueError("all itemsizes must be fixed.")
        if (n > 2):
            title = obj[2]
        else:
            title = None
        allfields.append((fname, format, num, title))
    # sort by offsets; key= replaces the removed cmp-based comparator
    allfields.sort(key=lambda x: x[2])
    names = [x[0] for x in allfields]
    formats = [x[1] for x in allfields]
    offsets = [x[2] for x in allfields]
    titles = [x[3] for x in allfields]
    return names, formats, offsets, titles
# Called in PyArray_DescrConverter function when
# a dictionary without "names" and "formats"
# fields is used as a data-type descriptor.
def _usefields(adict, align):
    """Build a structured dtype from a dict descriptor.

    Called from PyArray_DescrConverter when a dictionary without "names"
    and "formats" keys is used as a data-type descriptor.  The key -1, if
    present, holds an explicit ordering of the field names; otherwise the
    order is derived from the offsets via _makenames_list.
    """
    from multiarray import dtype
    try:
        names = adict[-1]
    except KeyError:
        names = None
    if names is None:
        names, formats, offsets, titles = _makenames_list(adict)
    else:
        # Explicit name order given: pull each (format, offset[, title])
        # entry in that order.
        formats = []
        offsets = []
        titles = []
        for name in names:
            res = adict[name]
            formats.append(res[0])
            offsets.append(res[1])
            if (len(res) > 2):
                titles.append(res[2])
            else:
                titles.append(None)
    return dtype({"names" : names,
                  "formats" : formats,
                  "offsets" : offsets,
                  "titles" : titles}, align)
# construct an array_protocol descriptor list
# from the fields attribute of a descriptor
# This calls itself recursively but should eventually hit
# a descriptor that has no fields and then return
# a simple typestring
def _array_descr(descriptor):
    """Recursively build an __array_interface__ descriptor list for a dtype.

    Recurses through subdtypes and fields until it reaches a plain
    descriptor, which is returned as a simple typestring.
    """
    fields = descriptor.fields
    if fields is None:
        subdtype = descriptor.subdtype
        if subdtype is None:
            return descriptor.str
        else:
            # Sub-array dtype: (base descriptor, shape).
            return (_array_descr(subdtype[0]), subdtype[1])
    names = descriptor.names
    ordered_fields = [fields[x] + (x,) for x in names]
    result = []
    offset = 0
    for field in ordered_fields:
        # Gaps between fields become unnamed void ('|V%d') padding entries.
        if field[1] > offset:
            num = field[1] - offset
            result.append(('','|V%d' % num))
            offset += num
        # field is (dtype, byte-offset[, title], name); a title makes the
        # name entry a (name, title) pair.
        if len(field) > 3:
            name = (field[2],field[3])
        else:
            name = field[2]
        if field[0].subdtype:
            tup = (name, _array_descr(field[0].subdtype[0]),
                   field[0].subdtype[1])
        else:
            tup = (name, _array_descr(field[0]))
        offset += field[0].itemsize
        result.append(tup)
    return result
# Build a new array from the information in a pickle.
# Note that the name numpy.core._internal._reconstruct is embedded in
# pickles of ndarrays made with NumPy before release 1.0
# so don't remove the name here, or you'll
# break backward compatibility.
def _reconstruct(subtype, shape, dtype):
    """Build an empty ndarray while unpickling.

    The dotted name numpy.core._internal._reconstruct is embedded in
    pickles made with NumPy < 1.0, so neither the name nor the signature
    may change.
    """
    from multiarray import ndarray
    return ndarray.__new__(subtype, shape, dtype)
# format_re and _split were taken from numarray by J. Todd Miller
def _split(input):
"""Split the input formats string into field formats without splitting
the tuple used to specify multi-dimensional arrays."""
newlist = []
hold = ''
listinput = input.split(',')
for element in listinput:
if hold != '':
item = hold + ',' + element
else:
item = element
left = item.count('(')
right = item.count(')')
# if the parenthesis is not balanced, hold the string
if left > right :
hold = item
# when balanced, append to the output list and reset the hold
elif left == right:
newlist.append(item.strip())
hold = ''
# too many close parenthesis is unacceptable
else:
raise SyntaxError, item
# if there is string left over in hold
if hold != '':
raise SyntaxError, hold
return newlist
# Matches one comma-separated format item: optional byte-order character
# before and/or after an optional repeat count, then the type string.
format_re = re.compile(r'(?P<order1>[<>|=]?)(?P<repeats> *[(]?[ ,0-9]*[)]? *)(?P<order2>[<>|=]?)(?P<dtype>[A-Za-z0-9.]*)')
# astr is a string (perhaps comma separated)
# '=' maps to the platform's native byte order; the rest map to themselves.
_convorder = {'=': _nbo,
              '|': '|',
              '>': '>',
              '<': '<'}
def _commastring(astr):
    """Parse a comma-separated format string into a list of dtype items.

    Each result item is a plain type string or a (typestring, shape)
    tuple.  '|', '=' and the native byte order collapse to no prefix.
    Raises ValueError for empty/unrecognized input or for conflicting
    byte-order specifications.
    """
    res = _split(astr)
    if (len(res)) < 1:
        # Instance-call raise form (Python-2 "raise E, msg" removed);
        # the historical message also misspelled "format" as "formant".
        raise ValueError("unrecognized format")
    result = []
    for k, item in enumerate(res):
        # convert item
        try:
            (order1, repeats, order2, dtype) = format_re.match(item).groups()
        except (TypeError, AttributeError):
            raise ValueError('format %s is not recognized' % item)
        if order2 == '':
            order = order1
        elif order1 == '':
            order = order2
        else:
            # Byte order given on both sides: both must agree after
            # normalising '=' to the native order.
            order1 = _convorder[order1]
            order2 = _convorder[order2]
            if (order1 != order2):
                raise ValueError('in-consistent byte-order specification %s and %s' % (order1, order2))
            order = order1
        if order in ['|', '=', _nbo]:
            order = ''
        dtype = '%s%s' % (order, dtype)
        if (repeats == ''):
            newitem = dtype
        else:
            # NOTE(review): eval() on the repeats group; the regex limits it
            # to digits/commas/parens, but ast.literal_eval would be safer
            # if this could ever see untrusted input.
            newitem = (dtype, eval(repeats))
        result.append(newitem)
    return result
def _getintp_ctype():
    """Return the ctypes integer type matching NumPy's intp ('p') dtype.

    The result is computed once and memoised on the function's `cache`
    attribute.
    """
    from multiarray import dtype
    val = _getintp_ctype.cache
    if val is not None:
        return val
    # Map the platform's intp type character onto the ctypes equivalent.
    char = dtype('p').char
    import ctypes
    if (char == 'i'):
        val = ctypes.c_int
    elif char == 'l':
        val = ctypes.c_long
    elif char == 'q':
        val = ctypes.c_longlong
    else:
        val = ctypes.c_long
    _getintp_ctype.cache = val
    return val
# Memoisation slot for _getintp_ctype; filled on first call.
_getintp_ctype.cache = None
# Used for .ctypes attribute of ndarray
class _missing_ctypes(object):
def cast(self, num, obj):
return num
def c_void_p(self, num):
return num
class _ctypes(object):
    """Backend for the .ctypes attribute of ndarray.

    Wraps an array and (optionally) its data pointer, exposing them as
    ctypes-friendly objects.  Falls back to _missing_ctypes when the
    ctypes module is unavailable, in which case raw integer addresses
    are returned instead of ctypes objects.
    """
    def __init__(self, array, ptr=None):
        try:
            import ctypes
            self._ctypes = ctypes
        except ImportError:
            self._ctypes = _missing_ctypes()
        self._arr = array
        self._data = ptr
        # Zero-dimensional arrays have no shape/strides to expose.
        if self._arr.ndim == 0:
            self._zerod = True
        else:
            self._zerod = False
    def data_as(self, obj):
        """Return the data pointer cast to the given ctypes type."""
        return self._ctypes.cast(self._data, obj)
    def shape_as(self, obj):
        """Return the shape as a ctypes array of the given element type."""
        if self._zerod:
            return None
        return (obj*self._arr.ndim)(*self._arr.shape)
    def strides_as(self, obj):
        """Return the strides as a ctypes array of the given element type."""
        if self._zerod:
            return None
        return (obj*self._arr.ndim)(*self._arr.strides)
    def get_data(self):
        return self._data
    def get_shape(self):
        if self._zerod:
            return None
        return (_getintp_ctype()*self._arr.ndim)(*self._arr.shape)
    def get_strides(self):
        if self._zerod:
            return None
        return (_getintp_ctype()*self._arr.ndim)(*self._arr.strides)
    def get_as_parameter(self):
        return self._ctypes.c_void_p(self._data)
    data = property(get_data, None, doc="c-types data")
    shape = property(get_shape, None, doc="c-types shape")
    strides = property(get_strides, None, doc="c-types strides")
    _as_parameter_ = property(get_as_parameter, None, doc="_as parameter_")
# Given a datatype and an order object
# return a new names tuple
# with the order indicated
def _newnames(datatype, order):
oldnames = datatype.names
nameslist = list(oldnames)
if isinstance(order, str):
order = [order]
if isinstance(order, (list, tuple)):
for name in order:
try:
nameslist.remove(name)
except ValueError:
raise ValueError, "unknown field name: %s" % (name,)
return tuple(list(order) + nameslist)
raise ValueError, "unsupported order value: %s" % (order,)
| {
"repo_name": "houseind/robothon",
"path": "GlyphProofer/dist/GlyphProofer.app/Contents/Resources/lib/python2.6/numpy/core/_internal.py",
"copies": "1",
"size": "8420",
"license": "mit",
"hash": -771053260792686000,
"line_mean": 27.6394557823,
"line_max": 122,
"alpha_frac": 0.5573634204,
"autogenerated": false,
"ratio": 3.8099547511312215,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48673181715312214,
"avg_score": null,
"num_lines": null
} |
"""A place for useful functions and classes that don't have a home."""
import json
import yaml
import logging
import os
import csv
from datapackage import DataPackage
from os.path import isfile, join
from petl import fromdicts, look, fromcsv, dicts
from slugify import slugify
from .config import (
CODELISTS_DIR,
FISCAL_SCHEMA_FILE,
FISCAL_METADATA_FILE,
FISCAL_MODEL_FILE,
STATUS_FILE,
GEOCODES_FILE,
VERBOSE,
LOG_SAMPLE_SIZE,
JSON_FORMAT,
DATA_DIR, PROCESSORS_DIR)
def format_to_json(blob):
    """Serialize `blob` to JSON using the project-wide JSON_FORMAT options."""
    return json.dumps(blob, **JSON_FORMAT)
def sanitize_field_names(raw_fields):
    """Return the field names without redundant blanks and line breaks.

    Empty or falsy entries are dropped; runs of internal whitespace
    (including newlines) collapse to a single space.
    """
    return [' '.join(field.split()) for field in raw_fields if field]
def get_nuts_codes():
    """Return a tuple of valid NUTS geocodes read from GEOCODES_FILE."""
    with open(GEOCODES_FILE) as stream:
        lines = csv.DictReader(stream)
        geocodes = []
        for i, line in enumerate(lines):
            # The first line has an empty NUTS-code
            if i > 0:
                geocode = line['NUTS-Code']
                geocodes.append(geocode)
    logging.debug('Loaded %d NUTS geocodes', len(geocodes))
    return tuple(geocodes)
# Full geocode table loaded eagerly at import time via petl
# (list of row dicts from the same CSV file).
GEOCODES = list(dicts(fromcsv(GEOCODES_FILE)))
def get_all_codelists():
    """Return all codelists as a dictionary of dictionaries.

    Keys are codelist file basenames (without extension) found in
    CODELISTS_DIR; values come from get_codelist.
    """
    codelists = {}
    for codelist_file in os.listdir(CODELISTS_DIR):
        codelist_name, _ = os.path.splitext(codelist_file)
        codelist = get_codelist(codelist_name)
        codelists.update({codelist_name: codelist})
    return codelists
def get_codelist(codelist_file):
    """Return one codelist (parsed from CODELISTS_DIR/<name>.yaml) as a dict."""
    filepath = os.path.join(CODELISTS_DIR, codelist_file + '.yaml')
    with open(filepath) as stream:
        text = stream.read()
    # NOTE(review): yaml.load without an explicit Loader is unsafe on
    # untrusted input (and deprecated in PyYAML >= 5.1); these files are
    # project-local, but yaml.safe_load would be preferable.
    return yaml.load(text)
def get_fiscal_datapackage(skip_validation=False, source=None):
    """Create the master fiscal datapackage from parts.

    Assembles metadata, schema and model YAML files into one datapackage
    dict.  When `source` is given, it is used as the base and renamed
    after the current directory relative to DATA_DIR; otherwise the
    fiscal metadata file is the base.  Validates unless skip_validation.
    """
    with open(FISCAL_METADATA_FILE) as stream:
        fiscal_datapackage = yaml.load(stream.read())
    if source:
        datapackage = source
        # Derive the package name from the path relative to DATA_DIR.
        # The previous str.lstrip(DATA_DIR) stripped any leading
        # *characters* found in DATA_DIR, not the directory prefix.
        cwd = os.getcwd()
        if cwd.startswith(DATA_DIR):
            cwd = cwd[len(DATA_DIR):]
        datapackage['name'] = slugify(cwd).lower()
    else:
        datapackage = fiscal_datapackage
    with open(FISCAL_SCHEMA_FILE) as stream:
        schema = yaml.load(stream.read())
    # Attach the fiscal schema to the first resource and keep only it.
    datapackage['resources'][0]['schema'] = schema
    datapackage['resources'][0].update(mediatype='text/csv')
    datapackage['resources'] = [datapackage['resources'][0]]
    # TODO: Update the resource properties in the fiscal data-package
    with open(FISCAL_MODEL_FILE) as stream:
        datapackage['model'] = yaml.load(stream.read())
    if not skip_validation:
        DataPackage(datapackage, schema='fiscal').validate()
    return datapackage
def get_fiscal_field_names():
    """Return the list of fiscal fields names."""
    with open(FISCAL_SCHEMA_FILE) as stream:
        schema = yaml.load(stream.read())
    # One name per field entry in the fiscal schema.
    return [field_['name'] for field_ in schema['fields']]
def get_fiscal_fields(key):
    """Return a lookup table matching the field name to another property.

    `key` selects which per-field property of the fiscal schema becomes
    the mapped value (e.g. its type or title).
    """
    with open(FISCAL_SCHEMA_FILE) as stream:
        schema = yaml.load(stream.read())
    return {field_['name']: field_[key] for field_ in schema['fields']}
def write_feedback(section, messages, folder=None):
    """Append messages to the status file.

    Each message is appended under `section` in the JSON status file and
    also logged as a warning.  `folder` defaults to the current working
    directory at *call* time -- the previous default of os.getcwd() was
    evaluated once at import time, freezing whatever directory happened
    to be current when the module loaded.
    """
    if folder is None:
        folder = os.getcwd()
    filepath = os.path.join(folder, STATUS_FILE)
    with open(filepath) as stream:
        feedback = json.load(stream)
    if section not in feedback:
        feedback[section] = []
    for message in messages:
        feedback[section].append(message)
        logging.warning('[%s] %s', section, message)
    with open(filepath, 'w+') as stream:
        json.dump(feedback, stream, indent=4)
def get_available_processors():
    """Return the list of available processors modules.

    Module names are derived from regular files in PROCESSORS_DIR.
    Note: str.replace removes every '.py' substring, not just a
    trailing extension.
    """
    modules = [item.replace('.py', '')
               for item in os.listdir(PROCESSORS_DIR)
               if isfile(join(PROCESSORS_DIR, item))]
    return modules
# Snapshot of processor module names taken at import time.
processor_names = get_available_processors()
def process(resources,
            row_processor,
            pass_resource_index=False,
            pass_row_index=False,
            **parameters):
    """Apply a row processor to each row of each datapackage resource.

    Yields one lazy row generator per resource.  Extra keyword
    `parameters` are forwarded to `row_processor`; `resource_index` /
    `row_index` are injected when the corresponding flags are set.  A
    'verbose' parameter, if present, is popped and controls sample
    logging (default VERBOSE); up to LOG_SAMPLE_SIZE processed rows per
    resource are logged as a petl table.
    """
    parameters_as_json = json.dumps(parameters, **JSON_FORMAT)
    logging.info('Parameters = \n%s', parameters_as_json)
    if 'verbose' in parameters:
        verbose = parameters.pop('verbose')
    else:
        verbose = VERBOSE
    sample_rows = []
    for resource_index, resource in enumerate(resources):
        if pass_resource_index:
            parameters.update(resource_index=resource_index)
        # NOTE(review): this closure captures resource_index and parameters
        # late; consuming the yielded generators out of order may observe
        # values from a later loop iteration.  sample_rows also accumulates
        # across resources rather than per resource -- confirm intent.
        def process_rows(resource_):
            for row_index, row in enumerate(resource_):
                if pass_row_index:
                    parameters.update(row_index=row_index)
                new_row = row_processor(row, **parameters)
                yield new_row
                if verbose and row_index < LOG_SAMPLE_SIZE:
                    sample_rows.append(new_row)
            if verbose:
                table = look(fromdicts(sample_rows), limit=LOG_SAMPLE_SIZE)
                message = 'Output of processor %s for resource %s is...\n%s'
                args = row_processor.__name__, resource_index, table
                logging.info(message, *args)
        yield process_rows(resource)
| {
"repo_name": "Victordeleon/os-data-importers",
"path": "eu-structural-funds/common/utilities.py",
"copies": "1",
"size": "5651",
"license": "mit",
"hash": 5958126724003183000,
"line_mean": 26.9752475248,
"line_max": 76,
"alpha_frac": 0.6308617944,
"autogenerated": false,
"ratio": 3.9134349030470914,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5044296697447092,
"avg_score": null,
"num_lines": null
} |
"""A Platform controller
Files & Folder, DateTime classes
"""
import os
import argparse
import datetime
import platform
from pprint import pprint
import shutil
import re
from subprocess import call, check_call, check_output, Popen, PIPE, STDOUT
from sys import platform as _platform
try:
import win32com.client as w32
except ImportError:
windows = False
from DB74 import DataBaseObject
from Template import SQL
class DateTimeObject:
    """Wrap a datetime (or POSIX timestamp) and expose a formatted string.

    Attributes:
        date: the wrapped datetime object.
        date_string: `date` rendered with `date_format`.
    """

    def __init__(self, date_set=None, date_format='%d.%m.%Y %H:%M:%S'):
        # Default to "now" at call time.  The old default argument
        # datetime.datetime.now() was evaluated only once, when the class
        # body was executed, so every later DateTimeObject() silently
        # shared that stale import-time timestamp.
        if date_set is None:
            date_set = datetime.datetime.now()
        self.date = date_set
        self.date_string = self.date_string_format(self.date, date_format)

    def date_string_format(self, float_num, format_str):
        """Format `float_num` -- a POSIX timestamp (float, interpreted as
        UTC) or a datetime object -- using `format_str`."""
        if isinstance(float_num, float):
            dt_object = datetime.datetime.utcfromtimestamp(float_num)
        else:
            dt_object = float_num
        return dt_object.strftime(format_str)
class FileSystemObject:
    """Wrapper around a file or directory path.

    Classifies the path on construction (file vs folder, rough content
    type by extension) and offers path building, copy/move, read/write
    and HTML directory-listing helpers.  `to_path`, when given, becomes
    the write destination.
    """
    def __init__(self, from_path='', to_path=''):
        # Default to the directory containing this script.
        if not from_path:
            from_path = os.path.dirname(os.path.realpath(__file__))
            #print('using path relative to running script location ...' + from_path)
        self.path = from_path
        self.separator = self.get_separator_from_path()
        # avoid separators in the end of path string
        if self.separator == self.path[-1:]:
            self.path = self.path[:-1]
        if os.path.isfile(from_path):
            self.exist = True
            self.is_file = True
            self.is_folder = False
            file_xt = self.path.split(self.separator)[-1].split('.')[-1].lower()
            # NOTE(review): `file_xt in x` is a substring test, so e.g. an
            # extension 'b' matches 'ctb'/'db'; `file_xt in ('ctb', ...)`
            # was probably intended.
            if any(file_xt in x for x in ['ctb', 'sqlite3', 'db']):
                self.obj_type = 'db'
            elif any(file_xt in x for x in ['xml', 'txt', 'csv']):
                self.obj_type = 'txt'
            else:
                self.obj_type = 'not_defined yet'
            #print('file {0} ({1}) is type: {2}'.format(self.path, file_xt, self.obj_type))
        elif os.path.isdir(from_path):
            self.exist = True
            self.is_file = False
            self.is_folder = True
            self.obj_type = 'dir'
        else:
            # Non-existent path: guess file vs folder from the presence
            # of a dot in the name.
            self.exist = False
            self.obj_type = ''
            if '.' in from_path:
                self.is_file = True
                self.is_folder = False
            else:
                self.is_file = False
                self.is_folder = True
        # destination definition
        if to_path:
            self.destination = to_path
        else:
            self.destination = ''
    def get_separator_from_path(self):
        """Return '\\\\' or '/' depending on which appears in the path (None if neither)."""
        if '\\' in self.path:
            separator = '\\'
        elif '/' in self.path:
            separator = '/'
        else:
            separator = None
        return separator
    def dir_up(self, level=1):
        """Return the path with the last `level` components stripped, with trailing separator."""
        # strip filename / last dir from path
        return self.separator.join(self.path.split(self.separator)[:(-1*level)]) + self.separator
    def last_part(self):
        """Return the final path component (file or directory name)."""
        return self.path.split(self.separator)[-1]
    def append_objects(self, **kwargs):
        """Build a path from this one plus keyword components.

        Arguments whose name contains 'file' become the trailing file
        name; everything else is appended as a directory, in sorted
        keyword-name order.
        """
        # if all dirs could do - or could it be used with files
        # build_path = self.separator.join('{0}'.format(val) for key, val in kwargs.items())
        build_path = ''
        for arg in (sorted(kwargs)):
            if 'file' in arg:
                file_name = kwargs[arg]
            else:
                build_path += kwargs[arg] + self.separator
                # NOTE(review): this resets file_name when a dir argument
                # sorts after a file argument, and file_name is undefined
                # when kwargs is empty.
                file_name = ''
        if file_name:
            # NOTE(review): build_path already ends with a separator, so
            # this produces a doubled separator before file_name.
            return self.path + self.separator + build_path + self.separator + file_name
        else:
            return self.path + self.separator + build_path
    def get_another_directory_file(self, another):
        """Return `another` resolved against this path's parent directory (files) or itself (folders)."""
        if self.is_file:
            # strip filename from path
            root_dir = self.dir_up(1)
            return self.separator.join(root_dir.split(self.separator)[0:-1]) + self.separator + another
        elif self.is_folder:
            return self.path + self.separator + another
        else:
            print('not file nor folder ... returning base path')
            return self.path
    def move_file_to(self, another_directory, filename=''):
        """Move this file into `another_directory` (optionally renamed); folders unsupported."""
        if not filename:
            filename = self.last_part()
        if self.is_file:
            shutil.move(self.path, FileSystemObject(another_directory).append_objects(file=filename))
            print('file ' + self.path + ' archived')
        else:
            print('directory move not implemented')
    def copy_file_to(self, another_directory, filename=''):
        """Copy this file into `another_directory` (optionally renamed); folders unsupported."""
        if not filename:
            shutil.copy(self.path, another_directory)
        else:
            if self.is_file:
                shutil.copy(self.path, FileSystemObject(another_directory).append_objects(file=filename))
                print('file ' + self.path + ' archived')
            else:
                print('directory copy not implemented')
    def directory_lister(self, list_files=False):
        """Render an HTML listing of this directory tree and write it out.

        Uses the 'Structure/HTML_DirectoryList.txt' template (XXX -> root
        path, YYY -> table body); defaults the output file to
        'Multimedia/DirectoryList.html' when no destination is set.
        """
        root_fld = FileSystemObject().dir_up(1)
        structure_fld = FileSystemObject(root_fld).append_objects(dir='Structure')
        mlt_fld = FileSystemObject(root_fld).append_objects(dir='Multimedia')
        template_file = FileSystemObject(structure_fld).append_objects(file='HTML_DirectoryList.txt')
        if not self.destination:
            self.destination = FileSystemObject(mlt_fld).append_objects(file='DirectoryList.html')
        print(template_file + ' - will be writing to: ' + self.destination)
        with open(template_file, 'r') as content_file:
            content = content_file.read()
        template = content.replace('XXX', self.path)
        table_head = '<table><tr class="Head"><td>List Generated on {0} / Total Folder Size - {1} / {2} Subfolders </td></tr>'
        table_row = '<tr class="{0}"><td>{1}</td><td>{2}</td></tr>'
        htm_content = ''
        total_size = 0
        folder_count = 0
        # Walk the directory tree
        for root, directories, files in os.walk(self.path):
            print(root)
            folder_size = 0
            file_count = 0
            tmp_content = ''
            for filename in files:
                # Sizes accumulate in kilobytes.
                folder_size += (os.path.getsize(root + '/' + filename) / 1024)
                if list_files:
                    file_size = str('{0:.2f}'.format(os.path.getsize(root + '/' + filename) / 1024)) + ' kb'
                    tmp_content = tmp_content + table_row.format('File', filename, file_size) + '\n'
                file_count += 1
            ref = '<a href="file:///' + root + '">' + root + '</a> (' + str(file_count) + ' files in folder)'
            htm_content = htm_content + '\n' + table_row.format('Fldr', ref,
                                                                str(folder_size) + ' kb') + '\n' + tmp_content
            total_size = total_size + folder_size
            folder_count += 1
        content = table_head.format(DateTimeObject().date_string, str(total_size) + ' kb', folder_count) + '\n' + htm_content
        whole_content = template.replace('YYY', content)
        # print(content)
        # print(template)
        self.object_write(whole_content)
    def object_read_split(self):
        """Return (folder_names, file_names) for this directory's entries."""
        folder_list = []
        file_list = []
        object_dict = self.object_read()
        # NOTE(review): the loop variable shadows the builtin `object`.
        for object in object_dict:
            if FileSystemObject(object_dict[object]).is_folder:
                folder_list.append(object)
            else:
                file_list.append(object)
        return folder_list, file_list
    def object_read(self, filter=''):
        """Return file contents (for a file) or a {name: full_path} dict
        of entries whose names contain `filter` (for a folder)."""
        if self.is_file:
            with open(self.path, 'r') as content_file:
                content = content_file.read()
            return content
        elif self.is_folder:
            obj_lib = {}
            for file_name in os.listdir(self.path):
                if filter in file_name or not filter:
                    obj_lib[file_name] = self.path + self.separator + file_name
            return obj_lib
    def object_write(self, content='', mode='w+'):
        """Write `content` to the destination (defaulting to this path),
        creating the target first if it does not exist."""
        if not self.destination:
            self.destination = self.path
        if not self.exist:
            self.object_create_neccesary()
        if FileSystemObject(self.destination).is_file:
            # NOTE(review): `mode != 'w+' or mode != 'a'` is always True
            # (should be `and`), so mode is unconditionally normalised to
            # 'a' (if it contains 'app') or 'w+'.
            if mode != 'w+' or mode != 'a':
                if 'app' in mode:
                    mode = 'a'
                else:
                    mode = 'w+'
            with open(self.destination, mode, encoding="utf-8") as target_file:
                target_file.write(content)
        else:
            pprint(vars(self))
            print('is not a file, cannot write: ' + self.destination)
    def object_size(self):
        # return file size in kilobytes
        if self.is_file:
            return '{0:.2f}'.format(os.path.getsize(self.path) / 1024)
        elif self.is_folder:
            # NOTE(review): folder sizing is not implemented -- this
            # returns a placeholder string.
            return 'for all files sum size'
    def object_mod_date(self, format='%Y. %m. %d %H:%M:%S'):
        """Return the modification time formatted; create the object (and
        return 'now') when it does not exist yet."""
        if self.exist:
            return DateTimeObject().date_string_format(os.path.getmtime(self.path), format)
        else:
            self.object_create_neccesary()
            return DateTimeObject().date_string
    def object_create_neccesary(self):
        """Create the folder (makedirs) or touch the file if missing.

        NOTE(review): method name keeps its historical misspelling of
        'necessary' because external callers may rely on it.
        """
        # must check if path is meaningful name
        if not self.exist:
            if self.is_folder:
                os.makedirs(self.path)
                print('directory ' + self.path + ' folder created ...')
            else:
                self.file_touch()
    def file_touch(self):
        """Create an empty file (or update its timestamps) at this path."""
        with open(self.path, 'w+'):
            os.utime(self.path, None)
    def file_refresh(self, content):
        """Overwrite this file with `content`, creating it first if needed;
        does nothing for empty content."""
        # print('refreshing filename: ' + filename + ' with text: ' + text)
        if content:
            if not self.is_file:
                print('file {0} not exist, must create'.format(self.path))
                self.file_touch()
            self.object_write(content, 'w+')
        else:
            print('no text to write, skipping file {0}'.format(self.path))
    def extra_path_from(self, basepath):
        """Return this path with `basepath` removed (via str.rsplit join)."""
        return ''.join(self.path.rsplit(basepath))
class CurrentPlatform:
    """Detect the running OS and expose basic host facts (user, home, release)."""
    def __init__(self):
        # Map sys.platform-style values onto short tags; unknown values
        # pass through unchanged.
        if _platform in ('linux', 'linux2'):
            self.main = 'lnx'
        elif _platform == 'darwin':
            self.main = 'mac'
        elif _platform in ('win32', 'win64'):
            # TODO: print('must create _winreg import and read ...')
            self.main = 'win'
        else:
            self.main = _platform
        # (user, domain/host) pair; the domain variable differs per OS.
        domain_var = 'USERDOMAIN' if self.main == 'win' else 'HOSTNAME'
        self.environment = os.environ.get('USERNAME'), os.environ.get(domain_var)
        self.hostname = platform.node()
        if self.main == 'lnx':
            self.homepath = os.environ.get('HOME')
        else:
            # NOTE(review): raises TypeError if HOMEDRIVE/HOMEPATH are unset
            # (both are None) -- confirm this only ever runs on Windows.
            self.homepath = os.environ.get('HOMEDRIVE') + os.environ.get('HOMEPATH')
        self.release = platform.release()
    def print_system_description(self):
        """Print a one-line OS/release summary (debug helper)."""
        # platform.version() was tried here and did not work; left out.
        print('system - {0} / release - {1}'.format(self.main, self.release))
    def get_home_dir_path(self):
        """Return the user's home directory for the detected platform."""
        if self.main == 'lnx':
            return os.environ.get('HOME')
        return os.environ.get('HOMEDRIVE') + os.environ.get('HOMEPATH')
class CurrentPlatformControl(CurrentPlatform):
    """Resolve an application's run path for this platform and execute it.

    The run path is looked up in the local Settings.sqlite database; when no
    entry exists (or the lookup fails) the raw application name is used as
    the command.
    """
    def __init__(self, application=''):
        CurrentPlatform.__init__(self)
        if application:
            self.app_name = application
            d = os.path.dirname(os.path.realpath(__file__)) + '/Settings.sqlite'
            sql = SQL.get_app_command.format(application, self.main)
            try:
                self.app_run_path = DataBaseObject(d).return_one(sql)[0]
            except Exception:
                # was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
                # still propagate.
                self.app_run_path = ''
        else:
            self.app_name = 'not_defined'
            self.app_run_path = ''
        if not self.app_run_path:
            # Fall back to the raw application name as the command.
            self.app_run_path = application
    def run_with_argument(self, arg_1='', arg_2=''):
        """Run the resolved application with one positional argument."""
        # NOTE(review): arg_2 is accepted but never passed on -- confirm intent.
        print(self.app_run_path + ' %s' % arg_1)
        call([self.app_run_path, arg_1])
    def check_output(self, arg='', timeout=2):
        """Run the application and return its combined output, or None on failure."""
        try:
            if arg:
                command_input = self.app_run_path + ' ' + arg
            else:
                command_input = self.app_run_path
            # Calls the module-level subprocess check_output, not this method.
            return check_output(command_input, stderr=STDOUT, timeout=timeout, shell=True)
        except Exception:
            # was a bare `except:`; narrowed (timeouts, missing binaries, ...).
            return None
    def run_stream(self, file_name, arg_1, arg_2):
        """Start the application as a fully piped subprocess; None if it fails."""
        try:
            return Popen([self.app_run_path, file_name, arg_1, arg_2], stdin=PIPE, stdout=PIPE, stderr=PIPE)
        except Exception:
            # was a bare `except:`; narrowed.
            return None
    def list_attached_peripherals(self):
        """List attached USB devices (WMI on Windows, `lsusb` elsewhere)."""
        if self.main == 'win':
            wmi = w32.GetObject("winmgmts:")
            for usb in wmi.InstancesOf("Win32_USBHub"):
                # NOTE(review): returns after the first hub only -- confirm intent.
                return usb.DeviceID
        else:
            # Raw string so the regex escapes reach `re` untouched.
            device_re = re.compile(r"Bus\s+(?P<bus>\d+)\s+Device\s+(?P<device>\d+).+ID\s(?P<id>\w+:\w+)\s(?P<tag>.+)$", re.I)
            df = check_output("lsusb")
            devices = []
            for i in df.split(b'\n'):
                # Match once instead of twice per line.
                # NOTE(review): str(i) on bytes yields "b'...'" -- the pattern
                # happens to tolerate it, but decoding would be cleaner; verify.
                match = device_re.match(str(i))
                if i and match:
                    dinfo = match.groupdict()
                    dinfo['device'] = '/dev/bus/usb/%s/%s' % (dinfo.pop('bus'), dinfo.pop('device'))
                    devices.append(dinfo)
            return devices
    def external_call(self, script_file=''):
        """Run the application through the shell, optionally with a script file."""
        if script_file:
            call(self.app_run_path + " " + script_file, shell=True)
        else:
            call(self.app_run_path, shell=True)
def compare_directories(dir1, dir2):
    """Walk *dir1* and report directories that are missing under *dir2*.

    NOTE(review): `found` is assigned but never read in the visible code and
    the function ends abruptly after the inner loop -- this block may be
    truncated; confirm against the full source before relying on it.
    """
    if not os.path.isdir(dir1) or not os.path.isdir(dir2):
        print('one of submitted directories do not exist, quitting...')
    else:
        found = True
        for root, directories, files in os.walk(dir1):
            # Mirror this directory's path onto the dir2 tree.
            corr = root.replace(dir1, dir2)
            if not os.path.isdir(corr):
                print('not found ' + dir2 + '/' + root)
                continue
            for filename in files:
                # NOTE(review): `filename` is a bare name, so replace() is a
                # no-op and os.path.exists checks relative to the CWD; this
                # likely should be os.path.join(corr, filename) -- confirm.
                corr_file = filename.replace(dir1, dir2)
                if not os.path.exists(corr_file):
                    found = False
| {
"repo_name": "kubow/HAC",
"path": "System/OS74.py",
"copies": "1",
"size": "14638",
"license": "unlicense",
"hash": -8456782645580089000,
"line_mean": 37.1197916667,
"line_max": 126,
"alpha_frac": 0.5403060527,
"autogenerated": false,
"ratio": 3.88688263409453,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.492718868679453,
"avg_score": null,
"num_lines": null
} |
#A platform neutral detection mechanism for the platform
#this will spit out the search and install functions for any given platform
import platform
import sys
import os
from camio_deps import *
#Uname examples
# Fedora 14:
# uname() = ('Linux', 'nf-test109.cl.cam.ac.uk', '2.6.35.14-106.fc14.x86_64', '#1 SMP Wed Nov 23 13:07:52 UTC 2011', 'x86_64', 'x86_64')
# linux_distribution() = ('Fedora', '14', 'Laughlin')
#
# MacOS X 10.8:
# uname() = ('Darwin', 'bumblebee.cl.cam.ac.uk', '12.4.0', 'Darwin Kernel Version 12.4.0: Wed May 1 17:57:12 PDT 2013; root:xnu-2050.24.15~1/RELEASE_X86_64', 'x86_64', 'i386')
# mac_ver() = ('10.8.4', ('', '', ''), 'x86_64')
#
# Debian 6:
# uname() = ('Linux', '(none)', '2..32-5-686', '#1 SMP Sun Sep 23 09:49:36 UTC 2012', 'i686', '')
# linux_distribution = ('debian', '6.0.6', '')
#
# Ubuntu 12.04:
# uname() = ('Linux', 'ubuntu', '3.2.0-38-generic', '#60-Ubuntu SMP Wed Feb 13 13:22:43 UTC 2013', 'x86_64', 'x86_64')
# linux_distrubtion() = ('Ubuntu', '12.04', 'precise')
#
# Ubuntu 13.04:
# uname() = ('Linux', 'ubuntu', '3.8.0-25-generic', '#37-Ubuntu SMP Thu Jun 6 20:47:07 UTC 2013', 'x86_64', 'x86_64')
# linux_distribution = ('Ubuntu', '13.04', 'raring')
#
# FreeBSD 9.1:
# ('FreeBSD', 'pcbsd-7337', '9.1-RELEASE', 'FreeBSD 9.1-RELEASE #2: Tue Nov 27 03:45:16 UTC 2012 root@darkstar:/usr/obj/pcbsd-build90/fbsd-source/9.1/sys/GENERIC', 'amd64', 'amd64')
#Figures out what platform we are on
def platform_install():
#Gather some platform stats
ostype = os.name
uname = platform.uname()
system = uname[0]
hostname = uname[1]
version = uname[2]
build = uname[3]
cpu = uname[4]
arch = uname[5]
#Figure out what OS we're using, make sure it's posix
if(ostype != "posix"):
print "CamIO2 does not support Operating Systems other than POSIX. Your operating system is \"" + os_type + "\"."
return
#Figure out what OS we're using
if(system == "Linux"):
print "CamIO2 Prepare: Detected system is running \"" + system + "\"..."
import camio_prepare_arch_linux
distro = platform.linux_distribution()
return camio_prepare_arch_linux.install(uname,distro)
if(system == "Darwin"):
print "CamIO2 Prepare: Detected system is running \"" + system + "\"..."
import camio_prepare_arch_darwin
mac_ver = platform.mac_ver()
return camio_prepare_arch_darwin.install(uname,mac_ver)
if(system == "FreeBSD"):
print "CamIO2 Prepare: Detected system is running \"" + system + "\"..."
import camio_prepare_arch_freebsd
return camio_prepare_arch_freebsd.install(uname )
print "CamIO2 Prepare: Could not detect Operating System. Expected to find, Linux, FreeBSD or Darwin (MacOS X), but instead found \"" + system + "\"."
print "CamIO2 Prepare: Fatal Error! Exiting now."
sys.exit(-1)
| {
"repo_name": "mgrosvenor/camio2",
"path": "old/scripts/camio_prepare_arch.py",
"copies": "2",
"size": "2916",
"license": "bsd-3-clause",
"hash": -8923015168130724000,
"line_mean": 36.8701298701,
"line_max": 185,
"alpha_frac": 0.621399177,
"autogenerated": false,
"ratio": 2.951417004048583,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4572816181048583,
"avg_score": null,
"num_lines": null
} |
"""A platform that to monitor Uptime Robot monitors."""
import logging
from pyuptimerobot import UptimeRobot
import voluptuous as vol
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_CONNECTIVITY,
PLATFORM_SCHEMA,
BinarySensorEntity,
)
from homeassistant.const import ATTR_ATTRIBUTION, CONF_API_KEY
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
ATTR_TARGET = "target"
ATTRIBUTION = "Data provided by Uptime Robot"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({vol.Required(CONF_API_KEY): cv.string})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Uptime Robot binary_sensors."""
    up_robot = UptimeRobot()
    api_key = config.get(CONF_API_KEY)
    monitors = up_robot.getMonitors(api_key)
    # Bail out early when the API call failed or reported an error.
    if not monitors or monitors.get("stat") != "ok":
        _LOGGER.error("Error connecting to Uptime Robot")
        return
    sensors = [
        UptimeRobotBinarySensor(
            api_key,
            up_robot,
            monitor["id"],
            monitor["friendly_name"],
            monitor["url"],
        )
        for monitor in monitors["monitors"]
    ]
    add_entities(sensors, True)
class UptimeRobotBinarySensor(BinarySensorEntity):
    """Connectivity sensor mirroring one Uptime Robot monitor."""

    def __init__(self, api_key, up_robot, monitor_id, name, target):
        """Store the API handle and the monitor's identity."""
        self._state = None
        self._api_key = api_key
        self._up_robot = up_robot
        self._monitor_id = str(monitor_id)
        self._name = name
        self._target = target

    @property
    def name(self):
        """Name of the monitor as configured in Uptime Robot."""
        return self._name

    @property
    def is_on(self):
        """True while the monitored target is up."""
        return self._state

    @property
    def device_class(self):
        """Report this sensor as a connectivity device."""
        return DEVICE_CLASS_CONNECTIVITY

    @property
    def device_state_attributes(self):
        """Expose attribution and the monitored target."""
        return {ATTR_ATTRIBUTION: ATTRIBUTION, ATTR_TARGET: self._target}

    def update(self):
        """Fetch the monitor's current status from the API."""
        monitor = self._up_robot.getMonitors(self._api_key, self._monitor_id)
        if not monitor or monitor.get("stat") != "ok":
            _LOGGER.warning("Failed to get new state")
            return
        # Uptime Robot status 2 means "up".
        self._state = int(monitor["monitors"][0]["status"] == 2)
| {
"repo_name": "tchellomello/home-assistant",
"path": "homeassistant/components/uptimerobot/binary_sensor.py",
"copies": "10",
"size": "2667",
"license": "apache-2.0",
"hash": -3709408380411471400,
"line_mean": 28.9662921348,
"line_max": 81,
"alpha_frac": 0.6295463067,
"autogenerated": false,
"ratio": 4.096774193548387,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00013702384214853386,
"num_lines": 89
} |
"""A platform that to monitor Uptime Robot monitors."""
import logging
from pyuptimerobot import UptimeRobot
import voluptuous as vol
from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorDevice
from homeassistant.const import ATTR_ATTRIBUTION, CONF_API_KEY
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
ATTR_TARGET = "target"
ATTRIBUTION = "Data provided by Uptime Robot"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({vol.Required(CONF_API_KEY): cv.string})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Uptime Robot binary_sensors."""
    up_robot = UptimeRobot()
    api_key = config.get(CONF_API_KEY)
    monitors = up_robot.getMonitors(api_key)
    # Bail out early when the API call failed or reported an error.
    if not monitors or monitors.get("stat") != "ok":
        _LOGGER.error("Error connecting to Uptime Robot")
        return
    sensors = [
        UptimeRobotBinarySensor(
            api_key,
            up_robot,
            monitor["id"],
            monitor["friendly_name"],
            monitor["url"],
        )
        for monitor in monitors["monitors"]
    ]
    add_entities(sensors, True)
class UptimeRobotBinarySensor(BinarySensorDevice):
    """Connectivity sensor mirroring one Uptime Robot monitor."""

    def __init__(self, api_key, up_robot, monitor_id, name, target):
        """Store the API handle and the monitor's identity."""
        self._state = None
        self._api_key = api_key
        self._up_robot = up_robot
        self._monitor_id = str(monitor_id)
        self._name = name
        self._target = target

    @property
    def name(self):
        """Name of the monitor as configured in Uptime Robot."""
        return self._name

    @property
    def is_on(self):
        """True while the monitored target is up."""
        return self._state

    @property
    def device_class(self):
        """Report this sensor as a connectivity device."""
        return "connectivity"

    @property
    def device_state_attributes(self):
        """Expose attribution and the monitored target."""
        return {ATTR_ATTRIBUTION: ATTRIBUTION, ATTR_TARGET: self._target}

    def update(self):
        """Fetch the monitor's current status from the API."""
        monitor = self._up_robot.getMonitors(self._api_key, self._monitor_id)
        if not monitor or monitor.get("stat") != "ok":
            _LOGGER.warning("Failed to get new state")
            return
        # Uptime Robot status 2 means "up".
        self._state = int(monitor["monitors"][0]["status"] == 2)
| {
"repo_name": "postlund/home-assistant",
"path": "homeassistant/components/uptimerobot/binary_sensor.py",
"copies": "4",
"size": "2612",
"license": "apache-2.0",
"hash": 8699783346436395000,
"line_mean": 29.7294117647,
"line_max": 86,
"alpha_frac": 0.6297856049,
"autogenerated": false,
"ratio": 4.132911392405063,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00027869852735038506,
"num_lines": 85
} |
"""A platform that to monitor Uptime Robot monitors."""
import logging
import voluptuous as vol
from homeassistant.components.binary_sensor import (
PLATFORM_SCHEMA, BinarySensorDevice)
from homeassistant.const import ATTR_ATTRIBUTION, CONF_API_KEY
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
ATTR_TARGET = 'target'
ATTRIBUTION = "Data provided by Uptime Robot"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_API_KEY): cv.string,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Uptime Robot binary_sensors."""
    from pyuptimerobot import UptimeRobot
    up_robot = UptimeRobot()
    api_key = config.get(CONF_API_KEY)
    monitors = up_robot.getMonitors(api_key)
    # Bail out early when the API call failed or reported an error.
    if not monitors or monitors.get('stat') != 'ok':
        _LOGGER.error("Error connecting to Uptime Robot")
        return
    sensors = [
        UptimeRobotBinarySensor(
            api_key, up_robot, monitor['id'], monitor['friendly_name'],
            monitor['url'])
        for monitor in monitors['monitors']
    ]
    add_entities(sensors, True)
class UptimeRobotBinarySensor(BinarySensorDevice):
    """Connectivity sensor mirroring one Uptime Robot monitor."""

    def __init__(self, api_key, up_robot, monitor_id, name, target):
        """Store the API handle and the monitor's identity."""
        self._state = None
        self._api_key = api_key
        self._up_robot = up_robot
        self._monitor_id = str(monitor_id)
        self._name = name
        self._target = target

    @property
    def name(self):
        """Name of the monitor as configured in Uptime Robot."""
        return self._name

    @property
    def is_on(self):
        """True while the monitored target is up."""
        return self._state

    @property
    def device_class(self):
        """Report this sensor as a connectivity device."""
        return 'connectivity'

    @property
    def device_state_attributes(self):
        """Expose attribution and the monitored target."""
        return {ATTR_ATTRIBUTION: ATTRIBUTION, ATTR_TARGET: self._target}

    def update(self):
        """Fetch the monitor's current status from the API."""
        monitor = self._up_robot.getMonitors(self._api_key, self._monitor_id)
        if not monitor or monitor.get('stat') != 'ok':
            _LOGGER.warning("Failed to get new state")
            return
        # Uptime Robot status 2 means "up".
        self._state = int(monitor['monitors'][0]['status'] == 2)
| {
"repo_name": "MartinHjelmare/home-assistant",
"path": "homeassistant/components/uptimerobot/binary_sensor.py",
"copies": "7",
"size": "2573",
"license": "apache-2.0",
"hash": 2132857675665406000,
"line_mean": 29.2705882353,
"line_max": 77,
"alpha_frac": 0.6393315196,
"autogenerated": false,
"ratio": 4.058359621451104,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 85
} |
"""A platform that to monitor Uptime Robot monitors."""
import logging
import voluptuous as vol
from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorDevice
from homeassistant.const import ATTR_ATTRIBUTION, CONF_API_KEY
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
ATTR_TARGET = "target"
ATTRIBUTION = "Data provided by Uptime Robot"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({vol.Required(CONF_API_KEY): cv.string})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Uptime Robot binary_sensors."""
    from pyuptimerobot import UptimeRobot
    up_robot = UptimeRobot()
    api_key = config.get(CONF_API_KEY)
    monitors = up_robot.getMonitors(api_key)
    # Bail out early when the API call failed or reported an error.
    if not monitors or monitors.get("stat") != "ok":
        _LOGGER.error("Error connecting to Uptime Robot")
        return
    sensors = [
        UptimeRobotBinarySensor(
            api_key,
            up_robot,
            monitor["id"],
            monitor["friendly_name"],
            monitor["url"],
        )
        for monitor in monitors["monitors"]
    ]
    add_entities(sensors, True)
class UptimeRobotBinarySensor(BinarySensorDevice):
    """Connectivity sensor mirroring one Uptime Robot monitor."""

    def __init__(self, api_key, up_robot, monitor_id, name, target):
        """Store the API handle and the monitor's identity."""
        self._state = None
        self._api_key = api_key
        self._up_robot = up_robot
        self._monitor_id = str(monitor_id)
        self._name = name
        self._target = target

    @property
    def name(self):
        """Name of the monitor as configured in Uptime Robot."""
        return self._name

    @property
    def is_on(self):
        """True while the monitored target is up."""
        return self._state

    @property
    def device_class(self):
        """Report this sensor as a connectivity device."""
        return "connectivity"

    @property
    def device_state_attributes(self):
        """Expose attribution and the monitored target."""
        return {ATTR_ATTRIBUTION: ATTRIBUTION, ATTR_TARGET: self._target}

    def update(self):
        """Fetch the monitor's current status from the API."""
        monitor = self._up_robot.getMonitors(self._api_key, self._monitor_id)
        if not monitor or monitor.get("stat") != "ok":
            _LOGGER.warning("Failed to get new state")
            return
        # Uptime Robot status 2 means "up".
        self._state = int(monitor["monitors"][0]["status"] == 2)
| {
"repo_name": "Cinntax/home-assistant",
"path": "homeassistant/components/uptimerobot/binary_sensor.py",
"copies": "3",
"size": "2616",
"license": "apache-2.0",
"hash": -817632635388466600,
"line_mean": 29.7764705882,
"line_max": 86,
"alpha_frac": 0.62882263,
"autogenerated": false,
"ratio": 4.139240506329114,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00027869852735038506,
"num_lines": 85
} |
"""A platform which allows you to get information from Tautulli."""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_API_KEY, CONF_HOST, CONF_MONITORED_CONDITIONS, CONF_NAME, CONF_PORT,
CONF_SSL, CONF_VERIFY_SSL, CONF_PATH)
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
CONF_MONITORED_USERS = 'monitored_users'
DEFAULT_NAME = 'Tautulli'
DEFAULT_PORT = '8181'
DEFAULT_PATH = ''
DEFAULT_SSL = False
DEFAULT_VERIFY_SSL = True
TIME_BETWEEN_UPDATES = timedelta(seconds=10)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_MONITORED_CONDITIONS):
vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_MONITORED_USERS): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.string,
vol.Optional(CONF_PATH, default=DEFAULT_PATH): cv.string,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
})
async def async_setup_platform(
        hass, config, async_add_entities, discovery_info=None):
    """Create the Tautulli sensor."""
    from pytautulli import Tautulli
    # Pull everything we need out of the validated config first.
    api_key = config[CONF_API_KEY]
    host = config[CONF_HOST]
    name = config.get(CONF_NAME)
    port = config.get(CONF_PORT)
    path = config.get(CONF_PATH)
    use_ssl = config.get(CONF_SSL)
    verify_ssl = config.get(CONF_VERIFY_SSL)
    monitored_conditions = config.get(CONF_MONITORED_CONDITIONS)
    user = config.get(CONF_MONITORED_USERS)
    session = async_get_clientsession(hass, verify_ssl)
    api = Tautulli(host, port, api_key, hass.loop, session, use_ssl, path)
    tautulli = TautulliData(api)
    if not await tautulli.test_connection():
        raise PlatformNotReady
    async_add_entities(
        [TautulliSensor(tautulli, name, monitored_conditions, user)], True)
class TautulliSensor(Entity):
    """Sensor exposing the current Tautulli stream count and activity."""

    def __init__(self, tautulli, name, monitored_conditions, users):
        """Remember the data source and which users/conditions to report."""
        self.tautulli = tautulli
        self.monitored_conditions = monitored_conditions
        self.usernames = users
        self.sessions = {}
        self.home = {}
        self._attributes = {}
        self._name = name
        self._state = None

    async def async_update(self):
        """Refresh home stats, session data and per-user activity."""
        await self.tautulli.async_update()
        self.home = self.tautulli.api.home_data
        self.sessions = self.tautulli.api.session_data
        self._attributes['Top Movie'] = self.home.get('movie')
        self._attributes['Top TV Show'] = self.home.get('tv')
        self._attributes['Top User'] = self.home.get('user')
        # Copy every session stat except the raw session lists.
        for key, value in self.sessions.items():
            if 'sessions' in key:
                continue
            self._attributes[key] = value
        for user in self.tautulli.api.users:
            if self.usernames is not None and user not in self.usernames:
                continue
            userdata = self.tautulli.api.user_data
            self._attributes[user] = {'Activity': userdata[user]['Activity']}
            if not self.monitored_conditions:
                continue
            for key in self.monitored_conditions:
                try:
                    self._attributes[user][key] = userdata[user][key]
                except (KeyError, TypeError):
                    self._attributes[user][key] = ''

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the number of active streams."""
        return self.sessions.get('stream_count')

    @property
    def icon(self):
        """Return the icon of the sensor."""
        return 'mdi:plex'

    @property
    def device_state_attributes(self):
        """Return attributes for the sensor."""
        return self._attributes
class TautulliData:
    """Thin wrapper around the Tautulli API with throttled refreshes."""

    def __init__(self, api):
        """Keep a reference to the API client."""
        self.api = api

    @Throttle(TIME_BETWEEN_UPDATES)
    async def async_update(self):
        """Fetch fresh data from Tautulli (rate limited)."""
        await self.api.get_data()

    async def test_connection(self):
        """Probe the Tautulli server and return its connection status."""
        await self.api.test_connection()
        return self.api.connection
| {
"repo_name": "MartinHjelmare/home-assistant",
"path": "homeassistant/components/tautulli/sensor.py",
"copies": "5",
"size": "4982",
"license": "apache-2.0",
"hash": -7034662715303056000,
"line_mean": 33.5972222222,
"line_max": 79,
"alpha_frac": 0.6493376154,
"autogenerated": false,
"ratio": 3.841171935235158,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 144
} |
"""A platform which allows you to get information from Tautulli."""
from datetime import timedelta
from pytautulli import Tautulli
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import (
CONF_API_KEY,
CONF_HOST,
CONF_MONITORED_CONDITIONS,
CONF_NAME,
CONF_PATH,
CONF_PORT,
CONF_SSL,
CONF_VERIFY_SSL,
)
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
CONF_MONITORED_USERS = "monitored_users"
DEFAULT_NAME = "Tautulli"
DEFAULT_PORT = "8181"
DEFAULT_PATH = ""
DEFAULT_SSL = False
DEFAULT_VERIFY_SSL = True
TIME_BETWEEN_UPDATES = timedelta(seconds=10)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_MONITORED_CONDITIONS): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_MONITORED_USERS): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.string,
vol.Optional(CONF_PATH, default=DEFAULT_PATH): cv.string,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Create the Tautulli sensor."""
    # Pull everything we need out of the validated config first.
    api_key = config[CONF_API_KEY]
    host = config[CONF_HOST]
    use_ssl = config[CONF_SSL]
    name = config.get(CONF_NAME)
    port = config.get(CONF_PORT)
    path = config.get(CONF_PATH)
    verify_ssl = config.get(CONF_VERIFY_SSL)
    monitored_conditions = config.get(CONF_MONITORED_CONDITIONS)
    user = config.get(CONF_MONITORED_USERS)
    session = async_get_clientsession(hass, verify_ssl)
    api = Tautulli(host, port, api_key, hass.loop, session, use_ssl, path)
    tautulli = TautulliData(api)
    if not await tautulli.test_connection():
        raise PlatformNotReady
    async_add_entities(
        [TautulliSensor(tautulli, name, monitored_conditions, user)], True)
class TautulliSensor(SensorEntity):
    """Sensor exposing the current Tautulli stream count and activity."""

    def __init__(self, tautulli, name, monitored_conditions, users):
        """Remember the data source and which users/conditions to report."""
        self.tautulli = tautulli
        self.monitored_conditions = monitored_conditions
        self.usernames = users
        self.sessions = {}
        self.home = {}
        self._attributes = {}
        self._name = name
        self._state = None

    async def async_update(self):
        """Refresh home stats, session data and per-user activity."""
        await self.tautulli.async_update()
        self.home = self.tautulli.api.home_data
        self.sessions = self.tautulli.api.session_data
        self._attributes["Top Movie"] = self.home.get("movie")
        self._attributes["Top TV Show"] = self.home.get("tv")
        self._attributes["Top User"] = self.home.get("user")
        # Copy every session stat except the raw session lists.
        for key, value in self.sessions.items():
            if "sessions" in key:
                continue
            self._attributes[key] = value
        for user in self.tautulli.api.users:
            if self.usernames is not None and user not in self.usernames:
                continue
            userdata = self.tautulli.api.user_data
            self._attributes[user] = {"Activity": userdata[user]["Activity"]}
            if not self.monitored_conditions:
                continue
            for key in self.monitored_conditions:
                try:
                    self._attributes[user][key] = userdata[user][key]
                except (KeyError, TypeError):
                    self._attributes[user][key] = ""

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the number of active streams."""
        return self.sessions.get("stream_count")

    @property
    def icon(self):
        """Return the icon of the sensor."""
        return "mdi:plex"

    @property
    def unit_of_measurement(self):
        """Return the unit this state is expressed in."""
        return "Watching"

    @property
    def extra_state_attributes(self):
        """Return attributes for the sensor."""
        return self._attributes
class TautulliData:
    """Thin wrapper around the Tautulli API with throttled refreshes."""

    def __init__(self, api):
        """Keep a reference to the API client."""
        self.api = api

    @Throttle(TIME_BETWEEN_UPDATES)
    async def async_update(self):
        """Fetch fresh data from Tautulli (rate limited)."""
        await self.api.get_data()

    async def test_connection(self):
        """Probe the Tautulli server and return its connection status."""
        await self.api.test_connection()
        return self.api.connection
| {
"repo_name": "home-assistant/home-assistant",
"path": "homeassistant/components/tautulli/sensor.py",
"copies": "5",
"size": "5085",
"license": "apache-2.0",
"hash": -8394826887668499000,
"line_mean": 32.2352941176,
"line_max": 86,
"alpha_frac": 0.6389380531,
"autogenerated": false,
"ratio": 3.8581183611532626,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00022995835096958136,
"num_lines": 153
} |
""" A playful implementation of the famous "German Tank Problem" in statistics.
First, the random number generator populates a list of "tanks", represented
by sequential serial numbers. The numbers are added to the list in random
order until they run out.
We then choose the sample size, representing the number of tanks we have
captured in battle and whose serial number we have been able to observe.
The program then retrieves an amount of random serial numbers equal to our
specified sample size, and attempts to estimate how many tanks there are
in total.
The formula can make fairly accurate estimates with relatively small sample
sizes, providing the serial numbers sampled are reasonably random.
"""
from random import sample
def generate_serials(total, samplesize):
    """Build serial numbers 1..total and return a random sample of size
    *samplesize* (the "captured" tanks).
    """
    pool = list(range(1, total + 1))
    return sample(pool, samplesize)
def estimate_tanks(sample):
    """Estimate the total tank count from a sample of serial numbers.

    Uses the classic estimator m + m/k - 1, where m is the largest serial
    observed and k the sample size, rounded to the nearest integer.
    (The maximum is now computed once instead of twice.)
    """
    largest = max(sample)
    estimate = largest + (largest / len(sample)) - 1
    return round(estimate)
def experiment(realtanks, samplesize):
    """Simulate one German-tank-problem run.

    Create a virtual tank army of *realtanks* serials, sample *samplesize*
    of them, estimate the army size from the sample and print how far off
    the estimate is.
    """
    observed = generate_serials(realtanks, samplesize)
    guess = estimate_tanks(observed)
    print("GERMAN TANK PROBLEM\n")
    print("Actual number of tanks: {}".format(realtanks))
    print("Sample size: {}".format(samplesize))
    print("Serial numbers sampled:")
    print(observed)
    print("-----")
    print("Estimated number of tanks: {}".format(guess))
    # Relative error of the estimate, as a percentage.
    percentageoff = round(abs(realtanks - guess) / realtanks * 100, 2)
    print("Error: {}%".format(percentageoff))
experiment(1500, 20) | {
"repo_name": "doolanshire/Combat-Models",
"path": "germantank/germantankproblem.py",
"copies": "1",
"size": "2069",
"license": "mit",
"hash": 6782802843258085000,
"line_mean": 34.6896551724,
"line_max": 79,
"alpha_frac": 0.6964717255,
"autogenerated": false,
"ratio": 4.231083844580777,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5427555570080776,
"avg_score": null,
"num_lines": null
} |
"""A play local music files example
To use the script:
* Make sure soco is installed
* Drop this script into a folder that, besides python files, contains
nothing but music files
* Adjust the settings on the first three lines of the main function
* Run the script
"""
from __future__ import print_function, unicode_literals
import os
import time
from threading import Thread
from random import choice
try:
# Python 3
from urllib.parse import quote
from http.server import SimpleHTTPRequestHandler
from socketserver import TCPServer
print('Running as python 3')
except ImportError:
# Python 2
from urllib import quote
from SimpleHTTPServer import SimpleHTTPRequestHandler
from SocketServer import TCPServer
print('Running as python 2')
import soco
class HttpServer(Thread):
    """A minimal HTTP file server for the current folder, running on its own
    daemon thread."""

    def __init__(self, port):
        super(HttpServer, self).__init__()
        # Daemon thread: dies automatically with the main thread.
        self.daemon = True
        self.httpd = TCPServer(("", port), SimpleHTTPRequestHandler)

    def run(self):
        """Start the server"""
        print('Start HTTP server')
        self.httpd.serve_forever()

    def stop(self):
        """Stop the server"""
        print('Stop HTTP server')
        self.httpd.socket.close()
def add_random_file_from_present_folder(machine_ip, port, zone_name):
    """Add a random non-py file from this folder and subfolders to soco"""
    # Collect every file below the current folder whose extension does not
    # start with .py; adapt this filter for other purposes.
    music_files = []
    print('Looking for music files')
    for path, dirs, files in os.walk('.'):
        for file_ in files:
            if os.path.splitext(file_)[1].startswith('.py'):
                continue
            music_files.append(os.path.relpath(os.path.join(path, file_)))
            print('Found:', music_files[-1])
    chosen = choice(music_files)
    # urlencode all the path parts (but not the /'s)
    encoded_parts = [quote(part) for part in os.path.split(chosen)]
    chosen = os.path.join(*encoded_parts)
    print('\nPlaying random file:', chosen)
    netpath = 'http://{}:{}/{}'.format(machine_ip, port, chosen)
    # Find the zone with the requested name and queue the track there.
    for zone in soco.discover():
        if zone.player_name == zone_name:
            break
    number_in_queue = zone.add_uri_to_queue(netpath)
    zone.play_from_queue(number_in_queue)
def main():
    """Start a local HTTP server and queue a random track on a Sonos zone."""
    # Hard-coded settings; edit these three lines to match your setup.
    machine_ip = '192.168.0.25'
    port = 8000
    zone_name = 'Stue'  # Danish for living room
    # Serve the current folder over HTTP from a background daemon thread.
    server = HttpServer(port)
    server.start()
    # With the server up, files can be queued in any way desired;
    # add_random_file_from_present_folder is just one example of how to
    # format the URLs.
    try:
        add_random_file_from_present_folder(machine_ip, port, zone_name)
        # The HTTP server runs in a daemonized thread, so the main thread
        # must be kept alive. Sleep for roughly 3 years.
        time.sleep(10**8)
    except KeyboardInterrupt:
        server.stop()
main()
| {
"repo_name": "bwhaley/SoCo",
"path": "examples/play_local_files/play_local_files.py",
"copies": "12",
"size": "3285",
"license": "mit",
"hash": -5350110950678847000,
"line_mean": 29.9905660377,
"line_max": 78,
"alpha_frac": 0.6566210046,
"autogenerated": false,
"ratio": 3.967391304347826,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""A play local music files example
To use the script:
* Make sure soco is installed
* Drop this script into a folder that, besides python files, contains
nothing but music files
* Choose which player to use and run the script at the command line as such:
play_local_files.py "Living Room"
NOTE: The script has been changed from the earlier version, where the
settings were written directly into the file. They now have to be
given at the command line instead. But, it should only be necessary to
supply the zone name. The local machine IP should be autodetected.
"""
from __future__ import print_function, unicode_literals
import os
import sys
import time
import socket
from threading import Thread
from random import choice
try:
# Python 3
from urllib.parse import quote
from http.server import SimpleHTTPRequestHandler
from socketserver import TCPServer
print("Running as python 3")
except ImportError:
# Python 2
from urllib import quote
from SimpleHTTPServer import SimpleHTTPRequestHandler
from SocketServer import TCPServer
print("Running as python 2")
from soco.discovery import by_name, discover
class HttpServer(Thread):
    """Serve the current working directory over HTTP from its own thread."""

    def __init__(self, port):
        """Bind a TCP server on *port* serving files from the CWD."""
        super(HttpServer, self).__init__()
        # Daemon thread so it cannot keep the interpreter alive on exit.
        self.daemon = True
        # "" binds on all local interfaces.
        self.httpd = TCPServer(("", port), SimpleHTTPRequestHandler)

    def run(self):
        """Start the server"""
        print("Start HTTP server")
        self.httpd.serve_forever()

    def stop(self):
        """Stop the server"""
        print("Stop HTTP server")
        # Closing the listening socket unblocks serve_forever().
        self.httpd.socket.close()
def add_random_file_from_present_folder(machine_ip, port, zone):
    """Queue and play one randomly chosen non-python file from this folder.

    Walks the current folder and its subfolders, picks one file whose
    extension does not start with ".py", exposes it via the local HTTP
    server URL and plays it on *zone*.
    """
    # Collect candidate files; the ".py" prefix test is a crude filter
    # that skips python sources and bytecode. This will probably need
    # modification for other purposes.
    music_files = []
    print("Looking for music files")
    for dirpath, _dirnames, filenames in os.walk("."):
        for filename in filenames:
            if os.path.splitext(filename)[1].startswith(".py"):
                continue
            music_files.append(os.path.relpath(os.path.join(dirpath, filename)))
            print("Found:", music_files[-1])
    random_file = choice(music_files)
    # Percent-encode each path component while leaving the separators alone.
    encoded_parts = [quote(part) for part in os.path.split(random_file)]
    random_file = os.path.join(*encoded_parts)
    print("\nPlaying random file:", random_file)
    netpath = "http://{}:{}/{}".format(machine_ip, port, random_file)
    queue_position = zone.add_uri_to_queue(netpath)
    # play_from_queue indexes are 0-based
    zone.play_from_queue(queue_position - 1)
def detect_ip_address():
    """Return the local ip-address"""
    # Hackish but effective recipe (https://stackoverflow.com/a/166589):
    # "connecting" a UDP socket to a public address selects the outgoing
    # interface without actually sending any traffic.
    probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        probe.connect(("8.8.8.8", 80))
        return probe.getsockname()[0]
    finally:
        probe.close()
def parse_args():
    """Parse the command line arguments.

    Returns:
        argparse.Namespace: with ``zone`` (str), ``port`` (int) and
        ``ip`` (str) attributes.
    """
    import argparse

    description = "Play local files with Sonos by running a local web server"
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument("zone", help="The name of the zone to play from")
    parser.add_argument(
        "--port",
        default=8000,
        # Without type=int a CLI-supplied port stays a string and
        # TCPServer(("", port), ...) fails when binding the socket.
        type=int,
        help="The local machine port to run the web server on",
    )
    parser.add_argument(
        "--ip",
        default=detect_ip_address(),
        help="The local IP address of this machine. By "
        "default it will attempt to autodetect it.",
    )
    return parser.parse_args()
def main():
    """Parse the CLI arguments, start the HTTP server and play a file."""
    # Settings
    args = parse_args()
    print(
        " Will use the following settings:\n"
        " Zone: {args.zone}\n"
        " IP of this machine: {args.ip}\n"
        " Use port: {args.port}".format(args=args)
    )
    # Get the zone
    zone = by_name(args.zone)
    # Check if a zone by the given name was found
    if zone is None:
        zone_names = [zone_.player_name for zone_ in discover()]
        print(
            "No Sonos player named '{}'. Player names are {}".format(
                args.zone, zone_names
            )
        )
        sys.exit(1)
    # Check whether the zone is a coordinator (stand alone zone or
    # master of a group)
    if not zone.is_coordinator:
        print(
            "The zone '{}' is not a group master, and therefore cannot "
            "play music. Please use '{}' instead".format(
                args.zone, zone.group.coordinator.player_name
            )
        )
        sys.exit(2)
    # Setup and start the http server
    server = HttpServer(args.port)
    server.start()
    # When the http server is setup you can really add your files in
    # any way that is desired. The source code for
    # add_random_file_from_present_folder is just an example, but it may be
    # helpful in figuring out how to format the urls
    try:
        add_random_file_from_present_folder(args.ip, args.port, zone)
        # Remember the http server runs in its own daemonized thread, so it
        # is necessary to keep the main thread alive. So sleep for 3 years.
        time.sleep(10 ** 8)
    except KeyboardInterrupt:
        server.stop()
main()
| {
"repo_name": "KennethNielsen/SoCo",
"path": "examples/play_local_files/play_local_files.py",
"copies": "1",
"size": "5396",
"license": "mit",
"hash": 3533822638462514700,
"line_mean": 30.0114942529,
"line_max": 85,
"alpha_frac": 0.647516679,
"autogenerated": false,
"ratio": 3.9473299195318217,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5094846598531821,
"avg_score": null,
"num_lines": null
} |
"""aplicaciones_informaticas URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from rest_framework import routers
from django.contrib import admin
from backend import views
# Auto-discover admin modules from all installed apps.
admin.autodiscover()

# DRF router generating the standard CRUD routes for each registered viewset.
router = routers.DefaultRouter()
router.register(r'healthcenters', views.HealthCenterViewSet)
router.register(r'queues', views.AtentionQueueViewSet)
# NOTE(review): `base_name` was renamed to `basename` in DRF 3.9+; this
# spelling implies an older DRF release -- confirm before upgrading.
router.register(r'specialties', views.SpecialtyViewSet, base_name='Specialty')
router.register(r'triagescalelevel', views.TriageScaleLevelViewSet, base_name='TriageScaleLevel')

# Explicit routes mapping HTTP verbs onto custom viewset actions; all of
# them live under the /api/v1/ prefix alongside the router-generated URLs.
urlpatterns = [
    url(r'^api/v1/', include(router.urls)),
    url(r'^admin/', admin.site.urls),
    # One attention queue of a given health center.
    url(r'^api/v1/hospitals/(?P<hc_id>[-\w]+)/queues/(?P<queue_id>\d+)$',
        views.AtentionQueueViewSet.as_view({'get': 'get_one_for_hc'})),
    # All attention queues of a given health center.
    url(r'^api/v1/hospitals/(?P<hc_id>[-\w]+)/queues/?$',
        views.AtentionQueueViewSet.as_view({'get': 'get_all_for_hc'})),
    # Patients within a queue: POST enqueues, GET lists.
    url(r'^api/v1/hospitals/(?P<hc_id>[-\w]+)/queue/(?P<queue_id>\d+)/patients?$',
        views.AtentionQueueViewSet.as_view({'post': 'add_patient', 'get': 'get_all_patients'})),
    url(r'^api/v1/hospitals/(?P<hc_id>[-\w]+)/queue/(?P<queue_id>\d+)/patients?/(?P<patient_id>\d+)$',
        views.AtentionQueueViewSet.as_view({'get': 'get_patient', 'delete':'delete_patient'})),
    # Recommendation engine: request and select a recommendation.
    url(r'^api/v1/hospitals/recommendation$',
        views.RecommendationEngineViewSet.as_view({'post':'get_recommendation'})),
    url(r'^api/v1/hospitals/recommendation/select/(?P<hc_id>[-\w]+)/queue/(?P<queue_id>\d+)/',
        views.RecommendationEngineViewSet.as_view({'post':'select_recommendation'})),
    # Ratings and statistics endpoints.
    url(r'^api/v1/hospitals/(?P<hc_id>[-\w]+)/rate',
        views.HealthCenterViewSet.as_view({'post':'rate'})),
    url(r'^api/v1/hospitals/avg',
        views.HealthCenterViewSet.as_view({'get':'get_average_wait'})),
    url(r'^api/v1/hospitals/stats/(?P<hc_id>[-\w]+)/count$',
        views.HealthCenterViewSet.as_view({'get':'patient_count_stats'})),
    url(r'^api/v1/hospitals/stats/(?P<hc_id>[-\w]+)/count/per_specialty',
        views.ReportsViewSet.as_view({'get':'patient_percentage_per_specialty'})),
    url(r'^api/v1/hospitals/(?P<hc_id>[-\w]+)/feed',
        views.ReportsViewSet.as_view({'get':'get_feed'})),
    url(r'^api/v1/hospitals/stats/(?P<hc_id>[-\w]+)/delete_reason$',
        views.ReportsViewSet.as_view({'get':'patient_delete_reason'})),
    url(r'^api/v1/hospitals/statistics/attention_per_hour',
        views.ReportsViewSet.as_view({'get':'get_attention_per_hour'}))
]
| {
"repo_name": "awainer/7539",
"path": "aplicaciones_informaticas/aplicaciones_informaticas/urls.py",
"copies": "1",
"size": "3216",
"license": "unlicense",
"hash": 4899393259536788000,
"line_mean": 50.8709677419,
"line_max": 104,
"alpha_frac": 0.6486318408,
"autogenerated": false,
"ratio": 3.0952839268527432,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42439157676527434,
"avg_score": null,
"num_lines": null
} |
# Application for building cars by
# assembling their parts (engine, chassis, etc.)
class AbstractFactory(object):
    """Interface for factories that assemble the parts of a car."""

    def crearCoche(self, **args):
        """Create a car; concrete factories must override this."""
        raise NotImplementedError("Requires derived factory class for implementation.")

    def crearMotor(self, **args):
        """Create an engine; concrete factories must override this."""
        raise NotImplementedError("Requires derived factory class for implementation.")

    def crearChasis(self, **args):
        """Create a chassis; concrete factories must override this."""
        raise NotImplementedError("Requires derived factory class for implementation.")
class CocheFord(object):
    """Concrete Ford car product."""

    def do_somthing(self):
        """Announce the creation of a Ford car."""
        print("Nuevo coche Ford")
class MotorFord(object):
    """Concrete Ford engine product."""

    def do_somthing(self):
        """Announce the creation of a Ford engine."""
        print("Nuevo Motor Ford")
class ChasisFord(object):
    """Concrete Ford chassis product."""

    def do_somthing(self):
        """Announce the creation of a Ford chassis."""
        print("Nuevo Chasis Ford")
class ConcreteFactory(AbstractFactory):
    """Factory producing the Ford family of car parts."""

    def crearCoche(self):
        """Return a new Ford car."""
        return CocheFord()

    def crearMotor(self):
        """Return a new Ford engine."""
        return MotorFord()

    def crearChasis(self):
        """Return a new Ford chassis."""
        return ChasisFord()
class Client(object):
    """Drives a factory to assemble all the parts of a car."""

    def __init__(self, factory):
        # The factory supplies the concrete product family.
        self.factory = factory

    def use_a_product(self):
        """Ask the user for a brand and build the matching car parts."""
        brand = input('Tipo:')
        if brand != 'ford':
            # Only the Ford family is supported; anything else is ignored.
            return
        car = self.factory.crearCoche()
        car.do_somthing()
        engine = self.factory.crearMotor()
        engine.do_somthing()
        chassis = self.factory.crearChasis()
        chassis.do_somthing()
def main():
    """Wire the concrete Ford factory to a client and exercise it."""
    client = Client(ConcreteFactory())
    client.use_a_product()
# Standard script entry point.
if __name__ == "__main__":
    main()
| {
"repo_name": "AnhellO/DAS_Sistemas",
"path": "Ago-Dic-2018/David Perez/Practica3_y_exposicion/AbstractFactory1.py",
"copies": "1",
"size": "1521",
"license": "mit",
"hash": 1206751590669739300,
"line_mean": 32.7777777778,
"line_max": 87,
"alpha_frac": 0.65,
"autogenerated": false,
"ratio": 3.6104513064133017,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9680161923784981,
"avg_score": 0.016057876525664063,
"num_lines": 45
} |
"""A PLOT3D file reader. This reader does not support a timeseries of
files.
"""
# Author: Prabhu Ramachandran <prabhu@aero.iitb.ac.in>
# Copyright (c) 2007-2015, Enthought, Inc.
# License: BSD Style.
# Standard library imports.
from os.path import basename, isfile, exists, splitext
# Enthought library imports.
from traits.api import Trait, Instance, Str, TraitPrefixMap, Button
from traitsui.api import View, Group, Item, FileEditor
from tvtk.api import tvtk
from apptools.persistence.state_pickler import set_state
from apptools.persistence.file_path import FilePath
# Local imports.
from mayavi.core.source import Source
from mayavi.core.common import handle_children_state, error
from mayavi.core.pipeline_info import PipelineInfo
########################################################################
# `PLOT3DReader` class
########################################################################
class PLOT3DReader(Source):
    """A PLOT3D file reader. This reader does not support a
    timeseries of files.
    """

    # The version of this class. Used for persistence.
    __version__ = 0

    # XYZ file name
    xyz_file_name = Str('', desc='the XYZ file')

    # The (optional) Q file.
    q_file_name = Str('', desc='the Q file')

    # The active scalar name.  The integers are the PLOT3D scalar
    # function numbers handed to the VTK reader.
    scalars_name = Trait('density',
                         TraitPrefixMap({'density': 100,
                                         'pressure': 110,
                                         'temperature': 120,
                                         'enthalpy': 130,
                                         'internal energy': 140,
                                         'kinetic energy': 144,
                                         'velocity magnitude': 153,
                                         'stagnation energy': 163,
                                         'entropy': 170,
                                         'swirl': 184}),
                         desc='scalar data attribute to show')

    # The active vector name.  The integers are the PLOT3D vector
    # function numbers handed to the VTK reader.
    vectors_name = Trait('momentum',
                         TraitPrefixMap({'velocity': 200,
                                         'vorticity': 201,
                                         'momentum': 202,
                                         'pressure gradient': 210}),
                         desc='vector data attribute to show')

    # The VTK data file reader.
    reader = Instance(tvtk.MultiBlockPLOT3DReader, args=(), allow_none=False,
                      record=True)

    # Information about what this object can produce.
    output_info = PipelineInfo(datasets=['structured_grid'])

    ########################################
    # View related code.

    update_reader = Button('Update Reader')

    # Our view.
    view = View(Group(Item('xyz_file_name', editor=FileEditor()),
                      Item('q_file_name', editor=FileEditor()),
                      Item(name='scalars_name',
                           enabled_when='len(object.q_file_name) > 0'),
                      Item(name='vectors_name',
                           enabled_when='len(object.q_file_name)>0'),
                      Item(name='update_reader'),
                      label='Reader',
                      ),
                Group(Item(name='reader', style='custom',
                           resizable=True),
                      show_labels=False,
                      label='PLOT3DReader'
                      ),
                resizable=True)

    ########################################
    # Private traits.

    # The current file paths. This is not meant to be touched by the
    # user.
    xyz_file_path = Instance(FilePath, args=(), desc='the current XYZ file path')
    q_file_path = Instance(FilePath, args=(), desc='the current Q file path')

    ######################################################################
    # `object` interface
    ######################################################################
    def __get_pure_state__(self):
        """Return the picklable state, minus dynamically created traits."""
        d = super(PLOT3DReader, self).__get_pure_state__()
        # These traits are dynamically created.
        for name in ('scalars_name', 'vectors_name', 'xyz_file_name',
                     'q_file_name'):
            d.pop(name, None)
        return d

    def __set_pure_state__(self, state):
        """Restore the pickled state; requires the XYZ file to still exist."""
        xyz_fn = state.xyz_file_path.abs_pth
        q_fn = state.q_file_path.abs_pth
        if not isfile(xyz_fn):
            msg = 'Could not find file at %s\n'%xyz_fn
            msg += 'Please move the file there and try again.'
            raise IOError(msg)
        # Setup the reader state.
        set_state(self, state, first=['reader'], ignore=['*'])
        # Initialize the files.
        self.initialize(xyz_fn, q_fn, configure=False)
        # Now set the remaining state without touching the children.
        set_state(self, state, ignore=['children', 'xyz_file_path', 'q_file_path'])
        # Setup the children.
        handle_children_state(self.children, state.children)
        # Setup the children's state.
        set_state(self, state, first=['children'], ignore=['*'])

    ######################################################################
    # `FileDataSource` interface
    ######################################################################
    def initialize(self, xyz_file_name, q_file_name='', configure=True):
        """Given an xyz filename and a Q filename which may or may not
        be part of a time series, this initializes the list of files.
        This method need not be called to initialize the data.

        If configure is True, it pops up a UI to configure the
        PLOT3DReader.
        """
        if len(q_file_name) == 0:
            # Look for a sibling <base>.q file next to the XYZ file.
            base = splitext(xyz_file_name)[0]
            qf = base + '.q'
            if exists(qf):
                q_file_name = qf

        if configure:
            # First set properties of the reader. This is useful when
            # the data format has atypical defaults. Automatic
            # detection can be disastrous sometimes due to VTK related
            # problems.
            self.reader.edit_traits(kind='livemodal')
        self.xyz_file_name = xyz_file_name
        if len(q_file_name) > 0:
            self.q_file_name = q_file_name

    def update(self):
        """Re-run the reader and refresh the scene, if a file is set."""
        if len(self.xyz_file_path.get()) == 0:
            return
        self.reader.update()
        self.render()

    def has_output_port(self):
        """ Return True as the reader has output port."""
        return True

    def get_output_object(self):
        """ Return the reader output port."""
        return self.reader.output_port

    ######################################################################
    # Non-public interface
    ######################################################################
    def _xyz_file_name_changed(self, value):
        # Propagate the new XYZ file to the VTK reader and re-read.
        if len(value) == 0:
            return
        else:
            self.reader.xyz_file_name = value
            self.xyz_file_path.set(value)
            self._update_reader_output()

    def _q_file_name_changed(self, value):
        # Propagate the new Q file to the VTK reader and re-read.
        if len(value) == 0:
            return
        else:
            self.reader.q_file_name = value
            self.q_file_path.set(value)
            self._update_reader_output()

    def _update_reader_output(self):
        """Re-read the files, retrying several reader configurations
        (IBlanking on/off, binary/ascii) before asking the user."""
        r = self.reader
        r.update()

        if r.error_code != 0:
            try:
                self.reader.i_blanking = True
            except AttributeError:
                pass
            else:
                r.update()

        # Try reading file.
        if r.error_code != 0:
            # No output so the file might be an ASCII file.
            try:
                # Turn off IBlanking.
                r.set(i_blanking=False, binary_file=False)
            except AttributeError:
                pass
            else:
                r.update()

        # Try again this time as ascii and with blanking.
        if r.error_code != 0:
            # No output so the file might be an ASCII file.
            try:
                # Turn on IBlanking.
                r.i_blanking = True
            except AttributeError:
                pass
            else:
                r.update()

        # If there still is an error, ask the user.
        if r.error_code != 0:
            r.edit_traits(kind='livemodal')
            r.update()

        # If there still is an error, ask the user to retry.
        if r.error_code != 0:
            msg = 'Unable to read file properly. '\
                  'Please check the settings of the reader '\
                  'on the UI and press the "Update Reader" button '\
                  'when done and try again!'
            error(msg)
            return

        # Now setup the outputs by resetting self.outputs. Changing
        # the outputs automatically fires a pipeline_changed event.
        try:
            n = r.get_output().number_of_blocks
        except AttributeError: # for VTK >= 4.5
            n = r.number_of_outputs
        outputs = []
        for i in range(n):
            outputs.append(r.get_output().get_block(i))
        self.outputs = outputs

        # Fire data_changed just in case the outputs are not
        # really changed. This can happen if the dataset is of
        # the same type as before.
        self.data_changed = True

        # Change our name on the tree view
        self.name = self._get_name()

    def _scalars_name_changed(self, value):
        # scalars_name_ is the mapped integer function number.
        self.reader.scalar_function_number = self.scalars_name_
        self.reader.modified()
        self.update()
        self.data_changed = True

    def _vectors_name_changed(self, value):
        # vectors_name_ is the mapped integer function number.
        self.reader.vector_function_number = self.vectors_name_
        self.reader.modified()
        self.update()
        self.data_changed = True

    def _update_reader_fired(self):
        # Handler for the "Update Reader" button in the view.
        self.reader.modified()
        self._update_reader_output()
        self.pipeline_changed = True

    def _get_name(self):
        """ Gets the name to display on the tree view.
        """
        xyz_fname = basename(self.xyz_file_path.get())
        q_fname = basename(self.q_file_path.get())
        if len(self.q_file_name) > 0:
            ret = "PLOT3D:%s, %s"%(xyz_fname, q_fname)
        else:
            ret = "PLOT3D:%s"%(xyz_fname)
        if '[Hidden]' in self.name:
            ret += ' [Hidden]'
        return ret
| {
"repo_name": "dmsurti/mayavi",
"path": "mayavi/sources/plot3d_reader.py",
"copies": "1",
"size": "10421",
"license": "bsd-3-clause",
"hash": 2697921063207734000,
"line_mean": 34.9344827586,
"line_max": 83,
"alpha_frac": 0.4981287784,
"autogenerated": false,
"ratio": 4.428814279643009,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000850964929697291,
"num_lines": 290
} |
"""A PLOT3D file reader. This reader does not support a timeseries of
files.
"""
# Author: Prabhu Ramachandran <prabhu@aero.iitb.ac.in>
# Copyright (c) 2007, Enthought, Inc.
# License: BSD Style.
# Standard library imports.
from os.path import basename, isfile, exists, splitext
# Enthought library imports.
from traits.api import Trait, Instance, Str, TraitPrefixMap, Button
from traitsui.api import View, Group, Item, FileEditor
from tvtk.api import tvtk
from apptools.persistence.state_pickler import set_state
from apptools.persistence.file_path import FilePath
# Local imports.
from mayavi.core.source import Source
from mayavi.core.common import handle_children_state, error
from mayavi.core.pipeline_info import PipelineInfo
########################################################################
# `PLOT3DReader` class
########################################################################
class PLOT3DReader(Source):
    """A PLOT3D file reader. This reader does not support a
    timeseries of files.
    """

    # The version of this class. Used for persistence.
    __version__ = 0

    # XYZ file name
    xyz_file_name = Str('', desc='the XYZ file')

    # The (optional) Q file.
    q_file_name = Str('', desc='the Q file')

    # The active scalar name.  The integers are the PLOT3D scalar
    # function numbers handed to the VTK reader.
    scalars_name = Trait('density',
                         TraitPrefixMap({'density': 100,
                                         'pressure': 110,
                                         'temperature': 120,
                                         'enthalpy': 130,
                                         'internal energy': 140,
                                         'kinetic energy': 144,
                                         'velocity magnitude': 153,
                                         'stagnation energy': 163,
                                         'entropy': 170,
                                         'swirl': 184}),
                         desc='scalar data attribute to show')

    # The active vector name.  The integers are the PLOT3D vector
    # function numbers handed to the VTK reader.
    vectors_name = Trait('momentum',
                         TraitPrefixMap({'velocity': 200,
                                         'vorticity': 201,
                                         'momentum': 202,
                                         'pressure gradient': 210}),
                         desc='vector data attribute to show')

    # The VTK data file reader.
    reader = Instance(tvtk.MultiBlockPLOT3DReader, args=(), allow_none=False,
                      record=True)

    # Information about what this object can produce.
    output_info = PipelineInfo(datasets=['structured_grid'])

    ########################################
    # View related code.

    update_reader = Button('Update Reader')

    # Our view.
    view = View(Group(Item('xyz_file_name', editor=FileEditor()),
                      Item('q_file_name', editor=FileEditor()),
                      Item(name='scalars_name',
                           enabled_when='len(object.q_file_name) > 0'),
                      Item(name='vectors_name',
                           enabled_when='len(object.q_file_name)>0'),
                      Item(name='update_reader'),
                      label='Reader',
                      ),
                Group(Item(name='reader', style='custom',
                           resizable=True),
                      show_labels=False,
                      label='PLOT3DReader'
                      ),
                resizable=True)

    ########################################
    # Private traits.

    # The current file paths. This is not meant to be touched by the
    # user.
    xyz_file_path = Instance(FilePath, args=(), desc='the current XYZ file path')
    q_file_path = Instance(FilePath, args=(), desc='the current Q file path')

    ######################################################################
    # `object` interface
    ######################################################################
    def __get_pure_state__(self):
        """Return the picklable state, minus dynamically created traits."""
        d = super(PLOT3DReader, self).__get_pure_state__()
        # These traits are dynamically created.
        for name in ('scalars_name', 'vectors_name', 'xyz_file_name',
                     'q_file_name'):
            d.pop(name, None)
        return d

    def __set_pure_state__(self, state):
        """Restore the pickled state; requires the XYZ file to still exist."""
        xyz_fn = state.xyz_file_path.abs_pth
        q_fn = state.q_file_path.abs_pth
        if not isfile(xyz_fn):
            msg = 'Could not find file at %s\n'%xyz_fn
            msg += 'Please move the file there and try again.'
            # Call form instead of the Python-2-only `raise IOError, msg`
            # statement, which is a SyntaxError on Python 3.
            raise IOError(msg)

        # Setup the reader state.
        set_state(self, state, first=['reader'], ignore=['*'])
        # Initialize the files.
        self.initialize(xyz_fn, q_fn, configure=False)
        # Now set the remaining state without touching the children.
        set_state(self, state, ignore=['children', 'xyz_file_path', 'q_file_path'])
        # Setup the children.
        handle_children_state(self.children, state.children)
        # Setup the children's state.
        set_state(self, state, first=['children'], ignore=['*'])

    ######################################################################
    # `FileDataSource` interface
    ######################################################################
    def initialize(self, xyz_file_name, q_file_name='', configure=True):
        """Given an xyz filename and a Q filename which may or may not
        be part of a time series, this initializes the list of files.
        This method need not be called to initialize the data.

        If configure is True, it pops up a UI to configure the
        PLOT3DReader.
        """
        if len(q_file_name) == 0:
            # Look for a sibling <base>.q file next to the XYZ file.
            base = splitext(xyz_file_name)[0]
            qf = base + '.q'
            if exists(qf):
                q_file_name = qf

        if configure:
            # First set properties of the reader. This is useful when
            # the data format has atypical defaults. Automatic
            # detection can be disastrous sometimes due to VTK related
            # problems.
            self.reader.edit_traits(kind='livemodal')
        self.xyz_file_name = xyz_file_name
        if len(q_file_name) > 0:
            self.q_file_name = q_file_name

    def update(self):
        """Re-run the reader and refresh the scene, if a file is set."""
        if len(self.xyz_file_path.get()) == 0:
            return
        self.reader.update()
        self.render()

    def has_output_port(self):
        """ Return True as the reader has output port."""
        return True

    def get_output_object(self):
        """ Return the reader output port."""
        return self.reader.output_port

    ######################################################################
    # Non-public interface
    ######################################################################
    def _xyz_file_name_changed(self, value):
        # Propagate the new XYZ file to the VTK reader and re-read.
        if len(value) == 0:
            return
        else:
            self.reader.xyz_file_name = value
            self.xyz_file_path.set(value)
            self._update_reader_output()

    def _q_file_name_changed(self, value):
        # Propagate the new Q file to the VTK reader and re-read.
        if len(value) == 0:
            return
        else:
            self.reader.q_file_name = value
            self.q_file_path.set(value)
            self._update_reader_output()

    def _update_reader_output(self):
        """Re-read the files, retrying several reader configurations
        (IBlanking on/off, binary/ascii) before asking the user."""
        r = self.reader
        r.update()

        if r.error_code != 0:
            try:
                self.reader.i_blanking = True
            except AttributeError:
                pass
            else:
                r.update()

        # Try reading file.
        if r.error_code != 0:
            # No output so the file might be an ASCII file.
            try:
                # Turn off IBlanking.
                r.set(i_blanking=False, binary_file=False)
            except AttributeError:
                pass
            else:
                r.update()

        # Try again this time as ascii and with blanking.
        if r.error_code != 0:
            # No output so the file might be an ASCII file.
            try:
                # Turn on IBlanking.
                r.i_blanking = True
            except AttributeError:
                pass
            else:
                r.update()

        # If there still is an error, ask the user.
        if r.error_code != 0:
            r.edit_traits(kind='livemodal')
            r.update()

        # If there still is an error, ask the user to retry.
        if r.error_code != 0:
            msg = 'Unable to read file properly. '\
                  'Please check the settings of the reader '\
                  'on the UI and press the "Update Reader" button '\
                  'when done and try again!'
            error(msg)
            return

        # Now setup the outputs by resetting self.outputs. Changing
        # the outputs automatically fires a pipeline_changed event.
        try:
            n = r.get_output().number_of_blocks
        except AttributeError: # for VTK >= 4.5
            n = r.number_of_outputs
        outputs = []
        for i in range(n):
            outputs.append(r.get_output().get_block(i))
        self.outputs = outputs

        # Fire data_changed just in case the outputs are not
        # really changed. This can happen if the dataset is of
        # the same type as before.
        self.data_changed = True

        # Change our name on the tree view
        self.name = self._get_name()

    def _scalars_name_changed(self, value):
        # scalars_name_ is the mapped integer function number.
        self.reader.scalar_function_number = self.scalars_name_
        self.reader.modified()
        self.update()
        self.data_changed = True

    def _vectors_name_changed(self, value):
        # vectors_name_ is the mapped integer function number.
        self.reader.vector_function_number = self.vectors_name_
        self.reader.modified()
        self.update()
        self.data_changed = True

    def _update_reader_fired(self):
        # Handler for the "Update Reader" button in the view.
        self.reader.modified()
        self._update_reader_output()
        self.pipeline_changed = True

    def _get_name(self):
        """ Gets the name to display on the tree view.
        """
        xyz_fname = basename(self.xyz_file_path.get())
        q_fname = basename(self.q_file_path.get())
        if len(self.q_file_name) > 0:
            ret = "PLOT3D:%s, %s"%(xyz_fname, q_fname)
        else:
            ret = "PLOT3D:%s"%(xyz_fname)
        if '[Hidden]' in self.name:
            ret += ' [Hidden]'
        return ret
| {
"repo_name": "alexandreleroux/mayavi",
"path": "mayavi/sources/plot3d_reader.py",
"copies": "2",
"size": "10417",
"license": "bsd-3-clause",
"hash": -2603234849827698000,
"line_mean": 34.7972508591,
"line_max": 83,
"alpha_frac": 0.497936066,
"autogenerated": false,
"ratio": 4.432765957446809,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5930702023446809,
"avg_score": null,
"num_lines": null
} |
"""A plot of the deltas for erosion between scenarios."""
import datetime
import sys
from pyiem.dep import read_crop
from pyiem.plot.use_agg import plt
import matplotlib.dates as mdates
def main(argv):
    """Go Main Go.

    Plot canopy coverage (and its delta against the scenario-59
    baseline, labelled Apr 10) for each later-planting scenario.

    Args:
        argv: command line arguments [script, huc12, fpath, year].
    """
    huc12 = argv[1]
    fpath = argv[2]
    year = int(argv[3])
    prop_cycle = plt.rcParams["axes.prop_cycle"]
    colors = prop_cycle.by_key()["color"]
    # Load the OFE-1 crop output for every scenario, keyed by scenario id.
    data = {}
    for scenario in range(59, 70):
        df = read_crop(
            "/i/%s/crop/%s/%s/%s_%s.crop"
            % (scenario, huc12[:8], huc12[8:], huc12, fpath)
        )
        data[scenario] = df[df["ofe"] == 1].set_index("date")

    ax1 = plt.axes([0.15, 0.5, 0.85, 0.35])
    ax2 = plt.axes([0.15, 0.1, 0.85, 0.35])
    # Scenario 59 is the baseline the other scenarios are compared to.
    baseline = data[59][data[59].index.year == year]
    for scenario in range(60, 70):
        color = colors[scenario - 60]
        # Each subsequent scenario plants 5 days later, starting Apr 15.
        date = datetime.date(2000, 4, 15) + datetime.timedelta(
            days=(scenario - 60) * 5
        )
        scendata = data[scenario][data[scenario]["year"] == year]
        delta = scendata["canopy_percent"] - baseline["canopy_percent"]
        x = delta.index.to_pydatetime()
        ax1.plot(
            x,
            scendata["canopy_percent"] * 100.0,
            label=date.strftime("%b %d"),
            color=color,
        )
        ax2.plot(x, delta.values * 100.0, color=color)

    ax1.set_xlim(datetime.date(year, 4, 15), datetime.date(year, 7, 15))
    ax2.set_xlim(datetime.date(year, 4, 15), datetime.date(year, 7, 15))
    ax1.xaxis.set_major_locator(mdates.DayLocator([1]))
    ax1.xaxis.set_major_formatter(mdates.DateFormatter("%b"))
    ax2.xaxis.set_major_locator(mdates.DayLocator([1]))
    ax2.xaxis.set_major_formatter(mdates.DateFormatter("%b"))
    ax1.set_ylabel("Coverage [%]")
    # Fixed label typo: "Differnece" -> "Difference".
    ax2.set_ylabel("Absolute Difference from Apr 10 [%]")
    ax2.set_ylim(-101, 0)
    ax1.set_title(
        "huc12: %s fpath: %s\n%s Canopy Coverage by Planting Date"
        % (huc12, fpath, year)
    )
    ax1.grid()
    ax2.grid()
    ax1.legend(loc=2, ncol=2)
    plt.gcf().savefig("test.png")
# Run as a script with the raw CLI arguments.
if __name__ == "__main__":
    main(sys.argv)
| {
"repo_name": "akrherz/idep",
"path": "scripts/tillage_timing/plot_crop.py",
"copies": "2",
"size": "2113",
"license": "mit",
"hash": -1889929092373244400,
"line_mean": 32.5396825397,
"line_max": 72,
"alpha_frac": 0.5802177,
"autogenerated": false,
"ratio": 2.967696629213483,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9547914329213483,
"avg_score": 0,
"num_lines": 63
} |
"""A plot of the deltas for erosion between scenarios."""
import datetime
import sys
from pyiem.dep import read_env
from pyiem.plot.use_agg import plt
import matplotlib.dates as mdates
def main(argv):
    """Go Main Go.

    For one HUC12/flowpath/year, draw the daily change in sediment
    delivery of each later-planting scenario (60-69) versus the
    scenario-59 baseline (labelled Apr 10 in the title).

    Args:
        argv: command line arguments [script, huc12, fpath, year].
    """
    huc12 = argv[1]
    fpath = argv[2]
    year = int(argv[3])
    prop_cycle = plt.rcParams["axes.prop_cycle"]
    colors = prop_cycle.by_key()["color"]
    data = {}
    # Read each scenario's WEPP .env output, keyed by scenario id.
    for scenario in range(59, 70):
        data[scenario] = read_env(
            "/i/%s/env/%s/%s/%s_%s.env"
            % (scenario, huc12[:8], huc12[8:], huc12, fpath)
        ).set_index("date")
        print(data[scenario]["av_det"].sum())
    ax = plt.axes([0.2, 0.1, 0.75, 0.75])
    # Scenario 59 is the baseline the others are compared against.
    baseline = data[59][data[59].index.year == year]
    yticklabels = []
    for scenario in range(60, 70):
        color = colors[scenario - 60]
        # Each subsequent scenario plants 5 days later, starting Apr 15.
        date = datetime.date(2000, 4, 15) + datetime.timedelta(
            days=(scenario - 60) * 5
        )
        scendata = data[scenario][data[scenario].index.year == year]
        delta = scendata["sed_del"] - baseline["sed_del"]
        delta = delta[delta != 0]
        # Percent change in the annual total, shown in the y tick label.
        total = (
            (scendata["sed_del"].sum() - baseline["sed_del"].sum())
            / baseline["sed_del"].sum()
        ) * 100.0
        yticklabels.append("%s %4.2f%%" % (date.strftime("%b %d"), total))
        x = delta.index.to_pydatetime()
        # res = ax.scatter(x, delta.values + (scenario - 60))
        # Draw each daily delta as an arrow from the scenario's own row.
        for idx, val in enumerate(delta):
            ax.arrow(
                x[idx],
                scenario - 60,
                0,
                val,
                head_width=0.5,
                head_length=0.1,
                fc=color,
                ec=color,
            )
        ax.axhline(scenario - 60, color=color)
    ax.set_xlim(datetime.date(year, 1, 1), datetime.date(year + 1, 1, 1))
    ax.set_ylim(-0.5, 10)
    ax.xaxis.set_major_locator(mdates.DayLocator([1]))
    ax.xaxis.set_major_formatter(mdates.DateFormatter("%b"))
    ax.set_title(
        "huc12: %s fpath: %s\n%s Daily Change in Delivery vs Apr 10 Planting"
        % (huc12, fpath, year)
    )
    ax.grid(axis="x")
    ax.set_yticks(range(10))
    ax.set_yticklabels(yticklabels)
    plt.gcf().savefig("test.png")
# Run as a script with the raw CLI arguments.
if __name__ == "__main__":
    main(sys.argv)
| {
"repo_name": "akrherz/dep",
"path": "scripts/tillage_timing/plot_dailydeltas.py",
"copies": "2",
"size": "2278",
"license": "mit",
"hash": 7817540743329571000,
"line_mean": 31.5428571429,
"line_max": 77,
"alpha_frac": 0.5359964881,
"autogenerated": false,
"ratio": 3.2129760225669957,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9748972510666996,
"avg_score": 0,
"num_lines": 70
} |
"""A plot of the deltas for erosion between scenarios."""
import datetime
import sys
from pyiem.plot.use_agg import plt
from pyiem.util import get_dbconn
import pandas as pd
from pandas.io.sql import read_sql
import matplotlib.dates as mdates
def main(argv):
    """Plot daily delivery deltas of scenarios 60-69 vs baseline scenario 59.

    argv[1] is the huc12 identifier, argv[2] the year to plot.  Writes the
    resulting chart to ``test.png``.
    """
    huc12 = argv[1]
    year = int(argv[2])
    palette = plt.rcParams["axes.prop_cycle"].by_key()["color"]
    dbconn = get_dbconn("idep")
    frame = read_sql(
        """
    SELECT scenario, huc_12, avg_delivery, valid from results_by_huc12
    WHERE scenario >= 59 and scenario < 70 and
    extract(year from valid) = %s and huc_12 = %s ORDER by valid ASC
    """,
        dbconn,
        params=(year, huc12),
    )
    frame["valid"] = pd.to_datetime(frame["valid"])
    ax = plt.axes([0.2, 0.1, 0.75, 0.75])
    column = "avg_delivery"
    # Scenario 59 is the baseline every other scenario is compared against.
    base = frame[frame["scenario"] == 59].copy().set_index("valid")
    labels = []
    for scenario in range(60, 70):
        offset = scenario - 60
        clr = palette[offset]
        # Each scenario shifts the planting date 5 days past Apr 15.
        planting = datetime.date(2000, 4, 15) + datetime.timedelta(
            days=offset * 5
        )
        scen = frame[frame["scenario"] == scenario].copy().set_index("valid")
        diff = scen[column] - base[column]
        diff = diff[diff != 0]
        pct = (
            (scen[column].sum() - base[column].sum()) / base[column].sum()
        ) * 100.0
        labels.append("%s %4.2f%%" % (planting.strftime("%b %d"), pct))
        when = diff.index.to_pydatetime()
        # res = ax.scatter(when, diff.values + offset)
        # Draw one vertical arrow per nonzero daily delta, anchored on the
        # scenario's own horizontal line.
        for idx, val in enumerate(diff):
            ax.arrow(
                when[idx],
                offset,
                0,
                val * 10.0,
                head_width=4,
                head_length=0.1,
                fc=clr,
                ec=clr,
            )
        ax.axhline(offset, color=clr)
    ax.set_xlim(datetime.date(year, 1, 1), datetime.date(year + 1, 1, 1))
    ax.set_ylim(-0.5, 10)
    ax.xaxis.set_major_locator(mdates.DayLocator([1]))
    ax.xaxis.set_major_formatter(mdates.DateFormatter("%b"))
    ax.set_title(
        "huc12: %s \n%s Daily Change in Delivery vs Apr 10 Planting"
        % (huc12, year)
    )
    ax.grid(axis="x")
    ax.set_yticks(range(10))
    ax.set_yticklabels(labels)
    plt.gcf().savefig("test.png")
if __name__ == "__main__":
main(sys.argv)
| {
"repo_name": "akrherz/idep",
"path": "scripts/tillage_timing/plot_dailydeltas_huc12.py",
"copies": "2",
"size": "2417",
"license": "mit",
"hash": 3561513481146395600,
"line_mean": 31.6621621622,
"line_max": 77,
"alpha_frac": 0.5519238726,
"autogenerated": false,
"ratio": 3.266216216216216,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4818140088816216,
"avg_score": null,
"num_lines": null
} |
"""A plot of the deltas for erosion between scenarios."""
import os
from pyiem.dep import read_env
from pyiem.plot.use_agg import plt
from tqdm import tqdm
import numpy as np
def plot():
    """Bar-plot the per-scenario change in detachment over the baseline.

    Renders ten bars (one per additional springtime storm count) with the
    percentage labeled above each bar, and writes the figure to test.png.
    """
    # NOTE(review): `[...]` is a literal Ellipsis placeholder, not data --
    # as written `y * 100.0` below will fail.  The real fractional deltas
    # (presumably produced by main()) must be pasted/restored here; verify.
    y = [...]
    y = np.array(y)
    fig, ax = plt.subplots(1, 1)
    ax.set_title(
        "DEP Additional Springtime 1 inch/hr Storms (2007-2020)\n"
        "30 HUC12 Averaged Change [%] over Baseline"
    )
    # One bar per scenario (1..10 extra storms), values shown as percent.
    ax.bar(np.arange(1, 11), y * 100.0)
    for i, val in enumerate(y):
        # Label each bar just above its top.
        ax.text(i + 1, val * 100.0 + 1, f"{val * 100.:.1f}%", ha="center")
    ax.set_xlabel("Additional 1 inch/hr Storms per Spring Season per Year")
    ax.set_ylim(0, 110)
    ax.set_ylabel("Change in Detachment over Baseline [%]")
    ax.grid(True)
    fig.savefig("test.png")
def main():
    """Compute mean detachment change of scenarios 130-139 over scenario 0.

    Reads huc12 identifiers from ``myhucs.txt`` (one per line), averages the
    summed ``av_det`` of every env file for every huc12 per scenario, and
    prints the fractional change of each scenario over the baseline.
    """
    # Use a context manager so the file handle is closed, and skip blank
    # lines: the previous read().split("\n") kept a trailing empty string,
    # which produced a bogus "/i/<scenario>/env//" path below.
    with open("myhucs.txt") as fh:
        hucs = [line.strip() for line in fh if line.strip()]
    scenarios = [0]
    scenarios.extend(range(130, 140))
    deltas = []
    baseline = None
    for scenario in tqdm(scenarios):
        vals = []
        for huc12 in hucs:
            mydir = f"/i/{scenario}/env/{huc12[:8]}/{huc12[8:]}"
            data = []
            for fn in os.listdir(mydir):
                res = read_env(os.path.join(mydir, fn)).set_index("date")
                data.append(res["av_det"].sum())
            vals.append(np.average(data))
        if scenario > 0:
            # Fractional change over the scenario-0 baseline (which is
            # always processed first, so `baseline` is set by now).
            deltas.append((np.average(vals) - baseline) / baseline)
        else:
            baseline = np.average(vals)
    print(deltas)
if __name__ == "__main__":
# main()
plot()
| {
"repo_name": "akrherz/dep",
"path": "scripts/climatechange/plot_yearly.py",
"copies": "2",
"size": "1548",
"license": "mit",
"hash": -9023072279120573000,
"line_mean": 27.6666666667,
"line_max": 75,
"alpha_frac": 0.5562015504,
"autogenerated": false,
"ratio": 3.120967741935484,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4677169292335484,
"avg_score": null,
"num_lines": null
} |
""" A Plot which uses ScaleSystems for its ticks.
"""
from traits.api import Any
from chaco.api import (DataRange2D, LinearMapper, LogMapper,
PlotGrid, Plot, PlotAxis)
from chaco.scales_tick_generator import ScalesTickGenerator
from chaco.scales.api import DefaultScale, LogScale, ScaleSystem
def add_default_axes(plot, orientation="normal", vtitle="", htitle=""):
"""
Creates left and bottom axes for a plot. Assumes that the index is
horizontal and value is vertical by default; set orientation to
something other than "normal" if they are flipped.
"""
if orientation in ("normal", "h"):
v_mapper = plot.value_mapper
h_mapper = plot.index_mapper
else:
v_mapper = plot.index_mapper
h_mapper = plot.value_mapper
yticks = ScalesTickGenerator()
left = PlotAxis(
orientation='left',
title=vtitle,
mapper=v_mapper,
component=plot,
tick_generator=yticks,
)
xticks = ScalesTickGenerator()
bottom = PlotAxis(
orientation='bottom',
title=htitle,
mapper=h_mapper,
component=plot,
tick_generator=xticks,
)
plot.underlays.append(left)
plot.underlays.append(bottom)
return left, bottom
class ScalyPlot(Plot):
    """A chaco Plot whose axes, grids and ticks use ScaleSystem-driven
    tick generators instead of the default ones."""
    # Left as Any() so a caller may pre-assign custom components before
    # _init_components() fills in defaults.
    x_axis = Any()
    y_axis = Any()
    x_ticks = Any()
    y_ticks = Any()
    # Optional zero-argument callables returning a ScaleSystem; when unset,
    # _make_scale() falls back to DefaultScale/LogScale.
    linear_scale_factory = Any()
    log_scale_factory = Any()
    def _linear_scale_default(self):
        # Traits default initializer.
        return self._make_scale("linear")
    def _log_scale_default(self):
        # Traits default initializer.
        return self._make_scale("log")
    def _make_scale(self, scale_type="linear"):
        """ Returns a new linear or log scale """
        # Prefer the user-supplied factory; otherwise build a stock
        # ScaleSystem of the requested flavor.
        if scale_type == "linear":
            if self.linear_scale_factory is not None:
                return self.linear_scale_factory()
            else:
                return ScaleSystem(DefaultScale())
        else:
            if self.log_scale_factory is not None:
                return self.log_scale_factory()
            else:
                return ScaleSystem(LogScale())
    def _init_components(self):
        # Since this is called after the HasTraits constructor, we have to make
        # sure that we don't blow away any components that the caller may have
        # already set.
        if self.range2d is None:
            self.range2d = DataRange2D()
        if self.index_mapper is None:
            # Mapper flavor follows the configured index scale.
            if self.index_scale == "linear":
                imap = LinearMapper(range=self.range2d.x_range)
            else:
                imap = LogMapper(range=self.range2d.x_range)
            self.index_mapper = imap
        if self.value_mapper is None:
            # Mapper flavor follows the configured value scale.
            if self.value_scale == "linear":
                vmap = LinearMapper(range=self.range2d.y_range)
            else:
                vmap = LogMapper(range=self.range2d.y_range)
            self.value_mapper = vmap
        # Tick generators are shared between each axis and its grid below.
        if self.x_ticks is None:
            self.x_ticks = ScalesTickGenerator(scale=self._make_scale(self.index_scale))
        if self.y_ticks is None:
            self.y_ticks = ScalesTickGenerator(scale=self._make_scale(self.value_scale))
        if self.x_grid is None:
            self.x_grid = PlotGrid(mapper=self.x_mapper, orientation="vertical",
                    line_color="lightgray", line_style="dot",
                    component=self, tick_generator=self.x_ticks)
        if self.y_grid is None:
            self.y_grid = PlotGrid(mapper=self.y_mapper, orientation="horizontal",
                    line_color="lightgray", line_style="dot",
                    component=self, tick_generator=self.y_ticks)
        if self.x_axis is None:
            self.x_axis = PlotAxis(mapper=self.x_mapper, orientation="bottom",
                    component=self, tick_generator=self.x_ticks)
        if self.y_axis is None:
            self.y_axis = PlotAxis(mapper=self.y_mapper, orientation="left",
                    component=self, tick_generator=self.y_ticks)
    def _index_scale_changed(self, old, new):
        # Keep the base class bookkeeping, then swap the tick scale in place.
        Plot._index_scale_changed(self, old, new)
        # Now adjust the ScaleSystems.
        self.x_ticks.scale = self._make_scale(self.index_scale)
    def _value_scale_changed(self, old, new):
        # Keep the base class bookkeeping, then swap the tick scale in place.
        Plot._value_scale_changed(self, old, new)
        # Now adjust the ScaleSystems.
        self.y_ticks.scale = self._make_scale(self.value_scale)
| {
"repo_name": "burnpanck/chaco",
"path": "chaco/shell/scaly_plot.py",
"copies": "3",
"size": "4448",
"license": "bsd-3-clause",
"hash": 1441726591063034400,
"line_mean": 34.584,
"line_max": 88,
"alpha_frac": 0.5915017986,
"autogenerated": false,
"ratio": 3.9051799824407376,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0012571825287616973,
"num_lines": 125
} |
"""A plugin-able framework for the static analysis of Scratch projects."""
from __future__ import print_function
import appdirs
import cPickle
import errno
import importlib
import kurt
import os
import sys
import traceback
from hashlib import sha1
from imp import load_source
from optparse import OptionParser
from .plugins import HairballPlugin
__version__ = '0.3'
class KurtCache(object):
    """Interface to an on-disk cache of processed Kurt objects.

    NOTE: this is Python 2 code (cPickle, legacy octal literal ``0400``).
    Cached projects are pickled under cache_dir/<k0k1>/<k2k3>/<rest>.pkl,
    where the key is the sha1 of the scratch file's bytes.
    """
    DEFAULT_CACHE_DIR = appdirs.user_cache_dir(
        appname='Hairball', appauthor='bboe')
    @staticmethod
    def path_to_key(filepath):
        """Return the sha1sum (key) belonging to the file at filepath."""
        # Invert key_to_path(): rejoin the two directory shards with the
        # extension-less filename to recover the full 40-char sha1.
        tmp, last = os.path.split(filepath)
        tmp, middle = os.path.split(tmp)
        return '{}{}{}'.format(os.path.basename(tmp), middle,
                               os.path.splitext(last)[0])
    def __init__(self, cache_dir=DEFAULT_CACHE_DIR):
        """Initialize the index of cached files."""
        # Create the cache directory
        try:
            os.makedirs(cache_dir)
        except OSError as exc:
            if exc.errno != errno.EEXIST:
                raise  # Don't continue without cache support
        self.hashes = set()
        self.cache_dir = cache_dir
        # Initialize the index
        for path, _, filenames in os.walk(cache_dir):
            for filename in filenames:
                if filename.endswith('.pkl'):
                    filepath = os.path.join(path, filename)
                    self.hashes.add(self.path_to_key(filepath))
    def key_to_path(self, key):
        """Return the fullpath to the file with sha1sum key."""
        # Two levels of 2-char shards keep directory sizes manageable.
        return os.path.join(self.cache_dir, key[:2], key[2:4],
                            key[4:] + '.pkl')
    def load(self, filename):
        """Optimized load and return the parsed version of filename.
        Uses the on-disk parse cache if the file is located in it.
        """
        # Compute sha1 hash (key)
        with open(filename) as fp:
            key = sha1(fp.read()).hexdigest()
        path = self.key_to_path(key)
        # Return the cached file if available
        if key in self.hashes:
            try:
                with open(path) as fp:
                    return cPickle.load(fp)
            except EOFError:
                # Truncated pickle: drop it and fall through to re-parse.
                os.unlink(path)
                self.hashes.remove(key)
            except IOError:
                self.hashes.remove(key)
        # Create the nested cache directory
        try:
            os.makedirs(os.path.dirname(path))
        except OSError as exc:
            if exc.errno != errno.EEXIST:
                raise
        # Process the file and save in the cache
        scratch = kurt.Project.load(filename)  # can fail
        with os.fdopen(os.open(path, os.O_WRONLY | os.O_CREAT,
                               0400), 'w') as fp:
            # open file for writing but make it immediately read-only
            cPickle.dump(scratch, fp, cPickle.HIGHEST_PROTOCOL)
        self.hashes.add(key)
        return scratch
class Hairball(object):
    """The Hairball execution class.
    This class is responsible for parsing command line arguments, loading the
    plugins, and running the plugins on the specified scratch files.
    """
    def __init__(self, options, paths, cache=True):
        """Initialize a Hairball instance.

        ``cache`` may be True (build a KurtCache), a ready cache object, or
        a falsy value to disable caching entirely.
        """
        self.options = options
        self.paths = paths
        if options.kurt_plugin:
            for kurt_plugin in options.kurt_plugin:
                failure = False
                # A .py path is loaded as a source file; anything else is
                # treated as an importable module path.
                if kurt_plugin.endswith('.py') and os.path.isfile(kurt_plugin):
                    module = os.path.splitext(os.path.basename(kurt_plugin))[0]
                    try:
                        load_source(module, kurt_plugin)
                    except Exception:  # TODO: Enumerate possible exceptions
                        failure = True
                else:
                    try:
                        importlib.import_module(kurt_plugin)
                    except ImportError:
                        failure = True
                if failure and not options.quiet:
                    print('Could not load Kurt plugin: {}'.format(kurt_plugin))
        # Initialization Data
        if cache is True:
            self.cache = KurtCache()
        elif cache:
            self.cache = cache
        else:
            self.cache = False
        self.plugins = []
        # File extensions recognized by whichever Kurt plugins are loaded.
        self.extensions = [x.extension for x in
                           kurt.plugin.Kurt.plugins.values()]
    def hairball_files(self, paths, extensions):
        """Yield filepath to files with the proper extension within paths."""
        def add_file(filename):
            # True when the file's extension matches a loaded Kurt plugin.
            return os.path.splitext(filename)[1] in extensions
        while paths:
            arg_path = paths.pop(0)
            if os.path.isdir(arg_path):
                found = False
                for path, dirs, files in os.walk(arg_path):
                    dirs.sort()  # Traverse in sorted order
                    for filename in sorted(files):
                        if add_file(filename):
                            yield os.path.join(path, filename)
                            found = True
                if not found:
                    if not self.options.quiet:
                        print('No files found in {}'.format(arg_path))
            elif add_file(arg_path):
                yield arg_path
            elif not self.options.quiet:
                print('Invalid file {}'.format(arg_path))
                print('Did you forget to load a Kurt plugin (-k)?')
    def finalize(self):
        """Indicate that analysis is complete.
        Calling finalize will call the finalize method of all plugins thus
        allowing them to output any aggregate results or perform any clean-up.
        """
        for plugin in self.plugins:
            plugin.finalize()
    def initialize_plugins(self):
        """Attempt to Load and initialize all the plugins.
        Any issues loading plugins will be output to stderr.
        Exits the process when no plugin could be loaded at all.
        """
        for plugin_name in self.options.plugin:
            parts = plugin_name.split('.')
            if len(parts) > 1:
                module_name = '.'.join(parts[:-1])
                class_name = parts[-1]
            else:
                # Use the titlecase format of the module name as the class name
                module_name = parts[0]
                class_name = parts[0].title()
            # First try to load plugins from the passed in plugins_dir and then
            # from the hairball.plugins package.
            plugin = None
            for package in (None, 'hairball.plugins'):
                if package:
                    module_name = '{}.{}'.format(package, module_name)
                try:
                    module = __import__(module_name, fromlist=[class_name])
                    # Initializes the plugin by calling its constructor
                    plugin = getattr(module, class_name)()
                    # Verify plugin is of the correct class
                    if not isinstance(plugin, HairballPlugin):
                        sys.stderr.write('Invalid type for plugin {}: {}\n'
                                         .format(plugin_name, type(plugin)))
                        plugin = None
                    else:
                        break
                except (ImportError, AttributeError):
                    pass
            if plugin:
                self.plugins.append(plugin)
            else:
                sys.stderr.write('Cannot find plugin {}\n'.format(plugin_name))
        if not self.plugins:
            sys.stderr.write('No plugins loaded. Goodbye!\n')
            sys.exit(1)
    def process(self):
        """Run the analysis across all files found in the given paths.
        Each file is loaded once and all plugins are run against it before
        loading the next file.
        """
        for filename in self.hairball_files(self.paths, self.extensions):
            if not self.options.quiet:
                print(filename)
            try:
                if self.cache:
                    scratch = self.cache.load(filename)
                else:
                    scratch = kurt.Project.load(filename)
            except Exception:  # pylint: disable=W0703
                # A corrupt project should not abort the whole run.
                traceback.print_exc()
                continue
            for plugin in self.plugins:
                # pylint: disable=W0212
                plugin._process(scratch, filename=filename)
                # pylint: enable=W0212
def main():
    """The entrypoint for the hairball command installed via setup.py."""
    description = ('PATH can be either the path to a scratch file, or a '
                   'directory containing scratch files. Multiple PATH '
                   'arguments can be provided.')
    parser = OptionParser(usage='%prog -p PLUGIN_NAME [options] PATH...',
                          description=description,
                          version='%prog {}'.format(__version__))
    parser.add_option('-d', '--plugin-dir', metavar='DIR',
                      help=('Specify the path to a directory containing '
                            'plugins. Plugins in this directory take '
                            'precedence over similarly named plugins '
                            'included with Hairball.'))
    parser.add_option('-p', '--plugin', action='append',
                      help=('Use the named plugin to perform analysis. '
                            'This option can be provided multiple times.'))
    parser.add_option('-k', '--kurt-plugin', action='append',
                      help=('Provide either a python import path (e.g, '
                            'kelp.octopi) to a package/module, or the path'
                            ' to a python file, which will be loaded as a '
                            'Kurt plugin. This option can be provided '
                            'multiple times.'))
    parser.add_option('-q', '--quiet', action='store_true',
                      help=('Prevent output from Hairball. Plugins may still '
                            'produce output.'))
    parser.add_option('-C', '--no-cache', action='store_true',
                      help='Do not use Hairball\'s cache.', default=False)
    options, args = parser.parse_args(sys.argv[1:])
    # Both a plugin and at least one PATH are mandatory; parser.error exits.
    if not options.plugin:
        parser.error('At least one plugin must be specified via -p.')
    if not args:
        parser.error('At least one PATH must be provided.')
    if options.plugin_dir:
        # Make user-supplied plugins importable ahead of the bundled ones.
        if os.path.isdir(options.plugin_dir):
            sys.path.append(options.plugin_dir)
        else:
            parser.error('{} is not a directory'.format(options.plugin_dir))
    hairball = Hairball(options, args, cache=not options.no_cache)
    hairball.initialize_plugins()
    hairball.process()
    hairball.finalize()
| {
"repo_name": "thsunmy/hairball",
"path": "hairball/__init__.py",
"copies": "2",
"size": "10925",
"license": "bsd-2-clause",
"hash": -2392027369944029000,
"line_mean": 37.8790035587,
"line_max": 79,
"alpha_frac": 0.5393135011,
"autogenerated": false,
"ratio": 4.644982993197279,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 281
} |
"""A plugin-able framework for the static analysis of Scratch projects."""
import kurt
import os
import sys
from imp import load_source
from optparse import OptionParser
from .plugins import HairballPlugin
__version__ = '0.1rc3'
class Hairball(object):
    """The Hairball execution class.
    This class is responsible for parsing command line arguments, loading the
    plugins, and running the plugins on the specified scratch files.
    """
    def __init__(self, argv):
        """Parse argv, validate options, and load any Kurt plugins."""
        self.plugins = []
        description = ('PATH can be either the path to a scratch file, or a '
                       'directory containing scratch files. Multiple PATH '
                       'arguments can be provided.')
        parser = OptionParser(usage='%prog -p PLUGIN_NAME [options] PATH...',
                              description=description,
                              version='%prog {0}'.format(__version__))
        parser.add_option('-d', '--plugin-dir', metavar='DIR',
                          help=('Specify the path to a directory containing '
                                'plugins. Plugins in this directory take '
                                'precedence over similarly named plugins '
                                'included with Hairball.'))
        parser.add_option('-p', '--plugin', action='append',
                          help=('Use the named plugin to perform analysis. '
                                'This option can be provided multiple times.'))
        parser.add_option('-k', '--kurt-plugin', action='append',
                          help=('Include the named file containing Kurt '
                                'plugin. This file should contain a '
                                'load_hairball method. '
                                'This option can be provided multiple times.'))
        self.options, self.args = parser.parse_args(argv)
        # Both a plugin and at least one PATH are mandatory; parser.error
        # prints the message and exits.
        if not self.options.plugin:
            parser.error('At least one plugin must be specified via -p.')
        if not self.args:
            parser.error('At least one PATH must be provided.')
        if self.options.plugin_dir:
            # Make user-supplied plugins importable ahead of bundled ones.
            if os.path.isdir(self.options.plugin_dir):
                sys.path.append(self.options.plugin_dir)
            else:
                parser.error('`{0}` is not a directory'
                             .format(self.options.plugin_dir))
        if self.options.kurt_plugin:
            for kurt_plugin in self.options.kurt_plugin:
                module = os.path.splitext(os.path.basename(kurt_plugin))[0]
                load_source(module, kurt_plugin)
        # File extensions recognized by whichever Kurt plugins are loaded.
        self.extensions = [x.extension for x in
                           kurt.plugin.Kurt.plugins.values()]
    def finalize(self):
        """Indicate that analysis is complete.
        Calling finalize will call the finalize method of all plugins thus
        allowing them to output any aggregate results or perform any clean-up.
        """
        for plugin in self.plugins:
            plugin.finalize()
    def initialize_plugins(self):
        """Attempt to Load and initialize all the plugins.
        Any issues loading plugins will be output to stderr.
        Exits the process when no plugin could be loaded at all.
        """
        for plugin_name in self.options.plugin:
            parts = plugin_name.split('.')
            if len(parts) > 1:
                module_name = '.'.join(parts[:-1])
                class_name = parts[-1]
            else:
                # Use the titlecase format of the module name as the class name
                module_name = parts[0]
                class_name = parts[0].title()
            # First try to load plugins from the passed in plugins_dir and then
            # from the hairball.plugins package.
            plugin = None
            for package in (None, 'hairball.plugins'):
                if package:
                    module_name = '{0}.{1}'.format(package, module_name)
                try:
                    module = __import__(module_name, fromlist=[class_name])
                    # Initializes the plugin by calling its constructor
                    plugin = getattr(module, class_name)()
                    # Verify plugin is of the correct class
                    if not isinstance(plugin, HairballPlugin):
                        sys.stderr.write('Invalid type found for plugin `{0}` '
                                         '{1}\n'.format(plugin_name,
                                                        type(plugin)))
                        plugin = None
                    else:
                        break
                except (ImportError, AttributeError):
                    pass
            if plugin:
                self.plugins.append(plugin)
            else:
                sys.stderr.write('Cannot find plugin `{0}`\n'
                                 .format(plugin_name))
        if not self.plugins:
            sys.stderr.write('No plugins loaded. Goodbye!\n')
            sys.exit(1)
    def process(self):
        """Start the analysis."""
        scratch_files = []
        while self.args:
            filename = self.args.pop()
            _, ext = os.path.splitext(filename)
            # Iteratively traverse directories
            if os.path.isdir(filename):
                for temp in os.listdir(filename):
                    if temp not in ('.', '..'):
                        self.args.append(os.path.join(filename, temp))
            elif ext in self.extensions and os.path.isfile(filename):
                scratch_files.append(filename)
        # Run all the plugins on a single file at at time so we only have to
        # open the file once.
        for filename in sorted(scratch_files):
            print(filename)
            scratch = kurt.Project.load(filename)
            for plugin in self.plugins:
                plugin._process(scratch)  # pylint: disable-msg=W0212
def main():
    """Entry point for the `hairball` console command (see setup.py)."""
    app = Hairball(sys.argv[1:])
    # Load plugins, analyze every requested file, then let plugins clean up.
    for step in (app.initialize_plugins, app.process, app.finalize):
        step()
| {
"repo_name": "jemole/hairball",
"path": "hairball/__init__.py",
"copies": "1",
"size": "6081",
"license": "bsd-2-clause",
"hash": 7289362801204921000,
"line_mean": 39.54,
"line_max": 79,
"alpha_frac": 0.5318204243,
"autogenerated": false,
"ratio": 4.8223632038065025,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5854183628106503,
"avg_score": null,
"num_lines": null
} |
# A plugin for allowing people who are not daytraders to spam the channel with bitcoin prices.
import includes.helpers as helpers
import datetime
import logging
import json
import urllib.request
import time
import threading # Stopgap solution
class BTC(helpers.Plugin):
    """IRC plugin answering "!btc" with bitcoin prices (Coinbase/CoinDesk).

    Requests are rate-limited per sender category: whitelisted nicks bypass
    the cooldown, brownlisted nicks get a longer one, and blacklisted nicks
    only ever receive a (rate-limited) scolding.
    """
    def __init__(self, parent):
        super().__init__(parent)
        default_config = {
            'cooldown': 60*10,  # cooldown time for requests in sec
            'cooldown_brown': 60*30,  # cooldown for people who need to get a life
            'cooldown_black': 15,  # cooldown for telling people they are not daytraders
            'whitelist': [],
            'blacklist': [],
            'brownlist': [],
        }
        self.config = helpers.parse_config('settings_btc.json', default_config)
        self.last_request = 0  # epoch of the last successful price lookup
        self.last_black_request = 0  # epoch of the last blacklist scolding
    def handle_pm(self, msg_data):
        """Private messages are ignored."""
        pass
    def handle_message(self, msg_data):
        """Answer "!btc" channel messages, honoring the per-list cooldowns."""
        if msg_data['message'].lower().startswith("!btc"):
            current_time = time.time()
            if msg_data['nick'] in self.config['whitelist']:
                cooldown = 0
            elif msg_data['nick'] in self.config['blacklist']:
                if current_time > self.last_black_request + self.config['cooldown_black']:
                    self.last_black_request = current_time
                    self.parent.send_msg(msg_data['channel'], "{}, you are not a day trader. Go back to your real job.".format(msg_data['nick']))
                # BUG FIX: return here.  Previously this branch fell through
                # to the cooldown check below with `cooldown` unbound,
                # raising UnboundLocalError for blacklisted nicks.
                return
            elif msg_data['nick'] in self.config['brownlist']:
                cooldown = self.config['cooldown_brown']
            else:
                cooldown = self.config['cooldown']
            if current_time > self.last_request + cooldown:
                self.last_request = current_time
                # Fetch prices off-thread so slow HTTP calls do not block
                # the bot's main loop.
                thread = threading.Thread(target=self.get_price, args=(msg_data,))
                thread.start()
    def get_price(self, msg_data, currency="USD"):
        """Fetch spot/bid/ask prices and post a summary to the channel.

        Runs in a worker thread; on any request/parse error it logs a debug
        message and posts nothing.
        """
        today = datetime.date.today()
        d_ago = today - datetime.timedelta(days=1)
        week_ago = today - datetime.timedelta(days=7)
        month_ago = today - datetime.timedelta(days=30)
        current_api = "https://api.coindesk.com/v1/bpi/currentprice.json"
        lastM_api = "https://api.coindesk.com/v1/bpi/historical/close.json"
        coinbase_api_buy = "https://api.coinbase.com/v2/prices/BTC-{}/buy".format(currency)
        coinbase_api_sell = "https://api.coinbase.com/v2/prices/BTC-{}/sell".format(currency)
        try:
            current_r = urllib.request.urlopen(current_api).read()
            lastM_r = urllib.request.urlopen(lastM_api).read()
            coinbase_ask_r = urllib.request.urlopen(coinbase_api_buy).read()
            coinbase_bid_r = urllib.request.urlopen(coinbase_api_sell).read()
            current_j = json.loads(current_r.decode('utf-8'))
            lastM_j = json.loads(lastM_r.decode('utf-8'))
            coinbase_ask_j = json.loads(coinbase_ask_r.decode('utf-8'))
            coinbase_bid_j = json.loads(coinbase_bid_r.decode('utf-8'))
            if d_ago.isoformat() not in lastM_j["bpi"]: #depending on timezone, day might not have changed yet!
                d_ago = d_ago - datetime.timedelta(days=1)
            usd_price_1d = float(lastM_j["bpi"][d_ago.isoformat()])
            usd_price_1w = float(lastM_j["bpi"][week_ago.isoformat()])
            usd_price_1m = float(lastM_j["bpi"][month_ago.isoformat()])
            usd_price = float(current_j["bpi"]["USD"]["rate"].replace(',',''))
            coinbase_ask_price = float(coinbase_ask_j["data"]["amount"])
            coinbase_bid_price = float(coinbase_bid_j["data"]["amount"])
        except Exception as e:
            # Narrowed from BaseException so KeyboardInterrupt/SystemExit
            # are not swallowed inside the worker thread.
            logging.debug('BTC: request/parse error - {}'.format(e))
            return ""
        d_change = self.format_change(usd_price, usd_price_1d, 'd')
        w_change = self.format_change(usd_price, usd_price_1w, 'w')
        m_change = self.format_change(usd_price, usd_price_1m, 'm')
        if currency == "USD":
            response_str = "BTC/USD - Coinbase: Bid=${:.2f} Ask=${:.2f} | CoinDesk: Spot=${:.2f} {} {} {}".format(coinbase_bid_price, coinbase_ask_price, usd_price, d_change, w_change, m_change)
        else:
            response_str = "BTC/{} - Coinbase: Bid=${} Ask=${} | CoinDesk does not support this currency.".format(currency, coinbase_bid_price, coinbase_ask_price)
        self.parent.send_msg(msg_data['channel'], response_str)
    def format_change(self, new_price, old_price, char):
        """Return an IRC-colored "<period>:<arrow><pct>" change string."""
        data = {
            'char': char,
            'change': (new_price-old_price)/old_price,
        }
        if data['change'] > 0:
            data['arrow'] = "↑"
            data['color'] = "\x033"  # green
        else:
            # NOTE(review): a change of exactly 0 renders as a red down
            # arrow; confirm whether that is intended.
            data['arrow'] = "↓"
            data['color'] = "\x034"  # red
        return '{color}{char}:{arrow}{change:.2%}'.format(**data)
#“Powered by (((CoinDesk)))”
| {
"repo_name": "Sulter/MASTERlinker",
"path": "plugins/BTC.py",
"copies": "1",
"size": "4628",
"license": "mit",
"hash": 4822214217128051000,
"line_mean": 43,
"line_max": 188,
"alpha_frac": 0.6374458874,
"autogenerated": false,
"ratio": 3.1665524331734063,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4303998320573406,
"avg_score": null,
"num_lines": null
} |
"""A plugins that implements epheremal addresses"""
import sys
from socket import AF_INET6
from subprocess import Popen, PIPE
from contextlib import nested
from threading import RLock
import NDprotector
from NDprotector.Plugin import Plugin
from NDprotector.Log import warn
from NDprotector.Address import Address
from NDprotector.NeighCache import NeighCache
if "lib" not in sys.path:
sys.path.append("lib")
import nfqueue
IPTABLES = "/sbin/ip6tables"
def test_plugin():
    """Unit test for the module: name sharing and half-close tracking."""
    first = EphemeralAddress()
    second = EphemeralAddress()
    # Every instance reports the same plugin name.
    assert first.get_name() == second.get_name()
    # Half-close bookkeeping is shared at class level: a FIN in one
    # direction alone does not close the connexion, both directions do,
    # and a closed pair is forgotten afterwards.
    assert not first.connexion_closed("fe80::1", "fe80::2")
    assert second.connexion_closed("fe80::2", "fe80::1")
    assert not second.connexion_closed("fe80::1", "fe80::2")
# the callback function that listen to TCP requests:
def callback(i, payload):
    """NFQUEUE callback inspecting intercepted TCP SYN/FIN packets.

    On a SYN, the source address is reported as used (deprecated) and, when
    it is an ephemeral CGA, the next pool address is promoted to valid.
    On a FIN, once both directions of the connexion have seen a FIN, the
    ephemeral CGA endpoint is scheduled for removal from the interface.
    The packet itself is always accepted.
    """
    data = payload.get_data()
    # NOTE(review): the IPv6/TCP dissectors (scapy) are not among this
    # module's visible imports -- confirm they are provided elsewhere.
    data = IPv6(data)
    CGApool = EphemeralPool()
    # we might have intercepted packet for addresses
    # that are NOT Ephemeral CGA
    # if this is a SYN packet:
    # - put back the address in deprecated mode
    # - prepare the next address for the preferred state
    # a TCP SYN only (flags value 2)
    if data[TCP].flags == 2:
        warn("this is a TCP SYN (address %s goes to deprecated state)\n" % data.src)
        CGApool.reportaddress_as_used(data.src)
        if CGApool.is_ephemeral_CGA(data.src):
            CGApool.prepare_next_valid_address()
    # first idea: if this is a FIN + ACK (17) packet, remove the address for the interface
    # second idea: if there is one FIN on each side, wait a bit and remove the address
    # (we implement the second idea)
    elif data[TCP].flags & 1:
        warn("this is a TCP FIN (addresses %s-%s)\n" % (data.src, data.dst))
        if EphemeralAddress.connexion_closed(data.src, data.dst):
            if CGApool.is_ephemeral_CGA(data.src):
                warn("address %s scheduled to be removed from the interface\n"\
                        % data.src)
                CGApool.schedule_address_removal( data.src )
            elif CGApool.is_ephemeral_CGA(data.dst):
                # BUG FIX: log the destination address (the one actually
                # being removed); this message previously printed data.src.
                warn("address %s scheduled to be removed from the interface\n"\
                        % data.dst)
                CGApool.schedule_address_removal( data.dst )
    payload.set_verdict(nfqueue.NF_ACCEPT)
    return 1
class EphemeralAddress(Plugin):
"""implements the idea of ephemeral CGA addresses
Consists in the following elements:
- a pool of address (self-regenerated when not enough address are available)
- a mechanism that deprecates/un-deprecates address when they should be selected for outgoing packets
"""
capabilities = [ "NFQueue", "PERSISTENT_OBJ",
"Address", "Filtering" ]
name="EphemeralAddress"
QUEUE_NUM = "5"
TIMEOUT = 60
# size of the pool of address
POOL_SIZE = 150
# could be modified later on
# PREFIX = "2001:6f8:147b::"
# PREFIX = "2001:db8:ffff:0::"
# PREFIX = "2003::"
# PREFIX = "2001:6f8:202:249::"
PREFIX = "2001:AAAA:BBBB::"
INTERFACE = "eth0"
# represents the half closed connexions
# (shared among instances)
half_conn = []
@classmethod
def set_filter_interface(cls, interface, negate=False):
"""set filter on the interfaces"""
# in FIN packets
if negate:
p = Popen ( [ IPTABLES, "-A", "INPUT", "!", "-i",
interface, "-p", "tcp", "--tcp-flags", "FIN", "FIN",
"-j", "NFQUEUE", "--queue-num", cls.QUEUE_NUM ],
stdout=PIPE, stderr=PIPE )
else:
p = Popen ( [ IPTABLES, "-A", "INPUT", "-i",
interface, "-p", "tcp", "--tcp-flags", "FIN", "FIN",
"-j", "NFQUEUE", "--queue-num", cls.QUEUE_NUM ],
stdout=PIPE, stderr=PIPE )
output = p.stdout.read() + p.stderr.read()
# out SYN packets
if negate:
p = Popen ( [ IPTABLES, "-A", "OUTPUT", "!", "-o",
interface, "-p", "tcp", "--tcp-flags", "SYN,ACK", "SYN",
"-j", "NFQUEUE", "--queue-num", cls.QUEUE_NUM ]
, stdout=PIPE, stderr=PIPE )
else:
p = Popen ( [ IPTABLES, "-A", "OUTPUT", "-o",
interface, "-p", "tcp", "--tcp-flags", "SYN,ACK", "SYN",
"-j", "NFQUEUE", "--queue-num", cls.QUEUE_NUM ]
, stdout=PIPE, stderr=PIPE )
output = output + p.stdout.read() + p.stderr.read()
# out FIN packets
if negate:
p = Popen ( [ IPTABLES, "-A", "OUTPUT", "!", "-o",
interface, "-p", "tcp", "--tcp-flags", "FIN", "FIN",
"-j", "NFQUEUE", "--queue-num", cls.QUEUE_NUM ]
, stdout=PIPE, stderr=PIPE )
else:
p = Popen ( [ IPTABLES, "-A", "OUTPUT", "-o",
interface, "-p", "tcp", "--tcp-flags", "FIN", "FIN",
"-j", "NFQUEUE", "--queue-num", cls.QUEUE_NUM ]
, stdout=PIPE, stderr=PIPE )
output = output + p.stdout.read() + p.stderr.read()
return output
@classmethod
def unset_filter_interface(cls, interface, negate=False):
"""unset filter on the interfaces"""
# in FIN packets
if negate:
p = Popen ( [ IPTABLES, "-D", "INPUT", "!", "-i",
interface, "-p", "tcp", "--tcp-flags", "FIN", "FIN",
"-j", "NFQUEUE", "--queue-num", cls.QUEUE_NUM ],
stdout=PIPE, stderr=PIPE )
else:
p = Popen ( [ IPTABLES, "-D", "INPUT", "-i",
interface, "-p", "tcp", "--tcp-flags", "FIN", "FIN",
"-j", "NFQUEUE", "--queue-num", cls.QUEUE_NUM ],
stdout=PIPE, stderr=PIPE )
output = p.stdout.read() + p.stderr.read()
# out SYN packets
if negate:
p = Popen ( [ IPTABLES, "-D", "OUTPUT", "!", "-o",
interface, "-p", "tcp", "--tcp-flags", "SYN,ACK", "SYN",
"-j", "NFQUEUE", "--queue-num", cls.QUEUE_NUM ]
, stdout=PIPE, stderr=PIPE )
else:
p = Popen ( [ IPTABLES, "-D", "OUTPUT", "-o",
interface, "-p", "tcp", "--tcp-flags", "SYN,ACK", "SYN",
"-j", "NFQUEUE", "--queue-num", cls.QUEUE_NUM ]
, stdout=PIPE, stderr=PIPE )
output = output + p.stdout.read() + p.stderr.read()
# out FIN packets
if negate:
p = Popen ( [ IPTABLES, "-D", "OUTPUT", "!", "-o",
interface, "-p", "tcp", "--tcp-flags", "FIN", "FIN",
"-j", "NFQUEUE", "--queue-num", cls.QUEUE_NUM ]
, stdout=PIPE, stderr=PIPE )
else:
p = Popen ( [ IPTABLES, "-D", "OUTPUT", "-o",
interface, "-p", "tcp", "--tcp-flags", "FIN", "FIN",
"-j", "NFQUEUE", "--queue-num", cls.QUEUE_NUM ]
, stdout=PIPE, stderr=PIPE )
output = output + p.stdout.read() + p.stderr.read()
return output
@classmethod
def get_name(cls):
"""indicates the name of the plugin"""
return cls.name
@classmethod
def listening_queue(cls):
"""queue that listen for outgoing TCP connexions"""
q = nfqueue.queue()
q.open()
q.unbind(AF_INET6)
q.bind(AF_INET6)
q.set_callback(callback)
q.create_queue(5)
q.set_queue_maxlen(5000)
return q
@classmethod
def init_address(cls, address_obj, extra_param):
"""initialise extra field in an Address obj"""
if "ephemeral" in extra_param:
address_obj.ephemeral = extra_param["ephemeral"]
else:
address_obj.ephemeral = False
@classmethod
def connexion_closed(cls,source, destination):
"""returns a Boolean to indicates
if a connexion is closed"""
cls.half_conn.append((source, destination))
if (source, destination) in cls.half_conn and\
(destination, source) in cls.half_conn:
# do some cleanup
del cls.half_conn[cls.half_conn.index((source, destination))]
del cls.half_conn[cls.half_conn.index((destination, source))]
return True
else:
return False
    @classmethod
    def persisent_obj_start(cls):
        """Initialize the Ephemeral CGA address pool.

        NOTE(review): "persisent" (sic) — the misspelled name is part of
        the plugin interface, so it is kept as-is.
        """
        EphemeralCGApoolStart()
class EphemeralPool(object):
    """the pool contains two types of Ephemeral CGA:
    - the ones that are currently in use
    - the ones that are free to be used"""
    # Borg-pattern shared state: every instance is a view on the same pool
    __shared_object = {}
    def __init__(self):
        """initialize the Ephemeral Address Pool"""
        # DP borg, all instances share the same variables
        self.__dict__ = self.__shared_object
        # DP borg
        # the hasattr() guards make initialisation effectively run once,
        # no matter how many instances are constructed
        if not hasattr(self,"freePool"):
            # addresses generated but not yet handed out
            self.freePool = []
            self.freePoolLock = RLock()
        if not hasattr(self,"inUsePool"):
            # addresses currently assigned to a connexion
            self.inUsePool = []
            self.inUsePoolLock = RLock()
        if not hasattr(self,"TBremovedAddr"):
            # maps address object -> remaining TTL ticks before removal
            self.TBremovedAddr = {}
            self.TBremovedAddrLock = RLock()
        # some connxion are required to the Neighbor Cache
        # it seems better to always stay connected
        if not hasattr(self,"nc"):
            self.nc = NeighCache()
        if not hasattr(self,"clean"):
            # flag polled by the maintenance thread; set False to stop it
            self.clean = True
    def get_address_in_use(self):
        """returns the addresses that are currently in use"""
        with self.inUsePoolLock:
            # NOTE(review): returns the live list, not a copy — callers
            # mutating it would bypass the lock
            return self.inUsePool
    def get_not_yet_used_addresses(self):
        """returns the list of addresses that are currently free to be used"""
        with self.freePoolLock:
            # NOTE(review): returns the live list, not a copy
            return self.freePool
    def prepare_next_valid_address(self):
        """pick on address from the pool and change it valid state"""
        # both locks taken together: the address moves atomically from
        # the free pool to the in-use pool
        with nested(self.freePoolLock, self.inUsePoolLock):
            try:
                address = self.freePool[0]
                address.modify() # by default, place the address in "valid" state
                del self.freePool[0]
                self.inUsePool.append(address)
            except IndexError: # or will crash when no address is left
                pass
    def is_ephemeral_CGA(self, address):
        """return True if the address is an Ephemeral CGA"""
        with nested(self.freePoolLock, self.inUsePoolLock):
            # the comprehension variable shadows the parameter, but the
            # left operand of "in" is evaluated first, so the *parameter*
            # is compared against each pooled address as intended
            return str(address) in [ str(address) for address in self.freePool + self.inUsePool ]
    def reportaddress_as_used(self, address):
        """report the address as an address that is currently in use"""
        # connect to the NC to obtain the list of currently assigned addresses
        configured_addresses = self.nc.dump_addresses()
        # address = None
        # obtain the address object
        # rebinds *address* (passed as a string) to the matching Address
        # object; when nothing matches it stays a string and modify()
        # below raises AttributeError (handled)
        for address_obj in ( a for a in configured_addresses
                if str(a) == address):
            address = address_obj
            break # we can stop here as the address is only recorded one
        # change the state of the address to deprecated
        try:
            address.modify(preferred_lft=0)
            warn("Address %s reported to have initialized a connexion is now being deprecated\n" % address)
        except AttributeError: # the address does not belong to our program
            pass
        # if this is an ephemeral CGA from the "free" pool, place it
        # in the "in use" pool
        try:
            with nested(self.freePoolLock, self.inUsePoolLock):
                index = self.freePool.index(address)
                del self.freePool[index]
                self.inUsePool.append(address)
        except (ValueError, IndexError):
            pass
    def regenerate(self, n_address):
        """create n_address addresses to fill the pool
        there should be POOL_SIZE addresses in the pool"""
        warn("Ephemeral CGA pool: regeneration of %i addresses\n" % n_address)
        for i in range(n_address):
            a = Address(key=NDprotector.default_publickey,
                    interface = EphemeralAddress.INTERFACE,
                    prefix = EphemeralAddress.PREFIX,
                    ephemeral = True,
                    sec = 0,
                    dad=False) # temporary
            # new addresses start deprecated until explicitly validated
            a.modify(preferred_lft=0)
            with self.freePoolLock:
                self.freePool.append(a)
            # connect to the NC to store the new address
            self.nc.store_address(a)
        warn("Ephemeral CGA pool: regeneration of %i addresses complete\n"
                % n_address)
    def schedule_address_removal(self,address):
        """schedule an address for removal after TIMEOUT seconds"""
        # NOTE(review): this bare attribute access has no effect —
        # presumably a leftover; TODO remove
        EphemeralAddress.TIMEOUT
        with nested(self.inUsePoolLock, self.TBremovedAddrLock):
            # *address* arrives as a string; the loop rebinds it to the
            # matching Address object from the in-use pool
            for address in ( a for a in self.inUsePool if str(a) == address ):
                index = self.inUsePool.index(address)
                del self.inUsePool[index]
                # countdown handled by pool_maintenance()
                self.TBremovedAddr[address] = EphemeralAddress.TIMEOUT
                break
    def close_cleaning_thread(self):
        """close the cleaning/maintenance thread"""
        self.clean = False
def pool_maintenance():
    """Cleanse the Ephemeral CGA pool.

    - tick down / remove addresses scheduled to be removed
    - regenerate the free pool once it drops to half of POOL_SIZE
    """
    pool = EphemeralPool()
    with pool.TBremovedAddrLock:
        # iterate over a snapshot: entries are deleted during the walk.
        # (the original relied on Python 2's .items() returning a list;
        # list(...) keeps that safe and is also correct on Python 3)
        for (address, ttl) in list(pool.TBremovedAddr.items()):
            if ttl <= 0:
                # suppress the address from the system
                warn("scheduled removal for address %s is now removed\n" % address)
                address.remove()
                pool.nc.del_address(str(address))
                del pool.TBremovedAddr[address]
            else:
                # not expired yet: decrement the remaining TTL
                pool.TBremovedAddr[address] = ttl - 1
    # regenerate the pool of addresses when it is half empty
    remaining_addresses = 0
    with pool.freePoolLock:
        remaining_addresses = len(pool.freePool)
    if remaining_addresses <= EphemeralAddress.POOL_SIZE / 2:
        pool.regenerate(EphemeralAddress.POOL_SIZE - remaining_addresses)
def EphemeralCGApoolStart():
    """provide access to the pool"""
    # initialisation of the pool
    pool = EphemeralPool()
    # no need to generate a complete pool
    # done in the maintenance thread
    # NOTE(review): despite the comment above, a full POOL_SIZE batch is
    # generated right here; the maintenance thread only tops the pool up
    pool.regenerate(EphemeralAddress.POOL_SIZE)
    # make one address immediately usable
    pool.prepare_next_valid_address()
    # subscribe the cleaning Thread for the pool
    # (be warned that this function will add delay to the main thread)
    #cleanup_thread_subscribe(pool_maintenance)
| {
"repo_name": "tcheneau/NDprotector",
"path": "NDprotector/plugins/ephemeraladdress.py",
"copies": "2",
"size": "14989",
"license": "bsd-3-clause",
"hash": -6006806601095283000,
"line_mean": 33.299771167,
"line_max": 107,
"alpha_frac": 0.5605443992,
"autogenerated": false,
"ratio": 3.9949360341151388,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5555480433315139,
"avg_score": null,
"num_lines": null
} |
# A plugin that gives when the bot last saw the user
import includes.helpers as helpers
import sqlite3
import time
import datetime
import logging
class seen(helpers.Plugin):
    """Plugin that reports when the bot last saw a given nick.

    Every incoming message refreshes the sender's last-seen timestamp in
    a local SQLite table; "!seen <nick>" queries that table.
    """

    def __init__(self, parent):
        super().__init__(parent)
        default_config = {
            'name_replacements': {}
        }
        self.config = helpers.parse_config('settings_seen.json', default_config)
        db_path = "database/seen.db"
        self.connection = sqlite3.connect(db_path)
        self.cursor = self.connection.cursor()
        self.cursor.execute('CREATE TABLE IF NOT EXISTS seen '
                            '(Id INTEGER PRIMARY KEY, nickname TEXT UNIQUE, last_time INT)')
        self.connection.commit()

    def handle_message(self, msg_data):
        """Record the sender, then answer if the message is a !seen query."""
        # We update the seen time of this nick
        self.update(msg_data["nick"])
        # Ignore if message does not contain module name
        if not msg_data["message"].startswith("!seen"):
            return None
        self.handle_seen(msg_data)

    def update(self, nick):
        """Store 'now' as the last time *nick* was seen (case-insensitive)."""
        nick = nick.lower()
        t = int(time.time())
        # INSERT creates the row on first sight; UPDATE refreshes it
        self.cursor.execute("INSERT OR IGNORE INTO seen(nickname, last_time) VALUES(?,?)", (nick, t))
        self.cursor.execute("UPDATE seen SET last_time=? WHERE nickname=?", (t, nick))
        self.connection.commit()

    def handle_seen(self, msg_data):
        """Answer a "!seen <nick>" query in the originating channel."""
        # Get the nick and strip its spaces.
        nick = msg_data["message"].replace("!seen", "")
        nick = nick.replace(" ", "")
        matching_nick = nick.lower()
        if matching_nick in self.config['name_replacements']:
            # BUGFIX: index with the lowercased key that was tested above;
            # indexing with the raw-case nick raised KeyError whenever the
            # user's capitalisation differed from the config key
            nick = self.config['name_replacements'][matching_nick]
            matching_nick = nick.lower()
        if matching_nick == msg_data['nick'].lower():
            response = "I see you, {}.".format(nick)
        elif matching_nick == self.parent.config['connection']['nick'][:16].lower():
            response = "I see me in every way there is to be seen, perceiving all that is within {}.".format(nick)
        else:
            try:
                self.cursor.execute("SELECT last_time FROM seen WHERE nickname=?", (matching_nick,))
            except sqlite3.Error as e:
                # narrowed from BaseException: only database errors are expected
                logging.error('Error in Seen: {}'.format(e))
                return None
            row = self.cursor.fetchone()
            if not row:
                response = "I haven't seen {}.".format(nick)
            else:
                t = row[0]
                time_now = int(time.time())
                diff = time_now - t
                time_str = helpers.time_string(datetime.timedelta(seconds=diff))
                response = "I saw {} {}".format(nick, time_str)
        self.parent.send_msg(msg_data["channel"], response)
| {
"repo_name": "Sulter/MASTERlinker",
"path": "plugins/seen.py",
"copies": "1",
"size": "2452",
"license": "mit",
"hash": -7790782952050977000,
"line_mean": 35.5970149254,
"line_max": 108,
"alpha_frac": 0.6378466558,
"autogenerated": false,
"ratio": 3.7378048780487805,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48756515338487805,
"avg_score": null,
"num_lines": null
} |
# A plugin that tracks number of lines and words written by users
import includes.helpers as helpers
import sqlite3
import time
import re
import random
class stats(helpers.Plugin):
    """Plugin that counts lines/words per nick and keeps a random quote."""

    def __init__(self, parent):
        super().__init__(parent)
        db_path = "database/stats.db"
        self.connection = sqlite3.connect(db_path)
        self.cursor = self.connection.cursor()
        self.cursor.execute(
            'CREATE TABLE IF NOT EXISTS nickstats (Id INTEGER PRIMARY KEY, nickname TEXT UNIQUE, lines INT DEFAULT(0), words INT DEFAULT(0), init_time INT, random_quote TEXT)')
        self.connection.commit()

    def handle_pm(self, msg_data):
        """Ignore private messages."""
        pass

    def handle_message(self, msg_data):
        """Answer !stats queries; count every other message."""
        if msg_data["message"].startswith("!stats"):
            self.print_user_stats(msg_data)
        else:
            self.add_line_n_words(msg_data["nick"], msg_data["message"])

    def add_line_n_words(self, nick, msg):
        """Add one line plus the message's word count to *nick*'s totals."""
        self.cursor.execute("INSERT OR IGNORE INTO nickstats(nickname, init_time) VALUES(?,?)",
                            (nick, int(round(time.time(), 0))))
        self.cursor.execute("UPDATE nickstats SET lines = lines + 1 WHERE nickname=?", (nick,))
        # Count words (raw string: "\S" in a plain literal is an invalid
        # escape and a DeprecationWarning on modern Python)
        words = len(re.findall(r"\S+", msg))
        self.cursor.execute("UPDATE nickstats SET words = words + ? WHERE nickname=?", (words, nick))
        self.connection.commit()
        # There is a 1% chance that this phrase becomes the nick's "quote"
        if random.random() > 0.99:
            try:
                self.cursor.execute("UPDATE nickstats SET random_quote=? WHERE nickname=?", (msg, nick))
                self.connection.commit()
            except (sqlite3.Error, ValueError):
                # narrowed from a bare except: database or text/encoding
                # problems only — the quote is best-effort
                return None

    def print_user_stats(self, msg_data):
        """Report the word/line totals for the requesting nick."""
        try:
            self.cursor.execute("SELECT words, lines FROM nickstats WHERE nickname=?", (msg_data["nick"],))
        except sqlite3.Error:
            # narrowed from a bare except
            return None
        row = self.cursor.fetchone()
        if not row:
            words = 0
            lines = 0
        else:
            words = row[0]
            lines = row[1]
        response = "Counting {} words in {} lines".format(words, lines)
        self.parent.send_msg(msg_data["channel"], response)
| {
"repo_name": "Sulter/MASTERlinker",
"path": "plugins/stats.py",
"copies": "1",
"size": "2134",
"license": "mit",
"hash": 5622441777195780000,
"line_mean": 34.5666666667,
"line_max": 170,
"alpha_frac": 0.6569821931,
"autogenerated": false,
"ratio": 3.7373029772329245,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9825875420414822,
"avg_score": 0.013681949983620538,
"num_lines": 60
} |
"""A plugin to allow us to issue jira queries over slack"""
from jira import JIRA
import os
import re
import datetime
JIRA_USER = os.environ['JIRA_USER']
JIRA_AUTH = os.environ['JIRA_AUTH']
OPTIONS = {
'server': 'https://trifacta.atlassian.net'
}
def preprocess(issue):
    """Takes a single issue pulled from a JIRA query and
    extracts a shortened version for more compact posting by rosencrantz

    :returns: (*dict*) with summary, assignee, status and issue_type keys
    """
    fields = issue.raw['fields']
    summary = fields['summary']
    assignee = fields['assignee']['displayName']
    status = fields['status']['name']
    issue_type = fields['issuetype']['name']
    return {
        'summary': summary,
        'assignee': assignee,
        'status': status,
        'issue_type': issue_type
    }
def extract_issue_data(issue):
    """Build the short, human-readable description line for *issue*."""
    data = preprocess(issue)
    for field in data.keys():
        # keep the posted text plain ASCII
        data[field] = data[field].encode('ascii', 'ignore')
    data['url'] = 'https://trifacta.atlassian.net/browse/{0}'.format(issue.key)
    data['ticket'] = issue.key
    return '{assignee}: {ticket} ({issue_type}) - {summary}\n{url}'.format(**data)
def get_recently_resolved_query(team):
    """Build the JQL query for issues resolved since the last workday."""
    # look back three days on Sunday/Monday so Friday's work is included
    lag = 3 if datetime.date.today().weekday() in (6, 0) else 1
    return '''issuetype in (Bug, "Engineering Story", Story, Sub-task) AND
              status in (Resolved, Closed) and updated >= -{0}d AND
              assignee in membersOf("{1}") ORDER BY assignee,
              updated DESC'''.format(lag, team)
def get_in_progress_query(team):
    """Take in a team name and return the jql query for retrieving the
    team's unresolved, in-progress issues."""
    template = '''
    issuetype in (Bug, "Engineering Story", Story, Sub-task) AND
    status = "In Progress" AND resolution = Unresolved AND
    assignee in membersOf("{0}") ORDER BY assignee, updated DESC
    '''
    return template.format(team)
def run_scrum_query(team):
    """Run the daily-scrum JIRA queries for *team* and format the digest."""
    conn = JIRA(OPTIONS, basic_auth=(JIRA_USER, JIRA_AUTH))
    # the 'serenity' team is registered under the name 'Bufs' in JIRA
    jql_team_name = 'Bufs' if team == 'serenity' else team
    recent_resolution_qry = get_recently_resolved_query(jql_team_name)
    in_progress_qry = get_in_progress_query(jql_team_name)
    recent_issues = conn.search_issues(recent_resolution_qry)
    progress_issues = conn.search_issues(in_progress_qry)
    recent_lines = '\n'.join(map(extract_issue_data, recent_issues))
    progress_lines = '\n'.join(map(extract_issue_data, progress_issues))
    return '\n'.join(['*Team recently closed/resolved:*', recent_lines,
                      '*And folks are currently working on:*', progress_lines])
def on_message(msg, server):
    """Slack entry point: answer "rs scrum <team>" messages.

    Returns None (implicitly) when the message does not match.
    """
    text = msg.get("text", "")
    match = re.findall(r"(\w+\s+|\b)rs scrum (.*)", text)
    if not match:
        return
    # the second capture group of the first match holds the team name
    searchterm = match[0][1]
    # Python 2 print statement — debug echo of the search term
    print searchterm
    return run_scrum_query(searchterm)
| {
"repo_name": "michaelMinar/limbo",
"path": "limbo/plugins/scrum.py",
"copies": "1",
"size": "2934",
"license": "mit",
"hash": 3669781432600255500,
"line_mean": 32.3409090909,
"line_max": 88,
"alpha_frac": 0.6376959782,
"autogenerated": false,
"ratio": 3.345496009122007,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4483191987322007,
"avg_score": null,
"num_lines": null
} |
"""A plugin to allow us to issue jira queries over slack"""
from jira import JIRA
import os
import re
JIRA_USER = os.environ['JIRA_USER']
JIRA_AUTH = os.environ['JIRA_AUTH']
OPTIONS = {
'server': 'https://trifacta.atlassian.net'
}
def preprocess(issue):
    """Takes a single issue pulled from a JIRA query and
    extracts a shortened version for more compact posting by rosencrantz

    :returns: (*dict*) with summary, assignee and status keys
    """
    fields = issue.raw['fields']
    return dict(
        summary=fields['summary'],
        assignee=fields['assignee']['displayName'],
        status=fields['status']['name'],
    )
def extract_issue_data(issue):
    """Return a one-line description of *issue* for posting to Slack."""
    data = preprocess(issue)
    for field in data.keys():
        # keep the posted text plain ASCII
        data[field] = data[field].encode('ascii', 'ignore')
    data['url'] = 'https://trifacta.atlassian.net/browse/{0}'.format(issue.key)
    data['ticket'] = issue.key
    return '{ticket}: {summary}, {status}, {assignee}, {url}'.format(**data)
def run_query(qry):
    """Run *qry* against our JIRA corpus and return up to five issue lines."""
    conn = JIRA(OPTIONS, basic_auth=(JIRA_USER, JIRA_AUTH))
    issues = conn.search_issues(qry)
    # cap the reply at the first five issues
    lines = [extract_issue_data(issue) for issue in issues[0:5]]
    return '\n'.join(lines)
def on_message(msg, server):
    """Slack entry point: answer "rs jira <query>" messages with results.

    Returns None (implicitly) when the message does not match.
    """
    text = msg.get("text", "")
    found = re.search(r"(\w+\s+|\b)rs jira (.*)", text)
    if found is None:
        return
    # the second capture group holds the JQL search term
    return run_query(found.group(2))
| {
"repo_name": "michaelMinar/limbo",
"path": "limbo/plugins/jira_query.py",
"copies": "1",
"size": "1519",
"license": "mit",
"hash": -3063349528694045000,
"line_mean": 28.2115384615,
"line_max": 85,
"alpha_frac": 0.6333113891,
"autogenerated": false,
"ratio": 3.2319148936170214,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4365226282717022,
"avg_score": null,
"num_lines": null
} |
"""A plugin to disable django database migrations (saves a lot of time)
Use --no-migrations to disable django database migrations.
"""
from nose.plugins import Plugin
class DjangoMigrationsPlugin(Plugin):
    """Run tests without Django database migrations.

    Activated with ``--no-migrations`` or the
    ``NOSE_DISABLE_DJANGO_MIGRATIONS`` environment variable.
    """
    # Inspired by https://gist.github.com/NotSqrt/5f3c76cd15e40ef62d09
    # See also https://github.com/henriquebastos/django-test-without-migrations

    name = 'django-migrations'
    enabled = True

    def options(self, parser, env):
        """Register the --no-migrations command-line flag.

        Deliberately does not call super() to avoid adding a ``--with``
        option for this plugin.
        """
        parser.add_option(
            '--no-migrations',
            action='store_true',
            dest='no_migrations',
            default=env.get('NOSE_DISABLE_DJANGO_MIGRATIONS'),
            help='Disable Django database migrations to save a '
                 'lot of time. [NOSE_DISABLE_DJANGO_MIGRATIONS]')

    def configure(self, options, conf):
        """Swap Django's migration modules for stubs when requested."""
        if not options.no_migrations:
            return
        from django.conf import settings
        settings.MIGRATION_MODULES = DisableMigrations()
class DisableMigrations(object):
    """Mapping stub that pretends every app has a 'notmigrations' module.

    Assigned to ``settings.MIGRATION_MODULES``: Django looks up each app
    label here and receives a module name that does not exist, so no
    migrations are loaded.
    """

    def __contains__(self, app_label):
        # claim an entry for every app
        return True

    def __getitem__(self, app_label):
        return "notmigrations"
| {
"repo_name": "qedsoftware/commcare-hq",
"path": "corehq/tests/noseplugins/djangomigrations.py",
"copies": "2",
"size": "1270",
"license": "bsd-3-clause",
"hash": 9155848240782301000,
"line_mean": 33.3243243243,
"line_max": 79,
"alpha_frac": 0.6385826772,
"autogenerated": false,
"ratio": 4.261744966442953,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 37
} |
# A plugin to enable link helpers for Astronomical coordinate conversions.
# This requires Astropy >= 0.4.
# Coordinate transforms (requires Astropy>)
from ...config import link_function, link_helper
from ...core.link_helpers import MultiLink
from astropy import units as u
from astropy.coordinates import ICRS, FK5, FK4, Galactic
__all__ = ["BaseCelestialMultiLink", "Galactic_to_FK5", "FK4_to_FK5",
"ICRS_to_FK5", "Galactic_to_FK4", "ICRS_to_FK4",
"ICRS_to_Galactic"]
class BaseCelestialMultiLink(MultiLink):
    """Two-way link between a pair of celestial coordinate frames.

    Subclasses set ``frame_in``/``frame_out`` (astropy frame classes) and
    ``display``; forward/backward convert (lon, lat) pairs in degrees.
    """

    display = None
    frame_in = None
    frame_out = None

    def __init__(self, in_lon, in_lat, out_lon, out_lat):
        MultiLink.__init__(self, [in_lon, in_lat],
                           [out_lon, out_lat],
                           self.forward, self.backward)

    def forward(self, in_lon, in_lat):
        """Convert (lon, lat) in degrees from frame_in to frame_out."""
        coord = self.frame_in(in_lon * u.deg, in_lat * u.deg)
        converted = coord.transform_to(self.frame_out)
        return converted.spherical.lon.degree, converted.spherical.lat.degree

    def backward(self, in_lon, in_lat):
        """Convert (lon, lat) in degrees from frame_out back to frame_in."""
        coord = self.frame_out(in_lon * u.deg, in_lat * u.deg)
        converted = coord.transform_to(self.frame_in)
        return converted.spherical.lon.degree, converted.spherical.lat.degree
@link_helper('Link Galactic and FK5 (J2000) Equatorial coordinates',
             input_labels=['l', 'b', 'ra (fk5)', 'dec (fk5)'])
class Galactic_to_FK5(BaseCelestialMultiLink):
    """Bidirectional Galactic <-> FK5 (J2000) coordinate link."""
    display = "Celestial Coordinates: Galactic <-> FK5 (J2000)"
    frame_in = Galactic
    frame_out = FK5
@link_helper('Link FK4 (B1950) and FK5 (J2000) Equatorial coordinates',
             input_labels=['ra (fk4)', 'dec (fk4)', 'ra (fk5)', 'dec (fk5)'])
class FK4_to_FK5(BaseCelestialMultiLink):
    """Bidirectional FK4 (B1950) <-> FK5 (J2000) coordinate link."""
    display = "Celestial Coordinates: FK4 (B1950) <-> FK5 (J2000)"
    frame_in = FK4
    frame_out = FK5
@link_helper('Link ICRS and FK5 (J2000) Equatorial coordinates',
             input_labels=['ra (icrs)', 'dec (icrs)', 'ra (fk5)', 'dec (fk5)'])
class ICRS_to_FK5(BaseCelestialMultiLink):
    """Bidirectional ICRS <-> FK5 (J2000) coordinate link."""
    display = "Celestial Coordinates: ICRS <-> FK5 (J2000)"
    frame_in = ICRS
    frame_out = FK5
@link_helper('Link Galactic and FK4 (B1950) Equatorial coordinates',
             input_labels=['l', 'b', 'ra (fk4)', 'dec (fk4)'])
class Galactic_to_FK4(BaseCelestialMultiLink):
    """Bidirectional Galactic <-> FK4 (B1950) coordinate link."""
    display = "Celestial Coordinates: Galactic <-> FK4 (B1950)"
    frame_in = Galactic
    frame_out = FK4
@link_helper('Link ICRS and FK4 (B1950) Equatorial coordinates',
             input_labels=['ra (icrs)', 'dec (icrs)', 'ra (fk4)', 'dec (fk4)'])
class ICRS_to_FK4(BaseCelestialMultiLink):
    """Bidirectional ICRS <-> FK4 (B1950) coordinate link."""
    display = "Celestial Coordinates: ICRS <-> FK4 (B1950)"
    frame_in = ICRS
    frame_out = FK4
@link_helper('Link ICRS and Galactic coordinates',
             input_labels=['ra (icrs)', 'dec (icrs)', 'l', 'b'])
class ICRS_to_Galactic(BaseCelestialMultiLink):
    """Bidirectional ICRS <-> Galactic coordinate link."""
    display = "Celestial Coordinates: ICRS <-> Galactic"
    frame_in = ICRS
    frame_out = Galactic
| {
"repo_name": "JudoWill/glue",
"path": "glue/plugins/coordinate_helpers/link_helpers.py",
"copies": "1",
"size": "2969",
"license": "bsd-3-clause",
"hash": -5370298065205755000,
"line_mean": 34.7710843373,
"line_max": 79,
"alpha_frac": 0.6308521388,
"autogenerated": false,
"ratio": 2.865830115830116,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8994837568733655,
"avg_score": 0.0003689371792922039,
"num_lines": 83
} |
# A plugin to enable link helpers for Astronomical coordinate conversions.
# This requires Astropy >= 0.4.
# Coordinate transforms (requires Astropy>)
from __future__ import absolute_import, division, print_function
from astropy import units as u
from astropy.coordinates import ICRS, FK5, FK4, Galactic, Galactocentric
from glue.core.link_helpers import MultiLink
from glue.config import link_helper
__all__ = ["BaseCelestialMultiLink", "Galactic_to_FK5", "FK4_to_FK5",
"ICRS_to_FK5", "Galactic_to_FK4", "ICRS_to_FK4",
"ICRS_to_Galactic"]
class BaseCelestialMultiLink(MultiLink):
    """Two-way link between a pair of celestial coordinate frames.

    Subclasses set ``frame_in``/``frame_out`` (astropy frame classes) and
    ``display``; forward/backward convert (lon, lat) pairs in degrees.
    """

    display = None
    frame_in = None
    frame_out = None

    def __init__(self, in_lon, in_lat, out_lon, out_lat):
        super(BaseCelestialMultiLink, self).__init__(in_lon, in_lat, out_lon, out_lat)
        self.create_links([in_lon, in_lat], [out_lon, out_lat],
                          forwards=self.forward, backwards=self.backward)

    def forward(self, in_lon, in_lat):
        """Convert (lon, lat) in degrees from frame_in to frame_out."""
        coord = self.frame_in(in_lon * u.deg, in_lat * u.deg)
        converted = coord.transform_to(self.frame_out)
        return converted.spherical.lon.degree, converted.spherical.lat.degree

    def backward(self, in_lon, in_lat):
        """Convert (lon, lat) in degrees from frame_out back to frame_in."""
        coord = self.frame_out(in_lon * u.deg, in_lat * u.deg)
        converted = coord.transform_to(self.frame_in)
        return converted.spherical.lon.degree, converted.spherical.lat.degree
@link_helper('Link Galactic and FK5 (J2000) Equatorial coordinates',
             input_labels=['l', 'b', 'ra (fk5)', 'dec (fk5)'],
             category='Astronomy')
class Galactic_to_FK5(BaseCelestialMultiLink):
    """Bidirectional Galactic <-> FK5 (J2000) coordinate link."""
    display = "Galactic <-> FK5 (J2000)"
    frame_in = Galactic
    frame_out = FK5
@link_helper('Link FK4 (B1950) and FK5 (J2000) Equatorial coordinates',
             input_labels=['ra (fk4)', 'dec (fk4)', 'ra (fk5)', 'dec (fk5)'],
             category='Astronomy')
class FK4_to_FK5(BaseCelestialMultiLink):
    """Bidirectional FK4 (B1950) <-> FK5 (J2000) coordinate link."""
    display = "FK4 (B1950) <-> FK5 (J2000)"
    frame_in = FK4
    frame_out = FK5
@link_helper('Link ICRS and FK5 (J2000) Equatorial coordinates',
             input_labels=['ra (icrs)', 'dec (icrs)', 'ra (fk5)', 'dec (fk5)'],
             category='Astronomy')
class ICRS_to_FK5(BaseCelestialMultiLink):
    """Bidirectional ICRS <-> FK5 (J2000) coordinate link."""
    display = "ICRS <-> FK5 (J2000)"
    frame_in = ICRS
    frame_out = FK5
@link_helper('Link Galactic and FK4 (B1950) Equatorial coordinates',
             input_labels=['l', 'b', 'ra (fk4)', 'dec (fk4)'],
             category='Astronomy')
class Galactic_to_FK4(BaseCelestialMultiLink):
    """Bidirectional Galactic <-> FK4 (B1950) coordinate link."""
    display = "Galactic <-> FK4 (B1950)"
    frame_in = Galactic
    frame_out = FK4
@link_helper('Link ICRS and FK4 (B1950) Equatorial coordinates',
             input_labels=['ra (icrs)', 'dec (icrs)', 'ra (fk4)', 'dec (fk4)'],
             category='Astronomy')
class ICRS_to_FK4(BaseCelestialMultiLink):
    """Bidirectional ICRS <-> FK4 (B1950) coordinate link."""
    display = "ICRS <-> FK4 (B1950)"
    frame_in = ICRS
    frame_out = FK4
@link_helper('Link ICRS and Galactic coordinates',
             input_labels=['ra (icrs)', 'dec (icrs)', 'l', 'b'],
             category='Astronomy')
class ICRS_to_Galactic(BaseCelestialMultiLink):
    """Bidirectional ICRS <-> Galactic coordinate link."""
    display = "ICRS <-> Galactic"
    frame_in = ICRS
    frame_out = Galactic
@link_helper('Link 3D Galactocentric and Galactic coordinates',
             input_labels=['x (kpc)', 'y (kpc)', 'z (kpc)', 'l (deg)', 'b (deg)', 'distance (kpc)'],
             category='Astronomy')
class GalactocentricToGalactic(MultiLink):
    """Two-way link between 3D Galactocentric cartesian coordinates
    (x, y, z in kpc) and Galactic (l, b in degrees; distance in kpc)."""

    display = "3D Galactocentric <-> Galactic"

    def __init__(self, x_id, y_id, z_id, l_id, b_id, d_id):
        super(GalactocentricToGalactic, self).__init__(x_id, y_id, z_id, l_id, b_id, d_id)
        self.create_links([x_id, y_id, z_id], [l_id, b_id, d_id],
                          self.forward, self.backward)

    def forward(self, x_kpc, y_kpc, z_kpc):
        """Galactocentric cartesian (kpc) -> Galactic (deg, deg, kpc)."""
        coords = Galactocentric(x=x_kpc * u.kpc, y=y_kpc * u.kpc,
                                z=z_kpc * u.kpc).transform_to(Galactic)
        return coords.l.degree, coords.b.degree, coords.distance.to(u.kpc).value

    def backward(self, l_deg, b_deg, d_kpc):
        """Galactic (deg, deg, kpc) -> Galactocentric cartesian (kpc)."""
        coords = Galactic(l=l_deg * u.deg, b=b_deg * u.deg,
                          distance=d_kpc * u.kpc).transform_to(Galactocentric)
        return (coords.x.to(u.kpc).value,
                coords.y.to(u.kpc).value,
                coords.z.to(u.kpc).value)
| {
"repo_name": "saimn/glue",
"path": "glue/plugins/coordinate_helpers/link_helpers.py",
"copies": "4",
"size": "4198",
"license": "bsd-3-clause",
"hash": 9062943850291271000,
"line_mean": 35.8245614035,
"line_max": 109,
"alpha_frac": 0.6176750834,
"autogenerated": false,
"ratio": 2.7582128777923787,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5375887961192378,
"avg_score": null,
"num_lines": null
} |
""" A plugin to incorporate work-item creation in VSTS
easily out of issues detected from Sentry.io """
from __future__ import absolute_import
from mistune import markdown
from sentry.plugins.bases.issue2 import IssueTrackingPlugin2
from sentry.utils.http import absolute_uri
from sentry.integrations import FeatureDescription, IntegrationFeatures
from .mixins import VisualStudioMixin
from .repository_provider import VisualStudioRepositoryProvider
class VstsPlugin(VisualStudioMixin, IssueTrackingPlugin2):
    """Sentry plugin: create and link VSTS (Azure DevOps) work items
    from Sentry issue groups."""
    description = "Integrate Visual Studio Team Services work items by linking a project."
    slug = "vsts"
    conf_key = slug
    auth_provider = "visualstudio"
    required_field = "instance"
    feature_descriptions = [
        FeatureDescription(
            """
            Authorize repositories to be added to your Sentry organization to augment
            sentry issues with commit data with [deployment
            tracking](https://docs.sentry.io/learn/releases/).
            """,
            IntegrationFeatures.COMMITS,
        ),
        FeatureDescription(
            """
            Create and link Sentry issue groups directly to a Azure DevOps work item in any of
            your projects, providing a quick way to jump from Sentry bug to tracked
            work item!
            """,
            IntegrationFeatures.ISSUE_BASIC,
        ),
    ]
    # keys every issue dict handled by this plugin must carry
    issue_fields = frozenset(["id", "title", "url"])
    def get_configure_plugin_fields(self, request, project, **kwargs):
        """Describe the plugin's project-level configuration form."""
        # TODO(dcramer): Both Account and Project can query the API an access
        # token, and could likely be moved to the 'Create Issue' form
        return [
            {
                "name": "instance",
                "label": "Instance",
                "type": "text",
                "placeholder": "example.visualstudio.com",
                "required": True,
                "help": "VS Team Services account ({account}.visualstudio.com) or TFS server ({server:port}).",
            },
            {
                "name": "default_project",
                "label": "Default Project Name",
                "type": "text",
                "placeholder": "MyProject",
                "required": False,
                "help": (
                    "Enter the Visual Studio Team Services project name that you wish "
                    "to use as a default for new work items"
                ),
            },
        ]
    def is_configured(self, request, project, **kwargs):
        """Return True when every required option ("instance") is set."""
        for o in ("instance",):
            if not bool(self.get_option(o, project)):
                return False
        return True
    def get_issue_label(self, group, issue, **kwargs):
        """Return the short label shown for a linked work item."""
        return u"Bug {}".format(issue["id"])
    def get_issue_url(self, group, issue, **kwargs):
        """Return the browse URL of a linked work item."""
        return issue["url"]
    def get_new_issue_fields(self, request, group, event, **kwargs):
        """Prepend a project selector to the default new-issue form."""
        fields = super(VstsPlugin, self).get_new_issue_fields(request, group, event, **kwargs)
        client = self.get_client(request.user)
        instance = self.get_option("instance", group.project)
        try:
            projects = client.get_projects(instance)
        except Exception as e:
            # raise_error converts API failures into a plugin error response
            self.raise_error(e, identity=client.auth)
        return [
            {
                "name": "project",
                "label": "Project",
                "default": self.get_option("default_project", group.project),
                "type": "text",
                "choices": [i["name"] for i in projects["value"]],
                "required": True,
            }
        ] + fields
    def get_link_existing_issue_fields(self, request, group, event, **kwargs):
        """Describe the form used to link an already-existing work item."""
        return [
            {"name": "item_id", "label": "Work Item ID", "default": "", "type": "text"},
            {
                "name": "comment",
                "label": "Comment",
                "default": u"I've identified this issue in Sentry: {}".format(
                    absolute_uri(group.get_absolute_url(params={"referrer": "vsts_plugin"}))
                ),
                "type": "textarea",
                "help": ("Markdown is supported. Leave blank if you don't want to add a comment."),
                "required": False,
            },
        ]
    def create_issue(self, request, group, form_data, **kwargs):
        """
        Creates the issue on the remote service and returns an issue ID.
        """
        instance = self.get_option("instance", group.project)
        project = form_data.get("project") or self.get_option("default_project", group.project)
        client = self.get_client(request.user)
        title = form_data["title"]
        description = form_data["description"]
        # deep link back to the Sentry issue, attached to the work item
        link = absolute_uri(group.get_absolute_url(params={"referrer": "vsts_plugin"}))
        try:
            created_item = client.create_work_item(
                instance=instance,
                project=project,
                title=title,
                comment=markdown(description),
                link=link,
            )
        except Exception as e:
            self.raise_error(e, identity=client.auth)
        return {
            "id": created_item["id"],
            "url": created_item["_links"]["html"]["href"],
            "title": title,
        }
    def link_issue(self, request, group, form_data, **kwargs):
        """Link an existing work item; optionally post a comment on it."""
        client = self.get_client(request.user)
        instance = self.get_option("instance", group.project)
        if form_data.get("comment"):
            # update adds the comment and the back-link in one call
            try:
                work_item = client.update_work_item(
                    instance=instance,
                    id=form_data["item_id"],
                    link=absolute_uri(group.get_absolute_url(params={"referrer": "vsts_plugin"})),
                    comment=markdown(form_data["comment"]) if form_data.get("comment") else None,
                )
            except Exception as e:
                self.raise_error(e, identity=client.auth)
        else:
            # no comment requested: just fetch the item to validate the id
            try:
                work_item = client.get_work_item(instance=instance, id=form_data["item_id"])
            except Exception as e:
                self.raise_error(e, identity=client.auth)
        return {
            "id": work_item["id"],
            "url": work_item["_links"]["html"]["href"],
            "title": work_item["fields"]["System.Title"],
        }
    def setup(self, bindings):
        """Register the VSTS repository provider with Sentry."""
        bindings.add("repository.provider", VisualStudioRepositoryProvider, id="visualstudio")
| {
"repo_name": "beeftornado/sentry",
"path": "src/sentry_plugins/vsts/plugin.py",
"copies": "1",
"size": "6480",
"license": "bsd-3-clause",
"hash": 1505127334814132700,
"line_mean": 37.1176470588,
"line_max": 111,
"alpha_frac": 0.5512345679,
"autogenerated": false,
"ratio": 4.42019099590723,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5471425563807231,
"avg_score": null,
"num_lines": null
} |
"""A plugin to interact with Plex Media Servers."""
###############################################################################
#
# Until version 1.0.0:
# TODO: [X] Discover Servers
# TODO: [ ] Access Libraries
# TODO: [ ] - Notify about new shows/Movies
# TODO: [ ] - Check for new episodes that aren't in the library yet
# TODO: [ ] Documentation
#
###############################################################################
# standard library imports
import configparser
import logging
import socket
import struct
# related third party imports
from plexapi.myplex import MyPlexAccount
from plexapi.exceptions import Unauthorized, BadRequest, NotFound
from plexapi.server import PlexServer
# application specific imports
from samantha.core import subscribe_to
from samantha.plugins.plugin import Plugin
# from samantha.tools import eventbuilder
__version__ = "1.0.0a5"
# Initialize the logger
LOGGER = logging.getLogger(__name__)
logging.getLogger("plexapi").setLevel(logging.WARN)
logging.getLogger("urllib3.connectionpool").setLevel(logging.WARN)
# TODO Wrap this in a function and make it callable via event
config = configparser.ConfigParser()
if config.read("variables_private.ini"):
    # config.read returns the list of files successfully parsed, so a
    # non-empty list means the private settings file was found
    plex_config = config["plex"]
    # raw=True keeps '%' characters in the password from being
    # interpolated by configparser
    SECRETS = (plex_config.get("username"),
               plex_config.get("password", raw=True))
else:
    LOGGER.warning("No config found! Are you sure the file %s exists?",
                   "samantha/variables_private.ini")
    # without credentials the account-based lookups cannot work
    SECRETS = None
def discover_local_servers():
    """Find Plex Media Servers on the local network.

    Sends a multicast 'M-SEARCH' datagram via Plex' 'GDM' protocol and
    collects responses for one second.

    Heavily based on the function found at
    https://github.com/iBaa/PlexConnect/blob/master/PlexAPI.py

    Returns:
        dict: maps each server's uuid (Resource-Identifier) to a dict with
        the keys 'ip', 'discovery', and any of 'content-type', 'uuid',
        'serverName', 'port', 'updated', 'version' found in its response.
    """
    result = []
    # Find all Plex servers on the network via Plex' 'GDM' protocol
    gdm_ip = '239.0.0.250'
    gdm_port = 32414
    gdm_msg = 'M-SEARCH * HTTP/1.0'.encode("utf-8")
    gdm_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    gdm_socket.settimeout(1.0)
    ttl = struct.pack('b', 1)
    gdm_socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl)
    try:
        LOGGER.debug("Checking for servers on the Network")
        gdm_socket.sendto(gdm_msg, (gdm_ip, gdm_port))
        while True:
            try:
                data, server = gdm_socket.recvfrom(1024)
                LOGGER.debug("Received data from %s:\n%s", server, data)
                result.append({'from': server, 'data': data})
            except socket.timeout:
                # No more responses within the 1s window -> stop listening.
                break
    finally:
        gdm_socket.close()
    # parse the responses received from the servers
    pms_list = {}
    for response in result:
        update = {'ip': response.get('from')[0], 'discovery': "auto"}
        payload = response.get('data').decode("utf-8")
        # Check if we had a positive HTTP response
        if "200 OK" not in payload:
            continue
        for each in payload.split('\n'):
            # decode the response's header lines one by one
            if "Content-Type:" in each:
                update['content-type'] = each.split(':')[1].strip()
            elif "Resource-Identifier:" in each:
                update['uuid'] = each.split(':')[1].strip()
            elif "Name:" in each:
                update['serverName'] = each.split(':')[1].strip()
            elif "Port:" in each:
                update['port'] = each.split(':')[1].strip()
            elif "Updated-At:" in each:
                update['updated'] = each.split(':')[1].strip()
            elif "Version:" in each:
                update['version'] = each.split(':')[1].strip()
        if 'uuid' in update:
            pms_list[update['uuid']] = update
        else:
            # BUG fix: the original raised a KeyError when a response did
            # not contain a Resource-Identifier header.
            LOGGER.warning("Ignoring response from %s that contained no "
                           "Resource-Identifier.", update['ip'])
    if not pms_list:
        LOGGER.debug("No servers discovered")
    else:
        plural = "" if len(pms_list) == 1 else "s"
        LOGGER.debug("%d Server%s discovered:", len(pms_list), plural)
        for uuid in pms_list:
            LOGGER.debug("%s at %s:%s",
                         pms_list[uuid]['serverName'],
                         pms_list[uuid]['ip'],
                         pms_list[uuid]['port'])
    return pms_list
def get_servers_from_account():
    """Fetch the servers that are registered to the configured Plex account.

    Returns:
        dict: maps each server's clientIdentifier to its MyPlexResource.
        Empty when no credentials are configured or the request fails.
    """
    if SECRETS is None:
        # No credentials were loaded from the config file.
        return {}
    try:
        account = MyPlexAccount(username=SECRETS[0], password=SECRETS[1])
        # Keep only the resources that actually provide a server.
        account_servers = {resource.clientIdentifier: resource
                           for resource in account.resources()
                           if "server" in resource.provides}
        return account_servers
    except Unauthorized:
        LOGGER.error("Could not authorize your account with the given "
                     "credentials.")
        return {}
    except BadRequest:
        # BUG fix: the original logged the placeholder text "Blabla".
        LOGGER.error("Plex returned a bad-request error while fetching the "
                     "account's resources.")
        # TODO: retry
        return {}
def localize_remote_servers(local_servers, remote_servers):
    """Sort the account's servers into locally and remotely reachable ones.

    For every server from the account, a remote (ssl) connection is tried
    first; if the same server id also appeared in the local GDM discovery, a
    direct LAN connection is preferred over the remote one.

    NOTE: entries that get a LAN connection are deleted from the caller's
    ``local_servers`` dict; servers left over in it afterwards are reported
    as credential problems.
    """
    # Prepare one list for all locally available servers
    # and one for only remotely available servers
    locally_available_servers = []
    remotely_available_servers = []
    # Check for each remotely available if it happens to be in the same network
    for server_id in remote_servers:
        remote_server_resource = remote_servers[server_id]
        LOGGER.debug("Checking if server %s is available via the Internet.",
                     remote_server_resource.name)
        remote_server = None
        # local_server = None
        try:
            # Attempt connecting to the server
            remote_server = remote_server_resource.connect(ssl=True)
            LOGGER.debug("Success.")
        except NotFound:
            # Not fatal yet - the server may still be reachable via LAN.
            LOGGER.warning("The server %s isn't available via the Internet",
                           remote_server_resource.name)
        LOGGER.debug("Checking if server %s is available via LAN.",
                     remote_server_resource.name)
        if server_id in local_servers:
            # Server is on the same network
            local_server_data = local_servers[server_id]
            baseurl = "http://{}:{}".format(local_server_data["ip"],
                                            local_server_data["port"])
            try:
                LOGGER.debug("The server %s is available via LAN. Attempting "
                             "to replace remote version with local one...",
                             remote_server_resource.name)
                # Reuses the account's access token for the LAN connection.
                local_server = PlexServer(baseurl,
                                          remote_server_resource.accessToken)
                del(local_servers[server_id])
                locally_available_servers.append(local_server)
                LOGGER.debug("Connecting to %s via %s succeeded.",
                             remote_server_resource.name,
                             baseurl)
            except NotFound:
                # LAN connection failed; fall back to the remote one, if any.
                LOGGER.warning("Couldn't connect to %s via %s.",
                               remote_server_resource.name,
                               baseurl)
                if remote_server is not None:
                    remotely_available_servers.append(remote_server)
                else:
                    LOGGER.debug("The server %s was skipped because it wasn't "
                                 "available in any way.",
                                 remote_server_resource.name)
        elif remote_server is not None:
            # Server wasn't found in the same Network
            LOGGER.debug("The server %s is not available locally. "
                         "Sam will be using the remote resource.",
                         remote_server_resource.name)
            remotely_available_servers.append(remote_server)
        else:
            # Neither LAN nor Internet connection worked.
            LOGGER.debug("The server %s was skipped because it wasn't "
                         "available in any way.",
                         remote_server_resource.name)
    # Whatever is left in local_servers was seen on the LAN but does not
    # belong to the configured account - likely a credentials issue.
    for server_id in local_servers:
        this_server = local_servers[server_id]
        LOGGER.warning("You don't seem to have access to the server %s "
                       "(ID: %s) at %s:%s via your account. Please check "
                       "your credentials.",
                       this_server["serverName"],
                       server_id,
                       this_server["ip"],
                       this_server["port"])
    return locally_available_servers, remotely_available_servers
# Discover servers once at import time: broadcast on the LAN first, then
# fetch the servers registered to the configured Plex account.
local = discover_local_servers()
remote = get_servers_from_account()
LOCAL_SERVERS, REMOTE_SERVERS = localize_remote_servers(local, remote)
ALL_SERVERS = LOCAL_SERVERS + REMOTE_SERVERS
LOGGER.info("%d servers were found. %d of them are locally available, %d via "
            "the internet.",
            len(ALL_SERVERS),
            len(LOCAL_SERVERS),
            len(REMOTE_SERVERS))
# The plugin is only marked as active if at least one server is reachable.
PLUGIN = Plugin("Plex", bool(ALL_SERVERS), LOGGER, __file__)
@subscribe_to("system.onstart")
def start_func(key, data):
    """Test the 'onstart' event."""
    # Log and return the identical message so the event result is traceable.
    message = "I'm now doing something productive!"
    LOGGER.debug(message)
    return message
@subscribe_to("system.onexit")
def stop_func(key, data):
    """Test the 'onexit' event."""
    # Log and return the identical message so the event result is traceable.
    message = "I'm not doing anything productive anymore."
    LOGGER.debug(message)
    return message
| {
"repo_name": "Sirs0ri/PersonalAssistant",
"path": "samantha/plugins/plex_plugin.py",
"copies": "1",
"size": "9594",
"license": "mit",
"hash": 232339813623651620,
"line_mean": 38.1422594142,
"line_max": 79,
"alpha_frac": 0.544819679,
"autogenerated": false,
"ratio": 4.319675821701936,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 239
} |
"""A plugin to react to new pokes from Facebook.
It hijacks an authentication-cookie which the user has to enter manually.
Nomnomnom, Cookies!
Problem: This cookie is only alive for about a week, after that it would have
to be renewed. I'm too lazy to do that right now which is why I deactivated the
plugin for now.
Also, Facebook really doesn't like scraping their website and I don't want my
account to be terminated.
"""
###############################################################################
#
# TODO: [ ] renew the cookie that authenticates the session, or:
# TODO: [ ] switch to a requests.session that'll be authenticated ca once a day
# TODO: [ ] randomize requests to prevent the scraping from being detected.
#
###############################################################################
# standard library imports
import re
import html
import logging
from threading import Event as tEvent
import time
# related third party imports
import requests
# application specific imports
from samantha.core import subscribe_to
from samantha.plugins.plugin import Plugin
from samantha.tools.eventbuilder import eEvent
try:
    import samantha.variables_private as variables_private
    CURL = variables_private.fb_curl
except (ImportError, AttributeError):
    # Either the private-variables module is missing entirely, or it does
    # not define fb_curl. The plugin then has no session to work with.
    variables_private = None
    CURL = None
__version__ = "1.0.5"
# Initialize the logger
LOGGER = logging.getLogger(__name__)
# PLUGIN = Plugin("Facebook", CURL is not None, LOGGER, __file__)
# Hard-disabled (see the module docstring): the session cookie expires after
# about a week and scraping risks account termination.
PLUGIN = Plugin("Facebook", False, LOGGER, __file__)
def _parse_curl(curl):
"""Parse a cURL command meant to be used in bash into URL and headers.
This plugin requires cookie-jacking to access and parse an authenticated
version of Facebook's "poke" page. Chrome allows to copy internal URL call
the browser makes including necessary cookies as bash commands. This
function allows the user to enter this command and have it parsed.
"""
if curl is None:
return None, None
curl = curl.replace("curl ", "").replace(" --compressed", "")
divider = curl[0]
# This will be the type of quote around the items in the cURL command.
# Should always be ', but better safe than sound, right?
curl = curl.replace(divider, "") # remove all the quotes
# The command is in the format "URL -H header1 -H header2 ..."
# Everything before the first appearance of -H is the URL, after each
# appearance of -H follows a header.
headers = curl.split(" -H ")
url = headers.pop(0)
header_dict = {}
for h in headers:
name, val = h.split(": ")
header_dict[name] = val
return url, header_dict
# Set as soon as a request to facebook is successful. Cleared if the requests
# fail 3x in a row. While PLUGIN_IS_ONLINE isn't set the plugin will not retry
# failed requests. While it is set, the plugin will retry up to 3x to
# reestablish a connection.
PLUGIN_IS_ONLINE = tEvent()
# Parse a command formatted for bash's cURL into URL and a dict of headers.
URL, HEADER_DICT = _parse_curl(CURL)
# Names of people whose pokes were seen in the last check; used to tell new
# pokes from ones that were already reported.
CACHE = []
def _retry_failure_message(tries, retries):
    """Build the log suffix that explains whether/why a retry will happen."""
    if tries == retries > 1:
        return "Reached the max. amount of retries."
    if not PLUGIN_IS_ONLINE.is_set():
        return "Not retrying because the plugin is offline."
    return "Retrying in two seconds."


@subscribe_to("time.schedule.min")
def check_pokes(key, data):
    """Parse the website https://m.facebook.com/pokes/ to access new pokes.

    The result is compared to an existing cache of pokes to notify the user
    only about new ones.
    """
    global CACHE
    cache = []
    new_count = 0
    req = None
    tries = 0
    retries = 3 if PLUGIN_IS_ONLINE.is_set() else 1
    # Give up after one failed attempt if the plugin wasn't able to establish a
    # connection before. Otherwise try up to 3x.
    while tries < retries and req is None:
        try:
            tries += 1
            req = requests.get(url=URL, headers=HEADER_DICT, timeout=15)
            if req.status_code == 200:
                # Update the flag after a successful connection
                PLUGIN_IS_ONLINE.set()
            else:
                LOGGER.warning("The request returned the wrong status code "
                               "(%s) on attempt %d. %s",
                               req.status_code, tries,
                               _retry_failure_message(tries, retries))
                req = None
                time.sleep(2)
        except (requests.exceptions.ConnectionError,
                requests.exceptions.SSLError,
                requests.exceptions.Timeout) as e:
            LOGGER.warning("Connecting to Facebook failed on attempt %d. "
                           "%s Error: %s", tries,
                           _retry_failure_message(tries, retries), e)
            req = None
            time.sleep(2)
    if req is None:
        # BUG fix: the original messages said "Twitch" (copy/paste error).
        LOGGER.error("Connecting to Facebook failed.")
        PLUGIN_IS_ONLINE.clear()
        return "Error: Connecting to Facebook failed."
    text = req.text
    matches = re.findall(
        r'<article class="_55wr" id="poke_live_item_[\s\S]*?</article>',
        text)
    if matches:
        # pokes were found on the parsed webpage.
        for match in matches:
            _poke = {}
            # Extract the poking person's name and the displayed text.
            m = re.search((r'<a href="/[\s\S]*?">'
                           r'(?P<name>[\s\S]*?)</a>'
                           r'(?P<text>[\s\S]*?)</div>'),
                          match)
            _poke["text"] = m.group("name") + m.group("text")
            _poke["name"] = m.group("name")
            # Extract the profile picture's URL.
            m = re.search((r'<i class="img profpic"[\s\S]*?url\("'
                           r'(?P<imgurl>[\s\S]*?)"\)'),
                          match)
            _poke["imgurl"] = html.unescape(m.group("imgurl"))
            # Extract the poke-back URL.
            m = re.search((r'<a class="_56bz _54k8 _56bs _56bu" href="'
                           r'(?P<pokeurl>[\s\S]*?)"'),
                          match)
            _poke["pokeurl"] = "https://m.facebook.com" + html.unescape(
                m.group("pokeurl"))
            if _poke["name"] not in CACHE:
                LOGGER.debug(_poke["text"])
                eEvent(sender_id=PLUGIN.name,
                       keyword="facebook.poked",
                       data=_poke).trigger()
                new_count += 1
            else:
                LOGGER.warning("This poke by %s is an old one.", _poke["name"])
            cache.append(_poke["name"])
    else:
        LOGGER.warning("No new pokes!")
    CACHE = cache
    # BUG fix: the original used "is not 1" - an identity test on an int.
    return "Found {} poke{}, {} of them new. (Cache: {})".format(
        len(CACHE),
        "s" if len(CACHE) != 1 else "",
        new_count,
        CACHE)
@subscribe_to("facebook.poke")
def poke(key, data):
    """Poke a person via a URL including Facebook's authentication cookie."""
    # Validate the payload piece by piece before issuing the request.
    if "pokeurl" not in data:
        return "Error: The URL is missing from the data."
    if "headers" not in data:
        return "Error: The headers are missing from the data."
    if "name" not in data:
        return "Error: The poked person's name is missing from the data."
    response = requests.get(url=data["pokeurl"], headers=data["headers"])
    if response.status_code == 200:
        return "{} poked successfully".format(data["name"])
    return "Error: the Poke returned Code {}".format(response.status_code)
| {
"repo_name": "Sirs0ri/PersonalAssistant",
"path": "samantha/plugins/facebook_plugin.py",
"copies": "1",
"size": "7599",
"license": "mit",
"hash": 271982397648361500,
"line_mean": 35.0142180095,
"line_max": 79,
"alpha_frac": 0.5729701276,
"autogenerated": false,
"ratio": 3.9973698053655973,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5070339932965597,
"avg_score": null,
"num_lines": null
} |
"""A plugin to test loading devices. It doesn't do anything."""
###############################################################################
#
# TODO: [ ] Add a receiver to intercept commands from the remote
#
###############################################################################
# standard library imports
import logging
import time
# related third party imports
try:
import pigpio
except ImportError:
pigpio = None
# application specific imports
from samantha.core import subscribe_to
from samantha.plugins.plugin import Plugin, Device
__version__ = "1.0.11"
# Initialize the logger
LOGGER = logging.getLogger(__name__)
class Transmitter(object):
    """
    A class to transmit the wireless codes sent by 433 MHz
    wireless fobs. This class is taken 1:1 from the '433MHz keyfob RX/TX'
    example that can be found at
    http://abyz.co.uk/rpi/pigpio/examples.html#Python%20code
    """
    def __init__(self, pi, gpio, repeats=4, bits=24, gap=9000, t0=300, t1=900):
        """
        Instantiate with the Pi and the GPIO connected to the wireless
        transmitter.
        The number of repeats (default 4) and bits (default 24) may
        be set.
        The pre-/post-amble gap (default 9000 us), short pulse length
        (default 300 us), and long pulse length (default 900 us) may
        be set.
        """
        self.pi = pi
        self.gpio = gpio
        self.repeats = repeats
        self.bits = bits
        self.gap = gap
        self.t0 = t0
        self.t1 = t1
        self._make_waves()
        pi.set_mode(gpio, pigpio.OUTPUT)
    def _make_waves(self):
        """
        Generates the basic waveforms needed to transmit codes.
        """
        # Pre-/post-amble: a short high pulse followed by the long gap.
        wf = [pigpio.pulse(1 << self.gpio, 0, self.t0),
              pigpio.pulse(0, 1 << self.gpio, self.gap)]
        self.pi.wave_add_generic(wf)
        self._amble = self.pi.wave_create()
        # A '0' bit: short high, long low.
        wf = [pigpio.pulse(1 << self.gpio, 0, self.t0),
              pigpio.pulse(0, 1 << self.gpio, self.t1)]
        self.pi.wave_add_generic(wf)
        self._wid0 = self.pi.wave_create()
        # A '1' bit: long high, short low.
        wf = [pigpio.pulse(1 << self.gpio, 0, self.t1),
              pigpio.pulse(0, 1 << self.gpio, self.t0)]
        self.pi.wave_add_generic(wf)
        self._wid1 = self.pi.wave_create()
    def set_repeats(self, repeats):
        """
        Set the number of code repeats.
        """
        # Out-of-range values are silently ignored.
        if 1 < repeats < 100:
            self.repeats = repeats
    def set_bits(self, bits):
        """
        Set the number of code bits.
        """
        # Out-of-range values are silently ignored.
        if 5 < bits < 65:
            self.bits = bits
    def set_timings(self, gap, t0, t1):
        """
        Sets the code gap, short pulse, and long pulse length in us.
        """
        self.gap = gap
        self.t0 = t0
        self.t1 = t1
        # Discard the previously created waveforms and rebuild them with
        # the new timings.
        self.pi.wave_delete(self._amble)
        self.pi.wave_delete(self._wid0)
        self.pi.wave_delete(self._wid1)
        self._make_waves()
    def send(self, code):
        """
        Transmits the code (using the current settings of repeats,
        bits, gap, short, and long pulse length).
        """
        chain = [self._amble, 255, 0]
        # Emit the code MSB-first as a chain of bit waveforms.
        bit = (1 << (self.bits - 1))
        for i in range(self.bits):
            if code & bit:
                chain += [self._wid1]
            else:
                chain += [self._wid0]
            bit >>= 1
        # 255/1/repeats/0 is pigpio's "repeat the loop" chain command.
        chain += [self._amble, 255, 1, self.repeats, 0]
        try:
            self.pi.wave_chain(chain)
        except ValueError:
            LOGGER.error("The automatically assigned ID for this command "
                         "exceeded 255 which caused this failure - it's later "
                         "used as a byte. Please restart the device running "
                         "the pigpio daemon ('pigpiod') to reset the IDs.")
        # Block until the transmission has finished.
        while self.pi.wave_tx_busy():
            time.sleep(0.1)
    def cancel(self):
        """
        Cancels the wireless code transmitter.
        """
        self.pi.wave_delete(self._amble)
        self.pi.wave_delete(self._wid0)
        self.pi.wave_delete(self._wid1)
if pigpio:
    # NOTE(review): the pigpio daemon's address is hard-coded here - confirm
    # it matches the local setup.
    PI = pigpio.pi("192.168.178.56")
else:
    PI = None
    LOGGER.error(
        "Could not import pigpio. Please follow the instructions on %s to "
        "install it manually.",
        "https://github.com/joan2937/pigpio/blob/master/README#L103")
if PI is not None and PI.connected:
    TRANSMITTER_PIN = 17
    TRANSMITTER = Transmitter(PI, gpio=TRANSMITTER_PIN)
    active = True
else:
    # Without a reachable pigpio daemon the plugin and its devices stay
    # registered but inactive.
    TRANSMITTER_PIN = None
    TRANSMITTER = None
    active = False
    LOGGER.error("Could not connect to the RasPi at 192.168.178.56.")
PLUGIN = Plugin("433", active, LOGGER, __file__)
READING_LAMP = Device("Readinglamp", active, LOGGER, __file__,
                      ["light", "433"])
AMBIENT_LAMP = Device("Ambientlamp", active, LOGGER, __file__,
                      ["light", "433"])
BED_LAMP = Device("Bedlamp", active, LOGGER, __file__,
                  ["light", "433"])
@READING_LAMP.turn_on
def rl_turn_on(key, data):
    """Switch the reading lamp on via its 433 MHz code."""
    # 1361 is the ON code paired with this lamp's receiver.
    TRANSMITTER.send(1361)
    return "Reading Lamp turned on."
@subscribe_to("time.time_of_day.day")
@READING_LAMP.turn_off
def rl_turn_off(key, data):
    """Switch the reading lamp off via its 433 MHz code."""
    # 1364 is the OFF code paired with this lamp's receiver.
    TRANSMITTER.send(1364)
    return "Reading Lamp turned off."
@BED_LAMP.turn_on
def bl_turn_on(key, data):
    """Switch the bed lamp on via its 433 MHz code."""
    # 4433 is the ON code paired with this lamp's receiver.
    TRANSMITTER.send(4433)
    return "Bed Lamp turned on."
@subscribe_to("time.time_of_day.day")
@BED_LAMP.turn_off
def bl_turn_off(key, data):
    """Switch the bed lamp off via its 433 MHz code."""
    # 4436 is the OFF code paired with this lamp's receiver.
    TRANSMITTER.send(4436)
    return "Bed Lamp turned off."
@AMBIENT_LAMP.turn_on
def al_turn_on(key, data):
    """Switch the ambient lamp on via its 433 MHz code."""
    # 5201 is the ON code paired with this lamp's receiver.
    TRANSMITTER.send(5201)
    return "Ambient Lamp turned on."
@subscribe_to("time.time_of_day.day")
@AMBIENT_LAMP.turn_off
def al_turn_off(key, data):
    """Switch the ambient lamp off via its 433 MHz code."""
    # 5204 is the OFF code paired with this lamp's receiver.
    TRANSMITTER.send(5204)
    return "Ambient Lamp turned off."
@subscribe_to("system.onexit")
def exit_plugin(key, data):
    """Turn all lamps off and release the pigpio resources on shutdown."""
    rl_turn_off(key, data)
    bl_turn_off(key, data)
    al_turn_off(key, data)
    TRANSMITTER.cancel()
    PI.wave_clear()
    PI.stop()
    return "Exited and freed resources."
# @subscribe_to("system.onstart")
# def start_func(key, data):
# """Start the receiver."""
# LOGGER.debug("I'm now doing something productive!")
# return True
#
#
# @subscribe_to("system.onexit")
# def stop_func(key, data):
# """Stop the receiver."""
# LOGGER.debug("I'm not doing anything productive anymore.")
# return True
| {
"repo_name": "Sirs0ri/PersonalAssistant",
"path": "samantha/plugins/433_plugin.py",
"copies": "1",
"size": "6600",
"license": "mit",
"hash": 4163711639678434300,
"line_mean": 26.3858921162,
"line_max": 79,
"alpha_frac": 0.5703030303,
"autogenerated": false,
"ratio": 3.3933161953727504,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.446361922567275,
"avg_score": null,
"num_lines": null
} |
"""A plugin to test loading devices. It doesn't do anything."""
###############################################################################
#
# TODO: [ ]
#
###############################################################################
# standard library imports
import logging
# related third party imports
# application specific imports
import samantha.context as context
from samantha.core import subscribe_to
from samantha.tools import eventbuilder
from samantha.plugins.plugin import Plugin
__version__ = "1.0.10"
# Initialize the logger
LOGGER = logging.getLogger(__name__)
PLUGIN = Plugin("Mediacenter", True, LOGGER, __file__)
# Last known Chromecast state; updated incrementally by the event handler.
CC_DISPLAY_NAME = ""
CC_CONTENT_TYPE = "audio"
CC_PLAYER_STATE = ""
@subscribe_to(["chromecast.connection_change",
               "chromecast.playstate_change",
               "chromecast.contenttype_change"])
def update(key, data):
    """Track the Chromecast's state and adjust the ambient light accordingly.

    Updates the module-level CC_* state from whichever fields are present in
    ``data`` and - at night, while a video is playing - turns the ambient
    light on; otherwise turns it off.

    Returns a short human-readable status string.
    """
    global CC_CONTENT_TYPE, CC_DISPLAY_NAME, CC_PLAYER_STATE
    updated = False
    invalid = True
    if "content_type" in data and data["content_type"]:
        # playstate_change
        invalid = False
        if data["content_type"] != CC_CONTENT_TYPE:
            CC_CONTENT_TYPE = data["content_type"]
            updated = True
            LOGGER.debug("Updated CC_CONTENT_TYPE to '%s'.", CC_CONTENT_TYPE)
    if "player_state" in data and data["player_state"]:
        invalid = False
        if data["player_state"] != CC_PLAYER_STATE:
            CC_PLAYER_STATE = data["player_state"]
            updated = True
            LOGGER.debug("Updated CC_PLAYER_STATE to '%s'.", CC_PLAYER_STATE)
    if "display_name" in data and data["display_name"]:
        invalid = False
        if data["display_name"] != CC_DISPLAY_NAME:
            CC_DISPLAY_NAME = data["display_name"]
            updated = True
            LOGGER.debug("Updated CC_DISPLAY_NAME to '%s'.", CC_DISPLAY_NAME)
    if invalid:
        return ("Error: Invalid Data. 'content_type', 'player_state' and "
                "'display_name' were all missing or empty.")
    if not updated:
        return "No relevant information was updated. Not changing the light."
    # Ignore the updates while Audio is playing. This is only
    # supposed to dim the lights while videos are playing.
    if not CC_CONTENT_TYPE or "audio" in CC_CONTENT_TYPE:
        return "No video is playing. Not changing the light."
    if context.get_value("time.time_of_day") == "night":
        if (CC_PLAYER_STATE in ["PLAYING", "BUFFERING"] and
                CC_DISPLAY_NAME not in [None, "Backdrop"]):
            # An app is playing video.
            eventbuilder.eEvent(  # Turn on ambient light
                sender_id=PLUGIN.name,
                keyword="turn.on.ambient.light").trigger()
            return "Ambient light turned on."
        # No app connected or video is paused
        eventbuilder.eEvent(  # Turn off ambient light
            sender_id=PLUGIN.name,
            keyword="turn.off.ambient.light").trigger()
        return "Ambient light turned off."
    eventbuilder.eEvent(  # Turn off all light
        sender_id=PLUGIN.name,
        keyword="turn.off.light").trigger()
    return "It's daytime. The light is supposed to stay off."
| {
"repo_name": "Sirs0ri/PersonalAssistant",
"path": "samantha/plugins/mediacenter_plugin.py",
"copies": "1",
"size": "3484",
"license": "mit",
"hash": -3644449815534917600,
"line_mean": 38.1460674157,
"line_max": 79,
"alpha_frac": 0.5559701493,
"autogenerated": false,
"ratio": 4.274846625766871,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 89
} |
"""A plugin to test loading plugins. It doesn't do anything."""
###############################################################################
#
# TODO: [ ]
#
###############################################################################
# standard library imports
import configparser
import logging
# related third party imports
try:
from fritzconnection import FritzHosts
except ImportError:
FritzHosts = None
# application specific imports
import samantha.context as context
from samantha.core import subscribe_to
from samantha.plugins.plugin import Device
from samantha.tools import eventbuilder
__version__ = "1.1.0a1"
# Initialize the logger
LOGGER = logging.getLogger(__name__)
def _get_hosts_info():
    """Fetch the host list from the FritzBox with urllib3 logging muted."""
    # Mute requests' logging < WARN while getting data from the FritzBox.
    # It would otherwise produce roughly 150 messages within a second or two.
    pool_logger = logging.getLogger("urllib3.connectionpool")
    previous_level = pool_logger.level
    pool_logger.setLevel(logging.WARNING)
    hosts = []
    try:
        # Update data from the FritzBox
        hosts = FRITZBOX.get_hosts_info()
    except KeyError:
        LOGGER.error("The credentials are invalid.")
        hosts = []
    finally:
        # Reset requests' logging to its original level.
        pool_logger.setLevel(previous_level)
    return hosts
# TODO Wrap this in a function and make it callable via event
config = configparser.ConfigParser()
if config.read("variables_private.ini"):
    # this should be ['variables_private.ini'] if the config was found
    fritzbox_config = config["fritzbox"]
    USER = fritzbox_config.get("username")
    PASSWORD = fritzbox_config.get("password")
    ADDRESS = fritzbox_config.get("address")
else:
    LOGGER.warning("No config found! Are you sure the file %s exists?",
                   "samantha/variables_private.ini")
    # BUG fix: USER and ADDRESS were left undefined here, which caused a
    # NameError in the FritzHosts() call below.
    USER = None
    PASSWORD = None
    ADDRESS = None
authenticated = False
# BUG fix: FRITZBOX was undefined when fritzconnection isn't installed.
FRITZBOX = None
if FritzHosts:
    try:
        FRITZBOX = FritzHosts(address=ADDRESS,
                              user=USER,
                              password=PASSWORD)
    except IOError:
        LOGGER.error("Couldn't connect to a fritzbox at the default "
                     "address '192.168.178.1'!")
        FRITZBOX = None
if FRITZBOX is not None:
    hosts = _get_hosts_info()
    # BUG fix: the original tested "hosts is not []", which is an identity
    # comparison against a fresh list and therefore always True.
    if hosts:
        authenticated = True
PLUGIN = Device("FritzBox", authenticated, LOGGER, __file__)
DEVICES_DICT = context.get_children("network.devices", default={})
def _status_update(device):
    """Publish an availability event for *device* and store it in context."""
    online_state = "online" if int(device["status"]) else "offline"
    LOGGER.debug("Updating device %s", device["mac"])
    keyword = "network.fritzbox.availability.{}.{}".format(online_state,
                                                           device["name"])
    eventbuilder.eEvent(sender_id=PLUGIN.name,
                        keyword=keyword,
                        data=device).trigger()
    context.set_property("network.devices.{}".format(device["mac"]), device)
@subscribe_to(["system.onstart", "time.schedule.10s"])
def update_devices(key, data):
    """Check for updated device-info.

    Polls the FritzBox for its host list, fires events for new devices and
    for online/offline transitions, and mirrors the result into the module
    cache and the shared context.
    """
    # BUG fix: the original compared with "is not", an identity test that
    # only works by accident for small ints - use value equality instead.
    if key == "time.schedule.10s" and data[5] % 20 != 10:
        return "Skipping this check since I'm only refreshing every 20 Sec."
    ignored_macs = ["00:80:77:F2:71:23", None]
    # this list holds the mac-addresses of ignored devices. They won't be able
    # to trigger events such as coming on/offline or registering. The 1st
    # listed address is for example my printer which dis- and reconnects every
    # few minutes and only spams my logs.
    # LOGGER.debug("The INDEX holds %d devices.", len(DEVICES_DICT))
    # Update data from the FritzBox
    devices_list = _get_hosts_info()
    devices_list = sorted(devices_list,
                          key=lambda item: item["name"].lower())
    count = 0
    ignored = 0
    new = 0
    updated = 0
    for device in devices_list:
        count += 1
        if device["mac"] in ignored_macs:
            ignored += 1
            LOGGER.debug("Ignoring '%s' as requested by the user.",
                         device["name"])
        else:
            c_device = context.get_value(
                "network.devices.{}".format(device["mac"]), None)
            if c_device is None:
                new += 1
                LOGGER.debug("%s is a new device.", device["mac"])
                eventbuilder.eEvent(
                    sender_id=PLUGIN.name,
                    keyword="network.fritzbox.newdevice.{}".format(
                        device["name"]),
                    data=device).trigger()
                _status_update(device)
            else:
                current = int(device["status"])
                cached = int(DEVICES_DICT[device["mac"]]["status"])
                in_context = int(c_device["status"])
                # BUG fix: was "is / is not" - identity comparisons between
                # ints. The device's status changed if the fresh value agrees
                # with the cache but differs from the context.
                if current == cached != in_context:
                    updated += 1
                    LOGGER.debug("Device: %d %s, Cache: %d %s, Context: %d %s",
                                 current, device["status"],
                                 cached,
                                 DEVICES_DICT[device["mac"]]["status"],
                                 in_context, c_device["status"])
                    _status_update(device)
            DEVICES_DICT[device["mac"]] = device
    return("Processed {} devices in total, {} of them new. Ignored {} and "
           "updated {}.".format(count, new, ignored, updated))
| {
"repo_name": "Sirs0ri/PersonalAssistant",
"path": "samantha/plugins/fritzbox_plugin.py",
"copies": "1",
"size": "5776",
"license": "mit",
"hash": -5830945738432336000,
"line_mean": 33.380952381,
"line_max": 79,
"alpha_frac": 0.5631925208,
"autogenerated": false,
"ratio": 4.222222222222222,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5285414743022222,
"avg_score": null,
"num_lines": null
} |
#A Ply parser for the dimacs file format.
#importing lex and yacc
import ply.lex as lex
import ply.yacc as yacc
import sys
import copy
import pdb
#Tokens for the ply
tokens = ["VARIABLE",
          "TERMCHAR",
          "P_LINE" ,
          "CNF" ]
#Tokens defined with regular expressions
t_VARIABLE = "-?[1-9][0-9]*" #Variable
t_TERMCHAR = "0" #Clause terminating variable
t_P_LINE = "p" #P line to tell us how many vars we have
t_CNF = "cnf" #Part of the P line
# NOTE(review): the '^'/'$' anchors below rely on the input being fed to the
# lexer one line at a time (see parse_file) - confirm comment lines are
# really skipped as intended.
t_ignore_COMMENT = "^c.*$" #Comment
t_ignore = " \t" #ignore spaces and tabs.
#Defining errors (required).
def t_error(t):
    # Skip a single illegal character and keep lexing.
    t.lexer.skip(1)
#Build the lexer
lexer = lex.lex()
#Global variables for building the clauses
clauses = []  # all completed clauses (each a list of ints)
clause = []  # the clause currently being built, line by line
#The start production
#According to the .cnf specifications, a line can be a comment
#or a list of variables ended with 0(terminate clause variable)
#or just a list of variables.
def p_start(p):
    # The docstring below is the ply grammar for this rule - do not edit it
    # as documentation.
    '''start : P_LINE CNF VARIABLE VARIABLE
             | variableList TERMCHAR
             | variableList
             | empty'''
    global clauses
    global clause
    if p[1] == "COMMENT": #Then it was a comment and ignore
        return
    elif len(p) == 5: #Dealing with the P line (ignored)
        return
    elif len(p) == 2: #Then it was the variable list w/out terminating variable
        return
    else: #Save and restart the clause if it was terminated.
        # Deep-copy so later mutations of `clause` can't alter saved clauses.
        clauses.append(copy.deepcopy(clause))
        clause = []
#Store the variables as they are seen in a clause.
def p_variableList(p):
    '''variableList : variableList VARIABLE
                    | VARIABLE'''
    # Append the newest VARIABLE token to the clause under construction.
    # With 3 symbols the variable is p[2]; in the base case it is p[1].
    global clause
    token_index = 2 if len(p) == 3 else 1
    clause.append(int(p[token_index]))
#Empty comment rule
def p_empty(p):
    '''empty :'''
    # Blank and comment-only lines reduce to the "COMMENT" sentinel, which
    # p_start recognizes and ignores.
    p[0] = "COMMENT"
# Error rule for syntax errors
def p_error(p):
    # Required by ply: report syntax errors without aborting the parse.
    print("Syntax error in input!")
#build the parser
parser = yacc.yacc()
#Use yacc to populate the clauses list.
#(Call this file)
def parse_file(fname):
    """Parse a DIMACS .cnf file line by line and return the clause list.

    Args:
        fname: path to the .cnf file.

    Returns:
        list: the module-level `clauses` accumulator (a list of int lists).
    """
    global clauses
    # BUG fix: use a context manager so the file handle is closed even if
    # the parser raises; the original leaked it in that case.
    with open(fname, 'r') as f:
        # Feed the grammar one line at a time, as it expects.
        for line in f:
            parser.parse(line)
    return clauses
| {
"repo_name": "KingsleyZ/PyBool",
"path": "python/PyBool/include/PyBool_dimacs_parse.py",
"copies": "2",
"size": "2324",
"license": "bsd-2-clause",
"hash": -8167084784108311000,
"line_mean": 23.2083333333,
"line_max": 83,
"alpha_frac": 0.5856282272,
"autogenerated": false,
"ratio": 3.542682926829268,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5128311154029268,
"avg_score": null,
"num_lines": null
} |
"""apman URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.auth import views as auth_views
# NOTE(review): the star import below must re-export `url` and `settings`,
# which are used in this module but never imported explicitly - confirm
# satsound.urls provides them.
from satsound.urls import *
urlpatterns = [
    url(r'', include(satsound_urls), name='satsound'),
    url(r'^accounts/', include('allauth.urls')),
    url(r'^admin/password_reset/$', auth_views.password_reset, name='admin_password_reset'),
    url(r'^admin/password_reset/done/$', auth_views.password_reset_done, name='password_reset_done'),
    url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>.+)/$', auth_views.password_reset_confirm,
        name='password_reset_confirm'),
    url(r'^reset/done/$', auth_views.password_reset_complete, name='password_reset_complete'),
    url(r'^admin/', admin.site.urls, name='admin'),
    url(r'^api/', include(api_urls), name='api-root'),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| {
"repo_name": "saanobhaai/apman",
"path": "apman/urls.py",
"copies": "1",
"size": "1793",
"license": "mit",
"hash": -5404255959223195000,
"line_mean": 51.7352941176,
"line_max": 115,
"alpha_frac": 0.6358059119,
"autogenerated": false,
"ratio": 3.6893004115226335,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9818301404446821,
"avg_score": 0.0013609837951625227,
"num_lines": 34
} |
"""apns_demo.py : Example app which uses the APNS service to send
push notifications. """
import sys
from twisted.internet import reactor
from twisted.python import log
from pushpy import apns, apns_feedback, blackberry, gcm
# Use Apple's sandbox APNS environment instead of production.
USE_SANDBOX = True
CERTIFICATE_FILE = "apple_cert.pem"
KEY_FILE = "apple_key.pem"
# Maximum number of queued notifications in the APNS service.
APNS_QUEUE_SIZE = 100
def handle_apns_send_failure(failure_tuple):
    """Handle an APNS error response for a failed notification send.

    :param failure_tuple: (status_code, token) as reported by the APNS
        error-response packet; status code 8 means "invalid token".
    """
    if failure_tuple[0] == 8:
        # BUG fix: the original called self._delete_token(), but this is a
        # module-level function with no `self` in scope, which raised a
        # NameError. TODO: wire in a real token-deletion callback here.
        log.msg("Token {0} should be deleted by the "
                "application".format(failure_tuple[1]))
    else:
        log.msg("Error code {0} received when sending message to " \
                "token {1}".format(failure_tuple[0], failure_tuple[1]))
def process_failed_tokens(token_list):
""" Processes a list of tokens which have been sent by the APN
feedback service, and represent devices which should no
longer receive APNS messages. """
for token in token_list:
if len(token) >= 2:
print "Token to delete: {0}".format(token[1])
def send_apns_message(apns_token, payload):
    """ Sends the payload to the APNS token specified. """
    # Delegates to the module-level APNSService instance defined below.
    APNS_SERVICE.send_message(apns_token, payload)
# Module-level service singletons: the push gateway connection and the
# feedback-service poller.  Both begin connecting once the reactor runs.
APNS_SERVICE = apns.APNSService(CERTIFICATE_FILE, KEY_FILE,
                                handle_apns_send_failure,
                                USE_SANDBOX, APNS_QUEUE_SIZE)
APNS_FEEDBACK = apns_feedback.APNFeedbackService(CERTIFICATE_FILE,
                                KEY_FILE, process_failed_tokens,
                                USE_SANDBOX)
if __name__ == "__main__":
    # Log to stdout and hand control to the Twisted event loop.
    log.startLogging(sys.stdout)
    reactor.run()
| {
"repo_name": "trulabs/pushpy",
"path": "apns_demo.py",
"copies": "1",
"size": "1767",
"license": "bsd-3-clause",
"hash": -111601016887598180,
"line_mean": 34.34,
"line_max": 71,
"alpha_frac": 0.6344086022,
"autogenerated": false,
"ratio": 3.6889352818371606,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48233438840371606,
"avg_score": null,
"num_lines": null
} |
"""apns_feedback.py: Module which contains functionality which polls
the APNS feedback service for expired/invalid tokens. """
import base64
import struct
import common
from twisted.internet.protocol import ReconnectingClientFactory
from twisted.internet import defer, reactor
from twisted.protocols.basic import LineReceiver
from twisted.python import log
from StringIO import StringIO
# Production and sandbox endpoints of Apple's binary feedback service.
APN_HOSTNAME = "feedback.push.apple.com"
APN_SANDBOX_HOSTNAME = "feedback.sandbox.push.apple.com"
APN_FEEDBACK_PORT = 2196
# Seconds between polls of the feedback service (12 hours).
FEEDBACK_INTERVAL = 43200
class APNProcessFeedback(LineReceiver):
    """ Handles the data that is received from the APN feedback service. """
    def __init__(self):
        pass
    def connectionMade(self):
        """ Called when successfully connected to the feedback service. """
        log.msg("Connected to the APN Feedback Service")
    def rawDataReceived(self, data):
        """ Called when data is received from the feedback service. """
        # The factory switches this protocol to raw mode (setRawMode) in
        # buildProtocol, so this is the callback that actually fires.
        # ``self.input`` is a StringIO buffer attached by the factory.
        log.msg("Receiving data from the APN Feedback Service")
        self.input.write(data)
    def lineReceived(self, data):
        """ Called when data is received from the feedback service. """
        # Defensive duplicate of rawDataReceived for line mode; not expected
        # to fire while raw mode is enabled.
        log.msg("Receiving data from the APN Feedback Service")
        self.input.write(data)
    def connectionLost(self, reason):
        """ Called when the connection is closed by the feedback service (this
            is the behaviour when it has finished sending data. """
        # Apple closes the connection once all feedback has been streamed;
        # hand the accumulated bytes to the factory for parsing.
        log.msg("Finished receiving data from the Feedback Service.")
        self.factory.process_list(self.input.getvalue())
        self.input.close()
class APNFeedbackClientFactory(ReconnectingClientFactory):
    """ Factory which manages instances of the protocol which connect to the
        APN Feedback Service to retrieve invalid client tokens.

        The feedback service streams (timestamp, token-length, token)
        records and then closes the connection; ``process_list`` decodes
        that stream and hands the tokens to ``feedback_callback``. """

    def __init__(self, feedback_callback):
        self.deferred = defer.Deferred()
        self.deferred.addCallback(self.process_list)
        self.protocol = APNProcessFeedback()
        self.feedback_callback = feedback_callback

    def buildProtocol(self, addr):
        """ Builds an instance of the APNProcessFeedback protocol. """
        log.msg("Connecting to the APN feedback service")
        # Poll the feedback service on a fixed interval instead of the
        # base class's exponential backoff: pin both delays.
        self.initialDelay = FEEDBACK_INTERVAL
        self.maxDelay = FEEDBACK_INTERVAL
        self.resetDelay()
        new_protocol = self.protocol
        new_protocol.factory = self
        new_protocol.deferred = self.deferred
        new_protocol.input = StringIO()
        new_protocol.setRawMode()
        return new_protocol

    def process_list(self, data):
        """ Parses a list of tokens received from the feedback service.

            Each record is a 4-byte big-endian timestamp, a 2-byte token
            length, then the binary token; tokens are re-encoded to base64
            before being passed to the callback. """
        log.msg("Processing the tokens received from the APN feedback service")
        token_list = []
        header_size = 6
        while data != "":
            try:
                # Format is a 4 byte time followed by a 2 byte token
                # length field
                feedback_time, token_length = struct.unpack_from('!lh', data, 0)
                data = data[header_size:]
                # Extract the token by using the length that has just been
                # retrieved
                token = struct.unpack_from("!{0}s".format(token_length),
                                           data, 0)[0]
                encoded_token = base64.encodestring(token).replace('\n', '')
                token_list.append((feedback_time, encoded_token))
                data = data[token_length:]
            except struct.error:
                log.err("Could not parse data received from the APN " \
                        "Feedback service.")
                break
        log.msg("Finished processing the token list received from " \
                "the APN feedback service")
        self.feedback_callback(token_list)

    def startedConnecting(self, connector):
        """ Called when a connection attempt to the APN feedback service
            has started. """
        log.msg('Attempting to connect to the APN feedback service')

    def clientConnectionLost(self, connector, reason):
        """ Called when the network connection to the APN feedback
            service has been lost. """
        ReconnectingClientFactory.clientConnectionLost(self, connector, reason)

    def clientConnectionFailed(self, connector, reason):
        """ The connection attempt to the APN feedback service has failed. """
        log.err("Unable to connect to the APN feedback service")
        # BUG FIX: the original delegated to clientConnectionLost here,
        # bypassing the base class's connection-failure bookkeeping; route
        # the failure to the matching base-class handler instead.
        ReconnectingClientFactory.clientConnectionFailed(self, connector,
                                                         reason)
class APNFeedbackService(object):
    """ Sets up and controls the instances of the
        APN Feedback factory. """
    def __init__(self, certificate_file, key_file, feedback_callback,
                 use_sandbox=False):
        # The reconnecting factory re-polls the feedback service on the
        # fixed interval configured in its buildProtocol.
        self.apns_receiver = APNFeedbackClientFactory(feedback_callback)
        if use_sandbox is True:
            apns_host = APN_SANDBOX_HOSTNAME
        else:
            apns_host = APN_HOSTNAME
        # TLS connection using the supplied PEM certificate/key pair; all
        # connection management is delegated to the factory.
        reactor.connectSSL(apns_host, APN_FEEDBACK_PORT,
                           self.apns_receiver,
                           common.APNSClientContextFactory(certificate_file,
                                                           key_file))
| {
"repo_name": "trulabs/pushpy",
"path": "pushpy/apns_feedback.py",
"copies": "1",
"size": "5213",
"license": "bsd-3-clause",
"hash": 5236653419254084000,
"line_mean": 34.9517241379,
"line_max": 80,
"alpha_frac": 0.6385958181,
"autogenerated": false,
"ratio": 4.588908450704225,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5727504268804224,
"avg_score": null,
"num_lines": null
} |
"""APNS Notification platform."""
import logging
from apns2.client import APNsClient
from apns2.errors import Unregistered
from apns2.payload import Payload
import voluptuous as vol
from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER_DOMAIN
from homeassistant.components.notify import (
ATTR_DATA,
ATTR_TARGET,
PLATFORM_SCHEMA,
BaseNotificationService,
)
from homeassistant.config import load_yaml_config_file
from homeassistant.const import ATTR_NAME, CONF_NAME, CONF_PLATFORM
from homeassistant.helpers import template as template_helper
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import track_state_change
from .const import DOMAIN
# Suffix of the per-app YAML file in which registered devices are persisted.
APNS_DEVICES = "apns.yaml"
CONF_CERTFILE = "cert_file"
CONF_TOPIC = "topic"
CONF_SANDBOX = "sandbox"
ATTR_PUSH_ID = "push_id"
# Platform configuration: certificate path, APNs topic and sandbox flag.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_PLATFORM): "apns",
        vol.Required(CONF_NAME): cv.string,
        vol.Required(CONF_CERTFILE): cv.isfile,
        vol.Required(CONF_TOPIC): cv.string,
        vol.Optional(CONF_SANDBOX, default=False): cv.boolean,
    }
)
# Schema for the per-platform ``apns_<name>`` device-registration service.
REGISTER_SERVICE_SCHEMA = vol.Schema(
    {vol.Required(ATTR_PUSH_ID): cv.string, vol.Optional(ATTR_NAME): cv.string}
)
def get_service(hass, config, discovery_info=None):
    """Return push service."""
    # Required keys are guaranteed present by PLATFORM_SCHEMA validation.
    name = config[CONF_NAME]
    cert_file = config[CONF_CERTFILE]
    topic = config[CONF_TOPIC]
    sandbox = config[CONF_SANDBOX]
    service = ApnsNotificationService(hass, name, topic, sandbox, cert_file)
    # Expose a per-platform ``apns_<name>`` service so devices can register
    # their push ids at runtime.
    hass.services.register(
        DOMAIN, f"apns_{name}", service.register, schema=REGISTER_SERVICE_SCHEMA
    )
    return service
class ApnsDevice:
    """
    The APNS Device class.

    Stores information about a device that is registered for push
    notifications.
    """

    def __init__(self, push_id, name, tracking_device_id=None, disabled=False):
        """Initialize APNS Device."""
        self.device_push_id = push_id
        self.device_name = name
        self.tracking_id = tracking_device_id
        self.device_disabled = disabled

    @property
    def push_id(self):
        """Return the APNS id for the device."""
        return self.device_push_id

    @property
    def name(self):
        """Return the friendly name for the device."""
        return self.device_name

    @property
    def tracking_device_id(self):
        """
        Return the device Id.

        The id of a device that is tracked by the device
        tracking component.
        """
        return self.tracking_id

    @property
    def full_tracking_device_id(self):
        """
        Return the fully qualified device id.

        The full id of a device that is tracked by the device
        tracking component.
        """
        return f"{DEVICE_TRACKER_DOMAIN}.{self.tracking_id}"

    @property
    def disabled(self):
        """Return the state of the service."""
        return self.device_disabled

    def disable(self):
        """Disable the device from receiving notifications."""
        self.device_disabled = True

    def __eq__(self, other):
        """Return the comparison."""
        if isinstance(other, self.__class__):
            return self.push_id == other.push_id and self.name == other.name
        return NotImplemented

    def __ne__(self, other):
        """Return the comparison."""
        # BUG FIX: ``not self.__eq__(other)`` collapsed NotImplemented to
        # False, so ``device != other_type`` always returned False.
        # Propagate NotImplemented so Python can try the reflected op.
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result
def _write_device(out, device):
"""Write a single device to file."""
attributes = []
if device.name is not None:
attributes.append(f"name: {device.name}")
if device.tracking_device_id is not None:
attributes.append(f"tracking_device_id: {device.tracking_device_id}")
if device.disabled:
attributes.append("disabled: True")
out.write(device.push_id)
out.write(": {")
if attributes:
separator = ", "
out.write(separator.join(attributes))
out.write("}\n")
class ApnsNotificationService(BaseNotificationService):
    """Implement the notification service for the APNS service."""
    def __init__(self, hass, app_name, topic, sandbox, cert_file):
        """Initialize APNS application."""
        self.hass = hass
        self.app_name = app_name
        self.sandbox = sandbox
        self.certificate = cert_file
        # Per-app registry file, e.g. ``<config>/<app_name>_apns.yaml``.
        self.yaml_path = hass.config.path(f"{app_name}_{APNS_DEVICES}")
        self.devices = {}
        self.device_states = {}
        self.topic = topic
        try:
            # Load previously registered devices from the YAML registry.
            self.devices = {
                str(key): ApnsDevice(
                    str(key),
                    value.get("name"),
                    value.get("tracking_device_id"),
                    value.get("disabled", False),
                )
                for (key, value) in load_yaml_config_file(self.yaml_path).items()
            }
        except FileNotFoundError:
            # No devices registered yet; the file is created on first
            # registration.
            pass
        tracking_ids = [
            device.full_tracking_device_id
            for (key, device) in self.devices.items()
            if device.tracking_device_id is not None
        ]
        # Watch tracked entities so sends can be filtered by their state.
        track_state_change(hass, tracking_ids, self.device_state_changed_listener)
    def device_state_changed_listener(self, entity_id, from_s, to_s):
        """
        Listen for state change.
        Track device state change if a device has a tracking id specified.
        """
        self.device_states[entity_id] = str(to_s.state)
    def write_devices(self):
        """Write all known devices to file."""
        # Rewrites the whole registry file from the in-memory device map.
        with open(self.yaml_path, "w+") as out:
            for _, device in self.devices.items():
                _write_device(out, device)
    def register(self, call):
        """Register a device to receive push messages."""
        push_id = call.data.get(ATTR_PUSH_ID)
        device_name = call.data.get(ATTR_NAME)
        current_device = self.devices.get(push_id)
        # Preserve any existing tracking id; only the name may change here.
        current_tracking_id = (
            None if current_device is None else current_device.tracking_device_id
        )
        device = ApnsDevice(push_id, device_name, current_tracking_id)
        if current_device is None:
            # First registration for this push id: append to the file.
            self.devices[push_id] = device
            with open(self.yaml_path, "a") as out:
                _write_device(out, device)
            return True
        if device != current_device:
            # Registration changed: rewrite the whole registry.
            self.devices[push_id] = device
            self.write_devices()
        return True
    def send_message(self, message=None, **kwargs):
        """Send push message to registered devices."""
        apns = APNsClient(
            self.certificate, use_sandbox=self.sandbox, use_alternative_port=False
        )
        # ATTR_TARGET, if given, filters recipients by tracked-device state.
        device_state = kwargs.get(ATTR_TARGET)
        message_data = kwargs.get(ATTR_DATA)
        if message_data is None:
            message_data = {}
        if isinstance(message, str):
            rendered_message = message
        elif isinstance(message, template_helper.Template):
            rendered_message = message.render()
        else:
            rendered_message = ""
        payload = Payload(
            alert=rendered_message,
            badge=message_data.get("badge"),
            sound=message_data.get("sound"),
            category=message_data.get("category"),
            custom=message_data.get("custom", {}),
            content_available=message_data.get("content_available", False),
        )
        device_update = False
        for push_id, device in self.devices.items():
            if not device.disabled:
                state = None
                if device.tracking_device_id is not None:
                    state = self.device_states.get(device.full_tracking_device_id)
                if device_state is None or state == str(device_state):
                    try:
                        apns.send_notification(push_id, payload, topic=self.topic)
                    except Unregistered:
                        # Token is no longer valid: disable it and persist.
                        logging.error("Device %s has unregistered", push_id)
                        device_update = True
                        device.disable()
        if device_update:
            self.write_devices()
        return True
| {
"repo_name": "robbiet480/home-assistant",
"path": "homeassistant/components/apns/notify.py",
"copies": "9",
"size": "8136",
"license": "apache-2.0",
"hash": -9062621551540081000,
"line_mean": 29.8181818182,
"line_max": 83,
"alpha_frac": 0.6027531957,
"autogenerated": false,
"ratio": 4.127853881278539,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.923060707697854,
"avg_score": null,
"num_lines": null
} |
"""APNS Notification platform."""
import logging
import voluptuous as vol
from homeassistant.config import load_yaml_config_file
from homeassistant.const import ATTR_NAME, CONF_NAME, CONF_PLATFORM
from homeassistant.helpers import template as template_helper
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import track_state_change
from homeassistant.components.notify import (
ATTR_DATA,
ATTR_TARGET,
DOMAIN,
PLATFORM_SCHEMA,
BaseNotificationService,
)
# Suffix of the per-app YAML file in which registered devices are persisted.
APNS_DEVICES = "apns.yaml"
CONF_CERTFILE = "cert_file"
CONF_TOPIC = "topic"
CONF_SANDBOX = "sandbox"
DEVICE_TRACKER_DOMAIN = "device_tracker"
SERVICE_REGISTER = "apns_register"
ATTR_PUSH_ID = "push_id"
# Platform configuration: certificate path, APNs topic and sandbox flag.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_PLATFORM): "apns",
        vol.Required(CONF_NAME): cv.string,
        vol.Required(CONF_CERTFILE): cv.isfile,
        vol.Required(CONF_TOPIC): cv.string,
        vol.Optional(CONF_SANDBOX, default=False): cv.boolean,
    }
)
# Schema for the per-platform ``apns_<name>`` device-registration service.
REGISTER_SERVICE_SCHEMA = vol.Schema(
    {vol.Required(ATTR_PUSH_ID): cv.string, vol.Optional(ATTR_NAME): cv.string}
)
def get_service(hass, config, discovery_info=None):
    """Return push service."""
    # Required keys are guaranteed present by PLATFORM_SCHEMA validation.
    name = config.get(CONF_NAME)
    cert_file = config.get(CONF_CERTFILE)
    topic = config.get(CONF_TOPIC)
    sandbox = config.get(CONF_SANDBOX)
    service = ApnsNotificationService(hass, name, topic, sandbox, cert_file)
    # Expose a per-platform ``apns_<name>`` service so devices can register
    # their push ids at runtime.
    hass.services.register(
        DOMAIN, f"apns_{name}", service.register, schema=REGISTER_SERVICE_SCHEMA
    )
    return service
class ApnsDevice:
    """
    The APNS Device class.

    Stores information about a device that is registered for push
    notifications.
    """

    def __init__(self, push_id, name, tracking_device_id=None, disabled=False):
        """Initialize APNS Device."""
        self.device_push_id = push_id
        self.device_name = name
        self.tracking_id = tracking_device_id
        self.device_disabled = disabled

    @property
    def push_id(self):
        """Return the APNS id for the device."""
        return self.device_push_id

    @property
    def name(self):
        """Return the friendly name for the device."""
        return self.device_name

    @property
    def tracking_device_id(self):
        """
        Return the device Id.

        The id of a device that is tracked by the device
        tracking component.
        """
        return self.tracking_id

    @property
    def full_tracking_device_id(self):
        """
        Return the fully qualified device id.

        The full id of a device that is tracked by the device
        tracking component.
        """
        return f"{DEVICE_TRACKER_DOMAIN}.{self.tracking_id}"

    @property
    def disabled(self):
        """Return the state of the service."""
        return self.device_disabled

    def disable(self):
        """Disable the device from receiving notifications."""
        self.device_disabled = True

    def __eq__(self, other):
        """Return the comparison."""
        if isinstance(other, self.__class__):
            return self.push_id == other.push_id and self.name == other.name
        return NotImplemented

    def __ne__(self, other):
        """Return the comparison."""
        # BUG FIX: ``not self.__eq__(other)`` collapsed NotImplemented to
        # False, so ``device != other_type`` always returned False.
        # Propagate NotImplemented so Python can try the reflected op.
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result
def _write_device(out, device):
"""Write a single device to file."""
attributes = []
if device.name is not None:
attributes.append(f"name: {device.name}")
if device.tracking_device_id is not None:
attributes.append(f"tracking_device_id: {device.tracking_device_id}")
if device.disabled:
attributes.append("disabled: True")
out.write(device.push_id)
out.write(": {")
if attributes:
separator = ", "
out.write(separator.join(attributes))
out.write("}\n")
class ApnsNotificationService(BaseNotificationService):
    """Implement the notification service for the APNS service."""
    def __init__(self, hass, app_name, topic, sandbox, cert_file):
        """Initialize APNS application."""
        self.hass = hass
        self.app_name = app_name
        self.sandbox = sandbox
        self.certificate = cert_file
        # Per-app registry file, e.g. ``<config>/<app_name>_apns.yaml``.
        self.yaml_path = hass.config.path(app_name + "_" + APNS_DEVICES)
        self.devices = {}
        self.device_states = {}
        self.topic = topic
        try:
            # Load previously registered devices from the YAML registry.
            self.devices = {
                str(key): ApnsDevice(
                    str(key),
                    value.get("name"),
                    value.get("tracking_device_id"),
                    value.get("disabled", False),
                )
                for (key, value) in load_yaml_config_file(self.yaml_path).items()
            }
        except FileNotFoundError:
            # No devices registered yet; the file is created on first
            # registration.
            pass
        tracking_ids = [
            device.full_tracking_device_id
            for (key, device) in self.devices.items()
            if device.tracking_device_id is not None
        ]
        # Watch tracked entities so sends can be filtered by their state.
        track_state_change(hass, tracking_ids, self.device_state_changed_listener)
    def device_state_changed_listener(self, entity_id, from_s, to_s):
        """
        Listen for state change.
        Track device state change if a device has a tracking id specified.
        """
        self.device_states[entity_id] = str(to_s.state)
    def write_devices(self):
        """Write all known devices to file."""
        # Rewrites the whole registry file from the in-memory device map.
        with open(self.yaml_path, "w+") as out:
            for _, device in self.devices.items():
                _write_device(out, device)
    def register(self, call):
        """Register a device to receive push messages."""
        push_id = call.data.get(ATTR_PUSH_ID)
        device_name = call.data.get(ATTR_NAME)
        current_device = self.devices.get(push_id)
        # Preserve any existing tracking id; only the name may change here.
        current_tracking_id = (
            None if current_device is None else current_device.tracking_device_id
        )
        device = ApnsDevice(push_id, device_name, current_tracking_id)
        if current_device is None:
            # First registration for this push id: append to the file.
            self.devices[push_id] = device
            with open(self.yaml_path, "a") as out:
                _write_device(out, device)
            return True
        if device != current_device:
            # Registration changed: rewrite the whole registry.
            self.devices[push_id] = device
            self.write_devices()
        return True
    def send_message(self, message=None, **kwargs):
        """Send push message to registered devices."""
        # Imported lazily so the apns2 dependency is only needed when a
        # notification is actually sent.
        from apns2.client import APNsClient
        from apns2.payload import Payload
        from apns2.errors import Unregistered
        apns = APNsClient(
            self.certificate, use_sandbox=self.sandbox, use_alternative_port=False
        )
        # ATTR_TARGET, if given, filters recipients by tracked-device state.
        device_state = kwargs.get(ATTR_TARGET)
        message_data = kwargs.get(ATTR_DATA)
        if message_data is None:
            message_data = {}
        if isinstance(message, str):
            rendered_message = message
        elif isinstance(message, template_helper.Template):
            rendered_message = message.render()
        else:
            rendered_message = ""
        payload = Payload(
            alert=rendered_message,
            badge=message_data.get("badge"),
            sound=message_data.get("sound"),
            category=message_data.get("category"),
            custom=message_data.get("custom", {}),
            content_available=message_data.get("content_available", False),
        )
        device_update = False
        for push_id, device in self.devices.items():
            if not device.disabled:
                state = None
                if device.tracking_device_id is not None:
                    state = self.device_states.get(device.full_tracking_device_id)
                if device_state is None or state == str(device_state):
                    try:
                        apns.send_notification(push_id, payload, topic=self.topic)
                    except Unregistered:
                        # Token is no longer valid: disable it and persist.
                        logging.error("Device %s has unregistered", push_id)
                        device_update = True
                        device.disable()
        if device_update:
            self.write_devices()
        return True
| {
"repo_name": "Cinntax/home-assistant",
"path": "homeassistant/components/apns/notify.py",
"copies": "1",
"size": "8154",
"license": "apache-2.0",
"hash": 5324043680316840000,
"line_mean": 29.7698113208,
"line_max": 82,
"alpha_frac": 0.59909247,
"autogenerated": false,
"ratio": 4.1119515885022695,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.521104405850227,
"avg_score": null,
"num_lines": null
} |
"""apns.py: Module which contains functionality enabling push notification
messages to be sent to the APNS. """
import time
import common
import base64
import struct
import datetime
import collections
import Queue
import binascii
from twisted.internet.protocol import Protocol, ReconnectingClientFactory
from twisted.python import log
from twisted.internet import error, reactor
# Production and sandbox endpoints of Apple's binary push gateway.
APNS_HOSTNAME = "gateway.push.apple.com"
APNS_SANDBOX_HOSTNAME = "gateway.sandbox.push.apple.com"
APNS_PORT = 2195
# Force a reconnect if no message has been sent within this many seconds.
APNS_RECONNECT_FREQUENCY = 1800
# Enhanced-notification layout: command, id, expiry, token len, token,
# payload len, payload (network byte order).
FORMAT_STRING = "!ciLH32sH%ds"
COMMAND_TYPE = "\x01"
MAX_MESSAGE_SIZE_BYTES = 256
MESSAGE_RETRY_COUNT = 0
# Interval of the idle-connection check in APNSProtocol.
TIMEOUT_CHECK_FREQUENCY = 900
# Message ids cycle within this inclusive range.
MIN_MESSAGE_ID = 1000
MAX_MESSAGE_ID = 2147483647
# Indexes relating to error tuples received as a response from the APNS
ERROR_VALUE_INDEX = 0
SENT_MESSAGE_INDEX = 1
class APNSException(Exception):
    """ Exception raised to report problems when connecting to, or
        sending messages to, the APNS. """
    def __init__(self, error_message):
        # Keep the message available as an attribute as well as in args.
        self.error_text = error_message
        Exception.__init__(self, error_message)
class APNSProtocol(Protocol):
    """ Protocol class which handles connection events made to and
        received from the APNS. """
    def __init__(self):
        self.connect_time = None
        # Tracks whether an immediate-disconnect alert has already been
        # raised, to avoid repeating it for the same connectivity problem.
        self.alerted = False
        self.last_message_sent = datetime.datetime.now()
        # Set the timeout check value to force reconnection, if a message
        # has not been sent within a certain time interval
        self.set_timeout_trigger()
    def set_timeout_trigger(self):
        """ Sets the trigger to call the reconnect function after a certain
        period of time, to kick of a reconnection to the APNS if
        necessary. """
        self.reconnect_trigger = reactor.callLater(TIMEOUT_CHECK_FREQUENCY,
                                                   self.reconnect)
    def reconnect(self):
        """ Forces the connection to the APNS to be lost. Will then trigger
        a reconnection. """
        log.msg("Running APNS timeout check")
        # A reconnect happens only if the last send is older than the
        # reconnect window; otherwise just re-arm the timer.
        reconnect_trigger = datetime.datetime.now() - \
                            datetime.timedelta(0, APNS_RECONNECT_FREQUENCY)
        if self.last_message_sent <= reconnect_trigger:
            # Remove the last message that was sent - as we are forcefully
            # reconnecting it will be old enough to be removed
            log.msg("Forcing a reconnection to the APNS")
            self.factory.message = None
            self.transport.abortConnection()
            self.set_timeout_trigger()
        else:
            self.set_timeout_trigger()
    def connectionMade(self):
        """ Called when a connection has been established, and acts as a
        trigger to start processing any messages that have been received but
        have not yet been sent. """
        self.connect_time = datetime.datetime.now()
        # Update the time that the last message was 'sent' - i.e. reset
        # the timeout
        self.last_message_sent = datetime.datetime.now()
        # A pending message_error means some sent messages may need resending.
        if self.factory.message_error is not None:
            self.factory.process_failed_sent_messages()
        # ``self.message`` is attached by the factory in clientConnectionLost
        # when a send was in flight during the disconnect.
        if hasattr(self, 'message'):
            if self.factory.retry_attempts < MESSAGE_RETRY_COUNT:
                self.transport.write(self.message)
                del self.message
            else:
                # Already tried to send the message the maximum number of
                # times, so delete and reset the retry count
                del self.message
                self.factory.retry_attempts = 0
        else:
            pass
        self.factory.process_queue()
    def connectionLost(self, reason):
        """ Raise the retry attempts by 1, to prevent continuously
        trying to send the same message if it is erroring. """
        # An immediate disconnect (within ~1s of connecting) usually means
        # a persistent problem, so raise a one-shot alert.
        if (datetime.datetime.now() - self.connect_time).seconds <= 1:
            if self.alerted is False:
                log.err("Detected immediate disconnect. Alert")
                self.alerted = True
            else:
                log.err("Immediate disconnect detected. Already alerted")
        else:
            if self.alerted is True:
                log.msg("APNS Connectivity issue resolved")
                self.alerted = False
        self.factory.retry_attempts += 1
        log.msg("APNS Connection lost")
    def dataReceived(self, data):
        """ No data is received when messages are sent successfully - a
        response is only received when something has gone wrong. """
        # Error response layout: command (1 byte), status (1 byte),
        # message id (4 bytes), all big-endian.
        error_tuple = struct.unpack("!bbi", data)
        log.msg("Error code {0} received when sending message id {1}".format(
            error_tuple[1], error_tuple[2]))
        try:
            if int(error_tuple[2]) == 0:
                log.msg("Error response contained a message id of 0. Resend " \
                        "process not being invoked.")
            else:
                self.factory.message_error = int(error_tuple[2])
                self.factory.error_callback((error_tuple[1], error_tuple[2]))
        except ValueError:
            log.err("Could not parse the message id from the response: {0}".
                    format(error_tuple))
    def sendMessage(self, message):
        """ Sends the fully formed message to the APNS. """
        self.transport.write(message)
        self.last_message_sent = datetime.datetime.now()
    def shutdown(self):
        """ Cancels the deferred task to periodically reconnect to the APNS. """
        self.reconnect_trigger.cancel()
class APNSClientFactory(ReconnectingClientFactory):
    """ Factory which manages instances of the protocol which connect to the
        APNS to dispatch messages to clients.

        Messages are packed into the APNS enhanced binary format.  While no
        connection is available they are held in a bounded backlog queue and
        flushed on reconnection; recently sent messages are cached so that
        anything dispatched after a failed message can be resent. """
    def __init__(self, error_callback, backlog_queue_size=1):
        log.msg("init called")
        self._connected = False
        self.message = None
        self.error_callback = error_callback
        # Bounded backlog of (device_token, payload) tuples awaiting an
        # APNS connection.
        self.message_queue = Queue.Queue(maxsize=backlog_queue_size)
        self.protocol = APNSProtocol()
        self.retry_attempts = 0
        # Cache of recently sent messages, each stored as
        # {sequence_number: [device_token, packed_message]}.
        self.sent_messages = collections.deque(maxlen=1000)
        self.sequence_number = MIN_MESSAGE_ID
        # Sequence number the APNS reported an error for, or None when
        # there is no pending resend work.
        self.message_error = None
    def process_queue(self):
        """ Processes the messages in the backlog queue, if there
            are any that have not already been sent. Messages would be
            in the queue if they have been consumed from the MQ but no
            connection to the APNS was available. """
        log.msg("Processing the backlog of APNS messages.")
        # According to the documentation, Queue.qsize() technically only
        # returns an 'approximate' size - so it's possible that a few messages
        # may get missed or it will try and process more messages than there
        # are available.
        for _ in range(0, self.message_queue.qsize()):
            try:
                queued = self.message_queue.get(block=False)
                self.sendMessage(queued[0], queued[1])
            except Queue.Empty:
                break
        log.msg("Finished processing the APNS message backlog.")
    def buildProtocol(self, addr):
        """ Builds an instance of the APNSProtocol which is used to
            connect to the APNS. """
        log.msg("Building a new APNS protocol")
        new_protocol = self.protocol
        # Reset the delay between connection attempts, and let the protocol
        # know about this factory
        self.resetDelay()
        new_protocol.factory = self
        log.msg("Connected to the APNS at {0}:{1}".format(
            addr.host, addr.port))
        # Restart processing incoming messages
        self._connected = True
        return new_protocol
    def startedConnecting(self, connector):
        """ Called when a connection attempt to the APNS has started. """
        log.msg("Attempting to connect to the APNS.")
    def enque_message(self, device_token, payload):
        """ Adds a payload with the corresponding device token
            to the queue. Used when a connection to the APNS is unavailable
            but where it is useful to have the option to send messages
            upon reconnection. """
        if not self.message_queue.full():
            try:
                self.message_queue.put((device_token, payload),
                                       block=False)
                log.msg(("Message for device {0} stored in " +
                         "queue as no APNS connection is available").format(
                             device_token))
            except Queue.Full:
                log.msg(("No connection to the APNS is available, and " +
                         "the queue is full. Discarding message."))
        else:
            try:
                # Pop the first item off to make space for the newer message
                self.message_queue.get(block=False)
                self.message_queue.put((device_token, payload),
                                       block=False)
                log.msg(("Full Queue - message popped to make way for newer " +
                         "message for device {0}, as no " +
                         "APNS connection is available").format(
                             device_token))
            except Queue.Full:
                log.msg("No connection to the APNS is available, and the " +
                        "queue is full. Discarding message.")
            except Queue.Empty:
                log.msg("No connection to the APNS is available, and " +
                        "unable to store message in queue. Discarding " +
                        "message.")
    def sendMessage(self, device_token, payload):
        """ Notification messages are binary messages in network order
            using the following format:
            <1 byte command> <2 bytes length><token> <2 bytes length><payload>

            Raises APNSException when the token cannot be decoded, the
            message cannot be packed, or the message exceeds the APNS
            size limit. """
        try:
            decoded_token = base64.decodestring(device_token)
            message_format = FORMAT_STRING % len(payload)
        except binascii.Error:
            raise APNSException("Unable to decode APNS device token {0}. " \
                                "Discarding message".format(device_token))
        # Messages expire an hour from now if the APNS cannot deliver them.
        expiry = int(time.time()) + 3600
        try:
            self.message = struct.pack(message_format, COMMAND_TYPE,
                                       int(self.sequence_number), expiry,
                                       len(decoded_token), decoded_token,
                                       len(payload), payload)
        except struct.error:
            raise APNSException("Unable to pack message with payload {0} to " \
                                "send to device {1}. Discarding " \
                                "message".format(payload, device_token))
        if self._connected is True:
            if len(self.message) <= MAX_MESSAGE_SIZE_BYTES:
                self.sent_messages.append({self.sequence_number :
                                           [device_token, self.message]})
                self.protocol.sendMessage(self.message)
                # Wrap the sequence number around once it hits the maximum.
                if self.sequence_number >= MAX_MESSAGE_ID:
                    self.sequence_number = MIN_MESSAGE_ID
                else:
                    self.sequence_number = self.sequence_number + 1
                # BUG FIX: the original format string used placeholder {1}
                # with a single argument, raising IndexError after every
                # successful send; use {0}.
                log.msg(("Message pushed to device with " \
                         "APNS token: {0}").format(device_token))
            else:
                raise APNSException("The message size ({0}) exceeds the " \
                                    "maximum permitted by the APNS ({1}). " \
                                    "Discarding message".format(
                                        str(len(self.message)),
                                        str(MAX_MESSAGE_SIZE_BYTES)))
        else:
            self.enque_message(device_token, payload)
    def process_failed_sent_messages(self):
        """ Processes messages that were sent AFTER the message that
            caused the connection to be cut. """
        if len(self.sent_messages) > (MAX_MESSAGE_ID - MIN_MESSAGE_ID):
            # As the number of saved sent messages is higher than the
            # number of unique message id's, we cannot work out which
            # ones have been sent and which ones haven't. Either the number
            # of saved sent messages should be decreased or the range
            # of message id's should be increased.
            log.msg("Can't resend messages as the " \
                    "proportion of cached sent messages is higher than" \
                    "the difference between the max message ID and min"
                    "message ID. The number of sent messages stored should " \
                    "be decreased, or the range of message id's increased")
        else:
            log.msg("Resending messages that were sent after the failed " \
                    "message (id: {0})".format(self.message_error))
            # This check is to cover the case where the message counter has
            # has recently been reset, following it hitting the maximum value.
            # We don't want to resend messages that were sent succesfully but
            # have a higher sequence number, so we take into account the number
            # of messages sent since the reset if the error is higher than the
            # current sequence number.
            if len(self.sent_messages) > \
                    ((self.sequence_number - MIN_MESSAGE_ID)):
                number_beyond_min = self.sequence_number - MIN_MESSAGE_ID
                max_id_to_resend = MAX_MESSAGE_ID - (len(self.sent_messages) -
                                                     number_beyond_min)
                log.msg("Detected counter reset. NumberBeyondMin: {0}, " \
                        "MaxIDToResend: {1}".format(number_beyond_min,
                                                    max_id_to_resend))
                looped = True
            else:
                max_id_to_resend = MAX_MESSAGE_ID
                looped = False
            for cached in self.sent_messages:
                for key, value in cached.iteritems():
                    resend = False
                    if looped is True:
                        if key > int(self.message_error):
                            if key < max_id_to_resend or \
                                    int(self.message_error) > self.sequence_number:
                                resend = True
                        else:
                            if key < max_id_to_resend and \
                                    int(self.message_error) > max_id_to_resend:
                                resend = True
                    else:
                        if key > int(self.message_error):
                            resend = True
                    if resend is True:
                        log.msg("Resending message with id: " + str(key))
                        self.protocol.sendMessage(value[1])
        self.message_error = None
    def clientConnectionLost(self, connector, reason):
        """ Called when the network connection to the APNS has been lost,
            so set the connected flag to False and initiate the reconnection
            process. """
        self._connected = False
        log.msg(("Lost connection to the APNS. Reason: {0}").format(
            reason.getErrorMessage()))
        # Try and work out what the problem is, for informational
        # purposes
        if reason.check(error.TimeoutError):
            log.msg("APNS TimeoutError exception was caught")
        elif reason.check(error.ConnectionLost):
            log.msg("APNS ConnectionLost exception was caught")
        # Add the message to the queue so that upon reconnection,
        # it will try to send the message straight away
        if self.message is not None:
            self.protocol.message = self.message
        ReconnectingClientFactory.clientConnectionLost(self, connector,
                                                       reason)
    def clientConnectionFailed(self, connector, reason):
        """ The connection attempt to the APNS has failed. """
        log.err(("Unable to connect to the APNS. Reason: {0}").format(reason))
        ReconnectingClientFactory.clientConnectionFailed(self, connector,
                                                         reason)
class APNSService(object):
    """ Sets up and controls the instances of the APNS and
    APN Feedback factories. """
    def __init__(self, certificate_file, key_file,
                 error_callback=None, use_sandbox=False,
                 apns_queue_size=1):
        """ Creates the APNS client factory and opens the SSL connection
        to either the production or the sandbox APNS gateway.
        :param certificate_file: path to the PEM certificate file.
        :param key_file: path to the PEM private key file.
        :param error_callback: optional callable invoked with a tuple of
            (error value, device token) when the APNS reports an error.
        :param use_sandbox: connect to the sandbox gateway when True.
        :param apns_queue_size: number of sent messages kept for resending.
        """
        self.error_callback = error_callback
        self.apns_factory = APNSClientFactory(self.handle_error,
                                              apns_queue_size)
        if use_sandbox is True:
            apns_host = APNS_SANDBOX_HOSTNAME
        else:
            apns_host = APNS_HOSTNAME
        reactor.connectSSL(apns_host, APNS_PORT,
                           self.apns_factory,
                           common.APNSClientContextFactory(certificate_file,
                                                           key_file))
    def handle_error(self, error_tuple):
        """ Handles an error response that has been received from the APNS,
        for example when a token is no longer valid. Looks up the device
        token for the failed message and reports (error value, token) to
        the user-supplied error callback, if any. """
        if self.error_callback is not None:
            invalid_token = 0
            error_value = error_tuple[ERROR_VALUE_INDEX]
            # Convert once; the loop previously re-converted per message.
            failed_id = int(error_tuple[SENT_MESSAGE_INDEX])
            for message in self.apns_factory.sent_messages:
                # `in` replaces dict.has_key(), which was removed in
                # Python 3; behavior is identical under Python 2.
                if failed_id in message:
                    invalid_token = message[failed_id][0]
                    break
            response = (error_value, invalid_token)
            self.error_callback(response)
    def send_message(self, device_token, payload):
        """ Initiates the process to send the payload to the
        device with the specified token. """
        self.apns_factory.sendMessage(device_token, payload)
| {
"repo_name": "trulabs/pushpy",
"path": "pushpy/apns.py",
"copies": "1",
"size": "18061",
"license": "bsd-3-clause",
"hash": 5171642258946634000,
"line_mean": 40.6152073733,
"line_max": 80,
"alpha_frac": 0.5715630364,
"autogenerated": false,
"ratio": 4.639352684305163,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0037095361795600958,
"num_lines": 434
} |
"""APNS Router"""
import socket
import uuid
from typing import Any # noqa
from hyper.http20.exceptions import ConnectionError, HTTP20Error
from twisted.internet.threads import deferToThread
from twisted.logger import Logger
from autopush.exceptions import RouterException
from autopush.metrics import make_tags
from autopush.router.apns2 import (
APNSClient,
APNS_MAX_CONNECTIONS,
)
from autopush.router.interface import RouterResponse
from autopush.types import JSONDict # noqa
# https://github.com/djacobs/PyAPNs
class APNSRouter(object):
    """APNS Router Implementation"""
    log = Logger()
    # Populated in __init__ with one APNSClient per release channel.
    apns = None
    def _connect(self, rel_channel, load_connections=True):
        """Connect to APNS
        :param rel_channel: Release channel name (e.g. Firefox, FirefoxBeta, ..)
        :type rel_channel: str
        :param load_connections: (used for testing)
        :type load_connections: bool
        :returns: APNS client to be stored under the proper release channel
            name.
        :rtype: autopush.router.apns2.APNSClient
        """
        default_topic = "com.mozilla.org." + rel_channel
        cert_info = self.router_conf[rel_channel]
        return APNSClient(
            cert_file=cert_info.get("cert"),
            key_file=cert_info.get("key"),
            use_sandbox=cert_info.get("sandbox", False),
            max_connections=cert_info.get("max_connections",
                                          APNS_MAX_CONNECTIONS),
            topic=cert_info.get("topic", default_topic),
            logger=self.log,
            metrics=self.metrics,
            load_connections=load_connections,
            max_retry=cert_info.get('max_retry', 2)
        )
    def __init__(self, conf, router_conf, metrics, load_connections=True):
        """Create a new APNS router and connect to APNS
        :param conf: Configuration settings
        :type conf: autopush.config.AutopushConfig
        :param router_conf: Router specific configuration
        :type router_conf: dict
        :param load_connections: (used for testing)
        :type load_connections: bool
        """
        self.conf = conf
        self.router_conf = router_conf
        self.metrics = metrics
        self._base_tags = ["platform:apns"]
        # Map of release channel name -> APNSClient.
        self.apns = dict()
        for rel_channel in router_conf:
            self.apns[rel_channel] = self._connect(rel_channel,
                                                   load_connections)
        self.log.debug("Starting APNS router...")
    def register(self, uaid, router_data, app_id, *args, **kwargs):
        # type: (str, JSONDict, str, *Any, **Any) -> None
        """Register an endpoint for APNS, on the `app_id` release channel.
        This will validate that an APNs instance token is in the
        `router_data`.
        :param uaid: User Agent Identifier
        :param router_data: Dict containing router specific configuration info
        :param app_id: The release channel identifier for cert info lookup
        """
        if app_id not in self.apns:
            raise RouterException("Unknown release channel specified",
                                  status_code=400,
                                  response_body="Unknown release channel")
        if not router_data.get("token"):
            raise RouterException("No token registered", status_code=400,
                                  response_body="No token registered")
        # Remember which channel's client must be used when routing later.
        router_data["rel_channel"] = app_id
    def amend_endpoint_response(self, response, router_data):
        # type: (JSONDict, JSONDict) -> None
        """Stubbed out for this router"""
    def route_notification(self, notification, uaid_data):
        """Start the APNS notification routing, returns a deferred
        :param notification: Notification data to send
        :type notification: autopush.endpoint.Notification
        :param uaid_data: User Agent specific data
        :type uaid_data: dict
        """
        router_data = uaid_data["router_data"]
        # Kick the entire notification routing off to a thread
        return deferToThread(self._route, notification, router_data)
    def _route(self, notification, router_data):
        """Blocking APNS call to route the notification
        :param notification: Notification data to send
        :type notification: dict
        :param router_data: Pre-initialized data for this connection
        :type router_data: dict
        """
        router_token = router_data["token"]
        rel_channel = router_data["rel_channel"]
        apns_client = self.apns[rel_channel]
        # chid MUST MATCH THE CHANNELID GENERATED BY THE REGISTRATION SERVICE
        # Currently this value is in hex form.
        payload = {
            "chid": notification.channel_id.hex,
            "ver": notification.version,
        }
        if notification.data:
            # Propagate the content encoding; "encoding" is the fallback
            # header name when "content-encoding" is absent.
            payload["con"] = notification.headers.get(
                "content-encoding", notification.headers.get("encoding"))
            # Non-aes128gcm encodings carry their keys in separate headers.
            if payload["con"] != "aes128gcm":
                if "encryption" in notification.headers:
                    payload["enc"] = notification.headers["encryption"]
                if "crypto_key" in notification.headers:
                    payload["cryptokey"] = notification.headers["crypto_key"]
                elif "encryption_key" in notification.headers:
                    payload["enckey"] = notification.headers["encryption_key"]
            payload["body"] = notification.data
        # Allow the registration to override the default visible alert.
        payload['aps'] = router_data.get('aps', {
            "mutable-content": 1,
            "alert": {
                "loc-key": "SentTab.NoTabArrivingNotification.body",
                "title-loc-key": "SentTab.NoTabArrivingNotification.title",
            }
        })
        apns_id = str(uuid.uuid4()).lower()
        # APNs may force close a connection on us without warning.
        # if that happens, retry the message.
        try:
            apns_client.send(router_token=router_token, payload=payload,
                             apns_id=apns_id)
        except Exception as e:
            # We sometimes see strange errors around sending push notifications
            # to APNS. We get reports that after a new deployment things work,
            # but then after a week or so, messages across the APNS bridge
            # start to fail. The connections appear to be working correctly,
            # so we don't think that this is a problem related to how we're
            # connecting.
            if isinstance(e, ConnectionError):
                reason = "connection_error"
            elif isinstance(e, (HTTP20Error, socket.error)):
                reason = "http2_error"
            else:
                reason = "unknown"
            # A 404/410 RouterException is surfaced as errno 106 and is
            # raised before the bridge-error metric below is incremented.
            if isinstance(e, RouterException) and e.status_code in [404, 410]:
                raise RouterException(
                    str(e),
                    status_code=e.status_code,
                    errno=106,
                    response_body="User is no longer registered",
                    log_exception=False
                )
            self.metrics.increment("notification.bridge.error",
                                   tags=make_tags(self._base_tags,
                                                  application=rel_channel,
                                                  reason=reason))
            raise RouterException(
                str(e),
                status_code=502,
                response_body="APNS returned an error processing request",
            )
        location = "%s/m/%s" % (self.conf.endpoint_url, notification.version)
        self.metrics.increment("notification.bridge.sent",
                               tags=make_tags(self._base_tags,
                                              application=rel_channel))
        self.metrics.increment(
            "updates.client.bridge.apns.{}.sent".format(
                router_data["rel_channel"]
            ),
            tags=self._base_tags
        )
        self.metrics.increment("notification.message_data",
                               notification.data_length,
                               tags=make_tags(self._base_tags,
                                              destination='Direct'))
        return RouterResponse(status_code=201, response_body="",
                              headers={"TTL": notification.ttl,
                                       "Location": location},
                              logged_status=200)
| {
"repo_name": "mozilla-services/autopush",
"path": "autopush/router/apnsrouter.py",
"copies": "1",
"size": "8491",
"license": "mpl-2.0",
"hash": 4763950799466837000,
"line_mean": 40.8275862069,
"line_max": 79,
"alpha_frac": 0.5677776469,
"autogenerated": false,
"ratio": 4.579827400215749,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5647605047115749,
"avg_score": null,
"num_lines": null
} |
# Blender add-on metadata, read by Blender's add-on manager before import.
bl_info = {
	"name": "Apocalypse Model (APM) format",
	"author": "Madd Games",
	"blender": (2, 69, 0),
	"location": "File > Import-Export",
	"description": "Export models for use with the Apocalypse Engine.",
	"warning": "",
	"wiki_url": "",
	"tracker_url": "",
	"support": 'OFFICIAL', # unsure about this one.
	"category": "Import-Export"}
# NOTE(review): DEBUG is not referenced anywhere in this file -- confirm
# whether it is still needed before removing.
DEBUG = True
import bpy
import struct
import sys, os
import json
from PIL import Image
from bpy.props import (StringProperty,
FloatProperty,
IntProperty,
BoolProperty,
EnumProperty,
)
from bpy_extras.io_utils import (ImportHelper,
ExportHelper,
axis_conversion,
)
# Next index to hand out; real texture indices start at 1 because
# index 0 is reserved to mean "texture file not found".
nextTexIndex = 1
# Maps a texture file path to its assigned index.
texIndices = {}
def getTextureIndex(filepath):
	"""Return the texture-table index for filepath, assigning a fresh
	index the first time a given (existing) file is seen. Paths that do
	not exist on disk always map to index 0."""
	global nextTexIndex
	if not os.path.exists(filepath):
		return 0
	index = texIndices.get(filepath)
	if index is None:
		index = nextTexIndex
		texIndices[filepath] = index
		nextTexIndex += 1
	return index
class ExportAPM(bpy.types.Operator, ExportHelper):
	"""Blender operator that exports every scene object to the
	Apocalypse Model (APM) binary format."""
	bl_idname = "export_mesh.apm"
	bl_label = "Export APM"
	filename_ext = ".apm"
	filter_glob = StringProperty(
			default="*.apm",
			options={'HIDDEN'}
			)
	def execute(self, context):
		"""Write all objects to self.filepath as an APM file.
		File layout: 24-byte header, then per-object a NUL-terminated
		name, a material record and the vertex data, and finally a
		texture table whose offset/size are patched back into the header.
		"""
		scene = bpy.context.scene
		filename = self.filepath
		objlist = list(bpy.data.objects)
		f = open(filename, "wb")
		# Write the APM header
		f.write(bytes("APM\0", "ascii"))
		# Report sizeof(Vertex) as 48, so that u-tangents and v-tangents are skipped as they are
		# auto-computed.
		# Also, zeros will be put in place of the texture table offset and size for now, we'll change
		# it later on in this script when we know the values!
		f.write(struct.pack("IIIII", 24, len(objlist), 48, 0, 0))
		for obj in objlist:
			# NUL-terminated object name.
			f.write(bytes(obj.name, "utf-8"))
			f.write(bytes("\0", "ascii"))
			# Tessellated copy of the mesh, transformed into world space.
			mesh = obj.to_mesh(scene, True, 'PREVIEW', calc_tessface=True)
			mesh.calc_normals_split()
			mesh.transform(obj.matrix_world)
			# uvdata stays None when the mesh has no active UV layer.
			uvdata = None
			try:
				mesh.tessface_uv_textures.active.data[0]
				uvdata = mesh.tessface_uv_textures.active.data
			except:
				pass
			vertices = []
			uvs = []
			normals = []
			for face in mesh.tessfaces:
				if len(face.vertices) < 3:
					# Ignore faces that can't even make triangles.
					continue
				else:
					if len(face.vertices) == 4:
						# Split the quad into two triangles (0,1,2) and (2,3,0).
						vertices.extend((face.vertices[0], face.vertices[1], face.vertices[2]))
						vertices.extend((face.vertices[2], face.vertices[3], face.vertices[0]))
						normals.extend([face.normal]*6)
						if uvdata is not None:
							i = face.index
							uvs.extend((uvdata[i].uv1, uvdata[i].uv2, uvdata[i].uv3))
							uvs.extend((uvdata[i].uv3, uvdata[i].uv4, uvdata[i].uv1))
					else:
						vertices.extend(face.vertices)
						normals.extend([face.normal]*3)
						if uvdata is not None:
							i = face.index
							uvs.extend((uvdata[i].uv1, uvdata[i].uv2, uvdata[i].uv3))
			mat = obj.active_material
			# Defaults used when the object has no material assigned.
			diffuseColor = (255, 255, 255, 255)
			specularColor = (0, 0, 0, 255)
			shininess = 0.0
			if mat is not None:
				diffuseColor = (int(mat.diffuse_color[0]*255.0), int(mat.diffuse_color[1]*255.0), int(mat.diffuse_color[2]*255.0), 255)
				specularColor = (int(mat.specular_color[0]*mat.specular_intensity*255.0), int(mat.specular_color[1]*mat.specular_intensity*255.0), int(mat.specular_color[2]*mat.specular_intensity*255.0), 255)
				# Presumably rescales Blender's specular hardness onto the
				# engine's exponent range -- TODO confirm the intended scale.
				shininess = (mat.specular_hardness - 1) * 1.9607843137254901
			colorIndex = 0
			specularIndex = 0
			normalIndex = 0
			illumIndex = 0
			warpIndex = 0
			try:
				filepath = obj.active_material.active_texture.image.filepath
				colorIndex = getTextureIndex(filepath)
				if filepath.endswith(".png"):
					# Auxiliary maps follow a "<base>_<kind>.png" naming
					# convention; missing files resolve to index 0.
					basename = filepath[:-4]
					specularIndex = getTextureIndex(basename+"_spec.png")
					normalIndex = getTextureIndex(basename+"_normal.png")
					illumIndex = getTextureIndex(basename+"_illum.png")
					warpIndex = getTextureIndex(basename+"_warp.png")
			except:
				pass
			f.write(struct.pack("I", len(vertices))) # number of vertices
			f.write(struct.pack("I", colorIndex)) # index of the color texture
			f.write(struct.pack("I", specularIndex)) # index of the specular texture
			f.write(struct.pack("I", normalIndex)) # index of the normal map
			f.write(struct.pack("BBBB", *diffuseColor)) # the diffuse color
			f.write(struct.pack("BBBB", *specularColor)) # the specular color
			f.write(struct.pack("f", shininess)) # the specular exponent
			f.write(struct.pack("I", 40)) # size of this structure
			f.write(struct.pack("I", illumIndex)) # index of the illumination map
			f.write(struct.pack("I", warpIndex)) # index of the warp map
			for i, verti in enumerate(vertices):
				uv = (0.0, 0.0)
				if uvdata is not None:
					uv = (uvs[i][0], uvs[i][1])
				# Swap Y and Z to make it agree with Apocalypse space.
				vert = (mesh.vertices[verti].co[0], mesh.vertices[verti].co[2], mesh.vertices[verti].co[1], 1.0,
					uv[0], uv[1], 0.0, 0.0, # UV
					normals[i][0], normals[i][2], normals[i][1], 0.0)
				f.write(struct.pack("f"*12, *vert))
		# OK, now we know where we shall write the texture table, so update the
		# APM header.
		ttOffset = f.tell()
		f.seek(16)
		f.write(struct.pack("II", ttOffset, len(texIndices)))
		f.seek(ttOffset)
		# So yeah, write the texture table.
		for filepath, index in texIndices.items():
			texflags = 0
			if filepath.endswith(".png"):
				try:
					# An optional "<base>.json" sidecar can disable mipmaps
					# for this texture (flag bit 0).
					jf = open(filepath[:-4]+".json", "r")
					data = json.load(jf)
					jf.close()
					if data["allowMipmaps"] == "false":
						texflags = texflags | 1
				except:
					pass
			img = Image.open(filepath)
			width, height = img.size
			f.write(struct.pack("IIIH", index, width, height, texflags))
			# Pixels are written bottom-up as RGBA; RGB images get an
			# opaque alpha appended.
			for y in range(0, height):
				for x in range(0, width):
					pixel = img.getpixel((x, height-y-1))
					if len(pixel) == 3:
						pixel = (pixel[0], pixel[1], pixel[2], 255)
					f.write(struct.pack("BBBB", *pixel))
		f.close()
		return {'FINISHED'}
def menu_func_export(self, context):
	# Menu hook: adds the APM entry to Blender's File > Export menu.
	self.layout.operator(ExportAPM.bl_idname, text="Apocalypse Model (.apm)")
def unregister():
	# Mirror of register(): drop this module's classes and remove the
	# export menu entry.
	bpy.utils.unregister_module(__name__)
	bpy.types.INFO_MT_file_export.remove(menu_func_export)
def register():
	# Register all classes defined in this module with Blender and hook
	# the exporter into the File > Export menu.
	bpy.utils.register_module(__name__)
	bpy.types.INFO_MT_file_export.append(menu_func_export)
# Allows running the script directly from Blender's text editor.
if __name__ == "__main__":
	register()
| {
"repo_name": "madd-games/apocalypse",
"path": "SDK/apoc_blender.py",
"copies": "1",
"size": "6515",
"license": "bsd-2-clause",
"hash": 1056802498718785400,
"line_mean": 30.9362745098,
"line_max": 196,
"alpha_frac": 0.6353031466,
"autogenerated": false,
"ratio": 2.9573309123921923,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40926340589921917,
"avg_score": null,
"num_lines": null
} |
"""A Pointer is the main handler when interacting with remote data.
A Pointer object represents an API for interacting with data (of any type)
at a specific location. The pointer should never be instantiated, only subclassed.
The relation between pointers and data is many to one,
there can be multiple pointers pointing to the same piece of data, meanwhile,
a pointer cannot point to multiple data sources.
A pointer is just an object id on a remote location and a set of methods that can be
executed on the remote machine directly on that object. One note that has to be made
is that all operations between pointers will return a pointer, the only way to have access
to the result is by calling .get() on the pointer.
There are two proper ways of receiving a pointer on some data:
1. When sending that data on a remote machine the user receives a pointer.
2. When the user searches for the data in an object store it receives a pointer to that data,
if it has the correct permissions for that.
After receiving a pointer, one might want to get the data behind the pointer locally. For that the
user should:
1. Request access by calling .request().
Example:
.. code-block::
        pointer_object.request(reason = "Request reason")
2. The data owner has to approve the request (check the domain node docs).
3. The data user checks if the request has been approved (check the domain node docs).
4. After the request has been approved, the data user can call .get() on the pointer to get the
data locally.
Example:
.. code-block::
pointer_object.get()
Pointers are being generated for most types of objects in the data science scene, but what you can
do on them is not the pointers job, see the lib module for more details. One can see the pointer
as a proxy to the actual data, the filtering and the security being applied where the data is being
held.
Example:
.. code-block::
# creating the data holder domain
domain_1 = Domain(name="Data holder domain")
# creating dummy data
tensor = th.tensor([1, 2, 3])
# creating the data holder client
domain_1_client = domain_1.get_root_client()
# sending the data to the client and receiving a pointer of that data.
data_ptr_domain_1 = tensor.send(domain_1_client)
# creating the data user domain
domain_2 = Domain(name="Data user domain")
# creating a request to access the data
    data_ptr_domain_1.request(
        reason="I'd like to see this pointer"
    )
# getting the remote id of the object
requested_object = data_ptr_domain_1.id_at_location
# getting the request id
message_request_id = domain_1_client.requests.get_request_id_from_object_id(
object_id=requested_object
)
# the data holder accepts the request
domain_1.requests[0].owner_client_if_available = domain_1_client
domain_1.requests[0].accept()
# the data user checks if the data holder approved his request
response = data_ptr_domain_1.check_access(node=domain_2, request_id=message_request_id)
"""
# stdlib
import time
from typing import Any
from typing import List
from typing import Optional
import warnings
# third party
from google.protobuf.reflection import GeneratedProtocolMessageType
from nacl.signing import VerifyKey
# syft absolute
import syft as sy
# syft relative
from ...logger import debug
from ...logger import error
from ...logger import warning
from ...proto.core.pointer.pointer_pb2 import Pointer as Pointer_PB
from ..common.pointer import AbstractPointer
from ..common.serde.deserialize import _deserialize
from ..common.serde.serializable import bind_protobuf
from ..common.uid import UID
from ..io.address import Address
from ..node.abstract.node import AbstractNode
from ..node.common.action.get_object_action import GetObjectAction
from ..node.common.service.get_repr_service import GetReprMessage
from ..node.common.service.obj_search_permission_service import (
ObjectSearchPermissionUpdateMessage,
)
from ..store.storeable_object import StorableObject
# TODO: Fix the Client, Address, Location confusion
@bind_protobuf
class Pointer(AbstractPointer):
    """
    The pointer is the handler when interacting with remote data.
    Automatically generated subclasses of Pointer need to be able to look up
    the path and name of the object type they point to as a part of serde. For more
    information on how subclasses are automatically generated, please check the ast
    module.
    :param location: The location where the data is being held.
    :type location: Address
    :param id_at_location: The UID of the object on the remote location.
    :type id_at_location: UID
    """
    # Fully qualified path of the pointed-to type; set on generated subclasses.
    path_and_name: str
    # Whether the pointed-at object can be found by other users' searches.
    _pointable: bool = False
    def __init__(
        self,
        client: Any,
        id_at_location: Optional[UID] = None,
        object_type: str = "",
        tags: Optional[List[str]] = None,
        description: str = "",
    ) -> None:
        super().__init__(
            client=client,
            id_at_location=id_at_location,
            tags=tags,
            description=description,
        )
        self.object_type = object_type
        # _exhausted becomes True in get() call
        # when delete_obj is True and network call
        # has already been made
        self._exhausted = False
    def _get(self, delete_obj: bool = True, verbose: bool = False) -> StorableObject:
        """Method to download a remote object from a pointer object if you have the right
        permissions.
        :return: returns the downloaded data
        :rtype: StorableObject
        """
        debug(
            f"> GetObjectAction for id_at_location={self.id_at_location} "
            + f"with delete_obj={delete_obj}"
        )
        obj_msg = GetObjectAction(
            id_at_location=self.id_at_location,
            address=self.client.address,
            reply_to=self.client.address,
            delete_obj=delete_obj,
        )
        obj = self.client.send_immediate_msg_with_reply(msg=obj_msg).data
        # Enum members travel as their raw value; rebuild the enum locally.
        if self.is_enum:
            enum_class = self.client.lib_ast.query(self.path_and_name).object_ref
            return enum_class(obj)
        return obj
    def get_copy(
        self,
        request_block: bool = False,
        timeout_secs: int = 20,
        reason: str = "",
        verbose: bool = False,
    ) -> Optional[StorableObject]:
        """Method to download a remote object from a pointer object if you have the right
        permissions. Unlike get(), leaves the remote copy in place
        (delete_obj=False). Optionally can block while waiting for approval.
        :return: returns the downloaded data
        :rtype: Optional[StorableObject]
        """
        return self.get(
            request_block=request_block,
            timeout_secs=timeout_secs,
            reason=reason,
            delete_obj=False,
            verbose=verbose,
        )
    def print(self) -> "Pointer":
        """Print the remote object's repr locally, requesting access once
        (blocking briefly) if permission is missing. Returns self so that
        calls can be chained."""
        obj = None
        try:
            obj_msg = GetReprMessage(
                id_at_location=self.id_at_location,
                address=self.client.address,
                reply_to=self.client.address,
            )
            obj = self.client.send_immediate_msg_with_reply(msg=obj_msg).repr
        except Exception as e:
            # Only retry for permission-style failures; anything else falls
            # through to the "no permission" message below.
            if "You do not have permission to .get()" in str(
                e
            ) or "UnknownPrivateException" in str(e):
                # syft relative
                from ..node.domain.service import RequestStatus
                response_status = self.request(
                    reason="Calling remote print",
                    block=True,
                    timeout_secs=3,
                )
                if (
                    response_status is not None
                    and response_status == RequestStatus.Accepted
                ):
                    return self.print()
        # TODO: Create a remote print interface for objects which displays them in a
        # nice way, we could also even buffer this between chained ops until we return
        # so that we can print once and display a nice list of data and ops
        # issue: https://github.com/OpenMined/PySyft/issues/5167
        if obj is not None:
            print(obj)
        else:
            print(f"No permission to print() {self}")
        return self
    def get(
        self,
        request_block: bool = False,
        timeout_secs: int = 20,
        reason: str = "",
        delete_obj: bool = True,
        verbose: bool = False,
    ) -> Optional[StorableObject]:
        """Method to download a remote object from a pointer object if you have the right
        permissions. Optionally can block while waiting for approval.
        Returns None when a blocking request is not accepted in time.
        :return: returns the downloaded data
        :rtype: Optional[StorableObject]
        """
        # syft relative
        from ..node.domain.service import RequestStatus
        if self._exhausted:
            raise ReferenceError(
                "Object has already been deleted. This pointer is exhausted"
            )
        if not request_block:
            result = self._get(delete_obj=delete_obj, verbose=verbose)
        else:
            response_status = self.request(
                reason=reason,
                block=True,
                timeout_secs=timeout_secs,
                verbose=verbose,
            )
            if (
                response_status is not None
                and response_status == RequestStatus.Accepted
            ):
                result = self._get(delete_obj=delete_obj, verbose=verbose)
            else:
                return None
        # The remote copy is gone: disable garbage collection messaging and
        # mark this pointer so further get() calls raise ReferenceError.
        if result is not None and delete_obj:
            self.gc_enabled = False
            self._exhausted = True
        return result
    def _object2proto(self) -> Pointer_PB:
        """Returns a protobuf serialization of self.
        As a requirement of all objects which inherit from Serializable,
        this method transforms the current object into the corresponding
        Protobuf object so that it can be further serialized.
        :return: returns a protobuf object
        :rtype: Pointer_PB
        .. note::
            This method is purely an internal method. Please use sy.serialize(object) or one of
            the other public serialization methods if you wish to serialize an
            object.
        """
        return Pointer_PB(
            points_to_object_with_path=self.path_and_name,
            pointer_name=type(self).__name__,
            id_at_location=sy.serialize(self.id_at_location),
            location=sy.serialize(self.client.address),
            tags=self.tags,
            description=self.description,
            object_type=self.object_type,
            attribute_name=getattr(self, "attribute_name", ""),
        )
    @staticmethod
    def _proto2object(proto: Pointer_PB) -> "Pointer":
        """Creates a Pointer from a protobuf
        As a requirement of all objects which inherit from Serializable,
        this method transforms a protobuf object into an instance of this class.
        :return: returns an instance of Pointer
        :rtype: Pointer
        .. note::
            This method is purely an internal method. Please use syft.deserialize()
            if you wish to deserialize an object.
        """
        # TODO: we need _proto2object to include a reference to the node doing the
        # deserialization so that we can convert location into a client object. At present
        # it is an address object which will cause things to break later.
        points_to_type = sy.lib_ast.query(proto.points_to_object_with_path)
        pointer_type = getattr(points_to_type, proto.pointer_name)
        # WARNING: This is sending a serialized Address back to the constructor
        # which currently depends on a Client for send_immediate_msg_with_reply
        return pointer_type(
            id_at_location=_deserialize(blob=proto.id_at_location),
            client=_deserialize(blob=proto.location),
            tags=proto.tags,
            description=proto.description,
            object_type=proto.object_type,
        )
    @staticmethod
    def get_protobuf_schema() -> GeneratedProtocolMessageType:
        """Return the type of protobuf object which stores a class of this type
        As a part of serialization and deserialization, we need the ability to
        lookup the protobuf object type directly from the object type. This
        static method allows us to do this.
        Importantly, this method is also used to create the reverse lookup ability within
        the metaclass of Serializable. In the metaclass, it calls this method and then
        it takes whatever type is returned from this method and adds an attribute to it
        with the type of this class attached to it. See the MetaSerializable class for details.
        :return: the type of protobuf object which corresponds to this class.
        :rtype: GeneratedProtocolMessageType
        """
        return Pointer_PB
    def request(
        self,
        reason: str = "",
        block: bool = False,
        timeout_secs: Optional[int] = None,
        verbose: bool = False,
    ) -> Any:
        """Method that requests access to the data on which the pointer points to.
        Example:
        .. code-block::
            # data holder domain
            domain_1 = Domain(name="Data holder")
            # data
            tensor = th.tensor([1, 2, 3])
            # generating the client for the domain
            domain_1_client = domain_1.get_root_client()
            # sending the data and receiving a pointer
            data_ptr_domain_1 = tensor.send(domain_1_client)
            # requesting access to the pointer
            data_ptr_domain_1.request(reason="Research project.")
        :param reason: The description of the request. This is the reason why you want to have
            access to the data.
        :type reason: str
        :param block: Whether to wait (poll) for the data owner's answer.
        :type block: bool
        :param timeout_secs: How long a blocking request waits before giving up.
        :type timeout_secs: Optional[int]
        .. note::
            This method should be used when the remote data associated with the pointer wants to be
            downloaded locally (or use .get() on the pointer).
        """
        # syft relative
        from ..node.domain.service import RequestMessage
        # if you request non-blocking you don't need a timeout
        # if you request blocking you need a timeout, so lets set a default on here
        # a timeout of 0 would be a way to say don't block my local notebook but if the
        # duet partner has a rule configured it will get executed first before the
        # request would time out
        if timeout_secs is None and block is False:
            timeout_secs = -1  # forever
        msg = RequestMessage(
            request_description=reason,
            address=self.client.address,
            owner_address=self.client.address,
            object_id=self.id_at_location,
            object_type=self.object_type,
            requester_verify_key=self.client.verify_key,
            timeout_secs=timeout_secs,
        )
        self.client.send_immediate_msg_without_reply(msg=msg)
        # wait long enough for it to arrive and trigger a handler
        time.sleep(0.1)
        if not block:
            return None
        else:
            if timeout_secs is None:
                timeout_secs = 30  # default if not explicitly set
            # syft relative
            from ..node.domain.service import RequestAnswerMessage
            from ..node.domain.service import RequestStatus
            output_string = "> Waiting for Blocking Request: "
            output_string += f"  {self.id_at_location}"
            if len(reason) > 0:
                output_string += f": {reason}"
            if len(output_string) > 0 and output_string[-1] != ".":
                output_string += "."
            debug(output_string)
            status = None
            start = time.time()
            last_check: float = 0.0
            while True:
                now = time.time()
                try:
                    # won't run on the first pass because status is None which allows
                    # for remote request handlers to auto respond before timeout
                    if now - start > timeout_secs:
                        log = (
                            f"\n> Blocking Request Timeout after {timeout_secs} seconds"
                        )
                        debug(log)
                        return status
                    # only check once every second
                    if now - last_check > 1:
                        last_check = now
                        debug(f"> Sending another Request Message {now - start}")
                        status_msg = RequestAnswerMessage(
                            request_id=msg.id,
                            address=self.client.address,
                            reply_to=self.client.address,
                        )
                        response = self.client.send_immediate_msg_with_reply(
                            msg=status_msg
                        )
                        status = response.status
                        if response.status == RequestStatus.Pending:
                            time.sleep(0.1)
                            continue
                        else:
                            # accepted or rejected lets exit
                            status_text = "REJECTED"
                            if status == RequestStatus.Accepted:
                                status_text = "ACCEPTED"
                            log = f" {status_text}"
                            debug(log)
                            return status
                except Exception as e:
                    error(f"Exception while running blocking request. {e}")
                    # escape the while loop
                    return status
    @property
    def searchable(self) -> bool:
        """Deprecated alias of :attr:`pointable`."""
        msg = "`searchable` is deprecated please use `pointable` in future"
        warning(msg, print=True)
        warnings.warn(
            msg,
            DeprecationWarning,
        )
        return self._pointable
    @searchable.setter
    def searchable(self, value: bool) -> None:
        """Deprecated alias of the :attr:`pointable` setter."""
        msg = "`searchable` is deprecated please use `pointable` in future"
        warning(msg, print=True)
        warnings.warn(
            msg,
            DeprecationWarning,
        )
        self.pointable = value
    @property
    def pointable(self) -> bool:
        """Whether the pointed-at object is discoverable by other users."""
        return self._pointable
    @pointable.setter
    def pointable(self, value: bool) -> None:
        # Only issue a network update when the flag actually changes.
        if value != self._pointable:
            self.update_searchability(not self._pointable)
    def update_searchability(
        self,
        pointable: bool = True,
        target_verify_key: Optional[VerifyKey] = None,
        searchable: Optional[bool] = None,
    ) -> None:
        """Make the object pointed at pointable or not for other people. If
        target_verify_key is not specified, the searchability for the VERIFYALL group
        will be toggled.
        :param pointable: If the target object should be made pointable or not.
        :type pointable: bool
        :param target_verify_key: The verify_key of the client to which we want to give
            search permission.
        :type target_verify_key: Optional[VerifyKey]
        :param searchable: Deprecated alias for `pointable`; overrides it when given.
        :type searchable: Optional[bool]
        """
        if searchable is not None:
            warn_msg = "`searchable` is deprecated please use `pointable` in future"
            warning(warn_msg, print=True)
            warnings.warn(
                warn_msg,
                DeprecationWarning,
            )
            pointable = searchable
        self._pointable = pointable
        msg = ObjectSearchPermissionUpdateMessage(
            add_instead_of_remove=pointable,
            target_verify_key=target_verify_key,
            target_object_id=self.id_at_location,
            address=self.client.address,
        )
        self.client.send_immediate_msg_without_reply(msg=msg)
    def check_access(self, node: AbstractNode, request_id: UID) -> Any:
        """Method that checks the status of an already made request. There are three
        possible outcomes when requesting access:
        1. RequestStatus.Accepted - your request has been approved, you can now .get() your data.
        2. RequestStatus.Pending - your request has not been reviewed yet.
        3. RequestStatus.Rejected - your request has been rejected.
        :param node: The node that queries the request status.
        :type node: AbstractNode
        :param request_id: The request on which you are querying the status.
        :type request_id: UID
        """
        # syft relative
        from ..node.domain.service import RequestAnswerMessage
        msg = RequestAnswerMessage(
            request_id=request_id, address=self.client.address, reply_to=node.address
        )
        response = self.client.send_immediate_msg_with_reply(msg=msg)
        return response.status
    def __del__(self) -> None:
        _client_type = type(self.client)
        if (_client_type == Address) or issubclass(_client_type, AbstractNode):
            # it is a serialized pointer that we receive from another client do nothing
            return
        # NOTE(review): gc_enabled is set to False by get(); presumably it
        # defaults to True on AbstractPointer -- confirm.
        if self.gc_enabled:
            self.client.gc.apply(self)
| {
"repo_name": "OpenMined/PySyft",
"path": "packages/syft/src/syft/core/pointer/pointer.py",
"copies": "1",
"size": "21445",
"license": "apache-2.0",
"hash": -4318425704001569000,
"line_mean": 35.5332197615,
"line_max": 99,
"alpha_frac": 0.6073210539,
"autogenerated": false,
"ratio": 4.527126873548659,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009869609482060504,
"num_lines": 587
} |
""" A pointmass maze env."""
from gym.envs.mujoco import mujoco_env
from gym import utils
from d4rl import offline_env
from d4rl.pointmaze.dynamic_mjc import MJCModel
import numpy as np
import random
# Integer tile codes used in the parsed maze array.
WALL = 10
EMPTY = 11
GOAL = 12


def parse_maze(maze_str):
    """Convert a backslash-delimited maze string into an int32 grid.

    Rows are separated by a literal backslash character.  '#' marks a
    wall, 'G' the goal, and any of ' ', 'O', '0' an empty cell; any other
    character raises ValueError.  Returns an array of shape
    (num_rows, row_length) filled with WALL/EMPTY/GOAL codes.
    """
    rows = maze_str.strip().split('\\')
    width, height = len(rows), len(rows[0])
    maze_arr = np.zeros((width, height), dtype=np.int32)
    for w in range(width):
        for h in range(height):
            tile = rows[w][h]
            if tile == '#':
                maze_arr[w][h] = WALL
            elif tile == 'G':
                maze_arr[w][h] = GOAL
            elif tile in (' ', 'O', '0'):
                maze_arr[w][h] = EMPTY
            else:
                raise ValueError('Unknown tile type: %s' % tile)
    return maze_arr
def point_maze(maze_str):
    """Build a MuJoCo model for the given maze string: a force-actuated
    point mass ("particle") on two slide joints, a target site, and one
    box geom per wall tile.  Returns the MJCModel (not yet written to disk)."""
    maze_arr = parse_maze(maze_str)

    mjcmodel = MJCModel('point_maze')
    mjcmodel.root.compiler(inertiafromgeom="true", angle="radian", coordinate="local")
    # zero gravity: the ball moves only under the applied x/y motor forces
    mjcmodel.root.option(timestep="0.01", gravity="0 0 0", iterations="20", integrator="Euler")
    default = mjcmodel.root.default()
    default.joint(damping=1, limited='false')
    default.geom(friction=".5 .1 .1", density="1000", margin="0.002", condim="1", contype="2", conaffinity="1")

    # Visual-only assets (ground texture, skybox, materials for walls/target).
    asset = mjcmodel.root.asset()
    asset.texture(type="2d",name="groundplane",builtin="checker",rgb1="0.2 0.3 0.4",rgb2="0.1 0.2 0.3",width=100,height=100)
    asset.texture(name="skybox",type="skybox",builtin="gradient",rgb1=".4 .6 .8",rgb2="0 0 0",
               width="800",height="800",mark="random",markrgb="1 1 1")
    asset.material(name="groundplane",texture="groundplane",texrepeat="20 20")
    asset.material(name="wall",rgba=".7 .5 .3 1")
    asset.material(name="target",rgba=".6 .3 .3 1")

    visual = mjcmodel.root.visual()
    visual.headlight(ambient=".4 .4 .4",diffuse=".8 .8 .8",specular="0.1 0.1 0.1")
    visual.map(znear=.01)
    visual.quality(shadowsize=2048)

    worldbody = mjcmodel.root.worldbody()
    worldbody.geom(name='ground',size="40 40 0.25",pos="0 0 -0.1",type="plane",contype=1,conaffinity=0,material="groundplane")

    # The controllable point mass: a sphere with two orthogonal slide joints.
    particle = worldbody.body(name='particle', pos=[1.2,1.2,0])
    particle.geom(name='particle_geom', type='sphere', size=0.1, rgba='0.0 0.0 1.0 0.0', contype=1)
    particle.site(name='particle_site', pos=[0.0,0.0,0], size=0.2, rgba='0.3 0.6 0.3 1')
    particle.joint(name='ball_x', type='slide', pos=[0,0,0], axis=[1,0,0])
    particle.joint(name='ball_y', type='slide', pos=[0,0,0], axis=[0,1,0])

    worldbody.site(name='target_site', pos=[0.0,0.0,0], size=0.2, material='target')

    # One unit box per wall cell; +1.0 offsets grid coordinates into world frame.
    width, height = maze_arr.shape
    for w in range(width):
        for h in range(height):
            if maze_arr[w,h] == WALL:
                worldbody.geom(conaffinity=1,
                               type='box',
                               name='wall_%d_%d'%(w,h),
                               material='wall',
                               pos=[w+1.0,h+1.0,0],
                               size=[0.5,0.5,0.2])

    # Force actuators on the two slide joints.
    actuator = mjcmodel.root.actuator()
    actuator.motor(joint="ball_x", ctrlrange=[-1.0, 1.0], ctrllimited=True, gear=100)
    actuator.motor(joint="ball_y", ctrlrange=[-1.0, 1.0], ctrllimited=True, gear=100)

    return mjcmodel
# Maze layouts in the format understood by parse_maze(): rows separated by a
# literal backslash, '#' = wall, 'O' = empty cell, 'G' = goal location.
LARGE_MAZE = \
        "############\\"+\
        "#OOOO#OOOOO#\\"+\
        "#O##O#O#O#O#\\"+\
        "#OOOOOO#OOO#\\"+\
        "#O####O###O#\\"+\
        "#OO#O#OOOOO#\\"+\
        "##O#O#O#O###\\"+\
        "#OO#OOO#OGO#\\"+\
        "############"

# Evaluation variant of LARGE_MAZE (same size, rearranged walls/goal).
LARGE_MAZE_EVAL = \
        "############\\"+\
        "#OO#OOO#OGO#\\"+\
        "##O###O#O#O#\\"+\
        "#OO#O#OOOOO#\\"+\
        "#O##O#OO##O#\\"+\
        "#OOOOOO#OOO#\\"+\
        "#O##O#O#O###\\"+\
        "#OOOO#OOOOO#\\"+\
        "############"

MEDIUM_MAZE = \
        '########\\'+\
        '#OO##OO#\\'+\
        '#OO#OOO#\\'+\
        '##OOO###\\'+\
        '#OO#OOO#\\'+\
        '#O#OO#O#\\'+\
        '#OOO#OG#\\'+\
        "########"

# Evaluation variant of MEDIUM_MAZE.
MEDIUM_MAZE_EVAL = \
        '########\\'+\
        '#OOOOOG#\\'+\
        '#O#O##O#\\'+\
        '#OOOO#O#\\'+\
        '###OO###\\'+\
        '#OOOOOO#\\'+\
        '#OO##OO#\\'+\
        "########"

SMALL_MAZE = \
        "######\\"+\
        "#OOOO#\\"+\
        "#O##O#\\"+\
        "#OOOO#\\"+\
        "######"

U_MAZE = \
        "#####\\"+\
        "#GOO#\\"+\
        "###O#\\"+\
        "#OOO#\\"+\
        "#####"

# Evaluation variant of U_MAZE (goal moved).
U_MAZE_EVAL = \
        "#####\\"+\
        "#OOG#\\"+\
        "#O###\\"+\
        "#OOO#\\"+\
        "#####"

# Open room with a central goal and no interior walls.
OPEN = \
        "#######\\"+\
        "#OOOOO#\\"+\
        "#OOGOO#\\"+\
        "#OOOOO#\\"+\
        "#######"
class MazeEnv(mujoco_env.MujocoEnv, utils.EzPickle, offline_env.OfflineEnv):
    """Point-mass maze environment: a 2-DOF ball pushed around a maze.

    Observations are the concatenated qpos/qvel of the MuJoCo model; the
    reward is either a sparse within-0.5 indicator or a dense exp(-distance)
    to the current target location.
    """

    def __init__(self,
                 maze_spec=U_MAZE,
                 reward_type='dense',
                 reset_target=False,
                 **kwargs):
        offline_env.OfflineEnv.__init__(self, **kwargs)

        self.reset_target = reset_target
        self.str_maze_spec = maze_spec
        self.maze_arr = parse_maze(maze_spec)
        self.reward_type = reward_type
        # Candidate reset positions: every empty tile, sorted for determinism.
        self.reset_locations = list(zip(*np.where(self.maze_arr == EMPTY)))
        self.reset_locations.sort()

        self._target = np.array([0.0,0.0])

        # Write the generated MJCF to a temp file for MujocoEnv to load.
        model = point_maze(maze_spec)
        with model.asfile() as f:
            mujoco_env.MujocoEnv.__init__(self, model_path=f.name, frame_skip=1)
        utils.EzPickle.__init__(self)

        # Set the default goal (overridden by a call to set_target)
        # Try to find a goal if it exists
        self.goal_locations = list(zip(*np.where(self.maze_arr == GOAL)))
        if len(self.goal_locations) == 1:
            self.set_target(self.goal_locations[0])
        elif len(self.goal_locations) > 1:
            raise ValueError("More than 1 goal specified!")
        else:
            # If no goal, use the first empty tile
            self.set_target(np.array(self.reset_locations[0]).astype(self.observation_space.dtype))
        self.empty_and_goal_locations = self.reset_locations + self.goal_locations

    def step(self, action):
        """Clip controls and velocity, advance the sim, return (obs, r, done, info).

        Episodes never terminate here (done is always False); termination is
        handled by wrappers/time limits.
        """
        action = np.clip(action, -1.0, 1.0)
        self.clip_velocity()
        self.do_simulation(action, self.frame_skip)
        self.set_marker()
        ob = self._get_obs()
        if self.reward_type == 'sparse':
            reward = 1.0 if np.linalg.norm(ob[0:2] - self._target) <= 0.5 else 0.0
        elif self.reward_type == 'dense':
            reward = np.exp(-np.linalg.norm(ob[0:2] - self._target))
        else:
            raise ValueError('Unknown reward type %s' % self.reward_type)
        done = False
        return ob, reward, done, {}

    def _get_obs(self):
        # Observation = [x, y, vx, vy].
        return np.concatenate([self.sim.data.qpos, self.sim.data.qvel]).ravel()

    def get_target(self):
        """Return the current goal position (2-vector in maze coordinates)."""
        return self._target

    def set_target(self, target_location=None):
        """Set the goal; when None, sample a random empty/goal tile with jitter."""
        if target_location is None:
            idx = self.np_random.choice(len(self.empty_and_goal_locations))
            reset_location = np.array(self.empty_and_goal_locations[idx]).astype(self.observation_space.dtype)
            target_location = reset_location + self.np_random.uniform(low=-.1, high=.1, size=self.model.nq)
        self._target = target_location

    def set_marker(self):
        # Move the visual target site; +1 converts grid coords to world frame
        # (matching the wall placement in point_maze()).
        self.data.site_xpos[self.model.site_name2id('target_site')] = np.array([self._target[0]+1, self._target[1]+1, 0.0])

    def clip_velocity(self):
        # Keep the ball's speed bounded so large actions cannot tunnel walls.
        qvel = np.clip(self.sim.data.qvel, -5.0, 5.0)
        self.set_state(self.sim.data.qpos, qvel)

    def reset_model(self):
        """Respawn at a random empty/goal tile (with jitter); optionally resample target."""
        idx = self.np_random.choice(len(self.empty_and_goal_locations))
        reset_location = np.array(self.empty_and_goal_locations[idx]).astype(self.observation_space.dtype)
        qpos = reset_location + self.np_random.uniform(low=-.1, high=.1, size=self.model.nq)
        qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1
        self.set_state(qpos, qvel)
        if self.reset_target:
            self.set_target()
        return self._get_obs()

    def reset_to_location(self, location):
        """Reset the sim with the ball near the given (row, col) location."""
        self.sim.reset()
        reset_location = np.array(location).astype(self.observation_space.dtype)
        qpos = reset_location + self.np_random.uniform(low=-.1, high=.1, size=self.model.nq)
        qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1
        self.set_state(qpos, qvel)
        return self._get_obs()

    def viewer_setup(self):
        # Default camera placement is fine for this env.
        pass
| {
"repo_name": "rail-berkeley/d4rl",
"path": "d4rl/pointmaze/maze_model.py",
"copies": "1",
"size": "8509",
"license": "apache-2.0",
"hash": 1773687547745133800,
"line_mean": 33.7306122449,
"line_max": 126,
"alpha_frac": 0.5296744623,
"autogenerated": false,
"ratio": 2.9908611599297013,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8942080297645283,
"avg_score": 0.015691064916883823,
"num_lines": 245
} |
""" A point-to-point drawn polygon. """
# Enthought library imports.
from enable.primitives.api import Polygon
from traits.api import Int, Instance
from drawing_tool import DrawingTool
class PointPolygon(DrawingTool):
    """ A point-to-point drawn polygon.

    State machine: 'normal' (no vertices yet) -> 'incomplete' (vertices being
    placed) -> 'complete' (closed polygon; vertices can be dragged or
    control-clicked to delete), with a transient 'drag_point' state while a
    vertex is being moved.
    """

    # The actual polygon primitive we are editing.
    polygon = Instance(Polygon, args=())

    # The pixel distance from a vertex that is considered 'on' the vertex.
    proximity_distance = Int(4)

    # Override the default value of this inherited trait
    draw_mode = "overlay"

    # The index of the vertex being dragged, if any.
    _dragged = Int

    def reset(self):
        """ Discard all vertices and return to the 'normal' state. """
        self.polygon.model.reset()
        self.event_state = "normal"
        return

    #------------------------------------------------------------------------
    # "complete" state
    #------------------------------------------------------------------------

    def complete_draw(self, gc):
        """ Draw a closed polygon. """
        self.polygon.border_dash = None
        self.polygon._draw_closed(gc)
        return

    def complete_left_down(self, event):
        """ Handle the left mouse button going down in the 'complete' state. """
        # Ignore the click if it contains modifiers we do not handle.
        polygon = self.polygon
        if event.shift_down or event.alt_down:
            event.handled = False
        else:
            # If we are over a point, we will either move it or remove it.
            over = self._over_point(event, polygon.model.points)
            if over is not None:
                # Control down means remove it.
                if event.control_down:
                    del polygon.model.points[over]
                # Otherwise, prepare to drag it.
                else:
                    self._dragged = over
                    event.window.set_pointer('right arrow')
                    self.event_state = 'drag_point'
                self.request_redraw()
        return

    def complete_mouse_move(self, event):
        """ Handle the mouse moving in the 'complete' state. """
        # If we are over a point, then we have to prepare to move it.
        over = self._over_point(event, self.polygon.model.points)
        if over is not None:
            if event.control_down:
                event.window.set_pointer('bullseye')
            else:
                event.window.set_pointer('right arrow')
        else:
            event.handled = False
            event.window.set_pointer('arrow')
        self.request_redraw()
        return

    #------------------------------------------------------------------------
    # "drag_point" state
    #------------------------------------------------------------------------

    def drag_point_draw(self, gc):
        """ Draw the polygon in the 'drag_point' state. """
        self.complete_draw(gc)
        return

    def drag_point_left_up(self, event):
        """ Handle the left mouse coming up in the 'drag_point' state. """
        self.event_state = 'complete'
        self.request_redraw()
        return

    def drag_point_mouse_move(self, event):
        """ Handle the mouse moving in the 'drag_point' state. """
        polygon = self.polygon
        dragged_point = polygon.model.points[self._dragged]
        # If the point has actually moved, update it.
        # NOTE(review): x is offset by +self.x while y uses -self.y, here and in
        # the other handlers below -- presumably a window-to-component
        # coordinate conversion; confirm against enable's coordinate system.
        if dragged_point != (event.x, event.y):
            polygon.model.points[self._dragged] = \
                (event.x + self.x, event.y - self.y)
            self.request_redraw()
        return

    #------------------------------------------------------------------------
    # "incomplete" state
    #------------------------------------------------------------------------

    def incomplete_draw(self, gc):
        """ Draw the polygon (open, dashed outline) in the 'incomplete' state. """
        self.polygon.border_dash = (4.0, 2.0)
        self.polygon._draw_open(gc)
        return

    def incomplete_left_dclick(self, event):
        """ Handle a left double-click in the incomplete state. """
        # Remove the point that was placed by the first mouse up, since
        # another one will be placed on the up stroke of the double click.
        del self.polygon.model.points[-1]
        event.window.set_pointer('right arrow')
        self.event_state = 'complete'
        self.complete = True
        self.request_redraw()
        return

    def incomplete_left_up(self, event):
        """ Handle the left mouse button coming up in incomplete state. """
        # If the click was over the start vertex, we are done.
        if self._is_over_start( event ):
            # Drop the floating point that tracked the mouse; the polygon
            # closes back to the start vertex instead.
            del self.polygon.model.points[-1]
            self.event_state = 'complete'
            event.window.set_pointer('right arrow')
            self.complete = True
        # Otherwise, add the point and move on.
        else:
            self.polygon.model.points.append((event.x + self.x, event.y - self.y))
        self.request_redraw()
        return

    def incomplete_mouse_move(self, event):
        """ Handle the mouse moving in incomplete state. """
        # If we move over the initial point, then we change the cursor.
        if self._is_over_start( event ):
            event.window.set_pointer('bullseye')
        else:
            event.window.set_pointer('pencil')
        # If the point has actually changed, then we need to update our model.
        if self.polygon.model.points != (event.x + self.x, event.y - self.y):
            self.polygon.model.points[-1] = (event.x + self.x, event.y - self.y)
            self.request_redraw()
        return

    #------------------------------------------------------------------------
    # "normal" state
    #------------------------------------------------------------------------

    def normal_left_up(self, event):
        """ Handle the left button up in the 'normal' state. """
        # Append the current point twice, because we need to have the starting
        # point and the current point be separate, since the current point
        # will be moved with the mouse from now on.
        pt = (event.x + self.x, event.y - self.y)
        self.polygon.model.points.append(pt)
        self.polygon.model.points.append(pt)
        self.event_state = 'incomplete'
        return

    def normal_mouse_move(self, event):
        """ Handle the mouse moving in the 'normal' state. """
        event.window.set_pointer('pencil')
        return

    #------------------------------------------------------------------------
    # private methods
    #------------------------------------------------------------------------

    def _is_near_point(self, point, event):
        """ Determine if the pointer is near a specified point.

        Uses the L1 (Manhattan) distance against proximity_distance.
        """
        event_point = (event.x + self.x, event.y - self.y)

        return ((abs(point[0] - event_point[0]) + \
                 abs(point[1] - event_point[1])) <= self.proximity_distance)

    def _is_over_start(self, event):
        """ Test if the event is 'over' the starting vertex. """
        return (len(self.polygon.model.points) > 0 and
                self._is_near_point(self.polygon.model.points[0], event))

    def _over_point(self, event, points):
        """ Return the index of a point in points that event is 'over'.

        Returns none if there is no such point.
        """
        for i, point in enumerate(points):
            if self._is_near_point(point, event):
                result = i
                break
        else:
            result = None
        return result

# EOF
| {
"repo_name": "tommy-u/enable",
"path": "enable/drawing/point_polygon.py",
"copies": "1",
"size": "7595",
"license": "bsd-3-clause",
"hash": 8934173843412647000,
"line_mean": 34.9952606635,
"line_max": 82,
"alpha_frac": 0.5249506254,
"autogenerated": false,
"ratio": 4.4182664339732405,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0014203496451758951,
"num_lines": 211
} |
""" A point-to-point drawn polygon. """
from __future__ import with_statement
from enable.api import cursor_style_trait, Line
from traits.api import Event, Int, Instance
from drawing_tool import DrawingTool
class PointLine(DrawingTool):
    """ A point-to-point drawn line.

    State machine: 'normal' (no vertices yet) -> 'incomplete' (vertices being
    added) -> 'complete' (finished line; vertices can be dragged or
    control-clicked to delete), with a transient 'drag_point' state.
    """

    # Our contained "Line" instance; it stores the points and does the actual
    # drawing.
    line = Instance(Line, args=())

    # Override the draw_mode value we inherit from DrawingTool
    draw_mode = "overlay"

    # The pixel distance from a vertex that is considered 'on' the vertex.
    proximity_distance = Int(4)

    # The cursor shapes to use for various modes
    normal_cursor = cursor_style_trait('arrow')
    drawing_cursor = cursor_style_trait('pencil')
    delete_cursor = cursor_style_trait('bullseye')
    move_cursor = cursor_style_trait('sizing')

    # The index of the vertex being dragged, if any.
    _dragged = Int

    # Fired when the line is finished (double click or closing click).
    complete = Event

    def add_point(self, point):
        """ Add the point. """
        self.line.points.append(point)
        return

    def get_point(self, index):
        """ Get the point at the specified index. """
        return self.line.points[ index ]

    def set_point(self, index, point):
        """ Set the point at the specified index to point. """
        self.line.points[index] = point
        return

    def remove_point(self, index):
        """ Remove the point with the specified index. """
        del self.line.points[index]
        return

    #------------------------------------------------------------------------
    # DrawingTool interface
    #------------------------------------------------------------------------

    def reset(self):
        """ Discard all vertices and return to the 'normal' state. """
        self.line.points = []
        self.event_state = "normal"
        return

    #------------------------------------------------------------------------
    # "complete" state
    #------------------------------------------------------------------------

    def complete_draw(self, gc):
        """ Draw the completed (solid) line. """
        self.line.line_dash = None
        with gc:
            self.line._draw_mainlayer(gc)
        return

    def complete_left_down(self, event):
        """ Handle the left mouse button going down in the 'complete' state. """
        # Ignore the click if it contains modifiers we do not handle.
        if event.shift_down or event.alt_down:
            event.handled = False
        else:
            # If we are over a point, we will either move it or remove it.
            over = self._over_point(event, self.line.points)
            if over is not None:
                # Control down means remove it.
                if event.control_down:
                    self.remove_point(over)
                    self.updated = self
                # Otherwise, prepare to drag it.
                else:
                    self._dragged = over
                    event.window.set_pointer(self.move_cursor)
                    self.event_state = 'drag_point'
                self.request_redraw()
        return

    def complete_mouse_move(self, event):
        """ Handle the mouse moving in the 'complete' state. """
        # If we are over a point, then we have to prepare to move it.
        over = self._over_point(event, self.line.points)
        if over is not None:
            if event.control_down:
                event.window.set_pointer(self.delete_cursor)
            else:
                event.window.set_pointer(self.move_cursor)
        else:
            event.handled = False
            event.window.set_pointer(self.normal_cursor)
        self.request_redraw()
        return

    #------------------------------------------------------------------------
    # "drag" state
    #------------------------------------------------------------------------

    def drag_point_draw(self, gc):
        """ Draw the polygon in the 'drag_point' state. """
        self.line._draw_mainlayer(gc)
        return

    def drag_point_left_up(self, event):
        """ Handle the left mouse coming up in the 'drag_point' state. """
        self.event_state = 'complete'
        self.updated = self
        return

    def drag_point_mouse_move(self, event):
        """ Handle the mouse moving in the 'drag_point' state. """
        dragged_point = self.get_point(self._dragged)
        # If the point has actually moved, update it.
        if dragged_point != (event.x, event.y):
            self.set_point(self._dragged, (event.x, event.y))
            self.request_redraw()
        return

    #------------------------------------------------------------------------
    # "incomplete" state
    #------------------------------------------------------------------------

    def incomplete_draw(self, gc):
        """ Draw the line in the 'incomplete' state. """
        with gc:
            gc.set_fill_color((0, 0, 0, 0))
            gc.rect(50, 50, 100, 100)
            self.line._draw_mainlayer(gc)
        return

    def incomplete_left_dclick(self, event):
        """ Handle a left double-click in the incomplete state. """
        # Remove the point that was placed by the first mouse down, since
        # another one will be placed on the down stroke of the double click.
        self.remove_point(-1)
        event.window.set_pointer(self.move_cursor)
        self.event_state = 'complete'
        self.complete = True
        self.request_redraw()
        return

    def incomplete_left_down(self, event):
        """ Handle the left mouse button going down in incomplete state. """
        # Add the point.
        self.add_point((event.x, event.y))
        self.updated = self
        return

    def incomplete_mouse_move(self, event):
        """ Handle the mouse moving in incomplete state. """
        # If we move over the initial point, then we change the cursor.
        event.window.set_pointer(self.drawing_cursor)

        # If the point has actually changed, then we need to update our model.
        if self.get_point(-1) != (event.x, event.y):
            self.set_point(-1, (event.x, event.y))
            self.request_redraw()
        return

    #------------------------------------------------------------------------
    # "normal" state
    #------------------------------------------------------------------------

    def normal_left_down(self, event):
        """ Handle the left button down in the 'normal' state. """
        # Append the current point twice, because we need to have the starting
        # point and the current point be separate, since the current point
        # will be moved with the mouse from now on.
        self.add_point((event.x, event.y))
        self.add_point((event.x, event.y))
        self.event_state = 'incomplete'
        self.updated = self
        # BUG FIX: the original assigned to self.line_dash, an attribute that
        # does not exist on this tool; the dash pattern belongs to the
        # contained Line primitive (complete_draw() resets self.line.line_dash).
        self.line.line_dash = (4.0, 2.0)
        return

    def normal_mouse_move(self, event):
        """ Handle the mouse moving in the 'normal' state. """
        event.window.set_pointer(self.drawing_cursor)
        return

    #------------------------------------------------------------------------
    # Private interface
    #------------------------------------------------------------------------

    def _updated_fired(self, event):
        # The self.updated trait is used by point_line and can be used by
        # others to indicate that the model has been updated.  For now, the
        # only action taken is to do a redraw.
        self.request_redraw()

    def _is_near_point(self, point, event):
        """ Determine if the pointer is near a specified point.

        Uses the L1 (Manhattan) distance against proximity_distance.
        """
        event_point = (event.x, event.y)

        return ((abs( point[0] - event_point[0] ) + \
                 abs( point[1] - event_point[1] )) <= self.proximity_distance)

    def _is_over_start(self, event):
        """ Test if the event is 'over' the starting vertex.

        BUG FIX: the original referenced self.points, which does not exist on
        this tool -- the vertices live on the contained Line primitive -- so
        any call would have raised AttributeError.
        """
        return (len(self.line.points) > 0 and
                self._is_near_point(self.line.points[0], event))

    def _over_point(self, event, points):
        """ Return the index of a point in points that event is 'over'.

        Returns None if there is no such point.
        """
        for i, point in enumerate(points):
            if self._is_near_point(point, event):
                result = i
                break
        else:
            result = None
        return result

# EOF
| {
"repo_name": "tommy-u/enable",
"path": "enable/drawing/point_line.py",
"copies": "1",
"size": "8384",
"license": "bsd-3-clause",
"hash": 7033445797198004000,
"line_mean": 34.5254237288,
"line_max": 80,
"alpha_frac": 0.5213501908,
"autogenerated": false,
"ratio": 4.428948758584258,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001253577677606507,
"num_lines": 236
} |
# A political problem
# Suppose that you are a politician trying to win an election. Your district has three
# different types of areas - urban, suburban, and rural. These areas have, respectively,
# 100,000, 200,000, and 50,000 registered voters. Although not all the registered voters
# actually go to the polls, you decide that to govern effectively, you would like at least
# half the registered voters in each of the three regions to vote for you. You are honorable
# and would never consider supporting policies in which you do not believe. You realize,
# however, that certain issues may be more effective in winning votes in certain places.
# Your primary issues are building more roads, gun control, farm subsidies, and a gasoline tax
# dedicated to improved public transit.
# According to your campaign staff's research, you can estimate how many votes you win or lose
# from each population segment by spending $1,000 on advertising on each issue.
# POLICY | URBAN SUBURBAN RURAL
# -----------------------------------------
# build roads | -2 5 3
# gun control | 8 2 -5
# farm subsidies | 0 0 10
# gasoline tax | 10 0 -2
#
# In this table, each entry indicates the number of thousands of either urban, suburban,
# or rural voters who would be won over by spending $1,000 on advertising in support of
# a particular issue. Negative entries denote votes that would be lost.
# Your task is to figure out the minimum amount of money that you need to spend in order to win
# 50,000 urban votes, 10,000 suburban votes, and 25,000 rural votes.
from minizinc import *

# Decision variables: thousands of advertising dollars spent on each issue
# (x1 = roads, x2 = gun control, x3 = farm subsidies, x4 = gasoline tax).
m = Model()
x1 = m.Variable(0.0,10000.0)
x2 = m.Variable(0.0,10000.0)
x3 = m.Variable(0.0,10000.0)
x4 = m.Variable(0.0,10000.0)

# One constraint per voter segment (urban, suburban, rural), in thousands of
# votes; coefficients come from the table in the header comment above.
m.Constraint( -2.0*x1 + 8.0*x2 + 0.0*x3 + 10.0*x4 >= 50.0,
               5.0*x1 + 2.0*x2 + 0.0*x3 + 0.0*x4 >= 100.0,
               3.0*x1 - 5.0*x2 + 10.0*x3 - 2.0*x4 >= 25.0,
              )
m._debugprint()

m.mznmodel.set_time_limit(10)
# Objective: minimize total advertising spend.
m.minimize(x1+x2+x3+x4)
m.next()

# Report the optimal spend and its breakdown (Python 2 print statements).
print "Minimize cost = ", x1 + x2 + x3 + x4
print "where x1 = ", x1
print "      x2 = ", x2
print "      x3 = ", x3
print "      x4 = ", x4
| {
"repo_name": "nathanielbaxter/libminizinc",
"path": "interfaces/python/policy.py",
"copies": "2",
"size": "2168",
"license": "mpl-2.0",
"hash": -8221732782836870000,
"line_mean": 39.9056603774,
"line_max": 94,
"alpha_frac": 0.667896679,
"autogenerated": false,
"ratio": 2.8229166666666665,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4490813345666667,
"avg_score": null,
"num_lines": null
} |
'''
Created on Jul 22, 2014
@author: galena
This code implements proximal stochastic gradient descent with AdaGrad for very large, sparse, multilabel problems.
The weights are stored in a sparse matrix structure that permits changing of the sparsity pattern on the fly, via lazy computation of the iterated proximal operator.
AdaGrad can be applied with step-sizes shared for each feature over all labels (appropriate for large problems) or with individual step sizes for each feature/label
'''
import math
import numpy as np
import scipy.sparse as sp
import numpy.linalg as linalg
from scipy.stats import logistic
import sys, getopt, re, gzip
import cProfile, pstats, StringIO
import cPickle
# Print some information about a vector
def printStats(x):
    # One-line summary statistics of a vector (Python 2 print statement).
    print "max: " + str(np.amax(x)) + " min: " + str(np.amin(x)) + " mean: " + str(np.mean(x)) + " median: " + str(np.median(x))
# Compute nnz for a matrix
def nnz(A):
    """Return the number of nonzero entries in the array ``A``.

    The original materialized the whole array as a Python list and counted
    zeros (size - zeros); ``np.count_nonzero`` computes the same value in a
    single C-level pass and also works for arrays of any rank, not just 2-D.
    """
    return np.count_nonzero(A)
# Print the online loss "ol", test loss "tl", test f1 for each label set, and nnz(W)
def printOutputLine(subEpoch, wRows, wData, b, testX, testY, l1, l2, loss):
    """Print one progress line: "<epoch>-<subEpoch>", the regularized online
    loss ("ol"), and -- when test data is available -- test loss ("tl"),
    example-based F1 ("f1") and macro F1 ("mf1"), plus nnz(W) when L1 is on.

    Relies on the module globals ``epoch`` and ``haveTestData``.
    """
    print str(epoch) + '-' + str(subEpoch),
    # add the elastic-net penalty so "ol" reports the full objective
    loss = loss + getRegLoss(wData,l1,l2)
    print "ol: %.15f" % loss,
    if haveTestData:
        testLoss, f1 = getLoss(testX, wRows,wData, b, testY)
        print "tl: %f" % testLoss,
        print "f1: %f" % f1,
        macroF1 = getLossMacro(testX, wRows, wData, b, testY)
        print "mf1: %f" % macroF1,
    if l1 > 0:
        # total number of stored (nonzero) weights across all feature rows
        nnz = sum([len(x) for x in wRows])
        print "nnz_w: %d" % nnz,
    print
# Get the next instance, either drawn uniformly at random
# or looping over the data. The sparse representation is returned
# X is assumed to be a csr_matrix
def getSample(X, t):
    """Return ``(row, xInd, xVal)`` for training step ``t``.

    Row selection is controlled by module globals: a fixed permutation
    (``usePerm``/``perm``), uniform sampling with replacement
    (``sampleWithReplacement``), or a sequential sweep.  ``X`` is assumed
    to be a ``scipy.sparse.csr_matrix``; the CSR arrays are sliced directly
    to avoid constructing a sparse row object.
    """
    if usePerm:
        row = perm[t % nr]
    elif sampleWithReplacement:
        row = np.random.randint(nr)
    else:
        row = t % nr

    startRow = X.indptr[row]
    endRow = X.indptr[row+1]
    xInd = X.indices[startRow:endRow]
    xVal = X.data[startRow:endRow]
    return (row, xInd, xVal)
# vectorized computation of the iterated proximal map
# under the assumption that w is positive
# l1 and l2 may be arrays of the same dimensions as w,
# in which case k may also be an array, or it can be a constant
# if l1 and l2 are constants, it is assumed k is constant
def iteratedProx_pos(w, k, l1, l2):
    """Apply ``k`` iterations of the elastic-net proximal operator to ``w``,
    assuming ``w`` is nonnegative; the result is clipped at zero.

    ``l1`` and ``l2`` may be arrays with the same shape as ``w`` (in which
    case ``k`` may also be an array) or scalars (in which case ``k`` is
    assumed scalar too).  Uses the closed form a^k * w - a*l1*(1-a^k)/(1-a)
    with a = 1/(1+l2) when l2 > 0, and w - k*l1 otherwise.
    """
    out = np.empty(w.shape)
    if isinstance(l2, np.ndarray):
        shrunk = l2 > 0
        if shrunk.any():
            a = 1.0 / (1.0 + l2[shrunk])
            kk = k[shrunk] if isinstance(k, np.ndarray) else k
            decay = a ** kk
            out[shrunk] = decay * w[shrunk] - a * l1[shrunk] * (1 - decay) / (1 - a)
        plain = ~shrunk
        kk = k[plain] if isinstance(k, np.ndarray) else k
        out[plain] = w[plain] - kk * l1[plain]
    elif l2 > 0:
        a = 1.0 / (1.0 + l2)
        decay = a ** k
        out = decay * w - a * l1 * (1 - decay) / (1 - a)
    else:
        out = w - k * l1
    return np.clip(out, 0.0, np.inf)
# vectorized computation of the proximal map
def prox(w, l1, l2):
    """Single elastic-net proximal step: soft-threshold by ``l1`` then
    shrink by ``1/(1+l2)``.  ``l1``/``l2`` may be scalars or arrays."""
    if isinstance(l1, np.ndarray):
        anyL1 = (l1 > 0).sum() > 0
    else:
        anyL1 = l1 > 0
    if not anyL1:
        # pure ridge: just shrink toward zero
        return w / (1 + l2)
    shrunk = np.clip(np.abs(w) - l1, 0, np.inf)
    shrunk *= np.sign(w) / (1 + l2)
    return shrunk
# vectorized computation of iterated proximal map
def iteratedProx(w, k, l1, l2):
    """Signed version of ``iteratedProx_pos``: apply ``k`` iterated proximal
    steps to ``w`` of arbitrary sign and return the result.

    BUG FIX: the original flipped the negative entries of ``w`` in place
    (``w[neg] *= -1``) and never restored them, so the caller's array was
    left holding absolute values after the call.  Work on ``np.abs(w)``
    (a fresh array) instead, leaving the input untouched.
    """
    neg = w < 0
    res = iteratedProx_pos(np.abs(w), k, l1, l2)
    # restore the original signs on the shrunken magnitudes
    res[neg] *= -1
    return res
# take dense "result" and store it sparsely in W
def reassignToConvertedW(wRows, wData, xInds, result):
    """Scatter the dense rows of ``result`` back into the sparse weight
    structure: for each feature index in ``xInds``, keep only the nonzero
    label entries of the corresponding ``result`` row."""
    for pos, feat in enumerate(xInds):
        dense = result[pos, :]
        kept = np.flatnonzero(dense)
        wRows[feat] = kept
        wData[feat] = dense[kept]
# update all rows of W to incorporate proximal mappings
def bringAllUpToDate(wRows, wData, tVec, t):
    """Flush all outstanding lazy proximal updates so every feature row of W
    is current as of time step ``t``, then re-sparsify each row.

    Uses the module globals ``useAdaGrad``, ``useSharedStep``, ``eta``,
    ``n`` (accumulated squared gradients), ``l1`` and ``l2``.
    """
    nc = tVec.size
    for feat in range(nc):
        # number of proximal steps this feature has missed since last touched
        k = t - tVec[feat]
        if useAdaGrad:
            if useSharedStep:
                # one AdaGrad step size shared across all labels of this feature
                etaVec = eta/(1+np.sqrt(n[feat]))
            else:
                # per-(feature,label) AdaGrad step sizes
                etaVec = eta/(1+np.sqrt(n[feat,:]))
        else:
            etaVec = eta
        wData[feat] = iteratedProx(wData[feat], k, l1*etaVec, l2*etaVec)
        # sparsify: drop entries driven exactly to zero by the prox steps
        nz = np.flatnonzero(wData[feat])
        wRows[feat] = wRows[feat][nz]
        wData[feat] = wData[feat][nz]
# train weights with proximal stochastic gradient (optionally AdaGrad)
def trainProx(wRows, wData, n, b, X, y, eta, l1, l2, outputFreq):
    """One epoch of proximal stochastic gradient descent (optionally AdaGrad)
    over the sparse multilabel problem.

    wRows/wData : per-feature sparse weight rows (indices / values), updated
                  in place; proximal updates are applied lazily via tVec.
    n           : AdaGrad accumulated squared gradients (per feature, or per
                  feature/label depending on ``useSharedStep``).
    b           : fixed per-label bias vector.
    X, y        : CSR training features and labels.
    eta, l1, l2 : step size and elastic-net regularization strengths.
    outputFreq  : how often (in steps) to flush lazy updates and print stats.

    Also relies on module globals useAdaGrad/useSharedStep/useSqErr and on
    the test data referenced by printOutputLine.
    """
    nr,nc = X.shape
    nl = y.shape[1]
    assert y.shape[0] == nr
    assert b.size == nl
    if useAdaGrad:
        if useSharedStep:
            assert n.size == nc
        else:
            assert n.shape == (nc,nl)

    # vector of time step at which each coordinate is up-to-date
    tVec = np.zeros(nc, dtype=np.int64)

    onlineLoss = 0
    totalOnlineLoss = 0
    subEpoch = 0

    for t in range(nr):
        if t % 100 == 0:
            print "training row: " + str(t)
        (row, xInds, xVals) = getSample(X, t)
        # skip all-zero rows: they contribute no gradient for the weights
        if xInds.size == 0:
            continue

        # 1. Lazily update relevant rows of w, storing them in tempW
        #    (tempW concatenates the stored nonzeros of all touched features)
        totalNnzW = sum(wRows[xInd].size for xInd in xInds)
        tempW = np.ndarray(totalNnzW)
        kVec = np.ndarray(totalNnzW, dtype=np.int64)
        if useAdaGrad:
            etaVec = np.ndarray(totalNnzW)
        else:
            etaVec = eta
        pos = 0
        for xInd in xInds:
            numW = wRows[xInd].size
            endPos = pos+numW
            # how many prox steps this feature missed since it was last touched
            kVec[pos:endPos] = t - tVec[xInd]
            if useAdaGrad:
                if useSharedStep:
                    etaVec[pos:endPos] = eta / (1 + math.sqrt(n[xInd]))
                else:
                    etaVec[pos:endPos] = eta / (1 + np.sqrt(n[xInd,wRows[xInd]]))
            tempW[pos:endPos] = wData[xInd]
            pos = endPos
        tempW = iteratedProx(tempW, kVec, l1*etaVec, l2*etaVec)
        tVec[xInds] = t

        # 2. Compute scores (bias plus sparse dot products)
        scores = b.copy()
        pos = 0
        for (xInd, xVal) in zip(xInds, xVals):
            numW = wRows[xInd].size
            endPos = pos+numW
            scores[wRows[xInd]] += tempW[pos:endPos] * xVal
            pos = endPos

        # 3. Compute loss and subtract labels from (transformed) scores for gradient
        (startY, endY) = y.indptr[row], y.indptr[row+1]
        yCols = y.indices[startY:endY]
        yVals = y.data[startY:endY]

        if useSqErr:
            # linear probability model:
            # quadratic loss for incorrect prediction, no penalty for invalid
            # (out of range) correct prediction
            scores[yCols] = yVals - scores[yCols]
            scores = np.clip(scores, 0, np.inf)
            scores[yCols] *= -1
            loss = 0.5 * np.dot(scores, scores)
            onlineLoss += loss
            totalOnlineLoss += loss
        else:
            # logistic loss; note `pos` here temporarily holds log-probabilities
            # (the name is reused as an offset counter elsewhere in this loop)
            pos = logistic.logcdf(scores)
            neg = logistic.logcdf(-scores)
            pos -= neg
            scores = logistic.cdf(scores)
            loss = -np.dot(pos[yCols], yVals)-neg.sum()
            scores[yCols] -= yVals
            onlineLoss += loss
            totalOnlineLoss += loss

        # 4. Compute gradient as outer product
        # this will be dense in general, unfortunately
        g = np.outer(xVals, scores)

        # 5. Compute updated point (store it in g)
        if useAdaGrad:
            if useSharedStep:
                n[xInds] += np.square(g).sum(1)
                etaVec = np.tile(eta/(1+np.sqrt(n[xInds])), (nl,1)).T
            else:
                n[xInds,:] += np.square(g)
                etaVec = eta/(1+np.sqrt(n[xInds,:]))
        else:
            etaVec = eta
        g *= -etaVec
        pos = 0
        for xI in range(xInds.size):
            xInd = xInds[xI]
            numW = wRows[xInd].size
            endPos = pos+numW
            g[xI,wRows[xInd]] += tempW[pos:endPos]
            pos = endPos

        # 6. Sparsify updated point and store it back to W
        # now g holds dense (over labels) W - eta*g
        reassignToConvertedW(wRows, wData, xInds, g)

        # Print output periodically (requires flushing all lazy prox updates)
        if (t+1) % outputFreq == 0:
            bringAllUpToDate(wRows, wData, tVec, t+1)
            tVec = np.tile(t+1, nc)
            printOutputLine(subEpoch, wRows, wData, b, testX, testY, l1, l2, onlineLoss / outputFreq)
            subEpoch += 1
            onlineLoss = 0

    # print output for whole epoch
    if nr % outputFreq != 0: # otherwise we are already up to date
        bringAllUpToDate(wRows, wData, tVec, nr)
    printOutputLine("*", wRows, wData, b, testX, testY, l1, l2, totalOnlineLoss / nr)
    print
# Compute regularization value
def getRegLoss(wData, l1, l2):
    """Elastic-net penalty l1*||W||_1 + (l2/2)*||W||_2^2 summed over all
    stored (sparse) weight rows."""
    total = 0
    for row in wData:
        total += l1 * linalg.norm(row, 1) + 0.5 * l2 * np.dot(row, row)
    return total
# compute the loss and example-based F1
def getLoss(X, wRows, wData, b, y):
    """Compute average loss and example-based (micro-averaged per example) F1
    over the dataset ``(X, y)`` using the sparse weights and bias ``b``.

    Uses module globals ``nl`` (number of labels) and ``useSqErr`` (squared
    loss of the linear probability model vs. logistic loss).  Predictions are
    thresholded at probability 0.3 (log-odds threshold in the logistic case).
    """
    nr,nc = X.shape
    assert y.shape == (nr,nl)
    assert wRows.size == wData.size == nc
    loss = 0
    scores = np.ndarray(nl)
    # NOTE(review): `classes` appears unused below -- likely leftover scratch.
    classes = np.ndarray(nl)
    if useSqErr:
        thresh = 0.3
    else:
        # same 0.3 probability cutoff, expressed as a log-odds threshold
        thresh = math.log(0.3 / 0.7)
    totalF1 = 0
    for r in range(nr):
        startRow, endRow = X.indptr[r], X.indptr[r+1]
        xInds = X.indices[startRow:endRow]
        xVals = X.data[startRow:endRow]
        rowLen = endRow - startRow
        # accumulate the sparse dot product W^T x for this example
        scores = np.zeros(nl)
        for (ind, val) in zip(xInds, xVals):
            weightVals = wData[ind]
            weightInds = wRows[ind]
            scores[weightInds] += val * weightVals
        scores += b
        positives = scores > thresh
        startRow, endRow = y.indptr[r], y.indptr[r+1]
        yInds = y.indices[startRow:endRow]
        yVals = y.data[startRow:endRow]
        if useSqErr:
            # clipped squared error of the linear probability model
            scores[yInds] = yVals - scores[yInds]
            scores = np.clip(scores, 0, np.inf)
            scores[yInds] *= -1
            loss += 0.5 * np.dot(scores, scores)
        else:
            # logistic loss: -sum_j [ y_j*log p_j + (1-y_j)*log(1-p_j) ]
            pos = logistic.logcdf(scores)
            neg = logistic.logcdf(-scores)
            pos -= neg
            loss += (-pos[yInds].dot(yVals)-neg.sum())
        # per-example F1 from true/false positives and false negatives
        tp = positives[yInds].sum()
        fn = (~positives)[yInds].sum()
        fp = positives.sum() - tp # tp + fp = p
        if tp > 0:
            totalF1 += (2.0 * tp) / (2.0 * tp + fn + fp)
        elif fn + fp == 0:
            # no labels and no predictions counts as a perfect example
            totalF1 += 1
    loss /= nr
    f1Arr = totalF1 / nr
    return loss, f1Arr
# Get macro F1 and optionally output per-label F1 and label frequencies to file
def getLossMacro(X, wRows, wData, b, y, outputFilename=""):
    """Return the macro-averaged F1 over all labels.

    When outputFilename is non-empty, also write one "<label frequency>\t<f1>"
    line per label to that file.
    NOTE(review): relies on module globals nl, useSqErr, math, np.
    """
    nr,nc = X.shape
    assert y.shape == (nr,nl)
    assert wRows.size == wData.size == nc
    # same decision threshold as getLoss
    if useSqErr:
        thresh = 0.3
    else:
        thresh = math.log(0.3 / 0.7)
    # per-label true-positive / false-positive / false-negative counts
    tp = np.zeros(nl, dtype="int")
    fp = np.zeros(nl, dtype="int")
    fn = np.zeros(nl, dtype="int")
    sZeros = 0
    for r in range(nr):
        startRow, endRow = X.indptr[r], X.indptr[r+1]
        xInds = X.indices[startRow:endRow]
        xVals = X.data[startRow:endRow]
        rowLen = endRow - startRow
        scores = np.zeros(nl)
        for (ind, val) in zip(xInds, xVals):
            weightVals = wData[ind]
            weightInds = wRows[ind]
            scores[weightInds] += val * weightVals
        # NOTE(review): sZeros is overwritten every iteration and never read —
        # looks like leftover debugging state.
        sZeros = (scores == 0).sum()
        scores += b
        positives = scores > thresh
        startRow, endRow = y.indptr[r], y.indptr[r+1]
        yVals = y.indices[startRow:endRow]
        # dense boolean truth vector for this row's labels
        truth = np.zeros(nl, dtype="bool")
        truth[yVals] = True
        tps = np.logical_and(truth, positives)
        tp[tps] += 1
        fps = np.logical_and(~truth, positives)
        fp[fps] += 1
        fns = np.logical_and(truth, ~positives)
        fn[fns] += 1
    # per-label F1; labels never predicted correctly default to 0
    nonZeros = tp > 0
    f1 = np.zeros(nl)
    f1[nonZeros] = (2.0 * tp[nonZeros]) / (2.0 * tp[nonZeros] + fp[nonZeros] + fn[nonZeros])
    # labels with no occurrences and no predictions count as perfect
    goodZeros = np.logical_and(tp == 0, np.logical_and(fp == 0, fn == 0))
    f1[goodZeros] = 1
    macroF1 = np.average(f1)
    if outputFilename != "":
        labFreq = y.sum(0).getA1() / nr
        with open(outputFilename, "w") as outputFile:
            for (freq, f1val) in zip(labFreq, f1):
                outputFile.write(str(freq) + "\t" + str(f1val) + "\n")
    return macroF1
# split a csr_matrix into two
def split(indptr, indices, data, splitPoint):
    """Split raw CSR arrays into (train, test) csr_matrix at row splitPoint.

    Rows [0, splitPoint) become the train matrix and rows [splitPoint, nr)
    become the test matrix; both share the same column count.
    """
    numCols = indices.max() + 1
    numRows = indptr.size - 1
    # the test rows' indptr must be rebased so it starts at zero
    testPtr = indptr[splitPoint:].copy()
    cut = testPtr[0]
    test = sp.csr_matrix(
        (data[cut:], indices[cut:], testPtr - cut),
        (numRows - splitPoint, numCols))
    train = sp.csr_matrix(
        (data[:cut], indices[:cut], indptr[:splitPoint+1]),
        (splitPoint, numCols))
    return train, test
# read data formatted for bioASQ
def makeBioASQData(dataFilename, testDataFilename, trainN, trainFrac, labelFrac, testN):
    """Read BioASQ-formatted data and return (X, y, testX, testY) CSR matrices.

    Each input line is "<space-separated feature ids>\t<space-separated label ids>".
    Features are weighted as log(1+count) * log(nr/docFreq) (TF-IDF style with
    absolute discounting) and each row is normalized to unit 2-norm.
    trainFrac subsamples training lines; trainN/testN cap row counts; when
    testDataFilename is empty the last testN train rows become the test split.
    NOTE(review): relies on module globals gzip, math, np, sp, linalg, split.
    """
    assert 0 <= trainFrac <= 1
    assert not ((testDataFilename == "") and (testN == 0))
    if dataFilename.endswith(".gz"):
        datafile = gzip.open(dataFilename)
    else:
        datafile = open(dataFilename)
    # --- pass 1: count rows, nonzeros, and per-feature document frequency ---
    nr = 0
    numVals = 0
    numLabVals = 0
    keeperCounter = 0
    featCounts = {}
    line_process_counter = 0
    for line in datafile:
        line_process_counter += 1
        if line_process_counter % 100 == 0:
            print "pass 1 of 4: " + str(line_process_counter)
        # fractional subsampling: keep a line each time the accumulator
        # crosses 1 (deterministic, keeps ~trainFrac of lines)
        keeperCounter += trainFrac
        if keeperCounter < 1:
            continue
        else:
            keeperCounter -= 1
        splitLine = line.split('\t')
        assert (len(splitLine) == 2)
        feats = set(splitLine[0].split(' '))
        numVals += len(feats)
        for feat in feats:
            intFeat = int(feat)
            if intFeat in featCounts:
                featCounts[intFeat] += 1
            else:
                featCounts[intFeat] = 1
        numLabVals += splitLine[1].count(' ') + 1
        nr += 1
        if nr == trainN: break
    datafile.close()
    print "Made it past reading data file"
    # preallocate raw CSR arrays for the training matrices
    Xdata = np.ndarray(numVals)
    Xindices = np.ndarray(numVals, dtype='int64')
    Xindptr = np.ndarray(nr+1, dtype="int64")
    Xindptr[0] = 0
    Ydata = np.ndarray(numLabVals)
    Yindices = np.ndarray(numLabVals, dtype='int64')
    Yindptr = np.ndarray(nr+1, dtype="int64")
    Yindptr[0] = 0
    insNum = 0
    featIdx = 0
    labIdx = 0
    keeperCounter = 0
    def addFeat(indices, data, idx, feat, count):
        # write one (feature, tf-idf value) pair at position idx
        indices[idx] = feat
        adjCount = featCounts[feat] - 0.5 #absolute discounting
        data[idx] = math.log1p(count) * math.log(float(nr) / adjCount)
    def addIns(splitFeats, idx, indices, data):
        # append one instance's features (sorted, deduplicated into counts)
        # starting at position idx; returns the new end position
        intFeats = []
        for strFeat in splitFeats:
            intFeats.append(int(strFeat))
        intFeats.sort()
        startIdx = idx
        # add feats, using log(1+count) * log(nr/totalCount) as feature value
        count = 0
        currFeat = -1
        for feat in intFeats:
            if feat != currFeat:
                # flush the previous run; features unseen in pass 1 are dropped
                if currFeat in featCounts:
                    addFeat(indices, data, idx, currFeat, count)
                    idx +=1
                count = 1
            else:
                count += 1
            currFeat = feat
        # flush the final run
        if currFeat in featCounts:
            addFeat(indices, data, idx, currFeat, count)
            idx += 1
        # normalize to unit 2-norm
        xVec = data[startIdx:idx]
        xVec /= linalg.norm(xVec)
        return idx
    # --- pass 2: fill the training CSR arrays ---
    if dataFilename.endswith(".gz"):
        datafile = gzip.open(dataFilename)
    else:
        datafile = open(dataFilename)
    print "second datafile loop"
    second_line_counter = 0
    for line in datafile:
        second_line_counter += 1
        if second_line_counter % 100 == 0:
            print "pass 2 of 4: " + str(second_line_counter)
        # same subsampling schedule as pass 1 so the same lines are kept
        keeperCounter += trainFrac
        if keeperCounter < 1:
            continue
        else:
            keeperCounter -= 1
        splitLine = line.split('\t')
        assert (len(splitLine) == 2)
        # extract feats as integers and sort
        splitFeats = splitLine[0].split(' ')
        featIdx = addIns(splitFeats, featIdx, Xindices, Xdata)
        Xindptr[insNum+1] = featIdx
        # same stuff with labels (here there should be only 1 per line)
        splitLabels = splitLine[1].split(' ')
        intLabels = []
        for strLab in splitLabels:
            intLabels.append(int(strLab))
        intLabels.sort()
        numLabels = len(intLabels)
        endLabIdx = labIdx + numLabels
        Yindices[labIdx:endLabIdx] = intLabels
        Ydata[labIdx:endLabIdx] = np.ones(numLabels)
        Yindptr[insNum+1] = endLabIdx
        labIdx = endLabIdx
        insNum += 1
        if insNum == trainN: break
    datafile.close()
    assert insNum == nr
    if testDataFilename != "":
        # --- pass 3: size the test arrays (only features seen in training count) ---
        if testDataFilename.endswith(".gz"):
            datafile = gzip.open(testDataFilename)
        else:
            datafile = open(testDataFilename)
        testNumVals = 0
        testNumLabVals = 0
        testNR = 0
        third_line_counter = 0
        for line in datafile:
            third_line_counter += 1
            if third_line_counter % 100 == 0:
                print "pass 3 of 4: " + str(third_line_counter)
            splitLine = line.split('\t')
            assert (len(splitLine) == 2)
            feats = set(splitLine[0].split(' '))
            for feat in feats:
                if int(feat) in featCounts:
                    testNumVals += 1
            testNumLabVals += splitLine[1].count(' ') + 1
            testNR += 1
            if testNR == testN: break
        datafile.close()
        testXdata = np.ndarray(testNumVals)
        testXindices = np.ndarray(testNumVals, dtype='int64')
        testXindptr = np.ndarray(testNR+1, dtype="int64")
        testXindptr[0] = 0
        testYdata = np.ndarray(testNumLabVals)
        testYindices = np.ndarray(testNumLabVals, dtype='int64')
        testYindptr = np.ndarray(testNR+1, dtype="int64")
        testYindptr[0] = 0
        insNum = 0
        featIdx = 0
        labIdx = 0
        # --- pass 4: fill the test CSR arrays ---
        if testDataFilename.endswith(".gz"):
            datafile = gzip.open(testDataFilename)
        else:
            datafile = open(testDataFilename)
        fourth_line_count = 0
        for line in datafile:
            fourth_line_count += 1
            if fourth_line_count % 100 == 0:
                print "pass 4 of 4: " + str(fourth_line_count)
            splitLine = line.split('\t')
            assert (len(splitLine) == 2)
            # extract feats as integers and sort
            splitFeats = splitLine[0].split(' ')
            featIdx = addIns(splitFeats, featIdx, testXindices, testXdata)
            testXindptr[insNum+1] = featIdx
            # same stuff with labels (here there should be only 1 per line)
            splitLabels = splitLine[1].split(' ')
            intLabels = []
            for strLab in splitLabels:
                intLabels.append(int(strLab))
            intLabels.sort()
            numLabels = len(intLabels)
            endLabIdx = labIdx + numLabels
            testYindices[labIdx:endLabIdx] = intLabels
            testYdata[labIdx:endLabIdx] = np.ones(numLabels)
            testYindptr[insNum+1] = endLabIdx
            labIdx = endLabIdx
            insNum += 1
            if insNum == testN: break
        datafile.close()
        assert insNum == testNR
        numFeats = max(featCounts.keys()) + 1
        print "setting CSR matrices before returning"
        X = sp.csr_matrix((Xdata, Xindices, Xindptr), (nr, numFeats))
        testX = sp.csr_matrix((testXdata, testXindices, testXindptr), (testNR, numFeats))
        numLab = max(Yindices.max(), testYindices.max()) + 1
        y = sp.csr_matrix((Ydata, Yindices, Yindptr), (nr, numLab))
        testY = sp.csr_matrix((testYdata, testYindices, testYindptr), (testNR, numLab))
    else:
        # no separate test file: carve the last testN rows off the train data
        beginTest = nr - testN
        X, testX = split(Xindptr, Xindices, Xdata, beginTest)
        y, testY = split(Yindptr, Yindices, Ydata, beginTest)
    if trainN < np.inf:
        # compact to remove all zero features and labels
        # for testing only
        featTotals = X.sum(0).getA1() + testX.sum(0).getA1()
        nonZero = featTotals > 0
        nzCount = nonZero.sum()
        print "Removing %d zero features" % (nonZero.size - nzCount)
        X = sp.csr_matrix(X.todense()[:,nonZero])
        testX = sp.csr_matrix(testX.todense()[:,nonZero])
        labTotals = y.sum(0).getA1() + testY.sum(0).getA1()
        nonZero = labTotals > 0
        nzCount = nonZero.sum()
        print "Removing %d zero labels" % (nonZero.size - nzCount)
        y = sp.csr_matrix(y.todense()[:,nonZero])
        testY = sp.csr_matrix(testY.todense()[:,nonZero])
    # remove infrequent labels
    if labelFrac < 1:
        labCounts = y.sum(0).getA1()
        percentile = np.percentile(labCounts, (1-labelFrac)*100)
        keepLabs = np.where(labCounts > percentile)[0]
        y = y[:,keepLabs]
        testY = testY[:,keepLabs]
    return X, y, testX, testY
# set default values before reading command line
l1 = 0
l2 = 0
useBias = False
useAdaGrad = False
useSharedStep = False
profile = False
sampleWithReplacement = False
useSqErr = False
usePerm = False
useScaledAdaGrad = False
eta = 1
epochs=10
dataFilename = ""
testDataFilename = ""
modelOutputFile = ""
modelInputFile = ""
maxN=np.inf
testN=0
outFreq=np.inf
trainFrac=1
labelFrac=1
# NOTE(review): the usage text says eta defaults to 1e-1 but the actual
# default above is 1 — confirm which is intended.
usage = """options:
-a: use AdaGrad
-r: sample with replacement (not looping over the data)
-p: choose new permutation for each pass
-d: data file (tsv format, may be gzipped, based on extension)
-b: add fixed bias term based on base rates for each label
-q: use squared error (default is logistic)
-s: use shared AdaGrad step sizes for all labels
-n: use prefix of data of this size
-t: read at most this many test instances
-T: number of training epochs
-o: output frequency (if smaller than one epoch)
long options:
--l1: weight for l1 regularization (default: 0)
--l2: weight for l2 regularization (default: 0)
--eta: step size (default: 1e-1)
--profile: turn on profiling
--trainFrac: fraction of train instances to keep
--labelFrac: fraction of labels to keep
--testD: test data file
--outputFile: file to write model to
--inputFile: file to read model from (no model will be trained)
--scaledAdaGrad: scale AdaGrad step by sqrt(# labels)
"""
# parse command-line options; invalid options print usage and exit
try:
    opts, args = getopt.getopt(sys.argv[1:],
                               "arqt:n:T:bpsd:o:",
                               ["l1=","l2=","eta=","profile",
                                "trainFrac=", "labelFrac=", "testD=",
                                "outputFile=", "inputFile=", "scaledAdaGrad"])
except getopt.GetoptError:
    print usage
    sys.exit(2)
for opt, arg in opts:
    if opt in ('-h', '--help'):
        print usage
        sys.exit()
    elif opt == '-s':
        useSharedStep = True
    elif opt == '-a':
        useAdaGrad = True
    elif opt == '-r':
        sampleWithReplacement = True
    elif opt == '-q':
        useSqErr = True
    elif opt == '-p':
        usePerm = True
    elif opt == '-b':
        useBias = True
    elif opt == '-d':
        dataFilename = arg
    elif opt == '--testD':
        testDataFilename = arg
    elif opt == '-n':
        maxN = int(arg)
        assert 0 < maxN
    elif opt == '-t':
        testN = int(arg)
        assert 0 <= testN
    elif opt == '-o':
        outFreq = int(arg)
        assert 0 < outFreq
    elif opt == '-T':
        epochs = int(arg)
        assert 0 < epochs
    elif opt == '--l1':
        l1 = float(arg)
        assert 0 <= l1
    elif opt == '--l2':
        l2 = float(arg)
        assert 0 <= l2
    elif opt == '--scaledAdaGrad':
        useScaledAdaGrad = True
    elif opt == '--eta':
        eta = float(arg)
        assert 0 < eta
    elif opt == '--trainFrac':
        trainFrac = float(arg)
        assert 0 < trainFrac
    elif opt == '--outputFile':
        modelOutputFile = arg
    elif opt == '--inputFile':
        modelInputFile = arg
    elif opt == '--labelFrac':
        labelFrac = float(arg)
        assert 0 < labelFrac
    elif opt == '--profile':
        profile = True
# can't turn on shared step without AdaGrad
assert useAdaGrad or not useSharedStep
# can't turn on scaled AdaGrad without shared step
assert useSharedStep or not useScaledAdaGrad
# can't both train a model and read pre-trained model
assert not (modelOutputFile and modelInputFile)
# echo the effective configuration
print "Running with options:"
if len(dataFilename) > 0:
    print "data filename: " + dataFilename
print "useAdaGrad: " + str(useAdaGrad)
print "useSharedStep: " + str(useSharedStep)
print "useScaledAdaGrad: " + str(useScaledAdaGrad)
print "sampleWithReplacement: " + str(sampleWithReplacement)
print "useSqErr: " + str(useSqErr)
print "use fixed bias: " + str(useBias)
print "usePerm: " + str(usePerm)
print "epochs: " + str(epochs)
if maxN < np.inf:
    print "n: " + str(maxN)
# NOTE(review): testN defaults to 0 so this condition is always true —
# probably meant to mirror the maxN check above.
if testN < np.inf:
    print "testN: " + str(testN)
if outFreq < np.inf:
    print "outputFreq: " + str(outFreq)
print "l1: %e" % l1
print "l2: %e" % l2
print "eta: %e" % eta
if trainFrac < 1:
    print "trainFrac: %e" % trainFrac
if labelFrac < 1:
    print "labelFrac: %e" % labelFrac
if modelOutputFile != "":
    print "modelOutputFile: " + modelOutputFile
if modelInputFile != "":
    print "modelInputFile: " + modelInputFile
print
# X, y, testX, testY = makeArtificialDataMulti(3, maxN, 50, 0.2, 123, testN)
# haveTestData = True
# X, y, testX, testY = makeMNISTdata(maxN, 123)
# haveTestData = True
np.random.seed(123)
X, y, testX, testY = makeBioASQData(dataFilename, testDataFilename, maxN, trainFrac, labelFrac, testN)
haveTestData = True
print ("pre-processing returned")
# cache the pre-processed matrices so later runs can skip pre-processing
f_X = open("X.pickle", "w")
f_y = open("y.pickle", "w")
f_testX = open("testX.pickle", "w")
f_testY = open("testY.pickle", "w")
cPickle.dump(X, f_X)
cPickle.dump(y, f_y)
cPickle.dump(testX, f_testX)
cPickle.dump(testY, f_testY )
f_X.close()
f_y.close()
f_testX.close()
f_testY.close()
print ("wrote files to disk")
nr,nc = X.shape
nl = y.shape[1]
print str(nr) + " train instances, " + str(testX.shape[0]) + " test instances, " + str(nc) + " features, " + str(nl) + " labels."
print str(nc * nl) + " total weights."
posFrac = y.sum() / (nr * nl)
print "%f nnz feats, " % (1. * X.size / (nr * nc)),
print "%f nnz labels" % posFrac
# w represents the weight vector
# stored sparsely per feature: wRows[c] holds label indices, wData[c] values
wRows, wData = np.ndarray(nc, dtype=object), np.ndarray(nc, dtype=object)
for c in range(nc):
    wRows[c] = np.ndarray(0, np.dtype(int))
    wData[c] = np.ndarray(0, np.dtype(float))
# b is the bias
b = np.zeros(nl)
if useBias:
    if useSqErr:
        b = y.sum(0) / nr
    else:
        # set bias using base rate with add-one smoothing
        b = (y.sum(0) + 1.) / (nr + 2.)
        b = np.log(b/(1-b))
    if isinstance(b,np.matrix):
        b = b.getA1()
if useAdaGrad:
    # n is the sum of squared gradients, used by AdaGrad
    if useSharedStep:
        n = np.zeros(nc)
    else:
        n = np.zeros((nc,nl))
if useScaledAdaGrad:
    eta *= math.sqrt(nl)
if profile:
    pr = cProfile.Profile()
    pr.enable()
if modelInputFile == "":
    # NOTE(review): range(epochs+1) with a break at epoch == epochs is
    # equivalent to range(epochs) — confirm no off-by-one intent.
    # NOTE(review): `n` is only defined when useAdaGrad is set; trainProx
    # below would raise NameError without -a — confirm.
    for epoch in range(epochs+1):
        if epoch == epochs:
            break
        if usePerm:
            perm = np.random.permutation(nr)
        print "beginning traning"
        trainProx(wRows, wData, n, b, X, y, eta, l1, l2, outFreq)
        print "done training"
    if modelOutputFile != "":
        np.savez_compressed(modelOutputFile, b=b, wRows=wRows, wData=wData)
else:
    # skip training entirely and evaluate a previously saved model
    print "Loading input file: ", modelInputFile
    data = np.load(modelInputFile)
    b = data['b']
    wRows = data['wRows']
    wData = data['wData']
# final evaluation on train and test splits
print "Training set:"
testLoss, f1 = getLoss(X, wRows, wData, b, y)
print "loss: %f" % testLoss
print "per-example f1: %f" % f1
f1 = getLossMacro(X, wRows, wData, b, y, "trainMacroF1")
print "macro F1: ", f1
print "Test set:"
testLoss, f1 = getLoss(testX, wRows, wData, b, testY)
print "loss: %f" % testLoss
print "per-example f1: %f" % f1
f1 = getLossMacro(testX, wRows, wData, b, testY, "testMacroF1")
print "Test macro F1: ", f1
# testLoss, testF1 = getLoss(testX, wRows, wData, b, testY)
#
# print "Test loss: ", testLoss
# print "Test F1: ", testF1
if profile:
    pr.disable()
    s = StringIO.StringIO()
    sortby = 'cumulative'
    ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
    ps.print_stats()
    print s.getvalue()
| {
"repo_name": "zackchase/sparse-multilabel-sgd",
"path": "src/sgd2.py",
"copies": "1",
"size": "29756",
"license": "mit",
"hash": -1235638635385683500,
"line_mean": 28.9356136821,
"line_max": 165,
"alpha_frac": 0.5569969082,
"autogenerated": false,
"ratio": 3.320611538890749,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43776084470907484,
"avg_score": null,
"num_lines": null
} |
# Apollonius problem:
# Find a circle that touches three given circles from the inside
# (recursive extension)
from tkinter import *
import random
MAX_ITERATIONS = 3000
class Apollonius:
    """Draw an Apollonian gasket on a Tk canvas and export it for POV-Ray."""
    def __init__(self):
        """Build the Tk window and the square drawing canvas."""
        self.root = Tk()
        self.root.title("HIB - Apollonius Problem")
        SIZE = 600
        self.c = Canvas(self.root,width=SIZE+6,height=SIZE+6,bg="#e0e0e0")
        self.c.pack()
        # canvas midpoint; doubles as the world-to-pixel scale factor below
        self.mid = SIZE/2+3
    # plot circle
    def circle(self,x,y,r,c=0):
        """Draw circle (x, y, r) in world coordinates; c != 0 draws it red."""
        scale = self.mid
        if c != 0: color = "#ff0000"
        else: color = "#0000ff"
        self.c.create_oval(self.mid+scale*(x-r),self.mid+scale*(y-r),
            self.mid+scale*(x+r),self.mid+scale*(y+r),outline=color )
        self.c.update()
    def start(self,depth=4,shrink=100.0):
        """Seed three mutually tangent circles and fill the gaps recursively.

        depth limits the recursion; circles smaller than D/shrink are skipped.
        """
        D = 4
        # the three seed circles sit on the corners of an equilateral triangle
        h = D/2.0*(3**0.5)
        x1,y1,r1 = -D/2,-h/3+h/12,D/2
        x2,y2,r2 = D/2,-h/3+h/12,D/2
        x3,y3,r3 = 0.0,h*2/3+h/12,D/2
        self.circle(x1,y1,r1,1)
        self.circle(x2,y2,r2,1)
        self.circle(x3,y3,r3,1)
        self.solution = []
        self.minimal_r = float(D)/shrink
        self.recurse((x1,y1,r1),(x2,y2,r2),(x3,y3,r3),depth)
        print(len(self.solution),"circles found")
    def recurse(self,s1,s2,s3,depth):
        """Add the circle tangent to s1, s2, s3 and recurse into the three gaps.

        The radius formula is the inner-tangent solution of Descartes'
        circle theorem (presumably — confirm against the derivation);
        the center is solved from the resulting linear system.
        """
        if depth==0: return
        x1,y1,r1 = s1
        x2,y2,r2 = s2
        x3,y3,r3 = s3
        ## new radius
        r = (r1*r2*r3)/(r1*r2+r2*r3+r1*r3+2*(r1*r2*r3*(r1+r2+r3))**0.5)
        ## new center: subtracting the tangency equations pairwise yields
        ## two linear equations in (x, y), solved here by Cramer's rule
        a,a1 = 2*(x1-x2),2*(x1-x3)
        b,b1 = 2*(y1-y2),2*(y1-y3)
        c,c1 = 2*(r1-r2),2*(r1-r3)
        d,d1 = (x1**2+y1**2-r1**2)-(x2**2+y2**2-r2**2),(x1**2+y1**2-r1**2)-(x3**2+y3**2-r3**2)
        x = (b1*d-b*d1-b1*c*r+b*c1*r)/(a*b1-b*a1)
        y = (-a1*d+a*d1+a1*c*r-a*c1*r)/(a*b1-b*a1)
        s = (x,y,r)
        # stop recursing once the circles are too small to matter
        if r >= self.minimal_r:
            self.circle(x,y,r)
            self.solution.append(s)
            self.recurse(s1,s2,s,depth-1)
            self.recurse(s1,s,s3,depth-1)
            self.recurse(s,s2,s3,depth-1)
    # save list of spheres as povray union object
    def save(self,name):
        """Write the collected circles as a POV-Ray sphere union include file."""
        if self.solution == []: return
        f = open(name,"w")
        f.write("// automatic include file from apollonius.py\n\n")
        f.write("#declare ApolloObject = union {\n")
        for x,y,r in self.solution:
            # the 2D y coordinate becomes POV-Ray's z axis; height is 0
            f.write(" sphere {<%f,%f,%f>,%f}\n"%(x,0.0,y,r))
        f.write("}\n")
        f.close()
# Build the GUI and draw the gasket (depth 10, minimum radius D/500).
a = Apollonius()
a.start(10,500) # perhaps depth 12, shrink factor 1000
a.save("./apollo.inc") | {
"repo_name": "kantel/Virtuelle-Wunderkammer",
"path": "sources/apollonius/apollonius.py",
"copies": "1",
"size": "2559",
"license": "mit",
"hash": 7869500383131071000,
"line_mean": 29.8313253012,
"line_max": 94,
"alpha_frac": 0.5129007037,
"autogenerated": false,
"ratio": 2.3382084095063984,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8158631154141752,
"avg_score": 0.038495591812929275,
"num_lines": 83
} |
"""A PolyData file reader object.
"""
# Author: R.Sreekanth <sreekanth [at] aero.iitb.ac.in>
# Suyog Dutt Jain <suyog.jain [at] aero.iitb.ac.in>
# Copyright (c) 2009-2015, Enthought, Inc.
# License: BSD Style.
# Standard library imports.
from os.path import basename
# Enthought imports.
from traits.api import Instance, Str,Dict
from traitsui.api import View, Item, Group, Include
from tvtk.api import tvtk
# Local imports
from mayavi.core.file_data_source import FileDataSource
from mayavi.core.pipeline_info import PipelineInfo
from mayavi.core.common import error
########################################################################
# `PolyDataReader` class
########################################################################
class PolyDataReader(FileDataSource):
    """A PolyData file reader. The reader supports all the
    different types of poly data files.

    The concrete tvtk reader is chosen from the file extension via
    the `_reader_dict` trait.
    """
    # The version of this class. Used for persistence.
    __version__ = 0
    # The PolyData file reader
    reader = Instance(tvtk.Object, allow_none=False,
                      record=True)
    ######################################################################
    # Private Traits
    # Maps a lower-case file extension to the tvtk reader that handles it
    # (defaults supplied by __reader_dict_default below).
    _reader_dict = Dict(Str, Instance(tvtk.Object))
    # Our View.
    view = View(Group(Include('time_step_group'),
                      Item(name='base_file_name'),
                      Item(name='reader',
                           style='custom',
                           resizable=True),
                      show_labels=False),
                resizable=True)
    #output_info = PipelineInfo(datasets=['none'])
    output_info = PipelineInfo(datasets=['poly_data'],
                               attribute_types=['any'],
                               attributes=['any'])
    ######################################################################
    # `object` interface
    ######################################################################
    def __set_pure_state__(self, state):
        # The reader has its own file_name which needs to be fixed.
        state.reader.file_name = state.file_path.abs_pth
        # Now call the parent class to setup everything.
        super(PolyDataReader, self).__set_pure_state__(state)
    ######################################################################
    # `FileDataSource` interface
    ######################################################################
    def update(self):
        """Refresh the reader and re-render, if a file is set."""
        self.reader.update()
        if len(self.file_path.get()) == 0:
            return
        self.render()
    def has_output_port(self):
        """ Return True as the reader has output port."""
        return True
    def get_output_object(self):
        """ Return the reader output port."""
        return self.reader.output_port
    ######################################################################
    # Non-public interface
    ######################################################################
    def _file_path_changed(self, fpath):
        """Trait handler: pick a reader for the new file and wire it up."""
        value = fpath.get()
        if len(value) == 0:
            return
        # Extract the file extension
        splitname = value.strip().split('.')
        extension = splitname[-1].lower()
        # Select polydata reader based on file type
        old_reader = self.reader
        if extension in self._reader_dict:
            self.reader = self._reader_dict[extension]
        else:
            error('Invalid extension for file: %s'%value)
            return
        self.reader.file_name = value.strip()
        self.reader.update()
        self.reader.update_information()
        # move the render notification from the old reader to the new one
        if old_reader is not None:
            old_reader.on_trait_change(self.render, remove=True)
        self.reader.on_trait_change(self.render)
        old_outputs = self.outputs
        self.outputs = [self.reader.output]
        if self.outputs == old_outputs:
            # same output object as before: fire data_changed explicitly so
            # downstream pipeline members still refresh
            self.data_changed = True
        # Change our name on the tree view
        self.name = self._get_name()
    def _get_name(self):
        """ Returns the name to display on the tree view. Note that
        this is not a property getter.
        """
        fname = basename(self.file_path.get())
        ret = "%s"%fname
        if len(self.file_list) > 1:
            ret += " (timeseries)"
        if '[Hidden]' in self.name:
            ret += ' [Hidden]'
        return ret
    def __reader_dict_default(self):
        """Default value for reader dict."""
        rd = {'stl':tvtk.STLReader(),
              'stla':tvtk.STLReader(),
              'stlb':tvtk.STLReader(),
              'txt':tvtk.SimplePointsReader(),
              'raw':tvtk.ParticleReader(),
              'ply':tvtk.PLYReader(),
              'pdb':tvtk.PDBReader(),
              'slc':tvtk.SLCReader(),
              'xyz':tvtk.XYZMolReader(),
              'obj':tvtk.OBJReader(),
              'facet':tvtk.FacetReader(),
              'cube':tvtk.GaussianCubeReader(),
              'g':tvtk.BYUReader(),
              }
        return rd
    # Callable to check if the reader can actually read the file
    def can_read(cls,filename):
        """ Class method to check if the reader can actually
        read the file. Returns 'True' if it can read it successfully
        else 'False'
        """
        # Extract the file extension
        splitname = filename.strip().split('.')
        extension = splitname[-1].lower()
        if extension == 'xyz':
            # try parsing the file with warnings suppressed; a non-empty
            # point set means the XYZ reader really understands it
            from vtk import vtkObject
            o = vtkObject
            w = o.GetGlobalWarningDisplay()
            o.SetGlobalWarningDisplay(0) # Turn it off.
            r = tvtk.XYZMolReader()
            r.file_name = filename
            r.update()
            o.SetGlobalWarningDisplay(w)
            if len(r.output.points) != 0:
                return True
            return False
        return None
    can_read = classmethod(can_read)
| {
"repo_name": "dmsurti/mayavi",
"path": "mayavi/sources/poly_data_reader.py",
"copies": "1",
"size": "5872",
"license": "bsd-3-clause",
"hash": 7157303271701450000,
"line_mean": 32.5542857143,
"line_max": 74,
"alpha_frac": 0.5027247956,
"autogenerated": false,
"ratio": 4.462006079027356,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5464730874627356,
"avg_score": null,
"num_lines": null
} |
"""A PolyData file reader object.
"""
# Author: R.Sreekanth <sreekanth [at] aero.iitb.ac.in>
# Suyog Dutt Jain <suyog.jain [at] aero.iitb.ac.in>
# Copyright (c) 2009, Enthought, Inc.
# License: BSD Style.
# Standard library imports.
from os.path import basename
# Enthought imports.
from traits.api import Instance, Str,Dict
from traitsui.api import View, Item, Group, Include
from tvtk.api import tvtk
# Local imports
from mayavi.core.file_data_source import FileDataSource
from mayavi.core.pipeline_info import PipelineInfo
from mayavi.core.common import error
########################################################################
# `PolyDataReader` class
########################################################################
class PolyDataReader(FileDataSource):
    """A PolyData file reader. The reader supports all the
    different types of poly data files.

    The concrete tvtk reader is chosen from the file extension via
    the `_reader_dict` trait.
    """
    # The version of this class. Used for persistence.
    __version__ = 0
    # The PolyData file reader
    reader = Instance(tvtk.Object, allow_none=False,
                      record=True)
    ######################################################################
    # Private Traits
    # Maps a lower-case file extension to the tvtk reader that handles it
    # (defaults supplied by __reader_dict_default below).
    _reader_dict = Dict(Str, Instance(tvtk.Object))
    # Our View.
    view = View(Group(Include('time_step_group'),
                      Item(name='base_file_name'),
                      Item(name='reader',
                           style='custom',
                           resizable=True),
                      show_labels=False),
                resizable=True)
    #output_info = PipelineInfo(datasets=['none'])
    output_info = PipelineInfo(datasets=['poly_data'],
                               attribute_types=['any'],
                               attributes=['any'])
    ######################################################################
    # `object` interface
    ######################################################################
    def __set_pure_state__(self, state):
        # The reader has its own file_name which needs to be fixed.
        state.reader.file_name = state.file_path.abs_pth
        # Now call the parent class to setup everything.
        super(PolyDataReader, self).__set_pure_state__(state)
    ######################################################################
    # `FileDataSource` interface
    ######################################################################
    def update(self):
        """Refresh the reader and re-render, if a file is set."""
        self.reader.update()
        if len(self.file_path.get()) == 0:
            return
        self.render()
    def has_output_port(self):
        """ Return True as the reader has output port."""
        return True
    def get_output_object(self):
        """ Return the reader output port."""
        return self.reader.output_port
    ######################################################################
    # Non-public interface
    ######################################################################
    def _file_path_changed(self, fpath):
        """Trait handler: pick a reader for the new file and wire it up."""
        value = fpath.get()
        if len(value) == 0:
            return
        # Extract the file extension
        splitname = value.strip().split('.')
        extension = splitname[-1].lower()
        # Select polydata reader based on file type.
        # FIX: dict.has_key() was removed in Python 3; use the `in` operator
        # (also what the sibling copy of this class in the codebase uses).
        old_reader = self.reader
        if extension in self._reader_dict:
            self.reader = self._reader_dict[extension]
        else:
            error('Invalid extension for file: %s'%value)
            return
        self.reader.file_name = value.strip()
        self.reader.update()
        self.reader.update_information()
        # move the render notification from the old reader to the new one
        if old_reader is not None:
            old_reader.on_trait_change(self.render, remove=True)
        self.reader.on_trait_change(self.render)
        old_outputs = self.outputs
        self.outputs = [self.reader.output]
        if self.outputs == old_outputs:
            # same output object as before: fire data_changed explicitly so
            # downstream pipeline members still refresh
            self.data_changed = True
        # Change our name on the tree view
        self.name = self._get_name()
    def _get_name(self):
        """ Returns the name to display on the tree view. Note that
        this is not a property getter.
        """
        fname = basename(self.file_path.get())
        ret = "%s"%fname
        if len(self.file_list) > 1:
            ret += " (timeseries)"
        if '[Hidden]' in self.name:
            ret += ' [Hidden]'
        return ret
    def __reader_dict_default(self):
        """Default value for reader dict."""
        rd = {'stl':tvtk.STLReader(),
              'stla':tvtk.STLReader(),
              'stlb':tvtk.STLReader(),
              'txt':tvtk.SimplePointsReader(),
              'raw':tvtk.ParticleReader(),
              'ply':tvtk.PLYReader(),
              'pdb':tvtk.PDBReader(),
              'slc':tvtk.SLCReader(),
              'xyz':tvtk.XYZMolReader(),
              'obj':tvtk.OBJReader(),
              'facet':tvtk.FacetReader(),
              'cube':tvtk.GaussianCubeReader(),
              'g':tvtk.BYUReader(),
              }
        return rd
    # Callable to check if the reader can actually read the file
    def can_read(cls,filename):
        """ Class method to check if the reader can actually
        read the file. Returns 'True' if it can read it successfully
        else 'False'
        """
        # Extract the file extension
        splitname = filename.strip().split('.')
        extension = splitname[-1].lower()
        if extension == 'xyz':
            # try parsing the file with warnings suppressed; a non-empty
            # point set means the XYZ reader really understands it
            from vtk import vtkObject
            o = vtkObject
            w = o.GetGlobalWarningDisplay()
            o.SetGlobalWarningDisplay(0) # Turn it off.
            r = tvtk.XYZMolReader()
            r.file_name = filename
            r.update()
            o.SetGlobalWarningDisplay(w)
            if len(r.output.points) != 0:
                return True
            return False
        return None
    can_read = classmethod(can_read)
| {
"repo_name": "liulion/mayavi",
"path": "mayavi/sources/poly_data_reader.py",
"copies": "2",
"size": "5877",
"license": "bsd-3-clause",
"hash": -2997594761834467000,
"line_mean": 32.5828571429,
"line_max": 74,
"alpha_frac": 0.5022970904,
"autogenerated": false,
"ratio": 4.459028831562974,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006531491423016843,
"num_lines": 175
} |
#aponxi v0.6.0
import sublime
import sys
from os import path
import os
from subprocess import Popen, PIPE
from sublime_plugin import TextCommand
from sublime_plugin import WindowCommand
import sublime_plugin
import time
import functools
settings = sublime.load_settings('CoffeeScript.sublime-settings')
def run(cmd, args=[], source="", cwd=None, env=None):
    """Run an external command and capture its output.

    cmd:    executable name or path.
    args:   list of arguments (a single non-list value is wrapped in a list).
    source: text piped to stdin on Windows, or appended as a final argument
            on other platforms when non-empty.
    cwd:    working directory for the child process.
    env:    environment mapping; on non-Windows platforms, defaults to a
            PATH taken from the plugin settings.
    Returns {"okay": bool, "out": stdout str, "err": stderr str}.
    """
    # FIX: use isinstance() instead of the non-idiomatic `type(...) is list`
    # identity check (also accepts list subclasses).
    if not isinstance(args, list):
        args = [args]
    if sys.platform == "win32":
        # Windows: go through the shell and feed the source via stdin
        proc = Popen([cmd] + args, env=env, cwd=cwd, stdout=PIPE, stdin=PIPE, stderr=PIPE, shell=True)
        stat = proc.communicate(input=source.encode('utf-8'))
    else:
        if env is None:
            env = {"PATH": settings.get('binDir', '/usr/local/bin')}
        if source == "":
            command = [cmd] + args
        else:
            command = [cmd] + args + [source]
        proc = Popen(command, env=env, cwd=cwd, stdout=PIPE, stderr=PIPE)
        stat = proc.communicate()
    okay = proc.returncode == 0
    return {"okay": okay, "out": stat[0].decode('utf-8'), "err": stat[1].decode('utf-8')}
def brew(args, source):
    """Feed CoffeeScript *source* to the `coffee` binary with *args*.

    Appends the platform-appropriate flag: `-s` (read stdin) on Windows,
    `-e` (evaluate argument) elsewhere. Note: mutates the caller's list.
    """
    flag = "-s" if sys.platform == "win32" else "-e"
    args.append(flag)
    return run("coffee", args=args, source=source)
def cake(task, cwd):
    """Run a Cakefile *task* with the `cake` binary in directory *cwd*."""
    return run("cake", task, cwd=cwd)
def isCoffee(view=None):
    """Return True when *view* (default: the active view) holds CoffeeScript."""
    target = sublime.active_window().active_view() if view is None else view
    return 'source.coffee' in target.scope_name(0)
class Text():
    """Helpers for extracting text from a Sublime view."""
    @staticmethod
    def all(view):
        """Return the entire buffer contents."""
        return view.substr(sublime.Region(0, view.size()))
    @staticmethod
    def sel(view):
        """Return the concatenation of all non-empty selections."""
        chunks = [view.substr(region) for region in view.sel()
                  if not region.empty()]
        return "".join(chunks)
    @staticmethod
    def get(view):
        """Return the selected text if any, otherwise the whole buffer."""
        selected = Text.sel(view)
        return selected if len(selected) > 0 else Text.all(view)
class CompileCommand(TextCommand):
    """Compile the current file with `coffee -c`, honoring the plugin's
    noWrapper and compileDir settings, and report success in the status bar."""
    def is_enabled(self):
        # only active for CoffeeScript buffers
        return isCoffee(self.view)
    def run(self, *args, **kwargs):
        no_wrapper = settings.get('noWrapper', True)
        compile_dir = settings.get('compileDir')
        args = ['-c', self.view.file_name()]
        # print self.view.file_name()
        if no_wrapper:
            # -b: compile without the top-level safety wrapper
            args = ['-b'] + args
        # print compile_dir
        # print isinstance(compile_dir, unicode)
        # NOTE(review): `A and B or C` parses as `(A and B) or C`; this only
        # works because isinstance(None, unicode) is False — confirm intent.
        # `unicode` also makes this Python-2-only.
        if compile_dir and isinstance(compile_dir, str) or isinstance(compile_dir, unicode):
            print "Compile dir specified: " + compile_dir
            if not os.path.exists(compile_dir):
                os.makedirs(compile_dir)
                print "Compile dir did not exist, created folder: " + compile_dir
            folder, file_nm = os.path.split(self.view.file_name())
            print folder
            # --output redirects compiled JS into the configured directory
            args = ['--output', compile_dir] + args
            # print args
        # print args
        result = run("coffee", args=args)
        if result['okay'] is True:
            status = 'Compilation Succeeded'
        else:
            status = 'Compilation Failed'
        sublime.status_message(status)
class CompileAndDisplayCommand(TextCommand):
    """Compile the buffer (or selection) and show the result in a scratch
    view; kwargs["opt"] selects the coffee flag (-p prints JavaScript)."""
    def is_enabled(self):
        # only active for CoffeeScript buffers
        return isCoffee(self.view)
    def run(self, edit, **kwargs):
        output = self.view.window().new_file()
        output.set_scratch(True)
        opt = kwargs["opt"]
        if opt == '-p':
            # -p emits JavaScript, so give the scratch view JS highlighting
            output.set_syntax_file('Packages/JavaScript/JavaScript.tmLanguage')
        no_wrapper = settings.get('noWrapper', True)
        args = [opt]
        print args
        if no_wrapper:
            args = ['-b'] + args
        res = brew(args, Text.get(self.view))
        if res["okay"] is True:
            output.insert(edit, 0, res["out"])
        else:
            # show only the first line of the compiler error
            output.insert(edit, 0, res["err"].split("\n")[0])
class CheckSyntaxCommand(TextCommand):
    """Syntax-check the buffer (or selection) and report in the status bar."""
    def is_enabled(self):
        # only active for CoffeeScript buffers
        return isCoffee(self.view)
    def run(self, edit):
        result = brew(['-b', '-p'], Text.get(self.view))
        if result["okay"] is True:
            status = 'Valid'
        else:
            # first line of the compiler error is the useful part
            status = result["err"].split("\n")[0]
        sublime.status_message('Syntax %s' % status)
class RunScriptCommand(WindowCommand):
    """Evaluate the selection (or prompted input) as CoffeeScript and show
    the program output in a scratch view."""
    def finish(self, text):
        if text == '':
            return
        # expose util's puts/print helpers to the script
        script = "{puts, print} = require 'util'\n" + text
        result = brew(['-b'], script)
        if not (result["okay"] is True):
            sublime.status_message('Syntax %s' % result["err"].split("\n")[0])
            return
        output = self.window.new_file()
        output.set_scratch(True)
        edit = output.begin_edit()
        output.insert(edit, 0, result["out"])
        output.end_edit(edit)
    def run(self):
        selection = Text.sel(sublime.active_window().active_view())
        if len(selection) == 0:
            # nothing selected: prompt for a one-liner instead
            self.window.show_input_panel('Coffee >', '', self.finish, None, None)
            return
        if isCoffee():
            self.finish(selection)
class RunCakeTaskCommand(WindowCommand):
    """Prompt for a Cake task name and run it against the project Cakefile."""

    def finish(self, task):
        if task == '':
            return
        # Prefer the first project folder's Cakefile; fall back to the
        # active file's directory when there is no folder or no Cakefile.
        if not self.window.folders():
            cakepath = path.dirname(self.window.active_view().file_name())
        else:
            cakepath = path.join(self.window.folders()[0], 'Cakefile')
            if not path.exists(cakepath):
                cakepath = path.dirname(self.window.active_view().file_name())
        if not path.exists(cakepath):
            return sublime.status_message("Cakefile not found.")
        res = cake(task, cakepath)
        if res["okay"] is True:
            if "No such task" in res["out"]:
                msg = "doesn't exist"
            else:
                # BUG FIX: status message typo — was "suceeded".
                msg = "succeeded"
        else:
            msg = "failed"
        sublime.status_message("Task %s - %s." % (task, msg))

    def run(self):
        self.window.show_input_panel('Cake >', '', self.finish, None, None)
# _
# __ _ _ __ ___ _ __ __ _(_)
# / _` | '_ \ / _ \| '_ \\ \/ / |
# | (_| | |_) | (_) | | | |> <| |
# \__,_| .__/ \___/|_| |_/_/\_\_|
# |_|
def watched_filename(view_id):
    """Return a short display name for the watched input view `view_id`."""
    view = ToggleWatch.views[view_id]['input_obj']
    if view.file_name() is not None:
        # BUG FIX: split('/') returned the whole path on Windows, where
        # file names use backslash separators; os.path.basename handles
        # the platform's separator correctly.
        filename = os.path.basename(view.file_name())
    else:
        filename = "Unsaved File"
    return filename
class ToggleWatch(TextCommand):
    """Toggle live compile-on-edit ("watch") for the current CoffeeScript view.

    Class-level registries shared by all watch helpers in this file:
      views:   input view id -> state dict ('watched', 'modified',
               'input_closed', 'input_obj', plus output keys set by createOut)
      outputs: output view id -> {'boundto': input view id}
    """
    views = {}
    outputs = {}

    def is_enabled(self):
        return isCoffee(self.view)

    def run(self, edit):
        myvid = self.view.id()
        if not myvid in ToggleWatch.views:
            # First toggle for this view: register it and open an output.
            views = ToggleWatch.views
            views[myvid] = {'watched': True, 'modified': True, 'input_closed': False}
            views[myvid]["input_obj"] = self.view
            print "Now watching", watched_filename(myvid)
            createOut(myvid)
        else:
            # Already registered: flip the watched flag.
            views = ToggleWatch.views
            views[myvid]['watched'] = not views[myvid]['watched']
            if not views[myvid]['watched']:
                print "Stopped watching", watched_filename(myvid)
            # NOTE(review): when the output view has been closed, toggling
            # re-opens it and resumes watching even if 'watched' was just
            # set False above — confirm this is the intended semantics.
            if views[myvid]['output_open'] is False:
                print "Openning output and watching", watched_filename(myvid)
                createOut(myvid)
            elif views[myvid]['watched'] is True:
                print "Resuming watching", watched_filename(myvid)
                refreshOut(myvid)
def cleanUp(input_view_id):
    """Drop all bookkeeping for a watched input view and its bound output."""
    state = ToggleWatch.views[input_view_id]
    del ToggleWatch.outputs[state['output_id']]
    del ToggleWatch.views[input_view_id]
def get_output_filename(input_view_id):
    """Map the watched file's name to its compiled '<name>.js' counterpart."""
    base, _extension = os.path.splitext(watched_filename(input_view_id))
    return base + '.js'
def createOut(input_view_id):
#create output panel and save
this_view = ToggleWatch.views[input_view_id]
outputs = ToggleWatch.outputs
#print this_view
input_filename = watched_filename(input_view_id)
print input_filename
output = this_view["input_obj"].window().new_file()
output.set_scratch(True)
output.set_syntax_file('Packages/JavaScript/JavaScript.tmLanguage')
this_view['output_id'] = output.id()
this_view["output_obj"] = output
this_view["output_open"] = True
# setting output filename
# print output.settings().set('filename', '[Compiled]' + input_filename)
# Getting file extension
output_filename = get_output_filename(input_view_id)
output.set_name(output_filename)
if not output.id() in outputs:
outputs[output.id()] = {'boundto': input_view_id}
refreshOut(input_view_id)
return output
def refreshOut(view_id):
this_view = ToggleWatch.views[view_id]
this_view['last_modified'] = time.mktime(time.gmtime())
#refresh the output view
no_wrapper = settings.get('noWrapper', True)
args = ['-p']
if no_wrapper:
args = ['-b'] + args
res = brew(args, Text.get(this_view['input_obj']))
output = this_view['output_obj']
this_view['modified'] = False
if res["okay"] is True:
edit = output.begin_edit()
output.erase(edit, sublime.Region(0, output.size()))
output.insert(edit, 0, res["out"])
output.end_edit(edit)
print "Refreshed"
else:
edit = output.begin_edit()
output.erase(edit, sublime.Region(0, output.size()))
output.insert(edit, 0, res["err"].split("\n")[0])
output.end_edit(edit)
return
def isView(view_id):
    """True when `view_id` is the currently focused view (not a panel, etc.)."""
    if not view_id:
        return False
    window = sublime.active_window()
    if window is None:
        return False
    active = window.active_view()
    return active is not None and active.id() == view_id
def close_output(input_id):
views = ToggleWatch.views
v = views[input_id]
output = v['output_obj']
# output_id = v['output_id']
# print "close output"
if v['output_open'] is True:
#print "the output is open so we should attempt to close it"
output.window().focus_view(output)
output.window().run_command("close")
print watched_filename(input_id), "was closed. Closing the Output"
#v['output_open'] = False
cleanUp(input_id)
return
class CaptureEditing(sublime_plugin.EventListener):
    """Event listener that drives watch-refresh and compile-on-save."""

    def handleTimeout(self, vid):
        # Debounced refresh: only recompile when the view was modified
        # since this timer was scheduled.
        this_view = ToggleWatch.views[vid]
        modified = this_view['modified']
        if modified is True:
            # been 1000ms since the last modification
            refreshOut(vid)

    def on_modified(self, view):
        vid = view.id()
        watch_modified = settings.get('watchOnModified')
        if watch_modified is not False and vid in ToggleWatch.views:
            # Clamp the debounce delay to at least half a second; the
            # setting may be True (use default) or a number of seconds.
            if watch_modified is True:
                delay = 0.5
            elif watch_modified < 0.5:
                delay = 0.5
            else:
                delay = watch_modified
            #then we have a watched input.
            this_view = ToggleWatch.views[vid]
            if this_view['modified'] is False:
                # First change since the last refresh: arm one timer.
                this_view['modified'] = True
                if this_view['watched'] is True:
                    sublime.set_timeout(functools.partial(self.handleTimeout, vid), int(delay * 1000))
        return

    def on_post_save(self, view):
        watch_save = settings.get('watchOnSave', True)
        if watch_save:
            save_id = view.id()
            views = ToggleWatch.views
            if save_id in views:
                # getting view object
                save_view = ToggleWatch.views[save_id]
                # check if modified
                if save_view['modified'] is True:
                    refreshOut(save_id)
        compile_on_save = settings.get('compileOnSave', True)
        if compile_on_save is True and isCoffee() is True:
            print "Compiling on save..."
            view.run_command("compile")
        show_compile_output_on_save = settings.get('showOutputOnSave', True)
        if show_compile_output_on_save is True and isCoffee() is True and CompileOutput.IS_OPEN is True:
            print "Updating output panel..."
            view.run_command("compile_output")
        return

    def on_close(self, view):
        close_id = view.id()
        views = ToggleWatch.views
        if close_id in views:
            #this is an input
            views[close_id]['input_closed'] = True
            close_output(close_id)
        # NOTE(review): when the *input* closed first, close_output/cleanUp
        # already removed the registry entries, so the guard below relies on
        # 'input_closed' never being reachable in that case — confirm.
        if close_id in ToggleWatch.outputs and views[ToggleWatch.outputs[close_id]['boundto']]['input_closed'] is not True:
            #this is an output
            boundview = ToggleWatch.outputs[close_id]['boundto']
            thatview = views[boundview]
            thatview['output_open'] = False
            thatview['watched'] = False
            filename = watched_filename(boundview)
            print "The output was closed. No longer watching", filename
        return
class CompileOutput(TextCommand):
    """Compile the current buffer into a shared output panel."""

    PANEL_NAME = 'coffee_compile_output'
    # Read by CaptureEditing.on_post_save to decide whether to refresh.
    IS_OPEN = False

    def is_enabled(self):
        return isCoffee(self.view)

    def run(self, edit):
        window = self.view.window()
        no_wrapper = settings.get('noWrapper', True)
        # BUG FIX: `args` was only assigned when noWrapper was truthy, so a
        # noWrapper=False setting raised NameError below. Always bind it.
        args = ['-b'] if no_wrapper else []
        res = brew(args, Text.get(self.view))
        panel = window.get_output_panel(self.PANEL_NAME)
        panel.set_syntax_file('Packages/JavaScript/JavaScript.tmLanguage')
        panel.set_read_only(False)
        # Success shows compiled JS; failure shows the full compiler error.
        text = res["out"] if res["okay"] is True else res["err"]
        edit = panel.begin_edit()
        panel.erase(edit, sublime.Region(0, panel.size()))
        panel.insert(edit, 0, text)
        panel.end_edit(edit)
        panel.sel().clear()
        panel.set_read_only(True)
        window.run_command('show_panel', {'panel': 'output.%s' % self.PANEL_NAME})
        # BUG FIX: `self.IS_OPEN = True` only shadowed the class attribute on
        # this command instance; CaptureEditing reads CompileOutput.IS_OPEN,
        # which therefore never became True. Set it on the class.
        CompileOutput.IS_OPEN = True
| {
"repo_name": "michaelray/Iristyle-ChocolateyPackages",
"path": "EthanBrown.SublimeText2.WebPackages/tools/PackageCache/sublime-better-coffeescript/CoffeeScript.py",
"copies": "2",
"size": "14536",
"license": "mit",
"hash": 6779400021826224000,
"line_mean": 30.4632034632,
"line_max": 123,
"alpha_frac": 0.56796918,
"autogenerated": false,
"ratio": 3.713847726111395,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5281816906111395,
"avg_score": null,
"num_lines": null
} |
"""A pooled converter that behaves in very similar ways to convert_sub_dirs.py. Lower logging and stat print outs than
convert_sub_dirs.py. Useful for batch converting lower resolution video files that can't fully use all cores. """
import os
import sys
import time
from os import path
from .. import makeMediaObjectsInDirectory, MediaConverter, MediaConverterQueue, seconds_to_timecode
from .. import calc_bits_per_pixel, split_ext
"""Converts media files in specified sub directories of parent_dir to x265 video and opus audio. Keeps only the
first video and audio stream found.Doesn't attempt to retain subtitles or attachment streams. Attempts to
calculate the bits/pixel of each file and uses a user specified crf quality for bits/pixel intervals, otherwise
uses a user specified default CRF and audio bitrate & channel setup. Prints helpful stats and can save a log
file to each sub directory."""
parent_dir = '/media/television/'
dirs_to_convert = ['folder1', 'folder2']
speed = 'superfast' # Reminder: this is x265, don't expect x264 speeds
log_file = True
# Quality intervals for quality dicts: X & Y are bits/pixel thresholds; a, b, & c are crfs corresponding to intervals
# Bits/pixel X Y
# <----------------](----------](----------->
# CRF a b c
# Upper bounds are always inclusive, lower bounds are exclusive
# Some example quality dicts. They can be any number of intervals, but threshold[0] == 0, and each entry
# except 'default' must be equal lengths. see save_bits_per_pixel_dist() in tools.py for help visualizing bits/pixel
# distribution for defining your own intervals
s = 'stereo'
m = 'mono'
qualities_HQ = { # HQ
'threshold': [0, 0.08, 0.11],
'video': [25, 23, 20],
'audio': [(64, m), (96, m), (128, s)],
'default': [23, (96, s)]} # default to stereo here 96k
qualities_LQ = {
'threshold': [0, 0.10, 0.14],
'video': [27, 25, 23],
'audio': [(64, m), (82, m), (96, m)],
'default': [23, (96, s)]}
qualities = qualities_HQ
class PrintLogger:
    """Print messages and optionally append each one to a log file."""

    def __init__(self, log_file_path, logging):
        self.log_file_path = log_file_path  # destination for appended lines
        self.logging = logging              # when falsy, only print

    def pl(self, s):
        """Print `s`; when logging is enabled, also append it to the log file."""
        print(s)
        if self.logging:
            # The with-block closes the file on exit; the original's explicit
            # log.close() inside the block was redundant and was removed.
            with open(self.log_file_path, 'a', encoding='utf8') as log:
                log.write(s + "\n")
def convert_folder_x265(dir_path, log=True):
    """Convert every non-HEVC media file in `dir_path` to x265 video / opus audio.

    Originals are moved into an 'original_files' subdirectory and the
    converted .mkv files are written back into `dir_path`.

    :param dir_path: directory whose media files should be converted
    :param log: accepted for interface compatibility; not used in this body
    """
    # declare some media arguments (these intentionally shadow the
    # module-level `speed` setting)
    concurrency = 2
    speed = 'veryfast'
    codec = 'x265'

    # Figure out what files need to be converted to h265
    all_files = makeMediaObjectsInDirectory(dir_path)
    files_to_move = [media for media in all_files if media.videoCodec != 'hevc']

    # move files out of the way before converting
    original_files_dir = path.join(dir_path, 'original_files')
    for media in files_to_move:
        if not path.isdir(original_files_dir):
            os.mkdir(original_files_dir)
        try:
            os.rename(media.filePath, path.join(original_files_dir, media.fileName))
        except FileExistsError:
            print("\nFile: {}\n\tAlready exists! Skipping this one...".format(media.filePath))
            continue

    # Build MediaConverter object array
    files_to_convert = makeMediaObjectsInDirectory(original_files_dir)
    c = []
    accum_input_size = 1  # 1 byte to avoid div by 0 errors in case nothing gets converted
    number_of_files = 0
    for media in files_to_convert:
        # BUG FIX: decide_quality() takes the qualities dict as its first
        # argument; the original call passed only the media object, which
        # raised TypeError. Use the module-level `qualities` selection.
        video_rate, audio_rate, audio_channels = decide_quality(qualities, media)
        name, ext = split_ext(media.fileName)
        output_file_path = path.join(dir_path, name + '.mkv')
        if path.isfile(output_file_path):
            print("Output file {} \n\tAlready exists! skipping...".format(output_file_path))
            continue
        accum_input_size += path.getsize(media.filePath)
        number_of_files += 1
        cvt = MediaConverter(media, output_file_path)
        try:
            cvt.createVideoStream(codec, 'crf', video_rate, speed)
        except IndexError:
            print("NO VIDEO FOUND")
        try:
            cvt.createAudioStream(media.audioStreams[0], 'opus', audioBitrate=audio_rate, audioChannels=audio_channels)
        except IndexError:
            print("NO AUDIO FOUND")
        c.append(cvt)

    print("----- CONVERTING -----")
    q = MediaConverterQueue(max_processes=concurrency)
    q.add_jobs(c)
    q.run()
    while not q.done:
        time.sleep(5)
        print("Working on {}\n".format(dir_path))
    MB_Min = (accum_input_size/1000000)/(q.total_time/60)
    print("\n\nDone converting {} files in {} at {}".format(number_of_files, dir_path, time.strftime("%I:%M:%S %p")))
    print("Conversion took {}, at an average rate of {} MB/min\n\n".format(seconds_to_timecode(q.total_time), MB_Min))
def decide_quality(qualities, media_object):
    """Choose the video crf plus audio bitrate/channels from `qualities`.

    :param qualities: dict, see notes at top of file
    :param media_object: MediaObject
    :return: (crf, audio_bitrate, audio_channels)
    """
    bits_pixel = calc_bits_per_pixel(media_object)

    # Validate that the interval lists line up before using them.
    interval_count = len(qualities['threshold'])
    if len(qualities['video']) != interval_count or len(qualities['audio']) != interval_count:
        print("\n\nYour qualities variable isn't set up correctly!")
        print("'threshold', 'video', and audio values need to have equal length.")
        print("Additionally, 'threshold'[0] needs to be 0")
        print("Exiting...")
        sys.exit()

    # Start from the configured defaults.
    crf = qualities['default'][0]
    audio_bitrate, audio_channels = qualities['default'][1]
    if bits_pixel <= 0:  # warn when it looks like the defaults will be used
        print("Unable to calculate bits per pixel, defaulting to: "
              "crf = {}, audio = {}k, channels = {}".format(crf, audio_bitrate, audio_channels))

    # Thresholds ascend, so the last interval whose lower bound is exceeded wins.
    settings_by_interval = zip(qualities['threshold'], qualities['video'], qualities['audio'])
    for lower_bound, video_crf, audio_setting in settings_by_interval:
        if bits_pixel > lower_bound:
            crf = video_crf
            audio_bitrate, audio_channels = audio_setting
    return crf, audio_bitrate, audio_channels
if __name__ == '__main__':
    # Validate every target directory up front so nothing converts
    # if any configured folder is missing.
    for folder_name in dirs_to_convert:
        target = path.join(parent_dir, folder_name)
        if not path.isdir(target):
            print("Folder {} doesn't seem to exist, aborting!".format(target))
            sys.exit()
        else:
            print("{} Exists!".format(target))
    print()
    for folder_name in dirs_to_convert:
        convert_folder_x265(path.join(parent_dir, folder_name))
| {
"repo_name": "taishengy/tympeg",
"path": "tympeg/scripts/pooled_converter.py",
"copies": "1",
"size": "6821",
"license": "mit",
"hash": -970495494036562700,
"line_mean": 35.8944444444,
"line_max": 119,
"alpha_frac": 0.6100278552,
"autogenerated": false,
"ratio": 3.6339904102290888,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47440182654290886,
"avg_score": null,
"num_lines": null
} |
"""A pool for sqlite connections."""
import threading
from sqlalchemy.pool import NullPool, StaticPool
class RecorderPool(StaticPool, NullPool):
    """A hybrid of NullPool and StaticPool.

    When called from the creating thread acts like StaticPool
    When called from any other thread, acts like NullPool
    """

    def __init__(self, *args, **kw):  # pylint: disable=super-init-not-called
        """Create the pool."""
        # Remember the creating thread: it gets StaticPool's single cached
        # connection, every other thread gets a throwaway connection.
        self._tid = threading.current_thread().ident
        StaticPool.__init__(self, *args, **kw)

    def _do_return_conn(self, conn):
        if threading.current_thread().ident == self._tid:
            return super()._do_return_conn(conn)
        # Foreign thread: NullPool behaviour — just close the connection.
        conn.close()

    def dispose(self):
        """Dispose of the connection."""
        # Only the creating thread may tear down the shared connection.
        if threading.current_thread().ident == self._tid:
            return super().dispose()

    def _do_get(self):
        if threading.current_thread().ident == self._tid:
            return super()._do_get()
        # Deliberately skip past StaticPool in the MRO so foreign threads
        # open a fresh connection instead of sharing the cached one.
        return super(  # pylint: disable=bad-super-call
            NullPool, self
        )._create_connection()
| {
"repo_name": "kennedyshead/home-assistant",
"path": "homeassistant/components/recorder/pool.py",
"copies": "2",
"size": "1090",
"license": "apache-2.0",
"hash": 5566731144021901000,
"line_mean": 31.0588235294,
"line_max": 77,
"alpha_frac": 0.6165137615,
"autogenerated": false,
"ratio": 4.082397003745318,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5698910765245319,
"avg_score": null,
"num_lines": null
} |
"A pool of Chromosomes a simulation can draw from"
from itertools import chain
import numpy as np
from pydigree.recombination import recombine
def richards(A, C, M, B, T):
    '''
    Generates a population growth model function

    A: Lower Asymptope
    C: Upper Asymptope
    M: Maximum growth time
    B: Growth Rate
    T: Maximum growth position
    '''
    def growth(gen):
        # Richards curve evaluated at generation `gen`.
        return A + (C / (1 + T * np.exp(-B * (gen - M)) ** (1 / T)))
    return growth
class ChromosomePool(object):
    """
    A pool of Chromosomes a simulation can draw from
    """

    def __init__(self, population=None, chromosomes=None, size=0):
        """
        Create the pool.

        :param population: population whose chromosome templates to use
        :param chromosomes: the set of chromosomes
        :param size: size of the pool
        :type size: int
        :type chromosomes: ChromosomeSet
        :type population: IndividualContainer
        """
        if population:
            self.chromosomes = population.chromosomes
        elif chromosomes:
            self.chromosomes = chromosomes
        # BUG FIX: the original initializer was `[[] * len(self.chromosomes)]`,
        # which evaluates to `[[]]` (a single slot) because `[] * n == []`.
        # The pool needs one (initially empty) slot per chromosome, otherwise
        # any assignment to pool[i] for i >= 1 raised IndexError.
        self.pool = [[] for _ in self.chromosomes]
        self.n0 = size

    # Pool functions
    def size(self):
        """ Returns the size of the pool of available chromosomes """
        return len(self.pool[0])

    def initialize_pool(self, size=None):
        """ Initializes a pool of chromosomes for simulation """
        if self.n0 and not size:
            size = self.n0
        for i, q in enumerate(self.chromosomes):
            # 2 * size haploid chromosomes per diploid pool member.
            self.pool[i] = q.linkageequilibrium_chromosomes(2 * size)

    def fix(self, loc, value):
        ''' Sets all alleles at loc to value'''
        chromidx, posidx = loc
        p = self.pool[chromidx]
        for chrom in p:
            chrom[posidx] = value

    def iterate_pool(self, gensize):
        """
        Iterate pool simulates a generation of random mating
        between chromosomes instead of individuals. The pool of
        population chromosomes then contains chromosomes from the
        new generation.

        :param gensize: the size of the next generation (rounded down
            to the integer)
        :return: Nothing
        """
        # Generation sizes calculated from mathematical functions can have
        # non-integer values, which doesn't make much sense here.
        gensize = int(gensize)

        for i, c in enumerate(self.chromosomes):
            def choose_chrom(pool, chrmap):
                """
                Get two random chromosomes, recombine them, return the result
                """
                # Since Alleles is a subclass of ndarray, numpy has been
                # treating pool as a multidimensional array. We'll generate
                # the indices ourself and get them that way. Eventually
                # I'll come back and fix the isinstancing of Alleles.
                qi, qw = np.random.randint(0, len(pool), 2)
                q, w = pool[qi], pool[qw]
                r = recombine(q, w, chrmap)
                return r

            newpool = [choose_chrom(self.pool[i], c.genetic_map)
                       for x in range(gensize)]
            self.pool[i] = newpool

    # Chromosome functions
    def chromosome(self, chromindex):
        """
        Get a random chromomsome from the pool

        :param chromindex: which chromosome are we looking for?
        :type chromindex: int

        :rtype: AlleleContainer
        """
        chidx = np.random.randint(0, len(self.pool[chromindex]))
        return self.pool[chromindex][chidx]

    def get_genotype_set(self):
        ''' Gives a full set of genotypes drawn from the chromosome pool '''
        return [[self.chromosome(i), self.chromosome(i)]
                for i, x in enumerate(self.chromosomes)]

    def evolve(self, growth_func, gens):
        '''
        Iterates the pool according to a popuation growth model.

        :param growth_func: A function that takes a generation number as an
            argument and returns a generation size
        :param gens: number of generations to advance
        :type growth_func: Callable
        :type gens: int
        :rtype void:
        '''
        for x in range(gens):
            self.iterate_pool(growth_func(x))

    @staticmethod
    def from_population(pop):
        """
        Creates a pool from an existing population.

        :param pop: Base population
        :type pop: Population
        :rtype: ChromosomePool
        """
        newpool = ChromosomePool(chromosomes=pop.chromosomes)
        newpool.n0 = len(pop.individuals) * 2
        for chridx, _ in enumerate(pop.chromosomes):
            poolchroms = chain.from_iterable(ind.genotypes[chridx]
                                            for ind in pop.individuals)
            thischrom = list(poolchroms)
            newpool.pool[chridx] = thischrom
        return newpool
| {
"repo_name": "jameshicks/pydigree",
"path": "pydigree/simulation/chromosomepool.py",
"copies": "1",
"size": "4925",
"license": "apache-2.0",
"hash": 5105893126295137000,
"line_mean": 31.6158940397,
"line_max": 78,
"alpha_frac": 0.5906598985,
"autogenerated": false,
"ratio": 4.033579033579033,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5124238932079033,
"avg_score": null,
"num_lines": null
} |
"""A poor substitute for PHP's strtotime function."""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# phlsys_strtotime
#
# Public Functions:
# describe_duration_string_to_time_delta
# duration_string_to_time_delta
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
def describe_duration_string_to_time_delta():
    """Return a human-readable description of the accepted duration format."""
    parts = (
        'time can be specified like "5 hours 20 minutes", use ',
        'combinations of seconds, minutes, hours, days, weeks. ',
        'each unit should only appear once. you may use floating ',
        'point numbers and negative numbers. ',
        'e.g. "1 weeks -1.5 days".',
    )
    return ''.join(parts)
def duration_string_to_time_delta(s):
    """Return a datetime.timedelta based on the supplied string 's'.

    Usage examples:

    >>> str(duration_string_to_time_delta("1 seconds"))
    '0:00:01'

    >>> str(duration_string_to_time_delta("2 minutes"))
    '0:02:00'

    >>> str(duration_string_to_time_delta("2 hours 2 minutes"))
    '2:02:00'

    >>> str(duration_string_to_time_delta("1 days 2 hours 2 minutes"))
    '1 day, 2:02:00'

    >>> str(duration_string_to_time_delta("1.5 days"))
    '1 day, 12:00:00'

    >>> str(duration_string_to_time_delta("1 days -1 hours"))
    '23:00:00'

    >>> str(duration_string_to_time_delta("1 milliseconds"))
    '0:00:00.001000'

    :s: a string in the appropriate time format
    :returns: a datetime.timedelta
    :raises ValueError: on an odd number of tokens or a repeated unit
    """
    clauses = s.split()
    if len(clauses) % 2:
        raise ValueError("odd number of clauses: " + s)
    # BUG FIX: on Python 3 zip() returns a one-shot iterator; the original
    # bound it directly, so the duplicate-clause length check below raised
    # TypeError (no len) after the dict comprehension consumed it.
    # Materialize the pairs once.
    pairs = list(zip(clauses[::2], clauses[1::2]))
    d = {p[1]: float(p[0]) for p in pairs}
    if len(d) != len(pairs):
        raise ValueError("duplicated clauses: " + s)
    return datetime.timedelta(**d)
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| {
"repo_name": "cs-shadow/phabricator-tools",
"path": "py/phl/phlsys_strtotime.py",
"copies": "4",
"size": "2951",
"license": "apache-2.0",
"hash": 1204266037822501400,
"line_mean": 34.5542168675,
"line_max": 79,
"alpha_frac": 0.5438834293,
"autogenerated": false,
"ratio": 4.121508379888268,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 83
} |
"""A POP3 client class.
Based on the J. Myers POP3 draft, Jan. 96
"""
# Author: David Ascher <david_ascher@brown.edu>
# [heavily stealing from nntplib.py]
# Updated: Piers Lauder <piers@cs.su.oz.au> [Jul '97]
# String method conversion and test jig improvements by ESR, February 2001.
# Added the POP3_SSL class. Methods loosely based on IMAP_SSL. Hector Urtubia <urtubia@mrbook.org> Aug 2003
# Example (see the test function at the end of this file)
# Imports
import errno
import re
import socket
import sys
try:
import ssl
HAVE_SSL = True
except ImportError:
HAVE_SSL = False
__all__ = ["POP3","error_proto"]
# Exception raised when an error or invalid response is received:
class error_proto(Exception): pass
# Standard Port
POP3_PORT = 110
# POP SSL PORT
POP3_SSL_PORT = 995
# Line terminators (we always output CRLF, but accept any of CRLF, LFCR, LF)
CR = b'\r'
LF = b'\n'
CRLF = CR+LF
# maximal line length when calling readline(). This is to prevent
# reading arbitrary length lines. RFC 1939 limits POP3 line length to
# 512 characters, including CRLF. We have selected 2048 just to be on
# the safe side.
_MAXLINE = 2048
class POP3:
    """This class supports both the minimal and optional command sets.
    Arguments can be strings or integers (where appropriate)
    (e.g.: retr(1) and retr('1') both work equally well.

    Minimal Command Set:
            USER name               user(name)
            PASS string             pass_(string)
            STAT                    stat()
            LIST [msg]              list(msg = None)
            RETR msg                retr(msg)
            DELE msg                dele(msg)
            NOOP                    noop()
            RSET                    rset()
            QUIT                    quit()

    Optional Commands (some servers support these):
            RPOP name               rpop(name)
            APOP name digest        apop(name, digest)
            TOP msg n               top(msg, n)
            UIDL [msg]              uidl(msg = None)
            CAPA                    capa()
            STLS                    stls()
            UTF8                    utf8()

    Raises one exception: 'error_proto'.

    Instantiate with:
            POP3(hostname, port=110)

    NB:     the POP protocol locks the mailbox from user
            authorization until QUIT, so be sure to get in, suck
            the messages, and quit, each time you access the
            mailbox.

            POP is a line-based protocol, which means large mail
            messages consume lots of python cycles reading them
            line-by-line.

            If it's available on your mail server, use IMAP4
            instead, it doesn't suffer from the two problems
            above.
    """

    # Commands are encoded to this charset before being sent.
    encoding = 'UTF-8'

    def __init__(self, host, port=POP3_PORT,
                 timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
        self.host = host
        self.port = port
        self._tls_established = False
        sys.audit("poplib.connect", self, host, port)
        self.sock = self._create_socket(timeout)
        self.file = self.sock.makefile('rb')
        self._debugging = 0
        self.welcome = self._getresp()

    def _create_socket(self, timeout):
        """Open the TCP connection; split out so subclasses can override."""
        return socket.create_connection((self.host, self.port), timeout)

    def _putline(self, line):
        """Send one raw bytes line, appending the CRLF terminator."""
        if self._debugging > 1: print('*put*', repr(line))
        sys.audit("poplib.putline", self, line)
        self.sock.sendall(line + CRLF)

    # Internal: send one command to the server (through _putline())

    def _putcmd(self, line):
        if self._debugging: print('*cmd*', repr(line))
        line = bytes(line, self.encoding)
        self._putline(line)

    # Internal: return one line from the server, stripping CRLF.
    # This is where all the CPU time of this module is consumed.
    # Raise error_proto('-ERR EOF') if the connection is closed.

    def _getline(self):
        line = self.file.readline(_MAXLINE + 1)
        if len(line) > _MAXLINE:
            raise error_proto('line too long')
        if self._debugging > 1: print('*get*', repr(line))
        if not line: raise error_proto('-ERR EOF')
        octets = len(line)
        # server can send any combination of CR & LF
        # however, 'readline()' returns lines ending in LF
        # so only possibilities are ...LF, ...CRLF, CR...LF
        if line[-2:] == CRLF:
            return line[:-2], octets
        if line[:1] == CR:
            return line[1:-1], octets
        return line[:-1], octets

    # Internal: get a response from the server.
    # Raise 'error_proto' if the response doesn't start with '+'.

    def _getresp(self):
        resp, o = self._getline()
        if self._debugging > 1: print('*resp*', repr(resp))
        if not resp.startswith(b'+'):
            raise error_proto(resp)
        return resp

    # Internal: get a response plus following text from the server.

    def _getlongresp(self):
        resp = self._getresp()
        list = []; octets = 0
        line, o = self._getline()
        while line != b'.':
            if line.startswith(b'..'):
                # Undo POP3 byte-stuffing: a leading '.' is doubled on the wire.
                o = o-1
                line = line[1:]
            octets = octets + o
            list.append(line)
            line, o = self._getline()
        return resp, list, octets

    # Internal: send a command and get the response

    def _shortcmd(self, line):
        self._putcmd(line)
        return self._getresp()

    # Internal: send a command and get the response plus following text

    def _longcmd(self, line):
        self._putcmd(line)
        return self._getlongresp()

    # These can be useful:

    def getwelcome(self):
        """Return the server's greeting banner."""
        return self.welcome

    def set_debuglevel(self, level):
        """Set trace verbosity: 0 = off, 1 = commands, 2+ = all protocol I/O."""
        self._debugging = level

    # Here are all the POP commands:

    def user(self, user):
        """Send user name, return response

        (should indicate password required).
        """
        return self._shortcmd('USER %s' % user)

    def pass_(self, pswd):
        """Send password, return response

        (response includes message count, mailbox size).

        NB: mailbox is locked by server from here to 'quit()'
        """
        return self._shortcmd('PASS %s' % pswd)

    def stat(self):
        """Get mailbox status.

        Result is tuple of 2 ints (message count, mailbox size)
        """
        retval = self._shortcmd('STAT')
        rets = retval.split()
        if self._debugging: print('*stat*', repr(rets))
        numMessages = int(rets[1])
        sizeMessages = int(rets[2])
        return (numMessages, sizeMessages)

    def list(self, which=None):
        """Request listing, return result.

        Result without a message number argument is in form
        ['response', ['mesg_num octets', ...], octets].

        Result when a message number argument is given is a
        single response: the "scan listing" for that message.
        """
        if which is not None:
            return self._shortcmd('LIST %s' % which)
        return self._longcmd('LIST')

    def retr(self, which):
        """Retrieve whole message number 'which'.

        Result is in form ['response', ['line', ...], octets].
        """
        return self._longcmd('RETR %s' % which)

    def dele(self, which):
        """Delete message number 'which'.

        Result is 'response'.
        """
        return self._shortcmd('DELE %s' % which)

    def noop(self):
        """Does nothing.

        One supposes the response indicates the server is alive.
        """
        return self._shortcmd('NOOP')

    def rset(self):
        """Unmark all messages marked for deletion."""
        return self._shortcmd('RSET')

    def quit(self):
        """Signoff: commit changes on server, unlock mailbox, close connection."""
        resp = self._shortcmd('QUIT')
        self.close()
        return resp

    def close(self):
        """Close the connection without assuming anything about it."""
        try:
            file = self.file
            self.file = None
            if file is not None:
                file.close()
        finally:
            sock = self.sock
            self.sock = None
            if sock is not None:
                try:
                    sock.shutdown(socket.SHUT_RDWR)
                except OSError as exc:
                    # The server might already have closed the connection.
                    # On Windows, this may result in WSAEINVAL (error 10022):
                    # An invalid operation was attempted.
                    if (exc.errno != errno.ENOTCONN
                            and getattr(exc, 'winerror', 0) != 10022):
                        raise
                finally:
                    sock.close()

    #__del__ = quit

    # optional commands:

    def rpop(self, user):
        """Not sure what this does."""
        return self._shortcmd('RPOP %s' % user)

    # Pattern that extracts the APOP challenge from the greeting banner.
    timestamp = re.compile(br'\+OK.[^<]*(<.*>)')

    def apop(self, user, password):
        """Authorisation

        - only possible if server has supplied a timestamp in initial greeting.

        Args:
                user     - mailbox user;
                password - mailbox password.

        NB: mailbox is locked by server from here to 'quit()'
        """
        secret = bytes(password, self.encoding)
        m = self.timestamp.match(self.welcome)
        if not m:
            raise error_proto('-ERR APOP not supported by server')
        import hashlib
        digest = m.group(1)+secret
        digest = hashlib.md5(digest).hexdigest()
        return self._shortcmd('APOP %s %s' % (user, digest))

    def top(self, which, howmuch):
        """Retrieve message header of message number 'which'
        and first 'howmuch' lines of message body.

        Result is in form ['response', ['line', ...], octets].
        """
        return self._longcmd('TOP %s %s' % (which, howmuch))

    def uidl(self, which=None):
        """Return message digest (unique id) list.

        If 'which', result contains unique id for that message
        in the form 'response mesgnum uid', otherwise result is
        the list ['response', ['mesgnum uid', ...], octets]
        """
        if which is not None:
            return self._shortcmd('UIDL %s' % which)
        return self._longcmd('UIDL')

    def utf8(self):
        """Try to enter UTF-8 mode (see RFC 6856). Returns server response.
        """
        return self._shortcmd('UTF8')

    def capa(self):
        """Return server capabilities (RFC 2449) as a dictionary
        >>> c=poplib.POP3('localhost')
        >>> c.capa()
        {'IMPLEMENTATION': ['Cyrus', 'POP3', 'server', 'v2.2.12'],
         'TOP': [], 'LOGIN-DELAY': ['0'], 'AUTH-RESP-CODE': [],
         'EXPIRE': ['NEVER'], 'USER': [], 'STLS': [], 'PIPELINING': [],
         'UIDL': [], 'RESP-CODES': []}
        >>>

        Really, according to RFC 2449, the cyrus folks should avoid
        having the implementation split into multiple arguments...
        """
        def _parsecap(line):
            lst = line.decode('ascii').split()
            return lst[0], lst[1:]

        caps = {}
        try:
            resp = self._longcmd('CAPA')
            rawcaps = resp[1]
            for capline in rawcaps:
                capnm, capargs = _parsecap(capline)
                caps[capnm] = capargs
        except error_proto as _err:
            raise error_proto('-ERR CAPA not supported by server')
        return caps

    def stls(self, context=None):
        """Start a TLS session on the active connection as specified in RFC 2595.

                context - a ssl.SSLContext
        """
        if not HAVE_SSL:
            raise error_proto('-ERR TLS support missing')
        if self._tls_established:
            raise error_proto('-ERR TLS session already established')
        caps = self.capa()
        if not 'STLS' in caps:
            raise error_proto('-ERR STLS not supported by server')
        if context is None:
            context = ssl._create_stdlib_context()
        resp = self._shortcmd('STLS')
        self.sock = context.wrap_socket(self.sock,
                                        server_hostname=self.host)
        self.file = self.sock.makefile('rb')
        self._tls_established = True
        return resp
if HAVE_SSL:
    class POP3_SSL(POP3):
        """POP3 client class over SSL connection

        Instantiate with: POP3_SSL(hostname, port=995, keyfile=None, certfile=None,
                                   context=None)

               hostname - the hostname of the pop3 over ssl server
               port - port number
               keyfile - PEM formatted file that contains your private key
               certfile - PEM formatted certificate chain file
               context - a ssl.SSLContext

        See the methods of the parent class POP3 for more documentation.
        """
        def __init__(self, host, port=POP3_SSL_PORT, keyfile=None, certfile=None,
                     timeout=socket._GLOBAL_DEFAULT_TIMEOUT, context=None):
            # A caller-supplied context excludes the legacy keyfile/certfile
            # pair, which is kept only for backward compatibility.
            if context is not None and keyfile is not None:
                raise ValueError("context and keyfile arguments are mutually "
                                 "exclusive")
            if context is not None and certfile is not None:
                raise ValueError("context and certfile arguments are mutually "
                                 "exclusive")
            if keyfile is not None or certfile is not None:
                import warnings
                warnings.warn("keyfile and certfile are deprecated, use a "
                              "custom context instead", DeprecationWarning, 2)
            self.keyfile = keyfile
            self.certfile = certfile
            if context is None:
                context = ssl._create_stdlib_context(certfile=certfile,
                                                     keyfile=keyfile)
            self.context = context
            POP3.__init__(self, host, port, timeout)
        def _create_socket(self, timeout):
            # Wrap the plain socket created by POP3 in TLS before any I/O.
            sock = POP3._create_socket(self, timeout)
            sock = self.context.wrap_socket(sock,
                                            server_hostname=self.host)
            return sock
        def stls(self, keyfile=None, certfile=None, context=None):
            """The method unconditionally raises an exception since the
            STLS command doesn't make any sense on an already established
            SSL/TLS session.
            """
            raise error_proto('-ERR TLS session already established')
    __all__.append("POP3_SSL")
if __name__ == "__main__":
    # Minimal interactive smoke test: poplib.py <server> <user> <password>.
    # Lists, downloads and prints every message, then QUITs (unlocking the
    # mailbox on the server).
    import sys
    a = POP3(sys.argv[1])
    print(a.getwelcome())
    a.user(sys.argv[2])
    a.pass_(sys.argv[3])
    a.list()
    (numMsgs, totalSize) = a.stat()
    for i in range(1, numMsgs + 1):
        (header, msg, octets) = a.retr(i)
        print("Message %d:" % i)
        for line in msg:
            print('   ' + line)
        print('-----------------------')
    a.quit()
| {
"repo_name": "batermj/algorithm-challenger",
"path": "code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Lib/poplib.py",
"copies": "10",
"size": "15077",
"license": "apache-2.0",
"hash": -2364380986518093000,
"line_mean": 30.3451143451,
"line_max": 107,
"alpha_frac": 0.5505737216,
"autogenerated": false,
"ratio": 4.212629226040794,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0032135152603975418,
"num_lines": 481
} |
"""A POP3 client class.
Based on the J. Myers POP3 draft, Jan. 96
"""
# Author: David Ascher <david_ascher@brown.edu>
# [heavily stealing from nntplib.py]
# Updated: Piers Lauder <piers@cs.su.oz.au> [Jul '97]
# String method conversion and test jig improvements by ESR, February 2001.
# Added the POP3_SSL class. Methods loosely based on IMAP_SSL. Hector Urtubia <urtubia@mrbook.org> Aug 2003
# Example (see the test function at the end of this file)
# Imports
import re, socket
__all__ = ["POP3","error_proto","POP3_SSL"]
# Exception raised when an error or invalid response is received:
class error_proto(Exception):
    """Raised when an error or invalid response is received."""
# Standard POP3 port (RFC 1939)
POP3_PORT = 110
# POP3-over-SSL port
POP3_SSL_PORT = 995
# Line terminators (we always output CRLF, but accept any of CRLF, LFCR, LF)
CR = '\r'
LF = '\n'
CRLF = CR+LF
class POP3:
    """This class supports both the minimal and optional command sets.
    Arguments can be strings or integers (where appropriate)
    (e.g.: retr(1) and retr('1') both work equally well.

    Minimal Command Set:
            USER name               user(name)
            PASS string             pass_(string)
            STAT                    stat()
            LIST [msg]              list(msg = None)
            RETR msg                retr(msg)
            DELE msg                dele(msg)
            NOOP                    noop()
            RSET                    rset()
            QUIT                    quit()

    Optional Commands (some servers support these):
            RPOP name               rpop(name)
            APOP name digest        apop(name, digest)
            TOP msg n               top(msg, n)
            UIDL [msg]              uidl(msg = None)

    Raises one exception: 'error_proto'.

    Instantiate with:
            POP3(hostname, port=110)

    NB:     the POP protocol locks the mailbox from user
            authorization until QUIT, so be sure to get in, suck
            the messages, and quit, each time you access the
            mailbox.

            POP is a line-based protocol, which means large mail
            messages consume lots of python cycles reading them
            line-by-line.

            If it's available on your mail server, use IMAP4
            instead, it doesn't suffer from the two problems
            above.

    NOTE: this is Python 2 code ('print' statements, 'except E, v' syntax).
    """
    def __init__(self, host, port = POP3_PORT):
        self.host = host
        self.port = port
        # Fallback error used when getaddrinfo yields nothing at all.
        msg = "getaddrinfo returns an empty list"
        self.sock = None
        # Try every (family, socktype) endpoint until one connects.
        for res in socket.getaddrinfo(self.host, self.port, 0, socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res
            try:
                self.sock = socket.socket(af, socktype, proto)
                self.sock.connect(sa)
            except socket.error, msg:
                if self.sock:
                    self.sock.close()
                self.sock = None
                continue
            break
        if not self.sock:
            raise socket.error, msg
        # Buffered reader used by _getline(); writes go through self.sock.
        self.file = self.sock.makefile('rb')
        self._debugging = 0
        self.welcome = self._getresp()
    # Internal: send one line to the server, appending CRLF.
    def _putline(self, line):
        if self._debugging > 1: print '*put*', repr(line)
        self.sock.sendall('%s%s' % (line, CRLF))
    # Internal: send one command to the server (through _putline())
    def _putcmd(self, line):
        if self._debugging: print '*cmd*', repr(line)
        self._putline(line)
    # Internal: return one line from the server, stripping CRLF.
    # This is where all the CPU time of this module is consumed.
    # Raise error_proto('-ERR EOF') if the connection is closed.
    def _getline(self):
        line = self.file.readline()
        if self._debugging > 1: print '*get*', repr(line)
        if not line: raise error_proto('-ERR EOF')
        octets = len(line)
        # server can send any combination of CR & LF
        # however, 'readline()' returns lines ending in LF
        # so only possibilities are ...LF, ...CRLF, CR...LF
        if line[-2:] == CRLF:
            return line[:-2], octets
        if line[0] == CR:
            return line[1:-1], octets
        return line[:-1], octets
    # Internal: get a response from the server.
    # Raise 'error_proto' if the response doesn't start with '+'.
    def _getresp(self):
        resp, o = self._getline()
        if self._debugging > 1: print '*resp*', repr(resp)
        c = resp[:1]
        if c != '+':
            raise error_proto(resp)
        return resp
    # Internal: get a response plus following text from the server,
    # up to (and not including) the terminating '.' line.
    def _getlongresp(self):
        resp = self._getresp()
        list = []; octets = 0
        line, o = self._getline()
        while line != '.':
            # Undo POP3 byte-stuffing: a leading '..' stands for '.'.
            if line[:2] == '..':
                o = o-1
                line = line[1:]
            octets = octets + o
            list.append(line)
            line, o = self._getline()
        return resp, list, octets
    # Internal: send a command and get the response
    def _shortcmd(self, line):
        self._putcmd(line)
        return self._getresp()
    # Internal: send a command and get the response plus following text
    def _longcmd(self, line):
        self._putcmd(line)
        return self._getlongresp()
    # These can be useful:
    def getwelcome(self):
        """Return the server's greeting banner (already read by __init__)."""
        return self.welcome
    def set_debuglevel(self, level):
        """Set debug output level: 0 none, 1 commands, >1 all traffic."""
        self._debugging = level
    # Here are all the POP commands:
    def user(self, user):
        """Send user name, return response
        (should indicate password required).
        """
        return self._shortcmd('USER %s' % user)
    def pass_(self, pswd):
        """Send password, return response
        (response includes message count, mailbox size).
        NB: mailbox is locked by server from here to 'quit()'
        """
        return self._shortcmd('PASS %s' % pswd)
    def stat(self):
        """Get mailbox status.
        Result is tuple of 2 ints (message count, mailbox size)
        """
        retval = self._shortcmd('STAT')
        rets = retval.split()
        if self._debugging: print '*stat*', repr(rets)
        numMessages = int(rets[1])
        sizeMessages = int(rets[2])
        return (numMessages, sizeMessages)
    def list(self, which=None):
        """Request listing, return result.
        Result without a message number argument is in form
        ['response', ['mesg_num octets', ...], octets].
        Result when a message number argument is given is a
        single response: the "scan listing" for that message.
        """
        if which is not None:
            return self._shortcmd('LIST %s' % which)
        return self._longcmd('LIST')
    def retr(self, which):
        """Retrieve whole message number 'which'.
        Result is in form ['response', ['line', ...], octets].
        """
        return self._longcmd('RETR %s' % which)
    def dele(self, which):
        """Delete message number 'which'.
        Result is 'response'.
        """
        return self._shortcmd('DELE %s' % which)
    def noop(self):
        """Does nothing.
        One supposes the response indicates the server is alive.
        """
        return self._shortcmd('NOOP')
    def rset(self):
        """Unmark any messages scheduled for deletion (RSET)."""
        return self._shortcmd('RSET')
    def quit(self):
        """Signoff: commit changes on server, unlock mailbox, close connection."""
        try:
            resp = self._shortcmd('QUIT')
        except error_proto, val:
            # NOTE(review): an '-ERR' reply is returned (as the exception
            # object) rather than raised here -- confirm callers expect that.
            resp = val
        self.file.close()
        self.sock.close()
        del self.file, self.sock
        return resp
    #__del__ = quit
    # optional commands:
    def rpop(self, user):
        """Send the (rarely implemented) RPOP command for 'user'."""
        return self._shortcmd('RPOP %s' % user)
    # Regexp extracting the '<...>' APOP challenge from the server banner.
    timestamp = re.compile(r'\+OK.*(<[^>]+>)')
    def apop(self, user, secret):
        """Authorisation
        - only possible if server has supplied a timestamp in initial greeting.
        Args:
                user    - mailbox user;
                secret  - secret shared between client and server.
        NB: mailbox is locked by server from here to 'quit()'
        """
        m = self.timestamp.match(self.welcome)
        if not m:
            raise error_proto('-ERR APOP not supported by server')
        import hashlib
        digest = hashlib.md5(m.group(1)+secret).digest()
        # Hex-encode the 16 raw digest bytes by hand (Python 2 idiom).
        digest = ''.join(map(lambda x:'%02x'%ord(x), digest))
        return self._shortcmd('APOP %s %s' % (user, digest))
    def top(self, which, howmuch):
        """Retrieve message header of message number 'which'
        and first 'howmuch' lines of message body.
        Result is in form ['response', ['line', ...], octets].
        """
        return self._longcmd('TOP %s %s' % (which, howmuch))
    def uidl(self, which=None):
        """Return message digest (unique id) list.
        If 'which', result contains unique id for that message
        in the form 'response mesgnum uid', otherwise result is
        the list ['response', ['mesgnum uid', ...], octets]
        """
        if which is not None:
            return self._shortcmd('UIDL %s' % which)
        return self._longcmd('UIDL')
class POP3_SSL(POP3):
    """POP3 client class over SSL connection

    Instantiate with: POP3_SSL(hostname, port=995, keyfile=None, certfile=None)

           hostname - the hostname of the pop3 over ssl server
           port - port number
           keyfile - PEM formatted file that countains your private key
           certfile - PEM formatted certificate chain file

    See the methods of the parent class POP3 for more documentation.

    NOTE: Python 2 code; reads go through the raw socket.ssl object and a
    manual string buffer instead of the parent's file object.
    """
    def __init__(self, host, port = POP3_SSL_PORT, keyfile = None, certfile = None):
        self.host = host
        self.port = port
        self.keyfile = keyfile
        self.certfile = certfile
        # Decrypted bytes already read from the SSL object; _getline()
        # consumes complete lines from the front of this buffer.
        self.buffer = ""
        msg = "getaddrinfo returns an empty list"
        self.sock = None
        for res in socket.getaddrinfo(self.host, self.port, 0, socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res
            try:
                self.sock = socket.socket(af, socktype, proto)
                self.sock.connect(sa)
            except socket.error, msg:
                if self.sock:
                    self.sock.close()
                self.sock = None
                continue
            break
        if not self.sock:
            raise socket.error, msg
        # NOTE(review): self.file is created but all reads below go through
        # self.sslobj -- presumably kept only for interface parity; verify.
        self.file = self.sock.makefile('rb')
        self.sslobj = socket.ssl(self.sock, self.keyfile, self.certfile)
        self._debugging = 0
        self.welcome = self._getresp()
    # Internal: pull more decrypted data from the SSL object into self.buffer.
    def _fillBuffer(self):
        localbuf = self.sslobj.read()
        if len(localbuf) == 0:
            raise error_proto('-ERR EOF')
        self.buffer += localbuf
    # Internal: return one line (and its octet count), refilling as needed.
    def _getline(self):
        line = ""
        renewline = re.compile(r'.*?\n')
        match = renewline.match(self.buffer)
        while not match:
            self._fillBuffer()
            match = renewline.match(self.buffer)
        line = match.group(0)
        # Drop the consumed line from the head of the buffer.
        self.buffer = renewline.sub('' ,self.buffer, 1)
        if self._debugging > 1: print '*get*', repr(line)
        octets = len(line)
        if line[-2:] == CRLF:
            return line[:-2], octets
        if line[0] == CR:
            return line[1:-1], octets
        return line[:-1], octets
    # Internal: write one CRLF-terminated line, handling short SSL writes.
    def _putline(self, line):
        if self._debugging > 1: print '*put*', repr(line)
        line += CRLF
        bytes = len(line)
        while bytes > 0:
            sent = self.sslobj.write(line)
            if sent == bytes:
                break    # avoid copy
            line = line[sent:]
            bytes = bytes - sent
    def quit(self):
        """Signoff: commit changes on server, unlock mailbox, close connection."""
        try:
            resp = self._shortcmd('QUIT')
        except error_proto, val:
            resp = val
        self.sock.close()
        del self.sslobj, self.sock
        return resp
if __name__ == "__main__":
    # Minimal manual test: poplib.py <server> <user> <password>.
    # Prints every message, then QUITs (unlocking the mailbox).
    import sys
    a = POP3(sys.argv[1])
    print a.getwelcome()
    a.user(sys.argv[2])
    a.pass_(sys.argv[3])
    a.list()
    (numMsgs, totalSize) = a.stat()
    for i in range(1, numMsgs + 1):
        (header, msg, octets) = a.retr(i)
        print "Message %d:" % i
        for line in msg:
            print '   ' + line
        print '-----------------------'
    a.quit()
| {
"repo_name": "ericlink/adms-server",
"path": "playframework-dist/play-1.1/python/Lib/poplib.py",
"copies": "2",
"size": "12869",
"license": "mit",
"hash": -5850198948915104000,
"line_mean": 28.4231678487,
"line_max": 107,
"alpha_frac": 0.5314321237,
"autogenerated": false,
"ratio": 4.131300160513644,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004080281372419134,
"num_lines": 423
} |
"""A POP3 server class.
Author: Konstantin Volkov <kozzztik@mail.ru>
Based on aiosmtpd
Implements:
RFC 1939 Post Office Protocol - Version 3
https://tools.ietf.org/html/rfc1939
RFC 2449 POP3 Extension Mechanism
https://tools.ietf.org/html/rfc2449
RFC 2595 Using TLS with IMAP, POP3 and ACAP
https://tools.ietf.org/html/rfc2595
RFC 3206 The SYS and AUTH POP Response Codes
https://tools.ietf.org/html/rfc3206
RFC 1734 POP3 AUTHentication command
https://tools.ietf.org/html/rfc1734
RFC 2222 Simple Authentication and Security Layer (SASL)
https://tools.ietf.org/html/rfc2222
RFC 5034 The Post Office Protocol (POP3) Simple Authentication and Security
Layer (SASL) Authentication Mechanism
https://tools.ietf.org/html/rfc5034
RFC 4616 The PLAIN Simple Authentication and Security Layer (SASL) Mechanism
https://tools.ietf.org/html/rfc4616
"""
import os
import re
import time
import base64
import socket
import asyncio
import logging
import hashlib
from .base_handler import BaseHandler
from .exceptions import (
POP3Exception,
BaseCodedException,
AuthFailed,
)
try:
    import ssl
    from asyncio import sslproto
except ImportError:
    # Interpreter built without the ssl module: STLS is unavailable.
    _has_ssl = False
else:
    # sslproto-based STARTTLS needs MemoryBIO support in the ssl module.
    _has_ssl = sslproto and hasattr(ssl, 'MemoryBIO')
__version__ = '0.1'
__ident__ = 'Python-aiopop3-{}'.format(__version__)
log = logging.getLogger('server.pop3')
def _quote_periods(bindata):
return re.sub(br'(?m)^\.', b'..', bindata)
class POP3ServerProtocol(asyncio.StreamReaderProtocol):
    """Asyncio protocol implementing the server side of a POP3 session.

    One instance serves a single client connection; all mailbox access is
    delegated to the supplied BaseHandler.
    """
    command_size_limit = 255  # RFC 2449 p.4
    def __init__(self, handler,
                 *,
                 hostname=None,
                 tls_context=None,
                 require_starttls=False,
                 timeout=600,
                 loop=None):
        self.loop = loop if loop else asyncio.get_event_loop()
        reader = asyncio.StreamReader(
            loop=self.loop, limit=self.command_size_limit)
        super().__init__(reader, loop=self.loop)
        assert isinstance(handler, BaseHandler)
        self.handler = handler  # type: BaseHandler
        hostname = hostname or socket.getfqdn()
        self.tls_context = tls_context
        if tls_context:
            # The server side does not verify client certificates here.
            self.tls_context.check_hostname = False
            self.tls_context.verify_mode = ssl.CERT_NONE
        self.require_starttls = tls_context and require_starttls
        self._tls_handshake_failed = False
        self._tls_protocol = None
        self.transport = None
        self._handler_coroutine = None
        self._mail_box = None
        self._messages = None
        self._message_ids = None
        self._deleted_messages = []
        self._read_messages = []
        self._auth_passed = False
        self._user_name = None  # for USER/PASS auth
        self._timeout = timeout
        # APOP banner token '<pid.clock@host>' sent in the greeting.
        self._greeting = '<{}.{}@{}>'.format(
            os.getpid(), time.monotonic(), hostname)
        self.__ident__ = __ident__
        self.auth_mechanizms = ['PLAIN']
        self.peer = None
        self._over_ssl = False
    def connection_made(self, transport):
        is_instance = (_has_ssl and
                       isinstance(transport, sslproto._SSLProtocolTransport))
        if self.transport is not None and is_instance:  # pragma: nossl
            # It is STARTTLS connection over normal connection.
            self._stream_reader._transport = transport
            self._stream_writer._transport = transport
            self.transport = transport
            # Why _extra is protected attribute?
            extra = self._tls_protocol._extra
            auth = self.handler.handle_tls_handshake(
                extra['ssl_object'],
                extra['peercert'],
                extra['cipher'])
            self._tls_handshake_failed = not auth
            self._over_ssl = True
            self._user_name = None
        else:
            super().connection_made(transport)
            # TODO context for auth
            self.peer = transport.get_extra_info('peername')
            self.transport = transport
            log.info('Peer: %s', repr(self.peer))
            # Process the client's requests.
            self._stream_writer = asyncio.StreamWriter(
                transport, self, self._stream_reader, self._loop)
            self._handler_coroutine = self.loop.create_task(
                self._handle_client())
    @asyncio.coroutine
    def push(self, msg):
        # Send one CRLF-terminated response line to the client.
        response = bytes(msg + '\r\n', 'utf-8')
        self._stream_writer.write(response)
        log.debug(msg)
        yield from self._stream_writer.drain()
    @asyncio.coroutine
    def _read_line(self):
        # Read one command line, enforcing the per-session timeout.
        line = yield from asyncio.wait_for(
            self._stream_reader.readline(),
            self._timeout,
            loop=self.loop)
        if not line:
            raise asyncio.IncompleteReadError(line, None)
        return line.decode('utf-8').rstrip('\r\n')
    @asyncio.coroutine
    def _handle_client(self):
        # Main per-connection command loop: greet, then dispatch each
        # command line to a pop_<NAME> method until EOF or QUIT.
        log.info('handling connection')
        yield from self.push(
            '+OK POP3 server ready {}'.format(self._greeting))
        while not self._stream_reader.at_eof():
            # XXX Put the line limit stuff into the StreamReader?
            try:
                line = yield from self._read_line()
            except TimeoutError:
                # NOTE(review): asyncio.wait_for raises asyncio.TimeoutError,
                # which equals the builtin TimeoutError only on Python 3.11+
                # -- confirm this clause is reachable on targeted versions.
                log.info('Close session by timeout')
                self.close()
                return
            try:
                log.info('Data: %r', line)
                if not line:
                    yield from self.push('-ERR bad syntax')
                    continue
                i = line.find(' ')
                if i < 0:
                    command = line.upper()
                    arg = None
                else:
                    command = line[:i].upper()
                    arg = line[i+1:].strip()
                if (self._tls_handshake_failed
                        and command not in ['CAPA', 'QUIT']):  # pragma: nossl
                    yield from self.push(
                        '-ERR Command refused due to lack of security')
                    continue
                if (self.require_starttls
                        and (not self._tls_protocol)
                        and (command not in ['STLS', 'CAPA', 'QUIT'])):
                    # RFC2595 part 2.2
                    yield from self.push(
                        '-ERR Must issue a STLS command first')
                    continue
                method = getattr(self, 'pop_' + command, None)
                if not method:
                    yield from self.push(
                        '-ERR command "%s" not recognized' % command)
                    continue
                yield from method(arg)
            except BaseCodedException as error:
                yield from self.push('-ERR [{}] {}'.format(
                    error.code, error.message))
            except POP3Exception as error:
                yield from self.push('-ERR {}'.format(error.message))
            except Exception as error:
                yield from self.push('-ERR [SYS/TEMP] ({}) {}'.format(
                    error.__class__.__name__, str(error)))
                log.exception('POP3 session exception')
                yield from self.handler.handle_exception(error)
    @asyncio.coroutine
    def close(self):
        # XXX this close is probably not quite right.
        if self._stream_writer:
            self._stream_writer.close()
    @asyncio.coroutine
    def commit_transaction(self):
        # Persist deletions on QUIT; with a zero retention period every
        # read message is removed as well.
        if self._mail_box and self._auth_passed:
            nums = self._deleted_messages
            if self._mail_box.retention_period == 0:
                for i in self._read_messages:
                    if i not in nums:
                        nums.append(i)
            msgs = [self._messages[i] for i in self._deleted_messages]
            yield from self._mail_box.delete_messages(msgs)
            yield from self._mail_box.commit()
            self._deleted_messages = []
            self._read_messages = []
            self._messages = None
    @asyncio.coroutine
    def _load_messages(self):
        # Lazily fetch the mailbox contents once per transaction.
        if not self._auth_passed:
            raise POP3Exception('Authorization required')
        if self._messages is not None:
            return
        self._messages = yield from self._mail_box.get_messages()
        assert isinstance(self._messages, list)
        self._message_ids = {}
        for i, message in enumerate(self._messages):
            self._message_ids[str(message.message_id)] = i
    def _get_message_by_num(self, arg):
        # Map a client-facing 1-based message number to
        # (0-based index, message object).
        try:
            arg = int(arg)
        except ValueError:
            raise POP3Exception('Syntax: Message number must be integer')
        if arg > len(self._messages) or arg < 1:
            raise POP3Exception('No such message')
        if arg in self._deleted_messages:
            # NOTE(review): _deleted_messages stores 0-based indices (see
            # pop_DELE) but 'arg' is still 1-based here -- looks like an
            # off-by-one; verify.
            raise POP3Exception('Message deleted')
        return arg - 1, self._messages[arg - 1]
    @asyncio.coroutine
    def capa_hook(self):
        """Allow subclasses to extend CAPA responses.
        This hook is called just before the final, non-continuing "."
        response. Subclasses can add additional to declare new capabilities
        """
        pass
    @asyncio.coroutine
    def pop_CAPA(self, arg):
        # RFC 2449 capability listing; content depends on TLS and auth state.
        yield from self.push('+OK Capability list follows')
        if self.tls_context and not self._tls_protocol:
            yield from self.push('STLS')
        auth = not self._auth_passed
        if self._tls_protocol and self._tls_handshake_failed:
            auth = False
        if self.require_starttls and not self._tls_protocol:
            auth = False
        if auth:
            yield from self.push('USER')
            if self.auth_mechanizms:
                yield from self.push('SASL {}'.format(
                    ' '.join(self.auth_mechanizms)))
        if self._auth_passed:
            yield from self.push('TOP')
            yield from self.push('UIDL')
            retention_period = self._mail_box.retention_period
            if retention_period is None:
                retention_period = 'NEVER'
            yield from self.push('EXPIRE {}'.format(retention_period))
            yield from self.push('LOGIN-DELAY {}'.format(
                self._mail_box.login_delay))
        else:
            yield from self.push('EXPIRE {} USER'.format(
                self.handler.retention_period))
            yield from self.push('LOGIN-DELAY {}'.format(
                self.handler.login_delay))
        # TODO Not really capable in sending responses, but must work
        yield from self.push('RESP-CODES')
        yield from self.push('AUTH-RESP-CODE')
        yield from self.push('PIPELINING')
        if self.__ident__:
            yield from self.push('IMPLEMENTATION {}'.format(self.__ident__))
        yield from self.capa_hook()
        yield from self.push('.')
    @asyncio.coroutine
    def pop_APOP(self, arg):
        # RFC 1939 APOP: MD5(greeting-token + shared secret) must match.
        if not arg or ' ' not in arg:
            raise POP3Exception('Syntax: APOP <user_name> <password_hash>')
        if self._auth_passed:
            raise POP3Exception('Already authenticated')
        user_name, user_hash = arg.split(' ', maxsplit=1)
        mail_box = yield from self.handler.handle_user(user_name)
        if not mail_box:
            raise AuthFailed()
        try:
            password = yield from mail_box.get_password()  # type: str
            digest = bytes(self._greeting + password, encoding='utf-8')
            digest_str = hashlib.md5(digest).hexdigest()
            if user_hash != digest_str:
                raise AuthFailed()
        except Exception:
            yield from mail_box.rollback()
            raise
        self._mail_box = mail_box
        self._auth_passed = True
        yield from self.push('+OK maildrop locked and ready')
    @asyncio.coroutine
    def pop_USER(self, arg):
        # Only remembers the name; validation happens in pop_PASS.
        if not arg:
            raise POP3Exception('Syntax: USER <name>')
        self._user_name = arg
        yield from self.push('+OK name is a valid mailbox')
    @asyncio.coroutine
    def pop_PASS(self, arg):
        if not arg:
            raise POP3Exception('Syntax: PASS <password>')
        if self._user_name is None:
            raise POP3Exception('USER command first')
        if self._auth_passed:
            raise POP3Exception('Already authenticated')
        mail_box = yield from self.handler.handle_user(self._user_name)
        if not mail_box:
            raise AuthFailed()
        try:
            yield from mail_box.check_password(arg)
        except Exception:
            yield from mail_box.rollback()
            raise
        self._mail_box = mail_box
        self._auth_passed = True
        yield from self.push('+OK maildrop locked and ready')
    @asyncio.coroutine
    def pop_DELE(self, arg):
        # Marks the message; actual deletion is deferred to QUIT.
        if not self._auth_passed:
            raise POP3Exception('Authorization required')
        if not arg:
            raise POP3Exception('Syntax: DELE <message_id>')
        yield from self._load_messages()
        arg, _ = self._get_message_by_num(arg)
        self._deleted_messages.append(arg)
        yield from self.push('+OK message deleted')
    def _get_stat(self):
        # Count and total size of the messages not marked as deleted.
        count = 0
        size = 0
        for i, message in enumerate(self._messages):
            if i not in self._deleted_messages:
                count += 1
                size += message.size
        return count, size
    @asyncio.coroutine
    def pop_LIST(self, arg):
        yield from self._load_messages()
        if arg:
            arg, message = self._get_message_by_num(arg)
            yield from self.push('+OK {} ({} octets)'.format(
                arg + 1, message.size))
        else:
            count, size = self._get_stat()
            yield from self.push(
                '+OK {} messages ({} octets)'.format(count, size))
            for i, message in enumerate(self._messages):
                if i not in self._deleted_messages:
                    yield from self.push('{} {}'.format(i + 1, message.size))
            yield from self.push('.')
    @asyncio.coroutine
    def pop_NOOP(self, arg):
        if arg:
            raise POP3Exception('Syntax: NOOP')
        yield from self.push('+OK')
    @asyncio.coroutine
    def pop_RSET(self, arg):
        # NOTE(review): clears the deletion marks but not _read_messages --
        # confirm read state is meant to survive an RSET.
        if not self._auth_passed:
            raise POP3Exception('Authorization required')
        yield from self._mail_box.rollback()
        self._deleted_messages = []
        yield from self.push('+OK')
    @asyncio.coroutine
    def pop_STAT(self, arg):
        if arg:
            raise POP3Exception('Syntax: STAT')
        yield from self._load_messages()
        count, size = self._get_stat()
        yield from self.push('+OK {} {}'.format(count, size))
    @asyncio.coroutine
    def pop_TOP(self, arg):
        # NOTE(review): no '+OK' line is pushed before the multiline data,
        # unlike pop_RETR -- verify clients accept this.
        if not arg or ' ' not in arg:
            raise POP3Exception('Syntax: TOP <message_id> <lines_count>')
        num, lines_count = arg.split(' ', maxsplit=1)
        try:
            lines_count = int(lines_count)
        except ValueError:
            raise POP3Exception('Syntax: Lines count must be integer')
        yield from self._load_messages()
        arg, message = self._get_message_by_num(num)
        data = yield from message.get_data()
        in_headers = True
        i = 0
        for line in data.splitlines(keepends=True):
            # Dump the RFC 2822 headers first.
            if in_headers:
                if not line:
                    # NOTE(review): with keepends=True a blank line is
                    # '\r\n' (truthy), so this branch looks unreachable and
                    # the header/body switch may never happen -- verify.
                    in_headers = False
            else:
                i += 1
                if i > lines_count:
                    break
            self._stream_writer.write(_quote_periods(line))
        yield from self._stream_writer.drain()
        self._stream_writer.write(b'\r\n.\r\n')
        log.info('Message %s (%s) %s first lines send',
                 arg, message.message_id, lines_count)
        yield from self._stream_writer.drain()
    @asyncio.coroutine
    def pop_RETR(self, arg):
        yield from self._load_messages()
        arg, message = self._get_message_by_num(arg)
        yield from self.push('+OK {} octets'.format(message.size))
        data = yield from message.get_data()
        self._stream_writer.write(_quote_periods(data))
        self._stream_writer.write(b'\r\n.\r\n')
        log.info('Message %s (%s) is send', arg, message.message_id)
        yield from self._stream_writer.drain()
        if arg not in self._read_messages:
            self._read_messages.append(arg)
    @asyncio.coroutine
    def pop_QUIT(self, arg):
        if arg:
            raise POP3Exception('Syntax: QUIT')
        yield from self.commit_transaction()
        yield from self.push('+OK Bye')
        # To prevent rollback on close
        self._auth_passed = False
        self._handler_coroutine.cancel()
        self.transport.close()
    @asyncio.coroutine
    def pop_STLS(self, arg):  # pragma: nossl
        # RFC 2595 STARTTLS: wrap the existing transport in an SSL protocol;
        # connection_made() is re-entered once the handshake completes.
        log.info('STARTTLS')
        if arg:
            raise POP3Exception('Syntax: STARTTLS')
        if not (self.tls_context and _has_ssl):
            raise POP3Exception('TLS not available')
        if self._auth_passed:
            # RFC 2595 4
            raise POP3Exception(
                'Command is only valid in non-authenticated state')
        yield from self.push('+OK Begin TLS negotiation')
        # Create SSL layer.
        self._tls_protocol = sslproto.SSLProtocol(
            self.loop,
            self,
            self.tls_context,
            None,
            server_side=True)
        # Reconfigure transport layer.
        socket_transport = self.transport
        socket_transport._protocol = self._tls_protocol
        # Reconfigure protocol layer. Cant understand why app transport is
        # protected property, if it MUST be used externally.
        self.transport = self._tls_protocol._app_transport
        # Start handshake.
        self._tls_protocol.connection_made(socket_transport)
    @asyncio.coroutine
    def pop_UIDL(self, arg):
        # NOTE(review): replies use the 0-based enumerate index while LIST
        # uses 1-based numbers, and deleted messages are not skipped --
        # verify against RFC 1939 UIDL expectations.
        yield from self._load_messages()
        if arg:
            arg, message = self._get_message_by_num(arg)
            yield from self.push('+OK {} {}'.format(arg, message.message_id))
        else:
            yield from self.push('+OK')
            for i, message in enumerate(self._messages):
                yield from self.push('{} {}'.format(i, message.message_id))
            yield from self.push('.')
    @asyncio.coroutine
    def pop_AUTH(self, arg):
        # RFC 5034 SASL dispatch to an auth_<MECH> coroutine.
        if not arg:
            raise POP3Exception('Unrecognized authentication type')
        if ' ' in arg:
            # NOTE(review): the mechanism name is upper-cased only in the
            # no-initial-response branch below -- confirm case handling.
            name, initial = arg.split(' ', maxsplit=1)
        else:
            name = arg.upper()
            initial = None
        if name not in self.auth_mechanizms:
            raise POP3Exception('Unrecognized authentication type')
        if self._auth_passed:
            raise POP3Exception('Already authenticated')
        method = getattr(self, 'auth_' + name, None)
        if not method:
            raise POP3Exception('[SYS/PERM] Authentication type not supported')
        mail_box = yield from method(initial)
        if not mail_box:
            raise AuthFailed()
        self._auth_passed = True
        self._mail_box = mail_box
        yield from self.push('+OK {} authentication successful'.format(name))
    @asyncio.coroutine
    def auth_PLAIN(self, arg):
        # RFC 4616 PLAIN: base64('authzid \0 authcid \0 password').
        if not arg:
            yield from self.push('+')
            arg = yield from self._read_line()
        arg = base64.b64decode(arg)
        params = arg.split(b'\x00')
        _, authcid, passwd = [p.decode('utf-8') for p in params]
        mail_box = yield from self.handler.handle_user(authcid)
        if not mail_box:
            return
        try:
            yield from mail_box.check_password(passwd)
        except Exception:
            yield from mail_box.rollback()
            raise
        return mail_box
| {
"repo_name": "kozzztik/aiopop3",
"path": "aiopop3/server.py",
"copies": "1",
"size": "19834",
"license": "mit",
"hash": 5524269180360024000,
"line_mean": 35.5267034991,
"line_max": 79,
"alpha_frac": 0.5649894121,
"autogenerated": false,
"ratio": 4.182623365668494,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 543
} |
""" A Port graph defines a graph whose edges connect
to identified ports on vertices instead of directly to
a given vertex.
"""
from openalea.container.id_generator import IdGenerator
from openalea.container.property_graph import (PropertyGraph,
InvalidVertex,
InvalidEdge)
from openalea.container.graph import GraphError
class InvalidPort(GraphError, KeyError):
    """Exception raised when a wrong port id is provided."""
class Port(object):
    """Entry point to a vertex.

    Records which vertex owns the port, the port's local id on that
    vertex, and whether it is an output port.
    """
    def __init__(self, vid, local_pid, is_out_port):
        # internal data to access from dataflow
        self.vid, self.local_pid, self.is_out_port = (vid,
                                                      local_pid,
                                                      is_out_port)
class PortGraph(PropertyGraph):
    """ A Port graph defines a graph whose edges connect
    to identified ports on vertices instead of directly to
    a given vertex.
    """
    # NOTE: vertex/port ids are handed out by IdGenerator and are not
    # guaranteed to be integers, so every error message below formats them
    # with %s.  Using %d would raise a TypeError for non-int ids and mask
    # the intended InvalidVertex/InvalidPort exception.
    def __init__(self):
        PropertyGraph.__init__(self)
        # pid -> Port record
        self._ports = {}
        self._pid_generator = IdGenerator()
        self.add_edge_property("_source_port")
        self.add_edge_property("_target_port")
        self.add_vertex_property("_ports")
        self.add_vertex_property("_actor")
    ####################################################
    #
    #        edge port view
    #
    ####################################################
    def source_port(self, eid):
        """ Out port of the source vertex of the edge.
        args:
            - eid (eid): id of edge
        return:
            - (pid): id of port
        """
        return self.edge_property("_source_port")[eid]
    def target_port(self, eid):
        """ In port of the target vertex of the edge.
        args:
            - eid (eid): id of edge
        return:
            - (pid): id of port
        """
        return self.edge_property("_target_port")[eid]
    ####################################################
    #
    #        vertex port view
    #
    ####################################################
    def ports(self, vid=None):
        """ Iterates on all ports.
        If vid is None, iterates on all ports
        in the dataflow.
        Else, iterates only on the ports of
        the given vertex.
        args:
            - vid (vid): id of vertex
        return:
            - (iter of pid)
        """
        if vid is None:
            return iter(self._ports)
        else:
            try:
                ports = self.vertex_property("_ports")[vid]
                return iter(ports)
            except KeyError:
                raise InvalidVertex("vertex %s does not exist" % vid)
    def in_ports(self, vid=None):
        """ Iterates on all in ports.
        If vid is None, iterates on all in ports
        in the dataflow.
        Else, iterates only on the in ports of
        the given vertex.
        args:
            - vid (vid): id of vertex
        return:
            - (iter of pid)
        """
        for pid in self.ports(vid):
            if self.is_in_port(pid):
                yield pid
    def out_ports(self, vid=None):
        """ Iterates on all out ports.
        If vid is None, iterates on all out ports
        in the dataflow.
        Else, iterates only on the out ports of
        the given vertex.
        args:
            - vid (vid): id of vertex
        return:
            - (iter of pid)
        """
        for pid in self.ports(vid):
            if self.is_out_port(pid):
                yield pid
    ####################################################
    #
    #        port view
    #
    ####################################################
    def is_in_port(self, pid):
        """ Test whether a port is an input for its vertex.
        args:
            - pid (pid): id of port to consider
        return:
            - (bool)
        """
        try:
            return not self._ports[pid].is_out_port
        except KeyError:
            raise InvalidPort("port %s does not exist" % pid)
    def is_out_port(self, pid):
        """ Test whether a port is an output for its vertex.
        args:
            - pid (pid): id of port to consider
        return:
            - (bool)
        """
        try:
            return self._ports[pid].is_out_port
        except KeyError:
            raise InvalidPort("port %s does not exist" % pid)
    def vertex(self, pid):
        """ Find id of the vertex who own the port.
        args:
            - pid (pid): id of port to consider
        return:
            - (vid)
        """
        try:
            return self._ports[pid].vid
        except KeyError:
            raise InvalidPort("port %s does not exist" % pid)
    def connected_edges(self, pid):
        """ Iterate on all edges connected to this port.
        args:
            - pid (pid): id of port to consider
        return:
            - (iter of eid)
        """
        vid = self.vertex(pid)
        # only keep the edges of the vertex actually attached to this port
        if self.is_out_port(pid):
            for eid in self.out_edges(vid):
                if self.source_port(eid) == pid:
                    yield eid
        else:
            for eid in self.in_edges(vid):
                if self.target_port(eid) == pid:
                    yield eid
    def connected_ports(self, pid):
        """ Iterate on all ports connected to this port.
        args:
            - pid (pid): id of port to consider
        return:
            - (iter of pid)
        """
        if self.is_out_port(pid):
            for eid in self.connected_edges(pid):
                yield self.target_port(eid)
        else:
            for eid in self.connected_edges(pid):
                yield self.source_port(eid)
    def nb_connections(self, pid):
        """ Compute number of edges connected to a given port.
        args:
            - pid (pid): id of port
        return:
            - (int)
        """
        return len(tuple(self.connected_edges(pid)))
    ####################################################
    #
    #        local port concept
    #
    ####################################################
    def local_id(self, pid):
        """ Find local id of a port.
        args:
            - pid (pid): id of port
        return:
            - (local pid)
        """
        try:
            return self._ports[pid].local_pid
        except KeyError:
            raise InvalidPort("port %s does not exist" % pid)
    def in_port(self, vid, local_pid):
        """ Find global port id of a given input port.
        args:
            - vid (vid): id of vertex who own the port
            - local_pid (pid): local id of the port
        return:
            - (pid)
        """
        for pid in self.in_ports(vid):
            if self._ports[pid].local_pid == local_pid:
                return pid
        msg = "local pid '%s' does not exist for vertex %s" % (local_pid, vid)
        raise InvalidPort(msg)
    def out_port(self, vid, local_pid):
        """ Find global port id of a given output port.
        args:
            - vid (vid): id of vertex who own the port
            - local_pid (pid): local id of the port
        return:
            - (pid)
        """
        for pid in self.out_ports(vid):
            if self._ports[pid].local_pid == local_pid:
                return pid
        msg = "local pid '%s' does not exist for vertex %s" % (local_pid, vid)
        raise InvalidPort(msg)
    #####################################################
    #
    #        associated actor
    #
    #####################################################
    def actor(self, vid):
        """ Return actor associated to a given vertex.
        return:
            - (IActor)
        """
        try:
            return self.vertex_property("_actor")[vid]
        except KeyError:
            raise InvalidVertex("vertex %s does not exist" % vid)
    def set_actor(self, vid, actor):
        """ Associate an actor to a given vertex.
        args:
            - vid (vid): id of vertex
            - actor (IActor): a function like type of object
        """
        if vid not in self:
            raise InvalidVertex("vertex %s does not exist" % vid)
        if actor is not None:
            # the actor's declared inputs/outputs must match the local ids
            # of the vertex ports exactly
            inputs = set(actor.inputs())
            inports = set(self.local_id(pid) for pid in self.in_ports(vid))
            if inputs != inports:
                msg = "Ports of vertex and inputs of node do not match"
                raise InvalidPort(msg)
            outputs = set(actor.outputs())
            outports = set(self.local_id(pid) for pid in self.out_ports(vid))
            if outputs != outports:
                msg = "Ports of vertex and outputs of node do not match"
                raise InvalidPort(msg)
        self.vertex_property("_actor")[vid] = actor
    # TODO: one day update this function to accept already existing
    #      vertices with no actor and create only relevant ports
    def add_actor(self, actor, vid=None):
        """ Create a vertex and the corresponding ports
        and associate it with the given actor.
        args:
            - actor (IActor): a function like type of object
            - vid (vid): id of vertex to use. If None one will
                         be created.
        return:
            - vid (vid): id of the vertex that was created
        """
        vid = self.add_vertex(vid)
        try:
            for key in actor.inputs():
                self.add_in_port(vid, key)
            for key in actor.outputs():
                self.add_out_port(vid, key)
            self.set_actor(vid, actor)
        except AttributeError:
            # actor does not implement the IActor protocol; roll back
            self.remove_vertex(vid)
            raise
        return vid
    #####################################################
    #
    #        mutable concept
    #
    #####################################################
    def add_in_port(self, vid, local_pid, pid=None):
        """ Add a new input port to a vertex.
        args:
            - vid (vid): id of vertex who will own the port.
            - local_pid (pid): local identifier for the port.
            - pid (pid): global pid for the port. If None
                         a new one will be created
        return:
            - pid (pid): global id of the created port.
        """
        if vid not in self:
            raise InvalidVertex("vertex %s does not exist" % vid)
        for tpid in self.in_ports(vid):
            if self.local_id(tpid) == local_pid:
                msg = "port %s already exists for this vertex" % local_pid
                raise InvalidPort(msg)
        pid = self._pid_generator.get_id(pid)
        self._ports[pid] = Port(vid, local_pid, False)
        self.vertex_property("_ports")[vid].add(pid)
        return pid
    def add_out_port(self, vid, local_pid, pid=None):
        """ Add a new output port to a vertex.
        args:
            - vid (vid): id of vertex who will own the port.
            - local_pid (pid): local identifier for the port.
            - pid (pid): global pid for the port. If None
                         a new one will be created
        return:
            - pid (pid): global id of the created port.
        """
        if vid not in self:
            raise InvalidVertex("vertex %s does not exist" % vid)
        for tpid in self.out_ports(vid):
            if self.local_id(tpid) == local_pid:
                msg = "port %s already exists for this vertex" % local_pid
                raise InvalidPort(msg)
        pid = self._pid_generator.get_id(pid)
        self._ports[pid] = Port(vid, local_pid, True)
        self.vertex_property("_ports")[vid].add(pid)
        return pid
    def remove_port(self, pid):
        """ Remove a port and all connections
        attached to this port.
        args:
            - pid (pid): global id of port to remove
        """
        # copy to a list: remove_edge mutates the structure we iterate
        for eid in list(self.connected_edges(pid)):
            self.remove_edge(eid)
        self.vertex_property("_ports")[self.vertex(pid)].remove(pid)
        self._pid_generator.release_id(pid)
        del self._ports[pid]
    def add_edge(self, edge=None, eid=None):
        """ Usage of this method is forbidden
        """
        # edges in a PortGraph must carry port information; use connect()
        raise UserWarning("Call connect instead")
    # TODO: add tests to prevent connections on same vertex?
    # TODO: add tests to prevent duplicating connection?
    def connect(self, source_pid, target_pid, eid=None):
        """ Connect two ports together.
        Connection can only be created between an output port
        and an input port.
        args:
            - source_pid (pid): global id of output port.
            - target_pid (pid): global id of input port.
            - eid (eid): edge id to use. If None, a new one
                         will be assigned.
        return:
            - eid (eid): id of edge used to make the connection.
        """
        if not self.is_out_port(source_pid):
            msg = "source_pid %s is not an output port" % str(source_pid)
            raise InvalidPort(msg)
        if not self.is_in_port(target_pid):
            msg = "target_pid %s is not an input port" % str(target_pid)
            raise InvalidPort(msg)
        eid = PropertyGraph.add_edge(self,
                                     self.vertex(source_pid),
                                     self.vertex(target_pid),
                                     eid)
        self.edge_property("_source_port")[eid] = source_pid
        self.edge_property("_target_port")[eid] = target_pid
        return eid
    def add_vertex(self, vid=None):
        vid = PropertyGraph.add_vertex(self, vid)
        self.vertex_property("_ports")[vid] = set()
        self.set_actor(vid, None)
        return vid
    add_vertex.__doc__ = PropertyGraph.add_vertex.__doc__
    def remove_vertex(self, vid):
        # copy to a list: remove_port mutates the port set we iterate
        for pid in list(self.ports(vid)):
            self.remove_port(pid)
        PropertyGraph.remove_vertex(self, vid)
    remove_vertex.__doc__ = PropertyGraph.remove_vertex.__doc__
    def clear(self):
        self._ports.clear()
        self._pid_generator = IdGenerator()
        PropertyGraph.clear(self)
    clear.__doc__ = PropertyGraph.clear.__doc__
| {
"repo_name": "revesansparole/oaworkflow",
"path": "src/openalea/workflow/port_graph.py",
"copies": "1",
"size": "14371",
"license": "mit",
"hash": -1720437441624226000,
"line_mean": 28.2688391039,
"line_max": 78,
"alpha_frac": 0.5000347923,
"autogenerated": false,
"ratio": 4.356168535919975,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00006571580141744745,
"num_lines": 491
} |
# A port of https://github.com/phoboslab/jsmpeg-vnc/blob/master/source/grabber.c to python
# License information (GPLv3) is here https://github.com/phoboslab/jsmpeg-vnc/blob/master/README.md
from ctypes import Structure, c_int, POINTER, WINFUNCTYPE, windll, WinError, sizeof, c_wchar_p
from ctypes.wintypes import BOOL, HWND, RECT, HDC, HBITMAP, HGDIOBJ, DWORD, LONG, WORD, UINT, LPVOID, LPSTR
import numpy as np
# Win32 GDI constants used below (see wingdi.h)
SRCCOPY = 0x00CC0020      # BitBlt raster op: copy source rectangle directly
CAPTUREBLT = 0x40000000   # BitBlt flag: include layered windows in the capture
DIB_RGB_COLORS = 0        # GetDIBits: color table holds literal RGB values
BI_RGB = 0                # biCompression: uncompressed RGB bitmap
class BITMAPINFOHEADER(Structure):
    # ctypes mirror of the Win32 BITMAPINFOHEADER struct (wingdi.h);
    # filled in by Grabber.__init__ and passed to GetDIBits to describe
    # the requested pixel layout.
    _fields_ = [('biSize', DWORD),
                ('biWidth', LONG),
                ('biHeight', LONG),
                ('biPlanes', WORD),
                ('biBitCount', WORD),
                ('biCompression', DWORD),
                ('biSizeImage', DWORD),
                ('biXPelsPerMeter', LONG),
                ('biYPelsPerMeter', LONG),
                ('biClrUsed', DWORD),
                ('biClrImportant', DWORD)]
def err_on_zero_or_null_check(result, func, args):
    """ctypes ``errcheck`` hook: raise WinError() on a zero/NULL result.

    On success the (possibly transformed) argument tuple is returned so
    output parameters propagate to the caller.
    """
    if result:
        return args
    raise WinError()
def quick_win_define(name, output, *args, **kwargs):
    """Build a ctypes wrapper for a Win32 function from a 'dll.func' name.

    Keyword options:
        params -- flags for each parameter (wrapped into 1-tuples for the
                  WINFUNCTYPE paramflags argument, e.g. 2 = output param)
        err    -- errcheck hook; defaults to err_on_zero_or_null_check
    """
    dllname, fname = name.split('.')
    flags = kwargs.get('params', None)
    if flags:
        flags = tuple((flag,) for flag in flags)
    prototype = WINFUNCTYPE(output, *args)
    func = prototype((fname, getattr(windll, dllname)), flags)
    errcheck = kwargs.get('err', err_on_zero_or_null_check)
    if errcheck:
        func.errcheck = errcheck
    return func
# ctypes bindings for the user32/gdi32 functions Grabber needs.
# params=(1, 2) marks the second argument as an output parameter, so the
# wrapper returns the filled RECT directly (see GetWindowRect usage below).
GetClientRect = quick_win_define('user32.GetClientRect', BOOL, HWND, POINTER(RECT), params=(1, 2))
GetDC = quick_win_define('user32.GetDC', HDC, HWND)
CreateCompatibleDC = quick_win_define('gdi32.CreateCompatibleDC', HDC, HDC)
CreateCompatibleBitmap = quick_win_define('gdi32.CreateCompatibleBitmap', HBITMAP, HDC, c_int, c_int)
ReleaseDC = quick_win_define('user32.ReleaseDC', c_int, HWND, HDC)
DeleteDC = quick_win_define('gdi32.DeleteDC', BOOL, HDC)
DeleteObject = quick_win_define('gdi32.DeleteObject', BOOL, HGDIOBJ)
SelectObject = quick_win_define('gdi32.SelectObject', HGDIOBJ, HDC, HGDIOBJ)
BitBlt = quick_win_define('gdi32.BitBlt', BOOL, HDC, c_int, c_int, c_int, c_int, HDC, c_int, c_int, DWORD)
GetDIBits = quick_win_define('gdi32.GetDIBits', c_int, HDC, HBITMAP, UINT, UINT, LPVOID, POINTER(BITMAPINFOHEADER), UINT)
GetDesktopWindow = quick_win_define('user32.GetDesktopWindow', HWND)
GetWindowRect = quick_win_define('user32.GetWindowRect', BOOL, HWND, POINTER(RECT), params=(1, 2))
FindWindow = quick_win_define('user32.FindWindowW', HWND, c_wchar_p, c_wchar_p)
class Grabber(object):
    """Screen grabber using Win32 GDI bit-blitting.

    Locates a window by title, then repeatedly copies a hard-coded
    1920x1080 region of the desktop DC (offset to that window's client
    area) into a reusable bitmap and reads the pixels with GetDIBits.
    """
    def __init__(self, window_title, with_alpha=False, bbox=None):
        # NOTE(review): bbox is currently unused -- the cropping logic is
        # commented out below and the capture size is hard-coded.
        hwnd = FindWindow(None, window_title)
        print(hwnd)
        # blit from the desktop DC, offset to the found window's position
        window = GetDesktopWindow()
        self.window = window
        rect = GetWindowRect(hwnd)
        #for project cars settings #note in game resolution
        #Make sure in display settings the "change the size of text, apps and other items" is 100% -Windows 10
        #Note I'm playing the game in windowed mode at 1920 x 1080
        self.width = 1920
        self.height = 1080
        #Offset, May need to adjust these settings
        # offsets skip the window border/title bar -- presumably tuned for
        # the default Windows 10 frame; TODO confirm on other themes
        self.x = rect.left + 8 #+ 10
        self.y = rect.top + 31 #+ 45
        print('w:{} h:{}'.format(self.width, self.height))
        # if bbox:
        #     bbox = [bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]]
        #     if not bbox[2] or not bbox[3]:
        #         bbox[2] = self.width - bbox[0]
        #         bbox[3] = self.height - bbox[1]
        #     self.x, self.y, self.width, self.height = bbox
        # else:
        #     self.x = 0
        #     self.y = 0
        self.windowDC = GetDC(window)
        self.memoryDC = CreateCompatibleDC(self.windowDC)
        self.bitmap = CreateCompatibleBitmap(self.windowDC, self.width, self.height)
        self.bitmapInfo = BITMAPINFOHEADER()
        self.bitmapInfo.biSize = sizeof(BITMAPINFOHEADER)
        self.bitmapInfo.biPlanes = 1
        self.bitmapInfo.biBitCount = 32 if with_alpha else 24
        self.bitmapInfo.biWidth = self.width
        # negative height requests a top-down DIB (row 0 at the top)
        self.bitmapInfo.biHeight = -self.height
        self.bitmapInfo.biCompression = BI_RGB
        self.bitmapInfo.biSizeImage = 0
        self.channels = 4 if with_alpha else 3
        self.closed = False
    def __del__(self):
        # best-effort cleanup so GDI handles are not leaked
        try:
            self.close()
        except:
            pass
    def close(self):
        """Release the screen DC and delete the GDI objects (idempotent)."""
        if self.closed:
            return
        ReleaseDC(self.window, self.windowDC)
        DeleteDC(self.memoryDC)
        DeleteObject(self.bitmap)
        self.closed = True
    def grab(self, output=None):
        """Capture one frame into a (height, width, channels) uint8 array.

        If output is given it is filled in place and must already have
        exactly that shape; otherwise a new array is allocated.
        """
        if self.closed:
            raise ValueError('Grabber already closed')
        if output is None:
            output = np.empty((self.height, self.width, self.channels), dtype='uint8')
        else:
            if output.shape != (self.height, self.width, self.channels):
                raise ValueError('Invalid output dimentions')
        SelectObject(self.memoryDC, self.bitmap)
        BitBlt(self.memoryDC, 0, 0, self.width, self.height, self.windowDC, self.x, self.y, SRCCOPY)
        GetDIBits(self.memoryDC, self.bitmap, 0, self.height, output.ctypes.data, self.bitmapInfo, DIB_RGB_COLORS)
        return output
# if __name__ == "__main__":
# import cv2
# import ctypes
# import time
# from datetime import datetime
# folder_name = 'F:/Project_Cars_Data/Raw2'
# time.sleep(5)
# #from PIL import ImageGrab
# #bbox=(0, 40, 800, 600)
# handle = ctypes.windll.user32.GetForegroundWindow()
# print(handle)
# grabber = Grabber(window=handle)
# for i in range(10):
# pic = grabber.grab()
# #gray_image = cv2.cvtColor(pic, cv2.COLOR_BGR2GRAY)
# #gray_image = cv2.resize(gray_image, (160,120))
# save_file_name = datetime.now().strftime('%Y-%m-%d_%H-%M-%S-%f')
# cv2.imwrite(folder_name + '/' + save_file_name + '-image.png', pic)
# #gray_image = None
# #pic = None
# #time.sleep(0.2)
# #cv2.imshow("image", pic)
# #cv2.waitKey()
# #time.sleep(3)
# #s = time.clock()
# #for i in range(10):
# # pic = np.array(ImageGrab.grab(bbox=(0, 40, 800, 640)))
# #e = time.clock()
# #print (e - s) / 10
# #cv2.imwrite('b.tif', pic)
# #import ctypes
# #import time
# #time.sleep(2)
# #handle = ctypes.windll.user32.GetForegroundWindow()
# #print(handle) | {
"repo_name": "ThisIsSoSteve/Project-Tensorflow-Cars",
"path": "common/grabber.py",
"copies": "1",
"size": "6439",
"license": "mit",
"hash": -5624067041611167000,
"line_mean": 35.1797752809,
"line_max": 121,
"alpha_frac": 0.6115856499,
"autogenerated": false,
"ratio": 3.184470820969337,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.927428773740867,
"avg_score": 0.004353746692133509,
"num_lines": 178
} |
# A port of https://github.com/python/cpython/blob/e42b7051/Lib/heapq.py
from __future__ import print_function, absolute_import, division
import heapq as hq
from numba import types
from numba.errors import TypingError
from numba.extending import overload, register_jitable
@register_jitable
def _siftdown(heap, startpos, pos):
    """Bubble heap[pos] up toward startpos until its parent is not larger,
    restoring the min-heap invariant (port of CPython heapq._siftdown).
    """
    newitem = heap[pos]
    while pos > startpos:
        parentpos = (pos - 1) >> 1
        parent = heap[parentpos]
        if newitem < parent:
            heap[pos] = parent
            pos = parentpos
            continue
        break
    heap[pos] = newitem
@register_jitable
def _siftup(heap, pos):
    """Sift heap[pos] down to a leaf by following the smaller child, then
    sift the displaced item back up (CPython heapq._siftup strategy).
    """
    endpos = len(heap)
    startpos = pos
    newitem = heap[pos]
    childpos = 2 * pos + 1
    while childpos < endpos:
        rightpos = childpos + 1
        if rightpos < endpos and not heap[childpos] < heap[rightpos]:
            childpos = rightpos
        heap[pos] = heap[childpos]
        pos = childpos
        childpos = 2 * pos + 1
    heap[pos] = newitem
    _siftdown(heap, startpos, pos)
@register_jitable
def _siftdown_max(heap, startpos, pos):
    """Max-heap variant of _siftdown (comparison direction reversed)."""
    newitem = heap[pos]
    while pos > startpos:
        parentpos = (pos - 1) >> 1
        parent = heap[parentpos]
        if parent < newitem:
            heap[pos] = parent
            pos = parentpos
            continue
        break
    heap[pos] = newitem
@register_jitable
def _siftup_max(heap, pos):
    """Max-heap variant of _siftup: follows the larger child to a leaf."""
    endpos = len(heap)
    startpos = pos
    newitem = heap[pos]
    childpos = 2 * pos + 1
    while childpos < endpos:
        rightpos = childpos + 1
        if rightpos < endpos and not heap[rightpos] < heap[childpos]:
            childpos = rightpos
        heap[pos] = heap[childpos]
        pos = childpos
        childpos = 2 * pos + 1
    heap[pos] = newitem
    _siftdown_max(heap, startpos, pos)
@register_jitable
def reversed_range(x):
    """Return range(x - 1, -1, -1): the indices of range(x), backwards."""
    # analogous to reversed(range(x))
    return range(x - 1, -1, -1)
@register_jitable
def _heapify_max(x):
    """Transform list x into a max-heap, in place."""
    n = len(x)
    for i in reversed_range(n // 2):
        _siftup_max(x, i)
@register_jitable
def _heapreplace_max(heap, item):
    """Pop and return the current maximum, then insert item (max-heap)."""
    returnitem = heap[0]
    heap[0] = item
    _siftup_max(heap, 0)
    return returnitem
def assert_heap_type(heap):
    """Typing-time guard: heap must be a list whose dtype supports '<'
    (complex elements are rejected with a CPython-style message).
    """
    if not isinstance(heap, types.List):
        raise TypingError('heap argument must be a list')
    dt = heap.dtype
    if isinstance(dt, types.Complex):
        msg = ("'<' not supported between instances "
               "of 'complex' and 'complex'")
        raise TypingError(msg)
def assert_item_type_consistent_with_heap_type(heap, item):
    """Typing-time guard: the pushed item's type must equal the heap dtype."""
    if not heap.dtype == item:
        raise TypingError('heap type must be the same as item type')
@overload(hq.heapify)
def hq_heapify(x):
    """numba overload of heapq.heapify: in-place min-heap transform."""
    assert_heap_type(x)
    def hq_heapify_impl(x):
        n = len(x)
        for i in reversed_range(n // 2):
            _siftup(x, i)
    return hq_heapify_impl
@overload(hq.heappop)
def hq_heappop(heap):
    """numba overload of heapq.heappop: pop and return the smallest item."""
    assert_heap_type(heap)
    def hq_heappop_impl(heap):
        lastelt = heap.pop()
        if heap:
            returnitem = heap[0]
            heap[0] = lastelt
            _siftup(heap, 0)
            return returnitem
        return lastelt
    return hq_heappop_impl
@overload(hq.heappush)
def heappush(heap, item):
    """numba overload of heapq.heappush: push item, keeping the invariant."""
    assert_heap_type(heap)
    assert_item_type_consistent_with_heap_type(heap, item)
    def hq_heappush_impl(heap, item):
        heap.append(item)
        _siftdown(heap, 0, len(heap) - 1)
    return hq_heappush_impl
@overload(hq.heapreplace)
def heapreplace(heap, item):
    """numba overload of heapq.heapreplace: pop smallest, then push item."""
    assert_heap_type(heap)
    assert_item_type_consistent_with_heap_type(heap, item)
    def hq_heapreplace(heap, item):
        returnitem = heap[0]
        heap[0] = item
        _siftup(heap, 0)
        return returnitem
    return hq_heapreplace
@overload(hq.heappushpop)
def heappushpop(heap, item):
    """numba overload of heapq.heappushpop: push item, then pop smallest."""
    assert_heap_type(heap)
    assert_item_type_consistent_with_heap_type(heap, item)
    def hq_heappushpop_impl(heap, item):
        if heap and heap[0] < item:
            item, heap[0] = heap[0], item
            _siftup(heap, 0)
        return item
    return hq_heappushpop_impl
def check_input_types(n, iterable):
    """Typing-time guard shared by nsmallest/nlargest: n must be an
    integer (or bool), iterable a Sequence or Array.
    """
    if not isinstance(n, (types.Integer, types.Boolean)):
        raise TypingError("First argument 'n' must be an integer")
    # heapq also accepts 1.0 (but not 0.0, 2.0, 3.0...) but
    # this isn't replicated
    if not isinstance(iterable, (types.Sequence, types.Array)):
        raise TypingError("Second argument 'iterable' must be iterable")
@overload(hq.nsmallest)
def nsmallest(n, iterable):
    """numba overload of heapq.nsmallest: the n smallest elements, ascending.

    Uses a bounded max-heap of (element, order) pairs so ties keep their
    original order, as in CPython's implementation.
    """
    check_input_types(n, iterable)
    def hq_nsmallest_impl(n, iterable):
        if n == 0:
            # empty list typed like iterable's elements
            return [iterable[0] for _ in range(0)]
        elif n == 1:
            out = min(iterable)
            return [out]
        size = len(iterable)
        if n >= size:
            return sorted(iterable)[:n]
        it = iter(iterable)
        result = [(elem, i) for i, elem in zip(range(n), it)]
        _heapify_max(result)
        top = result[0][0]
        order = n
        for elem in it:
            if elem < top:
                _heapreplace_max(result, (elem, order))
                top, _order = result[0]
                order += 1
        result.sort()
        return [elem for (elem, order) in result]
    return hq_nsmallest_impl
@overload(hq.nlargest)
def nlargest(n, iterable):
    """numba overload of heapq.nlargest: the n largest elements, descending.

    Uses a bounded min-heap of (element, order) pairs; negative order
    values keep ties in original order, as in CPython's implementation.
    """
    check_input_types(n, iterable)
    def hq_nlargest_impl(n, iterable):
        if n == 0:
            # empty list typed like iterable's elements
            return [iterable[0] for _ in range(0)]
        elif n == 1:
            out = max(iterable)
            return [out]
        size = len(iterable)
        if n >= size:
            return sorted(iterable)[::-1][:n]
        it = iter(iterable)
        result = [(elem, i) for i, elem in zip(range(0, -n, -1), it)]
        hq.heapify(result)
        top = result[0][0]
        order = -n
        for elem in it:
            if top < elem:
                hq.heapreplace(result, (elem, order))
                top, _order = result[0]
                order -= 1
        result.sort(reverse=True)
        return [elem for (elem, order) in result]
    return hq_nlargest_impl
| {
"repo_name": "jriehl/numba",
"path": "numba/targets/heapq.py",
"copies": "1",
"size": "6201",
"license": "bsd-2-clause",
"hash": 2174175737967230500,
"line_mean": 22.2247191011,
"line_max": 72,
"alpha_frac": 0.5786163522,
"autogenerated": false,
"ratio": 3.3338709677419356,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9412487319941936,
"avg_score": 0,
"num_lines": 267
} |
# A port of https://github.com/python/cpython/blob/e42b7051/Lib/heapq.py
import heapq as hq
from numba.core import types
from numba.core.errors import TypingError
from numba.core.extending import overload, register_jitable
@register_jitable
def _siftdown(heap, startpos, pos):
    """Bubble heap[pos] up toward startpos until its parent is not larger,
    restoring the min-heap invariant (port of CPython heapq._siftdown).
    """
    newitem = heap[pos]
    while pos > startpos:
        parentpos = (pos - 1) >> 1
        parent = heap[parentpos]
        if newitem < parent:
            heap[pos] = parent
            pos = parentpos
            continue
        break
    heap[pos] = newitem
@register_jitable
def _siftup(heap, pos):
    """Sift heap[pos] down to a leaf by following the smaller child, then
    sift the displaced item back up (CPython heapq._siftup strategy).
    """
    endpos = len(heap)
    startpos = pos
    newitem = heap[pos]
    childpos = 2 * pos + 1
    while childpos < endpos:
        rightpos = childpos + 1
        if rightpos < endpos and not heap[childpos] < heap[rightpos]:
            childpos = rightpos
        heap[pos] = heap[childpos]
        pos = childpos
        childpos = 2 * pos + 1
    heap[pos] = newitem
    _siftdown(heap, startpos, pos)
@register_jitable
def _siftdown_max(heap, startpos, pos):
    """Max-heap variant of _siftdown (comparison direction reversed)."""
    newitem = heap[pos]
    while pos > startpos:
        parentpos = (pos - 1) >> 1
        parent = heap[parentpos]
        if parent < newitem:
            heap[pos] = parent
            pos = parentpos
            continue
        break
    heap[pos] = newitem
@register_jitable
def _siftup_max(heap, pos):
    """Max-heap variant of _siftup: follows the larger child to a leaf."""
    endpos = len(heap)
    startpos = pos
    newitem = heap[pos]
    childpos = 2 * pos + 1
    while childpos < endpos:
        rightpos = childpos + 1
        if rightpos < endpos and not heap[rightpos] < heap[childpos]:
            childpos = rightpos
        heap[pos] = heap[childpos]
        pos = childpos
        childpos = 2 * pos + 1
    heap[pos] = newitem
    _siftdown_max(heap, startpos, pos)
@register_jitable
def reversed_range(x):
    """Return range(x - 1, -1, -1): the indices of range(x), backwards."""
    # analogous to reversed(range(x))
    return range(x - 1, -1, -1)
@register_jitable
def _heapify_max(x):
    """Transform list x into a max-heap, in place."""
    n = len(x)
    for i in reversed_range(n // 2):
        _siftup_max(x, i)
@register_jitable
def _heapreplace_max(heap, item):
    """Pop and return the current maximum, then insert item (max-heap)."""
    returnitem = heap[0]
    heap[0] = item
    _siftup_max(heap, 0)
    return returnitem
def assert_heap_type(heap):
    """Typing-time guard: heap must be a list whose dtype supports '<'
    (complex elements are rejected with a CPython-style message).
    """
    if not isinstance(heap, types.List):
        raise TypingError('heap argument must be a list')
    dt = heap.dtype
    if isinstance(dt, types.Complex):
        msg = ("'<' not supported between instances "
               "of 'complex' and 'complex'")
        raise TypingError(msg)
def assert_item_type_consistent_with_heap_type(heap, item):
    """Typing-time guard: the pushed item's type must equal the heap dtype."""
    if not heap.dtype == item:
        raise TypingError('heap type must be the same as item type')
@overload(hq.heapify)
def hq_heapify(x):
    """numba overload of heapq.heapify: in-place min-heap transform."""
    assert_heap_type(x)
    def hq_heapify_impl(x):
        n = len(x)
        for i in reversed_range(n // 2):
            _siftup(x, i)
    return hq_heapify_impl
@overload(hq.heappop)
def hq_heappop(heap):
    """numba overload of heapq.heappop: pop and return the smallest item."""
    assert_heap_type(heap)
    def hq_heappop_impl(heap):
        lastelt = heap.pop()
        if heap:
            returnitem = heap[0]
            heap[0] = lastelt
            _siftup(heap, 0)
            return returnitem
        return lastelt
    return hq_heappop_impl
@overload(hq.heappush)
def heappush(heap, item):
    """numba overload of heapq.heappush: push item, keeping the invariant."""
    assert_heap_type(heap)
    assert_item_type_consistent_with_heap_type(heap, item)
    def hq_heappush_impl(heap, item):
        heap.append(item)
        _siftdown(heap, 0, len(heap) - 1)
    return hq_heappush_impl
@overload(hq.heapreplace)
def heapreplace(heap, item):
    """numba overload of heapq.heapreplace: pop smallest, then push item."""
    assert_heap_type(heap)
    assert_item_type_consistent_with_heap_type(heap, item)
    def hq_heapreplace(heap, item):
        returnitem = heap[0]
        heap[0] = item
        _siftup(heap, 0)
        return returnitem
    return hq_heapreplace
@overload(hq.heappushpop)
def heappushpop(heap, item):
    """numba overload of heapq.heappushpop: push item, then pop smallest."""
    assert_heap_type(heap)
    assert_item_type_consistent_with_heap_type(heap, item)
    def hq_heappushpop_impl(heap, item):
        if heap and heap[0] < item:
            item, heap[0] = heap[0], item
            _siftup(heap, 0)
        return item
    return hq_heappushpop_impl
def check_input_types(n, iterable):
    """Typing-time guard shared by nsmallest/nlargest: n must be an
    integer (or bool), iterable a Sequence or Array.
    """
    if not isinstance(n, (types.Integer, types.Boolean)):
        raise TypingError("First argument 'n' must be an integer")
    # heapq also accepts 1.0 (but not 0.0, 2.0, 3.0...) but
    # this isn't replicated
    if not isinstance(iterable, (types.Sequence, types.Array)):
        raise TypingError("Second argument 'iterable' must be iterable")
@overload(hq.nsmallest)
def nsmallest(n, iterable):
    """numba overload of heapq.nsmallest: the n smallest elements, ascending.

    Uses a bounded max-heap of (element, order) pairs so ties keep their
    original order, as in CPython's implementation.
    """
    check_input_types(n, iterable)
    def hq_nsmallest_impl(n, iterable):
        if n == 0:
            # empty list typed like iterable's elements
            return [iterable[0] for _ in range(0)]
        elif n == 1:
            out = min(iterable)
            return [out]
        size = len(iterable)
        if n >= size:
            return sorted(iterable)[:n]
        it = iter(iterable)
        result = [(elem, i) for i, elem in zip(range(n), it)]
        _heapify_max(result)
        top = result[0][0]
        order = n
        for elem in it:
            if elem < top:
                _heapreplace_max(result, (elem, order))
                top, _order = result[0]
                order += 1
        result.sort()
        return [elem for (elem, order) in result]
    return hq_nsmallest_impl
@overload(hq.nlargest)
def nlargest(n, iterable):
    """numba overload of heapq.nlargest: the n largest elements, descending.

    Uses a bounded min-heap of (element, order) pairs; negative order
    values keep ties in original order, as in CPython's implementation.
    """
    check_input_types(n, iterable)
    def hq_nlargest_impl(n, iterable):
        if n == 0:
            # empty list typed like iterable's elements
            return [iterable[0] for _ in range(0)]
        elif n == 1:
            out = max(iterable)
            return [out]
        size = len(iterable)
        if n >= size:
            return sorted(iterable)[::-1][:n]
        it = iter(iterable)
        result = [(elem, i) for i, elem in zip(range(0, -n, -1), it)]
        hq.heapify(result)
        top = result[0][0]
        order = -n
        for elem in it:
            if top < elem:
                hq.heapreplace(result, (elem, order))
                top, _order = result[0]
                order -= 1
        result.sort(reverse=True)
        return [elem for (elem, order) in result]
    return hq_nlargest_impl
| {
"repo_name": "gmarkall/numba",
"path": "numba/cpython/heapq.py",
"copies": "7",
"size": "6151",
"license": "bsd-2-clause",
"hash": -4939198295316169000,
"line_mean": 22.1240601504,
"line_max": 72,
"alpha_frac": 0.576979353,
"autogenerated": false,
"ratio": 3.324864864864865,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7401844217864865,
"avg_score": null,
"num_lines": null
} |
# A port of SeeedStudio's Library for the 96x96 OLED Display
# http://www.seeedstudio.com/depot/Grove-OLED-Display-096-p-824.html
# http://www.seeedstudio.com/wiki/Grove_-_OLED_Display_96*96
# https://github.com/Seeed-Studio/OLED_Display_96X96
import pyb
import time
from pyb import I2C
# module-level I2C master on pyboard bus 1, shared by SeeedGrayOLED below
i2c = I2C(1, I2C.MASTER)
i2c.init(I2C.MASTER, baudrate=400000)  # 400 kHz fast mode
# 8-byte-per-glyph bitmap font for printable ASCII characters.
# putChar() below renders these "two pixel at a time using vertical mode
# from the default 8x8 font" (its own comment); it indexes glyphs by
# character code after rejecting ord(C) < 32 or > 127 -- presumably entry 0
# is ' ' (space) so the index is ord(C) - 32; TODO confirm against putChar.
BasicFont = [
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00],
[0x00,0x00,0x5F,0x00,0x00,0x00,0x00,0x00],
[0x00,0x00,0x07,0x00,0x07,0x00,0x00,0x00],
[0x00,0x14,0x7F,0x14,0x7F,0x14,0x00,0x00],
[0x00,0x24,0x2A,0x7F,0x2A,0x12,0x00,0x00],
[0x00,0x23,0x13,0x08,0x64,0x62,0x00,0x00],
[0x00,0x36,0x49,0x55,0x22,0x50,0x00,0x00],
[0x00,0x00,0x05,0x03,0x00,0x00,0x00,0x00],
[0x00,0x1C,0x22,0x41,0x00,0x00,0x00,0x00],
[0x00,0x41,0x22,0x1C,0x00,0x00,0x00,0x00],
[0x00,0x08,0x2A,0x1C,0x2A,0x08,0x00,0x00],
[0x00,0x08,0x08,0x3E,0x08,0x08,0x00,0x00],
[0x00,0xA0,0x60,0x00,0x00,0x00,0x00,0x00],
[0x00,0x08,0x08,0x08,0x08,0x08,0x00,0x00],
[0x00,0x60,0x60,0x00,0x00,0x00,0x00,0x00],
[0x00,0x20,0x10,0x08,0x04,0x02,0x00,0x00],
[0x00,0x3E,0x51,0x49,0x45,0x3E,0x00,0x00],
[0x00,0x00,0x42,0x7F,0x40,0x00,0x00,0x00],
[0x00,0x62,0x51,0x49,0x49,0x46,0x00,0x00],
[0x00,0x22,0x41,0x49,0x49,0x36,0x00,0x00],
[0x00,0x18,0x14,0x12,0x7F,0x10,0x00,0x00],
[0x00,0x27,0x45,0x45,0x45,0x39,0x00,0x00],
[0x00,0x3C,0x4A,0x49,0x49,0x30,0x00,0x00],
[0x00,0x01,0x71,0x09,0x05,0x03,0x00,0x00],
[0x00,0x36,0x49,0x49,0x49,0x36,0x00,0x00],
[0x00,0x06,0x49,0x49,0x29,0x1E,0x00,0x00],
[0x00,0x00,0x36,0x36,0x00,0x00,0x00,0x00],
[0x00,0x00,0xAC,0x6C,0x00,0x00,0x00,0x00],
[0x00,0x08,0x14,0x22,0x41,0x00,0x00,0x00],
[0x00,0x14,0x14,0x14,0x14,0x14,0x00,0x00],
[0x00,0x41,0x22,0x14,0x08,0x00,0x00,0x00],
[0x00,0x02,0x01,0x51,0x09,0x06,0x00,0x00],
[0x00,0x32,0x49,0x79,0x41,0x3E,0x00,0x00],
[0x00,0x7E,0x09,0x09,0x09,0x7E,0x00,0x00],
[0x00,0x7F,0x49,0x49,0x49,0x36,0x00,0x00],
[0x00,0x3E,0x41,0x41,0x41,0x22,0x00,0x00],
[0x00,0x7F,0x41,0x41,0x22,0x1C,0x00,0x00],
[0x00,0x7F,0x49,0x49,0x49,0x41,0x00,0x00],
[0x00,0x7F,0x09,0x09,0x09,0x01,0x00,0x00],
[0x00,0x3E,0x41,0x41,0x51,0x72,0x00,0x00],
[0x00,0x7F,0x08,0x08,0x08,0x7F,0x00,0x00],
[0x00,0x41,0x7F,0x41,0x00,0x00,0x00,0x00],
[0x00,0x20,0x40,0x41,0x3F,0x01,0x00,0x00],
[0x00,0x7F,0x08,0x14,0x22,0x41,0x00,0x00],
[0x00,0x7F,0x40,0x40,0x40,0x40,0x00,0x00],
[0x00,0x7F,0x02,0x0C,0x02,0x7F,0x00,0x00],
[0x00,0x7F,0x04,0x08,0x10,0x7F,0x00,0x00],
[0x00,0x3E,0x41,0x41,0x41,0x3E,0x00,0x00],
[0x00,0x7F,0x09,0x09,0x09,0x06,0x00,0x00],
[0x00,0x3E,0x41,0x51,0x21,0x5E,0x00,0x00],
[0x00,0x7F,0x09,0x19,0x29,0x46,0x00,0x00],
[0x00,0x26,0x49,0x49,0x49,0x32,0x00,0x00],
[0x00,0x01,0x01,0x7F,0x01,0x01,0x00,0x00],
[0x00,0x3F,0x40,0x40,0x40,0x3F,0x00,0x00],
[0x00,0x1F,0x20,0x40,0x20,0x1F,0x00,0x00],
[0x00,0x3F,0x40,0x38,0x40,0x3F,0x00,0x00],
[0x00,0x63,0x14,0x08,0x14,0x63,0x00,0x00],
[0x00,0x03,0x04,0x78,0x04,0x03,0x00,0x00],
[0x00,0x61,0x51,0x49,0x45,0x43,0x00,0x00],
[0x00,0x7F,0x41,0x41,0x00,0x00,0x00,0x00],
[0x00,0x02,0x04,0x08,0x10,0x20,0x00,0x00],
[0x00,0x41,0x41,0x7F,0x00,0x00,0x00,0x00],
[0x00,0x04,0x02,0x01,0x02,0x04,0x00,0x00],
[0x00,0x80,0x80,0x80,0x80,0x80,0x00,0x00],
[0x00,0x01,0x02,0x04,0x00,0x00,0x00,0x00],
[0x00,0x20,0x54,0x54,0x54,0x78,0x00,0x00],
[0x00,0x7F,0x48,0x44,0x44,0x38,0x00,0x00],
[0x00,0x38,0x44,0x44,0x28,0x00,0x00,0x00],
[0x00,0x38,0x44,0x44,0x48,0x7F,0x00,0x00],
[0x00,0x38,0x54,0x54,0x54,0x18,0x00,0x00],
[0x00,0x08,0x7E,0x09,0x02,0x00,0x00,0x00],
[0x00,0x18,0xA4,0xA4,0xA4,0x7C,0x00,0x00],
[0x00,0x7F,0x08,0x04,0x04,0x78,0x00,0x00],
[0x00,0x00,0x7D,0x00,0x00,0x00,0x00,0x00],
[0x00,0x80,0x84,0x7D,0x00,0x00,0x00,0x00],
[0x00,0x7F,0x10,0x28,0x44,0x00,0x00,0x00],
[0x00,0x41,0x7F,0x40,0x00,0x00,0x00,0x00],
[0x00,0x7C,0x04,0x18,0x04,0x78,0x00,0x00],
[0x00,0x7C,0x08,0x04,0x7C,0x00,0x00,0x00],
[0x00,0x38,0x44,0x44,0x38,0x00,0x00,0x00],
[0x00,0xFC,0x24,0x24,0x18,0x00,0x00,0x00],
[0x00,0x18,0x24,0x24,0xFC,0x00,0x00,0x00],
[0x00,0x00,0x7C,0x08,0x04,0x00,0x00,0x00],
[0x00,0x48,0x54,0x54,0x24,0x00,0x00,0x00],
[0x00,0x04,0x7F,0x44,0x00,0x00,0x00,0x00],
[0x00,0x3C,0x40,0x40,0x7C,0x00,0x00,0x00],
[0x00,0x1C,0x20,0x40,0x20,0x1C,0x00,0x00],
[0x00,0x3C,0x40,0x30,0x40,0x3C,0x00,0x00],
[0x00,0x44,0x28,0x10,0x28,0x44,0x00,0x00],
[0x00,0x1C,0xA0,0xA0,0x7C,0x00,0x00,0x00],
[0x00,0x44,0x64,0x54,0x4C,0x44,0x00,0x00],
[0x00,0x08,0x36,0x41,0x00,0x00,0x00,0x00],
[0x00,0x00,0x7F,0x00,0x00,0x00,0x00,0x00],
[0x00,0x41,0x36,0x08,0x00,0x00,0x00,0x00],
[0x00,0x02,0x01,0x01,0x02,0x01,0x00,0x00],
[0x00,0x02,0x05,0x05,0x02,0x00,0x00,0x00]
]
# Display addressing modes (used by setVerticalMode/setHorizontalMode).
# NOTE: the original source wrote these as the Python 2 octal literals
# ``01`` and ``02``, which are a SyntaxError on Python 3 / MicroPython
# (the platform implied by ``import pyb``); plain decimal ints are
# numerically identical.
VERTICAL_MODE = 1
HORIZONTAL_MODE = 2
# I2C protocol constants for the Grove gray OLED driver
SeeedGrayOLED_Address = 0x3c           # 7-bit I2C address of the display
SeeedGrayOLED_Command_Mode = 0x80      # control byte: next byte is a command
SeeedGrayOLED_Data_Mode = 0x40         # control byte: following bytes are data
SeeedGrayOLED_Display_Off_Cmd = 0xAE
SeeedGrayOLED_Display_On_Cmd = 0xAF
SeeedGrayOLED_Normal_Display_Cmd = 0xA4
SeeedGrayOLED_Inverse_Display_Cmd = 0xA7
SeeedGrayOLED_Activate_Scroll_Cmd = 0x2F
SeeedGrayOLED_Dectivate_Scroll_Cmd = 0x2E
SeeedGrayOLED_Set_ContrastLevel_Cmd = 0x81
# scroll direction / frame-interval codes
Scroll_Left = 0x00
Scroll_Right = 0x01
Scroll_2Frames = 0x7
Scroll_3Frames = 0x4
Scroll_4Frames = 0x5
Scroll_5Frames = 0x0
Scroll_25Frames = 0x6
Scroll_64Frames = 0x1
Scroll_128Frames = 0x2
Scroll_256Frames = 0x3
class SeeedGrayOLED():
    """Driver for the Seeed Grove 96x96 gray-level OLED over I2C.

    Relies on a module-level ``i2c`` object providing
    ``send(data, addr=...)`` and on the ``SeeedGrayOLED_*`` constants
    defined above.

    NOTE(review): ``i2c``, ``BasicFont``, ``addressingMode`` and the
    ``HORIZONTAL_MODE``/``VERTICAL_MODE`` constants are not defined in
    this section of the file -- confirm they are provided elsewhere.
    """

    # Gray nibbles used when rendering text; default is brightest white.
    grayH = 0xF0
    grayL = 0x0F

    def __init__(self):
        """Initialise the OLED controller and select the full 96x96 window."""
        self.sendCommand(0xFD)
        # Unlock OLED driver IC MCU interface from entering command
        self.sendCommand(0x12)
        self.sendCommand(0xAE)  # set display off
        self.sendCommand(0xA8)  # set multiplex ratio...
        self.sendCommand(0x5F)  # ...to 96
        self.sendCommand(0xA1)  # set display start line
        self.sendCommand(0x00)
        self.sendCommand(0xA2)  # set display offset
        self.sendCommand(0x60)
        self.sendCommand(0xA0)  # set remap
        self.sendCommand(0x46)
        self.sendCommand(0xAB)  # set vdd internal
        self.sendCommand(0x01)
        self.sendCommand(0x81)  # set contrast...
        self.sendCommand(0x53)  # ...to 100 nit
        self.sendCommand(0xB1)  # set phase length
        self.sendCommand(0x51)
        self.sendCommand(0xB3)  # set display clock divide ratio / osc frequency
        self.sendCommand(0x01)
        self.sendCommand(0xB9)
        self.sendCommand(0xBC)  # set pre-charge voltage / VCOMH
        self.sendCommand(0x08)
        self.sendCommand(0xBE)  # set VCOMH
        self.sendCommand(0x07)
        self.sendCommand(0xB6)  # set second pre-charge period
        self.sendCommand(0x01)
        self.sendCommand(0xD5)  # enable second precharge and internal VSL
        self.sendCommand(0x62)
        self.sendCommand(0xA4)  # set normal display mode
        self.sendCommand(0x2E)  # deactivate scroll
        self.sendCommand(0xAF)  # switch on display
        time.sleep(.100)
        # Select the full drawing window: rows 0-95, driver columns 8-55
        # (each column address covers 2 pixels).
        self.sendCommand(0x75)  # set row address
        self.sendCommand(0x00)  # start 0
        self.sendCommand(0x5f)  # end 95
        self.sendCommand(0x15)  # set column address
        self.sendCommand(0x08)  # start from 8th driver column (OLED column 0)
        self.sendCommand(0x37)  # end at (8 + 47)th column

    def sendCommand(self, command):
        """Send one command byte, prefixed with the command-mode byte."""
        data = bytearray(2)
        data[0] = SeeedGrayOLED_Command_Mode
        data[1] = command
        i2c.send(data, addr=SeeedGrayOLED_Address)

    def sendData(self, toSend):
        """Send a sequence of data bytes, prefixed with the data-mode byte.

        toSend -- any sequence of byte values (list, bytes, bytearray).
        """
        ln = len(toSend)
        data = bytearray(ln + 1)
        data[0] = SeeedGrayOLED_Data_Mode
        for i in range(0, ln):
            data[i + 1] = toSend[i]
        i2c.send(data, addr=SeeedGrayOLED_Address)

    def setContrastLevel(self, ContrastLevel):
        """Set display contrast (0-255)."""
        self.sendCommand(SeeedGrayOLED_Set_ContrastLevel_Cmd)
        self.sendCommand(ContrastLevel)

    def setHorizontalMode(self):
        """Switch the controller to horizontal addressing mode and reset
        the full drawing window."""
        self.sendCommand(0xA0)  # remap to...
        self.sendCommand(0x42)  # ...horizontal mode
        # Row address
        self.sendCommand(0x75)  # set row address
        self.sendCommand(0x00)  # start 0
        self.sendCommand(0x5f)  # end 95
        # Column address
        self.sendCommand(0x15)  # set column address
        self.sendCommand(0x08)  # start from 8th driver column (OLED column 0)
        self.sendCommand(0x37)  # end at (8 + 47)th column (2 pixels each)

    def setVerticalMode(self):
        """Switch the controller to vertical addressing mode."""
        self.sendCommand(0xA0)  # remap to vertical mode
        self.sendCommand(0x46)

    def setTextXY(self, Row, Column):
        """Position the text cursor at character cell (Row, Column).

        Each character cell is 8 pixels tall and 8 pixels (4 column
        addresses) wide.
        """
        # Column address
        self.sendCommand(0x15)                 # set column address
        self.sendCommand(0x08 + (Column * 4))  # start column (offset by 8)
        self.sendCommand(0x37)                 # end column
        # Row address
        self.sendCommand(0x75)                 # set row address
        self.sendCommand(0x00 + (Row * 8))     # start row
        self.sendCommand(0x07 + (Row * 8))     # end row

    def clearDisplay(self):
        """Blank the whole display by writing zeros to every cell."""
        for j in range(0, 48):
            for i in range(0, 96):
                self.sendData([0x00])  # clear all columns

    def setGrayLevel(self, grayLevel):
        """Set the gray level (0-15) used for subsequent text rendering."""
        self.grayH = (grayLevel << 4) & 0xF0
        self.grayL = grayLevel & 0x0F

    def putChar(self, C):
        """Render one printable ASCII character at the current cursor."""
        if ord(C) < 32 or ord(C) > 127:
            # Ignore non-printable ASCII characters. This can be modified
            # for multilingual fonts.
            C = ' '
        D = ord(C)
        for i in [0, 2, 4, 6]:
            for j in range(0, 8):
                # The character is constructed two pixels at a time using
                # vertical mode from the default 8x8 font.
                c = 0x00
                bit1 = (BasicFont[D - 32][i] >> j) & 0x01
                bit2 = (BasicFont[D - 32][i + 1] >> j) & 0x01
                # Each bit is expanded to a gray nibble.
                if bit1 > 0:
                    c |= self.grayH
                if bit2 > 0:
                    c |= self.grayL
                self.sendData([c])

    def putString(self, s):
        """Render a string character by character."""
        for i in s:
            self.putChar(i)

    def putNumber(self, long_num):
        """Render an integer; returns the number of characters written."""
        char_buffer = [0] * 10
        i = 0
        f = 0
        if long_num < 0:
            f = 1
            self.putChar('-')
            long_num = -long_num
        elif long_num == 0:
            f = 1
            self.putChar('0')
            return f
        while long_num > 0:
            char_buffer[i] = long_num % 10
            i += 1
            # BUG FIX: was "/=", which produces a float on Python 3.
            long_num //= 10
        f = f + i
        while i > 0:
            # BUG FIX: the original passed an int to putChar, whose ord()
            # call would raise TypeError; convert the digit to a character.
            self.putChar(chr(48 + char_buffer[i - 1]))
            i -= 1
        return f

    def setNormalDisplay(self):
        """Show pixels at their written gray level (non-inverted)."""
        self.sendCommand(SeeedGrayOLED_Normal_Display_Cmd)

    def setInverseDisplay(self):
        """Invert the display."""
        self.sendCommand(SeeedGrayOLED_Inverse_Display_Cmd)

    def activateScroll(self):
        """Start scrolling with the properties configured previously."""
        self.sendCommand(SeeedGrayOLED_Activate_Scroll_Cmd)

    def deactivateScroll(self):
        """Stop scrolling."""
        self.sendCommand(SeeedGrayOLED_Dectivate_Scroll_Cmd)

    def drawBitmap(self, bitmaparray, bytes):
        """Draw a 1-bit-per-pixel bitmap, expanding bits to gray nibbles.

        NOTE(review): ``addressingMode``/``HORIZONTAL_MODE``/``VERTICAL_MODE``
        are not defined in this file section -- confirm they exist elsewhere.
        """
        localAddressMode = addressingMode
        if localAddressMode != HORIZONTAL_MODE:
            # The bitmap is drawn in horizontal mode.
            # BUG FIX: the original called a bare setHorizontalMode().
            self.setHorizontalMode()
        for i in range(0, bytes):
            for j in [0, 2, 4, 6]:
                c = 0x00
                bit1 = bitmaparray[i] << j & 0x80
                bit2 = bitmaparray[i] << (j + 1) & 0x80
                # Each bit is expanded to a gray nibble.
                if bit1 > 0:
                    c |= self.grayH
                if bit2 > 0:
                    c |= self.grayL
                # BUG FIX: sendData expects a sequence, not a bare int.
                self.sendData([c])
        if localAddressMode == VERTICAL_MODE:
            # If vertical mode was in use earlier, restore it.
            # BUG FIX: the original called a bare setVerticalMode().
            self.setVerticalMode()

    def setHorizontalScrollProperties(self, direction, startRow, endRow,
                                      startColumn, endColumn, scrollSpeed):
        """Configure horizontal scrolling (activate with activateScroll).

        direction -- Scroll_Left or Scroll_Right
        scrollSpeed -- one of the Scroll_*Frames constants

        BUG FIX: the original definition was missing ``self`` even though
        the body calls ``self.sendCommand``.
        """
        if Scroll_Right == direction:
            self.sendCommand(0x27)
        else:
            self.sendCommand(0x26)
        self.sendCommand(0x00)  # dummy byte
        self.sendCommand(startRow)
        self.sendCommand(scrollSpeed)
        self.sendCommand(endRow)
        self.sendCommand(startColumn + 8)
        self.sendCommand(endColumn + 8)
        self.sendCommand(0x00)  # dummy byte
# Demo / smoke test: runs at import time on the device.
OLED=SeeedGrayOLED()
#Initialize Object
OLED.clearDisplay()
OLED.setNormalDisplay()
OLED.setVerticalMode()
# Write "Hello World" on 12 rows, one gray level per row (0 = darkest).
for i in range(0, 12):
    OLED.setTextXY(i,0)
    OLED.setGrayLevel(i)
    OLED.putString("Hello World")
| {
"repo_name": "dda/MicroPython",
"path": "Grove_OLED.py",
"copies": "1",
"size": "12083",
"license": "apache-2.0",
"hash": -5577602106220910000,
"line_mean": 30.7973684211,
"line_max": 102,
"alpha_frac": 0.6835223041,
"autogenerated": false,
"ratio": 2.014169028171362,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3197691332271362,
"avg_score": null,
"num_lines": null
} |
"""A port of the `JSON-minify` utility to the Python language.
Based on JSON.minify.js: https://github.com/getify/JSON.minify
Contributors:
- Gerald Storer
- Contributed original version
- Felipe Machado
- Performance optimization
- Pradyun S. Gedam
- Conditions and variable names changed
- Reformatted tests and moved to separate file
- Made into a PyPI Package
"""
import re
def json_minify(string, strip_space=True):
    """Strip ``//`` and ``/* */`` comments (and, optionally, insignificant
    whitespace) from a JSON-like string, leaving string literals intact.

    :param string: the JSON text to minify
    :param strip_space: when True, also remove whitespace outside strings
    :return: the minified JSON text
    """
    # Raw strings: the original non-raw literals contained the invalid
    # escape sequence "\*", which warns on modern Python.
    tokenizer = re.compile(r'"|(/\*)|(\*/)|(//)|\n|\r')
    end_slashes_re = re.compile(r'(\\)*$')

    in_string = False
    in_multi = False   # inside a /* ... */ comment
    in_single = False  # inside a // ... comment
    new_str = []
    index = 0

    for match in re.finditer(tokenizer, string):
        if not (in_multi or in_single):
            tmp = string[index:match.start()]
            if not in_string and strip_space:
                # replace white space as defined in the standard
                tmp = re.sub('[ \t\n\r]+', '', tmp)
            new_str.append(tmp)
        index = match.end()
        val = match.group()

        if val == '"' and not (in_multi or in_single):
            escaped = end_slashes_re.search(string, 0, match.start())
            # start of string, or an unescaped quote character ending one
            if not in_string or (escaped is None or len(escaped.group()) % 2 == 0):  # noqa
                in_string = not in_string
            index -= 1  # include the " character in the next catch
        elif not (in_string or in_multi or in_single):
            if val == '/*':
                in_multi = True
            elif val == '//':
                in_single = True
        elif val == '*/' and in_multi and not (in_string or in_single):
            in_multi = False
        elif val in '\r\n' and not (in_multi or in_string) and in_single:
            in_single = False
        elif not ((in_multi or in_single) or (val in ' \r\n\t' and strip_space)):  # noqa
            new_str.append(val)

    new_str.append(string[index:])
    return ''.join(new_str)
| {
"repo_name": "skuroda/FindKeyConflicts",
"path": "lib/minify_json.py",
"copies": "1",
"size": "1994",
"license": "mit",
"hash": 6911015467290846000,
"line_mean": 32.2333333333,
"line_max": 91,
"alpha_frac": 0.5631895687,
"autogenerated": false,
"ratio": 3.6654411764705883,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9725600442140285,
"avg_score": 0.0006060606060606061,
"num_lines": 60
} |
"""A port of the `JSON-minify` utility to the Python language.
Based on JSON.minify.js: https://github.com/getify/JSON.minify
Contributors:
- Gerald Storer
- Contributed original version
- Felipe Machado
- Performance optimization
- Pradyun S. Gedam
- Conditions and variable names changed
- Reformatted tests and moved to separate file
- Made into a PyPI Package
"""
import re
def json_minify(string, strip_space=True):
    """Strip ``//`` and ``/* */`` comments (and, optionally, insignificant
    whitespace) from a JSON-like string, leaving string literals intact.

    :param string: the JSON text to minify
    :param strip_space: when True, also remove whitespace outside strings
    :return: the minified JSON text
    """
    # Raw strings: the original non-raw literals contained the invalid
    # escape sequence "\*", which warns on modern Python.
    tokenizer = re.compile(r'"|(/\*)|(\*/)|(//)|\n|\r')
    end_slashes_re = re.compile(r'(\\)*$')

    in_string = False
    in_multi = False   # inside a /* ... */ comment
    in_single = False  # inside a // ... comment
    new_str = []
    index = 0

    for match in re.finditer(tokenizer, string):
        if not (in_multi or in_single):
            tmp = string[index:match.start()]
            if not in_string and strip_space:
                # replace white space as defined in the standard
                tmp = re.sub('[ \t\n\r]+', '', tmp)
            new_str.append(tmp)
        index = match.end()
        val = match.group()

        if val == '"' and not (in_multi or in_single):
            escaped = end_slashes_re.search(string, 0, match.start())
            # start of string, or an unescaped quote character ending one
            if not in_string or (escaped is None or len(escaped.group()) % 2 == 0):  # noqa
                in_string = not in_string
            index -= 1  # include the " character in the next catch
        elif not (in_string or in_multi or in_single):
            if val == '/*':
                in_multi = True
            elif val == '//':
                in_single = True
        elif val == '*/' and in_multi and not (in_string or in_single):
            in_multi = False
        elif val in '\r\n' and not (in_multi or in_string) and in_single:
            in_single = False
        elif not ((in_multi or in_single) or (val in ' \r\n\t' and strip_space)):  # noqa
            new_str.append(val)

    new_str.append(string[index:])
    return ''.join(new_str)
| {
"repo_name": "tobeycarman/dvm-dos-tem",
"path": "calibration/json_minify/__init__.py",
"copies": "3",
"size": "1996",
"license": "mit",
"hash": 7310224312630944000,
"line_mean": 31.1935483871,
"line_max": 91,
"alpha_frac": 0.5626252505,
"autogenerated": false,
"ratio": 3.6556776556776556,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5718302906177655,
"avg_score": null,
"num_lines": null
} |
""" A port of the node.js EventEmitter functionality
Classes:
:py:class:`EventEmitter`
implements the event emitter functionality
"""
import collections
import sys
class EventEmitter(object):
    """Handles event emitting and listening (a port of node.js EventEmitter).

    Public Methods:
        :py:meth:`~EventEmitter.add_listener` / :py:meth:`~EventEmitter.on`
            register a listener for an event
        :py:meth:`~EventEmitter.once`
            register a listener that fires only on the next emit
        :py:meth:`~EventEmitter.emit`
            trigger the listeners for an event
        :py:meth:`~EventEmitter.remove_listener`
            remove one listener from an event
        :py:meth:`~EventEmitter.remove_all_listeners`
            remove every listener from an event
        :py:meth:`~EventEmitter.listeners`
            get a copy of the listeners on an event
        :py:meth:`~EventEmitter.set_max_listeners`
            set the per-event listener limit before warnings are issued
            (default: 10, None for no limit)
    """

    # Warning template used when an event accumulates too many listeners.
    # BUG FIX: the original literal started with '""""', embedding a stray
    # '"' as the first character of the message.
    max_errstr = """
WARNING: Possible EventEmitter memory leak:
{0} listeners added for {1}. Use set_max_listeners to increase the limit.
"""

    def __init__(self):
        """Initialize the listener maps and the default listener limit."""
        self._map = collections.defaultdict(list)
        self._oncemap = collections.defaultdict(list)
        self._max_listeners = 10

    def set_max_listeners(self, max_listeners):
        """Set the maximum number of listeners for each event.

        Parameters:
            max_listeners
                the maximum number of listeners allowed (None for no limit)
        """
        self._max_listeners = max_listeners
        return self

    def add_listener(self, event, listener=None):
        """Add a listener (a callable) to an event; returns self.

        Parameters:
            event
                the event to listen for
            listener
                the handler for the event (a function / callable)
        """
        if listener is not None:
            listeners = len(self._map[event])
            if self._max_listeners is not None \
                    and listeners >= self._max_listeners:
                # BUG FIX: the original used the Python-2-only
                # "print >> sys.stderr" syntax; sys.stderr.write works on
                # both Python 2 and 3.
                sys.stderr.write(
                    self.max_errstr.format(listeners + 1, event) + "\n")
            self._map[event].append(listener)
        return self

    def on(self, *args):
        """Alias for add_listener."""
        return self.add_listener(*args)

    def once(self, event, listener=None):
        """Add a listener executed only on the first emit, then removed.

        Parameters:
            event
                the event to listen for
            listener
                the listener function / callable
        """
        self.add_listener(event, listener)
        if listener is not None:  # don't queue no-op removals for None
            self._oncemap[event].append(listener)
        return self

    def remove_listener(self, event, listener=None):
        """Remove a listener from an event (absent listeners are ignored).

        Parameters:
            event
                the event from which to remove it
            listener
                the handler to remove
        """
        try:
            self._map[event].remove(listener)
        except ValueError:
            pass
        return self

    def remove_all_listeners(self, event):
        """Clear all listeners (including pending once-listeners) from an event.

        Parameters:
            event
                the event to clear listeners from
        """
        self._map[event] = []
        # BUG FIX: stale once-entries previously survived here and could
        # remove an identical listener re-added after this call.
        self._oncemap[event] = []
        return self

    def listeners(self, event):
        """Return a COPY of the list of listeners on an event."""
        return self._map[event][:]

    def emit(self, event, *args, **kwargs):
        """Trigger an event, calling its handlers with the given arguments.

        Parameters:
            event
                the event to trigger
            args
                positional arguments to pass to the triggered listeners
            kwargs
                keyword arguments to pass to the triggered listeners
        """
        # iterate over a copy so listeners may add/remove during dispatch
        for listener in self._map[event][:]:
            listener(*args, **kwargs)
        for listener in self._oncemap[event]:
            self.remove_listener(event, listener)
        self._oncemap[event] = []
        return self
| {
"repo_name": "gsmcwhirter/simulations",
"path": "src/simulations/utils/eventemitter.py",
"copies": "1",
"size": "4501",
"license": "mit",
"hash": -2382081677220791000,
"line_mean": 22.2010309278,
"line_max": 79,
"alpha_frac": 0.5625416574,
"autogenerated": false,
"ratio": 4.839784946236559,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00009545628102329132,
"num_lines": 194
} |
# A port of VSML
# http://www.lighthouse3d.com/very-simple-libs/vsml/vsml-in-action/
# to support matrix operations
# Singleton class from
# http://code.activestate.com/recipes/52558/
# also see http://sourceforge.net/projects/libmymath/files/libmymath%20v1.3.1/
from pyglet.gl import *
class MatrixTypes(object):
    """Identifiers for the two matrix stacks VSML operates on."""
    MODELVIEW = 0
    PROJECTION = 1
class VSML:
    """Python singleton wrapper for a (stubbed) port of the VSML matrix
    library (http://www.lighthouse3d.com/very-simple-libs/vsml/).

    Uses the "singleton by delegation" recipe from
    http://code.activestate.com/recipes/52558/: every VSML() handle
    delegates attribute access to one shared __impl instance.  All matrix
    operations are unimplemented stubs documenting the C++ API they mirror.
    """

    class __impl:
        """Implementation of the VSML singleton interface (all stubs).

        NOTE(review): the C++ library overloads translate/scale/rotate on
        an optional MatrixTypes argument.  The original port defined each
        of those methods twice, so the second definition silently shadowed
        the first; only the surviving signatures are kept here.
        """

        def initUniformLocs(self, modelviewLoc, projLoc):
            """Init the library for a shader program using uniform variables.

            modelviewLoc -- location of the modelview-matrix uniform
            projLoc -- location of the projection-matrix uniform
            (FIX: parameter was misspelled "sefl" in the original.)
            """
            pass

        def initUniformBlock(self, buf, modelviewOffset, projOffset):
            """Init the library for a shader program using uniform blocks.

            buf -- index of the uniform buffer
            modelviewOffset -- offset of the modelview matrix in the buffer
            projOffset -- offset of the projection matrix in the buffer
            """
            pass

        def translate(self, x, y, z):
            """Similar to glTranslate*; applied to MODELVIEW only."""
            pass

        def scale(self, x, y, z):
            """Similar to glScale*; applied to MODELVIEW only."""
            pass

        def rotate(self, angle, x, y, z):
            """Similar to glRotate*; applied to MODELVIEW only.

            angle -- rotation angle in degrees
            x, y, z -- rotation axis
            """
            pass

        def loadIdentity(self, aType):
            """Similar to glLoadIdentity; aType is MODELVIEW or PROJECTION."""
            pass

        def multMatrix(self, aType, aMatrix):
            """Similar to glMultMatrix.

            aType -- MODELVIEW or PROJECTION
            aMatrix -- matrix in column-major order (float[16])
            """
            pass

        def loadMatrix(self, aType, aMatrix):
            """Similar to glLoadMatrix.

            aType -- MODELVIEW or PROJECTION
            aMatrix -- matrix in column-major order (float[16])
            """
            pass

        def pushMatrix(self, aType):
            """Similar to glPushMatrix; aType is MODELVIEW or PROJECTION."""
            pass

        def popMatrix(self, aType):
            """Similar to glPopMatrix; aType is MODELVIEW or PROJECTION."""
            pass

        def lookAt(self, xPos, yPos, zPos, xLook, yLook, zLook, xUp, yUp, zUp):
            """Similar to gluLookAt.

            xPos, yPos, zPos -- camera position
            xLook, yLook, zLook -- point to aim the camera at
            xUp, yUp, zUp -- camera's up vector
            """
            pass

        def perspective(self, fov, ratio, nearp, farp):
            """Similar to gluPerspective.

            fov -- vertical field of view
            ratio -- aspect ratio of the viewport or window
            nearp, farp -- distance to the near and far planes
            """
            pass

        def ortho(self, left, right, bottom, top, nearp=-1.0, farp=1.0):
            """Similar to glOrtho / gluOrtho2D (leave the last two blank).

            left, right -- left and right vertical clipping planes
            bottom, top -- bottom and top horizontal clipping planes
            nearp, farp -- distance to the near and far planes
            """
            pass

        def frustum(self, left, right, bottom, top, nearp, farp):
            """Similar to glFrustum.

            left, right -- left and right vertical clipping planes
            bottom, top -- bottom and top horizontal clipping planes
            nearp, farp -- distance to the near and far planes
            """
            pass

        def get(self, aType):
            """Similar to glGet; returns the float[16] matrix for aType."""
            pass

        def matrixToBuffer(self, aType):
            """Update the uniform buffer data for aType."""
            pass

        def matrixToUniform(self, aType):
            """Update the uniform variables for aType."""
            pass

        def matrixToGL(self, aType):
            """Update the buffer or the uniform variables, depending on
            which init* function was called last."""
            pass

        # -- state mirrored from the C++ implementation ------------------
        # Has an init* function been called? (C++: bool mInit)
        mInit = False
        # Using uniform blocks? (C++: bool mBlocks)
        mBlocks = False
        # Matrix stacks for modelview and projection
        # (C++: std::vector<float *> mMatrixStack[2])
        mMatrixStack = []
        # Storage for the two matrices (C++: float mMatrix[2][16])
        mMatrix = []
        # Storage for the uniform locations (C++: GLuint mUniformLoc[2])
        mUniformLoc = []
        # Storage for the buffer index (C++: GLuint mBuffer)
        mBuffer = None
        # Storage for the offsets within the buffer (C++: GLuint mOffset[2])
        mOffset = []

        def setIdentityMatrix(self, mat, size=4):
            """Fill *mat* with an identity matrix of order *size*."""
            pass

        def crossProduct(self, a, b, res):
            """Vector cross product: res = a x b."""
            pass

        def normalize(self, a):
            """Normalize a vec3 in place."""
            pass

    # storage for the shared implementation instance
    __instance = None

    def __init__(self):
        """Create the singleton implementation on first use."""
        if VSML.__instance is None:
            VSML.__instance = VSML.__impl()
        # Store the instance reference as the only member of this handle;
        # written via __dict__ to bypass the delegating __setattr__ below.
        self.__dict__['_VSML__instance'] = VSML.__instance

    def __getattr__(self, attr):
        """Delegate attribute access to the implementation."""
        return getattr(self.__instance, attr)

    def __setattr__(self, attr, value):
        """Delegate attribute assignment to the implementation."""
        return setattr(self.__instance, attr, value)
# Test it: two handles wrap the same inner implementation instance.
# (Python 2 print statements -- this module targets Python 2.)
s1 = VSML()
print id(s1), s1.normalize(None)
s2 = VSML()
print id(s2), s2.normalize(None)
# Sample output, the second (inner) id is constant:
# 8172684 8176268
# 8168588 8176268
| {
"repo_name": "fos/fos-legacy",
"path": "fos/shader/vsml.py",
"copies": "1",
"size": "11294",
"license": "bsd-3-clause",
"hash": 6815067313300630000,
"line_mean": 31.0852272727,
"line_max": 109,
"alpha_frac": 0.5025677351,
"autogenerated": false,
"ratio": 4.563232323232323,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5565800058332323,
"avg_score": null,
"num_lines": null
} |
# A port of VSML
# http://www.lighthouse3d.com/very-simple-libs/vsml/vsml-in-action/
# to support matrix operations
# Singleton class from
# http://stackoverflow.com/questions/42558/python-and-the-singleton-pattern
# also see http://sourceforge.net/projects/libmymath/files/libmymath%20v1.3.1/
from pyglet.gl import *
import numpy as np
from ctypes import *
# Toggle for the verbose matrix-trace output sprinkled through this module.
DEBUG = False


def normalize(vectarr):
    """Return *vectarr* scaled to unit Euclidean length."""
    length = np.linalg.norm(vectarr)
    return vectarr / length
class VSML(object):
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(VSML, cls).__new__(
cls, *args, **kwargs)
return cls._instance
class MatrixTypes(object):
MODELVIEW = 0
PROJECTION = 1
def __init__(self):
self.projection = np.eye(4)
self.modelview = np.eye(4)
# setting up the stacks
self.mMatrixStack = {}
# use list as stack
self.mMatrixStack[self.MatrixTypes.MODELVIEW] = []
self.mMatrixStack[self.MatrixTypes.PROJECTION] = []
#self.windows = {}
def setSize(self, width, height):
""" Stores the width and height of the currently active OpenGL context
"""
#self.windows[winId] = { 'width' : width, 'height' : height}
self.width = width
self.height = height
def loadIdentity(self, aType):
"""
/** Similar to glLoadIdentity.
*
* \param aType either MODELVIEW or PROJECTION
*/
void loadIdentity(MatrixTypes aType);
"""
if aType == self.MatrixTypes.PROJECTION:
self.projection = np.eye(4, dtype = np.float32 )
elif aType == self.MatrixTypes.MODELVIEW:
self.modelview = np.eye(4, dtype = np.float32 )
else:
print "loadIdentity: wrong matrix type"
def pushMatrix(self, aType):
"""
/** Similar to glPushMatrix
*
* \param aType either MODELVIEW or PROJECTION
*/
void pushMatrix(MatrixTypes aType);
"""
# todo: do we need copy?
if aType == self.MatrixTypes.PROJECTION:
self.mMatrixStack[aType].append(self.projection.copy())
elif aType == self.MatrixTypes.MODELVIEW:
self.mMatrixStack[aType].append(self.modelview.copy() )
else:
print "pushMatrix: wrong matrix type"
def popMatrix(self, aType):
"""
/** Similar to glPopMatrix
*
* \param aType either MODELVIEW or PROJECTION
*/
void popMatrix(MatrixTypes aType);
"""
if aType == self.MatrixTypes.PROJECTION:
self.projection = self.mMatrixStack[aType].pop()
elif aType == self.MatrixTypes.MODELVIEW:
self.modelview = self.mMatrixStack[aType].pop()
else:
print "popMatrix: wrong matrix type"
def multMatrix(self, aType, aMatrix):
"""
/** Similar to glMultMatrix.
*
* \param aType either MODELVIEW or PROJECTION
* \param aMatrix matrix in column major order data, float[16]
*/
void multMatrix(MatrixTypes aType, float *aMatrix);
"""
if DEBUG:
print "multiply matrix"
if aType == self.MatrixTypes.PROJECTION:
if DEBUG:
print "projection was", self.projection
self.projection = np.dot(self.projection, aMatrix)
if DEBUG:
print "projection is", self.projection
elif aType == self.MatrixTypes.MODELVIEW:
if DEBUG:
print "modelview was", self.modelview
self.modelview = np.dot(self.modelview, aMatrix)
if DEBUG:
print "modelview is", self.modelview
else:
print "multMatrix: wrong matrix type"
def get_modelview_matrix(self, array_type=c_float, glGetMethod=glGetFloatv):
"""Returns the built-in modelview matrix."""
m = (array_type*16)()
glGetMethod(GL_MODELVIEW_MATRIX, m)
return np.array( m )
def get_projection_matrix(self, array_type=c_float, glGetMethod=glGetFloatv):
"""Returns the current modelview matrix."""
m = (array_type*16)()
glGetMethod(GL_PROJECTION_MATRIX, m)
return np.array( m )
def get_viewport(self):
"""
Returns the current viewport.
"""
m = (c_int*4)()
glGetIntegerv(GL_VIEWPORT, m)
return np.array( m )
def get_projection(self):
return (c_float*16)(*self.projection.T.ravel().tolist())
def get_modelview(self):
return (c_float*16)(*self.modelview.T.ravel().tolist())
def initUniformLocs(sefl, modelviewLoc, projLoc):
"""
/** Call this function to init the library for a particular
* program shader if using uniform variables
*
* \param modelviewLoc location of the uniform variable
* for the modelview matrix
*
* \param projLoc location of the uniform variable
* for the projection matrix
*/
void initUniformLocs(GLuint modelviewLoc, GLuint projLoc);
"""
pass
def initUniformBlock(self, buf, modelviewOffset, projOffset):
"""
/** Call this function to init the library for a particular
* program shader if using uniform blocks
*
* \param buffer index of the uniform buffer
* \param modelviewOffset offset within the buffer of
* the modelview matrix
* \param projOffset offset within the buffer of
* the projection matrix
*/
void initUniformBlock(GLuint buffer, GLuint modelviewOffset, GLuint projOffset);
"""
pass
def translate(self, x, y, z, aType = MatrixTypes.MODELVIEW ):
"""
/** Similar to glTranslate*. Applied to MODELVIEW only.
*
* \param x,y,z vector to perform the translation
*/
void translate(float x, float y, float z);
"""
mat = np.eye(4, dtype = np.float32 )
mat[0,3] = x
mat[1,3] = y
mat[2,3] = z
if aType == self.MatrixTypes.MODELVIEW:
self.multMatrix(self.MatrixTypes.MODELVIEW, mat)
elif aType == self.MatrixTypes.PROJECTION:
self.multMatrix(self.MatrixTypes.PROJECTION, mat)
def scale(self, x, y, z, aType = MatrixTypes.MODELVIEW ):
"""
/** Similar to glScale*. Can be applied to both MODELVIEW
* and PROJECTION matrices.
*
* \param aType either MODELVIEW or PROJECTION
* \param x,y,z scale factors
*/
void scale(MatrixTypes aType, float x, float y, float z);
"""
mat = np.zeros( (4,4), dtype = np.float32)
mat[0,0] = x
mat[1,1] = y
mat[2,2] = z
mat[3,3] = 1.0
if aType == self.MatrixTypes.MODELVIEW:
self.multMatrix(self.MatrixTypes.MODELVIEW, mat)
elif aType == self.MatrixTypes.PROJECTION:
self.multMatrix(self.MatrixTypes.PROJECTION, mat)
def rotate(self, angle, x, y, z, aType = MatrixTypes.MODELVIEW ):
"""
/** Similar to glRotate*. Can be applied to both MODELVIEW
* and PROJECTION matrices.
*
* \param aType either MODELVIEW or PROJECTION
* \param angle rotation angle in degrees
* \param x,y,z rotation axis in degrees
*/
void rotate(MatrixTypes aType, float angle, float x, float y, float z);
"""
mat = np.zeros( (4,4), dtype = np.float32)
radAngle = np.deg2rad(angle)
co = np.cos(radAngle)
si = np.sin(radAngle)
x2 = x*x
y2 = y*y
z2 = z*z
mat[0,0] = x2 + (y2 + z2) * co
mat[0,1] = x * y * (1 - co) - z * si
mat[0,2] = x * z * (1 - co) + y * si
mat[0,3]= 0.0
mat[1,0] = x * y * (1 - co) + z * si
mat[1,1] = y2 + (x2 + z2) * co
mat[1,2] = y * z * (1 - co) - x * si
mat[1,3]= 0.0
mat[2,0] = x * z * (1 - co) - y * si
mat[2,1] = y * z * (1 - co) + x * si
mat[2,2]= z2 + (x2 + y2) * co
mat[2,3]= 0.0
mat[3,0] = 0.0
mat[3,1] = 0.0
mat[3,2]= 0.0
mat[3,3]= 1.0
if aType == self.MatrixTypes.MODELVIEW:
self.multMatrix(self.MatrixTypes.MODELVIEW, mat)
elif aType == self.MatrixTypes.PROJECTION:
self.multMatrix(self.MatrixTypes.PROJECTION, mat)
def loadMatrix(self, aMatrix, aType = MatrixTypes.MODELVIEW ):
"""
/** Similar to gLoadMatrix.
*
* \param aType either MODELVIEW or PROJECTION
* \param aMatrix matrix in column major order data, float[16]
*/
void loadMatrix(MatrixTypes aType, float *aMatrix);
"""
if aType == self.MatrixTypes.PROJECTION:
self.projection = aMatrix.astype( np.float32 )
elif aType == self.MatrixTypes.MODELVIEW:
self.modelview = aMatrix.astype( np.float32 )
else:
print "loadMatrix: wrong matrix type"
print "new modelview", self.modelview
def lookAt(self, xPos, yPos, zPos, xLook, yLook, zLook, xUp, yUp, zUp):
"""
/** Similar to gluLookAt
*
* \param xPos, yPos, zPos camera position
* \param xLook, yLook, zLook point to aim the camera at
* \param xUp, yUp, zUp camera's up vector
*/
void lookAt(float xPos, float yPos, float zPos,
float xLook, float yLook, float zLook,
float xUp, float yUp, float zUp);
"""
dir = np.array( [xLook - xPos, yLook - yPos, zLook - zPos], dtype = np.float32)
dir = normalize(dir)
up = np.array( [xUp, yUp, zUp], dtype = np.float32 )
right = normalize( np.cross(dir, up) )
up = normalize( np.cross(right,dir) )
# build the matrix
out = np.zeros( (4,4), dtype = np.float32 )
out[0,:3] = right
out[1,:3] = up
out[2,:3] = -dir
out[3,3] = 1.0
self.multMatrix(self.MatrixTypes.MODELVIEW, out)
out = np.eye( 4, dtype = np.float32 )
out[0,3] = -xPos
out[1,3] = -yPos
out[2,3] = -zPos
self.multMatrix(self.MatrixTypes.MODELVIEW, out)
if DEBUG:
print "lookat: modelview vsml", np.array( vsml.get_modelview() )
def perspective(self, fov, ratio, nearp, farp):
"""
/** Similar to gluPerspective
*
* \param fov vertical field of view
* \param ratio aspect ratio of the viewport or window
* \param nearp,farp distance to the near and far planes
*/
void perspective(float fov, float ratio, float nearp, float farp);
"""
out = np.eye( 4, dtype = np.float32 )
f = 1.0 / np.tan (fov * (np.pi / 360.0) )
out[0,0] = f / ratio
out[1,1] = f
out[2,2] = (farp + nearp) / (nearp - farp)
out[2,3] = (2.0 * farp * nearp) / (nearp - farp)
out[3,2] = -1.0
out[3,3] = 0.0
self.multMatrix(self.MatrixTypes.PROJECTION, out)
if DEBUG:
print "perspective: new projection vsml", self.projection, np.array( vsml.get_projection() )
def ortho(self, left, right, top, bottom, nearp=-1.0, farp=1.0):
"""
/** Similar to glOrtho and gluOrtho2D (just leave the last two params blank).
*
* \param left,right coordinates for the left and right vertical clipping planes
* \param bottom,top coordinates for the bottom and top horizontal clipping planes
* \param nearp,farp distance to the near and far planes
*/
void ortho(float left, float right, float bottom, float top, float nearp=-1.0f, float farp=1.0f);
"""
w = right - left
h = top - bottom
p = farp - nearp
if w == 0.0 or h == 0.0 or p == 0.0:
return
x = ( right + left ) / w
y = ( top + bottom ) / h
z = ( nearp + farp ) / p
mat2 = np.eye( 4, dtype = np.float32 )
mat2[0,0] = 2. / w
mat2[1,1] = 2. / (top - bottom)
mat2[2,2] = -2. / (farp - nearp)
mat2[0,3] = -x
mat2[1,3] = -y
mat2[2,3] = -z
self.multMatrix(self.MatrixTypes.PROJECTION, mat2)
def frustum(self, left, right, bottom, top, nearp, farp):
"""
/** Similar to glFrustum
*
* \param left,right coordinates for the left and right vertical clipping planes
* \param bottom,top coordinates for the bottom and top horizontal clipping planes
* \param nearp,farp distance to the near and far planes
*/
void frustum(float left, float right, float bottom, float top, float nearp, float farp);
"""
mat = np.eye( 4, dtype = np.float32 )
mat[0,0] = 2 * nearp / (right-left)
mat[1,1] = 2 * nearp / (top - bottom)
mat[0,2] = (right + left) / (right - left)
mat[1,2] = (top + bottom) / (top - bottom)
mat[2,2] = - (farp + nearp) / (farp - nearp)
mat[3,2] = -1.0
mat[2,3] = - 2 * farp * nearp / (farp-nearp)
mat[3,3] = 0.0
self.multMatrix(self.MatrixTypes.PROJECTION, mat)
def matrixToBuffer(self, aType):
"""
/** Updates the uniform buffer data
*
* \param aType either MODELVIEW or PROJECTION
*/
void matrixToBuffer(MatrixTypes aType);
"""
pass
def matrixToUniform(self, aType):
"""
/** Updates the uniform variables
*
* \param aType either MODELVIEW or PROJECTION
*/
void matrixToUniform(MatrixTypes aType);
"""
pass
def matrixToGL(self, aType):
    """
    /** Updates either the buffer or the uniform variables
     * based on which init* function was called last
     *
     * \param aType either MODELVIEW or PROJECTION
     */
    void matrixToGL(MatrixTypes aType);
    """
    # Stub: no-op in this Python port; kept for interface parity.
    pass
# protected:
# /// Has an init* function been called?
# bool mInit;
mInit = False
# /// Using uniform blocks?
# bool mBlocks;
mBlocks = False
# ///brief Matrix stacks for modelview and projection matrices
# std::vector<float *> mMatrixStack[2];
# mMatrixStack = []
# /// Storage for the uniform locations
# GLuint mUniformLoc[2];
# NOTE(review): class-level mutable list -- shared by every instance
# unless an init* method rebinds it per instance; confirm intent.
mUniformLoc = []
# /// Storage for the buffer index
# GLuint mBuffer;
mBuffer = None
# /// Storage for the offsets within the buffer
# GLuint mOffset[2];
# NOTE(review): shared class-level mutable list, same caveat as above.
mOffset = []
# the global vsml instance
# Module-level singleton, mirroring the original C++ library's global.
vsml = VSML()
| {
"repo_name": "fos/fos",
"path": "fos/vsml.py",
"copies": "1",
"size": "14937",
"license": "bsd-3-clause",
"hash": 7284859720823624000,
"line_mean": 30.9166666667,
"line_max": 105,
"alpha_frac": 0.5510477338,
"autogenerated": false,
"ratio": 3.6062288749396427,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9603935851340091,
"avg_score": 0.010668151479910293,
"num_lines": 468
} |
import sys
import json
import base64
PY3 = sys.version_info[0] == 3
try:
from http.client import HTTPConnection
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTP as HTTPConnection # NOQA
from httplib import HTTPS as HTTPSConnection # NOQA
try:
from urllib.parse import unquote
from urllib.parse import splithost, splittype, splituser
except ImportError:
from urllib import unquote # NOQA
from urllib import splithost, splittype, splituser # NOQA
__version__ = "0.0.1"
ID = 1
def _gen_id():
global ID
ID = ID + 1
return ID
# --------------------------------------------------------------------
# Exceptions
##
# Base class for all kinds of client-side errors.
class Error(Exception):
    """Base class for client errors."""
    def __str__(self):
        # Delegate to __repr__ so subclasses only customize one method.
        return repr(self)
##
# Indicates an HTTP-level protocol error. This is raised by the HTTP
# transport layer, if the server returns an error code other than 200
# (OK).
#
# @param url The target URL.
# @param errcode The HTTP error code.
# @param errmsg The HTTP error message.
# @param headers The HTTP header dictionary.
class ProtocolError(Error):
    """Indicates an HTTP protocol error (status other than 200 OK).

    url      -- the target URL
    errcode  -- the HTTP status code
    errmsg   -- the HTTP reason phrase
    headers  -- the HTTP response headers
    response -- the raw response body
    """

    def __init__(self, url, errcode, errmsg, headers, response):
        Error.__init__(self)
        self.url = url
        self.errcode = errcode
        self.errmsg = errmsg
        self.headers = headers
        self.response = response

    def __repr__(self):
        return "<ProtocolError for {0}: {1} {2}>".format(
            self.url, self.errcode, self.errmsg)
def getparser(encoding):
    """Return a (Parser, Unmarshaller) pair wired together."""
    unmarshaller = Unmarshaller(encoding)
    return Parser(unmarshaller), unmarshaller
def dumps(params, methodname=None, methodresponse=None, encoding=None,
          allow_none=0):
    """Serialize a JSON-RPC request for *methodname* with *params*.

    methodresponse, encoding and allow_none are accepted for
    xmlrpclib signature compatibility but are currently unused.
    Returns None when no methodname is supplied.
    """
    if not methodname:
        return None
    request = {
        "method": methodname,
        "params": params,
        "id": _gen_id(),
    }
    return json.dumps(request)
class Unmarshaller(object):
    """Accumulates raw response bytes and decodes them as JSON."""

    def __init__(self, encoding):
        self.data = None
        self.encoding = encoding

    def feed(self, data):
        """Append a chunk of raw data to the internal buffer."""
        self.data = data if self.data is None else self.data + data

    def close(self):
        """Decode the accumulated buffer and parse it as JSON."""
        return json.loads(self.data.decode(self.encoding))
class Parser(object):
    """Buffers incoming chunks and hands the payload to an unmarshaller."""

    def __init__(self, unmarshaller):
        self._target = unmarshaller
        self.data = None

    def feed(self, data):
        """Append a chunk to the internal buffer."""
        self.data = data if self.data is None else self.data + data

    def close(self):
        """Push the whole buffered payload to the unmarshaller."""
        self._target.feed(self.data)
class _Method(object):
# some magic to bind an JSON-RPC method to an RPC server.
# supports "nested" methods (e.g. examples.getStateName)
def __init__(self, send, name):
self.__send = send
self.__name = name
def __getattr__(self, name):
return _Method(self.__send, "%s.%s" % (self.__name, name))
def __call__(self, *args):
return self.__send(self.__name, args)
##
# Standard transport class for JSON-RPC over HTTP.
# <p>
# You can create custom transports by subclassing this method, and
# overriding selected methods.
class Transport:
    """Handles an HTTP transaction to an JSON-RPC server.

    Create custom transports by subclassing and overriding selected
    methods (make_connection, the send_* helpers, _parse_response).
    """

    # client identifier (may be overridden)
    user_agent = "jsonlib.py/%s (by matt harrison)" % __version__

    # Default for the debugging flag; request() overwrites it per call.
    # Previously parse_response() paths hit an AttributeError because
    # self.verbose was only ever set inside request().
    verbose = 0

    def request(self, host, handler, request_body, encoding, verbose=0):
        """Send a complete request and parse the response.

        host         -- target host descriptor (see get_host_info)
        handler      -- target RPC handler (URL path)
        request_body -- encoded JSON-RPC request body
        encoding     -- encoding used to decode the response body
        verbose      -- debugging flag

        Returns the parsed response; raises ProtocolError for any HTTP
        status other than 200.
        """
        h = self.make_connection(host)
        if verbose:
            h.set_debuglevel(1)
        self.send_request(h, handler, request_body)
        if not PY3:
            # http.client sends the Host header itself; old httplib
            # requires us to send it explicitly.
            self.send_host(h, host)
        self.send_user_agent(h)
        self.send_content(h, request_body)
        try:
            # legacy httplib.HTTP interface
            errcode, errmsg, headers = h.getreply()
            r = h.getfile()
        except AttributeError:
            # modern http.client.HTTPConnection interface
            r = h.getresponse()
            errcode = r.status
            errmsg = r.reason
            headers = r.getheaders()
        if errcode != 200:
            response = r.read()
            raise ProtocolError(
                host + handler,
                errcode, errmsg,
                headers,
                response
            )
        # _parse_response reads this flag to decide whether to echo the
        # raw body to stdout.
        self.verbose = verbose
        try:
            sock = h._conn.sock
        except AttributeError:
            sock = None
        return self._parse_response(r, sock, encoding)

    def getparser(self, encoding):
        """Return a (parser, unmarshaller) 2-tuple for *encoding*."""
        return getparser(encoding)

    def get_host_info(self, host):
        """Extract authorization info from the host parameter.

        Host may be a string, or a (host, x509-dict) tuple; if a
        string, it is checked for a "user:pw@host" format, and a
        "Basic Authentication" header is added if appropriate.

        Returns (actual host, extra headers, x509 info); the header
        field may be None.
        """
        x509 = {}
        if isinstance(host, tuple):
            host, x509 = host
        auth, host = splituser(host)
        if auth:
            auth = unquote(auth)
            if PY3:
                # BUG FIX: base64.encodestring required bytes on
                # Python 3 and was removed entirely in 3.9; use
                # encodebytes with explicit encode/decode instead.
                auth = base64.encodebytes(
                    auth.encode("utf-8")).decode("ascii")
            else:
                auth = base64.encodestring(auth)
            auth = "".join(auth.split())  # get rid of whitespace
            extra_headers = [
                ("Authorization", "Basic " + auth)
            ]
        else:
            extra_headers = None
        return host, extra_headers, x509

    def make_connection(self, host):
        """Create a HTTP connection object from a host descriptor."""
        host, extra_headers, x509 = self.get_host_info(host)
        return HTTPConnection(host)

    def send_request(self, connection, handler, request_body):
        """Send the request line (POST <handler>)."""
        connection.putrequest("POST", handler)

    def send_host(self, connection, host):
        """Send the Host header plus any extra auth headers."""
        host, extra_headers, x509 = self.get_host_info(host)
        connection.putheader("Host", host)
        if extra_headers:
            if isinstance(extra_headers, dict):
                extra_headers = list(extra_headers.items())
            for key, value in extra_headers:
                connection.putheader(key, value)

    def send_user_agent(self, connection):
        """Send the User-Agent header."""
        connection.putheader("User-Agent", self.user_agent)

    def send_content(self, connection, request_body):
        """Send the entity headers and the request body."""
        # NOTE(review): "text/xml" looks like a leftover from xmlrpclib;
        # JSON-RPC servers generally expect "application/json".  Left
        # unchanged because existing test servers may depend on it.
        connection.putheader("Content-Type", "text/xml")
        connection.putheader("Content-Length", str(len(request_body)))
        connection.endheaders()
        if request_body:
            connection.send(request_body)

    def parse_response(self, file, encoding="utf-8"):
        """Compatibility interface: parse a response stream.

        BUG FIX: this previously called _parse_response(file, None)
        without the mandatory *encoding* argument, so every call raised
        TypeError.  The encoding is now threaded through (defaulting to
        UTF-8); the new optional parameter is backward-compatible since
        all previous calls failed.
        """
        return self._parse_response(file, None, encoding)

    def _parse_response(self, file, sock, encoding):
        """Parse a response from *file* (or *sock*, when available).

        Reads the body in 1 KiB chunks, feeds them to the parser, and
        returns the unmarshalled result.
        """
        p, u = self.getparser(encoding)
        while 1:
            if sock:
                response = sock.recv(1024)
            else:
                response = file.read(1024)
            if not response:
                break
            if self.verbose:
                print("body:", repr(response))
            p.feed(response)
        file.close()
        p.close()
        return u.close()
##
# Standard transport class for JSON-RPC over HTTPS.
class SafeTransport(Transport):
    """Handles an HTTPS transaction to an JSON-RPC server."""
    # FIXME: mostly untested

    def make_connection(self, host):
        """Create an HTTPS connection object from a host descriptor.

        host may be a string, or a (host, x509-dict) tuple.
        """
        host, extra_headers, x509 = self.get_host_info(host)
        try:
            https_factory = HTTPSConnection
        except AttributeError:
            raise NotImplementedError(
                "your version of httplib doesn't support HTTPS"
            )
        else:
            return https_factory(host, None, **(x509 or {}))
class ServerProxy(object):
    """Proxy whose attribute accesses become JSON-RPC method calls."""

    def __init__(self, uri, transport=None, encoding=None,
                 verbose=None, allow_none=0):
        """
        uri        -- "http(s)://[user:pw@]host[/handler]" endpoint
        transport  -- optional Transport/SafeTransport instance
        encoding   -- request/response text encoding (defaults to UTF-8)
        verbose    -- debugging flag handed to the transport
        allow_none -- accepted for xmlrpclib compatibility (unused)
        """
        utype, uri = splittype(uri)
        if utype not in ("http", "https"):
            raise IOError("Unsupported JSONRPC protocol")
        self.__host, self.__handler = splithost(uri)
        if not self.__handler:
            self.__handler = "/RPC2"
        if transport is None:
            if utype == "https":
                transport = SafeTransport()
            else:
                transport = Transport()
        self.__transport = transport
        # BUG FIX: with the default encoding=None, __request crashed in
        # request.encode(None) (TypeError); fall back to UTF-8 instead.
        self.__encoding = encoding or "utf-8"
        self.__verbose = verbose
        self.__allow_none = allow_none

    def __request(self, methodname, params):
        """call a method on the remote server
        """
        request = dumps(params, methodname, encoding=self.__encoding,
                        allow_none=self.__allow_none)
        response = self.__transport.request(
            self.__host,
            self.__handler,
            request.encode(self.__encoding),
            self.__encoding,
            verbose=self.__verbose
        )
        # NOTE(review): JSON responses are usually dicts, for which a
        # length of 1 would make response[0] raise KeyError -- confirm
        # what the servers under test actually return here.
        if len(response) == 1:
            response = response[0]
        return response

    def __repr__(self):
        return ("<JSONProxy for %s%s>" %
                (self.__host, self.__handler)
                )

    __str__ = __repr__

    def __getattr__(self, name):
        # dispatch: server.foo.bar(args) -> __request("foo.bar", args)
        return _Method(self.__request, name)

    # note: to call a remote object with an non-standard name, use
    # result getattr(server, "strange-python-name")(args)
# note: to call a remote object with an non-standard name, use
# result getattr(server, "strange-python-name")(args)
if __name__ == "__main__":
    # Smoke test against a local JSON-RPC server (must already be
    # listening on port 8080): exercises a good call, a bad method
    # name, and wrong/alternate arities.
    s = ServerProxy("http://localhost:8080/foo/", verbose=1)
    c = s.echo("foo bar")
    print(c)
    d = s.bad("other")
    print(d)
    e = s.echo("foo bar", "baz")
    print(e)
    f = s.echo(5)
    print(f)
| {
"repo_name": "treemo/circuits",
"path": "tests/web/jsonrpclib.py",
"copies": "3",
"size": "12764",
"license": "mit",
"hash": 4777233249552195000,
"line_mean": 26.8082788671,
"line_max": 72,
"alpha_frac": 0.5980883736,
"autogenerated": false,
"ratio": 4.167156382631407,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 459
} |
"""A port scanning script from the Violent Python cookbook."""
import nmap
import optparse
from socket import *
from threading import *
screen_lock = Semaphore(value=1)
def connscan(tgt_host, tgt_port):
    """Attempt a TCP connect to (tgt_host, tgt_port) and report the result.

    Prints "[+]<port>/tcp open" plus the banner on success,
    "[-]<port>/tcp closed" on any failure.
    """
    connsckt = None  # BUG FIX: otherwise unbound in finally if socket() fails
    try:
        connsckt = socket(AF_INET, SOCK_STREAM)
        connsckt.connect((tgt_host, tgt_port))
        # BUG FIX: sockets require bytes on Python 3; sending a str
        # raised TypeError, which the old bare except silently turned
        # into a bogus "closed" report.
        connsckt.send(b'ViolentPython\r\n')
        results = connsckt.recv(100)
        screen_lock.acquire()
        print('[+]%d/tcp open' % tgt_port)
        print('[+] ' + str(results))
    except Exception:  # narrowed from a bare except (kept broad: any I/O error)
        screen_lock.acquire()
        print('[-]%d/tcp closed' % tgt_port)
    finally:
        screen_lock.release()
        if connsckt is not None:
            connsckt.close()
def portscan(tgt_host, tgt_ports):
    """Resolve tgt_host, print a banner, and scan each port in a thread.

    tgt_ports is an iterable of port numbers (ints or numeric strings).
    """
    try:
        tgt_ip = gethostbyname(tgt_host)
    except error:  # BUG FIX: narrowed from bare except (gaierror subclasses error)
        print("[-] Cannot resolve '%s': Unknown host" % tgt_host)
        return
    try:
        tgt_name = gethostbyaddr(tgt_ip)
        print('\n[+] Scan Results for: ' + tgt_name[0])
    except error:  # reverse lookup failed (herror) -- fall back to the IP
        print('\n[+] Scan Results for: ' + tgt_ip)
    setdefaulttimeout(1)
    for tgt_port in tgt_ports:
        t = Thread(target=connscan, args=(tgt_host, int(tgt_port)))
        t.start()
def nmap_scan(tgt_host, tgt_port):
    """Basic NMAP scan: query the port state and print it."""
    scanner = nmap.PortScanner()
    scanner.scan(tgt_host, tgt_port)
    state = scanner[tgt_host]['tcp'][int(tgt_port)]['state']
    print("[*]%stcp/%s %s" % (tgt_host, tgt_port, state))
def main():
    """Parse the -H and -p flags and nmap-scan each requested port."""
    parser = optparse.OptionParser(
        'usage %prog -H' + '<target host> -p <target port>')
    parser.add_option('-H', dest='tgt_host', type='string',
                      help='specify target host')
    parser.add_option('-p', dest='tgt_port', type='string',
                      help='specify target port')
    (options, args) = parser.parse_args()
    tgt_host = options.tgt_host
    # BUG FIX: the old code did str(None).split(', ') and then compared
    # the string 'None' against None, so a missing -p flag was never
    # detected and the literal port 'None' was handed to nmap.
    if tgt_host is None or options.tgt_port is None:
        print(parser.usage)
        exit(0)
    # Accept both '80,443' and '80, 443' (old code only split on ', ').
    tgt_ports = [port.strip() for port in str(options.tgt_port).split(',')]
    for tgt_port in tgt_ports:
        nmap_scan(tgt_host, str(tgt_port))
if __name__ == '__main__':
main() | {
"repo_name": "Copenbacon/ViolentPython",
"path": "src/TCPScanner.py",
"copies": "1",
"size": "2317",
"license": "mit",
"hash": -3540592773456457000,
"line_mean": 30.7534246575,
"line_max": 101,
"alpha_frac": 0.5886922745,
"autogenerated": false,
"ratio": 3.348265895953757,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4436958170453757,
"avg_score": null,
"num_lines": null
} |
#A PostgreSQL driver for Akara Dendrite, a Web semi-structured metadata tool
'''
[
(origin, rel, target, {attrname1: attrval1, attrname2: attrval2}),
]
The optional attributes are metadata bound to the statement itself
'''
#Note: for PyPy support port to pg8000 <http://pybrary.net/pg8000/>
#Reportedly PyPy/pg8000 is faster than CPython/psycopg2
import logging
from itertools import groupby
from operator import itemgetter
import psycopg2 #http://initd.org/psycopg/
from versa.driver import connection_base
class connection(connection_base):
    """Versa driver persisting relationships in PostgreSQL via psycopg2."""

    def __init__(self, connstr, logger=None):
        '''
        connstr - the Postgres connection string
        logger - optional logger-like object (defaults to the logging module)
        '''
        self._conn = psycopg2.connect(connstr)
        self._logger = logger or logging
        return

    def create_space(self):
        '''Set up a new table space for the first time'''
        cur = self._conn.cursor()
        cur.execute(SQL_MODEL)
        self._conn.commit()
        cur.close()
        return

    def drop_space(self):
        '''Dismantle an existing table space'''
        cur = self._conn.cursor()
        cur.execute(DROP_SQL_MODEL)
        self._conn.commit()
        cur.close()
        return

    def query(self, expr):
        '''Execute a Versa query'''
        raise NotImplementedError

    def size(self):
        '''Return the number of links in the model'''
        cur = self._conn.cursor()
        cur.execute("SELECT COUNT(*) FROM relationship;")
        result = cur.fetchone()
        # BUG FIX: close the cursor and end the read transaction so the
        # connection is not left "idle in transaction".
        cur.close()
        self._conn.rollback()
        return result[0]

    def __iter__(self):
        '''Iterate over every relationship, attributes rolled up per link'''
        cur = self._conn.cursor()
        # BUG FIX: ORDER BY added. _process_db_rows_iter groups rows
        # with itertools.groupby, which requires equal rawids to be
        # adjacent; without the ordering, a relationship's attributes
        # could be split across several yielded tuples.  (A dead
        # .format(tables) call with no placeholder was also removed.)
        querystr = "SELECT relationship.rawid, relationship.origin, relationship.rel, relationship.target, attribute.name, attribute.value FROM relationship FULL JOIN attribute ON relationship.rawid = attribute.rawid ORDER BY relationship.rawid;"
        cur.execute(querystr)
        return self._process_db_rows_iter(cur)

    def match(self, origin=None, rel=None, target=None, attrs=None, include_ids=False):
        '''
        Retrieve an iterator of relationships that match a pattern of components

        origin - (optional) origin of the relationship (similar to an RDF subject). If omitted any origin will be matched.
        rel - (optional) type IRI of the relationship (similar to an RDF predicate). If omitted any relationship will be matched.
        target - (optional) target of the relationship (similar to an RDF object), a boolean, floating point or unicode object. If omitted any target will be matched.
        attrs - (optional) attribute mapping of relationship metadata, i.e. {attrname1: attrval1, attrname2: attrval2}. If any attribute is specified, an exact match is made (i.e. the attribute name and value must match).
        include_ids - If true include statement IDs with yield values
        '''
        #FIXME: Implement include_ids
        cur = self._conn.cursor()
        conditions = ""
        and_placeholder = ""
        params = []
        if origin:
            conditions += "relationship.origin = %s"
            params.append(origin)
            and_placeholder = " AND "
        if target:
            conditions += and_placeholder + "relationship.target = %s"
            params.append(target)
            and_placeholder = " AND "
        if rel:
            conditions += and_placeholder + "relationship.rel = %s"
            params.append(rel)
            and_placeholder = " AND "
        if attrs:
            # Each attribute constraint becomes an EXISTS subquery so
            # several attributes can be required on the same link.
            for a_name, a_val in attrs.items():
                conditions += and_placeholder + "EXISTS (SELECT 1 from attribute AS subattr WHERE subattr.rawid = relationship.rawid AND subattr.name = %s AND subattr.value = %s)"
                params.extend((a_name, a_val))
                and_placeholder = " AND "
        # BUG FIX: with no criteria the old code emitted "WHERE  ORDER
        # BY", a SQL syntax error; an unconstrained match now scans all.
        if not conditions:
            conditions = "TRUE"
        querystr = "SELECT relationship.rawid, relationship.origin, relationship.rel, relationship.target, attribute.name, attribute.value FROM relationship FULL JOIN attribute ON relationship.rawid = attribute.rawid WHERE {0} ORDER BY relationship.rawid;".format(conditions)
        self._logger.debug(cur.mogrify(querystr, params))
        cur.execute(querystr, params)
        return self._process_db_rows_iter(cur)

    def _process_db_rows_iter(self, cursor):
        '''
        Turn the low-level rows from the result of a standard query join
        into higher-level statements, yielded iteratively. Note this might lead to
        idle transaction errors?
        '''
        #Be aware of: http://packages.python.org/psycopg2/faq.html#problems-with-transactions-handling
        #The results will come back grouped by the raw relationship IDs, in order
        for relid, relgroup in groupby(cursor, itemgetter(0)):
            curr_rel = None
            attrs = None
            #Each relgroup are the DB rows corresponding to a single relationship,
            #With redundant origin/rel/target but the sequence of attributes
            for row in relgroup:
                (rawid, origin, rel, target, a_name, a_val) = row
                if not curr_rel:
                    curr_rel = (origin, rel, target)
                if a_name:
                    if not attrs:
                        attrs = {}
                        curr_rel = (origin, rel, target, attrs)
                    attrs[a_name] = a_val
            yield curr_rel
        cursor.close()
        self._conn.rollback()  # Finish with the transaction
        return

    def add(self, origin, rel, target, attrs=None, rid=None):
        '''
        Add one relationship to the extent

        origin - origin of the relationship (similar to an RDF subject)
        rel - type IRI of the relationship (similar to an RDF predicate)
        target - target of the relationship (similar to an RDF object), a boolean, floating point or unicode object
        attrs - optional attribute mapping of relationship metadata, i.e. {attrname1: attrval1, attrname2: attrval2}
        rid - optional ID for the relationship in IRI form. If not specified one will be generated.
        '''
        #FIXME no it doesn't re:
        #returns an ID (IRI) for the resulting relationship
        cur = self._conn.cursor()
        if rid:
            querystr = "INSERT INTO relationship (origin, rel, target, rid) VALUES (%s, %s, %s, %s) RETURNING rawid;"
            cur.execute(querystr, (origin, rel, target, rid))
        else:
            querystr = "INSERT INTO relationship (origin, rel, target) VALUES (%s, %s, %s) RETURNING rawid;"
            cur.execute(querystr, (origin, rel, target))
        rawid = cur.fetchone()[0]
        # BUG FIX: attrs defaults to None; the old unconditional
        # attrs.items() crashed with AttributeError whenever a link was
        # added without metadata.
        if attrs:
            for a_name, a_val in attrs.items():
                querystr = "INSERT INTO attribute (rawid, name, value) VALUES (%s, %s, %s);"
                cur.execute(querystr, (rawid, a_name, a_val))
        self._conn.commit()
        cur.close()
        return

    def add_many(self, rels):
        '''
        Add a list of relationships to the extent

        rels - a list of 0 or more relationship tuples, e.g.:
        [
        (origin, rel, target, {attrname1: attrval1, attrname2: attrval2}, rid),
        ]

        origin - origin of the relationship (similar to an RDF subject)
        rel - type IRI of the relationship (similar to an RDF predicate)
        target - target of the relationship (similar to an RDF object), a boolean, floating point or unicode object
        attrs - optional attribute mapping of relationship metadata, i.e. {attrname1: attrval1, attrname2: attrval2}
        rid - optional ID for the relationship in IRI form. If not specified for any relationship, one will be generated.

        you can omit the dictionary of attributes if there are none, as long as you are not specifying a statement ID

        returns a list of IDs (IRI), one for each resulting relationship, in order
        '''
        raise NotImplementedError

    def remove(self, rids):
        '''
        Delete one or more relationship, by ID, from the extent

        rids - either a single ID or an sequence or iterator of IDs
        '''
        raise NotImplementedError

    def add_iri_prefix(self, prefix):
        '''
        Add an IRI prefix, for efficiency of table scan searches

        XXX We might or might not need such a method, based on perf testing
        '''
        raise NotImplementedError

    def close(self):
        '''Release the underlying database connection'''
        self._conn.close()
        return
SQL_MODEL = '''
CREATE TABLE relationship (
rawid SERIAL PRIMARY KEY, -- a low level, internal ID purely for effieicnt referential integrity
id TEXT UNIQUE, --The higher level relationship ID
origin TEXT NOT NULL,
rel TEXT NOT NULL,
target TEXT NOT NULL
);
CREATE TABLE attribute (
rawid INT REFERENCES relationship (rawid),
name TEXT,
value TEXT
);
CREATE INDEX main_relationship_index ON relationship (origin, rel);
CREATE INDEX main_attribute_index ON attribute (name, value);
'''
DROP_SQL_MODEL = '''
DROP INDEX main_relationship_index;
DROP INDEX main_attribute_index;
DROP TABLE attribute;
DROP TABLE relationship;
'''
#Some notes on arrays:
# * http://fossplanet.com/f15/%5Bgeneral%5D-general-postgres-performance-tips-when-using-array-169307/
| {
"repo_name": "uogbuji/versa",
"path": "tools/py/driver/postgres.py",
"copies": "1",
"size": "10538",
"license": "apache-2.0",
"hash": -5028046276827242000,
"line_mean": 41.837398374,
"line_max": 619,
"alpha_frac": 0.642721579,
"autogenerated": false,
"ratio": 4.152088258471237,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5294809837471237,
"avg_score": null,
"num_lines": null
} |
# A post is considered to be extracted from an existing file, and always
# consists in a title, followed by a line of '------------', followed
# immediatly by the tags with a leading #. It is a list of strings, where empty
# ones were removed.
# If it is a valid post, it has at minima 4 lines (title, line of -, tags,
# date)
# They should be extracted recursively, and fed to the different routines
# `_from_post`.
# Note that all doctests are commented, and now executed in the py.test suite,
# for compatibility reasons between py2.7 and py3.3
from __future__ import unicode_literals
from datetime import date
from collections import Counter
from collections import OrderedDict as od
import re
import io
def is_valid_post(post):
    """
    Check that it has all the required arguments

    If the post is valid, the function returns True. Otherwise, an
    MarkdownSyntaxError is raised with a description of the problem.
    """
    # remove all blank lines
    temp = [e for e in post if e]
    if len(temp) < 4:
        raise MarkdownSyntaxError("Post contains under four lines", post)
    # Recover the index of the line of dashes, in case of long titles
    dashes = [e for e in temp if re.match('^-{2,}$', e)]
    try:
        index = temp.index(dashes[0])
    except IndexError:
        raise MarkdownSyntaxError("Post does not contain dashes", post)
    # BUG FIX: a dash line in first position means there is no title;
    # the old `if index:` treated index 0 as falsy and returned True
    # without running any of the remaining checks.
    if index == 0 or not temp[0]:
        raise MarkdownSyntaxError("Post title is empty", post)
    # BUG FIX: guard against dashes too close to the end, which used to
    # escape as a bare IndexError instead of the documented error.
    if index + 2 >= len(temp):
        raise MarkdownSyntaxError(
            "Tags or date are missing after the dashes", post)
    if not re.match('^#.*$', temp[index+1]):
        raise MarkdownSyntaxError(
            "Tags were not found after the dashes", post)
    match = re.match(
        r"^\*[0-9]{2}/[0-1][0-9]/[0-9]{4}\*$",
        temp[index+2])
    if not match:
        raise MarkdownSyntaxError(
            "The date could not be read", temp)
    return True
def extract_tags_from_post(post):
    """
    Recover the tags from an extracted post

    Returns (tags, post-without-the-tag-line); raises
    MarkdownSyntaxError when the tag line is missing or empty.

    .. note::

        No tests are being done to ensure that the third line exists, as only
        valid posts, determined with :func:`is_valid_post` are sent to this
        routine.
    """
    tag_line = post[2].strip()
    if tag_line and tag_line[0] == '#':
        tags = [elem.strip().lower() for elem in tag_line[1:].split(',')]
        if any(tags):
            return tags, post[:2]+post[3:]
        else:
            raise MarkdownSyntaxError("No tags specified in the post", post)
    # BUG FIX: the old code fell through and implicitly returned None
    # when the third line was not a '#' tag line, making callers crash
    # on tuple unpacking; raise the documented error instead.
    raise MarkdownSyntaxError("No tags specified in the post", post)
def extract_title_from_post(post):
    """
    Return the title of an extracted post (its first line).
    """
    title = post[0]
    return title
def extract_date_from_post(post):
    """
    Recover the date from an extracted post.

    Returns (date, post-without-the-date-line); raises
    MarkdownSyntaxError when the third line is not a *dd/mm/yyyy* date.
    """
    match = re.match(
        r"\*([0-9]{2})/([0-1][0-9])/([0-9]{4})\*",
        post[2])
    if not match:
        raise MarkdownSyntaxError("No date found in the post", post)
    assert len(match.groups()) == 3
    day, month, year = match.groups()
    extracted = date(int(year), int(month), int(day))
    return extracted, post[:2] + post[3:]
def normalize_post(post):
    """
    Perform normalization of the input post

    - If a title spans several lines, merge them into one
    - Drop stray blank lines in the headers
    """
    # Strip trailing newlines
    lines = [line.rstrip('\n') for line in post]
    # Locate the dash line underlining the title
    underline = [e for e in lines if re.match('^-{2,}$', e)][0]
    underline_index = lines.index(underline)
    # Merge everything above the dashes into a single title line
    normalized = [' '.join(lines[:underline_index]), underline]
    # Keep the tag line (first line starting with '#')
    tag_line = [e for e in lines if re.match('^\#(.*)$', e)][0]
    normalized.append(tag_line)
    # Keep the date line (*dd/mm/yyyy*)
    date_line = [e for e in lines if re.match(
        r"\*([0-9]{2})/([0-1][0-9])/([0-9]{4})\*", e)][0]
    date_index = lines.index(date_line)
    normalized.append(date_line)
    # Append the body, starting from the first non-empty line after the date
    body = [e for e in lines[date_index+1:] if e]
    if body:
        normalized.extend(lines[lines.index(body[0]):])
    return normalized
def extract_corpus_from_post(post):
    """
    Return the body of a post: everything after the title and dash lines.
    """
    corpus = post[2:]
    return corpus
def extract_title_and_posts_from_text(text):
    """
    From an entire text (array), recover each posts and the file's title

    The title is the first run of lines underlined with '=' signs; each
    post starts at the blank line preceding a '-'-underlined title.
    Returns (title, list-of-normalized-posts); raises
    MarkdownSyntaxError when no file title is found.
    """
    # Make a first pass to recover the title (first line that is underlined
    # with = signs) and the indices of the dash, that signals a new post.
    post_starting_indices = []
    has_title = False
    for index, line in enumerate(text):
        # Remove white lines at the beginning
        if not line.strip():
            continue
        if re.match('^={2,}$', line) and not has_title:
            title = ' '.join(text[:index]).strip()
            has_title = True
        if re.match('^-{2,}$', line) and line[0] == '-':
            # Check that the lines surrounding this line of dashes are
            # non-empty, otherwise it could be the beginning or end of a table.
            previous_line = text[index-1].rstrip('\n')
            if index+1 < len(text)-1:
                next_line = text[index+1].rstrip('\n')
                if previous_line and next_line:
                    # find the latest non empty line
                    # NOTE(review): only the 10 lines above the dashes are
                    # scanned; a post whose title block has no preceding
                    # blank line within that window is silently dropped.
                    for backward_index in range(1, 10):
                        if not text[index-backward_index].strip():
                            post_starting_indices.append(
                                index-backward_index+1)
                            break
    if not has_title:
        raise MarkdownSyntaxError(
            "You should specify a title to your file"
            ", underlined with = signs", [])
    number_of_posts = len(post_starting_indices)
    # Create post_indices such that it stores all the post, so the starting and
    # ending index of each post
    post_indices = []
    for index, elem in enumerate(post_starting_indices):
        if index < number_of_posts - 1:
            post_indices.append([elem, post_starting_indices[index+1]])
        else:
            post_indices.append([elem, len(text)])
    posts = []
    for elem in post_indices:
        start, end = elem
        posts.append(text[start:end])
    # Normalize them all
    for index, post in enumerate(posts):
        posts[index] = normalize_post(post)
        # NOTE(review): assert is stripped under python -O; consider an
        # explicit check if validation must always run.
        assert is_valid_post(posts[index]) is True
    return title, posts
def post_to_markdown(post):
    """
    Write the markdown for a given post

    post : list
        lines constituting the post

    Returns a (markdown lines, tags) pair.
    """
    title = post[0]
    rendered = ["", "<article class='blog-post' markdown=1>",
                "## %s {.blog-post-title}" % title, ""]
    tags, remainder = extract_tags_from_post(post)
    edit_date, remainder = extract_date_from_post(remainder)
    tag_markup = ", ".join(["**%s**" % tag for tag in tags])
    rendered.extend(["<p class='blog-post-meta'>",
                     "%s:" % edit_date,
                     "%s" % tag_markup,
                     "</p>"])
    rendered.extend(extract_corpus_from_post(remainder))
    rendered.extend(["</article>", "", ""])
    return rendered, tags
def from_notes_to_markdown(path, input_tags=()):
    """
    From a file, given tags, produce an output markdown file.

    This will then be interpreted with the pandoc library into html.

    ..note::

        so far the date is ignored.

    Returns
    -------
    markdown : list
        entire markdown text
    tags : list of tuples
        list of tags extracted from the text, with their importance
    """
    # BUG FIX: the file handle was never closed; use a context manager.
    with io.open(path, 'r', encoding='utf-8', errors='replace') as stream:
        text = stream.readlines()
    title, posts = extract_title_and_posts_from_text(text)
    markdown = ["<article class='blog-header'>",
                "# %s {.blog-title}" % title, "</article>", "",
                "<article class='row'>", "<article class='col-sm-12 blog-main'>"]
    extracted_tags = []
    for post in posts:
        text, tags = post_to_markdown(post)
        # Keep only posts carrying every requested tag
        if all([tag in tags for tag in input_tags]):
            extracted_tags.extend(tags)
            markdown.extend(text)
    markdown.extend(["</article>", "</article>"])
    cleaned_tags = sort_tags(extracted_tags)
    return markdown, cleaned_tags
def sort_tags(source):
    """
    Return an OrderedDict mapping tag -> count, biggest tags first.
    """
    # Counter consumes any iterable directly; the old [e for e in source]
    # copy was redundant.
    return od(sorted(Counter(source).items(), key=lambda t: -t[1]))
def create_post_from_entry(title, tags, corpus):
    """
    Create a string containing the post given user's input

    title/tags/corpus become a dash-underlined title, a '# tag, tag'
    line, today's date as *dd/mm/yyyy*, then the body.
    """
    text = [title]
    # Underline the title with as many dashes as it has characters
    # (idiom: string repetition instead of a join over a comprehension).
    text.append('\n%s\n' % ('-' * len(title)))
    text.append('# %s\n' % ', '.join(tags))
    text.append('\n*%s*\n\n' % date.today().strftime("%d/%m/%Y"))
    text.append(corpus + '\n')
    return ''.join(text)
def create_image_markdown(filename):
    """
    Create a valid markdown string that presents the image given as a filename

    BUG FIX: the format string was empty ("".format(filename)), so the
    function ignored its argument and always returned ''.  It now emits
    standard markdown image syntax, using the filename as alt text.
    """
    text = "![{0}]({0})".format(filename)
    return text
class MarkdownSyntaxError(ValueError):
    """Raised when a post does not follow the expected markdown layout."""

    def __init__(self, message, post):
        # Indent each post line under the message for readable output.
        details = '\n'.join([' %s' % line for line in post])
        ValueError.__init__(self, message + ':\n\n' + details)
| {
"repo_name": "egolus/NoteOrganiser",
"path": "noteorganiser/text_processing.py",
"copies": "2",
"size": "9534",
"license": "mit",
"hash": -1631932977266638000,
"line_mean": 30.8862876254,
"line_max": 81,
"alpha_frac": 0.5855884204,
"autogenerated": false,
"ratio": 3.8412570507655115,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5426845471165511,
"avg_score": null,
"num_lines": null
} |
"""A powerful, extensible, and easy-to-use option parser.
By Greg Ward <gward@python.net>
Originally distributed as Optik.
For support, use the optik-users@lists.sourceforge.net mailing list
(http://lists.sourceforge.net/lists/listinfo/optik-users).
"""
__version__ = "1.5.3"
__all__ = ['Option',
'make_option',
'SUPPRESS_HELP',
'SUPPRESS_USAGE',
'Values',
'OptionContainer',
'OptionGroup',
'OptionParser',
'HelpFormatter',
'IndentedHelpFormatter',
'TitledHelpFormatter',
'OptParseError',
'OptionError',
'OptionConflictError',
'OptionValueError',
'BadOptionError']
__copyright__ = """
Copyright (c) 2001-2006 Gregory P. Ward. All rights reserved.
Copyright (c) 2002-2006 Python Software Foundation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys, os
import types
import textwrap
def _repr(self):
return "<%s at 0x%x: %s>" % (self.__class__.__name__, id(self), self)
# This file was generated from:
# Id: option_parser.py 527 2006-07-23 15:21:30Z greg
# Id: option.py 522 2006-06-11 16:22:03Z gward
# Id: help.py 527 2006-07-23 15:21:30Z greg
# Id: errors.py 509 2006-04-20 00:58:24Z gward
try:
    from gettext import gettext
except ImportError:
    # gettext is unavailable on this build: fall back to an identity
    # function so the _("...") calls below still work, just untranslated.
    def gettext(message):
        return message
_ = gettext
class OptParseError (Exception):
    """Root of the optparse exception hierarchy.

    Carries a single message string, available as both .msg and str().
    """

    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return self.msg
class OptionError (OptParseError):
    """
    Raised if an Option instance is created with invalid or
    inconsistent arguments.
    """

    def __init__(self, msg, option):
        self.msg = msg
        # Store the option's string form rather than the object itself.
        self.option_id = str(option)

    def __str__(self):
        if not self.option_id:
            return self.msg
        return "option %s: %s" % (self.option_id, self.msg)
class OptionConflictError (OptionError):
    """
    Raised if conflicting options are added to an OptionParser
    (i.e. two options share a short or long option string and the
    conflict handler is "error").
    """
class OptionValueError (OptParseError):
    """
    Raised if an invalid option value is encountered on the command
    line (e.g. a non-numeric string for an "int" option).
    """
class BadOptionError (OptParseError):
    """
    Raised if an invalid option is seen on the command line.
    """

    def __init__(self, opt_str):
        # Keep the offending option string for the error message.
        self.opt_str = opt_str

    def __str__(self):
        message = _("no such option: %s")
        return message % self.opt_str
class AmbiguousOptionError (BadOptionError):
    """
    Raised if an ambiguous option is seen on the command line
    (an abbreviated long option that matches several full options).
    """

    def __init__(self, opt_str, possibilities):
        BadOptionError.__init__(self, opt_str)
        self.possibilities = possibilities

    def __str__(self):
        alternatives = ", ".join(self.possibilities)
        return _("ambiguous option: %s (%s?)") % (self.opt_str, alternatives)
class HelpFormatter:
    """
    Abstract base class for formatting option help.  OptionParser
    instances should use one of the HelpFormatter subclasses for
    formatting help; by default IndentedHelpFormatter is used.

    Instance attributes:
      parser : OptionParser
        the controlling OptionParser instance
      indent_increment : int
        the number of columns to indent per nesting level
      max_help_position : int
        the maximum starting column for option help text
      help_position : int
        the calculated starting column for option help text;
        initially the same as the maximum
      width : int
        total number of columns for output (pass None to constructor for
        this value to be taken from the $COLUMNS environment variable)
      level : int
        current indentation level
      current_indent : int
        current indentation level (in columns)
      help_width : int
        number of columns available for option help text (calculated)
      default_tag : str
        text to replace with each option's default value, "%default"
        by default.  Set to false value to disable default value expansion.
      option_strings : { Option : str }
        maps Option instances to the snippet of help text explaining
        the syntax of that option, e.g. "-h, --help" or
        "-fFILE, --file=FILE"
      _short_opt_fmt : str
        format string controlling how short options with values are
        printed in help text.  Must be either "%s%s" ("-fFILE") or
        "%s %s" ("-f FILE"), because those are the two syntaxes that
        Optik supports.
      _long_opt_fmt : str
        similar but for long options; must be either "%s %s" ("--file FILE")
        or "%s=%s" ("--file=FILE").
    """

    # String shown in "%default" expansion when the option has no default.
    NO_DEFAULT_VALUE = "none"

    def __init__(self,
                 indent_increment,
                 max_help_position,
                 width,
                 short_first):
        self.parser = None
        self.indent_increment = indent_increment
        self.help_position = self.max_help_position = max_help_position
        if width is None:
            # Fall back to the terminal width from $COLUMNS, or 80.
            try:
                width = int(os.environ['COLUMNS'])
            except (KeyError, ValueError):
                width = 80
            # Leave two columns of slack at the right margin.
            width -= 2
        self.width = width
        self.current_indent = 0
        self.level = 0
        self.help_width = None          # computed later, in store_option_strings()
        self.short_first = short_first
        self.default_tag = "%default"
        self.option_strings = {}
        self._short_opt_fmt = "%s %s"
        self._long_opt_fmt = "%s=%s"

    def set_parser(self, parser):
        self.parser = parser

    def set_short_opt_delimiter(self, delim):
        # Only "" ("-fFILE") and " " ("-f FILE") are valid.
        if delim not in ("", " "):
            raise ValueError(
                "invalid metavar delimiter for short options: %r" % delim)
        self._short_opt_fmt = "%s" + delim + "%s"

    def set_long_opt_delimiter(self, delim):
        # Only "=" ("--file=FILE") and " " ("--file FILE") are valid.
        if delim not in ("=", " "):
            raise ValueError(
                "invalid metavar delimiter for long options: %r" % delim)
        self._long_opt_fmt = "%s" + delim + "%s"

    def indent(self):
        self.current_indent += self.indent_increment
        self.level += 1

    def dedent(self):
        self.current_indent -= self.indent_increment
        assert self.current_indent >= 0, "Indent decreased below 0."
        self.level -= 1

    def format_usage(self, usage):
        raise NotImplementedError, "subclasses must implement"

    def format_heading(self, heading):
        raise NotImplementedError, "subclasses must implement"

    def _format_text(self, text):
        """
        Format a paragraph of free-form text for inclusion in the
        help output at the current indentation level.
        """
        text_width = self.width - self.current_indent
        indent = " "*self.current_indent
        return textwrap.fill(text,
                             text_width,
                             initial_indent=indent,
                             subsequent_indent=indent)

    def format_description(self, description):
        if description:
            return self._format_text(description) + "\n"
        else:
            return ""

    def format_epilog(self, epilog):
        if epilog:
            return "\n" + self._format_text(epilog) + "\n"
        else:
            return ""

    def expand_default(self, option):
        """Replace the default_tag ("%default") in an option's help text
        with the option's actual default value (or NO_DEFAULT_VALUE)."""
        if self.parser is None or not self.default_tag:
            return option.help

        default_value = self.parser.defaults.get(option.dest)
        # NO_DEFAULT is the module-level "no default supplied" sentinel.
        if default_value is NO_DEFAULT or default_value is None:
            default_value = self.NO_DEFAULT_VALUE

        return option.help.replace(self.default_tag, str(default_value))

    def format_option(self, option):
        # The help for each option consists of two parts:
        #   * the opt strings and metavars
        #     eg. ("-x", or "-fFILENAME, --file=FILENAME")
        #   * the user-supplied help string
        #     eg. ("turn on expert mode", "read data from FILENAME")
        #
        # If possible, we write both of these on the same line:
        #   -x      turn on expert mode
        #
        # But if the opt string list is too long, we put the help
        # string on a second line, indented to the same column it would
        # start in if it fit on the first line.
        #   -fFILENAME, --file=FILENAME
        #           read data from FILENAME
        result = []
        opts = self.option_strings[option]
        opt_width = self.help_position - self.current_indent - 2
        if len(opts) > opt_width:
            # Opt strings too wide: help starts on the next line.
            opts = "%*s%s\n" % (self.current_indent, "", opts)
            indent_first = self.help_position
        else:                       # start help on same line as opts
            opts = "%*s%-*s  " % (self.current_indent, "", opt_width, opts)
            indent_first = 0
        result.append(opts)
        if option.help:
            help_text = self.expand_default(option)
            help_lines = textwrap.wrap(help_text, self.help_width)
            result.append("%*s%s\n" % (indent_first, "", help_lines[0]))
            result.extend(["%*s%s\n" % (self.help_position, "", line)
                           for line in help_lines[1:]])
        elif opts[-1] != "\n":
            result.append("\n")
        return "".join(result)

    def store_option_strings(self, parser):
        """Pre-format all option strings and compute help_position /
        help_width from the widest one (capped by max_help_position)."""
        self.indent()
        max_len = 0
        for opt in parser.option_list:
            strings = self.format_option_strings(opt)
            self.option_strings[opt] = strings
            max_len = max(max_len, len(strings) + self.current_indent)
        # Group options sit one indent level deeper than top-level ones.
        self.indent()
        for group in parser.option_groups:
            for opt in group.option_list:
                strings = self.format_option_strings(opt)
                self.option_strings[opt] = strings
                max_len = max(max_len, len(strings) + self.current_indent)
        self.dedent()
        self.dedent()
        self.help_position = min(max_len + 2, self.max_help_position)
        self.help_width = self.width - self.help_position

    def format_option_strings(self, option):
        """Return a comma-separated list of option strings & metavariables."""
        if option.takes_value():
            metavar = option.metavar or option.dest.upper()
            short_opts = [self._short_opt_fmt % (sopt, metavar)
                          for sopt in option._short_opts]
            long_opts = [self._long_opt_fmt % (lopt, metavar)
                         for lopt in option._long_opts]
        else:
            short_opts = option._short_opts
            long_opts = option._long_opts

        if self.short_first:
            opts = short_opts + long_opts
        else:
            opts = long_opts + short_opts

        return ", ".join(opts)
class IndentedHelpFormatter (HelpFormatter):
    """Format help with indented section bodies.
    """

    def __init__(self,
                 indent_increment=2,
                 max_help_position=24,
                 width=None,
                 short_first=1):
        HelpFormatter.__init__(self, indent_increment, max_help_position,
                               width, short_first)

    def format_usage(self, usage):
        return _("Usage: %s\n") % usage

    def format_heading(self, heading):
        # Heading is indented to the current level and ends with a colon.
        pad = " " * self.current_indent
        return pad + heading + ":\n"
class TitledHelpFormatter (HelpFormatter):
    """Format help with underlined section headers.
    """

    def __init__(self,
                 indent_increment=0,
                 max_help_position=24,
                 width=None,
                 short_first=0):
        HelpFormatter.__init__(self, indent_increment, max_help_position,
                               width, short_first)

    def format_usage(self, usage):
        heading = self.format_heading(_("Usage"))
        return "%s  %s\n" % (heading, usage)

    def format_heading(self, heading):
        # Level 0 headings are underlined with "=", level 1 with "-".
        underline = "=-"[self.level] * len(heading)
        return "%s\n%s\n" % (heading, underline)
def _parse_num(val, type):
if val[:2].lower() == "0x": # hexadecimal
radix = 16
elif val[:2].lower() == "0b": # binary
radix = 2
val = val[2:] or "0" # have to remove "0b" prefix
elif val[:1] == "0": # octal
radix = 8
else: # decimal
radix = 10
return type(val, radix)
def _parse_int(val):
    """Parse an integer literal, accepting 0x/0b/0 radix prefixes."""
    return _parse_num(val, int)
def _parse_long(val):
    # Python 2 only: the `long` builtin does not exist in Python 3.
    return _parse_num(val, long)
# Maps an Option.type name to a (conversion function, localized noun)
# pair; the noun is interpolated into the "invalid %s value" message.
_builtin_cvt = { "int" : (_parse_int, _("integer")),
                 "long" : (_parse_long, _("long integer")),
                 "float" : (float, _("floating-point")),
                 "complex" : (complex, _("complex")) }
def check_builtin(option, opt, value):
    """Convert `value` according to option.type via _builtin_cvt;
    raise OptionValueError if the conversion fails."""
    cvt, what = _builtin_cvt[option.type]
    try:
        return cvt(value)
    except ValueError:
        raise OptionValueError(
            _("option %s: invalid %s value: %r") % (opt, what, value))
def check_choice(option, opt, value):
    """Validate that `value` is one of option.choices; return it
    unchanged, or raise OptionValueError listing the valid choices."""
    if value not in option.choices:
        choices = ", ".join(map(repr, option.choices))
        raise OptionValueError(
            _("option %s: invalid choice: %r (choose from %s)")
            % (opt, value, choices))
    return value
# Not supplying a default is different from a default of None,
# so we need an explicit "not supplied" value.  Compared with `is`
# (see add_option), so identity -- not equality -- is what matters.
NO_DEFAULT = ("NO", "DEFAULT")
class Option:
    """
    A command-line option and the recipe for handling it.

    Instance attributes:
      _short_opts : [string]
      _long_opts : [string]
      action : string
      type : string
      dest : string
      default : any
      nargs : int
      const : any
      choices : [string]
      callback : function
      callback_args : (any*)
      callback_kwargs : { string : any }
      help : string
      metavar : string
    """

    # The list of instance attributes that may be set through
    # keyword args to the constructor.
    ATTRS = ['action',
             'type',
             'dest',
             'default',
             'nargs',
             'const',
             'choices',
             'callback',
             'callback_args',
             'callback_kwargs',
             'help',
             'metavar']

    # The set of actions allowed by option parsers.  Explicitly listed
    # here so the constructor can validate its arguments.
    ACTIONS = ("store",
               "store_const",
               "store_true",
               "store_false",
               "append",
               "append_const",
               "count",
               "callback",
               "help",
               "version")

    # The set of actions that involve storing a value somewhere;
    # also listed just for constructor argument validation.  (If
    # the action is one of these, there must be a destination.)
    STORE_ACTIONS = ("store",
                     "store_const",
                     "store_true",
                     "store_false",
                     "append",
                     "append_const",
                     "count")

    # The set of actions for which it makes sense to supply a value
    # type, ie. which may consume an argument from the command line.
    TYPED_ACTIONS = ("store",
                     "append",
                     "callback")

    # The set of actions which *require* a value type, ie. that
    # always consume an argument from the command line.
    ALWAYS_TYPED_ACTIONS = ("store",
                            "append")

    # The set of actions which take a 'const' attribute.
    CONST_ACTIONS = ("store_const",
                     "append_const")

    # The set of known types for option parsers.  Again, listed here for
    # constructor argument validation.
    TYPES = ("string", "int", "long", "float", "complex", "choice")

    # Dictionary of argument checking functions, which convert and
    # validate option arguments according to the option type.
    #
    # Signature of checking functions is:
    #   check(option : Option, opt : string, value : string) -> any
    # where
    #   option is the Option instance calling the checker
    #   opt is the actual option seen on the command-line
    #     (eg. "-a", "--file")
    #   value is the option argument seen on the command-line
    #
    # The return value should be in the appropriate Python type
    # for option.type -- eg. an integer if option.type == "int".
    #
    # If no checker is defined for a type, arguments will be
    # unchecked and remain strings.
    TYPE_CHECKER = { "int"    : check_builtin,
                     "long"   : check_builtin,
                     "float"  : check_builtin,
                     "complex": check_builtin,
                     "choice" : check_choice,
                   }

    # CHECK_METHODS is a list of unbound method objects; they are called
    # by the constructor, in order, after all attributes are
    # initialized.  The list is created and filled in later, after all
    # the methods are actually defined.  (I just put it here because I
    # like to define and document all class attributes in the same
    # place.)  Subclasses that add another _check_*() method should
    # define their own CHECK_METHODS list that adds their check method
    # to those from this class.
    CHECK_METHODS = None

    # -- Constructor/initialization methods ----------------------------

    def __init__(self, *opts, **attrs):
        # Set _short_opts, _long_opts attrs from 'opts' tuple.
        # Have to be set now, in case no option strings are supplied.
        self._short_opts = []
        self._long_opts = []
        opts = self._check_opt_strings(opts)
        self._set_opt_strings(opts)

        # Set all other attrs (action, type, etc.) from 'attrs' dict
        self._set_attrs(attrs)

        # Check all the attributes we just set.  There are lots of
        # complicated interdependencies, but luckily they can be farmed
        # out to the _check_*() methods listed in CHECK_METHODS -- which
        # could be handy for subclasses!  The one thing these all share
        # is that they raise OptionError if they discover a problem.
        for checker in self.CHECK_METHODS:
            checker(self)

    def _check_opt_strings(self, opts):
        # Filter out None because early versions of Optik had exactly
        # one short option and one long option, either of which
        # could be None.
        opts = filter(None, opts)
        if not opts:
            raise TypeError("at least one option string must be supplied")
        return opts

    def _set_opt_strings(self, opts):
        # Classify each option string as short ("-x") or long ("--xyz"),
        # validating its form along the way.
        for opt in opts:
            if len(opt) < 2:
                raise OptionError(
                    "invalid option string %r: "
                    "must be at least two characters long" % opt, self)
            elif len(opt) == 2:
                if not (opt[0] == "-" and opt[1] != "-"):
                    raise OptionError(
                        "invalid short option string %r: "
                        "must be of the form -x, (x any non-dash char)" % opt,
                        self)
                self._short_opts.append(opt)
            else:
                if not (opt[0:2] == "--" and opt[2] != "-"):
                    raise OptionError(
                        "invalid long option string %r: "
                        "must start with --, followed by non-dash" % opt,
                        self)
                self._long_opts.append(opt)

    def _set_attrs(self, attrs):
        # Copy recognized keyword args onto self; anything left over in
        # 'attrs' afterwards is an error.
        for attr in self.ATTRS:
            if attr in attrs:
                setattr(self, attr, attrs[attr])
                del attrs[attr]
            else:
                if attr == 'default':
                    setattr(self, attr, NO_DEFAULT)
                else:
                    setattr(self, attr, None)
        if attrs:
            attrs = attrs.keys()
            attrs.sort()
            raise OptionError(
                "invalid keyword arguments: %s" % ", ".join(attrs),
                self)

    # -- Constructor validation methods --------------------------------

    def _check_action(self):
        if self.action is None:
            self.action = "store"
        elif self.action not in self.ACTIONS:
            raise OptionError("invalid action: %r" % self.action, self)

    def _check_type(self):
        if self.type is None:
            if self.action in self.ALWAYS_TYPED_ACTIONS:
                if self.choices is not None:
                    # The "choices" attribute implies "choice" type.
                    self.type = "choice"
                else:
                    # No type given?  "string" is the most sensible default.
                    self.type = "string"
        else:
            # Allow type objects or builtin type conversion functions
            # (int, str, etc.) as an alternative to their names.  (The
            # complicated check of __builtin__ is only necessary for
            # Python 2.1 and earlier, and is short-circuited by the
            # first check on modern Pythons.)
            import __builtin__
            if ( type(self.type) is types.TypeType or
                 (hasattr(self.type, "__name__") and
                  getattr(__builtin__, self.type.__name__, None) is self.type) ):
                self.type = self.type.__name__

            if self.type == "str":
                self.type = "string"

            if self.type not in self.TYPES:
                raise OptionError("invalid option type: %r" % self.type, self)
            if self.action not in self.TYPED_ACTIONS:
                raise OptionError(
                    "must not supply a type for action %r" % self.action, self)

    def _check_choice(self):
        if self.type == "choice":
            if self.choices is None:
                raise OptionError(
                    "must supply a list of choices for type 'choice'", self)
            elif type(self.choices) not in (types.TupleType, types.ListType):
                raise OptionError(
                    "choices must be a list of strings ('%s' supplied)"
                    % str(type(self.choices)).split("'")[1], self)
        elif self.choices is not None:
            raise OptionError(
                "must not supply choices for type %r" % self.type, self)

    def _check_dest(self):
        # No destination given, and we need one for this action.  The
        # self.type check is for callbacks that take a value.
        takes_value = (self.action in self.STORE_ACTIONS or
                       self.type is not None)
        if self.dest is None and takes_value:

            # Glean a destination from the first long option string,
            # or from the first short option string if no long options.
            if self._long_opts:
                # eg. "--foo-bar" -> "foo_bar"
                self.dest = self._long_opts[0][2:].replace('-', '_')
            else:
                self.dest = self._short_opts[0][1]

    def _check_const(self):
        if self.action not in self.CONST_ACTIONS and self.const is not None:
            raise OptionError(
                "'const' must not be supplied for action %r" % self.action,
                self)

    def _check_nargs(self):
        if self.action in self.TYPED_ACTIONS:
            if self.nargs is None:
                self.nargs = 1
        elif self.nargs is not None:
            raise OptionError(
                "'nargs' must not be supplied for action %r" % self.action,
                self)

    def _check_callback(self):
        if self.action == "callback":
            if not hasattr(self.callback, '__call__'):
                raise OptionError(
                    "callback not callable: %r" % self.callback, self)
            if (self.callback_args is not None and
                type(self.callback_args) is not types.TupleType):
                raise OptionError(
                    "callback_args, if supplied, must be a tuple: not %r"
                    % self.callback_args, self)
            if (self.callback_kwargs is not None and
                type(self.callback_kwargs) is not types.DictType):
                raise OptionError(
                    "callback_kwargs, if supplied, must be a dict: not %r"
                    % self.callback_kwargs, self)
        else:
            if self.callback is not None:
                raise OptionError(
                    "callback supplied (%r) for non-callback option"
                    % self.callback, self)
            if self.callback_args is not None:
                raise OptionError(
                    "callback_args supplied for non-callback option", self)
            if self.callback_kwargs is not None:
                raise OptionError(
                    "callback_kwargs supplied for non-callback option", self)

    CHECK_METHODS = [_check_action,
                     _check_type,
                     _check_choice,
                     _check_dest,
                     _check_const,
                     _check_nargs,
                     _check_callback]

    # -- Miscellaneous methods -----------------------------------------

    def __str__(self):
        return "/".join(self._short_opts + self._long_opts)

    __repr__ = _repr

    def takes_value(self):
        return self.type is not None

    def get_opt_string(self):
        # Prefer the first long option string for error messages etc.
        if self._long_opts:
            return self._long_opts[0]
        else:
            return self._short_opts[0]

    # -- Processing methods --------------------------------------------

    def check_value(self, opt, value):
        checker = self.TYPE_CHECKER.get(self.type)
        if checker is None:
            return value
        else:
            return checker(self, opt, value)

    def convert_value(self, opt, value):
        # None (no argument consumed) passes through unchanged; with
        # nargs > 1 every element of the value tuple is checked.
        if value is not None:
            if self.nargs == 1:
                return self.check_value(opt, value)
            else:
                return tuple([self.check_value(opt, v) for v in value])

    def process(self, opt, value, values, parser):

        # First, convert the value(s) to the right type.  Howl if any
        # value(s) are bogus.
        value = self.convert_value(opt, value)

        # And then take whatever action is expected of us.
        # This is a separate method to make life easier for
        # subclasses to add new actions.
        return self.take_action(
            self.action, self.dest, opt, value, values, parser)

    def take_action(self, action, dest, opt, value, values, parser):
        # Dispatch on the action name; "help" and "version" exit the
        # program via parser.exit().
        if action == "store":
            setattr(values, dest, value)
        elif action == "store_const":
            setattr(values, dest, self.const)
        elif action == "store_true":
            setattr(values, dest, True)
        elif action == "store_false":
            setattr(values, dest, False)
        elif action == "append":
            values.ensure_value(dest, []).append(value)
        elif action == "append_const":
            values.ensure_value(dest, []).append(self.const)
        elif action == "count":
            setattr(values, dest, values.ensure_value(dest, 0) + 1)
        elif action == "callback":
            args = self.callback_args or ()
            kwargs = self.callback_kwargs or {}
            self.callback(self, opt, value, parser, *args, **kwargs)
        elif action == "help":
            parser.print_help()
            parser.exit()
        elif action == "version":
            parser.print_version()
            parser.exit()
        else:
            raise RuntimeError, "unknown action %r" % self.action

        return 1

# class Option
# Sentinel strings compared by identity-insensitive equality; the
# concatenation presumably keeps the marker from colliding with its own
# literal appearing elsewhere -- TODO confirm original rationale.
SUPPRESS_HELP = "SUPPRESS"+"HELP"
SUPPRESS_USAGE = "SUPPRESS"+"USAGE"
try:
    basestring
except NameError:
    # Pythons without `basestring` (pre-2.3): check the two string
    # types explicitly.
    def isbasestring(x):
        return isinstance(x, (types.StringType, types.UnicodeType))
else:
    def isbasestring(x):
        return isinstance(x, basestring)
class Values:
    """Container for parsed option values, one attribute per option dest."""

    def __init__(self, defaults=None):
        # Seed instance attributes from the supplied defaults mapping.
        if defaults:
            for (attr, val) in defaults.items():
                setattr(self, attr, val)

    def __str__(self):
        return str(self.__dict__)

    __repr__ = _repr

    def __cmp__(self, other):
        # Python 2 comparison: compare attribute dicts against another
        # Values or a plain dict; anything else compares as "less than".
        if isinstance(other, Values):
            return cmp(self.__dict__, other.__dict__)
        elif isinstance(other, types.DictType):
            return cmp(self.__dict__, other)
        else:
            return -1

    def _update_careful(self, dict):
        """
        Update the option values from an arbitrary dictionary, but only
        use keys from dict that already have a corresponding attribute
        in self.  Any keys in dict without a corresponding attribute
        are silently ignored.
        """
        for attr in dir(self):
            if attr in dict:
                dval = dict[attr]
                if dval is not None:
                    setattr(self, attr, dval)

    def _update_loose(self, dict):
        """
        Update the option values from an arbitrary dictionary,
        using all keys from the dictionary regardless of whether
        they have a corresponding attribute in self or not.
        """
        self.__dict__.update(dict)

    def _update(self, dict, mode):
        if mode == "careful":
            self._update_careful(dict)
        elif mode == "loose":
            self._update_loose(dict)
        else:
            raise ValueError, "invalid update mode: %r" % mode

    def read_module(self, modname, mode="careful"):
        # Import the module and absorb its top-level names as values.
        __import__(modname)
        mod = sys.modules[modname]
        self._update(vars(mod), mode)

    def read_file(self, filename, mode="careful"):
        # Python 2 only: execfile() runs the file in the `vars` namespace.
        vars = {}
        execfile(filename, vars)
        self._update(vars, mode)

    def ensure_value(self, attr, value):
        """Set `attr` to `value` only if it is missing or None; return
        the (possibly pre-existing) attribute value."""
        if not hasattr(self, attr) or getattr(self, attr) is None:
            setattr(self, attr, value)
        return getattr(self, attr)
class OptionContainer:
    """
    Abstract base class.

    Class attributes:
      standard_option_list : [Option]
        list of standard options that will be accepted by all instances
        of this parser class (intended to be overridden by subclasses).

    Instance attributes:
      option_list : [Option]
        the list of Option objects contained by this OptionContainer
      _short_opt : { string : Option }
        dictionary mapping short option strings, eg. "-f" or "-X",
        to the Option instances that implement them.  If an Option
        has multiple short option strings, it will appear in this
        dictionary multiple times. [1]
      _long_opt : { string : Option }
        dictionary mapping long option strings, eg. "--file" or
        "--exclude", to the Option instances that implement them.
        Again, a given Option can occur multiple times in this
        dictionary. [1]
      defaults : { string : any }
        dictionary mapping option destination names to default
        values for each destination [1]

    [1] These mappings are common to (shared by) all components of the
        controlling OptionParser, where they are initially created.
    """

    def __init__(self, option_class, conflict_handler, description):
        # Initialize the option list and related data structures.
        # This method must be provided by subclasses, and it must
        # initialize at least the following instance attributes:
        # option_list, _short_opt, _long_opt, defaults.
        self._create_option_list()

        self.option_class = option_class
        self.set_conflict_handler(conflict_handler)
        self.set_description(description)

    def _create_option_mappings(self):
        # For use by OptionParser constructor -- create the master
        # option mappings used by this OptionParser and all
        # OptionGroups that it owns.
        self._short_opt = {}            # single letter -> Option instance
        self._long_opt = {}             # long option -> Option instance
        self.defaults = {}              # maps option dest -> default value

    def _share_option_mappings(self, parser):
        # For use by OptionGroup constructor -- use shared option
        # mappings from the OptionParser that owns this OptionGroup.
        self._short_opt = parser._short_opt
        self._long_opt = parser._long_opt
        self.defaults = parser.defaults

    def set_conflict_handler(self, handler):
        if handler not in ("error", "resolve"):
            # Call-style raise for consistency with the rest of the file.
            raise ValueError("invalid conflict_resolution value %r" % handler)
        self.conflict_handler = handler

    def set_description(self, description):
        self.description = description

    def get_description(self):
        return self.description

    def destroy(self):
        """see OptionParser.destroy()."""
        del self._short_opt
        del self._long_opt
        del self.defaults

    # -- Option-adding methods -----------------------------------------

    def _check_conflict(self, option):
        """Detect option-string clashes between `option` and existing
        options.  "error" handler raises OptionConflictError; "resolve"
        removes the older strings (and the older Option entirely once it
        has no strings left)."""
        conflict_opts = []
        for opt in option._short_opts:
            if opt in self._short_opt:
                conflict_opts.append((opt, self._short_opt[opt]))
        for opt in option._long_opts:
            if opt in self._long_opt:
                conflict_opts.append((opt, self._long_opt[opt]))

        if conflict_opts:
            handler = self.conflict_handler
            if handler == "error":
                raise OptionConflictError(
                    "conflicting option string(s): %s"
                    % ", ".join([co[0] for co in conflict_opts]),
                    option)
            elif handler == "resolve":
                for (opt, c_option) in conflict_opts:
                    if opt.startswith("--"):
                        c_option._long_opts.remove(opt)
                        del self._long_opt[opt]
                    else:
                        c_option._short_opts.remove(opt)
                        del self._short_opt[opt]
                    if not (c_option._short_opts or c_option._long_opts):
                        c_option.container.option_list.remove(c_option)

    def add_option(self, *args, **kwargs):
        """add_option(Option)
           add_option(opt_str, ..., kwarg=val, ...)
        """
        # Python 2 semantics: exact type match against (str, unicode);
        # an Option instance is accepted as the sole positional arg.
        if type(args[0]) in types.StringTypes:
            option = self.option_class(*args, **kwargs)
        elif len(args) == 1 and not kwargs:
            option = args[0]
            if not isinstance(option, Option):
                raise TypeError("not an Option instance: %r" % option)
        else:
            raise TypeError("invalid arguments")

        self._check_conflict(option)

        self.option_list.append(option)
        option.container = self
        for opt in option._short_opts:
            self._short_opt[opt] = option
        for opt in option._long_opts:
            self._long_opt[opt] = option

        if option.dest is not None:     # option has a dest, we need a default
            if option.default is not NO_DEFAULT:
                self.defaults[option.dest] = option.default
            elif option.dest not in self.defaults:
                self.defaults[option.dest] = None

        return option

    def add_options(self, option_list):
        for option in option_list:
            self.add_option(option)

    # -- Option query/removal methods ----------------------------------

    def get_option(self, opt_str):
        """Return the Option for a short or long option string, or None."""
        return (self._short_opt.get(opt_str) or
                self._long_opt.get(opt_str))

    def has_option(self, opt_str):
        return (opt_str in self._short_opt or
                opt_str in self._long_opt)

    def remove_option(self, opt_str):
        """Remove the option matching `opt_str` and all its option
        strings; raise ValueError if there is no such option."""
        option = self._short_opt.get(opt_str)
        if option is None:
            option = self._long_opt.get(opt_str)
        if option is None:
            raise ValueError("no such option %r" % opt_str)

        for opt in option._short_opts:
            del self._short_opt[opt]
        for opt in option._long_opts:
            del self._long_opt[opt]
        option.container.option_list.remove(option)

    # -- Help-formatting methods ---------------------------------------

    def format_option_help(self, formatter):
        if not self.option_list:
            return ""
        result = []
        for option in self.option_list:
            # was "not option.help is SUPPRESS_HELP" -- same meaning,
            # idiomatic form.
            if option.help is not SUPPRESS_HELP:
                result.append(formatter.format_option(option))
        return "".join(result)

    def format_description(self, formatter):
        return formatter.format_description(self.get_description())

    def format_help(self, formatter):
        result = []
        if self.description:
            result.append(self.format_description(formatter))
        if self.option_list:
            result.append(self.format_option_help(formatter))
        return "\n".join(result)
class OptionGroup (OptionContainer):
    """A titled batch of options belonging to a parent OptionParser.

    Groups do not affect parsing; they only structure the help output.
    """

    def __init__(self, parser, title, description=None):
        # self.parser must exist before OptionContainer.__init__ runs,
        # because _create_option_list() reads it.
        self.parser = parser
        OptionContainer.__init__(
            self, parser.option_class, parser.conflict_handler, description)
        self.title = title

    def _create_option_list(self):
        self.option_list = []
        # Share the short/long/defaults mappings with the owning parser.
        self._share_option_mappings(self.parser)

    def set_title(self, title):
        self.title = title

    def destroy(self):
        """see OptionParser.destroy()."""
        OptionContainer.destroy(self)
        del self.option_list

    # -- Help-formatting methods ---------------------------------------

    def format_help(self, formatter):
        pieces = [formatter.format_heading(self.title)]
        formatter.indent()
        pieces.append(OptionContainer.format_help(self, formatter))
        formatter.dedent()
        return "".join(pieces)
class OptionParser (OptionContainer):
"""
Class attributes:
standard_option_list : [Option]
list of standard options that will be accepted by all instances
of this parser class (intended to be overridden by subclasses).
Instance attributes:
usage : string
a usage string for your program. Before it is displayed
to the user, "%prog" will be expanded to the name of
your program (self.prog or os.path.basename(sys.argv[0])).
prog : string
the name of the current program (to override
os.path.basename(sys.argv[0])).
epilog : string
paragraph of help text to print after option help
option_groups : [OptionGroup]
list of option groups in this parser (option groups are
irrelevant for parsing the command-line, but very useful
for generating help)
allow_interspersed_args : bool = true
if true, positional arguments may be interspersed with options.
Assuming -a and -b each take a single argument, the command-line
-ablah foo bar -bboo baz
will be interpreted the same as
-ablah -bboo -- foo bar baz
If this flag were false, that command line would be interpreted as
-ablah -- foo bar -bboo baz
-- ie. we stop processing options as soon as we see the first
non-option argument. (This is the tradition followed by
Python's getopt module, Perl's Getopt::Std, and other argument-
parsing libraries, but it is generally annoying to users.)
process_default_values : bool = true
if true, option default values are processed similarly to option
values from the command line: that is, they are passed to the
type-checking function for the option's type (as long as the
default value is a string). (This really only matters if you
have defined custom types; see SF bug #955889.) Set it to false
to restore the behaviour of Optik 1.4.1 and earlier.
rargs : [string]
the argument list currently being parsed. Only set when
parse_args() is active, and continually trimmed down as
we consume arguments. Mainly there for the benefit of
callback options.
largs : [string]
the list of leftover arguments that we have skipped while
parsing options. If allow_interspersed_args is false, this
list is always empty.
values : Values
the set of option values currently being accumulated. Only
set when parse_args() is active. Also mainly for callbacks.
Because of the 'rargs', 'largs', and 'values' attributes,
OptionParser is not thread-safe. If, for some perverse reason, you
need to parse command-line arguments simultaneously in different
threads, use different OptionParser instances.
"""
standard_option_list = []
def __init__(self,
usage=None,
option_list=None,
option_class=Option,
version=None,
conflict_handler="error",
description=None,
formatter=None,
add_help_option=True,
prog=None,
epilog=None):
OptionContainer.__init__(
self, option_class, conflict_handler, description)
self.set_usage(usage)
self.prog = prog
self.version = version
self.allow_interspersed_args = True
self.process_default_values = True
if formatter is None:
formatter = IndentedHelpFormatter()
self.formatter = formatter
self.formatter.set_parser(self)
self.epilog = epilog
# Populate the option list; initial sources are the
# standard_option_list class attribute, the 'option_list'
# argument, and (if applicable) the _add_version_option() and
# _add_help_option() methods.
self._populate_option_list(option_list,
add_help=add_help_option)
self._init_parsing_state()
def destroy(self):
"""
Declare that you are done with this OptionParser. This cleans up
reference cycles so the OptionParser (and all objects referenced by
it) can be garbage-collected promptly. After calling destroy(), the
OptionParser is unusable.
"""
OptionContainer.destroy(self)
for group in self.option_groups:
group.destroy()
del self.option_list
del self.option_groups
del self.formatter
# -- Private methods -----------------------------------------------
# (used by our or OptionContainer's constructor)
def _create_option_list(self):
self.option_list = []
self.option_groups = []
self._create_option_mappings()
def _add_help_option(self):
self.add_option("-h", "--help",
action="help",
help=_("show this help message and exit"))
def _add_version_option(self):
self.add_option("--version",
action="version",
help=_("show program's version number and exit"))
def _populate_option_list(self, option_list, add_help=True):
if self.standard_option_list:
self.add_options(self.standard_option_list)
if option_list:
self.add_options(option_list)
if self.version:
self._add_version_option()
if add_help:
self._add_help_option()
def _init_parsing_state(self):
# These are set in parse_args() for the convenience of callbacks.
self.rargs = None
self.largs = None
self.values = None
# -- Simple modifier methods ---------------------------------------
def set_usage(self, usage):
if usage is None:
self.usage = _("%prog [options]")
elif usage is SUPPRESS_USAGE:
self.usage = None
# For backwards compatibility with Optik 1.3 and earlier.
elif usage.lower().startswith("usage: "):
self.usage = usage[7:]
else:
self.usage = usage
    def enable_interspersed_args(self):
        """Set parsing to not stop on the first non-option, allowing
        interspersing switches with command arguments.  This is the
        default behavior.  See also disable_interspersed_args() and the
        class documentation description of the attribute
        allow_interspersed_args."""
        self.allow_interspersed_args = True
    def disable_interspersed_args(self):
        """Set parsing to stop on the first non-option.  Use this if
        you have a command processor which runs another command that
        has options of its own and you want to make sure these options
        don't get confused.
        """
        self.allow_interspersed_args = False
    def set_process_default_values(self, process):
        # Toggle whether string defaults are run through the options'
        # type-checkers in get_default_values().
        self.process_default_values = process

    def set_default(self, dest, value):
        # Set the default for a single destination.
        self.defaults[dest] = value

    def set_defaults(self, **kwargs):
        # Bulk update of defaults; keyword names are destinations.
        self.defaults.update(kwargs)
def _get_all_options(self):
options = self.option_list[:]
for group in self.option_groups:
options.extend(group.option_list)
return options
def get_default_values(self):
if not self.process_default_values:
# Old, pre-Optik 1.5 behaviour.
return Values(self.defaults)
defaults = self.defaults.copy()
for option in self._get_all_options():
default = defaults.get(option.dest)
if isbasestring(default):
opt_str = option.get_opt_string()
defaults[option.dest] = option.check_value(opt_str, default)
return Values(defaults)
# -- OptionGroup methods -------------------------------------------
def add_option_group(self, *args, **kwargs):
# XXX lots of overlap with OptionContainer.add_option()
if type(args[0]) is types.StringType:
group = OptionGroup(self, *args, **kwargs)
elif len(args) == 1 and not kwargs:
group = args[0]
if not isinstance(group, OptionGroup):
raise TypeError, "not an OptionGroup instance: %r" % group
if group.parser is not self:
raise ValueError, "invalid OptionGroup (wrong parser)"
else:
raise TypeError, "invalid arguments"
self.option_groups.append(group)
return group
def get_option_group(self, opt_str):
option = (self._short_opt.get(opt_str) or
self._long_opt.get(opt_str))
if option and option.container is not self:
return option.container
return None
# -- Option-parsing methods ----------------------------------------
def _get_args(self, args):
if args is None:
return sys.argv[1:]
else:
return args[:] # don't modify caller's list
def parse_args(self, args=None, values=None):
"""
parse_args(args : [string] = sys.argv[1:],
values : Values = None)
-> (values : Values, args : [string])
Parse the command-line options found in 'args' (default:
sys.argv[1:]). Any errors result in a call to 'error()', which
by default prints the usage message to stderr and calls
sys.exit() with an error message. On success returns a pair
(values, args) where 'values' is an Values instance (with all
your option values) and 'args' is the list of arguments left
over after parsing options.
"""
rargs = self._get_args(args)
if values is None:
values = self.get_default_values()
# Store the halves of the argument list as attributes for the
# convenience of callbacks:
# rargs
# the rest of the command-line (the "r" stands for
# "remaining" or "right-hand")
# largs
# the leftover arguments -- ie. what's left after removing
# options and their arguments (the "l" stands for "leftover"
# or "left-hand")
self.rargs = rargs
self.largs = largs = []
self.values = values
try:
stop = self._process_args(largs, rargs, values)
except (BadOptionError, OptionValueError), err:
self.error(str(err))
args = largs + rargs
return self.check_values(values, args)
    def check_values(self, values, args):
        """
        check_values(values : Values, args : [string])
        -> (values : Values, args : [string])

        Check that the supplied option values and leftover arguments are
        valid.  Returns the option values and leftover arguments
        (possibly adjusted, possibly completely new -- whatever you
        like).  Default implementation just returns the passed-in
        values; subclasses may override as desired.
        """
        return (values, args)
    def _process_args(self, largs, rargs, values):
        """_process_args(largs : [string],
                         rargs : [string],
                         values : Values)

        Process command-line arguments and populate 'values', consuming
        options and arguments from 'rargs'.  If 'allow_interspersed_args' is
        false, stop at the first non-option argument.  If true, accumulate any
        interspersed non-option arguments in 'largs'.  Returns None; bad
        options surface as BadOptionError/OptionValueError from the helpers.
        """
        while rargs:
            arg = rargs[0]
            # We handle bare "--" explicitly, and bare "-" is handled by the
            # standard arg handler since the short arg case ensures that the
            # len of the opt string is greater than 1.
            if arg == "--":
                # "--" terminates option processing; it is consumed but
                # not kept in either list.
                del rargs[0]
                return
            elif arg[0:2] == "--":
                # process a single long option (possibly with value(s))
                self._process_long_opt(rargs, values)
            elif arg[:1] == "-" and len(arg) > 1:
                # process a cluster of short options (possibly with
                # value(s) for the last one only)
                self._process_short_opts(rargs, values)
            elif self.allow_interspersed_args:
                # Plain positional argument: remember it and keep going.
                largs.append(arg)
                del rargs[0]
            else:
                return                  # stop now, leave this arg in rargs

        # Say this is the original argument list:
        # [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
        #                            ^
        # (we are about to process arg(i)).
        #
        # Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
        # [arg0, ..., arg(i-1)] (any options and their arguments will have
        # been removed from largs).
        #
        # The while loop will usually consume 1 or more arguments per pass.
        # If it consumes 1 (eg. arg is an option that takes no arguments),
        # then after _process_arg() is done the situation is:
        #
        #   largs = subset of [arg0, ..., arg(i)]
        #   rargs = [arg(i+1), ..., arg(N-1)]
        #
        # If allow_interspersed_args is false, largs will always be
        # *empty* -- still a subset of [arg0, ..., arg(i-1)], but
        # not a very interesting subset!
    def _match_long_opt(self, opt):
        """_match_long_opt(opt : string) -> string

        Determine which long option string 'opt' matches, ie. which one
        it is an unambiguous abbrevation for.  Raises BadOptionError if
        'opt' doesn't unambiguously match any long option string.
        """
        # Delegates to the module-level prefix matcher over the table
        # of registered long options.
        return _match_abbrev(opt, self._long_opt)
    def _process_long_opt(self, rargs, values):
        """Consume one long option (and its value(s), if any) from the
        front of 'rargs' and dispatch it to Option.process()."""
        arg = rargs.pop(0)

        # Value explicitly attached to arg?  Pretend it's the next
        # argument.
        if "=" in arg:
            (opt, next_arg) = arg.split("=", 1)
            rargs.insert(0, next_arg)
            had_explicit_value = True
        else:
            opt = arg
            had_explicit_value = False

        # Abbreviations are expanded here; raises BadOptionError for
        # unknown/ambiguous options.
        opt = self._match_long_opt(opt)
        option = self._long_opt[opt]
        if option.takes_value():
            nargs = option.nargs
            if len(rargs) < nargs:
                if nargs == 1:
                    self.error(_("%s option requires an argument") % opt)
                else:
                    self.error(_("%s option requires %d arguments")
                               % (opt, nargs))
            elif nargs == 1:
                value = rargs.pop(0)
            else:
                # Multi-value option: a tuple of the next nargs words.
                value = tuple(rargs[0:nargs])
                del rargs[0:nargs]

        elif had_explicit_value:
            # "--flag=x" for an option that takes no value is an error.
            self.error(_("%s option does not take a value") % opt)

        else:
            value = None

        option.process(opt, value, values, self)
    def _process_short_opts(self, rargs, values):
        """Consume one cluster of short options (eg. "-abc" or "-fFILE")
        from the front of 'rargs', processing each in turn."""
        arg = rargs.pop(0)
        stop = False
        i = 1
        for ch in arg[1:]:
            opt = "-" + ch
            option = self._short_opt.get(opt)
            i += 1                      # we have consumed a character

            if not option:
                raise BadOptionError(opt)
            if option.takes_value():
                # Any characters left in arg?  Pretend they're the
                # next arg, and stop consuming characters of arg.
                if i < len(arg):
                    rargs.insert(0, arg[i:])
                    stop = True

                nargs = option.nargs
                if len(rargs) < nargs:
                    if nargs == 1:
                        self.error(_("%s option requires an argument") % opt)
                    else:
                        self.error(_("%s option requires %d arguments")
                                   % (opt, nargs))
                elif nargs == 1:
                    value = rargs.pop(0)
                else:
                    value = tuple(rargs[0:nargs])
                    del rargs[0:nargs]

            else:                       # option doesn't take a value
                value = None

            option.process(opt, value, values, self)

            # A value-taking option swallows the rest of the cluster.
            if stop:
                break
# -- Feedback methods ----------------------------------------------
def get_prog_name(self):
if self.prog is None:
return os.path.basename(sys.argv[0])
else:
return self.prog
    def expand_prog_name(self, s):
        # Substitute the program name for every "%prog" marker in s.
        return s.replace("%prog", self.get_prog_name())

    def get_description(self):
        # NOTE(review): assumes self.description is a string; a None
        # description would raise AttributeError here -- callers such as
        # format_help() guard with "if self.description".
        return self.expand_prog_name(self.description)
    def exit(self, status=0, msg=None):
        # Write msg (if any) to stderr, then terminate the process
        # with the given exit status via SystemExit.
        if msg:
            sys.stderr.write(msg)
        sys.exit(status)
    def error(self, msg):
        """error(msg : string)

        Print a usage message incorporating 'msg' to stderr and exit.
        If you override this in a subclass, it should not return -- it
        should either exit or raise an exception.
        """
        self.print_usage(sys.stderr)
        # Exit status 2 is the conventional "command line error" code.
        self.exit(2, "%s: error: %s\n" % (self.get_prog_name(), msg))
def get_usage(self):
if self.usage:
return self.formatter.format_usage(
self.expand_prog_name(self.usage))
else:
return ""
    def print_usage(self, file=None):
        """print_usage(file : file = stdout)

        Print the usage message for the current program (self.usage) to
        'file' (default stdout).  Any occurence of the string "%prog" in
        self.usage is replaced with the name of the current program
        (basename of sys.argv[0]).  Does nothing if self.usage is empty
        or not defined.
        """
        if self.usage:
            # file=None makes "print >>" write to sys.stdout.
            print >>file, self.get_usage()
def get_version(self):
if self.version:
return self.expand_prog_name(self.version)
else:
return ""
    def print_version(self, file=None):
        """print_version(file : file = stdout)

        Print the version message for this program (self.version) to
        'file' (default stdout).  As with print_usage(), any occurence
        of "%prog" in self.version is replaced by the current program's
        name.  Does nothing if self.version is empty or undefined.
        """
        if self.version:
            # file=None makes "print >>" write to sys.stdout.
            print >>file, self.get_version()
    def format_option_help(self, formatter=None):
        """Return the "Options" section of the help text: the heading,
        the parser's own options, then each group's options."""
        if formatter is None:
            formatter = self.formatter
        # Pre-compute the option-string column layout for every option.
        formatter.store_option_strings(self)
        result = []
        result.append(formatter.format_heading(_("Options")))
        formatter.indent()
        if self.option_list:
            result.append(OptionContainer.format_option_help(self, formatter))
            result.append("\n")
        for group in self.option_groups:
            result.append(group.format_help(formatter))
            result.append("\n")
        formatter.dedent()
        # Drop the last "\n", or the header if no options or option groups:
        return "".join(result[:-1])
    def format_epilog(self, formatter):
        # Epilog text appears after the option listing; "" when unset.
        return formatter.format_epilog(self.epilog)
def format_help(self, formatter=None):
if formatter is None:
formatter = self.formatter
result = []
if self.usage:
result.append(self.get_usage() + "\n")
if self.description:
result.append(self.format_description(formatter) + "\n")
result.append(self.format_option_help(formatter))
result.append(self.format_epilog(formatter))
return "".join(result)
# used by test suite
def _get_encoding(self, file):
encoding = getattr(file, "encoding", None)
if not encoding:
encoding = sys.getdefaultencoding()
return encoding
    def print_help(self, file=None):
        """print_help(file : file = stdout)

        Print an extended help message, listing all options and any
        help text provided with them, to 'file' (default stdout).
        """
        if file is None:
            file = sys.stdout
        encoding = self._get_encoding(file)
        # Encode with "replace" so unencodable help text degrades
        # gracefully instead of raising UnicodeEncodeError.
        file.write(self.format_help().encode(encoding, "replace"))
# class OptionParser
def _match_abbrev(s, wordmap):
"""_match_abbrev(s : string, wordmap : {string : Option}) -> string
Return the string key in 'wordmap' for which 's' is an unambiguous
abbreviation. If 's' is found to be ambiguous or doesn't match any of
'words', raise BadOptionError.
"""
# Is there an exact match?
if s in wordmap:
return s
else:
# Isolate all words with s as a prefix.
possibilities = [word for word in wordmap.keys()
if word.startswith(s)]
# No exact match, so there had better be just one possibility.
if len(possibilities) == 1:
return possibilities[0]
elif not possibilities:
raise BadOptionError(s)
else:
# More than one possible completion: ambiguous prefix.
possibilities.sort()
raise AmbiguousOptionError(s, possibilities)
# Some day, there might be many Option classes.  As of Optik 1.3, the
# preferred way to instantiate Options is indirectly, via make_option(),
# which will become a factory function when there are many Option
# classes.
# (Today it is simply an alias for the Option constructor.)
make_option = Option
| {
"repo_name": "windyuuy/opera",
"path": "chromium/src/third_party/python_26/Lib/optparse.py",
"copies": "49",
"size": "60418",
"license": "bsd-3-clause",
"hash": -4510293462547418600,
"line_mean": 34.856379822,
"line_max": 81,
"alpha_frac": 0.5655433811,
"autogenerated": false,
"ratio": 4.327936962750717,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""A powerful, extensible, and easy-to-use option parser.
By Greg Ward <gward@python.net>
Originally distributed as Optik.
For support, use the optik-users@lists.sourceforge.net mailing list
(http://lists.sourceforge.net/lists/listinfo/optik-users).
"""
__version__ = "1.5.3"
__all__ = ['Option',
'SUPPRESS_HELP',
'SUPPRESS_USAGE',
'Values',
'OptionContainer',
'OptionGroup',
'OptionParser',
'HelpFormatter',
'IndentedHelpFormatter',
'TitledHelpFormatter',
'OptParseError',
'OptionError',
'OptionConflictError',
'OptionValueError',
'BadOptionError']
__copyright__ = """
Copyright (c) 2001-2006 Gregory P. Ward. All rights reserved.
Copyright (c) 2002-2006 Python Software Foundation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys, os
import types
import textwrap
def _repr(self):
return "<%s at 0x%x: %s>" % (self.__class__.__name__, id(self), self)
# This file was generated from:
# Id: option_parser.py 527 2006-07-23 15:21:30Z greg
# Id: option.py 522 2006-06-11 16:22:03Z gward
# Id: help.py 527 2006-07-23 15:21:30Z greg
# Id: errors.py 509 2006-04-20 00:58:24Z gward
# Localize messages via gettext when available; otherwise fall back to
# an identity function so _( ... ) is always callable.
try:
    from gettext import gettext
except ImportError:
    def gettext(message):
        return message
_ = gettext
class OptParseError (Exception):
    """Root of the optparse exception hierarchy; carries a message."""

    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return self.msg
class OptionError (OptParseError):
    """
    Raised if an Option instance is created with invalid or
    inconsistent arguments.
    """

    def __init__(self, msg, option):
        self.msg = msg
        # Remember the option's string form for the error message.
        self.option_id = str(option)

    def __str__(self):
        if not self.option_id:
            return self.msg
        return "option %s: %s" % (self.option_id, self.msg)
class OptionConflictError (OptionError):
    """
    Raised if conflicting options are added to an OptionParser.
    """
    # Behaviour is inherited unchanged from OptionError; the subclass
    # exists purely so callers can catch this case specifically.
class OptionValueError (OptParseError):
    """
    Raised if an invalid option value is encountered on the command
    line.
    """
    # No extra state beyond OptParseError's message.
class BadOptionError (OptParseError):
    """
    Raised if an invalid option is seen on the command line.
    """
    def __init__(self, opt_str):
        # The offending option string, eg. "-x" or "--bogus".
        self.opt_str = opt_str

    def __str__(self):
        return _("no such option: %s") % self.opt_str
class AmbiguousOptionError (BadOptionError):
    """
    Raised if an ambiguous option is seen on the command line.
    """
    def __init__(self, opt_str, possibilities):
        BadOptionError.__init__(self, opt_str)
        # All long options that opt_str could abbreviate (sorted by
        # the caller, _match_abbrev).
        self.possibilities = possibilities

    def __str__(self):
        return (_("ambiguous option: %s (%s?)")
                % (self.opt_str, ", ".join(self.possibilities)))
class HelpFormatter:

    """
    Abstract base class for formatting option help.  OptionParser
    instances should use one of the HelpFormatter subclasses for
    formatting help; by default IndentedHelpFormatter is used.

    Instance attributes:
      parser : OptionParser
        the controlling OptionParser instance
      indent_increment : int
        the number of columns to indent per nesting level
      max_help_position : int
        the maximum starting column for option help text
      help_position : int
        the calculated starting column for option help text;
        initially the same as the maximum
      width : int
        total number of columns for output (pass None to constructor for
        this value to be taken from the $COLUMNS environment variable)
      level : int
        current indentation level
      current_indent : int
        current indentation level (in columns)
      help_width : int
        number of columns available for option help text (calculated)
      default_tag : str
        text to replace with each option's default value, "%default"
        by default.  Set to false value to disable default value expansion.
      option_strings : { Option : str }
        maps Option instances to the snippet of help text explaining
        the syntax of that option, e.g. "-h, --help" or
        "-fFILE, --file=FILE"
      _short_opt_fmt : str
        format string controlling how short options with values are
        printed in help text.  Must be either "%s%s" ("-fFILE") or
        "%s %s" ("-f FILE"), because those are the two syntaxes that
        Optik supports.
      _long_opt_fmt : str
        similar but for long options; must be either "%s %s" ("--file FILE")
        or "%s=%s" ("--file=FILE").
    """

    # Shown in help when an option has no default (see expand_default).
    NO_DEFAULT_VALUE = "none"

    def __init__(self,
                 indent_increment,
                 max_help_position,
                 width,
                 short_first):
        self.parser = None
        self.indent_increment = indent_increment
        self.help_position = self.max_help_position = max_help_position
        if width is None:
            # Autodetect terminal width from $COLUMNS, default to 80,
            # and keep a 2-column right margin.
            try:
                width = int(os.environ['COLUMNS'])
            except (KeyError, ValueError):
                width = 80
            width -= 2
        self.width = width
        self.current_indent = 0
        self.level = 0
        self.help_width = None          # computed later
        self.short_first = short_first
        self.default_tag = "%default"
        self.option_strings = {}
        self._short_opt_fmt = "%s %s"
        self._long_opt_fmt = "%s=%s"

    def set_parser(self, parser):
        self.parser = parser

    def set_short_opt_delimiter(self, delim):
        # Choose between "-fFILE" ("") and "-f FILE" (" ").
        if delim not in ("", " "):
            raise ValueError(
                "invalid metavar delimiter for short options: %r" % delim)
        self._short_opt_fmt = "%s" + delim + "%s"

    def set_long_opt_delimiter(self, delim):
        # Choose between "--file=FILE" ("=") and "--file FILE" (" ").
        if delim not in ("=", " "):
            raise ValueError(
                "invalid metavar delimiter for long options: %r" % delim)
        self._long_opt_fmt = "%s" + delim + "%s"

    def indent(self):
        self.current_indent += self.indent_increment
        self.level += 1

    def dedent(self):
        self.current_indent -= self.indent_increment
        assert self.current_indent >= 0, "Indent decreased below 0."
        self.level -= 1

    def format_usage(self, usage):
        raise NotImplementedError, "subclasses must implement"

    def format_heading(self, heading):
        raise NotImplementedError, "subclasses must implement"

    def _format_text(self, text):
        """
        Format a paragraph of free-form text for inclusion in the
        help output at the current indentation level.
        """
        text_width = self.width - self.current_indent
        indent = " "*self.current_indent
        return textwrap.fill(text,
                             text_width,
                             initial_indent=indent,
                             subsequent_indent=indent)

    def format_description(self, description):
        if description:
            return self._format_text(description) + "\n"
        else:
            return ""

    def format_epilog(self, epilog):
        if epilog:
            return "\n" + self._format_text(epilog) + "\n"
        else:
            return ""

    def expand_default(self, option):
        # Replace the "%default" tag in an option's help string with
        # its default value (or NO_DEFAULT_VALUE when it has none).
        if self.parser is None or not self.default_tag:
            return option.help

        default_value = self.parser.defaults.get(option.dest)
        if default_value is NO_DEFAULT or default_value is None:
            default_value = self.NO_DEFAULT_VALUE

        return option.help.replace(self.default_tag, str(default_value))

    def format_option(self, option):
        # The help for each option consists of two parts:
        #   * the opt strings and metavars
        #     eg. ("-x", or "-fFILENAME, --file=FILENAME")
        #   * the user-supplied help string
        #     eg. ("turn on expert mode", "read data from FILENAME")
        #
        # If possible, we write both of these on the same line:
        #   -x      turn on expert mode
        #
        # But if the opt string list is too long, we put the help
        # string on a second line, indented to the same column it would
        # start in if it fit on the first line.
        #   -fFILENAME, --file=FILENAME
        #           read data from FILENAME
        result = []
        opts = self.option_strings[option]
        opt_width = self.help_position - self.current_indent - 2
        if len(opts) > opt_width:
            opts = "%*s%s\n" % (self.current_indent, "", opts)
            indent_first = self.help_position
        else:                       # start help on same line as opts
            opts = "%*s%-*s  " % (self.current_indent, "", opt_width, opts)
            indent_first = 0
        result.append(opts)
        if option.help:
            help_text = self.expand_default(option)
            help_lines = textwrap.wrap(help_text, self.help_width)
            result.append("%*s%s\n" % (indent_first, "", help_lines[0]))
            result.extend(["%*s%s\n" % (self.help_position, "", line)
                           for line in help_lines[1:]])
        elif opts[-1] != "\n":
            result.append("\n")
        return "".join(result)

    def store_option_strings(self, parser):
        # Pre-format every option's "opt strings" snippet and derive
        # help_position/help_width from the longest one.
        self.indent()
        max_len = 0
        for opt in parser.option_list:
            strings = self.format_option_strings(opt)
            self.option_strings[opt] = strings
            max_len = max(max_len, len(strings) + self.current_indent)
        self.indent()
        for group in parser.option_groups:
            for opt in group.option_list:
                strings = self.format_option_strings(opt)
                self.option_strings[opt] = strings
                max_len = max(max_len, len(strings) + self.current_indent)
        self.dedent()
        self.dedent()
        self.help_position = min(max_len + 2, self.max_help_position)
        self.help_width = self.width - self.help_position

    def format_option_strings(self, option):
        """Return a comma-separated list of option strings & metavariables."""
        if option.takes_value():
            metavar = option.metavar or option.dest.upper()
            short_opts = [self._short_opt_fmt % (sopt, metavar)
                          for sopt in option._short_opts]
            long_opts = [self._long_opt_fmt % (lopt, metavar)
                         for lopt in option._long_opts]
        else:
            short_opts = option._short_opts
            long_opts = option._long_opts

        if self.short_first:
            opts = short_opts + long_opts
        else:
            opts = long_opts + short_opts

        return ", ".join(opts)
class IndentedHelpFormatter (HelpFormatter):
    """Format help with indented section bodies (the default formatter)."""

    def __init__(self,
                 indent_increment=2,
                 max_help_position=24,
                 width=None,
                 short_first=1):
        HelpFormatter.__init__(
            self, indent_increment, max_help_position, width, short_first)

    def format_usage(self, usage):
        # One "Usage: ..." line; sections are distinguished by indent.
        return _("Usage: %s\n") % usage

    def format_heading(self, heading):
        return "%*s%s:\n" % (self.current_indent, "", heading)
class TitledHelpFormatter (HelpFormatter):
    """Format help with underlined section headers instead of indentation."""

    def __init__(self,
                 indent_increment=0,
                 max_help_position=24,
                 width=None,
                 short_first=0):
        HelpFormatter.__init__(
            self, indent_increment, max_help_position, width, short_first)

    def format_usage(self, usage):
        return "%s  %s\n" % (self.format_heading(_("Usage")), usage)

    def format_heading(self, heading):
        # Level 0 headings are underlined with "=", level 1 with "-".
        return "%s\n%s\n" % (heading, "=-"[self.level] * len(heading))
def _parse_num(val, type):
if val[:2].lower() == "0x": # hexadecimal
radix = 16
elif val[:2].lower() == "0b": # binary
radix = 2
val = val[2:] or "0" # have to remove "0b" prefix
elif val[:1] == "0": # octal
radix = 8
else: # decimal
radix = 10
return type(val, radix)
def _parse_int(val):
    # int() with C-style radix prefixes (0x/0b/0).
    return _parse_num(val, int)

def _parse_long(val):
    # Same, but producing a Python 2 long.
    return _parse_num(val, long)
# Maps option type name -> (conversion function, localized type noun
# used in error messages by check_builtin()).
_builtin_cvt = { "int" : (_parse_int, _("integer")),
                 "long" : (_parse_long, _("long integer")),
                 "float" : (float, _("floating-point")),
                 "complex" : (complex, _("complex")) }
def check_builtin(option, opt, value):
    """Convert 'value' with the converter registered for option.type,
    raising OptionValueError with a localized message on failure."""
    converter, type_name = _builtin_cvt[option.type]
    try:
        return converter(value)
    except ValueError:
        raise OptionValueError(
            _("option %s: invalid %s value: %r") % (opt, type_name, value))
def check_choice(option, opt, value):
    """Return 'value' if it is one of option.choices, else raise
    OptionValueError listing the permitted values."""
    if value not in option.choices:
        # Build a readable, repr-quoted list of the allowed values.
        choices = ", ".join(map(repr, option.choices))
        raise OptionValueError(
            _("option %s: invalid choice: %r (choose from %s)")
            % (opt, value, choices))
    return value
# Not supplying a default is different from a default of None,
# so we need an explicit "not supplied" value.
# (A tuple is used so identity checks -- "is NO_DEFAULT" -- are safe.)
NO_DEFAULT = ("NO", "DEFAULT")
class Option:
    """
    Instance attributes:
      _short_opts : [string]
      _long_opts : [string]

      action : string
      type : string
      dest : string
      default : any
      nargs : int
      const : any
      choices : [string]
      callback : function
      callback_args : (any*)
      callback_kwargs : { string : any }
      help : string
      metavar : string
    """

    # The list of instance attributes that may be set through
    # keyword args to the constructor.
    ATTRS = ['action',
             'type',
             'dest',
             'default',
             'nargs',
             'const',
             'choices',
             'callback',
             'callback_args',
             'callback_kwargs',
             'help',
             'metavar']

    # The set of actions allowed by option parsers.  Explicitly listed
    # here so the constructor can validate its arguments.
    ACTIONS = ("store",
               "store_const",
               "store_true",
               "store_false",
               "append",
               "append_const",
               "count",
               "callback",
               "help",
               "version")

    # The set of actions that involve storing a value somewhere;
    # also listed just for constructor argument validation.  (If
    # the action is one of these, there must be a destination.)
    STORE_ACTIONS = ("store",
                     "store_const",
                     "store_true",
                     "store_false",
                     "append",
                     "append_const",
                     "count")

    # The set of actions for which it makes sense to supply a value
    # type, ie. which may consume an argument from the command line.
    TYPED_ACTIONS = ("store",
                     "append",
                     "callback")

    # The set of actions which *require* a value type, ie. that
    # always consume an argument from the command line.
    ALWAYS_TYPED_ACTIONS = ("store",
                            "append")

    # The set of actions which take a 'const' attribute.
    CONST_ACTIONS = ("store_const",
                     "append_const")

    # The set of known types for option parsers.  Again, listed here for
    # constructor argument validation.
    TYPES = ("string", "int", "long", "float", "complex", "choice")

    # Dictionary of argument checking functions, which convert and
    # validate option arguments according to the option type.
    #
    # Signature of checking functions is:
    #    check(option : Option, opt : string, value : string) -> any
    # where
    #    option is the Option instance calling the checker
    #    opt is the actual option seen on the command-line
    #      (eg. "-a", "--file")
    #    value is the option argument seen on the command-line
    #
    # The return value should be in the appropriate Python type
    # for option.type -- eg. an integer if option.type == "int".
    #
    # If no checker is defined for a type, arguments will be
    # unchecked and remain strings.
    # (The "string" type deliberately has no entry here.)
    TYPE_CHECKER = { "int"    : check_builtin,
                     "long"   : check_builtin,
                     "float"  : check_builtin,
                     "complex": check_builtin,
                     "choice" : check_choice,
                   }


    # CHECK_METHODS is a list of unbound method objects; they are called
    # by the constructor, in order, after all attributes are
    # initialized.  The list is created and filled in later, after all
    # the methods are actually defined.  (I just put it here because I
    # like to define and document all class attributes in the same
    # place.)  Subclasses that add another _check_*() method should
    # define their own CHECK_METHODS list that adds their check method
    # to those from this class.
    CHECK_METHODS = None
# -- Constructor/initialization methods ----------------------------
    def __init__(self, *opts, **attrs):
        """Create an Option from option strings ('-f', '--file', ...)
        and keyword attributes (see ATTRS); raises OptionError/TypeError
        on invalid or inconsistent arguments."""
        # Set _short_opts, _long_opts attrs from 'opts' tuple.
        # Have to be set now, in case no option strings are supplied.
        self._short_opts = []
        self._long_opts = []
        opts = self._check_opt_strings(opts)
        self._set_opt_strings(opts)

        # Set all other attrs (action, type, etc.) from 'attrs' dict
        self._set_attrs(attrs)

        # Check all the attributes we just set.  There are lots of
        # complicated interdependencies, but luckily they can be farmed
        # out to the _check_*() methods listed in CHECK_METHODS -- which
        # could be handy for subclasses!  The one thing these all share
        # is that they raise OptionError if they discover a problem.
        for checker in self.CHECK_METHODS:
            checker(self)
    def _check_opt_strings(self, opts):
        # Filter out None because early versions of Optik had exactly
        # one short option and one long option, either of which
        # could be None.
        # (Python 2 filter() on a tuple returns a tuple here.)
        opts = filter(None, opts)
        if not opts:
            raise TypeError("at least one option string must be supplied")
        return opts
    def _set_opt_strings(self, opts):
        """Classify each option string as short ("-x") or long ("--xyz"),
        validating its syntax; raises OptionError on malformed strings."""
        for opt in opts:
            if len(opt) < 2:
                raise OptionError(
                    "invalid option string %r: "
                    "must be at least two characters long" % opt, self)
            elif len(opt) == 2:
                # Exactly two characters: must look like "-x".
                if not (opt[0] == "-" and opt[1] != "-"):
                    raise OptionError(
                        "invalid short option string %r: "
                        "must be of the form -x, (x any non-dash char)" % opt,
                        self)
                self._short_opts.append(opt)
            else:
                # Three or more characters: must look like "--xyz".
                if not (opt[0:2] == "--" and opt[2] != "-"):
                    raise OptionError(
                        "invalid long option string %r: "
                        "must start with --, followed by non-dash" % opt,
                        self)
                self._long_opts.append(opt)
    def _set_attrs(self, attrs):
        """Copy every recognized keyword (see ATTRS) onto self, fill the
        rest with None (NO_DEFAULT for 'default'), and reject leftovers."""
        for attr in self.ATTRS:
            if attr in attrs:
                setattr(self, attr, attrs[attr])
                # Consume the keyword so leftovers can be detected below.
                del attrs[attr]
            else:
                if attr == 'default':
                    # "no default supplied" is distinct from default=None.
                    setattr(self, attr, NO_DEFAULT)
                else:
                    setattr(self, attr, None)
        if attrs:
            # Any keys remaining were not valid Option attributes.
            attrs = attrs.keys()
            attrs.sort()
            raise OptionError(
                "invalid keyword arguments: %s" % ", ".join(attrs),
                self)
# -- Constructor validation methods --------------------------------
    def _check_action(self):
        # Default action is "store"; anything else must be in ACTIONS.
        if self.action is None:
            self.action = "store"
        elif self.action not in self.ACTIONS:
            raise OptionError("invalid action: %r" % self.action, self)
    def _check_type(self):
        """Normalize self.type (defaulting it from the action/choices)
        and validate it against TYPES and TYPED_ACTIONS."""
        if self.type is None:
            if self.action in self.ALWAYS_TYPED_ACTIONS:
                if self.choices is not None:
                    # The "choices" attribute implies "choice" type.
                    self.type = "choice"
                else:
                    # No type given?  "string" is the most sensible default.
                    self.type = "string"
        else:
            # Allow type objects or builtin type conversion functions
            # (int, str, etc.) as an alternative to their names.  (The
            # complicated check of __builtin__ is only necessary for
            # Python 2.1 and earlier, and is short-circuited by the
            # first check on modern Pythons.)
            import __builtin__
            if ( type(self.type) is types.TypeType or
                 (hasattr(self.type, "__name__") and
                  getattr(__builtin__, self.type.__name__, None) is self.type) ):
                self.type = self.type.__name__

            if self.type == "str":
                # "str" is accepted as an alias for "string".
                self.type = "string"

            if self.type not in self.TYPES:
                raise OptionError("invalid option type: %r" % self.type, self)
            if self.action not in self.TYPED_ACTIONS:
                raise OptionError(
                    "must not supply a type for action %r" % self.action, self)
    def _check_choice(self):
        """Require a list/tuple of choices for type "choice", and forbid
        'choices' for every other type."""
        if self.type == "choice":
            if self.choices is None:
                raise OptionError(
                    "must supply a list of choices for type 'choice'", self)
            elif type(self.choices) not in (types.TupleType, types.ListType):
                raise OptionError(
                    "choices must be a list of strings ('%s' supplied)"
                    % str(type(self.choices)).split("'")[1], self)
        elif self.choices is not None:
            raise OptionError(
                "must not supply choices for type %r" % self.type, self)
    def _check_dest(self):
        """Derive self.dest from the option strings when the action
        needs a destination and none was supplied."""
        # No destination given, and we need one for this action.  The
        # self.type check is for callbacks that take a value.
        takes_value = (self.action in self.STORE_ACTIONS or
                       self.type is not None)
        if self.dest is None and takes_value:

            # Glean a destination from the first long option string,
            # or from the first short option string if no long options.
            if self._long_opts:
                # eg. "--foo-bar" -> "foo_bar"
                self.dest = self._long_opts[0][2:].replace('-', '_')
            else:
                self.dest = self._short_opts[0][1]
def _check_const(self):
    """Reject a 'const' value on actions that cannot use one."""
    if self.const is not None and self.action not in self.CONST_ACTIONS:
        raise OptionError(
            "'const' must not be supplied for action %r" % self.action,
            self)
def _check_nargs(self):
    """Default 'nargs' to 1 for typed actions; forbid it elsewhere."""
    if self.action not in self.TYPED_ACTIONS:
        if self.nargs is not None:
            raise OptionError(
                "'nargs' must not be supplied for action %r" % self.action,
                self)
    elif self.nargs is None:
        self.nargs = 1
def _check_callback(self):
    """Validate the callback-related attributes against the action."""
    if self.action == "callback":
        if not hasattr(self.callback, '__call__'):
            raise OptionError(
                "callback not callable: %r" % self.callback, self)
        if (self.callback_args is not None and
            type(self.callback_args) is not types.TupleType):
            raise OptionError(
                "callback_args, if supplied, must be a tuple: not %r"
                % self.callback_args, self)
        if (self.callback_kwargs is not None and
            type(self.callback_kwargs) is not types.DictType):
            raise OptionError(
                "callback_kwargs, if supplied, must be a dict: not %r"
                % self.callback_kwargs, self)
    else:
        # Non-callback actions must not carry any callback machinery.
        if self.callback is not None:
            raise OptionError(
                "callback supplied (%r) for non-callback option"
                % self.callback, self)
        if self.callback_args is not None:
            raise OptionError(
                "callback_args supplied for non-callback option", self)
        if self.callback_kwargs is not None:
            raise OptionError(
                "callback_kwargs supplied for non-callback option", self)
# Ordered list of validators run over each new Option.  Order matters:
# later checks rely on attributes normalized by earlier ones (e.g.
# _check_dest reads self.type, which _check_type canonicalizes).
CHECK_METHODS = [_check_action,
                 _check_type,
                 _check_choice,
                 _check_dest,
                 _check_const,
                 _check_nargs,
                 _check_callback]
# -- Miscellaneous methods -----------------------------------------

def __str__(self):
    # Human-readable form: all option strings joined, e.g. "-f/--file".
    return "/".join(self._short_opts + self._long_opts)

__repr__ = _repr
def takes_value(self):
    # An option consumes a command-line argument exactly when it has a
    # (normalized) type -- see _check_type().
    return self.type is not None
def get_opt_string(self):
    """Return the canonical option string: first long, else first short."""
    opts = self._long_opts or self._short_opts
    return opts[0]
# -- Processing methods --------------------------------------------

def check_value(self, opt, value):
    """Run 'value' through the checker registered for this option's type.

    Values whose type has no registered checker pass through unchanged.
    """
    checker = self.TYPE_CHECKER.get(self.type)
    if checker is not None:
        return checker(self, opt, value)
    return value
def convert_value(self, opt, value):
    """Type-convert a raw value (or tuple of values); None passes through."""
    if value is None:
        return None
    if self.nargs == 1:
        return self.check_value(opt, value)
    return tuple([self.check_value(opt, v) for v in value])
def process(self, opt, value, values, parser):
    """Convert 'value' to the right type, then carry out this option's action."""
    # First, convert the value(s) to the right type.  Howl if any
    # value(s) are bogus.
    value = self.convert_value(opt, value)

    # And then take whatever action is expected of us.
    # This is a separate method to make life easier for
    # subclasses to add new actions.
    return self.take_action(
        self.action, self.dest, opt, value, values, parser)
def take_action(self, action, dest, opt, value, values, parser):
    """Dispatch on 'action': mutate 'values', invoke the callback, or
    drive 'parser' (help/version).  Returns 1.
    """
    if action == "store":
        setattr(values, dest, value)
    elif action == "store_const":
        setattr(values, dest, self.const)
    elif action == "store_true":
        setattr(values, dest, True)
    elif action == "store_false":
        setattr(values, dest, False)
    elif action == "append":
        values.ensure_value(dest, []).append(value)
    elif action == "append_const":
        values.ensure_value(dest, []).append(self.const)
    elif action == "count":
        setattr(values, dest, values.ensure_value(dest, 0) + 1)
    elif action == "callback":
        # Missing callback_args/kwargs default to empty containers.
        args = self.callback_args or ()
        kwargs = self.callback_kwargs or {}
        self.callback(self, opt, value, parser, *args, **kwargs)
    elif action == "help":
        parser.print_help()
        parser.exit()
    elif action == "version":
        parser.print_version()
        parser.exit()
    else:
        raise RuntimeError, "unknown action %r" % self.action
    return 1
# class Option

# Sentinel values used to suppress help/usage output (compared with 'is').
SUPPRESS_HELP = "SUPPRESS"+"HELP"
SUPPRESS_USAGE = "SUPPRESS"+"USAGE"

# Portable string test: fall back to explicit str/unicode checks when the
# 'basestring' builtin is unavailable (very old Pythons).
try:
    basestring
except NameError:
    def isbasestring(x):
        return isinstance(x, (types.StringType, types.UnicodeType))
else:
    def isbasestring(x):
        return isinstance(x, basestring)
class Values:
    """A bag of option values; attributes are created from parsed options."""

    def __init__(self, defaults=None):
        if defaults:
            for (attr, val) in defaults.items():
                setattr(self, attr, val)

    def __str__(self):
        return str(self.__dict__)

    __repr__ = _repr

    def __cmp__(self, other):
        # Equal iff the attribute dicts compare equal; anything that is
        # neither a Values nor a dict sorts before us.
        if isinstance(other, Values):
            return cmp(self.__dict__, other.__dict__)
        elif isinstance(other, types.DictType):
            return cmp(self.__dict__, other)
        else:
            return -1

    def _update_careful(self, dict):
        """
        Update the option values from an arbitrary dictionary, but only
        use keys from dict that already have a corresponding attribute
        in self.  Any keys in dict without a corresponding attribute
        are silently ignored.
        """
        for attr in dir(self):
            if attr in dict:
                dval = dict[attr]
                # None values never overwrite an existing attribute.
                if dval is not None:
                    setattr(self, attr, dval)

    def _update_loose(self, dict):
        """
        Update the option values from an arbitrary dictionary,
        using all keys from the dictionary regardless of whether
        they have a corresponding attribute in self or not.
        """
        self.__dict__.update(dict)

    def _update(self, dict, mode):
        if mode == "careful":
            self._update_careful(dict)
        elif mode == "loose":
            self._update_loose(dict)
        else:
            raise ValueError, "invalid update mode: %r" % mode

    def read_module(self, modname, mode="careful"):
        # Import 'modname' and absorb its module-level names as options.
        __import__(modname)
        mod = sys.modules[modname]
        self._update(vars(mod), mode)

    def read_file(self, filename, mode="careful"):
        # Execute a Python file and absorb the names it defines.
        vars = {}
        execfile(filename, vars)
        self._update(vars, mode)

    def ensure_value(self, attr, value):
        # Like dict.setdefault() for attributes: set 'value' only if
        # 'attr' is missing or None, then return the resulting value.
        if not hasattr(self, attr) or getattr(self, attr) is None:
            setattr(self, attr, value)
        return getattr(self, attr)
class OptionContainer:

    """
    Abstract base class.

    Class attributes:
      standard_option_list : [Option]
        list of standard options that will be accepted by all instances
        of this parser class (intended to be overridden by subclasses).

    Instance attributes:
      option_list : [Option]
        the list of Option objects contained by this OptionContainer
      _short_opt : { string : Option }
        dictionary mapping short option strings, eg. "-f" or "-X",
        to the Option instances that implement them.  If an Option
        has multiple short option strings, it will appear in this
        dictionary multiple times. [1]
      _long_opt : { string : Option }
        dictionary mapping long option strings, eg. "--file" or
        "--exclude", to the Option instances that implement them.
        Again, a given Option can occur multiple times in this
        dictionary. [1]
      defaults : { string : any }
        dictionary mapping option destination names to default
        values for each destination [1]

    [1] These mappings are common to (shared by) all components of the
        controlling OptionParser, where they are initially created.
    """

    def __init__(self, option_class, conflict_handler, description):
        # Initialize the option list and related data structures.
        # This method must be provided by subclasses, and it must
        # initialize at least the following instance attributes:
        # option_list, _short_opt, _long_opt, defaults.
        self._create_option_list()

        self.option_class = option_class
        self.set_conflict_handler(conflict_handler)
        self.set_description(description)

    def _create_option_mappings(self):
        # For use by OptionParser constructor -- create the master
        # option mappings used by this OptionParser and all
        # OptionGroups that it owns.
        self._short_opt = {}            # single letter -> Option instance
        self._long_opt = {}             # long option -> Option instance
        self.defaults = {}              # maps option dest -> default value

    def _share_option_mappings(self, parser):
        # For use by OptionGroup constructor -- use shared option
        # mappings from the OptionParser that owns this OptionGroup.
        self._short_opt = parser._short_opt
        self._long_opt = parser._long_opt
        self.defaults = parser.defaults

    def set_conflict_handler(self, handler):
        if handler not in ("error", "resolve"):
            raise ValueError, "invalid conflict_resolution value %r" % handler
        self.conflict_handler = handler

    def set_description(self, description):
        self.description = description

    def get_description(self):
        return self.description

    def destroy(self):
        """see OptionParser.destroy()."""
        del self._short_opt
        del self._long_opt
        del self.defaults

    # -- Option-adding methods -----------------------------------------

    def _check_conflict(self, option):
        # Gather every option string of 'option' that is already claimed
        # by a previously-added option.
        conflict_opts = []
        for opt in option._short_opts:
            if opt in self._short_opt:
                conflict_opts.append((opt, self._short_opt[opt]))
        for opt in option._long_opts:
            if opt in self._long_opt:
                conflict_opts.append((opt, self._long_opt[opt]))

        if conflict_opts:
            handler = self.conflict_handler
            if handler == "error":
                raise OptionConflictError(
                    "conflicting option string(s): %s"
                    % ", ".join([co[0] for co in conflict_opts]),
                    option)
            elif handler == "resolve":
                # Strip the conflicting strings from the *existing* option;
                # drop it entirely once it has no option strings left.
                for (opt, c_option) in conflict_opts:
                    if opt.startswith("--"):
                        c_option._long_opts.remove(opt)
                        del self._long_opt[opt]
                    else:
                        c_option._short_opts.remove(opt)
                        del self._short_opt[opt]
                    if not (c_option._short_opts or c_option._long_opts):
                        c_option.container.option_list.remove(c_option)

    def add_option(self, *args, **kwargs):
        """add_option(Option)
           add_option(opt_str, ..., kwarg=val, ...)
        """
        if type(args[0]) is types.StringType:
            option = self.option_class(*args, **kwargs)
        elif len(args) == 1 and not kwargs:
            option = args[0]
            if not isinstance(option, Option):
                raise TypeError, "not an Option instance: %r" % option
        else:
            raise TypeError, "invalid arguments"

        self._check_conflict(option)

        self.option_list.append(option)
        option.container = self
        for opt in option._short_opts:
            self._short_opt[opt] = option
        for opt in option._long_opts:
            self._long_opt[opt] = option

        if option.dest is not None:     # option has a dest, we need a default
            if option.default is not NO_DEFAULT:
                self.defaults[option.dest] = option.default
            elif option.dest not in self.defaults:
                self.defaults[option.dest] = None

        return option

    def add_options(self, option_list):
        for option in option_list:
            self.add_option(option)

    # -- Option query/removal methods ----------------------------------

    def get_option(self, opt_str):
        return (self._short_opt.get(opt_str) or
                self._long_opt.get(opt_str))

    def has_option(self, opt_str):
        return (opt_str in self._short_opt or
                opt_str in self._long_opt)

    def remove_option(self, opt_str):
        option = self._short_opt.get(opt_str)
        if option is None:
            option = self._long_opt.get(opt_str)
        if option is None:
            raise ValueError("no such option %r" % opt_str)

        # Remove every alias of the option, not just 'opt_str' itself.
        for opt in option._short_opts:
            del self._short_opt[opt]
        for opt in option._long_opts:
            del self._long_opt[opt]
        option.container.option_list.remove(option)

    # -- Help-formatting methods ---------------------------------------

    def format_option_help(self, formatter):
        if not self.option_list:
            return ""
        result = []
        for option in self.option_list:
            if not option.help is SUPPRESS_HELP:
                result.append(formatter.format_option(option))
        return "".join(result)

    def format_description(self, formatter):
        return formatter.format_description(self.get_description())

    def format_help(self, formatter):
        result = []
        if self.description:
            result.append(self.format_description(formatter))
        if self.option_list:
            result.append(self.format_option_help(formatter))
        return "\n".join(result)
class OptionGroup (OptionContainer):
    """A titled group of options owned by an OptionParser.

    Groups only affect help output; for parsing purposes their options
    live in the parser's shared option mappings.
    """

    def __init__(self, parser, title, description=None):
        # 'parser' must be stored first: OptionContainer.__init__ calls
        # _create_option_list(), which reads self.parser.
        self.parser = parser
        OptionContainer.__init__(
            self, parser.option_class, parser.conflict_handler, description)
        self.title = title

    def _create_option_list(self):
        self.option_list = []
        self._share_option_mappings(self.parser)

    def set_title(self, title):
        self.title = title

    def destroy(self):
        """see OptionParser.destroy()."""
        OptionContainer.destroy(self)
        del self.option_list

    # -- Help-formatting methods ---------------------------------------

    def format_help(self, formatter):
        heading = formatter.format_heading(self.title)
        formatter.indent()
        body = OptionContainer.format_help(self, formatter)
        formatter.dedent()
        return heading + body
class OptionParser (OptionContainer):

    """
    Class attributes:
      standard_option_list : [Option]
        list of standard options that will be accepted by all instances
        of this parser class (intended to be overridden by subclasses).

    Instance attributes:
      usage : string
        a usage string for your program.  Before it is displayed
        to the user, "%prog" will be expanded to the name of
        your program (self.prog or os.path.basename(sys.argv[0])).
      prog : string
        the name of the current program (to override
        os.path.basename(sys.argv[0])).
      epilog : string
        paragraph of help text to print after option help

      option_groups : [OptionGroup]
        list of option groups in this parser (option groups are
        irrelevant for parsing the command-line, but very useful
        for generating help)

      allow_interspersed_args : bool = true
        if true, positional arguments may be interspersed with options.
        Assuming -a and -b each take a single argument, the command-line
          -ablah foo bar -bboo baz
        will be interpreted the same as
          -ablah -bboo -- foo bar baz
        If this flag were false, that command line would be interpreted as
          -ablah -- foo bar -bboo baz
        -- ie. we stop processing options as soon as we see the first
        non-option argument.  (This is the tradition followed by
        Python's getopt module, Perl's Getopt::Std, and other argument-
        parsing libraries, but it is generally annoying to users.)

      process_default_values : bool = true
        if true, option default values are processed similarly to option
        values from the command line: that is, they are passed to the
        type-checking function for the option's type (as long as the
        default value is a string).  (This really only matters if you
        have defined custom types; see SF bug #955889.)  Set it to false
        to restore the behaviour of Optik 1.4.1 and earlier.

      rargs : [string]
        the argument list currently being parsed.  Only set when
        parse_args() is active, and continually trimmed down as
        we consume arguments.  Mainly there for the benefit of
        callback options.
      largs : [string]
        the list of leftover arguments that we have skipped while
        parsing options.  If allow_interspersed_args is false, this
        list is always empty.
      values : Values
        the set of option values currently being accumulated.  Only
        set when parse_args() is active.  Also mainly for callbacks.

    Because of the 'rargs', 'largs', and 'values' attributes,
    OptionParser is not thread-safe.  If, for some perverse reason, you
    need to parse command-line arguments simultaneously in different
    threads, use different OptionParser instances.
    """

    standard_option_list = []

    def __init__(self,
                 usage=None,
                 option_list=None,
                 option_class=Option,
                 version=None,
                 conflict_handler="error",
                 description=None,
                 formatter=None,
                 add_help_option=True,
                 prog=None,
                 epilog=None):
        OptionContainer.__init__(
            self, option_class, conflict_handler, description)
        self.set_usage(usage)
        self.prog = prog
        self.version = version
        self.allow_interspersed_args = True
        self.process_default_values = True
        if formatter is None:
            formatter = IndentedHelpFormatter()
        self.formatter = formatter
        self.formatter.set_parser(self)
        self.epilog = epilog

        # Populate the option list; initial sources are the
        # standard_option_list class attribute, the 'option_list'
        # argument, and (if applicable) the _add_version_option() and
        # _add_help_option() methods.
        self._populate_option_list(option_list,
                                   add_help=add_help_option)

        self._init_parsing_state()

    def destroy(self):
        """
        Declare that you are done with this OptionParser.  This cleans up
        reference cycles so the OptionParser (and all objects referenced by
        it) can be garbage-collected promptly.  After calling destroy(), the
        OptionParser is unusable.
        """
        OptionContainer.destroy(self)
        for group in self.option_groups:
            group.destroy()
        del self.option_list
        del self.option_groups
        del self.formatter

    # -- Private methods -----------------------------------------------
    # (used by our or OptionContainer's constructor)

    def _create_option_list(self):
        self.option_list = []
        self.option_groups = []
        self._create_option_mappings()

    def _add_help_option(self):
        self.add_option("-h", "--help",
                        action="help",
                        help=_("show this help message and exit"))

    def _add_version_option(self):
        self.add_option("--version",
                        action="version",
                        help=_("show program's version number and exit"))

    def _populate_option_list(self, option_list, add_help=True):
        if self.standard_option_list:
            self.add_options(self.standard_option_list)
        if option_list:
            self.add_options(option_list)
        if self.version:
            self._add_version_option()
        if add_help:
            self._add_help_option()

    def _init_parsing_state(self):
        # These are set in parse_args() for the convenience of callbacks.
        self.rargs = None
        self.largs = None
        self.values = None

    # -- Simple modifier methods ---------------------------------------

    def set_usage(self, usage):
        if usage is None:
            self.usage = _("%prog [options]")
        elif usage is SUPPRESS_USAGE:
            self.usage = None
        # For backwards compatibility with Optik 1.3 and earlier.
        elif usage.lower().startswith("usage: "):
            self.usage = usage[7:]
        else:
            self.usage = usage

    def enable_interspersed_args(self):
        """Set parsing to not stop on the first non-option, allowing
        interspersing switches with command arguments. This is the
        default behavior. See also disable_interspersed_args() and the
        class documentation description of the attribute
        allow_interspersed_args."""
        self.allow_interspersed_args = True

    def disable_interspersed_args(self):
        """Set parsing to stop on the first non-option. Use this if
        you have a command processor which runs another command that
        has options of its own and you want to make sure these options
        don't get confused.
        """
        self.allow_interspersed_args = False

    def set_process_default_values(self, process):
        self.process_default_values = process

    def set_default(self, dest, value):
        self.defaults[dest] = value

    def set_defaults(self, **kwargs):
        self.defaults.update(kwargs)

    def _get_all_options(self):
        options = self.option_list[:]
        for group in self.option_groups:
            options.extend(group.option_list)
        return options

    def get_default_values(self):
        if not self.process_default_values:
            # Old, pre-Optik 1.5 behaviour.
            return Values(self.defaults)

        defaults = self.defaults.copy()
        for option in self._get_all_options():
            default = defaults.get(option.dest)
            if isbasestring(default):
                # String defaults go through the option's type checker,
                # just like values coming from the command line.
                opt_str = option.get_opt_string()
                defaults[option.dest] = option.check_value(opt_str, default)

        return Values(defaults)

    # -- OptionGroup methods -------------------------------------------

    def add_option_group(self, *args, **kwargs):
        # XXX lots of overlap with OptionContainer.add_option()
        if type(args[0]) is types.StringType:
            group = OptionGroup(self, *args, **kwargs)
        elif len(args) == 1 and not kwargs:
            group = args[0]
            if not isinstance(group, OptionGroup):
                raise TypeError, "not an OptionGroup instance: %r" % group
            if group.parser is not self:
                raise ValueError, "invalid OptionGroup (wrong parser)"
        else:
            raise TypeError, "invalid arguments"

        self.option_groups.append(group)
        return group

    def get_option_group(self, opt_str):
        option = (self._short_opt.get(opt_str) or
                  self._long_opt.get(opt_str))
        if option and option.container is not self:
            return option.container
        return None

    # -- Option-parsing methods ----------------------------------------

    def _get_args(self, args):
        if args is None:
            return sys.argv[1:]
        else:
            return args[:]              # don't modify caller's list

    def parse_args(self, args=None, values=None):
        """
        parse_args(args : [string] = sys.argv[1:],
                   values : Values = None)
        -> (values : Values, args : [string])

        Parse the command-line options found in 'args' (default:
        sys.argv[1:]).  Any errors result in a call to 'error()', which
        by default prints the usage message to stderr and calls
        sys.exit() with an error message.  On success returns a pair
        (values, args) where 'values' is a Values instance (with all
        your option values) and 'args' is the list of arguments left
        over after parsing options.
        """
        rargs = self._get_args(args)
        if values is None:
            values = self.get_default_values()

        # Store the halves of the argument list as attributes for the
        # convenience of callbacks:
        #   rargs
        #     the rest of the command-line (the "r" stands for
        #     "remaining" or "right-hand")
        #   largs
        #     the leftover arguments -- ie. what's left after removing
        #     options and their arguments (the "l" stands for "leftover"
        #     or "left-hand")
        self.rargs = rargs
        self.largs = largs = []
        self.values = values

        try:
            # NOTE(review): 'stop' is unused; _process_args communicates
            # its results by mutating largs/rargs in place.
            stop = self._process_args(largs, rargs, values)
        except (BadOptionError, OptionValueError), err:
            self.error(str(err))

        args = largs + rargs
        return self.check_values(values, args)

    def check_values(self, values, args):
        """
        check_values(values : Values, args : [string])
        -> (values : Values, args : [string])

        Check that the supplied option values and leftover arguments are
        valid.  Returns the option values and leftover arguments
        (possibly adjusted, possibly completely new -- whatever you
        like).  Default implementation just returns the passed-in
        values; subclasses may override as desired.
        """
        return (values, args)

    def _process_args(self, largs, rargs, values):
        """_process_args(largs : [string],
                         rargs : [string],
                         values : Values)

        Process command-line arguments and populate 'values', consuming
        options and arguments from 'rargs'.  If 'allow_interspersed_args' is
        false, stop at the first non-option argument.  If true, accumulate any
        interspersed non-option arguments in 'largs'.
        """
        while rargs:
            arg = rargs[0]
            # We handle bare "--" explicitly, and bare "-" is handled by the
            # standard arg handler since the short arg case ensures that the
            # len of the opt string is greater than 1.
            if arg == "--":
                del rargs[0]
                return
            elif arg[0:2] == "--":
                # process a single long option (possibly with value(s))
                self._process_long_opt(rargs, values)
            elif arg[:1] == "-" and len(arg) > 1:
                # process a cluster of short options (possibly with
                # value(s) for the last one only)
                self._process_short_opts(rargs, values)
            elif self.allow_interspersed_args:
                largs.append(arg)
                del rargs[0]
            else:
                return                  # stop now, leave this arg in rargs

        # Say this is the original argument list:
        # [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
        #                             ^
        # (we are about to process arg(i)).
        #
        # Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
        # [arg0, ..., arg(i-1)] (any options and their arguments will have
        # been removed from largs).
        #
        # The while loop will usually consume 1 or more arguments per pass.
        # If it consumes 1 (eg. arg is an option that takes no arguments),
        # then after _process_arg() is done the situation is:
        #
        #   largs = subset of [arg0, ..., arg(i)]
        #   rargs = [arg(i+1), ..., arg(N-1)]
        #
        # If allow_interspersed_args is false, largs will always be
        # *empty* -- still a subset of [arg0, ..., arg(i-1)], but
        # not a very interesting subset!

    def _match_long_opt(self, opt):
        """_match_long_opt(opt : string) -> string

        Determine which long option string 'opt' matches, ie. which one
        it is an unambiguous abbreviation for.  Raises BadOptionError if
        'opt' doesn't unambiguously match any long option string.
        """
        return _match_abbrev(opt, self._long_opt)

    def _process_long_opt(self, rargs, values):
        arg = rargs.pop(0)

        # Value explicitly attached to arg?  Pretend it's the next
        # argument.
        if "=" in arg:
            (opt, next_arg) = arg.split("=", 1)
            rargs.insert(0, next_arg)
            had_explicit_value = True
        else:
            opt = arg
            had_explicit_value = False

        opt = self._match_long_opt(opt)
        option = self._long_opt[opt]
        if option.takes_value():
            nargs = option.nargs
            if len(rargs) < nargs:
                if nargs == 1:
                    self.error(_("%s option requires an argument") % opt)
                else:
                    self.error(_("%s option requires %d arguments")
                               % (opt, nargs))
            elif nargs == 1:
                value = rargs.pop(0)
            else:
                value = tuple(rargs[0:nargs])
                del rargs[0:nargs]

        elif had_explicit_value:
            self.error(_("%s option does not take a value") % opt)

        else:
            value = None

        option.process(opt, value, values, self)

    def _process_short_opts(self, rargs, values):
        arg = rargs.pop(0)
        stop = False
        i = 1
        for ch in arg[1:]:
            opt = "-" + ch
            option = self._short_opt.get(opt)
            i += 1                      # we have consumed a character

            if not option:
                raise BadOptionError(opt)
            if option.takes_value():
                # Any characters left in arg?  Pretend they're the
                # next arg, and stop consuming characters of arg.
                if i < len(arg):
                    rargs.insert(0, arg[i:])
                    stop = True

                nargs = option.nargs
                if len(rargs) < nargs:
                    if nargs == 1:
                        self.error(_("%s option requires an argument") % opt)
                    else:
                        self.error(_("%s option requires %d arguments")
                                   % (opt, nargs))
                elif nargs == 1:
                    value = rargs.pop(0)
                else:
                    value = tuple(rargs[0:nargs])
                    del rargs[0:nargs]

            else:                       # option doesn't take a value
                value = None

            option.process(opt, value, values, self)

            if stop:
                break

    # -- Feedback methods ----------------------------------------------

    def get_prog_name(self):
        if self.prog is None:
            return os.path.basename(sys.argv[0])
        else:
            return self.prog

    def expand_prog_name(self, s):
        return s.replace("%prog", self.get_prog_name())

    def get_description(self):
        return self.expand_prog_name(self.description)

    def exit(self, status=0, msg=None):
        if msg:
            sys.stderr.write(msg)
        sys.exit(status)

    def error(self, msg):
        """error(msg : string)

        Print a usage message incorporating 'msg' to stderr and exit.
        If you override this in a subclass, it should not return -- it
        should either exit or raise an exception.
        """
        self.print_usage(sys.stderr)
        self.exit(2, "%s: error: %s\n" % (self.get_prog_name(), msg))

    def get_usage(self):
        if self.usage:
            return self.formatter.format_usage(
                self.expand_prog_name(self.usage))
        else:
            return ""

    def print_usage(self, file=None):
        """print_usage(file : file = stdout)

        Print the usage message for the current program (self.usage) to
        'file' (default stdout).  Any occurrence of the string "%prog" in
        self.usage is replaced with the name of the current program
        (basename of sys.argv[0]).  Does nothing if self.usage is empty
        or not defined.
        """
        if self.usage:
            print >>file, self.get_usage()

    def get_version(self):
        if self.version:
            return self.expand_prog_name(self.version)
        else:
            return ""

    def print_version(self, file=None):
        """print_version(file : file = stdout)

        Print the version message for this program (self.version) to
        'file' (default stdout).  As with print_usage(), any occurrence
        of "%prog" in self.version is replaced by the current program's
        name.  Does nothing if self.version is empty or undefined.
        """
        if self.version:
            print >>file, self.get_version()

    def format_option_help(self, formatter=None):
        if formatter is None:
            formatter = self.formatter
        formatter.store_option_strings(self)
        result = []
        result.append(formatter.format_heading(_("Options")))
        formatter.indent()
        if self.option_list:
            result.append(OptionContainer.format_option_help(self, formatter))
            result.append("\n")
        for group in self.option_groups:
            result.append(group.format_help(formatter))
            result.append("\n")
        formatter.dedent()
        # Drop the last "\n", or the header if no options or option groups:
        return "".join(result[:-1])

    def format_epilog(self, formatter):
        return formatter.format_epilog(self.epilog)

    def format_help(self, formatter=None):
        if formatter is None:
            formatter = self.formatter
        result = []
        if self.usage:
            result.append(self.get_usage() + "\n")
        if self.description:
            result.append(self.format_description(formatter) + "\n")
        result.append(self.format_option_help(formatter))
        result.append(self.format_epilog(formatter))
        return "".join(result)

    # used by test suite
    def _get_encoding(self, file):
        encoding = getattr(file, "encoding", None)
        if not encoding:
            encoding = sys.getdefaultencoding()
        return encoding

    def print_help(self, file=None):
        """print_help(file : file = stdout)

        Print an extended help message, listing all options and any
        help text provided with them, to 'file' (default stdout).
        """
        if file is None:
            file = sys.stdout
        encoding = self._get_encoding(file)
        file.write(self.format_help().encode(encoding, "replace"))

# class OptionParser
def _match_abbrev(s, wordmap):
    """_match_abbrev(s : string, wordmap : {string : Option}) -> string

    Return the string key in 'wordmap' for which 's' is an unambiguous
    abbreviation.  If 's' is found to be ambiguous or doesn't match any of
    'words', raise BadOptionError.
    """
    # An exact match always wins, even when it is also a prefix of others.
    if s in wordmap:
        return s

    # Collect every word that 's' abbreviates.
    candidates = [word for word in wordmap.keys()
                  if word.startswith(s)]
    if len(candidates) == 1:
        return candidates[0]
    if not candidates:
        raise BadOptionError(s)
    # More than one possible completion: ambiguous prefix.
    candidates.sort()
    raise AmbiguousOptionError(s, candidates)
# Some day, there might be many Option classes.  As of Optik 1.3, the
# preferred way to instantiate Options is indirectly, via make_option(),
# which will become a factory function when there are many Option
# classes.
make_option = Option
| {
"repo_name": "deanhiller/databus",
"path": "webapp/play1.3.x/python/Lib/optparse.py",
"copies": "3",
"size": "62075",
"license": "mpl-2.0",
"hash": 1931958193797048300,
"line_mean": 34.8616389549,
"line_max": 81,
"alpha_frac": 0.5502698349,
"autogenerated": false,
"ratio": 4.431712715071036,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6481982549971036,
"avg_score": null,
"num_lines": null
} |
"""A Power Supply Unit (PSU) in a pinball machine."""
from mpf.core.system_wide_device import SystemWideDevice
class PowerSupplyUnit(SystemWideDevice):

    """Represents a power supply in a pinball machine.

    Tracks until when the PSU is busy driving a pulse so that concurrent
    pulses can be serialized: callers either pulse immediately or are told
    how long to wait.
    """

    config_section = 'psus'
    collection = 'psus'
    class_label = 'psu'

    __slots__ = ["_busy_until"]

    def __init__(self, machine, name):
        """Initialise PSU."""
        super().__init__(machine, name)
        # Clock timestamp (in seconds) until which the PSU is busy, or
        # None when it is idle.
        self._busy_until = None

    def get_wait_time_for_pulse(self, pulse_ms, max_wait_ms) -> float:
        """Return the time (in ms) the caller should wait before pulsing, or 0.

        Args:
            pulse_ms: Length of the intended pulse in milliseconds.
            max_wait_ms: Maximum time the caller is willing to wait, in
                milliseconds.  Falsy (None/0) means "pulse immediately".
        """
        current_time = self.machine.clock.get_time()
        if self._busy_until and self._busy_until < current_time:
            # The busy window already elapsed; clear it to prevent
            # negative wait times below.
            self._busy_until = None

        if not self._busy_until or not max_wait_ms:
            # PSU is idle, or the caller cannot wait: pulse right away.
            self.notify_about_instant_pulse(pulse_ms)
            return 0

        # Note: the original code also tested "max_wait_ms is None" here,
        # but that was dead (None is caught by "not max_wait_ms" above) and
        # would have raised a TypeError in the arithmetic if it were
        # reachable, so it has been removed.
        if self._busy_until > current_time + (max_wait_ms / 1000.0):
            # Busy for longer than the caller can wait: pulse right away.
            self.notify_about_instant_pulse(pulse_ms)
            return 0

        # Tell the caller to wait until the current busy window ends, and
        # extend the window by this pulse plus the configured release time.
        wait_ms = (self._busy_until - current_time) * 1000
        self._busy_until += (pulse_ms + self.config['release_wait_ms']) / 1000.0
        return wait_ms

    def notify_about_instant_pulse(self, pulse_ms):
        """Extend the busy window for a pulse that starts immediately."""
        busy_for = (pulse_ms + self.config['release_wait_ms']) / 1000.0
        now = self.machine.clock.get_time()
        if self._busy_until:
            # Never shorten an existing busy window.
            self._busy_until = max(self._busy_until, now + busy_for)
        else:
            self._busy_until = now + busy_for
| {
"repo_name": "missionpinball/mpf",
"path": "mpf/devices/power_supply_unit.py",
"copies": "1",
"size": "1821",
"license": "mit",
"hash": -4630764051201434000,
"line_mean": 35.42,
"line_max": 117,
"alpha_frac": 0.5859417902,
"autogenerated": false,
"ratio": 3.5988142292490117,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4684756019449012,
"avg_score": null,
"num_lines": null
} |
## APP13 segment Contents Description
## 0xFF 0xED APP13 marker
## Segment size (2 bytes) excl. marker
## Photoshop 3.0\x00 Photoshop identification string
## 8BIM segments (see below)
##
## A JPEG file from Photoshop has various 8BIM (I don't know the real name) headers.
## The one with the type 0x04 0x04 contains the textual information. The image URL is stored
## in a different header. That's why it is currently not supported by the demo class.
## Other headers contain a thumbnail image and other information.
##
## Photoshop 6 introduced a slight variation in this header segment. Basically the 4 byte padding
## has been replaced by a header description text of variable length. The updated sample can now
## handle these files as well.
##
## 8BIM segment Description
## Records          8BIM Segment marker (4 bytes)
## Segment type (2 bytes)
## Zero byte padding (4 bytes)
## Segment size (2 bytes excl. marker, type, padding and size)
## Segment data
##
## The 8BIM header with the text is divided by even more headers, prefixed by 0x1C 0x02.
## These blocks then finally contain the information. Multiple blocks with the same type
## (e.g. Keywords) form a list.
##
## 0x1C 0x02 segment Description
## Datasets 0x1C 0x02 Segment marker (2 bytes)
## Segment type (1 byte)
## Segment size (2 bytes excl. marker, type and size)
## Segment data
##
import util
class IPTC(object):
    """Parse and expose the IPTC (APP13 / "Photoshop 3.0") metadata payload.

    Each parsed dataset is exposed as an attribute named after the dataset;
    repeated datasets with the same name are collected into a list.
    """
    def __init__(self, value):
        self.originalValue = value
        self.header = "Photoshop 3.0\x00"
        self.records = []
        self._parsed = []
        # Parse everything after the fixed Photoshop identification string.
        self.parse(value[len(self.header):])
        for r in self.records:
            for d in r.datasets:
                if d.name:
                    self._makeattr(d)

    def _makeattr(self, d):
        # Expose dataset 'd' as an attribute; a second dataset with the
        # same name turns the attribute into a list.
        self._parsed.append(d)
        if hasattr(self, d.name):
            d1 = getattr(self, d.name)
            if isinstance(d1, list):
                d1.append(d)
            else:
                d1 = [d1, d]
            setattr(self, d.name, d1)
        else:
            setattr(self, d.name, d)

    def create(self, name, value):
        #TODO: maybe it's not correct to add to first Record
        ds = self.records[0].create(name, value)
        self._makeattr(ds)

    def __iter__(self):
        return iter(self._parsed)

    def parse(self, value): # 8BIM Records
        # Recursively split 'value' into 8BIM records laid out as:
        # marker (4) + type (2) + padding (4) + size (2) + data.
        if value:
            marker = value[:4]
            type = value[4:6]
            padding = value[6:10]
            length = util.getNr(value[10:12])
            rValue = value[12:12+length]
            self.records.append(Record(marker, type, padding, rValue, self))
            #Skip a NULL (\x00 terminated value) if not even size
            if length % 2 != 0: length += 1
            self.parse(value[12+length:])

    def _delete(self, dataset):
        # Remove 'dataset' from both the parsed list and the matching
        # attribute, collapsing a one-element list back to a scalar.
        self._parsed.remove(dataset)
        atr = getattr(self, dataset.name)
        if isinstance(atr, list):
            atr.remove(dataset)
            if len(atr) == 1:
                setattr(self, atr[0].name, atr[0])
        else:
            delattr(self, dataset.name)

    def binary(self):
        # Re-serialize the full APP13 payload: header plus all records.
        res = self.header
        for record in self.records:
            res += record.binary()
        return res

    # NOTE(review): relies on a module-global 'txt_datasets' mapping that
    # is presumably defined elsewhere in this module -- not visible here.
    supported_iptc_attributes = property(fget = lambda s: txt_datasets.keys(), doc="attributes supported via iptc.create(atr, value), iptc.atr, iptc.atr.delete()")

    def display(self):
        supported = self.supported_iptc_attributes
        for atr in self:
            if atr.name in supported:
                print "%s (%s): %s" % (atr.name, atr.nrType, atr.value)
class NewIPTC(IPTC):
    """An IPTC block built from scratch (nothing to parse)."""
    def __init__(self):
        # Deliberately does NOT call IPTC.__init__: there is no existing
        # segment to parse, so the state is set up by hand instead.
        self.header = "Photoshop 3.0\x00"
        empty_record = Record('8BIM', '\x00\x02', '\x00\x00\x00\x00', None, self)
        self.records = [empty_record]
        self._parsed = []
        self.create("writer_editor", "jpeg.py IPTC module (emilas.com/jpeg)")
class Record:
    """One 8BIM record: marker + type + padding + a list of 0x1C datasets."""
    def __init__(self, marker, type, padding, value, iptc):
        self.marker = marker          # 8BIM segment marker (4 bytes)
        self.type = type              # segment type (2 bytes)
        self.padding = padding        # zero padding (4 bytes, pre-Photoshop 6)
        self.originalValue = value
        self.iptc = iptc              # owning IPTC object
        self.datasets = []
        self.parse(value)
    def parse(self, value): # sub-segments for 8BIM segment
        # Recursively split `value` into 0x1C datasets (layout: 2-byte
        # marker, 1-byte type, 2-byte size, data).
        if value:
            marker = value[:2]
            type = value[2:3]
            length = util.getNr(value[3:5])
            rValue = value[5:5+length]
            self.datasets.append(DataSet(marker, type, rValue, self))
            # Skip a NULL (\x00 terminated value) if not even size
            try:
                if length % 2 != 0 and value[5+length+1]=='\x00':
                    length += 1
            except IndexError, e:
                # Record ends exactly at the odd length: nothing to skip.
                pass
            self.parse(value[5+length:])
    def delete(self, dataset):
        # Remove `dataset` from this record and from the parent IPTC object.
        self.datasets.remove(dataset)
        self.iptc._delete(dataset)
    def create(self, name, value):
        # Append a new textual DataSet; `name` must be a key of txt_datasets.
        if name in txt_datasets:
            ds = DataSet('\x1c\x02', util.setNr(txt_datasets[name], 1), value, self)
            self.datasets.append(ds)
            return ds
        else:
            s = ["Only the following are supported:"]
            for k in txt_datasets.keys():
                s.append(k)
            raise NotImplementedError("\n".join(s))
    def binary(self):
        # Serialize marker + type + padding + size + dataset bytes.
        res = ""
        for ds in self.datasets:
            res += ds.binary()
        length = util.setNr(len(res), 2)
        if len(res) % 2 != 0:
            # Pad with a blank if not even size but let length be as it was
            res += "\x00"
        return self.marker + self.type + self.padding + length + res
class DataSet:
    """A single 0x1C dataset: the smallest unit carrying one IPTC value."""
    def __init__(self, marker, type, value, record):
        self.marker = marker
        self.type = type
        self.value = value
        self.record = record
        # Numeric dataset type; used as the key into nr_datasets.
        self.nrType = util.getNr(type)
        self.name = nr_datasets.get(self.nrType, None)
    def __str__(self):
        return str((self.nrType, self.value))
    __repr__ = __str__
    def delete(self):
        """Detach this dataset from its record (and the parent IPTC)."""
        self.record.delete(self)
    def binary(self):
        """Serialize to marker + type + size + value.

        See http://www.iptc.org/std/IIM/4.1/specification/IIMV4.1.pdf (pag 15).
        Odd-sized values are accepted with a trailing NULL when parsing,
        but are written back without one.
        """
        size = len(self.value)
        if size < 32767:
            # Standard dataset: plain 2-byte length.
            encoded_size = util.setNr(size, 2)
        else:
            # Extended dataset: length-of-length (4 bytes is enough),
            # then the 4-byte length itself.
            encoded_size = util.setNr(4, 2) + util.setNr(size, 4)
        return self.marker + self.type + encoded_size + self.value
# Mapping of IIM numeric dataset types to attribute names. Only textual
# datasets are listed; binary ones (recordVersion, rasterizedCaption) are
# deliberately skipped.
nr_datasets = {
    #0: 'recordVersion', # skip -- binary data
    #5: 'objectName',
    7: 'editStatus',
    8: 'editorialUpdate',
    10: 'urgency',
    12: 'subjectReference',
    15: 'category',
    20: 'supplementalCategory',
    22: 'fixtureIdentifier',
    25: 'keywords',
    26: 'contentLocationCode',
    27: 'contentLocationName',
    30: 'releaseDate',
    35: 'releaseTime',
    37: 'expirationDate',
    38: 'expirationTime',
    40: 'specialInstructions',
    42: 'actionAdvised',
    45: 'referenceService',
    47: 'referenceDate',
    50: 'referenceNumber',
    55: 'dateCreated',
    60: 'timeCreated',
    62: 'digitalCreationDate',
    63: 'digitalCreationTime',
    65: 'originatingProgram',
    70: 'programVersion',
    75: 'objectCycle',
    80: 'byLine',
    85: 'byLineTitle',
    90: 'city',
    92: 'subLocation',
    95: 'province_state',
    100: 'country_primaryLocationCode',
    101: 'country_primaryLocationName',
    103: 'originalTransmissionReference',
    105: 'headline',
    110: 'credit',
    115: 'source',
    116: 'copyrightNotice',
    118: 'contact',
    120: 'caption_abstract',
    122: 'writer_editor',
    #125: 'rasterizedCaption', # unsupported (binary data)
    130: 'imageType',
    131: 'imageOrientation',
    135: 'languageIdentifier',
    200: 'custom1', # These are NOT STANDARD, but are used by
    201: 'custom2', # Fotostation. Use at your own risk. They're
    202: 'custom3', # here in case you need to store some special
    203: 'custom4', # stuff, but note that other programs won't
    204: 'custom5', # recognize them and may blow them away if
    205: 'custom6', # you open and re-save the file. (Except with
    206: 'custom7', # Fotostation, of course.)
    207: 'custom8',
    208: 'custom9',
    209: 'custom10',
    210: 'custom11',
    211: 'custom12',
    212: 'custom13',
    213: 'custom14',
    214: 'custom15',
    215: 'custom16',
    216: 'custom17',
    217: 'custom18',
    218: 'custom19',
    219: 'custom20',
}
# Reverse mapping (attribute name -> numeric type), used by Record.create().
txt_datasets = dict((v, k) for k, v in nr_datasets.items())
##IPTC:
## http://www.iptc.org/IIM/
## http://www.controlledvocabulary.com/imagedatabases/iptc_naa.html
##
## also see Exiv2 C++ app: http://home.arcor.de/ahuggel/exiv2/iptc.html
## Const Byte Jpegbase::App13_ = 0Xed; - iptc here
## Const Uint16_T Jpegbase::Iptc_ = 0X0404;
##
## iptc.cpp ::READ (LINE 152)
## dataset marker 1 byte 0x1C
## record 1 byte
## dataset 1 byte
##
## if next byte is 0x08 then this is extended dataset:
## pass
## otherwise is standard:
## dataset len short (big endian) | {
"repo_name": "straup/parallel-flickr",
"path": "filtr/utils/jpeg/iptc.py",
"copies": "1",
"size": "9666",
"license": "bsd-2-clause",
"hash": 7150772338196714000,
"line_mean": 31.9931740614,
"line_max": 163,
"alpha_frac": 0.5645561763,
"autogenerated": false,
"ratio": 3.594644849386389,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4659201025686389,
"avg_score": null,
"num_lines": null
} |
"""APP2APP protocol.
"""
__docformat__ = 'restructuredtext en'
import threading
from .utils import *
from .user import *
class Application(Cached):
    """Represents an application in APP2APP protocol. Use `skype.Skype.Application` to instantiate.
    """
    # Application handles are normalized to unicode text by Cached.
    _ValidateHandle = staticmethod(tounicode)
    def __repr__(self):
        return Cached.__repr__(self, 'Name')
    def _Alter(self, AlterName, Args=None):
        # Issue an ALTER APPLICATION command for this application.
        return self._Owner._Alter('APPLICATION', self.Name, AlterName, Args)
    def _Init(self):
        self._MakeOwner()
    def _Property(self, PropName, Set=None):
        # GET (Set=None) or SET an APPLICATION property through the client.
        return self._Owner._Property('APPLICATION', self.Name, PropName, Set)
    def _Connect_ApplicationStreams(self, App, Streams):
        # ApplicationStreams event handler used by Connect(WaitConnected=True):
        # sets the event once a stream to the requested user shows up.
        if App == self:
            s = [x for x in Streams if x.PartnerHandle == self._Connect_Username]
            if s:
                self._Connect_Stream[0] = s[0]
                self._Connect_Event.set()
    def Connect(self, Username, WaitConnected=False):
        """Connects application to user.
        :Parameters:
          Username : str
            Name of the user to connect to.
          WaitConnected : bool
            If True, causes the method to wait until the connection is established.
        :return: If ``WaitConnected`` is True, returns the stream which can be used to send the
                 data. Otherwise returns None.
        :rtype: `ApplicationStream` or None
        """
        if WaitConnected:
            # Temporary state consumed by _Connect_ApplicationStreams.
            self._Connect_Event = threading.Event()
            self._Connect_Stream = [None]
            self._Connect_Username = Username
            # Check streams that are already connected before listening for
            # new ones, then issue CONNECT and wait for the handler to fire.
            self._Connect_ApplicationStreams(self, self.Streams)
            self._Owner.RegisterEventHandler('ApplicationStreams', self._Connect_ApplicationStreams)
            self._Alter('CONNECT', Username)
            self._Connect_Event.wait()
            self._Owner.UnregisterEventHandler('ApplicationStreams', self._Connect_ApplicationStreams)
            try:
                return self._Connect_Stream[0]
            finally:
                # Always drop the temporary connection state.
                del self._Connect_Stream, self._Connect_Event, self._Connect_Username
        else:
            self._Alter('CONNECT', Username)
    def Create(self):
        """Creates the APP2APP application in Skype client.
        """
        self._Owner._DoCommand('CREATE APPLICATION %s' % self.Name)
    def Delete(self):
        """Deletes the APP2APP application in Skype client.
        """
        self._Owner._DoCommand('DELETE APPLICATION %s' % self.Name)
    def SendDatagram(self, Text, Streams=None):
        """Sends datagram to application streams.
        :Parameters:
          Text : unicode
            Text to send.
          Streams : sequence of `ApplicationStream`
            Streams to send the datagram to or None if all currently connected streams should be
            used.
        """
        if Streams is None:
            Streams = self.Streams
        for s in Streams:
            s.SendDatagram(Text)
    def _GetConnectableUsers(self):
        return UserCollection(self._Owner, split(self._Property('CONNECTABLE')))
    ConnectableUsers = property(_GetConnectableUsers,
            doc="""All connectable users.
            :type: `UserCollection`
            """)
    def _GetConnectingUsers(self):
        return UserCollection(self._Owner, split(self._Property('CONNECTING')))
    ConnectingUsers = property(_GetConnectingUsers,
            doc="""All users connecting at the moment.
            :type: `UserCollection`
            """)
    def _GetName(self):
        return self._Handle
    Name = property(_GetName,
            doc="""Name of the application.
            :type: unicode
            """)
    def _GetReceivedStreams(self):
        # Property value is a list of '<handle>=<bytes>' pairs; keep handles.
        return ApplicationStreamCollection(self, (x.split('=')[0] for x in split(self._Property('RECEIVED'))))
    ReceivedStreams = property(_GetReceivedStreams,
            doc="""All streams that received data and can be read.
            :type: `ApplicationStreamCollection`
            """)
    def _GetSendingStreams(self):
        # Property value is a list of '<handle>=<bytes>' pairs; keep handles.
        return ApplicationStreamCollection(self, (x.split('=')[0] for x in split(self._Property('SENDING'))))
    SendingStreams = property(_GetSendingStreams,
            doc="""All streams that are sending data at the moment.
            :type: `ApplicationStreamCollection`
            """)
    def _GetStreams(self):
        return ApplicationStreamCollection(self, split(self._Property('STREAMS')))
    Streams = property(_GetStreams,
            doc="""All currently connected application streams.
            :type: `ApplicationStreamCollection`
            """)
class ApplicationStream(Cached):
    """Represents an application stream in APP2APP protocol.
    """
    _ValidateHandle = str
    def __len__(self):
        # Mirrors DataLength so len(stream) reports pending bytes.
        return self.DataLength
    def __repr__(self):
        return Cached.__repr__(self, 'Handle')
    def Disconnect(self):
        """Disconnects the stream.
        """
        self.Application._Alter('DISCONNECT', self.Handle)
    close = Disconnect
    def Read(self):
        """Reads data from stream.
        :return: Read data or an empty string if none were available.
        :rtype: unicode
        """
        return self.Application._Alter('READ', self.Handle)
    read = Read
    def SendDatagram(self, Text):
        """Sends datagram to stream.
        :Parameters:
          Text : unicode
            Datagram to send.
        """
        args = '%s %s' % (self.Handle, tounicode(Text))
        self.Application._Alter('DATAGRAM', args)
    def Write(self, Text):
        """Writes data to stream.
        :Parameters:
          Text : unicode
            Data to send.
        """
        args = '%s %s' % (self.Handle, tounicode(Text))
        self.Application._Alter('WRITE', args)
    write = Write
    def _GetApplication(self):
        return self._Owner
    Application = property(_GetApplication,
            doc="""Application this stream belongs to.
            :type: `Application`
            """)
    def _GetApplicationName(self):
        return self.Application.Name
    ApplicationName = property(_GetApplicationName,
            doc="""Name of the application this stream belongs to. Same as ``ApplicationStream.Application.Name``.
            :type: unicode
            """)
    def _GetDataLength_GetStreamLength(self, Type):
        # Property value is a list of '<handle>=<bytes>' pairs; return the
        # byte count for this stream's handle, or None when not listed.
        for pair in split(self.Application._Property(Type)):
            handle, nbytes = pair.split('=')
            if handle == self.Handle:
                return int(nbytes)
    def _GetDataLength(self):
        # Prefer the sending queue's byte count, then the received one.
        for prop in ('SENDING', 'RECEIVED'):
            count = self._GetDataLength_GetStreamLength(prop)
            if count is not None:
                return count
        return 0
    DataLength = property(_GetDataLength,
            doc="""Number of bytes awaiting in the read buffer.
            :type: int
            """)
    def _GetHandle(self):
        return self._Handle
    Handle = property(_GetHandle,
            doc="""Stream handle in u'<Skypename>:<n>' format.
            :type: str
            """)
    def _GetPartnerHandle(self):
        return self.Handle.partition(':')[0]
    PartnerHandle = property(_GetPartnerHandle,
            doc="""Skypename of the user this stream is connected to.
            :type: str
            """)
class ApplicationStreamCollection(CachedCollection):
    """Collection of `ApplicationStream` objects."""
    _CachedType = ApplicationStream  # element type instantiated by the collection
| {
"repo_name": "FloatingGhost/skype4py",
"path": "Skype4Py/application.py",
"copies": "1",
"size": "7170",
"license": "bsd-3-clause",
"hash": -7345869745425918000,
"line_mean": 27.4523809524,
"line_max": 110,
"alpha_frac": 0.6139470014,
"autogenerated": false,
"ratio": 4.358662613981763,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5472609615381763,
"avg_score": null,
"num_lines": null
} |
"""APP2APP protocol.
"""
__docformat__ = 'restructuredtext en'
import threading
from utils import *
from user import *
class Application(Cached):
    """Represents an application in APP2APP protocol. Use `skype.Skype.Application` to instantiate.
    """
    # Application handles are normalized to unicode text by Cached.
    _ValidateHandle = staticmethod(tounicode)
    def __repr__(self):
        return Cached.__repr__(self, 'Name')
    def _Alter(self, AlterName, Args=None):
        # Issue an ALTER APPLICATION command for this application.
        return self._Owner._Alter('APPLICATION', self.Name, AlterName, Args)
    def _Init(self):
        self._MakeOwner()
    def _Property(self, PropName, Set=None):
        # GET (Set=None) or SET an APPLICATION property through the client.
        return self._Owner._Property('APPLICATION', self.Name, PropName, Set)
    def _Connect_ApplicationStreams(self, App, Streams):
        # ApplicationStreams event handler used by Connect(WaitConnected=True):
        # sets the event once a stream to the requested user shows up.
        if App == self:
            s = [x for x in Streams if x.PartnerHandle == self._Connect_Username]
            if s:
                self._Connect_Stream[0] = s[0]
                self._Connect_Event.set()
    def Connect(self, Username, WaitConnected=False):
        """Connects application to user.
        :Parameters:
          Username : str
            Name of the user to connect to.
          WaitConnected : bool
            If True, causes the method to wait until the connection is established.
        :return: If ``WaitConnected`` is True, returns the stream which can be used to send the
                 data. Otherwise returns None.
        :rtype: `ApplicationStream` or None
        """
        if WaitConnected:
            # Temporary state consumed by _Connect_ApplicationStreams.
            self._Connect_Event = threading.Event()
            self._Connect_Stream = [None]
            self._Connect_Username = Username
            # Check streams that are already connected before listening for
            # new ones, then issue CONNECT and wait for the handler to fire.
            self._Connect_ApplicationStreams(self, self.Streams)
            self._Owner.RegisterEventHandler('ApplicationStreams', self._Connect_ApplicationStreams)
            self._Alter('CONNECT', Username)
            self._Connect_Event.wait()
            self._Owner.UnregisterEventHandler('ApplicationStreams', self._Connect_ApplicationStreams)
            try:
                return self._Connect_Stream[0]
            finally:
                # Always drop the temporary connection state.
                del self._Connect_Stream, self._Connect_Event, self._Connect_Username
        else:
            self._Alter('CONNECT', Username)
    def Create(self):
        """Creates the APP2APP application in Skype client.
        """
        self._Owner._DoCommand('CREATE APPLICATION %s' % self.Name)
    def Delete(self):
        """Deletes the APP2APP application in Skype client.
        """
        self._Owner._DoCommand('DELETE APPLICATION %s' % self.Name)
    def SendDatagram(self, Text, Streams=None):
        """Sends datagram to application streams.
        :Parameters:
          Text : unicode
            Text to send.
          Streams : sequence of `ApplicationStream`
            Streams to send the datagram to or None if all currently connected streams should be
            used.
        """
        if Streams is None:
            Streams = self.Streams
        for s in Streams:
            s.SendDatagram(Text)
    def _GetConnectableUsers(self):
        return UserCollection(self._Owner, split(self._Property('CONNECTABLE')))
    ConnectableUsers = property(_GetConnectableUsers,
            doc="""All connectable users.
            :type: `UserCollection`
            """)
    def _GetConnectingUsers(self):
        return UserCollection(self._Owner, split(self._Property('CONNECTING')))
    ConnectingUsers = property(_GetConnectingUsers,
            doc="""All users connecting at the moment.
            :type: `UserCollection`
            """)
    def _GetName(self):
        return self._Handle
    Name = property(_GetName,
            doc="""Name of the application.
            :type: unicode
            """)
    def _GetReceivedStreams(self):
        # Property value is a list of '<handle>=<bytes>' pairs; keep handles.
        return ApplicationStreamCollection(self, (x.split('=')[0] for x in split(self._Property('RECEIVED'))))
    ReceivedStreams = property(_GetReceivedStreams,
            doc="""All streams that received data and can be read.
            :type: `ApplicationStreamCollection`
            """)
    def _GetSendingStreams(self):
        # Property value is a list of '<handle>=<bytes>' pairs; keep handles.
        return ApplicationStreamCollection(self, (x.split('=')[0] for x in split(self._Property('SENDING'))))
    SendingStreams = property(_GetSendingStreams,
            doc="""All streams that are sending data at the moment.
            :type: `ApplicationStreamCollection`
            """)
    def _GetStreams(self):
        return ApplicationStreamCollection(self, split(self._Property('STREAMS')))
    Streams = property(_GetStreams,
            doc="""All currently connected application streams.
            :type: `ApplicationStreamCollection`
            """)
class ApplicationStream(Cached):
    """Represents an application stream in APP2APP protocol.
    """
    _ValidateHandle = str
    def __len__(self):
        # Mirrors DataLength so len(stream) reports pending bytes.
        return self.DataLength
    def __repr__(self):
        return Cached.__repr__(self, 'Handle')
    def Disconnect(self):
        """Disconnects the stream.
        """
        self.Application._Alter('DISCONNECT', self.Handle)
    close = Disconnect
    def Read(self):
        """Reads data from stream.
        :return: Read data or an empty string if none were available.
        :rtype: unicode
        """
        return self.Application._Alter('READ', self.Handle)
    read = Read
    def SendDatagram(self, Text):
        """Sends datagram to stream.
        :Parameters:
          Text : unicode
            Datagram to send.
        """
        args = '%s %s' % (self.Handle, tounicode(Text))
        self.Application._Alter('DATAGRAM', args)
    def Write(self, Text):
        """Writes data to stream.
        :Parameters:
          Text : unicode
            Data to send.
        """
        args = '%s %s' % (self.Handle, tounicode(Text))
        self.Application._Alter('WRITE', args)
    write = Write
    def _GetApplication(self):
        return self._Owner
    Application = property(_GetApplication,
            doc="""Application this stream belongs to.
            :type: `Application`
            """)
    def _GetApplicationName(self):
        return self.Application.Name
    ApplicationName = property(_GetApplicationName,
            doc="""Name of the application this stream belongs to. Same as ``ApplicationStream.Application.Name``.
            :type: unicode
            """)
    def _GetDataLength_GetStreamLength(self, Type):
        # Property value is a list of '<handle>=<bytes>' pairs; return the
        # byte count for this stream's handle, or None when not listed.
        for pair in split(self.Application._Property(Type)):
            handle, nbytes = pair.split('=')
            if handle == self.Handle:
                return int(nbytes)
    def _GetDataLength(self):
        # Prefer the sending queue's byte count, then the received one.
        for prop in ('SENDING', 'RECEIVED'):
            count = self._GetDataLength_GetStreamLength(prop)
            if count is not None:
                return count
        return 0
    DataLength = property(_GetDataLength,
            doc="""Number of bytes awaiting in the read buffer.
            :type: int
            """)
    def _GetHandle(self):
        return self._Handle
    Handle = property(_GetHandle,
            doc="""Stream handle in u'<Skypename>:<n>' format.
            :type: str
            """)
    def _GetPartnerHandle(self):
        return self.Handle.partition(':')[0]
    PartnerHandle = property(_GetPartnerHandle,
            doc="""Skypename of the user this stream is connected to.
            :type: str
            """)
class ApplicationStreamCollection(CachedCollection):
    """Collection of `ApplicationStream` objects."""
    _CachedType = ApplicationStream  # element type instantiated by the collection
| {
"repo_name": "james-huang/sk_skype_bot",
"path": "Skype4Py/application.py",
"copies": "21",
"size": "7168",
"license": "mit",
"hash": -6327978058089401000,
"line_mean": 27.4444444444,
"line_max": 110,
"alpha_frac": 0.6141183036,
"autogenerated": false,
"ratio": 4.362751065124772,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0014888326370948596,
"num_lines": 252
} |
'''APP2APP protocol.
'''
from utils import *
from user import *
import threading
class IApplication(Cached):
    '''Represents an application in APP2APP protocol. Use L{ISkype.Application<skype.ISkype.Application>} to instantiate.
    '''
    def __repr__(self):
        return '<%s with Name=%s>' % (Cached.__repr__(self)[1:-1], repr(self.Name))
    def _Alter(self, AlterName, Args=None):
        # Issue an ALTER APPLICATION command for this application.
        return self._Skype._Alter('APPLICATION', self._Name, AlterName, Args)
    def _Init(self, Name, Skype):
        self._Name = unicode(Name)
        self._Skype = Skype
    def _Property(self, PropName, Set=None):
        # GET (Set=None) or SET an APPLICATION property through the client.
        return self._Skype._Property('APPLICATION', self._Name, PropName, Set)
    def __Connect_app_streams(self, App, Streams):
        # ApplicationStreams event handler used by Connect(WaitConnected=True):
        # sets the event once a stream to the requested user shows up.
        if App == self:
            s = [x for x in Streams if x.PartnerHandle == self.__Connect_username]
            if s:
                self.__Connect_stream[0] = s[0]
                self.__Connect_event.set()
    def Connect(self, Username, WaitConnected=False):
        '''Connects application to user.
        @param Username: Name of the user to connect to.
        @type Username: unicode
        @param WaitConnected: If True, causes the method to wait until the connection is established.
        @type WaitConnected: bool
        @return: If C{WaitConnected} is True, returns the stream which can be used to send the data.
        Otherwise returns None.
        @rtype: L{IApplicationStream} or None
        '''
        if WaitConnected:
            # Temporary state consumed by __Connect_app_streams.
            self.__Connect_event = threading.Event()
            self.__Connect_stream = [None]
            self.__Connect_username = Username
            # Check streams that are already connected before listening for
            # new ones, then issue CONNECT and wait for the handler to fire.
            self.__Connect_app_streams(self, self.Streams)
            self._Skype.RegisterEventHandler('ApplicationStreams', self.__Connect_app_streams)
            self._Alter('CONNECT', Username)
            self.__Connect_event.wait()
            self._Skype.UnregisterEventHandler('ApplicationStreams', self.__Connect_app_streams)
            try:
                return self.__Connect_stream[0]
            finally:
                # Always drop the temporary connection state.
                del self.__Connect_stream, self.__Connect_event, self.__Connect_username
        else:
            self._Alter('CONNECT', Username)
    def Create(self):
        '''Creates the APP2APP application in Skype client.
        '''
        self._Skype._DoCommand('CREATE APPLICATION %s' % self._Name)
    def Delete(self):
        '''Deletes the APP2APP application in Skype client.
        '''
        self._Skype._DoCommand('DELETE APPLICATION %s' % self._Name)
    def SendDatagram(self, Text, Streams=None):
        '''Sends datagram to application streams.
        @param Text: Text to send.
        @type Text: unicode
        @param Streams: Streams to send the datagram to or None if all currently connected streams should be used.
        @type Streams: sequence of L{IApplicationStream}
        '''
        # Identity test ("is None") instead of the original "== None":
        # None is a singleton and equality may be overridden by operands.
        if Streams is None:
            Streams = self.Streams
        for s in Streams:
            s.SendDatagram(Text)
    def _GetConnectableUsers(self):
        return tuple([IUser(x, self._Skype) for x in esplit(self._Property('CONNECTABLE'))])
    ConnectableUsers = property(_GetConnectableUsers,
            doc='''All connectable users.
            @type: tuple of L{IUser}
            ''')
    def _GetConnectingUsers(self):
        return tuple([IUser(x, self._Skype) for x in esplit(self._Property('CONNECTING'))])
    ConnectingUsers = property(_GetConnectingUsers,
            doc='''All users connecting at the moment.
            @type: tuple of L{IUser}
            ''')
    def _GetName(self):
        return self._Name
    Name = property(_GetName,
            doc='''Name of the application.
            @type: unicode
            ''')
    def _GetReceivedStreams(self):
        # Property value is a list of '<handle>=<bytes>' pairs; keep handles.
        return tuple([IApplicationStream(x.split('=')[0], self) for x in esplit(self._Property('RECEIVED'))])
    ReceivedStreams = property(_GetReceivedStreams,
            doc='''All streams that received data and can be read.
            @type: tuple of L{IApplicationStream}
            ''')
    def _GetSendingStreams(self):
        # Property value is a list of '<handle>=<bytes>' pairs; keep handles.
        return tuple([IApplicationStream(x.split('=')[0], self) for x in esplit(self._Property('SENDING'))])
    SendingStreams = property(_GetSendingStreams,
            doc='''All streams that are sending data at the moment.
            @type: tuple of L{IApplicationStream}
            ''')
    def _GetStreams(self):
        return tuple([IApplicationStream(x, self) for x in esplit(self._Property('STREAMS'))])
    Streams = property(_GetStreams,
            doc='''All currently connected application streams.
            @type: tuple of L{IApplicationStream}
            ''')
class IApplicationStream(Cached):
    '''Represents an application stream in APP2APP protocol.
    '''
    def __len__(self):
        # len(stream) reports the number of bytes awaiting in the buffer.
        return self.DataLength
    def __repr__(self):
        return '<%s with Handle=%s, Application=%s>' % (Cached.__repr__(self)[1:-1], repr(self.Handle), repr(self.Application))
    def _Init(self, Handle, Application):
        self._Handle = Handle
        self._Application = Application
    def Disconnect(self):
        '''Disconnects the stream.
        '''
        self._Application._Alter('DISCONNECT', self._Handle)
    close = Disconnect
    def Read(self):
        '''Reads data from stream.
        @return: Read data or an empty string if none were available.
        @rtype: unicode
        '''
        return self._Application._Alter('READ', self._Handle)
    read = Read
    def SendDatagram(self, Text):
        '''Sends datagram to stream.
        @param Text: Datagram to send.
        @type Text: unicode
        '''
        self._Application._Alter('DATAGRAM', '%s %s' % (self._Handle, Text))
    def Write(self, Text):
        '''Writes data to stream.
        @param Text: Data to send.
        @type Text: unicode
        '''
        self._Application._Alter('WRITE', '%s %s' % (self._Handle, Text))
    write = Write
    def _GetApplication(self):
        return self._Application
    Application = property(_GetApplication,
            doc='''Application this stream belongs to.
            @type: L{IApplication}
            ''')
    def _GetApplicationName(self):
        return self._Application.Name
    ApplicationName = property(_GetApplicationName,
            doc='''Name of the application this stream belongs to. Same as C{IApplicationStream.Application.Name}.
            @type: unicode
            ''')
    def __GetDataLength_GetStreamLength(self, Type):
        # Property value is a list of '<handle>=<bytes>' pairs; return the
        # byte count for this stream's handle, or None when not listed.
        for s in esplit(self._Application._Property(Type)):
            h, i = s.split('=')
            if h == self._Handle:
                return int(i)
    def _GetDataLength(self):
        # Identity tests ("is not None") instead of the original "!= None":
        # a byte count of 0 is a valid value and must still be returned.
        i = self.__GetDataLength_GetStreamLength('SENDING')
        if i is not None:
            return i
        i = self.__GetDataLength_GetStreamLength('RECEIVED')
        if i is not None:
            return i
        return 0
    DataLength = property(_GetDataLength,
            doc='''Number of bytes awaiting in the read buffer.
            @type: int
            ''')
    def _GetHandle(self):
        return self._Handle
    Handle = property(_GetHandle,
            doc='''Stream handle in u'<Skypename>:<n>' format.
            @type: unicode
            ''')
    def _GetPartnerHandle(self):
        return self._Handle.split(':')[0]
    PartnerHandle = property(_GetPartnerHandle,
            doc='''Skypename of the user this stream is connected to.
            @type: unicode
            ''')
| {
"repo_name": "mambelli/osg-bosco-marco",
"path": "src/condor_contrib/condor_pigeon/src/condor_pigeon_client/skype_linux_tools/Skype4Py/application.py",
"copies": "9",
"size": "7292",
"license": "apache-2.0",
"hash": -5506238162120580000,
"line_mean": 29.132231405,
"line_max": 127,
"alpha_frac": 0.6117663193,
"autogenerated": false,
"ratio": 4.110484780157836,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002067474145636459,
"num_lines": 242
} |
""" app - application interface """
import os
import tornado.web
from json import dumps
from tornado.escape import to_unicode
class Handler(tornado.web.RequestHandler):
    """Base request handler: exposes the app config, one-shot ("flash")
    message cookies and a JSON rendering helper shared by all endpoints."""
    @property
    def cfg(self):
        # Application-wide config object attached to the tornado Application.
        return self.application.cfg
    def head(self, *args, **kwargs):
        # HEAD is handled as GET with the body dropped.
        self.get(*args, **kwargs)
        self.request.body = ''
    def is_argument_present(self, name):
        """Return True when the request carries argument `name`."""
        # Direct "is not None" instead of the original double negative
        # `not (... is None)` -- same result, clearer intent.
        return self.request.arguments.get(name, None) is not None
    def get_current_user(self):
        # Identified by the secure 'userid' cookie; empty string when absent
        # (see get_secure_cookie below).
        return to_unicode(self.get_secure_cookie('userid'))
    def get_secure_cookie(self, name, if_none=""):
        """Like the base implementation but returns `if_none` instead of None."""
        cook = tornado.web.RequestHandler.get_secure_cookie(self, name)
        if cook is None:
            return if_none
        return cook
    def send_errmsg(self, errmsg):
        # Stash a one-shot error message; consumed and cleared by render().
        self.set_secure_cookie("errmsg", errmsg)
    def send_statmsg(self, statmsg):
        # Stash a one-shot status message; consumed and cleared by render().
        self.set_secure_cookie("statmsg", statmsg)
    def render(self, template_name, **kwargs):
        """Render a template, injecting and clearing the one-shot
        errmsg/statmsg cookies set via send_errmsg/send_statmsg."""
        # Read before clearing: these are flash messages shown exactly once.
        error = self.get_secure_cookie("errmsg")
        status = self.get_secure_cookie("statmsg")
        self.clear_cookie("errmsg")
        self.clear_cookie("statmsg")
        tornado.web.RequestHandler.render(self,
                                          template_name,
                                          errmsg=error,
                                          statmsg=status,
                                          **kwargs)
    # API render
    def render_json(self, content):
        """Serialize `content` as JSON with permissive CORS and finish."""
        self.set_header("Content-Type", "application/json")
        self.set_header("Access-Control-Allow-Origin", "*")
        self.write(dumps(content))
        self.finish()
class Application(tornado.web.Application):
    """Tornado application wired from command-line opts and a config object."""
    def __init__(self, opts, conf):
        self.opts = opts
        self.conf = conf
        urls = self.route_add()
        settings = dict(
            template_path=None,
            static_path=None,
            # XSRF protection stays on except in debug mode
            # (was `False if self.opts.debug else True`).
            xsrf_cookies=not self.opts.debug,
            # SECURITY: hard-coded secret -- anyone who can read the source
            # can forge secure cookies; move this into configuration.
            cookie_secret="i love cookies!!@!#!@!",
            debug=self.opts.debug)
        tornado.web.Application.__init__(self, urls, **settings)
    def route_add(self):
        """Build (url_pattern, handler_path) routes from self.conf.routes.

        Each (segment, endpoint) pair maps /api/<api_version>/<segment>
        to the handler class at brutus.api.<api_version>.<endpoint>.
        """
        urls = []
        for segment, endpoint in self.conf.routes:
            urls.append((r"/api/{v}/{s}".format(
                v=self.conf.api_version,
                s=segment), "brutus.api.{v}.{e}".format(
                    v=self.conf.api_version,
                    e=endpoint)))
        return urls
| {
"repo_name": "sarahjanesllc-labs/brutus",
"path": "brutus/app.py",
"copies": "1",
"size": "2488",
"license": "mit",
"hash": -9098208473153865000,
"line_mean": 31.3116883117,
"line_max": 71,
"alpha_frac": 0.5562700965,
"autogenerated": false,
"ratio": 4.078688524590164,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5134958621090164,
"avg_score": null,
"num_lines": null
} |
"""
Builds classifier models and saves them as pickles
"""
##########################################################################
## Imports
##########################################################################
import os
import time
import json
import pickle
import random
import apparel
import unicodecsv as csv
from datetime import datetime
from apparel.config import settings
from apparel.features import ProductFeatures
from nltk.classify.util import accuracy
from nltk.classify import MaxentClassifier
##########################################################################
## Module Constants
##########################################################################
# ctime-style timestamp used in the info JSON, e.g. "Mon Jan 02 15:04:05 2006".
DATE_FORMAT = "%a %b %d %H:%M:%S %Y"
##########################################################################
## Model Builder
##########################################################################
class ClassifierBuilder(object):
"""
Creates a classifier model using MaximumEntropy and saves it as a
pickle to disk. This class also writes out extra information to disk
to ensure that the model can be identified in the future.
"""
def __init__(self, corpus=None, **kwargs):
self.corpus = corpus or settings.corpus
self.validate = kwargs.pop('validate', True) # Perform cross validation
self.outpath = kwargs.pop('outpath', '.') # Where to write out the data
# Compute info and model paths
self.model_path, self.info_path = self.get_output_paths()
# Other required properties
self.accuracy = None # Accuracy of the model
self.started = None # Start timestamp of the build
self.finished = None # Finish timestamp of the build
self.buildtime = None # Time (seconds) of complete build
self.feattime = None # Time (seconds) to get features
self.traintime = None # Time (seconds) to train the model
self.validtime = None # Time (seconds) to run the validation
# Create a featurizer
self.featurizer = ProductFeatures()
# Cache the features on the model
self._featureset = None
def featureset(self):
"""
Opens the corpus path, reads the data and constructs features to
pass to the classifier. (A simple improvement is to cache this).
Returns a dictionary of features and the label as follows:
[(feats, label) for row in corpus]
This is the expected format for the MaxentClassifier.
"""
if self._featureset is None:
# Time how long it takes to extract features
start = time.time()
self._featureset = []
with open(self.corpus, 'r') as f:
reader = csv.DictReader(f)
for row in reader:
label = row.pop('category')
feats = self.featurizer.featurize(**row)
self._featureset.append((feats, label))
# Record feature extraction time
self.feattime = time.time() - start
return self._featureset
def train(self, featureset=None):
"""
Trains the maximum entropy classifier and returns it. If a
featureset is specified it trains on that, otherwise it trains on
the models featureset.
Pass in a featureset during cross validation.
Returns the training time and the classifier.
"""
featureset = featureset or self.featureset()
# Time how long it takes to train
start = time.time()
classifier = MaxentClassifier.train(featureset,
algorithm='megam', trace=1, gaussian_prior_sigma=1)
delta = time.time() - start
return classifier, delta
def build(self):
"""
Builds the model and writes to the outpath (which should be a
directory). Two files are written:
- the pickle of the model
- a yaml file of associated data
Note, if a file already exists at the outpath, this will raise an
exception (don't want to overwrite a model by accident!)
"""
# Record the start time
self.started = datetime.now()
start = time.time()
# Extract the features and train the model
classifier, self.traintime = self.train()
# Write the classifier to disk
with open(self.model_path, 'w') as f:
pickle.dump(classifier, f, pickle.HIGHEST_PROTOCOL)
# Begin accuracy validation
if self.validate:
self.cross_validate()
# Record the finish time
self.finished = datetime.now()
self.buildtime = time.time() - start
# Write the information to disk
self.write_details()
def cross_validate(self):
"""
Performs cross validation by training the model on 90% of the
corpus then checking the accuracy on the remaining 10%.
"""
start = time.time()
feats = self.featureset()
offset = len(feats) / 10
random.shuffle(feats)
train = feats[:offset]
test = feats[offset:]
classifier, _ = self.train(train)
self.accuracy = accuracy(classifier, test)
self.validtime = time.time() - start
def get_output_paths(self):
"""
Returns two paths - the pickle path and the information yaml path.
Ensures those paths don't exist and wont' be overwritten.
"""
today = datetime.now().strftime('%Y-%d-%m')
mname = os.path.join(self.outpath, "model-%s.pickle" % today)
iname = os.path.join(self.outpath, "info-%s.json" % today)
for name in (mname, iname):
if os.path.exists(name):
raise Exception("Can't overwrite file at '%s'!" % name)
return mname, iname
    def write_details(self):
        """
        Writes the details of the build (version, timings, accuracy and
        output paths) to the info path as JSON.

        NOTE(review): despite the historical "YAML file" wording, the
        data is serialized with json.dump. Also, self.accuracy and
        self.validtime are only assigned by cross_validate(); if
        __init__ does not default them, a build with validate=False will
        raise AttributeError here - confirm against the constructor.
        """
        details = {
            'version': apparel.get_version(),
            'started': self.started.strftime(DATE_FORMAT),
            'finished': self.finished.strftime(DATE_FORMAT),
            'accuracy': self.accuracy,
            'validated': self.validate,
            'corpus': self.corpus,
            'paths': {
                'model': self.model_path,
                'info': self.info_path,
            },
            'classes': {
                'classifier': MaxentClassifier.__name__,
                'features': ProductFeatures.__name__,
            },
            'timer': {
                'build': self.buildtime,
                'features': self.feattime,
                'validation': self.validtime,
                'training': self.traintime,
            }
        }

        with open(self.info_path, 'w') as f:
            json.dump(details, f, indent=4)
# Script entry point: build a classifier with the default settings.
# NOTE(review): build() has no return statement, so this Python 2 print
# statement always prints "None".
if __name__ == '__main__':
    builder = ClassifierBuilder()
    print builder.build()
| {
"repo_name": "DistrictDataLabs/product-classifier",
"path": "apparel/build.py",
"copies": "2",
"size": "7297",
"license": "mit",
"hash": -2975127261031258600,
"line_mean": 31.2876106195,
"line_max": 88,
"alpha_frac": 0.5547485268,
"autogenerated": false,
"ratio": 4.512677798392084,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0029536405499195153,
"num_lines": 226
} |
"""
Classifier package - utilize built model to perform classifications
"""
##########################################################################
## Imports
##########################################################################
import pickle
from operator import itemgetter
from apparel.config import settings
from apparel.features import ProductFeatures
##########################################################################
## Simple Classifier
##########################################################################
class ApparelClassifier(object):
    """
    Performs classification of products using a classifier that is loaded
    via a pickle at runtime. This classifier can be of any type, but we
    expect the Maximum Entropy classifier trained from a CSV corpus.
    """

    def __init__(self, model=None):
        """
        Pass in the path of the pickled classifier object. When omitted,
        the path is taken from ``settings.model``.
        """
        ## Get the default model from the settings if it isn't passed in
        model = model or settings.model

        ## Load the model from the pickle (binary mode for pickle data)
        with open(model, 'rb') as pkl:
            self._classifier = pickle.load(pkl)

        ## Create a featurizer to use on classify/explain input
        self.featurizer = ProductFeatures()

    def classify(self, name, description=None, keywords=None, threshold=0.01):
        """
        Classifies the text using the internal classifier. Returns the
        labels associated with the text paired with their probability,
        sorted most-probable first.

        The ``threshold`` parameter generalizes the previously
        hard-coded 0.01 cutoff: labels whose probability does not exceed
        it are omitted. The default preserves the original behavior.
        """
        features = self.featurizer.featurize(name, description, keywords)
        probdist = self._classifier.prob_classify(features)
        labels = [(label, probdist.prob(label))
                  for label in probdist.samples()
                  if probdist.prob(label) > threshold]
        return sorted(labels, key=itemgetter(1), reverse=True)

    def explain(self, name, description=None, keywords=None):
        """
        Wrapper for classifier.explain - prints out (no way to capture the
        string output, unfortunately) the features contributing to the
        chosen classifier.
        """
        features = self.featurizer.featurize(name, description, keywords)
        self._classifier.explain(features)

    def labels(self):
        """
        Wrapper for classifier.labels - returns a list of the labels.
        """
        return self._classifier.labels()
# Manual smoke test: explain the classification of a sample product name.
# Requires a trained pickled model at the configured settings.model path.
if __name__ == '__main__':
    classifier = ApparelClassifier()
    classifier.explain("GUESS Handbag, Isla Large Satchel")
| {
"repo_name": "georgetown-analytics/product-classifier",
"path": "apparel/classify.py",
"copies": "2",
"size": "2806",
"license": "mit",
"hash": 5626399216245243000,
"line_mean": 33.6419753086,
"line_max": 74,
"alpha_frac": 0.5923022096,
"autogenerated": false,
"ratio": 4.622734761120263,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6215036970720262,
"avg_score": null,
"num_lines": null
} |
"""
Uses confire to get meaningful configurations from a yaml file
"""
##########################################################################
## Imports
##########################################################################
import os
import confire
##########################################################################
## Configuration
##########################################################################
class ApparelConfiguration(confire.Configuration):
    """
    Meaningful defaults and required configurations.

    debug: the app will print or log debug statements
    testing: the app will not overwrite important resources
    corpus: the location of the corpus on disk
    model: the location of the pickled model on disk
    """

    # Search order for configuration files; presumably later entries
    # override earlier ones - TODO confirm against the confire docs.
    CONF_PATHS = [
        "/etc/apparel.yaml", # System configuration
        os.path.expanduser("~/.apparel.yaml"), # User specific config
        os.path.abspath("conf/apparel.yaml"), # Local configuration
    ]

    # Defaults used when no configuration file provides a value.
    debug = True
    testing = True
    corpus = None   # path to the training corpus
    model = None    # path to the pickled classifier
## Load settings immediately for import so other modules can simply do
## `from apparel.config import settings`.
settings = ApparelConfiguration.load()

if __name__ == '__main__':
    # Python 2 print statement: dumps the resolved configuration.
    print settings
| {
"repo_name": "georgetown-analytics/product-classifier",
"path": "apparel/config.py",
"copies": "2",
"size": "1528",
"license": "mit",
"hash": 207544591067363500,
"line_mean": 27.8301886792,
"line_max": 74,
"alpha_frac": 0.539921466,
"autogenerated": false,
"ratio": 4.672782874617737,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6212704340617736,
"avg_score": null,
"num_lines": null
} |
"""
Extracts the features from text for classification and building
"""
##########################################################################
## Imports
##########################################################################
import string
from nltk.corpus import stopwords
from nltk import wordpunct_tokenize
from nltk.stem.wordnet import WordNetLemmatizer
##########################################################################
## Featurize Class
##########################################################################
class ProductFeatures(object):
    """
    This class manages the extraction of text features from a product
    document that might contain a name, a description, and keywords. It
    ensures that stopwords and punctuation are excluded, and that all
    tokens are normalized to lower case and to their lemma class (thus
    reducing the feature space for better classification).

    The reason this is a class is because data needs to be stored to do
    the work of featurization - e.g. loading stopwords and punctuation.
    """

    def __init__(self, stoplist=None, punct=None, lemmatizer=None):
        # Load stopwords, punctuation, and lemmatizer once - this takes
        # a bit of work, so we only want to do it at construction time.
        # The stopword list is frozen into a set so the per-token
        # membership test in tokenize() is O(1) instead of a linear scan
        # of the NLTK stopword list for every token.
        self.stopwords = frozenset(stoplist or stopwords.words('english'))
        # Kept as a string on purpose: the membership test below is a
        # substring check, preserving the original behavior for
        # multi-character tokens.
        self.punctuation = punct or string.punctuation
        self.lemmatizer = lemmatizer or WordNetLemmatizer()

    def tokenize(self, text):
        """
        Yields individual tokens from the text utilizing NLTK's
        wordpunct_tokenize built in utility (far better than split on
        space). It also removes any stopwords and punctuation from the
        text, as well as ensures that every token is normalized.

        For now, token = word as in bag of words (the feature we're using).
        """
        for token in wordpunct_tokenize(text):
            token = self.normalize(token)
            if token in self.punctuation: continue
            if token in self.stopwords: continue
            yield token

    def normalize(self, word):
        """
        Ensures words are in the same class (lemma) as well as lowercase
        """
        word = word.lower()
        return self.lemmatizer.lemmatize(word)

    def featurize(self, name, description=None, keywords=None):
        """
        Returns a dictionary of features to use with the Maximum Entropy
        classifier. In this case we're using a "bag of words" approach.
        """
        # Get the bag of words from the name
        tokens = set(self.tokenize(name))

        # Add the bag of words from the description (union)
        if description is not None:
            tokens |= set(self.tokenize(description))

        # Get the bag of keywords (kept distinct from plain tokens)
        keywords = set(self.tokenize(keywords)) if keywords else set()

        # Create the features: plain tokens plus marked keyword features
        features = {token: True for token in tokens}
        for keyword in keywords:
            features["KEYWORD(%s)" % keyword] = True
        return features
##########################################################################
## Development testing
##########################################################################
if __name__ == '__main__':
    # Smoke test: print the bag-of-words features for a sample description.
    print ProductFeatures().featurize("The Women's EQ Medium Travel Bag from DAKINE. Though it may be small, that does not mean it cannot accomplish great things. The efficient 51 liter interior provides enough space for a week's worth . . .")
| {
"repo_name": "georgetown-analytics/product-classifier",
"path": "apparel/features.py",
"copies": "2",
"size": "3837",
"license": "mit",
"hash": 3831595678463831000,
"line_mean": 36.9900990099,
"line_max": 243,
"alpha_frac": 0.5913474068,
"autogenerated": false,
"ratio": 4.595209580838324,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6186556987638324,
"avg_score": null,
"num_lines": null
} |
# app/asistencias/forms.py
# coding: utf-8
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, HiddenField
from wtforms import DateField
from wtforms.validators import DataRequired
from wtforms.widgets import TextArea
class ReunionForm(FlaskForm):
    """
    Form for creating or editing a meeting (reunion).
    """
    # Hidden primary key (empty when creating a new meeting).
    id = HiddenField(u'id')
    nombre_reunion = StringField(u'Nombre de la reunión')

    # Only the meeting date is mandatory.
    fecha_reunion = DateField(u'Fecha de la reunión',
                            validators=[DataRequired()])
    comentarios_reunion = StringField(u'Comentarios',
                                      widget=TextArea())
    submit = SubmitField(u'Aceptar')
class AsistenciaForm(FlaskForm):
    """
    Attendance registration form. Both hidden fields are filled
    client-side: id_miembros carries a comma-separated list of member
    ids, id_reunion the meeting id.
    """
    id_miembros = HiddenField(u'idMiembros')
    id_reunion = HiddenField(u'idReunion')
    submit = SubmitField(u'Buscar')
class ConsultaAsistenciasForm(FlaskForm):
    """
    Attendance query form: looks up one member's attendance history.
    """
    # Filled by the autocomplete widget; the visible field only shows
    # the member's full name.
    id_miembro = HiddenField(u'idMiembro')
    nomyape = StringField(u'Nombres y Apellidos de la Persona:')
    submit = SubmitField(u'Buscar')
| {
"repo_name": "originaltebas/chmembers",
"path": "app/asistencias/forms.py",
"copies": "1",
"size": "1132",
"license": "mit",
"hash": 2747870004886938000,
"line_mean": 23.5652173913,
"line_max": 64,
"alpha_frac": 0.6690265487,
"autogenerated": false,
"ratio": 3.3333333333333335,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4502359882033334,
"avg_score": null,
"num_lines": null
} |
# app/asistencias/views.py
# coding: utf-8
from flask import flash, jsonify
from flask import redirect, render_template, url_for, request
from flask_login import current_user, login_required
from app.asistencias import asistencias
from app.asistencias.forms import ReunionForm
from app.asistencias.forms import AsistenciaForm, ConsultaAsistenciasForm
from app import db
from sqlalchemy import desc
from app.models import Reunion, Miembro, relacion_asistencias
def check_edit_or_admin():
    """
    Send users who are neither editors nor admins back to the home hub.

    The previous implementation *returned* the redirect response, but
    every caller invokes this helper as a bare statement and discards
    the return value, so unauthorized users were never actually
    redirected. Raising the redirect via abort() enforces the check
    without changing any call site.
    """
    from flask import abort  # local import keeps this fix self-contained
    if not current_user.get_urole() >= 1:
        # abort() accepts a Response object and raises it as an
        # HTTPException, so the redirect takes effect even though the
        # caller ignores our return value.
        abort(redirect(url_for("home.hub")))
def check_only_admin():
    """
    Send non-admin users back to the home hub.

    As with check_edit_or_admin, the redirect used to be returned and
    then discarded by every caller; raising it via abort() makes the
    access check actually take effect.
    """
    from flask import abort  # local import keeps this fix self-contained
    if not current_user.get_urole() == 2:
        abort(redirect(url_for("home.hub")))
@asistencias.route('/asistencias', methods=['GET'])
@login_required
def ver_asistencias():
    """
    Render the list of meetings used for attendance tracking, newest
    first.
    """
    check_edit_or_admin()

    # The landing view always starts in "list" mode.
    flag_listar = True

    reuniones = (db.session.query(Reunion)
                 .order_by(desc(Reunion.fecha_reunion))
                 .all())

    return render_template('asistencias/base_asistencias.html',
                           reuniones=reuniones,
                           flag_listar=flag_listar)
@asistencias.route('/asistencias/reunion/crear', methods=['GET', 'POST'])
@login_required
def crear_reunion():
    """
    Create a new meeting for attendance tracking.
    """
    check_edit_or_admin()

    # Tell the shared template to render the "create" panel, not the list.
    flag_listar = False
    flag_crear = True

    form = ReunionForm()
    if form.validate_on_submit():
        nueva = Reunion(nombre_reunion=form.nombre_reunion.data,
                        fecha_reunion=form.fecha_reunion.data,
                        comentarios_reunion=form.comentarios_reunion.data)
        try:
            db.session.add(nueva)
            db.session.commit()
            flash('Has guardado los datos correctamente', 'success')
        except Exception as e:
            flash('Error:' + str(e), 'danger')

        return redirect(url_for('asistencias.ver_asistencias'))

    return render_template(
        'asistencias/base_asistencias.html',
        add_asistencias=flag_crear, flag_listar=flag_listar, form=form)
@asistencias.route('/asistencias/reunion/modif/<int:id>',
                   methods=['GET', 'POST'])
@login_required
def modif_reunion(id):
    """
    Edit an existing meeting used for attendance tracking.

    GET pre-populates the form with the stored values; POST validates
    and persists the changes, then redirects back to the meeting list.
    (Removed a block of commented-out form-validation debug code.)
    """
    check_edit_or_admin()

    # Render flags for the shared template: neither "create" nor "list".
    flag_crear = False
    flag_listar = False

    obj = Reunion.query.get_or_404(id)
    form = ReunionForm(obj=obj)

    if request.method == 'GET':
        # Pre-populate the form fields from the database row.
        form.nombre_reunion.data = obj.nombre_reunion
        form.fecha_reunion.data = obj.fecha_reunion
        form.comentarios_reunion.data = obj.comentarios_reunion

    if request.method == 'POST':
        if form.validate_on_submit():
            obj.nombre_reunion = form.nombre_reunion.data
            obj.fecha_reunion = form.fecha_reunion.data
            obj.comentarios_reunion = form.comentarios_reunion.data
            try:
                db.session.commit()
                flash('Has modificado los datos correctamente', 'success')
            except Exception as e:
                flash('Error: ' + str(e), 'danger')

            return redirect(url_for('asistencias.ver_asistencias'))

    return render_template(
        'asistencias/base_asistencias.html',
        add_asistencias=flag_crear, flag_listar=flag_listar, form=form)
@asistencias.route('/asistencias/reunion/borrar/<int:id>',
                   methods=['GET'])
@login_required
def borrar_reunion(id):
    """
    Delete a meeting from attendance tracking and return to the list.
    """
    check_edit_or_admin()

    reunion = Reunion.query.get_or_404(id)
    try:
        db.session.delete(reunion)
        db.session.commit()
        flash('Has borrado los datos correctamente', 'success')
    except Exception as e:
        flash('Error: ' + str(e), 'danger')

    return redirect(url_for('asistencias.ver_asistencias'))
@asistencias.route('/asistencias/registrar/<int:id>',
                   methods=['GET', 'POST'])
@login_required
def registrar_asistencias(id):
    """
    Register attendance for a meeting.

    GET renders the member list with their current attendance selection;
    POST replaces the meeting's attendance with the submitted member ids
    and returns the redirect URL as JSON (the form is posted via AJAX).
    """
    check_edit_or_admin()

    flag_registrar = True
    form = AsistenciaForm()

    if request.method == 'GET':
        query_reunion = Reunion.query.get_or_404(id)

        # Subquery marking members already registered for this meeting
        # ("seleccionado" is the member id when present).
        m_sel = db.session.query(Miembro.id,
                                 relacion_asistencias.c.id_miembro
                                 .label('seleccionado'))\
                          .outerjoin(relacion_asistencias,
                                     Miembro.id ==
                                     relacion_asistencias.c.id_miembro)\
                          .filter(relacion_asistencias.c.id_reunion == id)\
                          .subquery()

        query_miembros = db.session.query(Miembro)\
                                   .outerjoin(m_sel,
                                              m_sel.c.id == Miembro.id)\
                                   .add_columns(Miembro.id,
                                                Miembro.fullname,
                                                Miembro.email,
                                                Miembro.telefono_movil,
                                                m_sel.c.seleccionado
                                                )

        return render_template(
            'asistencias/base_asistencias.html',
            reunion=query_reunion, miembros=query_miembros,
            flag_registrar=flag_registrar, form=form)

    if request.method == 'POST':
        if form.validate_on_submit():
            # Comma-separated member ids posted from the hidden field.
            id_ms = form.id_miembros.data[:].split(",")
            id_r = form.id_reunion.data

            # Fetch the meeting being updated.
            reunion = db.session.query(Reunion).filter(Reunion.id == id_r)\
                                .first()

            # Current attendees, to be removed before re-adding.
            obj_del = db.session.query(Miembro)\
                                .join(relacion_asistencias,
                                      Miembro.id ==
                                      relacion_asistencias.c.id_miembro)\
                                .filter(relacion_asistencias.c.id_reunion
                                        == id_r).all()

            # Newly selected attendees.
            obj_add = db.session.query(Miembro)\
                                .filter(Miembro.id.in_(id_ms))\
                                .all()

            for o in obj_del:
                reunion.miembros.remove(o)
                # NOTE(review): deleting the Reunion row itself while
                # detaching members looks wrong - it is re-added below,
                # but verify this round-trip is intentional.
                db.session.delete(reunion)

            for i in obj_add:
                reunion.miembros.append(i)
                db.session.add(reunion)

            try:
                db.session.commit()
                flash(u'Se ha registrado la asistencia correctamente.',
                      'success')
            except Exception as e:
                # Fixed: flash() takes (message, category); the old
                # three-argument call raised TypeError on any DB error.
                flash('Error: ' + str(e), 'danger')

    url = url_for('asistencias.ver_asistencias')
    return jsonify(url=url)
@asistencias.route('/asistencias/consultas',
                   methods=['GET', 'POST'])
@login_required
def consulta_asistencias():
    """
    Query the attendance history of a single member (intended for
    admins; enforced by check_only_admin).

    GET renders the search form; POST lists every meeting attended by
    the member identified by the hidden id_miembro field.
    """
    check_only_admin()

    # flag_crear = False
    # flag_listar = False
    flag_consultar = True

    # The member id is supplied by the autocomplete widget.
    form = ConsultaAsistenciasForm()

    if form.validate_on_submit():
        # Join member -> attendance link table -> meeting.
        listado = db.session.query(Miembro)\
                    .join(relacion_asistencias,
                          Miembro.id ==
                          relacion_asistencias.c.id_miembro)\
                    .join(Reunion,
                          Reunion.id ==
                          relacion_asistencias.c.id_reunion)\
                    .filter(Miembro.id == form.id_miembro.data)\
                    .add_columns(Miembro.fullname,
                                 Miembro.id.label("id_m"),
                                 Reunion.id.label("id_r"),
                                 Reunion.nombre_reunion,
                                 Reunion.fecha_reunion,
                                 Reunion.comentarios_reunion)\
                    .all()

        return render_template(
            'asistencias/base_asistencias.html',
            flag_consultar=flag_consultar, form=form,
            asistencias=listado, flag_asistencias=True)

    return render_template(
        'asistencias/base_asistencias.html',
        flag_consultar=flag_consultar, form=form)
def Convert(tup, di):
    """
    Fill the "id" and "name" keys of *di* from the (id, name) pairs in
    *tup*, never overwriting a key that is already present. With an
    empty *di*, only the FIRST pair ends up in the dict.
    """
    for ident, label in tup:
        if "id" not in di:
            di["id"] = ident
        if "name" not in di:
            di["name"] = label
    return di
@asistencias.route('/asistencias/autocomplete', methods=['GET'])
def autocomplete():
    """
    Autocomplete endpoint for member names.

    Reads the ``q`` query parameter and returns the matching member as
    JSON. (Removed a stray debug print left in the handler.)

    NOTE(review): Convert() only keeps the first (id, name) pair, so at
    most one match is returned - confirm whether the client expects a
    single suggestion or a list.
    """
    search = request.args.get('q')

    results = [(row.id, row.fullname)
               for row in Miembro.query
               .filter(
                   Miembro.fullname.like('%' + str(search) + '%')).all()]

    resdic = {}
    Convert(results, resdic)
    return jsonify(matching_results=resdic)
| {
"repo_name": "originaltebas/chmembers",
"path": "app/asistencias/views.py",
"copies": "1",
"size": "10216",
"license": "mit",
"hash": -4341834828530445300,
"line_mean": 31.9548387097,
"line_max": 79,
"alpha_frac": 0.5275058731,
"autogenerated": false,
"ratio": 3.6472688325598,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9674416282720733,
"avg_score": 0.00007168458781362007,
"num_lines": 310
} |
"""AppAssure 5 Core API"""
from appassure.api import AppAssureAPI
class IBackgroundJobManagement(AppAssureAPI):
    """Full documentation online at
    http://docs.appassure.com/display/AA50D/IBackgroundJobManagement
    """

    def getJobsForAgentByPage(self, agentId, page, filter='', max=100):
        """Gets the current list of jobs for a specific
        agent, such that the results are paged for easy viewing in a
        paged grid in GUI.

        Fixed: the URL template has four placeholders (agentId, filter,
        max, page) but only two values were interpolated, so every call
        raised TypeError. ``filter`` and ``max`` are now keyword
        parameters with defaults (max=100 assumed reasonable - confirm
        against the AppAssure API docs).
        """
        return self.session.request(
            'jobmgr/agents/%s/jobs/paged?filter=%s&max=%s&page=%s'
            % (agentId, filter, max, page))

    def getAllJobsForAgentCount(self, agentId, filter):
        """Gets the current number of jobs, in memory and
        database, for a specific agent.
        """
        return self.session.request('jobmgr/agents/%s/jobsCount?filter=%s'
            % (agentId, filter))

    def cancelChildJob(self, parentJobId, childJobId):
        """Cancels the child job."""
        return self.session.request('jobmgr/jobs/%s/childJobs/%s'
            % (parentJobId, childJobId), 'DELETE')

    def getJob(self, jobId):
        """Gets the specified job."""
        return self.session.request('jobmgr/jobs/%s'
            % (jobId))

    def cancelJob(self, jobId):
        """Cancels the job."""
        return self.session.request('jobmgr/jobs/%s'
            % (jobId), 'DELETE')

    def updateJobRequest(self, data, jobId):
        """Updates the specified job."""
        return self.session.request('jobmgr/jobs/%s'
            % (jobId), 'POST',
            self.getXML(data, 'backgroundJobRequest'))

    def getCoreJobsByPage(self, page, filter='', max=100):
        """Gets the current list of jobs for the core, such
        that the results are paged for easy viewing in a paged grid.

        Fixed: same placeholder/argument mismatch as
        getJobsForAgentByPage (three placeholders, one value).
        """
        return self.session.request(
            'jobmgr/jobs/core/paged?filter=%s&max=%s&page=%s'
            % (filter, max, page))

    def getAllJobsCount(self, filter):
        """Gets the current number of jobs, in memory and
        database.
        """
        return self.session.request('jobmgr/jobsCount?filter=%s'
            % (filter))
| {
"repo_name": "rshipp/python-appassure",
"path": "appassure/core/IBackgroundJobManagement.py",
"copies": "1",
"size": "2117",
"license": "bsd-3-clause",
"hash": 668345675394163800,
"line_mean": 35.5,
"line_max": 90,
"alpha_frac": 0.6032120926,
"autogenerated": false,
"ratio": 3.8421052631578947,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9920752904120642,
"avg_score": 0.004912890327450488,
"num_lines": 58
} |
"""AppAssure 5 Core API"""
from appassure.api import AppAssureAPI
class IEventsManagement(AppAssureAPI):
    """Full documentation online at
    http://docs.appassure.com/display/AA50D/IEventsManagement

    Note: the four *ByPage methods previously interpolated fewer values
    than their URL templates had placeholders, raising TypeError on
    every call; they now take a defaulted ``max`` parameter.
    """

    def getAllAgentAlerts(self, agentId):
        """Gets the summary information for all alerts
        associated with the specified agent. Note that this list can
        potentially be very large.
        """
        return self.session.request('events/agents/%s/alerts/all'
            % (agentId))

    def getAgentAlertsByPage(self, agentId, page, max=100):
        """Gets summary info for alerts for a given agent ID,
        such that the results are paged for easy viewing in a paged
        grid. (max=100 assumed reasonable - confirm against API docs.)
        """
        return self.session.request('events/agents/%s/alerts/paged?max=%s&page=%s'
            % (agentId, max, page))

    def getAgentAlertsCount(self, agentId):
        """Gets non-dismissed alerts count for specified
        agent.
        """
        return self.session.request('events/agents/%s/alertsCount'
            % (agentId))

    def getAllAgentEvents(self, agentId):
        """Gets the summary information for all events
        associated with the specified agent. Note that this list can
        potentially be very large.
        """
        return self.session.request('events/agents/%s/all'
            % (agentId))

    def dismissAllAgentAlerts(self, agentId):
        """Marks all alerts for the agent as read, thereby
        dismissing them from the list of alerts.
        """
        return self.session.request('events/agents/%s/all'
            % (agentId), 'DELETE')

    def getAgentEventsCount(self, agentId):
        """Gets events count for specified agent."""
        return self.session.request('events/agents/%s/eventsCount'
            % (agentId))

    def getAgentEventsByPage(self, agentId, page, max=100):
        """Gets summary info for events for a given agent ID,
        such that the results are paged for easy viewing in a paged
        grid.
        """
        return self.session.request('events/agents/%s/paged?max=%s&page=%s'
            % (agentId, max, page))

    def getCachedEventsByDate(self, data):
        """Gets the summary information for cached events
        associated with the specified core ordering by date.
        """
        return self.session.request('events/cachedEventsDateParam', 'PUT',
            self.getXML(data, 'EventsDateRange'))

    def getConfiguration(self):
        """Returns configuration information for events such
        as email content and notification settings.
        """
        return self.session.request('events/config')

    def setConfiguration(self, data):
        """Sets configuration information for events such as
        email content and notification settings.
        """
        return self.session.request('events/config', 'PUT',
            self.getXML(data, 'eventsConfiguration'))

    def getAgentAlertsSettings(self, agentId):
        """Returns the alert settings for the specified agent
        such as email content and notification settings.
        """
        return self.session.request('events/config/agents/%s'
            % (agentId))

    def setAgentAlertsSettings(self, data, agentId):
        """Sets alert settings for the specified agent such
        as email content and notification settings.
        """
        return self.session.request('events/config/agents/%s'
            % (agentId), 'PUT',
            self.getXML(data, 'agentAlertSettings'))

    def sendTestEmail(self, data):
        """Generates and sends a test email notification
        based on the specified email configuration.
        """
        return self.session.request('events/config/email/test', 'PUT',
            self.getXML(data, 'sendTestEmailRequest'))

    def getAllCoreAlerts(self):
        """Gets the summary information for all alerts
        associated with the core. Note that this list can
        potentially be very large.
        """
        return self.session.request('events/core/alerts/all')

    def getCoreAlertsByPage(self, page, max=100):
        """Gets summary info for alerts for the core, such
        that the results are paged for easy viewing in a paged grid.
        """
        return self.session.request('events/core/alerts/paged?max=%s&page=%s'
            % (max, page))

    def getCoreAlertsCount(self):
        """Gets non-dismissed alerts count for core."""
        return self.session.request('events/core/alertsCount')

    def getAllCoreEvents(self):
        """Gets the summary information for all events
        associated with the core. Note that this list can
        potentially be very large.
        """
        return self.session.request('events/core/all')

    def dismissAllCoreAlerts(self):
        """Marks all alerts for the core as read, thereby
        dismissing them from the list of alerts.
        """
        return self.session.request('events/core/all', 'DELETE')

    def getCoreEventsCount(self):
        """Gets events count for core."""
        return self.session.request('events/core/eventsCount')

    def getCoreEventsByPage(self, page, max=100):
        """Gets summary info for events for the core, such
        that the results are paged for easy viewing in a paged grid.
        """
        return self.session.request('events/core/paged?max=%s&page=%s'
            % (max, page))

    def getDetailsForEvent(self, eventId):
        """Gets the details for a single event."""
        return self.session.request('events/event/%s'
            % (eventId))

    def dismissEvent(self, eventId):
        """Marks an event as read, thereby dismissing it from
        the list of events.
        """
        return self.session.request('events/event/%s'
            % (eventId), 'DELETE')

    def getEventsByDate(self, data):
        """Gets the summary information for all events
        associated with the specified core ordering by date.
        """
        return self.session.request('events/EventsDateRange', 'PUT',
            self.getXML(data, 'EventsDateRange'))

    def getEventTypes(self):
        """Gets the list of all possible event types,
        organized into groups.
        """
        return self.session.request('events/types')
| {
"repo_name": "rshipp/python-appassure",
"path": "appassure/core/IEventsManagement.py",
"copies": "1",
"size": "6319",
"license": "bsd-3-clause",
"hash": 7273730901732545000,
"line_mean": 36.6130952381,
"line_max": 82,
"alpha_frac": 0.6235163792,
"autogenerated": false,
"ratio": 4.388194444444444,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5511710823644445,
"avg_score": null,
"num_lines": null
} |
"""AppAssure 5 Core API"""
from appassure.api import AppAssureAPI
class IRecoveryPointsManagement(AppAssureAPI):
    """Full documentation online at
    http://docs.appassure.com/display/AA50D/IRecoveryPointsManagement

    Thin REST wrappers: each method builds a URL and delegates to
    self.session.request (GET unless another verb is passed).
    """

    def deleteRecoveryPointsForAgent(self, agentId):
        """Deletes all recovery points for specified agent."""
        return self.session.request('recoveryPoints/agents/%s'
            % (agentId), 'DELETE')

    def getAllRecoveryPoints(self, agentId):
        """Gets the summary information for all recovery
        points associated with the specified agent. Note that this
        list can potentially be very large.
        """
        return self.session.request('recoveryPoints/agents/%s/all'
            % (agentId))

    def startCheckAgentRecoveryPoints(self, agentId, fixErrors):
        """Starts a task that performs integrity check of all
        agent's recovery points and tries to fix errors being found.

        NOTE(review): issued as a GET although it starts a server-side
        task - confirm the API does not expect POST here.
        """
        return self.session.request('recoveryPoints/agents/%s/integritycheck/fix?fixErrors=%s'
            % (agentId, fixErrors))

    def getRecoveryPointsByPage(self, agentId, max, page):
        """Gets summary info for recovery points for a given
        agent ID, such that the results are paged for easy viewing
        in a paged grid.
        """
        return self.session.request('recoveryPoints/agents/%s/paged?max=%s&page=%s'
            % (agentId, max, page))

    def getRecoveryPoints(self, agentId, max, olderThan, newerThan):
        """Gets the summary information for the recovery
        points associated with a specified agent that fall outside
        of a last modified date/time range. You also specify a
        maximum number of recovery points to return with the
        specified range.
        """
        return self.session.request('recoveryPoints/agents/%s/where?max=%s&olderThan=%s&newerThan=%s'
            % (agentId, max, olderThan, newerThan))

    def mergeAgentRecoveryPoints(self, data, sourceAgentId):
        """Merges the recovery points of the agent ID
        identified by the URI into another agent ID specified in the
        request.
        """
        return self.session.request('recoveryPoints/agents/%s'
            % (sourceAgentId), 'POST',
            self.getXML(data, 'mergeRecoveryPointsRequest'))

    def getAgentsRecoveryPointsInfo(self):
        """Gets a list of all agents with recovery points."""
        return self.session.request('recoveryPoints/agents/all')

    def getVolumeImageDetails(self, imageId):
        """Gets information for a single volume image
        specified by unique identifier.
        """
        return self.session.request('recoveryPoints/images/%s/'
            % (imageId))

    def getImageRawData(self, imageId, blockOffset, blockLength):
        """Gets a stream of data consisting of the data in
        the image at the specified offset and length. Useful only
        for diagnostic purposes.
        """
        return self.session.request('recoveryPoints/images/rawdata/%s.rawdata?blockOffset=%s&blockLength=%s'
            % (imageId, blockOffset, blockLength))

    def getImageRawKeys(self, imageId):
        """Gets a stream of offset/key pairs, containing the
        block offsets in the image and the DVM keys of the record at
        each block offset. Useful only for diagnostic purposes.
        """
        return self.session.request('recoveryPoints/images/rawkeys/%s.rawkeys'
            % (imageId))

    def getImageRawKeysText(self, imageId):
        """Gets a stream of offset/key pairs, containing the
        block offsets in the image and the DVM keys of the record at
        each block offset (text form). Useful only for diagnostic
        purposes.
        """
        return self.session.request('recoveryPoints/images/rawkeys/%s.textkeys'
            % (imageId))

    def getMostRecentRecoveryPoints(self, data):
        """Gets summary info for the most recent recovery
        point of every agent specified in the request.
        """
        return self.session.request('recoveryPoints/recent', 'PUT',
            self.getXML(data, 'getMostRecentRecoveryPoints'))

    def deleteRecoveryPointsRange(self, data):
        """Deletes all recovery points in a specified time
        period for the specified agent.
        """
        return self.session.request('recoveryPoints/rps/deleteRecoveryPointsRange', 'POST',
            self.getXML(data, 'adHocDeleteRecoveryPointsRequest'))

    def deleteRecoveryPointsChain(self, recoveryPointId):
        """Deletes all volume image chains that contain
        volume images for a given recovery point.
        """
        return self.session.request('recoveryPoints/rps/%s'
            % (recoveryPointId), 'DELETE')

    def getRecoveryPointDetails(self, recoveryPointId):
        """Gets detailed info for a single recovery point.
        """
        return self.session.request('recoveryPoints/rps/%s/details'
            % (recoveryPointId))

    def getRecoveryPointLockedKeys(self, recoveryPointId):
        """Gets a list of encryption keys used by a recovery
        point that are locked and require a passphrase in order to
        mount.
        """
        return self.session.request('recoveryPoints/rps/%s/lockedkeys'
            % (recoveryPointId))

    def getRecoveryPointSummary(self, recoveryPointId):
        """Gets summary info for a single recovery point.
        """
        return self.session.request('recoveryPoints/rps/%s/summary'
            % (recoveryPointId))
| {
"repo_name": "rshipp/python-appassure",
"path": "appassure/core/IRecoveryPointsManagement.py",
"copies": "1",
"size": "5610",
"license": "bsd-3-clause",
"hash": -1513177283983803400,
"line_mean": 41.8244274809,
"line_max": 108,
"alpha_frac": 0.6531194296,
"autogenerated": false,
"ratio": 4.441805225653207,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003636633829867053,
"num_lines": 131
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.