'''
search a set of log files for bad accel values
'''

import sys, time, os, glob
import zipfile
from pymavlink import mavutil

# extra imports for pyinstaller
import json
from pymavlink.dialects.v10 import ardupilotmega

# Python 2/3 compatibility for the interactive prompts below
try:
    input = raw_input
except NameError:
    pass

# raw strings so the backslashes are not treated as escape sequences
search_dirs = [r'c:\Program Files\APM Planner',
               r'c:\Program Files\Mission Planner',
               r'c:\Program Files (x86)\APM Planner',
               r'c:\Program Files (x86)\Mission Planner']
results = 'SearchResults.zip'
email = 'Craig Elder <craig@3drobotics.com>'

from argparse import ArgumentParser
parser = ArgumentParser(description=__doc__)
parser.add_argument("--directory", action='append', default=search_dirs, help="directories to search")
parser.add_argument("--post-boot", action='store_true', help="post boot only")
parser.add_argument("--init-only", action='store_true', help="init only")
parser.add_argument("--single-axis", action='store_true', help="single axis only")
args = parser.parse_args()

logcount = 0

def AccelSearch(filename):
    global logcount
    mlog = mavutil.mavlink_connection(filename)
    badcount = 0
    badval = None
    have_ok = False
    last_t = 0
    while True:
        m = mlog.recv_match(type=['PARAM_VALUE', 'RAW_IMU'])
        if m is None:
            if last_t != 0:
                logcount += 1
            return False
        if m.get_type() == 'PARAM_VALUE':
            if m.param_id.startswith('INS_PRODUCT_ID'):
                if m.param_value not in [0.0, 5.0]:
                    return False
        if m.get_type() == 'RAW_IMU':
            if m.time_usec < last_t:
                have_ok = False
            last_t = m.time_usec
            if abs(m.xacc) > 3000 and abs(m.yacc) > 3000 and abs(m.zacc) > 3000 and not args.single_axis:
                if args.post_boot and not have_ok:
                    continue
                if args.init_only and have_ok:
                    continue
                print(have_ok, last_t, m)
                break
            # also look for a single axis that stays nearly constant at a large value
            for axes in ['xacc', 'yacc', 'zacc']:
                value1 = getattr(m, axes)
                if abs(value1) > 2000:
                    if badval is None:
                        badcount = 1
                        badval = m
                        continue
                    value2 = getattr(badval, axes)
                    if abs(value1 - value2) < 30:
                        badcount += 1
                        badval = m
                        if badcount > 5:
                            logcount += 1
                            if args.init_only and have_ok:
                                continue
                            print(have_ok, badcount, badval, m)
                            return True
                    else:
                        badcount = 1
                        badval = m
            if badcount == 0:
                have_ok = True
    # reached via the break above: a multi-axis match was found
    if last_t != 0:
        logcount += 1
    return True

found = []
directories = args.directory

# allow drag and drop of files/directories onto the executable
if len(sys.argv) > 1:
    directories = sys.argv[1:]

filelist = []
for d in directories:
    if not os.path.exists(d):
        continue
    if os.path.isdir(d):
        print("Searching in %s" % d)
        for (root, dirs, files) in os.walk(d):
            for f in files:
                if not f.endswith('.tlog'):
                    continue
                path = os.path.join(root, f)
                filelist.append(path)
    elif d.endswith('.tlog'):
        filelist.append(d)

for i in range(len(filelist)):
    f = filelist[i]
    print("Checking %s ... [found=%u logcount=%u i=%u/%u]" % (f, len(found), logcount, i, len(filelist)))
    if AccelSearch(f):
        found.append(f)

if len(found) == 0:
    print("No matching files found - all OK!")
    input('Press enter to close')
    sys.exit(0)

print("Creating zip file %s" % results)
try:
    zip = zipfile.ZipFile(results, 'w')
except Exception:
    print("Unable to create zip file %s" % results)
    print("Please send matching files manually")
    for f in found:
        print('MATCHED: %s' % f)
    input('Press enter to close')
    sys.exit(1)

for f in found:
    zip.write(f, arcname=os.path.basename(f))
zip.close()

print('==============================================')
print("Created %s with %u of %u matching logs" % (results, len(found), logcount))
print("Please send this file to %s" % email)
print('==============================================')
input('Press enter to close')
sys.exit(0)
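
# --- Added sketch (not part of the original tool) ---
# The single-axis check above flags a log when one accelerometer axis stays
# nearly constant at a large raw value. A minimal standalone distillation of
# that rule; the name is_stuck_axis is illustrative, the thresholds mirror
# the constants used above:
def is_stuck_axis(values, threshold=2000, jitter=30, run=5):
    """Return True if `values` contains more than `run` consecutive readings
    that all exceed `threshold` in magnitude and differ by less than `jitter`."""
    count = 0
    prev = None
    for v in values:
        if abs(v) > threshold:
            if prev is not None and abs(v - prev) < jitter:
                count += 1
                if count > run:
                    return True
            else:
                count = 1
            prev = v
        else:
            count = 0
            prev = None
    return False

# is_stuck_axis([2500] * 10) -> True;  is_stuck_axis(list(range(10))) -> False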
# ===========================================================================
# Source: pymavlink/tools/AccelSearch.py
# (repo: fschill/mavue, license: bsd-3-clause)
# ===========================================================================
import pytest

from pybiomart import Dataset
from pybiomart.server import Server

# pylint: disable=redefined-outer-name, no-self-use


@pytest.fixture
def query_params():
    """Example query parameters."""
    return {
        'attributes': ['ensembl_gene_id'],
        'filters': {
            'chromosome_name': ['1']
        }
    }


class TestDatasetStatic(object):
    """Static (offline) tests for Dataset class."""

    def test_attributes(self, mock_dataset):
        """Tests basic attributes."""
        assert mock_dataset.name == 'mmusculus_gene_ensembl'
        assert mock_dataset.display_name == 'Mus musculus genes (GRCm38.p4)'

    def test_fetch_configuration(self, mocker, mock_dataset,
                                 dataset_config_response):
        """Tests fetching of filters/attributes."""
        mock_get = mocker.patch.object(
            mock_dataset, 'get', return_value=dataset_config_response)

        assert len(mock_dataset.filters) > 0
        assert len(mock_dataset.attributes) > 0

        mock_get.assert_called_once_with(
            type='configuration', dataset=mock_dataset.name)

    def test_fetch_attribute(self, mocker, mock_dataset,
                             dataset_config_response):
        """Tests attributes of example attribute."""
        mocker.patch.object(
            mock_dataset, 'get', return_value=dataset_config_response)

        # Test example attribute.
        attr = mock_dataset.attributes['ensembl_gene_id']

        assert attr.name == 'ensembl_gene_id'
        assert attr.display_name == 'Ensembl Gene ID'
        assert attr.description == 'Ensembl Stable ID of the Gene'
        assert attr.default

    def test_fetch_filters(self, mocker, mock_dataset,
                           dataset_config_response):
        """Tests attributes of example filter."""
        mocker.patch.object(
            mock_dataset, 'get', return_value=dataset_config_response)

        # Test example filter.
        filt = mock_dataset.filters['chromosome_name']

        assert filt.name == 'chromosome_name'
        assert filt.type == 'list'
        assert filt.description == ''

    def test_query(self, mocker, mock_dataset_with_config, query_params,
                   dataset_query_response):
        """Tests example query."""
        mock_dataset = mock_dataset_with_config
        mock_get = mocker.patch.object(
            mock_dataset, 'get', return_value=dataset_query_response)

        # Perform query.
        res = mock_dataset.query(**query_params)

        # Check query result.
        assert len(res) > 0
        assert 'Ensembl Gene ID' in res

        # Check query xml (the join below strips the newlines; the leading
        # space keeps the attributes separated after joining).
        query = b"""<Query datasetConfigVersion="0.6" formatter="TSV"
 header="1" uniqueRows="1" virtualSchemaName="default">
<Dataset interface="default" name="mmusculus_gene_ensembl">
<Attribute name="ensembl_gene_id" />
<Filter name="chromosome_name" value="1" />
</Dataset></Query>"""
        query = b''.join(query.split(b'\n'))

        mock_get.assert_called_once_with(query=query)

    def test_query_attr_name(self, mocker, mock_dataset_with_config,
                             query_params, dataset_query_response):
        """Tests example query, renaming columns to names."""
        mock_dataset = mock_dataset_with_config
        mocker.patch.object(
            mock_dataset, 'get', return_value=dataset_query_response)

        # Perform query.
        res = mock_dataset.query(use_attr_names=True, **query_params)

        # Check query result.
        assert len(res) > 0
        assert 'ensembl_gene_id' in res

    def test_query_data_types(self, mocker, mock_dataset_with_config,
                              query_params, dataset_query_response):
        """Tests example query with data types specified."""
        mock_dataset = mock_dataset_with_config
        mock_get = mocker.patch.object(
            mock_dataset, 'get', return_value=dataset_query_response)

        data_types = {'Ensembl Gene ID': str}
        query_params['dtypes'] = data_types

        # Perform query.
        res = mock_dataset.query(**query_params)

        # Check query result.
        assert len(res) > 0
        assert 'Ensembl Gene ID' in res

    def test_query_non_valid_data_types(self, mocker, mock_dataset_with_config,
                                        query_params, dataset_query_response):
        """Tests example query with non-valid data types specified."""
        mock_dataset = mock_dataset_with_config
        mock_get = mocker.patch.object(
            mock_dataset, 'get', return_value=dataset_query_response)

        data_types = {'Ensembl Gene ID': 'hello'}
        query_params['dtypes'] = data_types

        # Perform query; an invalid dtype should raise a ValueError.
        with pytest.raises(ValueError):
            mock_dataset.query(**query_params)


class TestDatasetLive(object):
    """Live unit tests for dataset."""

    def test_ensembl(self):
        """Tests example query to ensembl."""
        dataset = Dataset(
            name='hsapiens_gene_ensembl',
            host='http://www.ensembl.org',
            use_cache=False)

        result = dataset.query(
            attributes=['ensembl_gene_id', 'external_gene_name'])

        assert result.shape[0] > 0
        assert result.shape[1] == 2
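
# --- Added example (not part of the original test suite) ---
# The module imports Server above; a minimal sketch of reaching the same
# dataset through a server object (mart/dataset names follow the public
# Ensembl BioMart; network access required):
def _example_server_lookup():
    server = Server(host='http://www.ensembl.org')
    mart = server.marts['ENSEMBL_MART_ENSEMBL']
    dataset = mart.datasets['hsapiens_gene_ensembl']
    return dataset.display_name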
# ===========================================================================
# Source: tests/test_dataset.py
# (repo: jrderuiter/pybiomart, license: mit)
# ===========================================================================
from flask_wtf import FlaskForm
from wtforms import (TextField, SubmitField, IntegerField)


class AddUserForm(FlaskForm):
    name = TextField(u'用户名')        # label: "Username"
    password = TextField(u'密码')      # label: "Password"
    type_code = IntegerField(u'类型')  # label: "Type"
    submit = SubmitField(u'添加')      # label: "Add"
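
# --- Added example (not part of the original module) ---
# Minimal sketch of consuming AddUserForm in a Flask view; the app, route and
# template names here are illustrative, not from the original repo.
from flask import Flask, render_template, redirect, url_for

app = Flask(__name__)
app.config['SECRET_KEY'] = 'dev'  # required by FlaskForm for CSRF protection

@app.route('/users/add', methods=['GET', 'POST'])
def add_user():
    form = AddUserForm()
    if form.validate_on_submit():
        # ... persist form.name.data / form.password.data / form.type_code.data ...
        return redirect(url_for('add_user'))
    return render_template('add_user.html', form=form)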
# ===========================================================================
# Source: shiyanlou_cs354-127b99c086/minecloud/forms.py
# (repo: tongxindao/shiyanlou, license: apache-2.0)
# ===========================================================================
"""
Detect a cycle in a linked list.
Note that the head pointer may be 'None' if the list is empty.
A Node is defined as:
class Node(object):
def __init__(self, data = None, next_node = None):
self.data = data
self.next = next_node
"""
def has_cycle(head):
seen = set()
while head.next:
if head in seen:
return True
seen.add(head)
head = head.next
return False
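
# --- Added example (not part of the original solution) ---
# The set-based approach above uses O(n) extra space. A constant-space
# alternative is Floyd's tortoise-and-hare; a sketch under the same Node
# interface assumed by the docstring:
def has_cycle_floyd(head):
    slow = fast = head
    while fast and fast.next:
        slow = slow.next
        fast = fast.next.next
        if slow is fast:
            return True
    return False

class _Node(object):  # minimal stand-in matching the docstring's Node
    def __init__(self, data=None, next_node=None):
        self.data = data
        self.next = next_node

# a = _Node(1); b = _Node(2, a); a.next = b  ->  has_cycle_floyd(a) is True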
# ===========================================================================
# Source: python/ctci-linked-list-cycle.py
# (repo: rootulp/hackerrank, license: mit)
# ===========================================================================
"""
numpy.ma : a package to handle missing or invalid values.
This package was initially written for numarray by Paul F. Dubois
at Lawrence Livermore National Laboratory.
In 2006, the package was completely rewritten by Pierre Gerard-Marchant
(University of Georgia) to make the MaskedArray class a subclass of ndarray,
and to improve support of structured arrays.
Copyright 1999, 2000, 2001 Regents of the University of California.
Released for unlimited redistribution.
* Adapted for numpy_core 2005 by Travis Oliphant and (mainly) Paul Dubois.
* Subclassing of the base `ndarray` 2006 by Pierre Gerard-Marchant
(pgmdevlist_AT_gmail_DOT_com)
* Improvements suggested by Reggie Dugard (reggie_AT_merfinllc_DOT_com)
.. moduleauthor:: Pierre Gerard-Marchant
"""
# pylint: disable-msg=E1002
from __future__ import division, absolute_import, print_function
import sys
import operator
import warnings
import textwrap
import re
from functools import reduce
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
import numpy as np
import numpy.core.umath as umath
import numpy.core.numerictypes as ntypes
from numpy import ndarray, amax, amin, iscomplexobj, bool_, _NoValue
from numpy import array as narray
from numpy.lib.function_base import angle
from numpy.compat import (
getargspec, formatargspec, long, basestring, unicode, bytes
)
from numpy import expand_dims as n_expand_dims
from numpy.core.multiarray import normalize_axis_index
from numpy.core.numeric import normalize_axis_tuple
if sys.version_info[0] >= 3:
import pickle
else:
import cPickle as pickle
__all__ = [
    'MAError', 'MaskError', 'MaskType', 'MaskedArray', 'abs', 'absolute',
    'add', 'all', 'allclose', 'allequal', 'alltrue', 'amax', 'amin',
    'angle', 'anom', 'anomalies', 'any', 'append', 'arange', 'arccos',
    'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh',
    'argmax', 'argmin', 'argsort', 'around', 'array', 'asanyarray',
    'asarray', 'bitwise_and', 'bitwise_or', 'bitwise_xor', 'bool_', 'ceil',
    'choose', 'clip', 'common_fill_value', 'compress', 'compressed',
    'concatenate', 'conjugate', 'convolve', 'copy', 'correlate', 'cos', 'cosh',
    'count', 'cumprod', 'cumsum', 'default_fill_value', 'diag', 'diagonal',
    'diff', 'divide', 'dump', 'dumps', 'empty', 'empty_like', 'equal', 'exp',
    'expand_dims', 'fabs', 'filled', 'fix_invalid', 'flatten_mask',
    'flatten_structured_array', 'floor', 'floor_divide', 'fmod',
    'frombuffer', 'fromflex', 'fromfunction', 'getdata', 'getmask',
    'getmaskarray', 'greater', 'greater_equal', 'harden_mask', 'hypot',
    'identity', 'ids', 'indices', 'inner', 'innerproduct', 'isMA',
    'isMaskedArray', 'is_mask', 'is_masked', 'isarray', 'left_shift',
    'less', 'less_equal', 'load', 'loads', 'log', 'log10', 'log2',
    'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'make_mask',
    'make_mask_descr', 'make_mask_none', 'mask_or', 'masked',
    'masked_array', 'masked_equal', 'masked_greater',
    'masked_greater_equal', 'masked_inside', 'masked_invalid',
    'masked_less', 'masked_less_equal', 'masked_not_equal',
    'masked_object', 'masked_outside', 'masked_print_option',
    'masked_singleton', 'masked_values', 'masked_where', 'max', 'maximum',
    'maximum_fill_value', 'mean', 'min', 'minimum', 'minimum_fill_value',
    'mod', 'multiply', 'mvoid', 'ndim', 'negative', 'nomask', 'nonzero',
    'not_equal', 'ones', 'outer', 'outerproduct', 'power', 'prod',
    'product', 'ptp', 'put', 'putmask', 'rank', 'ravel', 'remainder',
    'repeat', 'reshape', 'resize', 'right_shift', 'round', 'round_',
    'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'soften_mask',
    'sometrue', 'sort', 'sqrt', 'squeeze', 'std', 'subtract', 'sum',
    'swapaxes', 'take', 'tan', 'tanh', 'trace', 'transpose', 'true_divide',
    'var', 'where', 'zeros',
    ]

MaskType = np.bool_
nomask = MaskType(0)
class MaskedArrayFutureWarning(FutureWarning):
    pass


def _deprecate_argsort_axis(arr):
    """
    Adjust the axis passed to argsort, warning if necessary.

    Parameters
    ----------
    arr
        The array which argsort was called on.

    np.ma.argsort has a long-term bug where the default of the axis argument
    is wrong (gh-8701), which now must be kept for backwards compatibility.
    Thankfully, this only makes a difference when arrays are 2- or more-
    dimensional, so we only need a warning then.
    """
    if arr.ndim <= 1:
        # no warning needed - but switch to -1 anyway, to avoid surprising
        # subclasses, which are more likely to implement scalar axes.
        return -1
    else:
        # 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default
        warnings.warn(
            "In the future the default for argsort will be axis=-1, not the "
            "current None, to match its documentation and np.argsort. "
            "Explicitly pass -1 or None to silence this warning.",
            MaskedArrayFutureWarning, stacklevel=3)
        return None


def doc_note(initialdoc, note):
    """
    Adds a Notes section to an existing docstring.
    """
    if initialdoc is None:
        return
    if note is None:
        return initialdoc

    notesplit = re.split(r'\n\s*?Notes\n\s*?-----', initialdoc)

    notedoc = """\
Notes
    -----
    %s""" % note

    if len(notesplit) > 1:
        notedoc = '\n\n    ' + notedoc + '\n'

    return ''.join(notesplit[:1] + [notedoc] + notesplit[1:])
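
# --- Added example (not part of numpy) ---
# Hypothetical illustration of doc_note: it appends (or merges) a Notes
# section into a docstring.
def _demo_doc_note():
    doc = "Do something.\n"
    # returns the original docstring followed by a "Notes / -----" section
    # containing the supplied note text
    return doc_note(doc, "This function is illustrative only.")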
def get_object_signature(obj):
    """
    Get the signature from obj.

    """
    try:
        sig = formatargspec(*getargspec(obj))
    except TypeError:
        sig = ''
    return sig


###############################################################################
#                              Exceptions                                     #
###############################################################################


class MAError(Exception):
    """
    Class for masked array related errors.

    """
    pass


class MaskError(MAError):
    """
    Class for mask related errors.

    """
    pass


###############################################################################
#                           Filling options                                   #
###############################################################################


# b: boolean - c: complex - f: floats - i: integer - O: object - S: string
default_filler = {'b': True,
                  'c': 1.e20 + 0.0j,
                  'f': 1.e20,
                  'i': 999999,
                  'O': '?',
                  'S': b'N/A',
                  'u': 999999,
                  'V': b'???',
                  'U': u'N/A'
                  }

# Add datetime64 and timedelta64 types
for v in ["Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps",
          "fs", "as"]:
    default_filler["M8[" + v + "]"] = np.datetime64("NaT", v)
    default_filler["m8[" + v + "]"] = np.timedelta64("NaT", v)

max_filler = ntypes._minvals
max_filler.update([(k, -np.inf) for k in [np.float32, np.float64]])
min_filler = ntypes._maxvals
min_filler.update([(k, +np.inf) for k in [np.float32, np.float64]])
if 'float128' in ntypes.typeDict:
    max_filler.update([(np.float128, -np.inf)])
    min_filler.update([(np.float128, +np.inf)])
def _recursive_fill_value(dtype, f):
    """
    Recursively produce a fill value for `dtype`, calling f on scalar dtypes.
    """
    if dtype.names:
        vals = tuple(_recursive_fill_value(dtype[name], f) for name in dtype.names)
        return np.array(vals, dtype=dtype)[()]  # decay to void scalar from 0d
    elif dtype.subdtype:
        subtype, shape = dtype.subdtype
        subval = _recursive_fill_value(subtype, f)
        return np.full(shape, subval)
    else:
        return f(dtype)


def _get_dtype_of(obj):
    """ Convert the argument for *_fill_value into a dtype """
    if isinstance(obj, np.dtype):
        return obj
    elif hasattr(obj, 'dtype'):
        return obj.dtype
    else:
        return np.asanyarray(obj).dtype


def default_fill_value(obj):
    """
    Return the default fill value for the argument object.

    The default filling value depends on the datatype of the input
    array or the type of the input scalar:

       ========  ========
       datatype  default
       ========  ========
       bool      True
       int       999999
       float     1.e20
       complex   1.e20+0j
       object    '?'
       string    'N/A'
       ========  ========

    For structured types, a structured scalar is returned, with each field the
    default fill value for its type.

    For subarray types, the fill value is an array of the same size containing
    the default scalar fill value.

    Parameters
    ----------
    obj : ndarray, dtype or scalar
        The array data-type or scalar for which the default fill value
        is returned.

    Returns
    -------
    fill_value : scalar
        The default fill value.

    Examples
    --------
    >>> np.ma.default_fill_value(1)
    999999
    >>> np.ma.default_fill_value(np.array([1.1, 2., np.pi]))
    1e+20
    >>> np.ma.default_fill_value(np.dtype(complex))
    (1e+20+0j)

    """
    def _scalar_fill_value(dtype):
        if dtype.kind in 'Mm':
            return default_filler.get(dtype.str[1:], '?')
        else:
            return default_filler.get(dtype.kind, '?')

    dtype = _get_dtype_of(obj)
    return _recursive_fill_value(dtype, _scalar_fill_value)
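
# --- Added example (not part of numpy) ---
# Illustration of the structured-dtype behaviour described above: each field
# gets its own default fill value.
def _demo_structured_default_fill():
    dt = np.dtype([('x', float), ('n', int)])
    return default_fill_value(dt)  # -> (1e+20, 999999) as a structured scalar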
def _extremum_fill_value(obj, extremum, extremum_name):

    def _scalar_fill_value(dtype):
        try:
            return extremum[dtype]
        except KeyError:
            raise TypeError(
                "Unsuitable type {} for calculating {}."
                .format(dtype, extremum_name)
            )

    dtype = _get_dtype_of(obj)
    return _recursive_fill_value(dtype, _scalar_fill_value)


def minimum_fill_value(obj):
    """
    Return the maximum value that can be represented by the dtype of an object.

    This function is useful for calculating a fill value suitable for
    taking the minimum of an array with a given dtype.

    Parameters
    ----------
    obj : ndarray, dtype or scalar
        An object that can be queried for its numeric type.

    Returns
    -------
    val : scalar
        The maximum representable value.

    Raises
    ------
    TypeError
        If `obj` isn't a suitable numeric type.

    See Also
    --------
    maximum_fill_value : The inverse function.
    set_fill_value : Set the filling value of a masked array.
    MaskedArray.fill_value : Return current fill value.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = np.int8()
    >>> ma.minimum_fill_value(a)
    127
    >>> a = np.int32()
    >>> ma.minimum_fill_value(a)
    2147483647

    An array of numeric data can also be passed.

    >>> a = np.array([1, 2, 3], dtype=np.int8)
    >>> ma.minimum_fill_value(a)
    127
    >>> a = np.array([1, 2, 3], dtype=np.float32)
    >>> ma.minimum_fill_value(a)
    inf

    """
    return _extremum_fill_value(obj, min_filler, "minimum")


def maximum_fill_value(obj):
    """
    Return the minimum value that can be represented by the dtype of an object.

    This function is useful for calculating a fill value suitable for
    taking the maximum of an array with a given dtype.

    Parameters
    ----------
    obj : ndarray, dtype or scalar
        An object that can be queried for its numeric type.

    Returns
    -------
    val : scalar
        The minimum representable value.

    Raises
    ------
    TypeError
        If `obj` isn't a suitable numeric type.

    See Also
    --------
    minimum_fill_value : The inverse function.
    set_fill_value : Set the filling value of a masked array.
    MaskedArray.fill_value : Return current fill value.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = np.int8()
    >>> ma.maximum_fill_value(a)
    -128
    >>> a = np.int32()
    >>> ma.maximum_fill_value(a)
    -2147483648

    An array of numeric data can also be passed.

    >>> a = np.array([1, 2, 3], dtype=np.int8)
    >>> ma.maximum_fill_value(a)
    -128
    >>> a = np.array([1, 2, 3], dtype=np.float32)
    >>> ma.maximum_fill_value(a)
    -inf

    """
    return _extremum_fill_value(obj, max_filler, "maximum")


def _recursive_set_fill_value(fillvalue, dt):
    """
    Create a fill value for a structured dtype.

    Parameters
    ----------
    fillvalue : scalar or array_like
        Scalar or array representing the fill value. If it is of shorter
        length than the number of fields in dt, it will be resized.
    dt : dtype
        The structured dtype for which to create the fill value.

    Returns
    -------
    val : tuple
        A tuple of values corresponding to the structured fill value.

    """
    fillvalue = np.resize(fillvalue, len(dt.names))
    output_value = []
    for (fval, name) in zip(fillvalue, dt.names):
        cdtype = dt[name]
        if cdtype.subdtype:
            cdtype = cdtype.subdtype[0]

        if cdtype.names:
            output_value.append(tuple(_recursive_set_fill_value(fval, cdtype)))
        else:
            output_value.append(np.array(fval, dtype=cdtype).item())
    return tuple(output_value)
def _check_fill_value(fill_value, ndtype):
    """
    Private function validating the given `fill_value` for the given dtype.

    If fill_value is None, it is set to the default corresponding to the dtype.

    If fill_value is not None, its value is forced to the given dtype.

    The result is always a 0d array.

    """
    ndtype = np.dtype(ndtype)
    fields = ndtype.fields
    if fill_value is None:
        fill_value = default_fill_value(ndtype)
    elif fields:
        fdtype = [(_[0], _[1]) for _ in ndtype.descr]
        if isinstance(fill_value, (ndarray, np.void)):
            try:
                fill_value = np.array(fill_value, copy=False, dtype=fdtype)
            except ValueError:
                err_msg = "Unable to transform %s to dtype %s"
                raise ValueError(err_msg % (fill_value, fdtype))
        else:
            fill_value = np.asarray(fill_value, dtype=object)
            fill_value = np.array(_recursive_set_fill_value(fill_value, ndtype),
                                  dtype=ndtype)
    else:
        if isinstance(fill_value, basestring) and (ndtype.char not in 'OSVU'):
            # Note this check doesn't work if fill_value is not a scalar
            err_msg = "Cannot set fill value of string with array of dtype %s"
            raise TypeError(err_msg % ndtype)
        else:
            # In case we want to convert 1e20 to int.
            # Also in case of converting string arrays.
            try:
                fill_value = np.array(fill_value, copy=False, dtype=ndtype)
            except (OverflowError, ValueError):
                # Raise TypeError instead of OverflowError or ValueError.
                # OverflowError is seldom used, and the real problem here is
                # that the passed fill_value is not compatible with the ndtype.
                err_msg = "Cannot convert fill_value %s to dtype %s"
                raise TypeError(err_msg % (fill_value, ndtype))
    return np.array(fill_value)


def set_fill_value(a, fill_value):
    """
    Set the filling value of a, if a is a masked array.

    This function changes the fill value of the masked array `a` in place.
    If `a` is not a masked array, the function returns silently, without
    doing anything.

    Parameters
    ----------
    a : array_like
        Input array.
    fill_value : dtype
        Filling value. A consistency test is performed to make sure
        the value is compatible with the dtype of `a`.

    Returns
    -------
    None
        Nothing returned by this function.

    See Also
    --------
    maximum_fill_value : Return the default fill value for a dtype.
    MaskedArray.fill_value : Return current fill value.
    MaskedArray.set_fill_value : Equivalent method.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = np.arange(5)
    >>> a
    array([0, 1, 2, 3, 4])
    >>> a = ma.masked_where(a < 3, a)
    >>> a
    masked_array(data = [-- -- -- 3 4],
          mask = [ True  True  True False False],
          fill_value=999999)
    >>> ma.set_fill_value(a, -999)
    >>> a
    masked_array(data = [-- -- -- 3 4],
          mask = [ True  True  True False False],
          fill_value=-999)

    Nothing happens if `a` is not a masked array.

    >>> a = range(5)
    >>> a
    [0, 1, 2, 3, 4]
    >>> ma.set_fill_value(a, 100)
    >>> a
    [0, 1, 2, 3, 4]
    >>> a = np.arange(5)
    >>> a
    array([0, 1, 2, 3, 4])
    >>> ma.set_fill_value(a, 100)
    >>> a
    array([0, 1, 2, 3, 4])

    """
    if isinstance(a, MaskedArray):
        a.set_fill_value(fill_value)
    return


def get_fill_value(a):
    """
    Return the filling value of a, if any.  Otherwise, returns the
    default filling value for that type.

    """
    if isinstance(a, MaskedArray):
        result = a.fill_value
    else:
        result = default_fill_value(a)
    return result
def common_fill_value(a, b):
    """
    Return the common filling value of two masked arrays, if any.

    If ``a.fill_value == b.fill_value``, return the fill value,
    otherwise return None.

    Parameters
    ----------
    a, b : MaskedArray
        The masked arrays for which to compare fill values.

    Returns
    -------
    fill_value : scalar or None
        The common fill value, or None.

    Examples
    --------
    >>> x = np.ma.array([0, 1.], fill_value=3)
    >>> y = np.ma.array([0, 1.], fill_value=3)
    >>> np.ma.common_fill_value(x, y)
    3.0

    """
    t1 = get_fill_value(a)
    t2 = get_fill_value(b)
    if t1 == t2:
        return t1
    return None


def filled(a, fill_value=None):
    """
    Return input as an array with masked data replaced by a fill value.

    If `a` is not a `MaskedArray`, `a` itself is returned.
    If `a` is a `MaskedArray` and `fill_value` is None, `fill_value` is set to
    ``a.fill_value``.

    Parameters
    ----------
    a : MaskedArray or array_like
        An input object.
    fill_value : scalar, optional
        Filling value. Default is None.

    Returns
    -------
    a : ndarray
        The filled array.

    See Also
    --------
    compressed

    Examples
    --------
    >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0],
    ...                                                   [1, 0, 0],
    ...                                                   [0, 0, 0]])
    >>> x.filled()
    array([[999999,      1,      2],
           [999999,      4,      5],
           [     6,      7,      8]])

    """
    if hasattr(a, 'filled'):
        return a.filled(fill_value)

    elif isinstance(a, ndarray):
        # Should we check for contiguity ? and a.flags['CONTIGUOUS']:
        return a
    elif isinstance(a, dict):
        return np.array(a, 'O')
    else:
        return np.array(a)


def get_masked_subclass(*arrays):
    """
    Return the youngest subclass of MaskedArray from a list of (masked) arrays.

    In case of siblings, the first listed takes over.

    """
    if len(arrays) == 1:
        arr = arrays[0]
        if isinstance(arr, MaskedArray):
            rcls = type(arr)
        else:
            rcls = MaskedArray
    else:
        arrcls = [type(a) for a in arrays]
        rcls = arrcls[0]
        if not issubclass(rcls, MaskedArray):
            rcls = MaskedArray
        for cls in arrcls[1:]:
            if issubclass(cls, rcls):
                rcls = cls
    # Don't return MaskedConstant as result: revert to MaskedArray
    if rcls.__name__ == 'MaskedConstant':
        return MaskedArray
    return rcls


def getdata(a, subok=True):
    """
    Return the data of a masked array as an ndarray.

    Return the data of `a` (if any) as an ndarray if `a` is a ``MaskedArray``,
    else return `a` as a ndarray or subclass (depending on `subok`) if not.

    Parameters
    ----------
    a : array_like
        Input ``MaskedArray``, alternatively a ndarray or a subclass thereof.
    subok : bool
        Whether to force the output to be a `pure` ndarray (False) or to
        return a subclass of ndarray if appropriate (True, default).

    See Also
    --------
    getmask : Return the mask of a masked array, or nomask.
    getmaskarray : Return the mask of a masked array, or full array of False.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = ma.masked_equal([[1,2],[3,4]], 2)
    >>> a
    masked_array(data =
     [[1 --]
     [3 4]],
          mask =
     [[False  True]
     [False False]],
          fill_value=999999)
    >>> ma.getdata(a)
    array([[1, 2],
           [3, 4]])

    Equivalently use the ``MaskedArray`` `data` attribute.

    >>> a.data
    array([[1, 2],
           [3, 4]])

    """
    try:
        data = a._data
    except AttributeError:
        data = np.array(a, copy=False, subok=subok)
    if not subok:
        return data.view(ndarray)
    return data
get_data = getdata
def fix_invalid(a, mask=nomask, copy=True, fill_value=None):
    """
    Return input with invalid data masked and replaced by a fill value.

    Invalid data means values of `nan`, `inf`, etc.

    Parameters
    ----------
    a : array_like
        Input array, a (subclass of) ndarray.
    mask : sequence, optional
        Mask. Must be convertible to an array of booleans with the same
        shape as `data`. True indicates a masked (i.e. invalid) data.
    copy : bool, optional
        Whether to use a copy of `a` (True) or to fix `a` in place (False).
        Default is True.
    fill_value : scalar, optional
        Value used for fixing invalid data. Default is None, in which case
        the ``a.fill_value`` is used.

    Returns
    -------
    b : MaskedArray
        The input array with invalid entries fixed.

    Notes
    -----
    A copy is performed by default.

    Examples
    --------
    >>> x = np.ma.array([1., -1, np.nan, np.inf], mask=[1] + [0]*3)
    >>> x
    masked_array(data = [-- -1.0 nan inf],
                 mask = [ True False False False],
           fill_value = 1e+20)
    >>> np.ma.fix_invalid(x)
    masked_array(data = [-- -1.0 -- --],
                 mask = [ True False  True  True],
           fill_value = 1e+20)

    >>> fixed = np.ma.fix_invalid(x)
    >>> fixed.data
    array([  1.00000000e+00,  -1.00000000e+00,   1.00000000e+20,
             1.00000000e+20])
    >>> x.data
    array([  1.,  -1.,  NaN,  Inf])

    """
    a = masked_array(a, copy=copy, mask=mask, subok=True)
    invalid = np.logical_not(np.isfinite(a._data))
    if not invalid.any():
        return a
    a._mask |= invalid
    if fill_value is None:
        fill_value = a.fill_value
    a._data[invalid] = fill_value
    return a


###############################################################################
#                                  Ufuncs                                     #
###############################################################################


ufunc_domain = {}
ufunc_fills = {}
class _DomainCheckInterval(object):
    """
    Define a valid interval, so that :

    ``domain_check_interval(a,b)(x) == True`` where
    ``x < a`` or ``x > b``.

    """

    def __init__(self, a, b):
        "domain_check_interval(a,b)(x) = true where x < a or x > b"
        if a > b:
            (a, b) = (b, a)
        self.a = a
        self.b = b

    def __call__(self, x):
        "Execute the call behavior."
        # nans at masked positions cause RuntimeWarnings, even though
        # they are masked. To avoid this we suppress warnings.
        with np.errstate(invalid='ignore'):
            return umath.logical_or(umath.greater(x, self.b),
                                    umath.less(x, self.a))


class _DomainTan(object):
    """
    Define a valid interval for the `tan` function, so that:

    ``domain_tan(eps) = True`` where ``abs(cos(x)) < eps``

    """

    def __init__(self, eps):
        "domain_tan(eps) = true where abs(cos(x)) < eps)"
        self.eps = eps

    def __call__(self, x):
        "Executes the call behavior."
        with np.errstate(invalid='ignore'):
            return umath.less(umath.absolute(umath.cos(x)), self.eps)


class _DomainSafeDivide(object):
    """
    Define a domain for safe division.

    """

    def __init__(self, tolerance=None):
        self.tolerance = tolerance

    def __call__(self, a, b):
        # Delay the selection of the tolerance to here in order to reduce numpy
        # import times. The calculation of these parameters is a substantial
        # component of numpy's import time.
        if self.tolerance is None:
            self.tolerance = np.finfo(float).tiny
        # don't call ma ufuncs from __array_wrap__ which would fail for scalars
        a, b = np.asarray(a), np.asarray(b)
        with np.errstate(invalid='ignore'):
            return umath.absolute(a) * self.tolerance >= umath.absolute(b)


class _DomainGreater(object):
    """
    DomainGreater(v)(x) is True where x <= v.

    """

    def __init__(self, critical_value):
        "DomainGreater(v)(x) = true where x <= v"
        self.critical_value = critical_value

    def __call__(self, x):
        "Executes the call behavior."
        with np.errstate(invalid='ignore'):
            return umath.less_equal(x, self.critical_value)


class _DomainGreaterEqual(object):
    """
    DomainGreaterEqual(v)(x) is True where x < v.

    """

    def __init__(self, critical_value):
        "DomainGreaterEqual(v)(x) = true where x < v"
        self.critical_value = critical_value

    def __call__(self, x):
        "Executes the call behavior."
        with np.errstate(invalid='ignore'):
            return umath.less(x, self.critical_value)


class _MaskedUFunc(object):
    def __init__(self, ufunc):
        self.f = ufunc
        self.__doc__ = ufunc.__doc__
        self.__name__ = ufunc.__name__

    def __str__(self):
        return "Masked version of {}".format(self.f)
class _MaskedUnaryOperation(_MaskedUFunc):
    """
    Defines masked version of unary operations, where invalid values are
    pre-masked.

    Parameters
    ----------
    mufunc : callable
        The function for which to define a masked version. Made available
        as ``_MaskedUnaryOperation.f``.
    fill : scalar, optional
        Filling value, default is 0.
    domain : class instance
        Domain for the function. Should be one of the ``_Domain*``
        classes. Default is None.

    """

    def __init__(self, mufunc, fill=0, domain=None):
        super(_MaskedUnaryOperation, self).__init__(mufunc)
        self.fill = fill
        self.domain = domain
        ufunc_domain[mufunc] = domain
        ufunc_fills[mufunc] = fill

    def __call__(self, a, *args, **kwargs):
        """
        Execute the call behavior.

        """
        d = getdata(a)
        # Deal with domain
        if self.domain is not None:
            # Case 1.1. : Domained function
            # nans at masked positions cause RuntimeWarnings, even though
            # they are masked. To avoid this we suppress warnings.
            with np.errstate(divide='ignore', invalid='ignore'):
                result = self.f(d, *args, **kwargs)
            # Make a mask
            m = ~umath.isfinite(result)
            m |= self.domain(d)
            m |= getmask(a)
        else:
            # Case 1.2. : Function without a domain
            # Get the result and the mask
            with np.errstate(divide='ignore', invalid='ignore'):
                result = self.f(d, *args, **kwargs)
            m = getmask(a)

        if not result.ndim:
            # Case 2.1. : The result is scalar
            if m:
                return masked
            return result

        if m is not nomask:
            # Case 2.2. The result is an array
            # We need to fill the invalid data back w/ the input. Now,
            # that's plain silly: in C, we would just skip the element and
            # keep the original, but we do have to do it that way in Python.
            # In case result has a lower dtype than the inputs (as in
            # equal)
            try:
                np.copyto(result, d, where=m)
            except TypeError:
                pass
        # Transform to a (subclass of) MaskedArray
        masked_result = result.view(get_masked_subclass(a))
        masked_result._mask = m
        masked_result._update_from(a)
        return masked_result
class _MaskedBinaryOperation(_MaskedUFunc):
    """
    Define masked version of binary operations, where invalid
    values are pre-masked.

    Parameters
    ----------
    mbfunc : function
        The function for which to define a masked version. Made available
        as ``_MaskedBinaryOperation.f``.
    domain : class instance
        Default domain for the function. Should be one of the ``_Domain*``
        classes. Default is None.
    fillx : scalar, optional
        Filling value for the first argument, default is 0.
    filly : scalar, optional
        Filling value for the second argument, default is 0.

    """

    def __init__(self, mbfunc, fillx=0, filly=0):
        """
        abfunc(fillx, filly) must be defined.

        abfunc(x, filly) = x for all x to enable reduce.

        """
        super(_MaskedBinaryOperation, self).__init__(mbfunc)
        self.fillx = fillx
        self.filly = filly
        ufunc_domain[mbfunc] = None
        ufunc_fills[mbfunc] = (fillx, filly)

    def __call__(self, a, b, *args, **kwargs):
        """
        Execute the call behavior.

        """
        # Get the data, as ndarray
        (da, db) = (getdata(a), getdata(b))
        # Get the result
        with np.errstate(divide='ignore', invalid='ignore'):
            result = self.f(da, db, *args, **kwargs)
        # Get the mask for the result
        (ma, mb) = (getmask(a), getmask(b))
        if ma is nomask:
            if mb is nomask:
                m = nomask
            else:
                m = umath.logical_or(getmaskarray(a), mb)
        elif mb is nomask:
            m = umath.logical_or(ma, getmaskarray(b))
        else:
            m = umath.logical_or(ma, mb)

        # Case 1. : scalar
        if not result.ndim:
            if m:
                return masked
            return result

        # Case 2. : array
        # Revert result to da where masked
        if m is not nomask and m.any():
            # any errors, just abort; impossible to guarantee masked values
            try:
                np.copyto(result, da, casting='unsafe', where=m)
            except Exception:
                pass

        # Transforms to a (subclass of) MaskedArray
        masked_result = result.view(get_masked_subclass(a, b))
        masked_result._mask = m
        if isinstance(a, MaskedArray):
            masked_result._update_from(a)
        elif isinstance(b, MaskedArray):
            masked_result._update_from(b)
        return masked_result

    def reduce(self, target, axis=0, dtype=None):
        """
        Reduce `target` along the given `axis`.

        """
        tclass = get_masked_subclass(target)
        m = getmask(target)
        t = filled(target, self.filly)
        if t.shape == ():
            t = t.reshape(1)
            if m is not nomask:
                m = make_mask(m, copy=1)
                m.shape = (1,)

        if m is nomask:
            tr = self.f.reduce(t, axis)
            mr = nomask
        else:
            tr = self.f.reduce(t, axis, dtype=dtype or t.dtype)
            mr = umath.logical_and.reduce(m, axis)

        if not tr.shape:
            if mr:
                return masked
            else:
                return tr
        masked_tr = tr.view(tclass)
        masked_tr._mask = mr
        return masked_tr

    def outer(self, a, b):
        """
        Return the function applied to the outer product of a and b.

        """
        (da, db) = (getdata(a), getdata(b))
        d = self.f.outer(da, db)
        ma = getmask(a)
        mb = getmask(b)
        if ma is nomask and mb is nomask:
            m = nomask
        else:
            ma = getmaskarray(a)
            mb = getmaskarray(b)
            m = umath.logical_or.outer(ma, mb)
        if (not m.ndim) and m:
            return masked
        if m is not nomask:
            np.copyto(d, da, where=m)
        if not d.shape:
            return d
        masked_d = d.view(get_masked_subclass(a, b))
        masked_d._mask = m
        return masked_d

    def accumulate(self, target, axis=0):
        """Accumulate `target` along `axis` after filling with y fill
        value.

        """
        tclass = get_masked_subclass(target)
        t = filled(target, self.filly)
        result = self.f.accumulate(t, axis)
        masked_result = result.view(tclass)
        return masked_result
class _DomainedBinaryOperation(_MaskedUFunc):
    """
    Define binary operations that have a domain, like divide.

    They have no reduce, outer or accumulate.

    Parameters
    ----------
    mbfunc : function
        The function for which to define a masked version. Made available
        as ``_DomainedBinaryOperation.f``.
    domain : class instance
        Default domain for the function. Should be one of the ``_Domain*``
        classes.
    fillx : scalar, optional
        Filling value for the first argument, default is 0.
    filly : scalar, optional
        Filling value for the second argument, default is 0.

    """

    def __init__(self, dbfunc, domain, fillx=0, filly=0):
        """abfunc(fillx, filly) must be defined.
           abfunc(x, filly) = x for all x to enable reduce.
        """
        super(_DomainedBinaryOperation, self).__init__(dbfunc)
        self.domain = domain
        self.fillx = fillx
        self.filly = filly
        ufunc_domain[dbfunc] = domain
        ufunc_fills[dbfunc] = (fillx, filly)

    def __call__(self, a, b, *args, **kwargs):
        "Execute the call behavior."
        # Get the data
        (da, db) = (getdata(a), getdata(b))
        # Get the result
        with np.errstate(divide='ignore', invalid='ignore'):
            result = self.f(da, db, *args, **kwargs)
        # Get the mask as a combination of the source masks and invalid
        m = ~umath.isfinite(result)
        m |= getmask(a)
        m |= getmask(b)
        # Apply the domain
        domain = ufunc_domain.get(self.f, None)
        if domain is not None:
            m |= domain(da, db)
        # Take care of the scalar case first
        if not m.ndim:
            if m:
                return masked
            else:
                return result
        # When the mask is True, put back da if possible
        # any errors, just abort; impossible to guarantee masked values
        try:
            np.copyto(result, 0, casting='unsafe', where=m)
            # avoid using "*" since this may be overlaid
            masked_da = umath.multiply(m, da)
            # only add back if it can be cast safely
            if np.can_cast(masked_da.dtype, result.dtype, casting='safe'):
                result += masked_da
        except Exception:
            pass

        # Transforms to a (subclass of) MaskedArray
        masked_result = result.view(get_masked_subclass(a, b))
        masked_result._mask = m
        if isinstance(a, MaskedArray):
            masked_result._update_from(a)
        elif isinstance(b, MaskedArray):
            masked_result._update_from(b)
        return masked_result
# Unary ufuncs
exp = _MaskedUnaryOperation(umath.exp)
conjugate = _MaskedUnaryOperation(umath.conjugate)
sin = _MaskedUnaryOperation(umath.sin)
cos = _MaskedUnaryOperation(umath.cos)
tan = _MaskedUnaryOperation(umath.tan)
arctan = _MaskedUnaryOperation(umath.arctan)
arcsinh = _MaskedUnaryOperation(umath.arcsinh)
sinh = _MaskedUnaryOperation(umath.sinh)
cosh = _MaskedUnaryOperation(umath.cosh)
tanh = _MaskedUnaryOperation(umath.tanh)
abs = absolute = _MaskedUnaryOperation(umath.absolute)
angle = _MaskedUnaryOperation(angle)  # from numpy.lib.function_base
fabs = _MaskedUnaryOperation(umath.fabs)
negative = _MaskedUnaryOperation(umath.negative)
floor = _MaskedUnaryOperation(umath.floor)
ceil = _MaskedUnaryOperation(umath.ceil)
around = _MaskedUnaryOperation(np.round_)
logical_not = _MaskedUnaryOperation(umath.logical_not)

# Domained unary ufuncs
sqrt = _MaskedUnaryOperation(umath.sqrt, 0.0,
                             _DomainGreaterEqual(0.0))
log = _MaskedUnaryOperation(umath.log, 1.0,
                            _DomainGreater(0.0))
log2 = _MaskedUnaryOperation(umath.log2, 1.0,
                             _DomainGreater(0.0))
log10 = _MaskedUnaryOperation(umath.log10, 1.0,
                              _DomainGreater(0.0))
tan = _MaskedUnaryOperation(umath.tan, 0.0,
                            _DomainTan(1e-35))
arcsin = _MaskedUnaryOperation(umath.arcsin, 0.0,
                               _DomainCheckInterval(-1.0, 1.0))
arccos = _MaskedUnaryOperation(umath.arccos, 0.0,
                               _DomainCheckInterval(-1.0, 1.0))
arccosh = _MaskedUnaryOperation(umath.arccosh, 1.0,
                                _DomainGreaterEqual(1.0))
arctanh = _MaskedUnaryOperation(umath.arctanh, 0.0,
                                _DomainCheckInterval(-1.0 + 1e-15, 1.0 - 1e-15))
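
# --- Added example (not part of numpy) ---
# Minimal demo of the domain machinery above: inputs outside a ufunc's
# domain are masked instead of producing NaN/inf (helper name illustrative).
def _demo_domained_sqrt():
    out = sqrt(np.array([-1.0, 4.0]))
    # -1.0 fails _DomainGreaterEqual(0.0), so it comes back masked
    assert out.mask[0] and out[1] == 2.0
    return out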
# Binary ufuncs
add = _MaskedBinaryOperation(umath.add)
subtract = _MaskedBinaryOperation(umath.subtract)
multiply = _MaskedBinaryOperation(umath.multiply, 1, 1)
arctan2 = _MaskedBinaryOperation(umath.arctan2, 0.0, 1.0)
equal = _MaskedBinaryOperation(umath.equal)
equal.reduce = None
not_equal = _MaskedBinaryOperation(umath.not_equal)
not_equal.reduce = None
less_equal = _MaskedBinaryOperation(umath.less_equal)
less_equal.reduce = None
greater_equal = _MaskedBinaryOperation(umath.greater_equal)
greater_equal.reduce = None
less = _MaskedBinaryOperation(umath.less)
less.reduce = None
greater = _MaskedBinaryOperation(umath.greater)
greater.reduce = None
logical_and = _MaskedBinaryOperation(umath.logical_and)
alltrue = _MaskedBinaryOperation(umath.logical_and, 1, 1).reduce
logical_or = _MaskedBinaryOperation(umath.logical_or)
sometrue = logical_or.reduce
logical_xor = _MaskedBinaryOperation(umath.logical_xor)
bitwise_and = _MaskedBinaryOperation(umath.bitwise_and)
bitwise_or = _MaskedBinaryOperation(umath.bitwise_or)
bitwise_xor = _MaskedBinaryOperation(umath.bitwise_xor)
hypot = _MaskedBinaryOperation(umath.hypot)

# Domained binary ufuncs
divide = _DomainedBinaryOperation(umath.divide, _DomainSafeDivide(), 0, 1)
true_divide = _DomainedBinaryOperation(umath.true_divide,
                                       _DomainSafeDivide(), 0, 1)
floor_divide = _DomainedBinaryOperation(umath.floor_divide,
                                        _DomainSafeDivide(), 0, 1)
remainder = _DomainedBinaryOperation(umath.remainder,
                                     _DomainSafeDivide(), 0, 1)
fmod = _DomainedBinaryOperation(umath.fmod, _DomainSafeDivide(), 0, 1)
mod = _DomainedBinaryOperation(umath.mod, _DomainSafeDivide(), 0, 1)
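
# --- Added example (not part of numpy) ---
# Companion demo for the domained binary ops: division by zero falls outside
# _DomainSafeDivide, so the offending element is masked rather than inf.
def _demo_safe_divide():
    out = divide(np.array([1.0, 1.0]), np.array([0.0, 2.0]))
    assert out.mask[0] and out[1] == 0.5
    return out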
###############################################################################
#                        Mask creation functions                              #
###############################################################################


def _replace_dtype_fields_recursive(dtype, primitive_dtype):
    "Private function allowing recursion in _replace_dtype_fields."
    _recurse = _replace_dtype_fields_recursive

    # Do we have some name fields ?
    if dtype.names:
        descr = []
        for name in dtype.names:
            field = dtype.fields[name]
            if len(field) == 3:
                # Prepend the title to the name
                name = (field[-1], name)
            descr.append((name, _recurse(field[0], primitive_dtype)))
        new_dtype = np.dtype(descr)

    # Is this some kind of composite a la (float,2)
    elif dtype.subdtype:
        descr = list(dtype.subdtype)
        descr[0] = _recurse(dtype.subdtype[0], primitive_dtype)
        new_dtype = np.dtype(tuple(descr))

    # this is a primitive type, so do a direct replacement
    else:
        new_dtype = primitive_dtype

    # preserve identity of dtypes
    if new_dtype == dtype:
        new_dtype = dtype

    return new_dtype


def _replace_dtype_fields(dtype, primitive_dtype):
    """
    Construct a dtype description list from a given dtype.

    Returns a new dtype object, with all fields and subtypes in the given type
    recursively replaced with `primitive_dtype`.

    Arguments are coerced to dtypes first.
    """
    dtype = np.dtype(dtype)
    primitive_dtype = np.dtype(primitive_dtype)
    return _replace_dtype_fields_recursive(dtype, primitive_dtype)


def make_mask_descr(ndtype):
    """
    Construct a dtype description list from a given dtype.

    Returns a new dtype object, with the type of all fields in `ndtype` to a
    boolean type. Field names are not altered.

    Parameters
    ----------
    ndtype : dtype
        The dtype to convert.

    Returns
    -------
    result : dtype
        A dtype that looks like `ndtype`, the type of all fields is boolean.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> dtype = np.dtype({'names':['foo', 'bar'],
    ...                   'formats':[np.float32, int]})
    >>> dtype
    dtype([('foo', '<f4'), ('bar', '<i4')])
    >>> ma.make_mask_descr(dtype)
    dtype([('foo', '|b1'), ('bar', '|b1')])
    >>> ma.make_mask_descr(np.float32)
    dtype('bool')

    """
    return _replace_dtype_fields(ndtype, MaskType)
def getmask(a):
    """
    Return the mask of a masked array, or nomask.

    Return the mask of `a` as an ndarray if `a` is a `MaskedArray` and the
    mask is not `nomask`, else return `nomask`. To guarantee a full array
    of booleans of the same shape as a, use `getmaskarray`.

    Parameters
    ----------
    a : array_like
        Input `MaskedArray` for which the mask is required.

    See Also
    --------
    getdata : Return the data of a masked array as an ndarray.
    getmaskarray : Return the mask of a masked array, or full array of False.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = ma.masked_equal([[1,2],[3,4]], 2)
    >>> a
    masked_array(data =
     [[1 --]
     [3 4]],
          mask =
     [[False  True]
     [False False]],
          fill_value=999999)
    >>> ma.getmask(a)
    array([[False,  True],
           [False, False]])

    Equivalently use the `MaskedArray` `mask` attribute.

    >>> a.mask
    array([[False,  True],
           [False, False]])

    Result when mask == `nomask`

    >>> b = ma.masked_array([[1,2],[3,4]])
    >>> b
    masked_array(data =
     [[1 2]
     [3 4]],
          mask =
     False,
          fill_value=999999)
    >>> ma.nomask
    False
    >>> ma.getmask(b) == ma.nomask
    True
    >>> b.mask == ma.nomask
    True

    """
    return getattr(a, '_mask', nomask)


get_mask = getmask


def getmaskarray(arr):
    """
    Return the mask of a masked array, or full boolean array of False.

    Return the mask of `arr` as an ndarray if `arr` is a `MaskedArray` and
    the mask is not `nomask`, else return a full boolean array of False of
    the same shape as `arr`.

    Parameters
    ----------
    arr : array_like
        Input `MaskedArray` for which the mask is required.

    See Also
    --------
    getmask : Return the mask of a masked array, or nomask.
    getdata : Return the data of a masked array as an ndarray.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = ma.masked_equal([[1,2],[3,4]], 2)
    >>> a
    masked_array(data =
     [[1 --]
     [3 4]],
          mask =
     [[False  True]
     [False False]],
          fill_value=999999)
    >>> ma.getmaskarray(a)
    array([[False,  True],
           [False, False]])

    Result when mask == ``nomask``

    >>> b = ma.masked_array([[1,2],[3,4]])
    >>> b
    masked_array(data =
     [[1 2]
     [3 4]],
          mask =
     False,
          fill_value=999999)
    >>> ma.getmaskarray(b)
    array([[False, False],
           [False, False]])

    """
    mask = getmask(arr)
    if mask is nomask:
        mask = make_mask_none(np.shape(arr), getattr(arr, 'dtype', None))
    return mask
def is_mask(m):
    """
    Return True if m is a valid, standard mask.

    This function does not check the contents of the input, only that the
    type is MaskType. In particular, this function returns False if the
    mask has a flexible dtype.

    Parameters
    ----------
    m : array_like
        Array to test.

    Returns
    -------
    result : bool
        True if `m.dtype.type` is MaskType, False otherwise.

    See Also
    --------
    isMaskedArray : Test whether input is an instance of MaskedArray.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> m = ma.masked_equal([0, 1, 0, 2, 3], 0)
    >>> m
    masked_array(data = [-- 1 -- 2 3],
          mask = [ True False  True False False],
          fill_value=999999)
    >>> ma.is_mask(m)
    False
    >>> ma.is_mask(m.mask)
    True

    Input must be an ndarray (or have similar attributes)
    for it to be considered a valid mask.

    >>> m = [False, True, False]
    >>> ma.is_mask(m)
    False
    >>> m = np.array([False, True, False])
    >>> m
    array([False,  True, False])
    >>> ma.is_mask(m)
    True

    Arrays with complex dtypes don't return True.

    >>> dtype = np.dtype({'names':['monty', 'pithon'],
    ...                   'formats':[bool, bool]})
    >>> dtype
    dtype([('monty', '|b1'), ('pithon', '|b1')])
    >>> m = np.array([(True, False), (False, True), (True, False)],
    ...              dtype=dtype)
    >>> m
    array([(True, False), (False, True), (True, False)],
          dtype=[('monty', '|b1'), ('pithon', '|b1')])
    >>> ma.is_mask(m)
    False

    """
    try:
        return m.dtype.type is MaskType
    except AttributeError:
        return False


def _shrink_mask(m):
    """
    Shrink a mask to nomask if possible
    """
    if not m.dtype.names and not m.any():
        return nomask
    else:
        return m
def make_mask(m, copy=False, shrink=True, dtype=MaskType):
    """
    Create a boolean mask from an array.

    Return `m` as a boolean mask, creating a copy if necessary or requested.
    The function can accept any sequence that is convertible to integers,
    or ``nomask``.  Does not require that contents must be 0s and 1s, values
    of 0 are interpreted as False, everything else as True.

    Parameters
    ----------
    m : array_like
        Potential mask.
    copy : bool, optional
        Whether to return a copy of `m` (True) or `m` itself (False).
    shrink : bool, optional
        Whether to shrink `m` to ``nomask`` if all its values are False.
    dtype : dtype, optional
        Data-type of the output mask. By default, the output mask has a
        dtype of MaskType (bool). If the dtype is flexible, each field has
        a boolean dtype. This is ignored when `m` is ``nomask``, in which
        case ``nomask`` is always returned.

    Returns
    -------
    result : ndarray
        A boolean mask derived from `m`.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> m = [True, False, True, True]
    >>> ma.make_mask(m)
    array([ True, False,  True,  True])
    >>> m = [1, 0, 1, 1]
    >>> ma.make_mask(m)
    array([ True, False,  True,  True])
    >>> m = [1, 0, 2, -3]
    >>> ma.make_mask(m)
    array([ True, False,  True,  True])

    Effect of the `shrink` parameter.

    >>> m = np.zeros(4)
    >>> m
    array([ 0.,  0.,  0.,  0.])
    >>> ma.make_mask(m)
    False
    >>> ma.make_mask(m, shrink=False)
    array([False, False, False, False])

    Using a flexible `dtype`.

    >>> m = [1, 0, 1, 1]
    >>> n = [0, 1, 0, 0]
    >>> arr = []
    >>> for man, mouse in zip(m, n):
    ...     arr.append((man, mouse))
    >>> arr
    [(1, 0), (0, 1), (1, 0), (1, 0)]
    >>> dtype = np.dtype({'names':['man', 'mouse'],
    ...                   'formats':[int, int]})
    >>> arr = np.array(arr, dtype=dtype)
    >>> arr
    array([(1, 0), (0, 1), (1, 0), (1, 0)],
          dtype=[('man', '<i4'), ('mouse', '<i4')])
    >>> ma.make_mask(arr, dtype=dtype)
    array([(True, False), (False, True), (True, False), (True, False)],
          dtype=[('man', '|b1'), ('mouse', '|b1')])

    """
    if m is nomask:
        return nomask

    # Make sure the input dtype is valid.
    dtype = make_mask_descr(dtype)

    # legacy boolean special case: "existence of fields implies true"
    if isinstance(m, ndarray) and m.dtype.fields and dtype == np.bool_:
        return np.ones(m.shape, dtype=dtype)

    # Fill the mask in case there are missing data; turn it into an ndarray.
    result = np.array(filled(m, True), copy=copy, dtype=dtype, subok=True)
    # Bas les masques !
    if shrink:
        result = _shrink_mask(result)
    return result
def make_mask_none(newshape, dtype=None):
    """
    Return a boolean mask of the given shape, filled with False.

    This function returns a boolean ndarray with all entries False, that can
    be used in common mask manipulations. If a complex dtype is specified, the
    type of each field is converted to a boolean type.

    Parameters
    ----------
    newshape : tuple
        A tuple indicating the shape of the mask.
    dtype : {None, dtype}, optional
        If None, use a MaskType instance. Otherwise, use a new datatype with
        the same fields as `dtype`, converted to boolean types.

    Returns
    -------
    result : ndarray
        An ndarray of appropriate shape and dtype, filled with False.

    See Also
    --------
    make_mask : Create a boolean mask from an array.
    make_mask_descr : Construct a dtype description list from a given dtype.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> ma.make_mask_none((3,))
    array([False, False, False])

    Defining a more complex dtype.

    >>> dtype = np.dtype({'names':['foo', 'bar'],
    ...                   'formats':[np.float32, int]})
    >>> dtype
    dtype([('foo', '<f4'), ('bar', '<i4')])
    >>> ma.make_mask_none((3,), dtype=dtype)
    array([(False, False), (False, False), (False, False)],
          dtype=[('foo', '|b1'), ('bar', '|b1')])

    """
    if dtype is None:
        result = np.zeros(newshape, dtype=MaskType)
    else:
        result = np.zeros(newshape, dtype=make_mask_descr(dtype))
    return result
def mask_or(m1, m2, copy=False, shrink=True):
    """
    Combine two masks with the ``logical_or`` operator.

    The result may be a view on `m1` or `m2` if the other is `nomask`
    (i.e. False).

    Parameters
    ----------
    m1, m2 : array_like
        Input masks.
    copy : bool, optional
        If copy is False and one of the inputs is `nomask`, return a view
        of the other input mask. Defaults to False.
    shrink : bool, optional
        Whether to shrink the output to `nomask` if all its values are
        False. Defaults to True.

    Returns
    -------
    mask : output mask
        The result masks values that are masked in either `m1` or `m2`.

    Raises
    ------
    ValueError
        If `m1` and `m2` have different flexible dtypes.

    Examples
    --------
    >>> m1 = np.ma.make_mask([0, 1, 1, 0])
    >>> m2 = np.ma.make_mask([1, 0, 0, 0])
    >>> np.ma.mask_or(m1, m2)
    array([ True,  True,  True, False])

    """

    def _recursive_mask_or(m1, m2, newmask):
        names = m1.dtype.names
        for name in names:
            current1 = m1[name]
            if current1.dtype.names:
                _recursive_mask_or(current1, m2[name], newmask[name])
            else:
                umath.logical_or(current1, m2[name], newmask[name])
        return

    if (m1 is nomask) or (m1 is False):
        dtype = getattr(m2, 'dtype', MaskType)
        return make_mask(m2, copy=copy, shrink=shrink, dtype=dtype)
    if (m2 is nomask) or (m2 is False):
        dtype = getattr(m1, 'dtype', MaskType)
        return make_mask(m1, copy=copy, shrink=shrink, dtype=dtype)
    if m1 is m2 and is_mask(m1):
        return m1
    (dtype1, dtype2) = (getattr(m1, 'dtype', None), getattr(m2, 'dtype', None))
    if dtype1 != dtype2:
        raise ValueError("Incompatible dtypes '%s'<>'%s'" % (dtype1, dtype2))
    if dtype1.names:
        # Allocate an output mask array with the properly broadcast shape.
        newmask = np.empty(np.broadcast(m1, m2).shape, dtype1)
        _recursive_mask_or(m1, m2, newmask)
        return newmask
    return make_mask(umath.logical_or(m1, m2), copy=copy, shrink=shrink)
def flatten_mask(mask):
    """
    Returns a completely flattened version of the mask, where nested fields
    are collapsed.

    Parameters
    ----------
    mask : array_like
        Input array, which will be interpreted as booleans.

    Returns
    -------
    flattened_mask : ndarray of bools
        The flattened input.

    Examples
    --------
    >>> mask = np.array([0, 0, 1])
    >>> flatten_mask(mask)
    array([False, False,  True])

    >>> mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)])
    >>> flatten_mask(mask)
    array([False, False, False,  True])

    >>> mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])]
    >>> mask = np.array([(0, (0, 0)), (0, (0, 1))], dtype=mdtype)
    >>> flatten_mask(mask)
    array([False, False, False, False, False,  True])

    """

    def _flatmask(mask):
        "Flatten the mask and returns a (maybe nested) sequence of booleans."
        mnames = mask.dtype.names
        if mnames:
            return [flatten_mask(mask[name]) for name in mnames]
        else:
            return mask

    def _flatsequence(sequence):
        "Generates a flattened version of the sequence."
        try:
            for element in sequence:
                if hasattr(element, '__iter__'):
                    for f in _flatsequence(element):
                        yield f
                else:
                    yield element
        except TypeError:
            yield sequence

    mask = np.asarray(mask)
    flattened = _flatsequence(_flatmask(mask))
    return np.array([_ for _ in flattened], dtype=bool)


def _check_mask_axis(mask, axis, keepdims=np._NoValue):
    "Check whether there are masked values along the given axis"
    kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
    if mask is not nomask:
        return mask.all(axis=axis, **kwargs)
    return nomask
###############################################################################
#                          Masking functions                                  #
###############################################################################


def masked_where(condition, a, copy=True):
    """
    Mask an array where a condition is met.

    Return `a` as an array masked where `condition` is True.
    Any masked values of `a` or `condition` are also masked in the output.

    Parameters
    ----------
    condition : array_like
        Masking condition.  When `condition` tests floating point values for
        equality, consider using ``masked_values`` instead.
    a : array_like
        Array to mask.
    copy : bool
        If True (default) make a copy of `a` in the result.  If False modify
        `a` in place and return a view.

    Returns
    -------
    result : MaskedArray
        The result of masking `a` where `condition` is True.

    See Also
    --------
    masked_values : Mask using floating point equality.
    masked_equal : Mask where equal to a given value.
    masked_not_equal : Mask where `not` equal to a given value.
    masked_less_equal : Mask where less than or equal to a given value.
    masked_greater_equal : Mask where greater than or equal to a given value.
    masked_less : Mask where less than a given value.
    masked_greater : Mask where greater than a given value.
    masked_inside : Mask inside a given interval.
    masked_outside : Mask outside a given interval.
    masked_invalid : Mask invalid values (NaNs or infs).

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = np.arange(4)
    >>> a
    array([0, 1, 2, 3])
    >>> ma.masked_where(a <= 2, a)
    masked_array(data = [-- -- -- 3],
          mask = [ True  True  True False],
          fill_value=999999)

    Mask array `b` conditional on `a`.

    >>> b = ['a', 'b', 'c', 'd']
    >>> ma.masked_where(a == 2, b)
    masked_array(data = [a b -- d],
          mask = [False False  True False],
          fill_value=N/A)

    Effect of the `copy` argument.

    >>> c = ma.masked_where(a <= 2, a)
    >>> c
    masked_array(data = [-- -- -- 3],
          mask = [ True  True  True False],
          fill_value=999999)
    >>> c[0] = 99
    >>> c
    masked_array(data = [99 -- -- 3],
          mask = [False  True  True False],
          fill_value=999999)
    >>> a
    array([0, 1, 2, 3])
    >>> c = ma.masked_where(a <= 2, a, copy=False)
    >>> c[0] = 99
    >>> c
    masked_array(data = [99 -- -- 3],
          mask = [False  True  True False],
          fill_value=999999)
    >>> a
    array([99,  1,  2,  3])

    When `condition` or `a` contain masked values.

    >>> a = np.arange(4)
    >>> a = ma.masked_where(a == 2, a)
    >>> a
    masked_array(data = [0 1 -- 3],
          mask = [False False  True False],
          fill_value=999999)
    >>> b = np.arange(4)
    >>> b = ma.masked_where(b == 0, b)
    >>> b
    masked_array(data = [-- 1 2 3],
          mask = [ True False False False],
          fill_value=999999)
    >>> ma.masked_where(a == 3, b)
    masked_array(data = [-- 1 -- --],
          mask = [ True False  True  True],
          fill_value=999999)

    """
    # Make sure that condition is a valid standard-type mask.
    cond = make_mask(condition, shrink=False)
    a = np.array(a, copy=copy, subok=True)

    (cshape, ashape) = (cond.shape, a.shape)
    if cshape and cshape != ashape:
        raise IndexError("Inconsistent shape between the condition and the input"
                         " (got %s and %s)" % (cshape, ashape))
    if hasattr(a, '_mask'):
        cond = mask_or(cond, a._mask)
        cls = type(a)
    else:
        cls = MaskedArray
    result = a.view(cls)
    # Assign to *.mask so that structured masks are handled correctly.
    result.mask = _shrink_mask(cond)
    return result
def masked_greater(x, value, copy=True):
"""
Mask an array where greater than a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x > value).
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_greater(a, 2)
masked_array(data = [0 1 2 --],
mask = [False False False True],
fill_value=999999)
"""
return masked_where(greater(x, value), x, copy=copy)
def masked_greater_equal(x, value, copy=True):
"""
Mask an array where greater than or equal to a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x >= value).
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_greater_equal(a, 2)
masked_array(data = [0 1 -- --],
mask = [False False True True],
fill_value=999999)
"""
return masked_where(greater_equal(x, value), x, copy=copy)
def masked_less(x, value, copy=True):
"""
Mask an array where less than a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x < value).
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_less(a, 2)
masked_array(data = [-- -- 2 3],
mask = [ True True False False],
fill_value=999999)
"""
return masked_where(less(x, value), x, copy=copy)
def masked_less_equal(x, value, copy=True):
"""
Mask an array where less than or equal to a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x <= value).
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_less_equal(a, 2)
masked_array(data = [-- -- -- 3],
mask = [ True True True False],
fill_value=999999)
"""
return masked_where(less_equal(x, value), x, copy=copy)
def masked_not_equal(x, value, copy=True):
"""
Mask an array where `not` equal to a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x != value).
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_not_equal(a, 2)
masked_array(data = [-- -- 2 --],
mask = [ True True False True],
fill_value=999999)
"""
return masked_where(not_equal(x, value), x, copy=copy)
def masked_equal(x, value, copy=True):
"""
Mask an array where equal to a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x == value). For floating point arrays,
consider using ``masked_values(x, value)``.
See Also
--------
masked_where : Mask where a condition is met.
masked_values : Mask using floating point equality.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_equal(a, 2)
masked_array(data = [0 1 -- 3],
mask = [False False True False],
fill_value=999999)
"""
output = masked_where(equal(x, value), x, copy=copy)
output.fill_value = value
return output
def masked_inside(x, v1, v2, copy=True):
"""
Mask an array inside a given interval.
Shortcut to ``masked_where``, where `condition` is True for `x` inside
the interval [v1,v2] (v1 <= x <= v2). The boundaries `v1` and `v2`
can be given in either order.
See Also
--------
masked_where : Mask where a condition is met.
Notes
-----
The array `x` is prefilled with its filling value.
Examples
--------
>>> import numpy.ma as ma
>>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1]
>>> ma.masked_inside(x, -0.3, 0.3)
masked_array(data = [0.31 1.2 -- -- -0.4 -1.1],
mask = [False False True True False False],
fill_value=1e+20)
The order of `v1` and `v2` doesn't matter.
>>> ma.masked_inside(x, 0.3, -0.3)
masked_array(data = [0.31 1.2 -- -- -0.4 -1.1],
mask = [False False True True False False],
fill_value=1e+20)
"""
if v2 < v1:
(v1, v2) = (v2, v1)
xf = filled(x)
condition = (xf >= v1) & (xf <= v2)
return masked_where(condition, x, copy=copy)
def masked_outside(x, v1, v2, copy=True):
"""
Mask an array outside a given interval.
Shortcut to ``masked_where``, where `condition` is True for `x` outside
the interval [v1,v2] (x < v1)|(x > v2).
The boundaries `v1` and `v2` can be given in either order.
See Also
--------
masked_where : Mask where a condition is met.
Notes
-----
The array `x` is prefilled with its filling value.
Examples
--------
>>> import numpy.ma as ma
>>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1]
>>> ma.masked_outside(x, -0.3, 0.3)
masked_array(data = [-- -- 0.01 0.2 -- --],
mask = [ True True False False True True],
fill_value=1e+20)
The order of `v1` and `v2` doesn't matter.
>>> ma.masked_outside(x, 0.3, -0.3)
masked_array(data = [-- -- 0.01 0.2 -- --],
mask = [ True True False False True True],
fill_value=1e+20)
"""
if v2 < v1:
(v1, v2) = (v2, v1)
xf = filled(x)
condition = (xf < v1) | (xf > v2)
return masked_where(condition, x, copy=copy)
def masked_object(x, value, copy=True, shrink=True):
"""
Mask the array `x` where the data are exactly equal to value.
This function is similar to `masked_values`, but only suitable
for object arrays: for floating point, use `masked_values` instead.
Parameters
----------
x : array_like
Array to mask
value : object
Comparison value
copy : {True, False}, optional
Whether to return a copy of `x`.
shrink : {True, False}, optional
Whether to collapse a mask full of False to nomask
Returns
-------
result : MaskedArray
The result of masking `x` where equal to `value`.
See Also
--------
masked_where : Mask where a condition is met.
masked_equal : Mask where equal to a given value (integers).
masked_values : Mask using floating point equality.
Examples
--------
>>> import numpy.ma as ma
>>> food = np.array(['green_eggs', 'ham'], dtype=object)
>>> # don't eat spoiled food
>>> eat = ma.masked_object(food, 'green_eggs')
>>> print(eat)
[-- ham]
>>> # plain ol' ham is boring
>>> fresh_food = np.array(['cheese', 'ham', 'pineapple'], dtype=object)
>>> eat = ma.masked_object(fresh_food, 'green_eggs')
>>> print(eat)
[cheese ham pineapple]
Note that `mask` is set to ``nomask`` if possible.
>>> eat
masked_array(data = [cheese ham pineapple],
mask = False,
fill_value=?)
"""
if isMaskedArray(x):
condition = umath.equal(x._data, value)
mask = x._mask
else:
condition = umath.equal(np.asarray(x), value)
mask = nomask
mask = mask_or(mask, make_mask(condition, shrink=shrink))
return masked_array(x, mask=mask, copy=copy, fill_value=value)
def masked_values(x, value, rtol=1e-5, atol=1e-8, copy=True, shrink=True):
"""
Mask using floating point equality.
Return a MaskedArray, masked where the data in array `x` are approximately
equal to `value`, determined using `isclose`. The default tolerances for
`masked_values` are the same as those for `isclose`.
For integer types, exact equality is used, in the same way as
`masked_equal`.
The fill_value is set to `value` and the mask is set to ``nomask`` if
possible.
Parameters
----------
x : array_like
Array to mask.
value : float
Masking value.
rtol, atol : float, optional
Tolerance parameters passed on to `isclose`
copy : bool, optional
Whether to return a copy of `x`.
shrink : bool, optional
Whether to collapse a mask full of False to ``nomask``.
Returns
-------
result : MaskedArray
The result of masking `x` where approximately equal to `value`.
See Also
--------
masked_where : Mask where a condition is met.
masked_equal : Mask where equal to a given value (integers).
Examples
--------
>>> import numpy.ma as ma
>>> x = np.array([1, 1.1, 2, 1.1, 3])
>>> ma.masked_values(x, 1.1)
masked_array(data = [1.0 -- 2.0 -- 3.0],
mask = [False True False True False],
fill_value=1.1)
Note that `mask` is set to ``nomask`` if possible.
>>> ma.masked_values(x, 1.5)
masked_array(data = [ 1. 1.1 2. 1.1 3. ],
mask = False,
fill_value=1.5)
For integers, the fill value will in general differ from the
result of ``masked_equal``.
>>> x = np.arange(5)
>>> x
array([0, 1, 2, 3, 4])
>>> ma.masked_values(x, 2)
masked_array(data = [0 1 -- 3 4],
mask = [False False True False False],
fill_value=2)
>>> ma.masked_equal(x, 2)
masked_array(data = [0 1 -- 3 4],
mask = [False False True False False],
fill_value=999999)
"""
xnew = filled(x, value)
if np.issubdtype(xnew.dtype, np.floating):
mask = np.isclose(xnew, value, atol=atol, rtol=rtol)
else:
mask = umath.equal(xnew, value)
ret = masked_array(xnew, mask=mask, copy=copy, fill_value=value)
if shrink:
ret.shrink_mask()
return ret
def masked_invalid(a, copy=True):
"""
Mask an array where invalid values occur (NaNs or infs).
This function is a shortcut to ``masked_where``, with
`condition` = ~(np.isfinite(a)). Any pre-existing mask is conserved.
Only applies to arrays with a dtype where NaNs or infs make sense
(i.e. floating point types), but accepts any array_like object.
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(5, dtype=float)
>>> a[2] = np.NaN
>>> a[3] = np.PINF
>>> a
array([ 0., 1., NaN, Inf, 4.])
>>> ma.masked_invalid(a)
masked_array(data = [0.0 1.0 -- -- 4.0],
mask = [False False True True False],
fill_value=1e+20)
"""
a = np.array(a, copy=copy, subok=True)
mask = getattr(a, '_mask', None)
if mask is not None:
condition = ~(np.isfinite(getdata(a)))
if mask is not nomask:
condition |= mask
cls = type(a)
else:
condition = ~(np.isfinite(a))
cls = MaskedArray
result = a.view(cls)
result._mask = condition
return result
###############################################################################
# Printing options #
###############################################################################
class _MaskedPrintOption(object):
"""
Handle the string used to represent missing data in a masked array.
"""
def __init__(self, display):
"""
Create the masked_print_option object.
"""
self._display = display
self._enabled = True
def display(self):
"""
Display the string to print for masked values.
"""
return self._display
def set_display(self, s):
"""
Set the string to print for masked values.
"""
self._display = s
def enabled(self):
"""
Is the use of the display value enabled?
"""
return self._enabled
def enable(self, shrink=1):
"""
Enable or disable the use of the display value, according to `shrink`.
"""
self._enabled = shrink
def __str__(self):
return str(self._display)
__repr__ = __str__
# If you index a single masked location, you get this object.
masked_print_option = _MaskedPrintOption('--')
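# Hedged usage sketch for the print option above (values are illustrative):
def _demo_masked_print_option():
    previous = masked_print_option.display()
    masked_print_option.set_display('N/A')  # show 'N/A' instead of '--'
    text = str(masked_array([1, 2], mask=[0, 1]))  # '[1 N/A]'
    masked_print_option.set_display(previous)  # restore the previous marker
    return text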
def _recursive_printoption(result, mask, printopt):
"""
Puts printoptions in result where mask is True.
Private function allowing for recursion
"""
names = result.dtype.names
if names:
for name in names:
curdata = result[name]
curmask = mask[name]
_recursive_printoption(curdata, curmask, printopt)
else:
np.copyto(result, printopt, where=mask)
return
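# Hedged sketch of the recursive substitution above, applied to an
# object-dtype copy of a structured array (the demo name is illustrative):
def _demo_recursive_printoption():
    res = np.array([(1, 2)], dtype=[('a', object), ('b', object)])
    mask = np.array([(False, True)], dtype=[('a', bool), ('b', bool)])
    _recursive_printoption(res, mask, masked_print_option)
    return res  # field 'b' now holds the masked-print marker object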
# For better or worse, these end in a newline
_legacy_print_templates = dict(
long_std=textwrap.dedent("""\
masked_%(name)s(data =
%(data)s,
%(nlen)s mask =
%(mask)s,
%(nlen)s fill_value = %(fill)s)
"""),
long_flx=textwrap.dedent("""\
masked_%(name)s(data =
%(data)s,
%(nlen)s mask =
%(mask)s,
%(nlen)s fill_value = %(fill)s,
%(nlen)s dtype = %(dtype)s)
"""),
short_std=textwrap.dedent("""\
masked_%(name)s(data = %(data)s,
%(nlen)s mask = %(mask)s,
%(nlen)s fill_value = %(fill)s)
"""),
short_flx=textwrap.dedent("""\
masked_%(name)s(data = %(data)s,
%(nlen)s mask = %(mask)s,
%(nlen)s fill_value = %(fill)s,
%(nlen)s dtype = %(dtype)s)
""")
)
###############################################################################
# MaskedArray class #
###############################################################################
def _recursive_filled(a, mask, fill_value):
"""
Recursively fill `a` with `fill_value`.
"""
names = a.dtype.names
for name in names:
current = a[name]
if current.dtype.names:
_recursive_filled(current, mask[name], fill_value[name])
else:
np.copyto(current, fill_value[name], where=mask[name])
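# Hedged sketch of the recursive fill above (demo values are illustrative):
def _demo_recursive_filled():
    data = np.array([(1, 2.0)], dtype=[('a', int), ('b', float)])
    mask = np.array([(False, True)], dtype=[('a', bool), ('b', bool)])
    fill = np.array((-1, -1.0), dtype=data.dtype)
    _recursive_filled(data, mask, fill)
    return data  # the masked field 'b' now holds -1.0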
def flatten_structured_array(a):
"""
Flatten a structured array.
The data type of the output is chosen such that it can represent all of the
(nested) fields.
Parameters
----------
a : structured array
Returns
-------
output : masked array or ndarray
A flattened masked array if the input is a masked array, otherwise a
standard ndarray.
Examples
--------
>>> ndtype = [('a', int), ('b', float)]
>>> a = np.array([(1, 1), (2, 2)], dtype=ndtype)
>>> flatten_structured_array(a)
array([[1., 1.],
[2., 2.]])
"""
def flatten_sequence(iterable):
"""
Flattens a compound of nested iterables.
"""
for elm in iter(iterable):
if hasattr(elm, '__iter__'):
for f in flatten_sequence(elm):
yield f
else:
yield elm
a = np.asanyarray(a)
inishape = a.shape
a = a.ravel()
if isinstance(a, MaskedArray):
out = np.array([tuple(flatten_sequence(d.item())) for d in a._data])
out = out.view(MaskedArray)
out._mask = np.array([tuple(flatten_sequence(d.item()))
for d in getmaskarray(a)])
else:
out = np.array([tuple(flatten_sequence(d.item())) for d in a])
if len(inishape) > 1:
newshape = list(out.shape)
newshape[0] = inishape
out.shape = tuple(flatten_sequence(newshape))
return out
def _arraymethod(funcname, onmask=True):
"""
Return a class method wrapper around a basic array method.
Creates a class method which returns a masked array, where the new
``_data`` array is the output of the corresponding basic method called
on the original ``_data``.
If `onmask` is True, the new mask is the output of the method called
on the initial mask. Otherwise, the new mask is just a reference
to the initial mask.
Parameters
----------
funcname : str
Name of the function to apply on data.
onmask : bool
Whether the mask must be processed also (True) or left
alone (False). Default is True. Make available as `_onmask`
attribute.
Returns
-------
method : instancemethod
Class method wrapper of the specified basic array method.
"""
def wrapped_method(self, *args, **params):
result = getattr(self._data, funcname)(*args, **params)
result = result.view(type(self))
result._update_from(self)
mask = self._mask
if not onmask:
result.__setmask__(mask)
elif mask is not nomask:
# __setmask__ makes a copy, which we don't want
result._mask = getattr(mask, funcname)(*args, **params)
return result
methdoc = getattr(ndarray, funcname, None) or getattr(np, funcname, None)
if methdoc is not None:
wrapped_method.__doc__ = methdoc.__doc__
wrapped_method.__name__ = funcname
return wrapped_method
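# Hedged sketch of the factory above: build one wrapper on a throwaway
# subclass (names are illustrative, not part of the public API).
def _demo_arraymethod():
    class _Demo(MaskedArray):
        swapped = _arraymethod('swapaxes')
    a = _Demo(np.arange(6).reshape(2, 3), mask=[[0, 1, 0], [0, 0, 1]])
    return a.swapped(0, 1)  # data and mask are swapped consistently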
class MaskedIterator(object):
"""
Flat iterator object to iterate over masked arrays.
A `MaskedIterator` iterator is returned by ``x.flat`` for any masked array
`x`. It allows iterating over the array as if it were a 1-D array,
either in a for-loop or by calling its `next` method.
Iteration is done in C-contiguous style, with the last index varying the
fastest. The iterator can also be indexed using basic slicing or
advanced indexing.
See Also
--------
MaskedArray.flat : Return a flat iterator over an array.
MaskedArray.flatten : Returns a flattened copy of an array.
Notes
-----
`MaskedIterator` is not exported by the `ma` module. Instead of
instantiating a `MaskedIterator` directly, use `MaskedArray.flat`.
Examples
--------
>>> x = np.ma.array(np.arange(6).reshape(2, 3))
>>> fl = x.flat
>>> type(fl)
<class 'numpy.ma.core.MaskedIterator'>
>>> for item in fl:
... print(item)
...
0
1
2
3
4
5
Extracting more than a single element by indexing the `MaskedIterator`
returns a masked array:
>>> fl[2:4]
masked_array(data = [2 3],
mask = False,
fill_value = 999999)
"""
def __init__(self, ma):
self.ma = ma
self.dataiter = ma._data.flat
if ma._mask is nomask:
self.maskiter = None
else:
self.maskiter = ma._mask.flat
def __iter__(self):
return self
def __getitem__(self, indx):
result = self.dataiter.__getitem__(indx).view(type(self.ma))
if self.maskiter is not None:
_mask = self.maskiter.__getitem__(indx)
if isinstance(_mask, ndarray):
# set shape to match that of data; this is needed for matrices
_mask.shape = result.shape
result._mask = _mask
elif isinstance(_mask, np.void):
return mvoid(result, mask=_mask, hardmask=self.ma._hardmask)
elif _mask: # Just a scalar, masked
return masked
return result
# This won't work if ravel makes a copy
def __setitem__(self, index, value):
self.dataiter[index] = getdata(value)
if self.maskiter is not None:
self.maskiter[index] = getmaskarray(value)
def __next__(self):
"""
Return the next value, or raise StopIteration.
Examples
--------
>>> x = np.ma.array([3, 2], mask=[0, 1])
>>> fl = x.flat
>>> fl.next()
3
>>> fl.next()
masked_array(data = --,
mask = True,
fill_value = 1e+20)
>>> fl.next()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/ralf/python/numpy/numpy/ma/core.py", line 2243, in next
d = self.dataiter.next()
StopIteration
"""
d = next(self.dataiter)
if self.maskiter is not None:
m = next(self.maskiter)
if isinstance(m, np.void):
return mvoid(d, mask=m, hardmask=self.ma._hardmask)
elif m: # Just a scalar, masked
return masked
return d
next = __next__
class MaskedArray(ndarray):
"""
An array class with possibly masked values.
Masked values of True exclude the corresponding element from any
computation.
Construction::
x = MaskedArray(data, mask=nomask, dtype=None, copy=False, subok=True,
ndmin=0, fill_value=None, keep_mask=True, hard_mask=None,
shrink=True, order=None)
Parameters
----------
data : array_like
Input data.
mask : sequence, optional
Mask. Must be convertible to an array of booleans with the same
shape as `data`. True indicates a masked (i.e. invalid) entry.
dtype : dtype, optional
Data type of the output.
If `dtype` is None, the type of the data argument (``data.dtype``)
is used. If `dtype` is not None and different from ``data.dtype``,
a copy is performed.
copy : bool, optional
Whether to copy the input data (True), or to use a reference instead.
Default is False.
subok : bool, optional
Whether to return a subclass of `MaskedArray` if possible (True) or a
plain `MaskedArray`. Default is True.
ndmin : int, optional
Minimum number of dimensions. Default is 0.
fill_value : scalar, optional
Value used to fill in the masked values when necessary.
If None, a default based on the data-type is used.
keep_mask : bool, optional
Whether to combine `mask` with the mask of the input data, if any
(True), or to use only `mask` for the output (False). Default is True.
hard_mask : bool, optional
Whether to use a hard mask or not. With a hard mask, masked values
cannot be unmasked. Default is False.
shrink : bool, optional
Whether to force compression of an empty mask. Default is True.
order : {'C', 'F', 'A'}, optional
Specify the order of the array. If order is 'C', then the array
will be in C-contiguous order (last-index varies the fastest).
If order is 'F', then the returned array will be in
Fortran-contiguous order (first-index varies the fastest).
If order is 'A' (default), then the returned array may be
in any order (either C-, Fortran-contiguous, or even discontiguous),
unless a copy is required, in which case it will be C-contiguous.
"""
__array_priority__ = 15
_defaultmask = nomask
_defaulthardmask = False
_baseclass = ndarray
# Maximum number of elements per axis used when printing an array. The
# 1d case is handled separately because we need more values in this case.
_print_width = 100
_print_width_1d = 1500
def __new__(cls, data=None, mask=nomask, dtype=None, copy=False,
subok=True, ndmin=0, fill_value=None, keep_mask=True,
hard_mask=None, shrink=True, order=None, **options):
"""
Create a new masked array from scratch.
Notes
-----
A masked array can also be created by taking a .view(MaskedArray).
"""
# Process data.
_data = np.array(data, dtype=dtype, copy=copy,
order=order, subok=True, ndmin=ndmin)
_baseclass = getattr(data, '_baseclass', type(_data))
# Check that we're not erasing the mask.
if isinstance(data, MaskedArray) and (data.shape != _data.shape):
copy = True
# Here, we copy the _view_, so that we can attach new properties to it
# we must never do .view(MaskedConstant), as that would create a new
# instance of np.ma.masked, which makes identity comparison fail
if isinstance(data, cls) and subok and not isinstance(data, MaskedConstant):
_data = ndarray.view(_data, type(data))
else:
_data = ndarray.view(_data, cls)
# Backwards compatibility w/ numpy.core.ma.
if hasattr(data, '_mask') and not isinstance(data, ndarray):
_data._mask = data._mask
# FIXME _sharedmask is never used.
_sharedmask = True
# Process mask.
# Type of the mask
mdtype = make_mask_descr(_data.dtype)
if mask is nomask:
# Case 1. : no mask in input.
# Erase the current mask?
if not keep_mask:
# With a reduced version
if shrink:
_data._mask = nomask
# With full version
else:
_data._mask = np.zeros(_data.shape, dtype=mdtype)
# Check whether we missed something
elif isinstance(data, (tuple, list)):
try:
# If data is a sequence of masked arrays
mask = np.array([getmaskarray(m) for m in data],
dtype=mdtype)
except ValueError:
# If data is nested
mask = nomask
# Force shrinking of the mask if needed (and possible)
if (mdtype == MaskType) and mask.any():
_data._mask = mask
_data._sharedmask = False
else:
_data._sharedmask = not copy
if copy:
_data._mask = _data._mask.copy()
# Reset the shape of the original mask
if getmask(data) is not nomask:
data._mask.shape = data.shape
else:
# Case 2. : With a mask in input.
# If mask is boolean, create an array of True or False
if mask is True and mdtype == MaskType:
mask = np.ones(_data.shape, dtype=mdtype)
elif mask is False and mdtype == MaskType:
mask = np.zeros(_data.shape, dtype=mdtype)
else:
# Read the mask with the current mdtype
try:
mask = np.array(mask, copy=copy, dtype=mdtype)
# Or assume it's a sequence of bool/int
except TypeError:
mask = np.array([tuple([m] * len(mdtype)) for m in mask],
dtype=mdtype)
# Make sure the mask and the data have the same shape
if mask.shape != _data.shape:
(nd, nm) = (_data.size, mask.size)
if nm == 1:
mask = np.resize(mask, _data.shape)
elif nm == nd:
mask = np.reshape(mask, _data.shape)
else:
msg = "Mask and data not compatible: data size is %i, " + \
"mask size is %i."
raise MaskError(msg % (nd, nm))
copy = True
# Set the mask to the new value
if _data._mask is nomask:
_data._mask = mask
_data._sharedmask = not copy
else:
if not keep_mask:
_data._mask = mask
_data._sharedmask = not copy
else:
if _data.dtype.names:
def _recursive_or(a, b):
"do a|=b on each field of a, recursively"
for name in a.dtype.names:
(af, bf) = (a[name], b[name])
if af.dtype.names:
_recursive_or(af, bf)
else:
af |= bf
_recursive_or(_data._mask, mask)
else:
_data._mask = np.logical_or(mask, _data._mask)
_data._sharedmask = False
# Update fill_value.
if fill_value is None:
fill_value = getattr(data, '_fill_value', None)
# But don't run the check unless we have something to check.
if fill_value is not None:
_data._fill_value = _check_fill_value(fill_value, _data.dtype)
# Process extra options ..
if hard_mask is None:
_data._hardmask = getattr(data, '_hardmask', False)
else:
_data._hardmask = hard_mask
_data._baseclass = _baseclass
return _data
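# Hedged construction sketches for __new__ (values are illustrative):
#   MaskedArray([1, 2, 3], mask=[0, 1, 0])   # explicit mask
#   MaskedArray(data, keep_mask=False)       # discard any mask data carries
#   MaskedArray([1, 2, 3], mask=True)        # mask every entry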
def _update_from(self, obj):
"""
Copies some attributes of obj to self.
"""
if isinstance(obj, ndarray):
_baseclass = type(obj)
else:
_baseclass = ndarray
# We need to copy the _basedict to avoid backward propagation
_optinfo = {}
_optinfo.update(getattr(obj, '_optinfo', {}))
_optinfo.update(getattr(obj, '_basedict', {}))
if not isinstance(obj, MaskedArray):
_optinfo.update(getattr(obj, '__dict__', {}))
_dict = dict(_fill_value=getattr(obj, '_fill_value', None),
_hardmask=getattr(obj, '_hardmask', False),
_sharedmask=getattr(obj, '_sharedmask', False),
_isfield=getattr(obj, '_isfield', False),
_baseclass=getattr(obj, '_baseclass', _baseclass),
_optinfo=_optinfo,
_basedict=_optinfo)
self.__dict__.update(_dict)
self.__dict__.update(_optinfo)
return
def __array_finalize__(self, obj):
"""
Finalizes the masked array.
"""
# Get main attributes.
self._update_from(obj)
# We have to decide how to initialize self.mask, based on
# obj.mask. This is very difficult. There might be some
# correspondence between the elements in the array we are being
# created from (= obj) and us. Or there might not. This method can
# be called in all kinds of places for all kinds of reasons -- could
# be empty_like, could be slicing, could be a ufunc, could be a view.
# The numpy subclassing interface simply doesn't give us any way
# to know, which means that at best this method will be based on
# guesswork and heuristics. To make things worse, there isn't even any
# clear consensus about what the desired behavior is. For instance,
# most users think that np.empty_like(marr) -- which goes via this
# method -- should return a masked array with an empty mask (see
# gh-3404 and linked discussions), but others disagree, and they have
# existing code which depends on empty_like returning an array that
# matches the input mask.
#
# Historically our algorithm was: if the template object mask had the
# same *number of elements* as us, then we used *its mask object
# itself* as our mask, so that writes to us would also write to the
# original array. This is horribly broken in multiple ways.
#
# Now what we do instead is, if the template object mask has the same
# number of elements as us, and we do not have the same base pointer
# as the template object (b/c views like arr[...] should keep the same
# mask), then we make a copy of the template object mask and use
# that. This is also horribly broken but somewhat less so. Maybe.
if isinstance(obj, ndarray):
# XX: This looks like a bug -- shouldn't it check self.dtype
# instead?
if obj.dtype.names:
_mask = getmaskarray(obj)
else:
_mask = getmask(obj)
# If self and obj point to exactly the same data, then probably
# self is a simple view of obj (e.g., self = obj[...]), so they
# should share the same mask. (This isn't 100% reliable, e.g. self
# could be the first row of obj, or have strange strides, but as a
# heuristic it's not bad.) In all other cases, we make a copy of
# the mask, so that future modifications to 'self' do not end up
# side-effecting 'obj' as well.
if (_mask is not nomask and obj.__array_interface__["data"][0]
!= self.__array_interface__["data"][0]):
# We should make a copy. But we could get here via astype,
# in which case the mask might need a new dtype as well
# (e.g., changing to or from a structured dtype), and the
# order could have changed. So, change the mask type if
# needed and use astype instead of copy.
if self.dtype == obj.dtype:
_mask_dtype = _mask.dtype
else:
_mask_dtype = make_mask_descr(self.dtype)
if self.flags.c_contiguous:
order = "C"
elif self.flags.f_contiguous:
order = "F"
else:
order = "K"
_mask = _mask.astype(_mask_dtype, order)
else:
# Take a view so shape changes, etc., do not propagate back.
_mask = _mask.view()
else:
_mask = nomask
self._mask = _mask
# Finalize the mask
if self._mask is not nomask:
try:
self._mask.shape = self.shape
except ValueError:
self._mask = nomask
except (TypeError, AttributeError):
# When _mask.shape is not writable (because it's a void)
pass
# Finalize the fill_value
if self._fill_value is not None:
self._fill_value = _check_fill_value(self._fill_value, self.dtype)
elif self.dtype.names is not None:
# Finalize the default fill_value for structured arrays
self._fill_value = _check_fill_value(None, self.dtype)
def __array_wrap__(self, obj, context=None):
"""
Special hook for ufuncs.
Wraps the numpy array and sets the mask according to context.
"""
if obj is self: # for in-place operations
result = obj
else:
result = obj.view(type(self))
result._update_from(self)
if context is not None:
result._mask = result._mask.copy()
func, args, out_i = context
# args sometimes contains outputs (gh-10459), which we don't want
input_args = args[:func.nin]
m = reduce(mask_or, [getmaskarray(arg) for arg in input_args])
# Get the domain mask
domain = ufunc_domain.get(func, None)
if domain is not None:
# Take the domain, and make sure it's a ndarray
with np.errstate(divide='ignore', invalid='ignore'):
d = filled(domain(*input_args), True)
if d.any():
# Fill the result where the domain is wrong
try:
# Binary domain: take the last value
fill_value = ufunc_fills[func][-1]
except TypeError:
# Unary domain: just use this one
fill_value = ufunc_fills[func]
except KeyError:
# Domain not recognized, use fill_value instead
fill_value = self.fill_value
np.copyto(result, fill_value, where=d)
# Update the mask
if m is nomask:
m = d
else:
# Don't modify inplace, we risk back-propagation
m = (m | d)
# Make sure the mask has the proper size
if result is not self and result.shape == () and m:
return masked
else:
result._mask = m
result._sharedmask = False
return result
def view(self, dtype=None, type=None, fill_value=None):
"""
Return a view of the MaskedArray data
Parameters
----------
dtype : data-type or ndarray sub-class, optional
Data-type descriptor of the returned view, e.g., float32 or int16.
The default, None, results in the view having the same data-type
as `a`. As with ``ndarray.view``, dtype can also be specified as
an ndarray sub-class, which then specifies the type of the
returned object (this is equivalent to setting the ``type``
parameter).
type : Python type, optional
Type of the returned view, either ndarray or a subclass. The
default None results in type preservation.
Notes
-----
``a.view()`` is used in two different ways:
``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view
of the array's memory with a different data-type. This can cause a
reinterpretation of the bytes of memory.
``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just
returns an instance of `ndarray_subclass` that looks at the same array
(same shape, dtype, etc.) This does not cause a reinterpretation of the
memory.
If `fill_value` is not specified, but `dtype` is specified (and is not
an ndarray sub-class), the `fill_value` of the MaskedArray will be
reset. If neither `fill_value` nor `dtype` are specified (or if
`dtype` is an ndarray sub-class), then the fill value is preserved.
Finally, if `fill_value` is specified, but `dtype` is not, the fill
value is set to the specified value.
For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of
bytes per entry than the previous dtype (for example, converting a
regular array to a structured array), then the behavior of the view
cannot be predicted just from the superficial appearance of ``a`` (shown
by ``print(a)``). It also depends on exactly how ``a`` is stored in
memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus
defined as a slice or transpose, etc., the view may give different
results.
"""
if dtype is None:
if type is None:
output = ndarray.view(self)
else:
output = ndarray.view(self, type)
elif type is None:
try:
if issubclass(dtype, ndarray):
output = ndarray.view(self, dtype)
dtype = None
else:
output = ndarray.view(self, dtype)
except TypeError:
output = ndarray.view(self, dtype)
else:
output = ndarray.view(self, dtype, type)
# also make the mask be a view (so attr changes to the view's
# mask do not affect original object's mask)
# (especially important to avoid affecting np.masked singleton)
if (getmask(output) is not nomask):
output._mask = output._mask.view()
# Make sure to reset the _fill_value if needed
if getattr(output, '_fill_value', None) is not None:
if fill_value is None:
if dtype is None:
pass # leave _fill_value as is
else:
output._fill_value = None
else:
output.fill_value = fill_value
return output
def __getitem__(self, indx):
"""
x.__getitem__(y) <==> x[y]
Return the item described by i, as a masked array.
"""
# We could directly use ndarray.__getitem__ on self.
# But then we would have to modify __array_finalize__ to prevent the
# mask from being reshaped if it hasn't been set up properly yet
# So it's easier to stick to the current version
dout = self.data[indx]
_mask = self._mask
def _is_scalar(m):
return not isinstance(m, np.ndarray)
def _scalar_heuristic(arr, elem):
"""
Return whether `elem` is a scalar result of indexing `arr`, or None
if undecidable without promoting nomask to a full mask
"""
# obviously a scalar
if not isinstance(elem, np.ndarray):
return True
# object array scalar indexing can return anything
elif arr.dtype.type is np.object_:
if arr.dtype is not elem.dtype:
# elem is an array, but dtypes do not match, so must be
# an element
return True
# well-behaved subclass that only returns 0d arrays when
# expected - this is not a scalar
elif type(arr).__getitem__ == ndarray.__getitem__:
return False
return None
if _mask is not nomask:
# _mask cannot be a subclass, so it tells us whether we should
# expect a scalar. It also cannot be of dtype object.
mout = _mask[indx]
scalar_expected = _is_scalar(mout)
else:
# attempt to apply the heuristic to avoid constructing a full mask
mout = nomask
scalar_expected = _scalar_heuristic(self.data, dout)
if scalar_expected is None:
# heuristics have failed
# construct a full array, so we can be certain. This is costly.
# we could also fall back on ndarray.__getitem__(self.data, indx)
scalar_expected = _is_scalar(getmaskarray(self)[indx])
# Did we extract a single item?
if scalar_expected:
# A record
if isinstance(dout, np.void):
# We should always re-cast to mvoid, otherwise users can
# change masks on rows that already have masked values, but not
# on rows that have no masked values, which is inconsistent.
return mvoid(dout, mask=mout, hardmask=self._hardmask)
# special case introduced in gh-5962
elif (self.dtype.type is np.object_ and
isinstance(dout, np.ndarray) and
dout is not masked):
# If masked, turn into a MaskedArray, with everything masked.
if mout:
return MaskedArray(dout, mask=True)
else:
return dout
# Just a scalar
else:
if mout:
return masked
else:
return dout
else:
# Force dout to MA
dout = dout.view(type(self))
# Inherit attributes from self
dout._update_from(self)
# Check the fill_value
if isinstance(indx, basestring):
if self._fill_value is not None:
dout._fill_value = self._fill_value[indx]
# If we're indexing a multidimensional field in a
# structured array (such as dtype("(2,)i2,(2,)i1")),
# dimensionality goes up (M[field].ndim == M.ndim +
# M.dtype[field].ndim). That's fine for
# M[field] but problematic for M[field].fill_value
# which should have shape () to avoid breaking several
# methods. There is no great way out, so set to
# first element. See issue #6723.
if dout._fill_value.ndim > 0:
if not (dout._fill_value ==
dout._fill_value.flat[0]).all():
warnings.warn(
"Upon accessing multidimensional field "
"{indx:s}, need to keep dimensionality "
"of fill_value at 0. Discarding "
"heterogeneous fill_value and setting "
"all to {fv!s}.".format(indx=indx,
fv=dout._fill_value[0]),
stacklevel=2)
dout._fill_value = dout._fill_value.flat[0]
dout._isfield = True
# Update the mask if needed
if mout is not nomask:
# set shape to match that of data; this is needed for matrices
dout._mask = reshape(mout, dout.shape)
dout._sharedmask = True
# Note: Don't try to check for m.any(), that'll take too long
return dout
def __setitem__(self, indx, value):
"""
x.__setitem__(i, y) <==> x[i]=y
Set item described by index. If value is masked, masks those
locations.
"""
if self is masked:
raise MaskError('Cannot alter the masked element.')
_data = self._data
_mask = self._mask
if isinstance(indx, basestring):
_data[indx] = value
if _mask is nomask:
self._mask = _mask = make_mask_none(self.shape, self.dtype)
_mask[indx] = getmask(value)
return
_dtype = _data.dtype
nbfields = len(_dtype.names or ())
if value is masked:
# The mask wasn't set: create a full version.
if _mask is nomask:
_mask = self._mask = make_mask_none(self.shape, _dtype)
# Now, set the mask to its value.
if nbfields:
_mask[indx] = tuple([True] * nbfields)
else:
_mask[indx] = True
return
# Get the _data part of the new value
dval = getattr(value, '_data', value)
# Get the _mask part of the new value
mval = getmask(value)
if nbfields and mval is nomask:
mval = tuple([False] * nbfields)
if _mask is nomask:
# Set the data, then the mask
_data[indx] = dval
if mval is not nomask:
_mask = self._mask = make_mask_none(self.shape, _dtype)
_mask[indx] = mval
elif not self._hardmask:
# Set the data, then the mask
_data[indx] = dval
_mask[indx] = mval
elif hasattr(indx, 'dtype') and (indx.dtype == MaskType):
indx = indx * umath.logical_not(_mask)
_data[indx] = dval
else:
if nbfields:
err_msg = "Flexible 'hard' masks are not yet supported."
raise NotImplementedError(err_msg)
mindx = mask_or(_mask[indx], mval, copy=True)
dindx = self._data[indx]
if dindx.size > 1:
np.copyto(dindx, dval, where=~mindx)
elif mindx is nomask:
dindx = dval
_data[indx] = dindx
_mask[indx] = mindx
return
# Define so that we can overwrite the setter.
@property
def dtype(self):
return super(MaskedArray, self).dtype
@dtype.setter
def dtype(self, dtype):
super(MaskedArray, type(self)).dtype.__set__(self, dtype)
if self._mask is not nomask:
self._mask = self._mask.view(make_mask_descr(dtype), ndarray)
# Try to reset the shape of the mask (if we don't have a void).
# This raises a ValueError if the dtype change won't work.
try:
self._mask.shape = self.shape
except (AttributeError, TypeError):
pass
@property
def shape(self):
return super(MaskedArray, self).shape
@shape.setter
def shape(self, shape):
super(MaskedArray, type(self)).shape.__set__(self, shape)
# Cannot use self._mask, since it may not (yet) exist when a
# masked matrix sets the shape.
if getmask(self) is not nomask:
self._mask.shape = self.shape
def __setmask__(self, mask, copy=False):
"""
Set the mask.
"""
idtype = self.dtype
current_mask = self._mask
if mask is masked:
mask = True
if (current_mask is nomask):
# Make sure the mask is set
# Just don't do anything if there's nothing to do.
if mask is nomask:
return
current_mask = self._mask = make_mask_none(self.shape, idtype)
if idtype.names is None:
# No named fields.
# Hardmask: don't unmask the data
if self._hardmask:
current_mask |= mask
# Softmask: set everything to False
# If it's obviously a compatible scalar, use a quick update
# method.
elif isinstance(mask, (int, float, np.bool_, np.number)):
current_mask[...] = mask
# Otherwise fall back to the slower, general purpose way.
else:
current_mask.flat = mask
else:
# Named fields
mdtype = current_mask.dtype
mask = np.array(mask, copy=False)
# Mask is a singleton
if not mask.ndim:
# It's a boolean : make a record
if mask.dtype.kind == 'b':
mask = np.array(tuple([mask.item()] * len(mdtype)),
dtype=mdtype)
# It's a record: make sure the dtype is correct
else:
mask = mask.astype(mdtype)
# Mask is a sequence
else:
# Make sure the new mask is a ndarray with the proper dtype
try:
mask = np.array(mask, copy=copy, dtype=mdtype)
# Or assume it's a sequence of bool/int
except TypeError:
mask = np.array([tuple([m] * len(mdtype)) for m in mask],
dtype=mdtype)
# Hardmask: don't unmask the data
if self._hardmask:
for n in idtype.names:
current_mask[n] |= mask[n]
# Softmask: set everything to False
# If it's obviously a compatible scalar, use a quick update
# method.
elif isinstance(mask, (int, float, np.bool_, np.number)):
current_mask[...] = mask
# Otherwise fall back to the slower, general purpose way.
else:
current_mask.flat = mask
# Reshape if needed
if current_mask.shape:
current_mask.shape = self.shape
return
_set_mask = __setmask__
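# Hedged illustration of the soft/hard behavior of __setmask__ above:
#   a = masked_array([1, 2, 3], mask=[0, 1, 0])
#   a.mask = [1, 0, 0]    # soft mask: the old True entry is unmasked
#   a.harden_mask()
#   a.mask = [0, 0, 0]    # hard mask: existing True entries survive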
def _get_mask(self):
"""Return the current mask.
"""
# We could try to force a reshape, but that wouldn't work in some
# cases.
return self._mask
mask = property(fget=_get_mask, fset=__setmask__, doc="Mask")
def _get_recordmask(self):
"""
Return the mask of the records.
A record is masked when all the fields are masked.
"""
_mask = self._mask.view(ndarray)
if _mask.dtype.names is None:
return _mask
return np.all(flatten_structured_array(_mask), axis=-1)
def _set_recordmask(self):
"""
Return the mask of the records.
A record is masked when all the fields are masked.
"""
raise NotImplementedError("Coming soon: setting the mask per records!")
recordmask = property(fget=_get_recordmask)
def harden_mask(self):
"""
Force the mask to hard.
Whether the mask of a masked array is hard or soft is determined by
its `hardmask` property. `harden_mask` sets `hardmask` to True.
See Also
--------
hardmask
"""
self._hardmask = True
return self
def soften_mask(self):
"""
Force the mask to soft.
Whether the mask of a masked array is hard or soft is determined by
its `hardmask` property. `soften_mask` sets `hardmask` to False.
See Also
--------
hardmask
"""
self._hardmask = False
return self
hardmask = property(fget=lambda self: self._hardmask,
doc="Hardness of the mask")
def unshare_mask(self):
"""
Copy the mask and set the sharedmask flag to False.
Whether the mask is shared between masked arrays can be seen from
the `sharedmask` property. `unshare_mask` ensures the mask is not shared.
A copy of the mask is only made if it was shared.
See Also
--------
sharedmask
"""
if self._sharedmask:
self._mask = self._mask.copy()
self._sharedmask = False
return self
sharedmask = property(fget=lambda self: self._sharedmask,
doc="Share status of the mask (read-only).")
def shrink_mask(self):
"""
Reduce a mask to nomask when possible.
Parameters
----------
None
Returns
-------
None
Examples
--------
>>> x = np.ma.array([[1, 2], [3, 4]], mask=[0]*4)
>>> x.mask
array([[False, False],
[False, False]])
>>> x.shrink_mask()
>>> x.mask
False
"""
self._mask = _shrink_mask(self._mask)
return self
baseclass = property(fget=lambda self: self._baseclass,
doc="Class of the underlying data (read-only).")
def _get_data(self):
"""Return the current data, as a view of the original
underlying data.
"""
return ndarray.view(self, self._baseclass)
_data = property(fget=_get_data)
data = property(fget=_get_data)
def _get_flat(self):
"Return a flat iterator."
return MaskedIterator(self)
def _set_flat(self, value):
"Set a flattened version of self to value."
y = self.ravel()
y[:] = value
flat = property(fget=_get_flat, fset=_set_flat,
doc="Flat version of the array.")
def get_fill_value(self):
"""
Return the filling value of the masked array.
Returns
-------
fill_value : scalar
The filling value.
Examples
--------
>>> for dt in [np.int32, np.int64, np.float64, np.complex128]:
... np.ma.array([0, 1], dtype=dt).get_fill_value()
...
999999
999999
1e+20
(1e+20+0j)
>>> x = np.ma.array([0, 1.], fill_value=-np.inf)
>>> x.get_fill_value()
-inf
"""
if self._fill_value is None:
self._fill_value = _check_fill_value(None, self.dtype)
# Temporary workaround to account for the fact that str and bytes
# scalars cannot be indexed with (), whereas all other numpy
# scalars can. See issues #7259 and #7267.
# The if-block can be removed after #7267 has been fixed.
if isinstance(self._fill_value, ndarray):
return self._fill_value[()]
return self._fill_value
def set_fill_value(self, value=None):
"""
Set the filling value of the masked array.
Parameters
----------
value : scalar, optional
The new filling value. Default is None, in which case a default
based on the data type is used.
See Also
--------
ma.set_fill_value : Equivalent function.
Examples
--------
>>> x = np.ma.array([0, 1.], fill_value=-np.inf)
>>> x.fill_value
-inf
>>> x.set_fill_value(np.pi)
>>> x.fill_value
3.1415926535897931
Reset to default:
>>> x.set_fill_value()
>>> x.fill_value
1e+20
"""
target = _check_fill_value(value, self.dtype)
_fill_value = self._fill_value
if _fill_value is None:
# Create the attribute if it was undefined
self._fill_value = target
else:
# Don't overwrite the attribute, just fill it (for propagation)
_fill_value[()] = target
fill_value = property(fget=get_fill_value, fset=set_fill_value,
doc="Filling value.")
def filled(self, fill_value=None):
"""
Return a copy of self, with masked values filled with a given value.
**However**, if there are no masked values to fill, self will be
returned instead as an ndarray.
Parameters
----------
fill_value : scalar, optional
The value to use for invalid entries (None by default).
If None, the `fill_value` attribute of the array is used instead.
Returns
-------
filled_array : ndarray
A copy of ``self`` with invalid entries replaced by *fill_value*
(be it the function argument or the attribute of ``self``), or
``self`` itself as an ndarray if there are no invalid entries to
be replaced.
Notes
-----
The result is **not** a MaskedArray!
Examples
--------
>>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999)
>>> x.filled()
array([1, 2, -999, 4, -999])
>>> type(x.filled())
<type 'numpy.ndarray'>
Subclassing is preserved. This means that if, e.g., the data part of
the masked array is a recarray, `filled` returns a recarray:
>>> x = np.array([(-1, 2), (-3, 4)], dtype='i8,i8').view(np.recarray)
>>> m = np.ma.array(x, mask=[(True, False), (False, True)])
>>> m.filled()
rec.array([(999999, 2), ( -3, 999999)],
dtype=[('f0', '<i8'), ('f1', '<i8')])
"""
m = self._mask
if m is nomask:
return self._data
if fill_value is None:
fill_value = self.fill_value
else:
fill_value = _check_fill_value(fill_value, self.dtype)
if self is masked_singleton:
return np.asanyarray(fill_value)
if m.dtype.names:
result = self._data.copy('K')
_recursive_filled(result, self._mask, fill_value)
elif not m.any():
return self._data
else:
result = self._data.copy('K')
try:
np.copyto(result, fill_value, where=m)
except (TypeError, AttributeError):
fill_value = narray(fill_value, dtype=object)
d = result.astype(object)
result = np.choose(m, (d, fill_value))
except IndexError:
# ok, if scalar
if self._data.shape:
raise
elif m:
result = np.array(fill_value, dtype=self.dtype)
else:
result = self._data
return result
def compressed(self):
"""
Return all the non-masked data as a 1-D array.
Returns
-------
data : ndarray
A new `ndarray` holding the non-masked data is returned.
Notes
-----
The result is **not** a MaskedArray!
Examples
--------
>>> x = np.ma.array(np.arange(5), mask=[0]*2 + [1]*3)
>>> x.compressed()
array([0, 1])
>>> type(x.compressed())
<type 'numpy.ndarray'>
"""
data = ndarray.ravel(self._data)
if self._mask is not nomask:
data = data.compress(np.logical_not(ndarray.ravel(self._mask)))
return data
def compress(self, condition, axis=None, out=None):
"""
Return `a` where condition is ``True``.
If condition is a `MaskedArray`, missing values are considered
as ``False``.
Parameters
----------
condition : var
Boolean 1-d array selecting which entries to return. If len(condition)
is less than the size of a along the axis, then output is truncated
to length of condition array.
axis : {None, int}, optional
Axis along which the operation must be performed.
out : {None, ndarray}, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type will be cast if
necessary.
Returns
-------
result : MaskedArray
A :class:`MaskedArray` object.
Notes
-----
Please note the difference with :meth:`compressed`!
The output of :meth:`compress` has a mask, the output of
:meth:`compressed` does not.
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> print(x)
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
>>> x.compress([1, 0, 1])
masked_array(data = [1 3],
mask = [False False],
fill_value=999999)
>>> x.compress([1, 0, 1], axis=1)
masked_array(data =
[[1 3]
[-- --]
[7 9]],
mask =
[[False False]
[ True True]
[False False]],
fill_value=999999)
"""
# Get the basic components
(_data, _mask) = (self._data, self._mask)
# Force the condition to a regular ndarray and forget the missing
# values.
condition = np.array(condition, copy=False, subok=False)
_new = _data.compress(condition, axis=axis, out=out).view(type(self))
_new._update_from(self)
if _mask is not nomask:
_new._mask = _mask.compress(condition, axis=axis)
return _new
def _insert_masked_print(self):
"""
Replace masked values with masked_print_option, casting all innermost
dtypes to object.
"""
if masked_print_option.enabled():
mask = self._mask
if mask is nomask:
res = self._data
else:
# convert to object array to make filled work
data = self._data
# For big arrays, to avoid a costly conversion to the
# object dtype, extract the corners before the conversion.
print_width = (self._print_width if self.ndim > 1
else self._print_width_1d)
for axis in range(self.ndim):
if data.shape[axis] > print_width:
ind = print_width // 2
arr = np.split(data, (ind, -ind), axis=axis)
data = np.concatenate((arr[0], arr[2]), axis=axis)
arr = np.split(mask, (ind, -ind), axis=axis)
mask = np.concatenate((arr[0], arr[2]), axis=axis)
rdtype = _replace_dtype_fields(self.dtype, "O")
res = data.astype(rdtype)
_recursive_printoption(res, mask, masked_print_option)
else:
res = self.filled(self.fill_value)
return res
def __str__(self):
return str(self._insert_masked_print())
if sys.version_info.major < 3:
def __unicode__(self):
return unicode(self._insert_masked_print())
def __repr__(self):
"""
Literal string representation.
"""
if self._baseclass is np.ndarray:
name = 'array'
else:
name = self._baseclass.__name__
# 2016-11-19: Demoted to legacy format
if np.get_printoptions()['legacy'] == '1.13':
is_long = self.ndim > 1
parameters = dict(
name=name,
nlen=" " * len(name),
data=str(self),
mask=str(self._mask),
fill=str(self.fill_value),
dtype=str(self.dtype)
)
is_structured = bool(self.dtype.names)
key = '{}_{}'.format(
'long' if is_long else 'short',
'flx' if is_structured else 'std'
)
return _legacy_print_templates[key] % parameters
prefix = 'masked_{}('.format(name)
dtype_needed = (
not np.core.arrayprint.dtype_is_implied(self.dtype) or
np.all(self.mask) or
self.size == 0
)
# determine which keyword args need to be shown
keys = ['data', 'mask', 'fill_value']
if dtype_needed:
keys.append('dtype')
# array has only one row (non-column)
is_one_row = builtins.all(dim == 1 for dim in self.shape[:-1])
# choose what to indent each keyword with
min_indent = 2
if is_one_row:
# first key on the same line as the type, remaining keys
# aligned by equals
indents = {}
indents[keys[0]] = prefix
for k in keys[1:]:
n = builtins.max(min_indent, len(prefix + keys[0]) - len(k))
indents[k] = ' ' * n
prefix = '' # absorbed into the first indent
else:
# each key on its own line, indented by two spaces
indents = {k: ' ' * min_indent for k in keys}
prefix = prefix + '\n' # first key on the next line
# format the field values
reprs = {}
reprs['data'] = np.array2string(
self._insert_masked_print(),
separator=", ",
prefix=indents['data'] + 'data=',
suffix=',')
reprs['mask'] = np.array2string(
self._mask,
separator=", ",
prefix=indents['mask'] + 'mask=',
suffix=',')
reprs['fill_value'] = repr(self.fill_value)
if dtype_needed:
reprs['dtype'] = np.core.arrayprint.dtype_short_repr(self.dtype)
# join keys with values and indentations
result = ',\n'.join(
'{}{}={}'.format(indents[k], k, reprs[k])
for k in keys
)
return prefix + result + ')'
def _delegate_binop(self, other):
# This emulates the logic in
# private/binop_override.h:forward_binop_should_defer
if isinstance(other, type(self)):
return False
array_ufunc = getattr(other, "__array_ufunc__", False)
if array_ufunc is False:
other_priority = getattr(other, "__array_priority__", -1000000)
return self.__array_priority__ < other_priority
else:
# If array_ufunc is not None, it will be called inside the ufunc;
# None explicitly tells us to not call the ufunc, i.e., defer.
return array_ufunc is None
def _comparison(self, other, compare):
"""Compare self with other using operator.eq or operator.ne.
When either of the elements is masked, the result is masked as well,
but the underlying boolean data are still set, with self and other
considered equal if both are masked, and unequal otherwise.
For structured arrays, all fields are combined, with masked values
ignored. The result is masked if all fields were masked, with self
and other considered equal only if both were fully masked.
"""
omask = getmask(other)
smask = self.mask
mask = mask_or(smask, omask, copy=True)
odata = getdata(other)
if mask.dtype.names:
# For possibly masked structured arrays we need to be careful,
# since the standard structured array comparison will use all
# fields, masked or not. To avoid masked fields influencing the
# outcome, we set all masked fields in self to other, so they'll
# count as equal. To prepare, we ensure we have the right shape.
broadcast_shape = np.broadcast(self, odata).shape
sbroadcast = np.broadcast_to(self, broadcast_shape, subok=True)
sbroadcast._mask = mask
sdata = sbroadcast.filled(odata)
# Now take care of the mask; the merged mask should have an item
# masked if all fields were masked (in one and/or other).
mask = (mask == np.ones((), mask.dtype))
else:
# For regular arrays, just use the data as they come.
sdata = self.data
check = compare(sdata, odata)
if isinstance(check, (np.bool_, bool)):
return masked if mask else check
if mask is not nomask:
# Adjust elements that were masked, which should be treated
# as equal if masked in both, unequal if masked in one.
# Note that this works automatically for structured arrays too.
check = np.where(mask, compare(smask, omask), check)
if mask.shape != check.shape:
# Guarantee consistency of the shape, making a copy since
# the mask may need to get written to later.
mask = np.broadcast_to(mask, check.shape).copy()
check = check.view(type(self))
check._update_from(self)
check._mask = mask
# Cast fill value to bool_ if needed. If it cannot be cast, the
# default boolean fill value is used.
if check._fill_value is not None:
try:
fill = _check_fill_value(check._fill_value, np.bool_)
except (TypeError, ValueError):
fill = _check_fill_value(None, np.bool_)
check._fill_value = fill
return check
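# Hedged illustration of the masked-comparison rule above:
#   a = masked_array([1, 2], mask=[0, 1])
#   b = masked_array([1, 3], mask=[0, 1])
#   (a == b)  # [True, --]; at the masked slot the underlying data
#             # compares the masks, so both-masked counts as equal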
def __eq__(self, other):
"""Check whether other equals self elementwise.
When either of the elements is masked, the result is masked as well,
but the underlying boolean data are still set, with self and other
considered equal if both are masked, and unequal otherwise.
For structured arrays, all fields are combined, with masked values
ignored. The result is masked if all fields were masked, with self
and other considered equal only if both were fully masked.
"""
return self._comparison(other, operator.eq)
def __ne__(self, other):
"""Check whether other does not equal self elementwise.
When either of the elements is masked, the result is masked as well,
but the underlying boolean data are still set, with self and other
considered equal if both are masked, and unequal otherwise.
For structured arrays, all fields are combined, with masked values
ignored. The result is masked if all fields were masked, with self
and other considered equal only if both were fully masked.
"""
return self._comparison(other, operator.ne)
def __add__(self, other):
"""
Add self to other, and return a new masked array.
"""
if self._delegate_binop(other):
return NotImplemented
return add(self, other)
def __radd__(self, other):
"""
Add other to self, and return a new masked array.
"""
# In analogy with __rsub__ and __rdiv__, use original order:
# we get here from `other + self`.
return add(other, self)
def __sub__(self, other):
"""
Subtract other from self, and return a new masked array.
"""
if self._delegate_binop(other):
return NotImplemented
return subtract(self, other)
def __rsub__(self, other):
"""
Subtract self from other, and return a new masked array.
"""
return subtract(other, self)
def __mul__(self, other):
"Multiply self by other, and return a new masked array."
if self._delegate_binop(other):
return NotImplemented
return multiply(self, other)
def __rmul__(self, other):
"""
Multiply other by self, and return a new masked array.
"""
# In analogy with __rsub__ and __rdiv__, use original order:
# we get here from `other * self`.
return multiply(other, self)
def __div__(self, other):
"""
Divide other into self, and return a new masked array.
"""
if self._delegate_binop(other):
return NotImplemented
return divide(self, other)
def __truediv__(self, other):
"""
Divide other into self, and return a new masked array.
"""
if self._delegate_binop(other):
return NotImplemented
return true_divide(self, other)
def __rtruediv__(self, other):
"""
Divide self into other, and return a new masked array.
"""
return true_divide(other, self)
def __floordiv__(self, other):
"""
Divide other into self, and return a new masked array.
"""
if self._delegate_binop(other):
return NotImplemented
return floor_divide(self, other)
def __rfloordiv__(self, other):
"""
Divide self into other, and return a new masked array.
"""
return floor_divide(other, self)
def __pow__(self, other):
"""
Raise self to the power other, masking the potential NaNs/Infs
"""
if self._delegate_binop(other):
return NotImplemented
return power(self, other)
def __rpow__(self, other):
"""
Raise other to the power self, masking the potential NaNs/Infs
"""
return power(other, self)
def __iadd__(self, other):
"""
Add other to self in-place.
"""
m = getmask(other)
if self._mask is nomask:
if m is not nomask and m.any():
self._mask = make_mask_none(self.shape, self.dtype)
self._mask += m
else:
if m is not nomask:
self._mask += m
self._data.__iadd__(np.where(self._mask, self.dtype.type(0),
getdata(other)))
return self
def __isub__(self, other):
"""
Subtract other from self in-place.
"""
m = getmask(other)
if self._mask is nomask:
if m is not nomask and m.any():
self._mask = make_mask_none(self.shape, self.dtype)
self._mask += m
elif m is not nomask:
self._mask += m
self._data.__isub__(np.where(self._mask, self.dtype.type(0),
getdata(other)))
return self
def __imul__(self, other):
"""
Multiply self by other in-place.
"""
m = getmask(other)
if self._mask is nomask:
if m is not nomask and m.any():
self._mask = make_mask_none(self.shape, self.dtype)
self._mask += m
elif m is not nomask:
self._mask += m
self._data.__imul__(np.where(self._mask, self.dtype.type(1),
getdata(other)))
return self
def __idiv__(self, other):
"""
Divide self by other in-place.
"""
other_data = getdata(other)
dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
other_mask = getmask(other)
new_mask = mask_or(other_mask, dom_mask)
# The following 3 lines control the domain filling
if dom_mask.any():
(_, fval) = ufunc_fills[np.divide]
other_data = np.where(dom_mask, fval, other_data)
self._mask |= new_mask
self._data.__idiv__(np.where(self._mask, self.dtype.type(1),
other_data))
return self
def __ifloordiv__(self, other):
"""
Floor divide self by other in-place.
"""
other_data = getdata(other)
dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
other_mask = getmask(other)
new_mask = mask_or(other_mask, dom_mask)
# The following 3 lines control the domain filling
if dom_mask.any():
(_, fval) = ufunc_fills[np.floor_divide]
other_data = np.where(dom_mask, fval, other_data)
self._mask |= new_mask
self._data.__ifloordiv__(np.where(self._mask, self.dtype.type(1),
other_data))
return self
def __itruediv__(self, other):
"""
True divide self by other in-place.
"""
other_data = getdata(other)
dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
other_mask = getmask(other)
new_mask = mask_or(other_mask, dom_mask)
# The following 3 lines control the domain filling
if dom_mask.any():
(_, fval) = ufunc_fills[np.true_divide]
other_data = np.where(dom_mask, fval, other_data)
self._mask |= new_mask
self._data.__itruediv__(np.where(self._mask, self.dtype.type(1),
other_data))
return self
def __ipow__(self, other):
"""
Raise self to the power other, in place.
"""
other_data = getdata(other)
other_mask = getmask(other)
with np.errstate(divide='ignore', invalid='ignore'):
self._data.__ipow__(np.where(self._mask, self.dtype.type(1),
other_data))
invalid = np.logical_not(np.isfinite(self._data))
if invalid.any():
if self._mask is not nomask:
self._mask |= invalid
else:
self._mask = invalid
np.copyto(self._data, self.fill_value, where=invalid)
new_mask = mask_or(other_mask, invalid)
self._mask = mask_or(self._mask, new_mask)
return self
def __float__(self):
"""
Convert to float.
"""
if self.size > 1:
raise TypeError("Only length-1 arrays can be converted "
"to Python scalars")
elif self._mask:
warnings.warn("Warning: converting a masked element to nan.", stacklevel=2)
return np.nan
return float(self.item())
def __int__(self):
"""
Convert to int.
"""
if self.size > 1:
raise TypeError("Only length-1 arrays can be converted "
"to Python scalars")
elif self._mask:
raise MaskError('Cannot convert masked element to a Python int.')
return int(self.item())
def __long__(self):
"""
Convert to long.
"""
if self.size > 1:
raise TypeError("Only length-1 arrays can be converted "
"to Python scalars")
elif self._mask:
raise MaskError('Cannot convert masked element to a Python long.')
return long(self.item())
def get_imag(self):
"""
Return the imaginary part of the masked array.
The returned array is a view on the imaginary part of the `MaskedArray`
whose `get_imag` method is called.
Parameters
----------
None
Returns
-------
result : MaskedArray
The imaginary part of the masked array.
See Also
--------
get_real, real, imag
Examples
--------
>>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False])
>>> x.get_imag()
masked_array(data = [1.0 -- 1.6],
mask = [False True False],
fill_value = 1e+20)
"""
result = self._data.imag.view(type(self))
result.__setmask__(self._mask)
return result
imag = property(fget=get_imag, doc="Imaginary part.")
def get_real(self):
"""
Return the real part of the masked array.
The returned array is a view on the real part of the `MaskedArray`
whose `get_real` method is called.
Parameters
----------
None
Returns
-------
result : MaskedArray
The real part of the masked array.
See Also
--------
get_imag, real, imag
Examples
--------
>>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False])
>>> x.get_real()
masked_array(data = [1.0 -- 3.45],
mask = [False True False],
fill_value = 1e+20)
"""
result = self._data.real.view(type(self))
result.__setmask__(self._mask)
return result
real = property(fget=get_real, doc="Real part")
def count(self, axis=None, keepdims=np._NoValue):
"""
Count the non-masked elements of the array along the given axis.
Parameters
----------
axis : None or int or tuple of ints, optional
Axis or axes along which the count is performed.
The default (`axis` = `None`) performs the count over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.10.0
If this is a tuple of ints, the count is performed on multiple
axes, instead of a single axis or all the axes as before.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the array.
Returns
-------
result : ndarray or scalar
An array with the same shape as the input array, with the specified
axis removed. If the array is a 0-d array, or if `axis` is None, a
scalar is returned.
See Also
--------
count_masked : Count masked elements in array or along a given axis.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.arange(6).reshape((2, 3))
>>> a[1, :] = ma.masked
>>> a
masked_array(data =
[[0 1 2]
[-- -- --]],
mask =
[[False False False]
[ True True True]],
fill_value = 999999)
>>> a.count()
3
When the `axis` keyword is specified an array of appropriate size is
returned.
>>> a.count(axis=0)
array([1, 1, 1])
>>> a.count(axis=1)
array([3, 0])
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
m = self._mask
# special case for matrices (we assume no other subclasses modify
# their dimensions)
if isinstance(self.data, np.matrix):
if m is nomask:
m = np.zeros(self.shape, dtype=np.bool_)
m = m.view(type(self.data))
if m is nomask:
# compare to _count_reduce_items in _methods.py
if self.shape is ():
if axis not in (None, 0):
raise np.AxisError(axis=axis, ndim=self.ndim)
return 1
elif axis is None:
if kwargs.get('keepdims', False):
return np.array(self.size, dtype=np.intp, ndmin=self.ndim)
return self.size
axes = normalize_axis_tuple(axis, self.ndim)
items = 1
for ax in axes:
items *= self.shape[ax]
if kwargs.get('keepdims', False):
out_dims = list(self.shape)
for a in axes:
out_dims[a] = 1
else:
out_dims = [d for n, d in enumerate(self.shape)
if n not in axes]
# make sure to return a 0-d array if axis is supplied
return np.full(out_dims, items, dtype=np.intp)
# take care of the masked singleton
if self is masked:
return 0
return (~m).sum(axis=axis, dtype=np.intp, **kwargs)
def ravel(self, order='C'):
"""
Returns a 1D version of self, as a view.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
The elements of `a` are read using this index order. 'C' means to
index the elements in C-like order, with the last axis index
changing fastest, back to the first axis index changing slowest.
'F' means to index the elements in Fortran-like index order, with
the first index changing fastest, and the last index changing
slowest. Note that the 'C' and 'F' options take no account of the
memory layout of the underlying array, and only refer to the order
of axis indexing. 'A' means to read the elements in Fortran-like
            index order if `a` is Fortran *contiguous* in memory, C-like order
otherwise. 'K' means to read the elements in the order they occur
in memory, except for reversing the data when strides are negative.
By default, 'C' index order is used.
Returns
-------
MaskedArray
Output view is of shape ``(self.size,)`` (or
``(np.ma.product(self.shape),)``).
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> print(x)
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
>>> print(x.ravel())
[1 -- 3 -- 5 -- 7 -- 9]
"""
r = ndarray.ravel(self._data, order=order).view(type(self))
r._update_from(self)
if self._mask is not nomask:
r._mask = ndarray.ravel(self._mask, order=order).reshape(r.shape)
else:
r._mask = nomask
return r
def reshape(self, *s, **kwargs):
"""
Give a new shape to the array without changing its data.
Returns a masked array containing the same data, but with a new shape.
The result is a view on the original array; if this is not possible, a
ValueError is raised.
Parameters
----------
shape : int or tuple of ints
The new shape should be compatible with the original shape. If an
integer is supplied, then the result will be a 1-D array of that
length.
order : {'C', 'F'}, optional
Determines whether the array data should be viewed as in C
(row-major) or FORTRAN (column-major) order.
Returns
-------
reshaped_array : array
A new view on the array.
See Also
--------
reshape : Equivalent function in the masked array module.
numpy.ndarray.reshape : Equivalent method on ndarray object.
numpy.reshape : Equivalent function in the NumPy module.
Notes
-----
The reshaping operation cannot guarantee that a copy will not be made,
to modify the shape in place, use ``a.shape = s``
Examples
--------
>>> x = np.ma.array([[1,2],[3,4]], mask=[1,0,0,1])
>>> print(x)
[[-- 2]
[3 --]]
>>> x = x.reshape((4,1))
>>> print(x)
[[--]
[2]
[3]
[--]]
"""
kwargs.update(order=kwargs.get('order', 'C'))
result = self._data.reshape(*s, **kwargs).view(type(self))
result._update_from(self)
mask = self._mask
if mask is not nomask:
result._mask = mask.reshape(*s, **kwargs)
return result
def resize(self, newshape, refcheck=True, order=False):
"""
.. warning::
This method does nothing, except raise a ValueError exception. A
masked array does not own its data and therefore cannot safely be
resized in place. Use the `numpy.ma.resize` function instead.
This method is difficult to implement safely and may be deprecated in
future releases of NumPy.
"""
# Note : the 'order' keyword looks broken, let's just drop it
errmsg = "A masked array does not own its data "\
"and therefore cannot be resized.\n" \
"Use the numpy.ma.resize function instead."
raise ValueError(errmsg)
def put(self, indices, values, mode='raise'):
"""
Set storage-indexed locations to corresponding values.
Sets self._data.flat[n] = values[n] for each n in indices.
If `values` is shorter than `indices` then it will repeat.
If `values` has some masked values, the initial mask is updated
in consequence, else the corresponding values are unmasked.
Parameters
----------
indices : 1-D array_like
Target indices, interpreted as integers.
values : array_like
Values to place in self._data copy at target indices.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
'raise' : raise an error.
'wrap' : wrap around.
'clip' : clip to the range.
Notes
-----
`values` can be a scalar or length 1 array.
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> print(x)
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
>>> x.put([0,4,8],[10,20,30])
>>> print(x)
[[10 -- 3]
[-- 20 --]
[7 -- 30]]
>>> x.put(4,999)
>>> print(x)
[[10 -- 3]
[-- 999 --]
[7 -- 30]]
"""
# Hard mask: Get rid of the values/indices that fall on masked data
if self._hardmask and self._mask is not nomask:
mask = self._mask[indices]
indices = narray(indices, copy=False)
values = narray(values, copy=False, subok=True)
values.resize(indices.shape)
indices = indices[~mask]
values = values[~mask]
self._data.put(indices, values, mode=mode)
# short circuit if neither self nor values are masked
if self._mask is nomask and getmask(values) is nomask:
return
m = getmaskarray(self)
if getmask(values) is nomask:
m.put(indices, False, mode=mode)
else:
m.put(indices, values._mask, mode=mode)
m = make_mask(m, copy=False, shrink=True)
self._mask = m
return
def ids(self):
"""
Return the addresses of the data and mask areas.
Parameters
----------
None
Examples
--------
>>> x = np.ma.array([1, 2, 3], mask=[0, 1, 1])
>>> x.ids()
(166670640, 166659832)
If the array has no mask, the address of `nomask` is returned. This address
is typically not close to the data in memory:
>>> x = np.ma.array([1, 2, 3])
>>> x.ids()
(166691080, 3083169284L)
"""
if self._mask is nomask:
return (self.ctypes.data, id(nomask))
return (self.ctypes.data, self._mask.ctypes.data)
def iscontiguous(self):
"""
Return a boolean indicating whether the data is contiguous.
Parameters
----------
None
Examples
--------
>>> x = np.ma.array([1, 2, 3])
>>> x.iscontiguous()
True
`iscontiguous` returns one of the flags of the masked array:
>>> x.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : True
OWNDATA : False
WRITEABLE : True
ALIGNED : True
WRITEBACKIFCOPY : False
UPDATEIFCOPY : False
"""
return self.flags['CONTIGUOUS']
def all(self, axis=None, out=None, keepdims=np._NoValue):
"""
Returns True if all elements evaluate to True.
The output array is masked where all the values along the given axis
        are masked: if the output would have been a scalar and all the
        values are masked, then the output is `masked`.
Refer to `numpy.all` for full documentation.
See Also
--------
ndarray.all : corresponding function for ndarrays
numpy.all : equivalent function
Examples
--------
>>> np.ma.array([1,2,3]).all()
True
>>> a = np.ma.array([1,2,3], mask=True)
>>> (a.all() is np.ma.masked)
True
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
mask = _check_mask_axis(self._mask, axis, **kwargs)
if out is None:
d = self.filled(True).all(axis=axis, **kwargs).view(type(self))
if d.ndim:
d.__setmask__(mask)
elif mask:
return masked
return d
self.filled(True).all(axis=axis, out=out, **kwargs)
if isinstance(out, MaskedArray):
if out.ndim or mask:
out.__setmask__(mask)
return out
def any(self, axis=None, out=None, keepdims=np._NoValue):
"""
Returns True if any of the elements of `a` evaluate to True.
Masked values are considered as False during computation.
Refer to `numpy.any` for full documentation.
See Also
--------
ndarray.any : corresponding function for ndarrays
numpy.any : equivalent function
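        Examples
        --------
        A minimal sketch mirroring `all` (a fully masked input yields the
        `masked` constant):
        >>> np.ma.array([0, 1, 0], mask=[0, 0, 1]).any()
        True
        >>> a = np.ma.array([1, 2, 3], mask=True)
        >>> (a.any() is np.ma.masked)
        True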
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
mask = _check_mask_axis(self._mask, axis, **kwargs)
if out is None:
d = self.filled(False).any(axis=axis, **kwargs).view(type(self))
if d.ndim:
d.__setmask__(mask)
elif mask:
d = masked
return d
self.filled(False).any(axis=axis, out=out, **kwargs)
if isinstance(out, MaskedArray):
if out.ndim or mask:
out.__setmask__(mask)
return out
def nonzero(self):
"""
Return the indices of unmasked elements that are not zero.
Returns a tuple of arrays, one for each dimension, containing the
indices of the non-zero elements in that dimension. The corresponding
non-zero values can be obtained with::
a[a.nonzero()]
To group the indices by element, rather than dimension, use
instead::
np.transpose(a.nonzero())
The result of this is always a 2d array, with a row for each non-zero
element.
Parameters
----------
None
Returns
-------
tuple_of_arrays : tuple
Indices of elements that are non-zero.
See Also
--------
numpy.nonzero :
Function operating on ndarrays.
flatnonzero :
Return indices that are non-zero in the flattened version of the input
array.
ndarray.nonzero :
Equivalent ndarray method.
count_nonzero :
Counts the number of non-zero elements in the input array.
Examples
--------
>>> import numpy.ma as ma
>>> x = ma.array(np.eye(3))
>>> x
masked_array(data =
[[ 1. 0. 0.]
[ 0. 1. 0.]
[ 0. 0. 1.]],
mask =
False,
fill_value=1e+20)
>>> x.nonzero()
(array([0, 1, 2]), array([0, 1, 2]))
Masked elements are ignored.
>>> x[1, 1] = ma.masked
>>> x
masked_array(data =
[[1.0 0.0 0.0]
[0.0 -- 0.0]
[0.0 0.0 1.0]],
mask =
[[False False False]
[False True False]
[False False False]],
fill_value=1e+20)
>>> x.nonzero()
(array([0, 2]), array([0, 2]))
Indices can also be grouped by element.
>>> np.transpose(x.nonzero())
array([[0, 0],
[2, 2]])
A common use for ``nonzero`` is to find the indices of an array, where
a condition is True. Given an array `a`, the condition `a` > 3 is a
boolean array and since False is interpreted as 0, ma.nonzero(a > 3)
yields the indices of the `a` where the condition is true.
>>> a = ma.array([[1,2,3],[4,5,6],[7,8,9]])
>>> a > 3
masked_array(data =
[[False False False]
[ True True True]
[ True True True]],
mask =
False,
fill_value=999999)
>>> ma.nonzero(a > 3)
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
The ``nonzero`` method of the condition array can also be called.
>>> (a > 3).nonzero()
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
"""
return narray(self.filled(0), copy=False).nonzero()
def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):
"""
(this docstring should be overwritten)
"""
#!!!: implement out + test!
m = self._mask
if m is nomask:
result = super(MaskedArray, self).trace(offset=offset, axis1=axis1,
axis2=axis2, out=out)
return result.astype(dtype)
else:
D = self.diagonal(offset=offset, axis1=axis1, axis2=axis2)
return D.astype(dtype).filled(0).sum(axis=-1, out=out)
trace.__doc__ = ndarray.trace.__doc__
def dot(self, b, out=None, strict=False):
"""
a.dot(b, out=None)
Masked dot product of two arrays. Note that `out` and `strict` are
located in different positions than in `ma.dot`. In order to
maintain compatibility with the functional version, it is
recommended that the optional arguments be treated as keyword only.
At some point that may be mandatory.
.. versionadded:: 1.10.0
Parameters
----------
b : masked_array_like
            Input array.
out : masked_array, optional
Output argument. This must have the exact kind that would be
returned if it was not used. In particular, it must have the
right type, must be C-contiguous, and its dtype must be the
dtype that would be returned for `ma.dot(a,b)`. This is a
performance feature. Therefore, if these conditions are not
met, an exception is raised, instead of attempting to be
flexible.
strict : bool, optional
Whether masked data are propagated (True) or set to 0 (False)
for the computation. Default is False. Propagating the mask
means that if a masked value appears in a row or column, the
whole row or column is considered masked.
.. versionadded:: 1.10.2
See Also
--------
numpy.ma.dot : equivalent function
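        Examples
        --------
        A hedged 1-D sketch (with the default ``strict=False``, masked
        entries contribute as 0; values are illustrative):
        >>> a = np.ma.array([1, 2, 3], mask=[0, 0, 1])
        >>> b = np.ma.array([1, 1, 1])
        >>> print(a.dot(b))
        3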
"""
return dot(self, b, out=out, strict=strict)
def sum(self, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Return the sum of the array elements over the given axis.
Masked elements are set to 0 internally.
Refer to `numpy.sum` for full documentation.
See Also
--------
ndarray.sum : corresponding function for ndarrays
numpy.sum : equivalent function
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> print(x)
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
>>> print(x.sum())
25
>>> print(x.sum(axis=1))
[4 5 16]
>>> print(x.sum(axis=0))
[8 5 12]
>>> print(type(x.sum(axis=0, dtype=np.int64)[0]))
<type 'numpy.int64'>
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
_mask = self._mask
newmask = _check_mask_axis(_mask, axis, **kwargs)
# No explicit output
if out is None:
result = self.filled(0).sum(axis, dtype=dtype, **kwargs)
rndim = getattr(result, 'ndim', 0)
if rndim:
result = result.view(type(self))
result.__setmask__(newmask)
elif newmask:
result = masked
return result
# Explicit output
result = self.filled(0).sum(axis, dtype=dtype, out=out, **kwargs)
if isinstance(out, MaskedArray):
outmask = getmask(out)
if (outmask is nomask):
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
return out
def cumsum(self, axis=None, dtype=None, out=None):
"""
Return the cumulative sum of the array elements over the given axis.
Masked values are set to 0 internally during the computation.
However, their position is saved, and the result will be masked at
the same locations.
Refer to `numpy.cumsum` for full documentation.
Notes
-----
The mask is lost if `out` is not a valid :class:`MaskedArray` !
Arithmetic is modular when using integer types, and no error is
raised on overflow.
See Also
--------
ndarray.cumsum : corresponding function for ndarrays
numpy.cumsum : equivalent function
Examples
--------
>>> marr = np.ma.array(np.arange(10), mask=[0,0,0,1,1,1,0,0,0,0])
>>> print(marr.cumsum())
[0 1 3 -- -- -- 9 16 24 33]
"""
result = self.filled(0).cumsum(axis=axis, dtype=dtype, out=out)
if out is not None:
if isinstance(out, MaskedArray):
out.__setmask__(self.mask)
return out
result = result.view(type(self))
result.__setmask__(self._mask)
return result
def prod(self, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Return the product of the array elements over the given axis.
Masked elements are set to 1 internally for computation.
Refer to `numpy.prod` for full documentation.
Notes
-----
Arithmetic is modular when using integer types, and no error is raised
on overflow.
See Also
--------
ndarray.prod : corresponding function for ndarrays
numpy.prod : equivalent function
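        Examples
        --------
        A small sketch (masked entries are replaced by 1 internally):
        >>> x = np.ma.array([2, 3, 4], mask=[0, 1, 0])
        >>> print(x.prod())
        8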
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
_mask = self._mask
newmask = _check_mask_axis(_mask, axis, **kwargs)
# No explicit output
if out is None:
result = self.filled(1).prod(axis, dtype=dtype, **kwargs)
rndim = getattr(result, 'ndim', 0)
if rndim:
result = result.view(type(self))
result.__setmask__(newmask)
elif newmask:
result = masked
return result
# Explicit output
result = self.filled(1).prod(axis, dtype=dtype, out=out, **kwargs)
if isinstance(out, MaskedArray):
outmask = getmask(out)
if (outmask is nomask):
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
return out
product = prod
def cumprod(self, axis=None, dtype=None, out=None):
"""
Return the cumulative product of the array elements over the given axis.
Masked values are set to 1 internally during the computation.
However, their position is saved, and the result will be masked at
the same locations.
Refer to `numpy.cumprod` for full documentation.
Notes
-----
The mask is lost if `out` is not a valid MaskedArray !
Arithmetic is modular when using integer types, and no error is
raised on overflow.
See Also
--------
ndarray.cumprod : corresponding function for ndarrays
numpy.cumprod : equivalent function
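        Examples
        --------
        A short sketch; masked positions stay masked in the result:
        >>> marr = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 0])
        >>> print(marr.cumprod())
        [1 -- 3 12]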
"""
result = self.filled(1).cumprod(axis=axis, dtype=dtype, out=out)
if out is not None:
if isinstance(out, MaskedArray):
out.__setmask__(self._mask)
return out
result = result.view(type(self))
result.__setmask__(self._mask)
return result
def mean(self, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Returns the average of the array elements along given axis.
Masked entries are ignored, and result elements which are not
finite will be masked.
Refer to `numpy.mean` for full documentation.
See Also
--------
ndarray.mean : corresponding function for ndarrays
numpy.mean : Equivalent function
numpy.ma.average: Weighted average.
Examples
--------
>>> a = np.ma.array([1,2,3], mask=[False, False, True])
>>> a
masked_array(data = [1 2 --],
mask = [False False True],
fill_value = 999999)
>>> a.mean()
1.5
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
if self._mask is nomask:
result = super(MaskedArray, self).mean(axis=axis,
dtype=dtype, **kwargs)[()]
else:
dsum = self.sum(axis=axis, dtype=dtype, **kwargs)
cnt = self.count(axis=axis, **kwargs)
if cnt.shape == () and (cnt == 0):
result = masked
else:
result = dsum * 1. / cnt
if out is not None:
out.flat = result
if isinstance(out, MaskedArray):
outmask = getmask(out)
if (outmask is nomask):
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = getmask(result)
return out
return result
def anom(self, axis=None, dtype=None):
"""
Compute the anomalies (deviations from the arithmetic mean)
along the given axis.
Returns an array of anomalies, with the same shape as the input and
where the arithmetic mean is computed along the given axis.
Parameters
----------
axis : int, optional
Axis over which the anomalies are taken.
The default is to use the mean of the flattened array as reference.
dtype : dtype, optional
Type to use in computing the variance. For arrays of integer type
the default is float32; for arrays of float types it is the same as
the array type.
See Also
--------
mean : Compute the mean of the array.
Examples
--------
>>> a = np.ma.array([1,2,3])
>>> a.anom()
masked_array(data = [-1. 0. 1.],
mask = False,
fill_value = 1e+20)
"""
m = self.mean(axis, dtype)
if m is masked:
return m
if not axis:
return (self - m)
else:
return (self - expand_dims(m, axis))
def var(self, axis=None, dtype=None, out=None, ddof=0,
keepdims=np._NoValue):
"""
Returns the variance of the array elements along given axis.
Masked entries are ignored, and result elements which are not
finite will be masked.
Refer to `numpy.var` for full documentation.
See Also
--------
ndarray.var : corresponding function for ndarrays
numpy.var : Equivalent function
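        Examples
        --------
        A minimal sketch (masked entries are ignored; values are
        illustrative):
        >>> x = np.ma.array([1, 2, 3, 4], mask=[0, 0, 1, 1])
        >>> print(x.var())
        0.25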
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
# Easy case: nomask, business as usual
if self._mask is nomask:
ret = super(MaskedArray, self).var(axis=axis, dtype=dtype, out=out,
ddof=ddof, **kwargs)[()]
if out is not None:
if isinstance(out, MaskedArray):
out.__setmask__(nomask)
return out
return ret
# Some data are masked, yay!
cnt = self.count(axis=axis, **kwargs) - ddof
danom = self - self.mean(axis, dtype, keepdims=True)
if iscomplexobj(self):
danom = umath.absolute(danom) ** 2
else:
danom *= danom
dvar = divide(danom.sum(axis, **kwargs), cnt).view(type(self))
# Apply the mask if it's not a scalar
if dvar.ndim:
dvar._mask = mask_or(self._mask.all(axis, **kwargs), (cnt <= 0))
dvar._update_from(self)
elif getmask(dvar):
# Make sure that masked is returned when the scalar is masked.
dvar = masked
if out is not None:
if isinstance(out, MaskedArray):
out.flat = 0
out.__setmask__(True)
elif out.dtype.kind in 'biu':
errmsg = "Masked data information would be lost in one or "\
"more location."
raise MaskError(errmsg)
else:
out.flat = np.nan
return out
        # In case we have an explicit output
if out is not None:
# Set the data
out.flat = dvar
# Set the mask if needed
if isinstance(out, MaskedArray):
out.__setmask__(dvar.mask)
return out
return dvar
var.__doc__ = np.var.__doc__
def std(self, axis=None, dtype=None, out=None, ddof=0,
keepdims=np._NoValue):
"""
Returns the standard deviation of the array elements along given axis.
Masked entries are ignored.
Refer to `numpy.std` for full documentation.
See Also
--------
ndarray.std : corresponding function for ndarrays
numpy.std : Equivalent function
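        Examples
        --------
        A minimal sketch (the square root of `var`, with masked entries
        ignored):
        >>> x = np.ma.array([1, 2, 3, 4], mask=[0, 0, 1, 1])
        >>> print(x.std())
        0.5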
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
dvar = self.var(axis, dtype, out, ddof, **kwargs)
if dvar is not masked:
if out is not None:
np.power(out, 0.5, out=out, casting='unsafe')
return out
dvar = sqrt(dvar)
return dvar
def round(self, decimals=0, out=None):
"""
Return each element rounded to the given number of decimals.
Refer to `numpy.around` for full documentation.
See Also
--------
ndarray.around : corresponding function for ndarrays
numpy.around : equivalent function
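        Examples
        --------
        A brief sketch; the mask is carried over to the rounded result:
        >>> x = np.ma.array([1.234, 2.678], mask=[0, 1])
        >>> print(x.round(1))
        [1.2 --]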
"""
result = self._data.round(decimals=decimals, out=out).view(type(self))
if result.ndim > 0:
result._mask = self._mask
result._update_from(self)
elif self._mask:
# Return masked when the scalar is masked
result = masked
# No explicit output: we're done
if out is None:
return result
if isinstance(out, MaskedArray):
out.__setmask__(self._mask)
return out
def argsort(self, axis=np._NoValue, kind='quicksort', order=None,
endwith=True, fill_value=None):
"""
Return an ndarray of indices that sort the array along the
specified axis. Masked values are filled beforehand to
`fill_value`.
Parameters
----------
axis : int, optional
Axis along which to sort. If None, the default, the flattened array
is used.
.. versionchanged:: 1.13.0
Previously, the default was documented to be -1, but that was
in error. At some future date, the default will change to -1, as
originally intended.
Until then, the axis should be given explicitly when
``arr.ndim > 1``, to avoid a FutureWarning.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm.
order : list, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. Not all fields need be
specified.
endwith : {True, False}, optional
Whether missing values (if any) should be treated as the largest values
(True) or the smallest values (False)
When the array contains unmasked values at the same extremes of the
datatype, the ordering of these values and the masked values is
undefined.
fill_value : {var}, optional
Value used internally for the masked values.
If ``fill_value`` is not None, it supersedes ``endwith``.
Returns
-------
index_array : ndarray, int
Array of indices that sort `a` along the specified axis.
In other words, ``a[index_array]`` yields a sorted `a`.
See Also
--------
MaskedArray.sort : Describes sorting algorithms used.
lexsort : Indirect stable sort with multiple keys.
ndarray.sort : Inplace sort.
Notes
-----
See `sort` for notes on the different sorting algorithms.
Examples
--------
>>> a = np.ma.array([3,2,1], mask=[False, False, True])
>>> a
masked_array(data = [3 2 --],
mask = [False False True],
fill_value = 999999)
>>> a.argsort()
array([1, 0, 2])
"""
# 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default
if axis is np._NoValue:
axis = _deprecate_argsort_axis(self)
if fill_value is None:
if endwith:
# nan > inf
if np.issubdtype(self.dtype, np.floating):
fill_value = np.nan
else:
fill_value = minimum_fill_value(self)
else:
fill_value = maximum_fill_value(self)
filled = self.filled(fill_value)
return filled.argsort(axis=axis, kind=kind, order=order)
def argmin(self, axis=None, fill_value=None, out=None):
"""
Return array of indices to the minimum values along the given axis.
Parameters
----------
axis : {None, integer}
If None, the index is into the flattened array, otherwise along
the specified axis
fill_value : {var}, optional
Value used to fill in the masked values. If None, the output of
minimum_fill_value(self._data) is used instead.
out : {None, array}, optional
Array into which the result can be placed. Its type is preserved
and it must be of the right shape to hold the output.
Returns
-------
ndarray or scalar
            If multi-dimensional input, returns a new ndarray of indices of
            the minimum values along the given axis. Otherwise, returns a
            scalar index of the minimum value.
Examples
--------
        >>> x = np.ma.array(np.arange(4), mask=[1,1,0,0])
>>> x.shape = (2,2)
>>> print(x)
[[-- --]
[2 3]]
>>> print(x.argmin(axis=0, fill_value=-1))
[0 0]
>>> print(x.argmin(axis=0, fill_value=9))
[1 1]
"""
if fill_value is None:
fill_value = minimum_fill_value(self)
d = self.filled(fill_value).view(ndarray)
return d.argmin(axis, out=out)
def argmax(self, axis=None, fill_value=None, out=None):
"""
Returns array of indices of the maximum values along the given axis.
Masked values are treated as if they had the value fill_value.
Parameters
----------
axis : {None, integer}
If None, the index is into the flattened array, otherwise along
the specified axis
fill_value : {var}, optional
Value used to fill in the masked values. If None, the output of
maximum_fill_value(self._data) is used instead.
out : {None, array}, optional
Array into which the result can be placed. Its type is preserved
and it must be of the right shape to hold the output.
Returns
-------
index_array : {integer_array}
Examples
--------
>>> a = np.arange(6).reshape(2,3)
>>> a.argmax()
5
>>> a.argmax(0)
array([1, 1, 1])
>>> a.argmax(1)
array([2, 2])
"""
if fill_value is None:
fill_value = maximum_fill_value(self._data)
d = self.filled(fill_value).view(ndarray)
return d.argmax(axis, out=out)
def sort(self, axis=-1, kind='quicksort', order=None,
endwith=True, fill_value=None):
"""
Sort the array, in-place
Parameters
----------
a : array_like
Array to be sorted.
axis : int, optional
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm. Default is 'quicksort'.
order : list, optional
When `a` is a structured array, this argument specifies which fields
to compare first, second, and so on. This list does not need to
include all of the fields.
endwith : {True, False}, optional
Whether missing values (if any) should be treated as the largest values
(True) or the smallest values (False)
When the array contains unmasked values at the same extremes of the
datatype, the ordering of these values and the masked values is
undefined.
fill_value : {var}, optional
Value used internally for the masked values.
If ``fill_value`` is not None, it supersedes ``endwith``.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
ndarray.sort : Method to sort an array in-place.
argsort : Indirect sort.
lexsort : Indirect stable sort on multiple keys.
searchsorted : Find elements in a sorted array.
Notes
-----
See ``sort`` for notes on the different sorting algorithms.
Examples
--------
        >>> a = np.ma.array([1, 2, 5, 4, 3], mask=[0, 1, 0, 1, 0])
>>> # Default
>>> a.sort()
>>> print(a)
[1 3 5 -- --]
        >>> a = np.ma.array([1, 2, 5, 4, 3], mask=[0, 1, 0, 1, 0])
>>> # Put missing values in the front
>>> a.sort(endwith=False)
>>> print(a)
[-- -- 1 3 5]
        >>> a = np.ma.array([1, 2, 5, 4, 3], mask=[0, 1, 0, 1, 0])
>>> # fill_value takes over endwith
>>> a.sort(endwith=False, fill_value=3)
>>> print(a)
[1 -- -- 3 5]
"""
if self._mask is nomask:
ndarray.sort(self, axis=axis, kind=kind, order=order)
return
if self is masked:
return
sidx = self.argsort(axis=axis, kind=kind, order=order,
fill_value=fill_value, endwith=endwith)
self[...] = np.take_along_axis(self, sidx, axis=axis)
def min(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue):
"""
Return the minimum along a given axis.
Parameters
----------
axis : {None, int}, optional
Axis along which to operate. By default, ``axis`` is None and the
flattened input is used.
out : array_like, optional
Alternative output array in which to place the result. Must be of
the same shape and buffer length as the expected output.
fill_value : {var}, optional
Value used to fill in the masked values.
If None, use the output of `minimum_fill_value`.
Returns
-------
amin : array_like
New array holding the result.
If ``out`` was specified, ``out`` is returned.
See Also
--------
minimum_fill_value
Returns the minimum filling value for a given datatype.
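        Examples
        --------
        A minimal sketch (masked entries are filled before the reduction, so
        they never win):
        >>> x = np.ma.array([5, 2, 9], mask=[0, 0, 1])
        >>> print(x.min())
        2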
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
_mask = self._mask
newmask = _check_mask_axis(_mask, axis, **kwargs)
if fill_value is None:
fill_value = minimum_fill_value(self)
# No explicit output
if out is None:
result = self.filled(fill_value).min(
axis=axis, out=out, **kwargs).view(type(self))
if result.ndim:
# Set the mask
result.__setmask__(newmask)
# Get rid of Infs
if newmask.ndim:
np.copyto(result, result.fill_value, where=newmask)
elif newmask:
result = masked
return result
# Explicit output
result = self.filled(fill_value).min(axis=axis, out=out, **kwargs)
if isinstance(out, MaskedArray):
outmask = getmask(out)
if (outmask is nomask):
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
else:
if out.dtype.kind in 'biu':
errmsg = "Masked data information would be lost in one or more"\
" location."
raise MaskError(errmsg)
np.copyto(out, np.nan, where=newmask)
return out
# unique to masked arrays
def mini(self, axis=None):
"""
Return the array minimum along the specified axis.
.. deprecated:: 1.13.0
This function is identical to both:
* ``self.min(keepdims=True, axis=axis).squeeze(axis=axis)``
* ``np.ma.minimum.reduce(self, axis=axis)``
Typically though, ``self.min(axis=axis)`` is sufficient.
Parameters
----------
axis : int, optional
The axis along which to find the minima. Default is None, in which case
the minimum value in the whole array is returned.
Returns
-------
min : scalar or MaskedArray
If `axis` is None, the result is a scalar. Otherwise, if `axis` is
given and the array is at least 2-D, the result is a masked array with
dimension one smaller than the array on which `mini` is called.
Examples
--------
>>> x = np.ma.array(np.arange(6), mask=[0 ,1, 0, 0, 0 ,1]).reshape(3, 2)
>>> print(x)
[[0 --]
[2 3]
[4 --]]
>>> x.mini()
0
>>> x.mini(axis=0)
masked_array(data = [0 3],
mask = [False False],
fill_value = 999999)
>>> print(x.mini(axis=1))
[0 2 4]
There is a small difference between `mini` and `min`:
>>> x[:,1].mini(axis=0)
masked_array(data = --,
mask = True,
fill_value = 999999)
>>> x[:,1].min(axis=0)
masked
"""
# 2016-04-13, 1.13.0, gh-8764
        warnings.warn(
            "`mini` is deprecated; use the `min` method or "
            "`np.ma.minimum.reduce` instead.",
            DeprecationWarning, stacklevel=2)
return minimum.reduce(self, axis)
def max(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue):
"""
Return the maximum along a given axis.
Parameters
----------
axis : {None, int}, optional
Axis along which to operate. By default, ``axis`` is None and the
flattened input is used.
out : array_like, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
fill_value : {var}, optional
Value used to fill in the masked values.
If None, use the output of maximum_fill_value().
Returns
-------
amax : array_like
New array holding the result.
If ``out`` was specified, ``out`` is returned.
See Also
--------
maximum_fill_value
Returns the maximum filling value for a given datatype.
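        Examples
        --------
        A minimal sketch (masked entries are filled before the reduction, so
        they never win):
        >>> x = np.ma.array([5, 2, 9], mask=[0, 0, 1])
        >>> print(x.max())
        5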
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
_mask = self._mask
newmask = _check_mask_axis(_mask, axis, **kwargs)
if fill_value is None:
fill_value = maximum_fill_value(self)
# No explicit output
if out is None:
result = self.filled(fill_value).max(
axis=axis, out=out, **kwargs).view(type(self))
if result.ndim:
# Set the mask
result.__setmask__(newmask)
# Get rid of Infs
if newmask.ndim:
np.copyto(result, result.fill_value, where=newmask)
elif newmask:
result = masked
return result
# Explicit output
result = self.filled(fill_value).max(axis=axis, out=out, **kwargs)
if isinstance(out, MaskedArray):
outmask = getmask(out)
if (outmask is nomask):
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
else:
if out.dtype.kind in 'biu':
errmsg = "Masked data information would be lost in one or more"\
" location."
raise MaskError(errmsg)
np.copyto(out, np.nan, where=newmask)
return out
def ptp(self, axis=None, out=None, fill_value=None, keepdims=False):
"""
Return (maximum - minimum) along the given dimension
(i.e. peak-to-peak value).
Parameters
----------
axis : {None, int}, optional
Axis along which to find the peaks. If None (default) the
flattened array is used.
out : {None, array_like}, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary.
fill_value : {var}, optional
Value used to fill in the masked values.
Returns
-------
ptp : ndarray.
A new array holding the result, unless ``out`` was
specified, in which case a reference to ``out`` is returned.
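        Examples
        --------
        A small sketch; masked entries contribute to neither extremum:
        >>> x = np.ma.array([1, 5, 3, 9], mask=[0, 0, 0, 1])
        >>> print(x.ptp())
        4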
"""
if out is None:
result = self.max(axis=axis, fill_value=fill_value,
keepdims=keepdims)
result -= self.min(axis=axis, fill_value=fill_value,
keepdims=keepdims)
return result
out.flat = self.max(axis=axis, out=out, fill_value=fill_value,
keepdims=keepdims)
min_value = self.min(axis=axis, fill_value=fill_value,
keepdims=keepdims)
np.subtract(out, min_value, out=out, casting='unsafe')
return out
def partition(self, *args, **kwargs):
warnings.warn("Warning: 'partition' will ignore the 'mask' "
"of the {}.".format(self.__class__.__name__),
stacklevel=2)
return super(MaskedArray, self).partition(*args, **kwargs)
def argpartition(self, *args, **kwargs):
warnings.warn("Warning: 'argpartition' will ignore the 'mask' "
"of the {}.".format(self.__class__.__name__),
stacklevel=2)
return super(MaskedArray, self).argpartition(*args, **kwargs)
    def take(self, indices, axis=None, out=None, mode='raise'):
        """
        Take elements from the masked array along an axis.
        Refer to `numpy.take` for full documentation; the mask is taken
        along with the data, and masked indices produce masked results.
        """
(_data, _mask) = (self._data, self._mask)
cls = type(self)
# Make sure the indices are not masked
maskindices = getmask(indices)
if maskindices is not nomask:
indices = indices.filled(0)
# Get the data, promoting scalars to 0d arrays with [...] so that
# .view works correctly
if out is None:
out = _data.take(indices, axis=axis, mode=mode)[...].view(cls)
else:
np.take(_data, indices, axis=axis, mode=mode, out=out)
# Get the mask
if isinstance(out, MaskedArray):
if _mask is nomask:
outmask = maskindices
else:
outmask = _mask.take(indices, axis=axis, mode=mode)
outmask |= maskindices
out.__setmask__(outmask)
# demote 0d arrays back to scalars, for consistency with ndarray.take
return out[()]
# Array methods
clip = _arraymethod('clip', onmask=False)
copy = _arraymethod('copy')
diagonal = _arraymethod('diagonal')
flatten = _arraymethod('flatten')
repeat = _arraymethod('repeat')
squeeze = _arraymethod('squeeze')
swapaxes = _arraymethod('swapaxes')
T = property(fget=lambda self: self.transpose())
transpose = _arraymethod('transpose')
def tolist(self, fill_value=None):
"""
Return the data portion of the masked array as a hierarchical Python list.
Data items are converted to the nearest compatible Python type.
Masked values are converted to `fill_value`. If `fill_value` is None,
the corresponding entries in the output list will be ``None``.
Parameters
----------
fill_value : scalar, optional
The value to use for invalid entries. Default is None.
Returns
-------
result : list
The Python list representation of the masked array.
Examples
--------
>>> x = np.ma.array([[1,2,3], [4,5,6], [7,8,9]], mask=[0] + [1,0]*4)
>>> x.tolist()
[[1, None, 3], [None, 5, None], [7, None, 9]]
>>> x.tolist(-999)
[[1, -999, 3], [-999, 5, -999], [7, -999, 9]]
"""
_mask = self._mask
# No mask ? Just return .data.tolist ?
if _mask is nomask:
return self._data.tolist()
# Explicit fill_value: fill the array and get the list
if fill_value is not None:
return self.filled(fill_value).tolist()
# Structured array.
names = self.dtype.names
if names:
result = self._data.astype([(_, object) for _ in names])
for n in names:
result[n][_mask[n]] = None
return result.tolist()
# Standard arrays.
if _mask is nomask:
return [None]
# Set temps to save time when dealing w/ marrays.
inishape = self.shape
result = np.array(self._data.ravel(), dtype=object)
result[_mask.ravel()] = None
result.shape = inishape
return result.tolist()
def tostring(self, fill_value=None, order='C'):
"""
This function is a compatibility alias for tobytes. Despite its name it
returns bytes not strings.
"""
        return self.tobytes(fill_value, order=order)
def tobytes(self, fill_value=None, order='C'):
"""
Return the array data as a string containing the raw bytes in the array.
The array is filled with a fill value before the string conversion.
.. versionadded:: 1.9.0
Parameters
----------
fill_value : scalar, optional
Value used to fill in the masked values. Default is None, in which
case `MaskedArray.fill_value` is used.
order : {'C','F','A'}, optional
Order of the data item in the copy. Default is 'C'.
- 'C' -- C order (row major).
- 'F' -- Fortran order (column major).
- 'A' -- Any, current order of array.
- None -- Same as 'A'.
See Also
--------
ndarray.tobytes
tolist, tofile
Notes
-----
As for `ndarray.tobytes`, information about the shape, dtype, etc.,
but also about `fill_value`, will be lost.
Examples
--------
>>> x = np.ma.array(np.array([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]])
>>> x.tobytes()
'\\x01\\x00\\x00\\x00?B\\x0f\\x00?B\\x0f\\x00\\x04\\x00\\x00\\x00'
"""
return self.filled(fill_value).tobytes(order=order)
def tofile(self, fid, sep="", format="%s"):
"""
Save a masked array to a file in binary format.
.. warning::
This function is not implemented yet.
Raises
------
NotImplementedError
When `tofile` is called.
"""
raise NotImplementedError("MaskedArray.tofile() not implemented yet.")
def toflex(self):
"""
Transforms a masked array into a flexible-type array.
The flexible type array that is returned will have two fields:
* the ``_data`` field stores the ``_data`` part of the array.
* the ``_mask`` field stores the ``_mask`` part of the array.
Parameters
----------
None
Returns
-------
record : ndarray
A new flexible-type `ndarray` with two fields: the first element
containing a value, the second element containing the corresponding
mask boolean. The returned record shape matches self.shape.
Notes
-----
A side-effect of transforming a masked array into a flexible `ndarray` is
that meta information (``fill_value``, ...) will be lost.
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> print(x)
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
>>> print(x.toflex())
[[(1, False) (2, True) (3, False)]
[(4, True) (5, False) (6, True)]
[(7, False) (8, True) (9, False)]]
"""
# Get the basic dtype.
ddtype = self.dtype
# Make sure we have a mask
_mask = self._mask
        if _mask is nomask:
            _mask = make_mask_none(self.shape, ddtype)
        # And get its dtype
        mdtype = _mask.dtype
        record = np.ndarray(shape=self.shape,
                            dtype=[('_data', ddtype), ('_mask', mdtype)])
        record['_data'] = self._data
        record['_mask'] = _mask
return record
torecords = toflex
# Pickling
def __getstate__(self):
"""Return the internal state of the masked array, for pickling
purposes.
"""
cf = 'CF'[self.flags.fnc]
data_state = super(MaskedArray, self).__reduce__()[2]
return data_state + (getmaskarray(self).tobytes(cf), self._fill_value)
    def __setstate__(self, state):
        """Restore the internal state of the masked array, for
        pickling purposes. ``state`` is typically the output of
        ``__getstate__``, and is a 7-tuple:
        - pickle version number
        - a tuple giving the shape of the data
        - a typecode for the data
        - a flag indicating whether the data is Fortran-ordered
        - a binary string for the data
        - a binary string for the mask
        - the fill value.
        """
(_, shp, typ, isf, raw, msk, flv) = state
super(MaskedArray, self).__setstate__((shp, typ, isf, raw))
self._mask.__setstate__((shp, make_mask_descr(typ), isf, msk))
self.fill_value = flv
def __reduce__(self):
"""Return a 3-tuple for pickling a MaskedArray.
"""
return (_mareconstruct,
(self.__class__, self._baseclass, (0,), 'b',),
self.__getstate__())
def __deepcopy__(self, memo=None):
from copy import deepcopy
copied = MaskedArray.__new__(type(self), self, copy=True)
if memo is None:
memo = {}
memo[id(self)] = copied
for (k, v) in self.__dict__.items():
copied.__dict__[k] = deepcopy(v, memo)
return copied
def _mareconstruct(subtype, baseclass, baseshape, basetype,):
"""Internal function that builds a new MaskedArray from the
information stored in a pickle.
"""
_data = ndarray.__new__(baseclass, baseshape, basetype)
_mask = ndarray.__new__(ndarray, baseshape, make_mask_descr(basetype))
return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,)
class mvoid(MaskedArray):
"""
    Fake a 'void' object to use for masked arrays with structured dtypes.
"""
def __new__(self, data, mask=nomask, dtype=None, fill_value=None,
hardmask=False, copy=False, subok=True):
_data = np.array(data, copy=copy, subok=subok, dtype=dtype)
_data = _data.view(self)
_data._hardmask = hardmask
if mask is not nomask:
if isinstance(mask, np.void):
_data._mask = mask
else:
try:
# Mask is already a 0D array
_data._mask = np.void(mask)
except TypeError:
# Transform the mask to a void
mdtype = make_mask_descr(dtype)
_data._mask = np.array(mask, dtype=mdtype)[()]
if fill_value is not None:
_data.fill_value = fill_value
return _data
def _get_data(self):
# Make sure that the _data part is a np.void
return super(mvoid, self)._data[()]
_data = property(fget=_get_data)
def __getitem__(self, indx):
"""
Get the index.
"""
m = self._mask
if isinstance(m[indx], ndarray):
# Can happen when indx is a multi-dimensional field:
# A = ma.masked_array(data=[([0,1],)], mask=[([True,
# False],)], dtype=[("A", ">i2", (2,))])
# x = A[0]; y = x["A"]; then y.mask["A"].size==2
# and we can not say masked/unmasked.
# The result is no longer mvoid!
# See also issue #6724.
return masked_array(
data=self._data[indx], mask=m[indx],
fill_value=self._fill_value[indx],
hard_mask=self._hardmask)
if m is not nomask and m[indx]:
return masked
return self._data[indx]
def __setitem__(self, indx, value):
self._data[indx] = value
if self._hardmask:
self._mask[indx] |= getattr(value, "_mask", False)
else:
self._mask[indx] = getattr(value, "_mask", False)
def __str__(self):
m = self._mask
if m is nomask:
return str(self._data)
rdtype = _replace_dtype_fields(self._data.dtype, "O")
data_arr = super(mvoid, self)._data
res = data_arr.astype(rdtype)
_recursive_printoption(res, self._mask, masked_print_option)
return str(res)
__repr__ = __str__
def __iter__(self):
"Defines an iterator for mvoid"
(_data, _mask) = (self._data, self._mask)
if _mask is nomask:
for d in _data:
yield d
else:
for (d, m) in zip(_data, _mask):
if m:
yield masked
else:
yield d
def __len__(self):
return self._data.__len__()
def filled(self, fill_value=None):
"""
Return a copy with masked fields filled with a given value.
Parameters
----------
fill_value : scalar, optional
The value to use for invalid entries (None by default).
If None, the `fill_value` attribute is used instead.
Returns
-------
filled_void
A `np.void` object
See Also
--------
MaskedArray.filled
"""
return asarray(self).filled(fill_value)[()]
def tolist(self):
"""
Transforms the mvoid object into a tuple.
Masked fields are replaced by None.
Returns
-------
returned_tuple
Tuple of fields
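        A small sketch with a structured scalar (field names are
        illustrative):
        >>> v = np.ma.array([(1, 2.0)], mask=[(0, 1)],
        ...                 dtype=[('a', int), ('b', float)])[0]
        >>> v.tolist()
        (1, None)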
"""
_mask = self._mask
if _mask is nomask:
return self._data.tolist()
result = []
for (d, m) in zip(self._data, self._mask):
if m:
result.append(None)
else:
# .item() makes sure we return a standard Python object
result.append(d.item())
return tuple(result)
##############################################################################
# Shortcuts #
##############################################################################
def isMaskedArray(x):
"""
Test whether input is an instance of MaskedArray.
This function returns True if `x` is an instance of MaskedArray
and returns False otherwise. Any object is accepted as input.
Parameters
----------
x : object
Object to test.
Returns
-------
result : bool
True if `x` is a MaskedArray.
See Also
--------
isMA : Alias to isMaskedArray.
isarray : Alias to isMaskedArray.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.eye(3, 3)
>>> a
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
>>> m = ma.masked_values(a, 0)
>>> m
masked_array(data =
[[1.0 -- --]
[-- 1.0 --]
[-- -- 1.0]],
mask =
[[False True True]
[ True False True]
[ True True False]],
fill_value=0.0)
>>> ma.isMaskedArray(a)
False
>>> ma.isMaskedArray(m)
True
>>> ma.isMaskedArray([0, 1, 2])
False
"""
return isinstance(x, MaskedArray)
isarray = isMaskedArray
isMA = isMaskedArray # backward compatibility
class MaskedConstant(MaskedArray):
# the lone np.ma.masked instance
__singleton = None
@classmethod
def __has_singleton(cls):
# second case ensures `cls.__singleton` is not just a view on the
# superclass singleton
return cls.__singleton is not None and type(cls.__singleton) is cls
def __new__(cls):
if not cls.__has_singleton():
# We define the masked singleton as a float for higher precedence.
# Note that it can be tricky sometimes w/ type comparison
data = np.array(0.)
mask = np.array(True)
# prevent any modifications
data.flags.writeable = False
mask.flags.writeable = False
# don't fall back on MaskedArray.__new__(MaskedConstant), since
# that might confuse it - this way, the construction is entirely
# within our control
cls.__singleton = MaskedArray(data, mask=mask).view(cls)
return cls.__singleton
def __array_finalize__(self, obj):
if not self.__has_singleton():
# this handles the `.view` in __new__, which we want to copy across
# properties normally
return super(MaskedConstant, self).__array_finalize__(obj)
elif self is self.__singleton:
# not clear how this can happen, play it safe
pass
else:
# everywhere else, we want to downcast to MaskedArray, to prevent a
# duplicate maskedconstant.
self.__class__ = MaskedArray
MaskedArray.__array_finalize__(self, obj)
def __array_prepare__(self, obj, context=None):
return self.view(MaskedArray).__array_prepare__(obj, context)
def __array_wrap__(self, obj, context=None):
return self.view(MaskedArray).__array_wrap__(obj, context)
def __str__(self):
return str(masked_print_option._display)
if sys.version_info.major < 3:
def __unicode__(self):
return unicode(masked_print_option._display)
def __repr__(self):
if self is MaskedConstant.__singleton:
return 'masked'
else:
# it's a subclass, or something is wrong, make it obvious
return object.__repr__(self)
def __reduce__(self):
"""Override of MaskedArray's __reduce__.
"""
return (self.__class__, ())
# inplace operations have no effect. We have to override them to avoid
# trying to modify the readonly data and mask arrays
def __iop__(self, other):
return self
__iadd__ = \
__isub__ = \
__imul__ = \
__ifloordiv__ = \
__itruediv__ = \
__ipow__ = \
__iop__
del __iop__ # don't leave this around
def copy(self, *args, **kwargs):
""" Copy is a no-op on the maskedconstant, as it is a scalar """
# maskedconstant is a scalar, so copy doesn't need to copy. There's
# precedent for this with `np.bool_` scalars.
return self
def __copy__(self):
return self
def __deepcopy__(self, memo):
return self
def __setattr__(self, attr, value):
if not self.__has_singleton():
# allow the singleton to be initialized
return super(MaskedConstant, self).__setattr__(attr, value)
elif self is self.__singleton:
raise AttributeError(
"attributes of {!r} are not writeable".format(self))
else:
# duplicate instance - we can end up here from __array_finalize__,
# where we set the __class__ attribute
return super(MaskedConstant, self).__setattr__(attr, value)
masked = masked_singleton = MaskedConstant()
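# A short illustrative sketch of the singleton semantics above: copying
# hands back the constant itself, and its repr is simply 'masked'.
#
# >>> import numpy.ma as ma
# >>> repr(ma.masked)
# 'masked'
# >>> ma.masked.copy() is ma.masked
# True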
masked_array = MaskedArray
def array(data, dtype=None, copy=False, order=None,
mask=nomask, fill_value=None, keep_mask=True,
hard_mask=False, shrink=True, subok=True, ndmin=0):
"""
Shortcut to MaskedArray.
The options are in a different order for convenience and backwards
compatibility.
"""
return MaskedArray(data, mask=mask, dtype=dtype, copy=copy,
subok=subok, keep_mask=keep_mask,
hard_mask=hard_mask, fill_value=fill_value,
ndmin=ndmin, shrink=shrink, order=order)
array.__doc__ = masked_array.__doc__
def is_masked(x):
"""
Determine whether input has masked values.
Accepts any object as input, but always returns False unless the
input is a MaskedArray containing masked values.
Parameters
----------
x : array_like
Array to check for masked values.
Returns
-------
result : bool
True if `x` is a MaskedArray with masked values, False otherwise.
Examples
--------
>>> import numpy.ma as ma
>>> x = ma.masked_equal([0, 1, 0, 2, 3], 0)
>>> x
masked_array(data = [-- 1 -- 2 3],
mask = [ True False True False False],
fill_value=999999)
>>> ma.is_masked(x)
True
>>> x = ma.masked_equal([0, 1, 0, 2, 3], 42)
>>> x
masked_array(data = [0 1 0 2 3],
mask = False,
fill_value=999999)
>>> ma.is_masked(x)
False
Always returns False if `x` isn't a MaskedArray.
>>> x = [False, True, False]
>>> ma.is_masked(x)
False
>>> x = 'a string'
>>> ma.is_masked(x)
False
"""
m = getmask(x)
if m is nomask:
return False
elif m.any():
return True
return False
##############################################################################
# Extrema functions #
##############################################################################
class _extrema_operation(_MaskedUFunc):
"""
Generic class for maximum/minimum functions.
.. note::
This is the base class for `_maximum_operation` and
`_minimum_operation`.
"""
def __init__(self, ufunc, compare, fill_value):
super(_extrema_operation, self).__init__(ufunc)
self.compare = compare
self.fill_value_func = fill_value
def __call__(self, a, b=None):
"Executes the call behavior."
if b is None:
# 2016-04-13, 1.13.0
warnings.warn(
"Single-argument form of np.ma.{0} is deprecated. Use "
"np.ma.{0}.reduce instead.".format(self.__name__),
DeprecationWarning, stacklevel=2)
return self.reduce(a)
return where(self.compare(a, b), a, b)
def reduce(self, target, axis=np._NoValue):
"Reduce target along the given axis."
target = narray(target, copy=False, subok=True)
m = getmask(target)
if axis is np._NoValue and target.ndim > 1:
# 2017-05-06, Numpy 1.13.0: warn on axis default
warnings.warn(
"In the future the default for ma.{0}.reduce will be axis=0, "
"not the current None, to match np.{0}.reduce. "
"Explicitly pass 0 or None to silence this warning.".format(
self.__name__
),
MaskedArrayFutureWarning, stacklevel=2)
axis = None
if axis is not np._NoValue:
kwargs = dict(axis=axis)
else:
kwargs = dict()
if m is nomask:
t = self.f.reduce(target, **kwargs)
else:
target = target.filled(
self.fill_value_func(target)).view(type(target))
t = self.f.reduce(target, **kwargs)
m = umath.logical_and.reduce(m, **kwargs)
if hasattr(t, '_mask'):
t._mask = m
elif m:
t = masked
return t
def outer(self, a, b):
"Return the function applied to the outer product of a and b."
ma = getmask(a)
mb = getmask(b)
if ma is nomask and mb is nomask:
m = nomask
else:
ma = getmaskarray(a)
mb = getmaskarray(b)
m = logical_or.outer(ma, mb)
result = self.f.outer(filled(a), filled(b))
if not isinstance(result, MaskedArray):
result = result.view(MaskedArray)
result._mask = m
return result
def min(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue):
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
try:
return obj.min(axis=axis, fill_value=fill_value, out=out, **kwargs)
except (AttributeError, TypeError):
# If obj doesn't have a min method, or if the method doesn't accept a
# fill_value argument
return asanyarray(obj).min(axis=axis, fill_value=fill_value,
out=out, **kwargs)
min.__doc__ = MaskedArray.min.__doc__
def max(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue):
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
try:
return obj.max(axis=axis, fill_value=fill_value, out=out, **kwargs)
except (AttributeError, TypeError):
# If obj doesn't have a max method, or if the method doesn't accept a
# fill_value argument
return asanyarray(obj).max(axis=axis, fill_value=fill_value,
out=out, **kwargs)
max.__doc__ = MaskedArray.max.__doc__
def ptp(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue):
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
try:
return obj.ptp(axis, out=out, fill_value=fill_value, **kwargs)
except (AttributeError, TypeError):
# If obj doesn't have a ptp method or if the method doesn't accept
# a fill_value argument
return asanyarray(obj).ptp(axis=axis, fill_value=fill_value,
out=out, **kwargs)
ptp.__doc__ = MaskedArray.ptp.__doc__
##############################################################################
# Definition of functions from the corresponding methods #
##############################################################################
class _frommethod(object):
"""
Define functions from existing MaskedArray methods.
Parameters
----------
methodname : str
Name of the method to transform.
"""
def __init__(self, methodname, reversed=False):
self.__name__ = methodname
self.__doc__ = self.getdoc()
self.reversed = reversed
def getdoc(self):
"Return the doc of the function (from the doc of the method)."
meth = getattr(MaskedArray, self.__name__, None) or\
getattr(np, self.__name__, None)
signature = self.__name__ + get_object_signature(meth)
if meth is not None:
doc = """ %s\n%s""" % (
signature, getattr(meth, '__doc__', None))
return doc
def __call__(self, a, *args, **params):
if self.reversed:
args = list(args)
a, args[0] = args[0], a
marr = asanyarray(a)
method_name = self.__name__
method = getattr(type(marr), method_name, None)
if method is None:
# use the corresponding np function
method = getattr(np, method_name)
return method(marr, *args, **params)
all = _frommethod('all')
anomalies = anom = _frommethod('anom')
any = _frommethod('any')
compress = _frommethod('compress', reversed=True)
cumprod = _frommethod('cumprod')
cumsum = _frommethod('cumsum')
copy = _frommethod('copy')
diagonal = _frommethod('diagonal')
harden_mask = _frommethod('harden_mask')
ids = _frommethod('ids')
maximum = _extrema_operation(umath.maximum, greater, maximum_fill_value)
mean = _frommethod('mean')
minimum = _extrema_operation(umath.minimum, less, minimum_fill_value)
nonzero = _frommethod('nonzero')
prod = _frommethod('prod')
product = _frommethod('prod')
ravel = _frommethod('ravel')
repeat = _frommethod('repeat')
shrink_mask = _frommethod('shrink_mask')
soften_mask = _frommethod('soften_mask')
std = _frommethod('std')
sum = _frommethod('sum')
swapaxes = _frommethod('swapaxes')
#take = _frommethod('take')
trace = _frommethod('trace')
var = _frommethod('var')
count = _frommethod('count')
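# A quick illustrative sketch of the generated functions above: each one
# forwards to the MaskedArray method of the same name, so masked entries
# are skipped.
#
# >>> import numpy.ma as ma
# >>> ma.sum(ma.array([1, 2, 3], mask=[0, 1, 0]))
# 4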
def take(a, indices, axis=None, out=None, mode='raise'):
"""
"""
a = masked_array(a)
return a.take(indices, axis=axis, out=out, mode=mode)
def power(a, b, third=None):
"""
    Returns the base array raised element-wise to the powers from the
    second array.
This is the masked array version of `numpy.power`. For details see
`numpy.power`.
See Also
--------
numpy.power
Notes
-----
    The *out* argument to `numpy.power` is not supported; `third` has to be
    None.
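    Examples
    --------
    A small sketch; invalid results (here, the square root of a negative
    base) come back masked:
    >>> import numpy.ma as ma
    >>> ma.power(ma.array([-1.0, 4.0]), 0.5)
    masked_array(data = [-- 2.0],
                 mask = [ True False],
           fill_value = 1e+20)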
"""
if third is not None:
raise MaskError("3-argument power not supported.")
# Get the masks
ma = getmask(a)
mb = getmask(b)
m = mask_or(ma, mb)
# Get the rawdata
fa = getdata(a)
fb = getdata(b)
# Get the type of the result (so that we preserve subclasses)
if isinstance(a, MaskedArray):
basetype = type(a)
else:
basetype = MaskedArray
# Get the result and view it as a (subclass of) MaskedArray
with np.errstate(divide='ignore', invalid='ignore'):
result = np.where(m, fa, umath.power(fa, fb)).view(basetype)
result._update_from(a)
# Find where we're in trouble w/ NaNs and Infs
invalid = np.logical_not(np.isfinite(result.view(ndarray)))
# Add the initial mask
if m is not nomask:
if not (result.ndim):
return masked
result._mask = np.logical_or(m, invalid)
# Fix the invalid parts
if invalid.any():
if not result.ndim:
return masked
elif result._mask is nomask:
result._mask = invalid
result._data[invalid] = result.fill_value
return result
argmin = _frommethod('argmin')
argmax = _frommethod('argmax')
def argsort(a, axis=np._NoValue, kind='quicksort', order=None, endwith=True, fill_value=None):
"Function version of the eponymous method."
a = np.asanyarray(a)
# 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default
if axis is np._NoValue:
axis = _deprecate_argsort_axis(a)
if isinstance(a, MaskedArray):
return a.argsort(axis=axis, kind=kind, order=order,
endwith=endwith, fill_value=fill_value)
else:
return a.argsort(axis=axis, kind=kind, order=order)
argsort.__doc__ = MaskedArray.argsort.__doc__
def sort(a, axis=-1, kind='quicksort', order=None, endwith=True, fill_value=None):
"Function version of the eponymous method."
a = np.array(a, copy=True, subok=True)
if axis is None:
a = a.flatten()
axis = 0
if isinstance(a, MaskedArray):
a.sort(axis=axis, kind=kind, order=order,
endwith=endwith, fill_value=fill_value)
else:
a.sort(axis=axis, kind=kind, order=order)
return a
sort.__doc__ = MaskedArray.sort.__doc__
def compressed(x):
"""
Return all the non-masked data as a 1-D array.
This function is equivalent to calling the "compressed" method of a
`MaskedArray`, see `MaskedArray.compressed` for details.
See Also
--------
MaskedArray.compressed
Equivalent method.
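    Examples
    --------
    A small sketch; the masked entries are dropped and a plain 1-D ndarray
    of the remaining data is returned:
    >>> import numpy.ma as ma
    >>> ma.compressed(ma.array([1, 2, 3, 4], mask=[0, 1, 0, 1]))
    array([1, 3])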
"""
return asanyarray(x).compressed()
def concatenate(arrays, axis=0):
"""
Concatenate a sequence of arrays along the given axis.
Parameters
----------
arrays : sequence of array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int, optional
The axis along which the arrays will be joined. Default is 0.
Returns
-------
result : MaskedArray
The concatenated array with any masked entries preserved.
See Also
--------
numpy.concatenate : Equivalent function in the top-level NumPy module.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.arange(3)
>>> a[1] = ma.masked
>>> b = ma.arange(2, 5)
>>> a
masked_array(data = [0 -- 2],
mask = [False True False],
fill_value = 999999)
>>> b
masked_array(data = [2 3 4],
mask = False,
fill_value = 999999)
>>> ma.concatenate([a, b])
masked_array(data = [0 -- 2 2 3 4],
mask = [False True False False False False],
fill_value = 999999)
"""
d = np.concatenate([getdata(a) for a in arrays], axis)
rcls = get_masked_subclass(*arrays)
data = d.view(rcls)
# Check whether one of the arrays has a non-empty mask.
for x in arrays:
if getmask(x) is not nomask:
break
else:
return data
# OK, so we have to concatenate the masks
dm = np.concatenate([getmaskarray(a) for a in arrays], axis)
dm = dm.reshape(d.shape)
# If we decide to keep a '_shrinkmask' option, we want to check that
# all of them are True, and then check for dm.any()
data._mask = _shrink_mask(dm)
return data
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
This function is the equivalent of `numpy.diag` that takes masked
values into account, see `numpy.diag` for details.
See Also
--------
numpy.diag : Equivalent function for ndarrays.
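    Examples
    --------
    A small sketch; a masked entry on the diagonal stays masked in the
    extracted result:
    >>> import numpy.ma as ma
    >>> x = ma.array([[0, 1], [2, 3]], mask=[[1, 0], [0, 0]])
    >>> ma.diag(x)
    masked_array(data = [-- 3],
                 mask = [ True False],
           fill_value = 999999)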
"""
output = np.diag(v, k).view(MaskedArray)
if getmask(v) is not nomask:
output._mask = np.diag(v._mask, k)
return output
def expand_dims(x, axis):
"""
Expand the shape of an array.
Expands the shape of the array by including a new axis before the one
specified by the `axis` parameter. This function behaves the same as
`numpy.expand_dims` but preserves masked elements.
See Also
--------
numpy.expand_dims : Equivalent function in top-level NumPy module.
Examples
--------
>>> import numpy.ma as ma
>>> x = ma.array([1, 2, 4])
>>> x[1] = ma.masked
>>> x
masked_array(data = [1 -- 4],
mask = [False True False],
fill_value = 999999)
>>> np.expand_dims(x, axis=0)
array([[1, 2, 4]])
>>> ma.expand_dims(x, axis=0)
masked_array(data =
[[1 -- 4]],
mask =
[[False True False]],
fill_value = 999999)
The same result can be achieved using slicing syntax with `np.newaxis`.
>>> x[np.newaxis, :]
masked_array(data =
[[1 -- 4]],
mask =
[[False True False]],
fill_value = 999999)
"""
result = n_expand_dims(x, axis)
if isinstance(x, MaskedArray):
new_shape = result.shape
result = x.view()
result.shape = new_shape
if result._mask is not nomask:
result._mask.shape = new_shape
return result
def left_shift(a, n):
"""
Shift the bits of an integer to the left.
This is the masked array version of `numpy.left_shift`, for details
see that function.
See Also
--------
numpy.left_shift
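    Examples
    --------
    A small sketch; the shift is applied to the data while the mask is
    carried over unchanged:
    >>> import numpy.ma as ma
    >>> ma.left_shift(ma.array([1, 2, 3], mask=[0, 1, 0]), 1)
    masked_array(data = [2 -- 6],
                 mask = [False True False],
           fill_value = 999999)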
"""
m = getmask(a)
if m is nomask:
d = umath.left_shift(filled(a), n)
return masked_array(d)
else:
d = umath.left_shift(filled(a, 0), n)
return masked_array(d, mask=m)
def right_shift(a, n):
"""
Shift the bits of an integer to the right.
This is the masked array version of `numpy.right_shift`, for details
see that function.
See Also
--------
numpy.right_shift
"""
m = getmask(a)
if m is nomask:
d = umath.right_shift(filled(a), n)
return masked_array(d)
else:
d = umath.right_shift(filled(a, 0), n)
return masked_array(d, mask=m)
def put(a, indices, values, mode='raise'):
"""
Set storage-indexed locations to corresponding values.
This function is equivalent to `MaskedArray.put`, see that method
for details.
See Also
--------
MaskedArray.put
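    Examples
    --------
    A small sketch; the update happens in place and the stored location is
    unmasked:
    >>> import numpy.ma as ma
    >>> x = ma.array([1, 2, 3], mask=[0, 1, 0])
    >>> ma.put(x, 1, 99)
    >>> x
    masked_array(data = [1 99 3],
                 mask = False,
           fill_value = 999999)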
"""
# We can't use 'frommethod', the order of arguments is different
try:
return a.put(indices, values, mode=mode)
except AttributeError:
return narray(a, copy=False).put(indices, values, mode=mode)
def putmask(a, mask, values): # , mode='raise'):
"""
Changes elements of an array based on conditional and input values.
This is the masked array version of `numpy.putmask`, for details see
`numpy.putmask`.
See Also
--------
numpy.putmask
Notes
-----
Using a masked array as `values` will **not** transform a `ndarray` into
a `MaskedArray`.
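    Examples
    --------
    A small sketch; entries selected by the condition are written and
    unmasked, the others are left alone:
    >>> import numpy.ma as ma
    >>> x = ma.array([1, 2, 3], mask=[0, 1, 0])
    >>> ma.putmask(x, [True, False, True], [10, 20, 30])
    >>> x
    masked_array(data = [10 -- 30],
                 mask = [False True False],
           fill_value = 999999)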
"""
# We can't use 'frommethod', the order of arguments is different
if not isinstance(a, MaskedArray):
a = a.view(MaskedArray)
(valdata, valmask) = (getdata(values), getmask(values))
if getmask(a) is nomask:
if valmask is not nomask:
a._sharedmask = True
a._mask = make_mask_none(a.shape, a.dtype)
np.copyto(a._mask, valmask, where=mask)
elif a._hardmask:
if valmask is not nomask:
m = a._mask.copy()
np.copyto(m, valmask, where=mask)
a.mask |= m
else:
if valmask is nomask:
valmask = getmaskarray(values)
np.copyto(a._mask, valmask, where=mask)
np.copyto(a._data, valdata, where=mask)
return
def transpose(a, axes=None):
"""
Permute the dimensions of an array.
This function is exactly equivalent to `numpy.transpose`.
See Also
--------
numpy.transpose : Equivalent function in top-level NumPy module.
Examples
--------
>>> import numpy.ma as ma
>>> x = ma.arange(4).reshape((2,2))
>>> x[1, 1] = ma.masked
    >>> x
masked_array(data =
[[0 1]
[2 --]],
mask =
[[False False]
[False True]],
fill_value = 999999)
>>> ma.transpose(x)
masked_array(data =
[[0 2]
[1 --]],
mask =
[[False False]
[False True]],
fill_value = 999999)
"""
# We can't use 'frommethod', as 'transpose' doesn't take keywords
try:
return a.transpose(axes)
except AttributeError:
return narray(a, copy=False).transpose(axes).view(MaskedArray)
def reshape(a, new_shape, order='C'):
"""
Returns an array containing the same data with a new shape.
Refer to `MaskedArray.reshape` for full documentation.
See Also
--------
MaskedArray.reshape : equivalent function
"""
    # We can't use 'frommethod', as it whines about some parameters.
try:
return a.reshape(new_shape, order=order)
except AttributeError:
_tmp = narray(a, copy=False).reshape(new_shape, order=order)
return _tmp.view(MaskedArray)
def resize(x, new_shape):
"""
Return a new masked array with the specified size and shape.
This is the masked equivalent of the `numpy.resize` function. The new
array is filled with repeated copies of `x` (in the order that the
data are stored in memory). If `x` is masked, the new array will be
masked, and the new mask will be a repetition of the old one.
See Also
--------
numpy.resize : Equivalent function in the top level NumPy module.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.array([[1, 2] ,[3, 4]])
>>> a[0, 1] = ma.masked
>>> a
masked_array(data =
[[1 --]
[3 4]],
mask =
[[False True]
[False False]],
fill_value = 999999)
>>> np.resize(a, (3, 3))
array([[1, 2, 3],
[4, 1, 2],
[3, 4, 1]])
>>> ma.resize(a, (3, 3))
masked_array(data =
[[1 -- 3]
[4 1 --]
[3 4 1]],
mask =
[[False True False]
[False False True]
[False False False]],
fill_value = 999999)
A MaskedArray is always returned, regardless of the input type.
>>> a = np.array([[1, 2] ,[3, 4]])
>>> ma.resize(a, (3, 3))
masked_array(data =
[[1 2 3]
[4 1 2]
[3 4 1]],
mask =
False,
fill_value = 999999)
"""
# We can't use _frommethods here, as N.resize is notoriously whiny.
m = getmask(x)
if m is not nomask:
m = np.resize(m, new_shape)
result = np.resize(x, new_shape).view(get_masked_subclass(x))
if result.ndim:
result._mask = m
return result
def rank(obj):
"""
maskedarray version of the numpy function.
.. note::
Deprecated since 1.10.0
"""
# 2015-04-12, 1.10.0
warnings.warn(
"`rank` is deprecated; use the `ndim` function instead. ",
np.VisibleDeprecationWarning, stacklevel=2)
return np.ndim(getdata(obj))
rank.__doc__ = np.rank.__doc__
def ndim(obj):
"""
maskedarray version of the numpy function.
"""
return np.ndim(getdata(obj))
ndim.__doc__ = np.ndim.__doc__
def shape(obj):
"maskedarray version of the numpy function."
return np.shape(getdata(obj))
shape.__doc__ = np.shape.__doc__
def size(obj, axis=None):
"maskedarray version of the numpy function."
return np.size(getdata(obj), axis)
size.__doc__ = np.size.__doc__
##############################################################################
# Extra functions #
##############################################################################
def where(condition, x=_NoValue, y=_NoValue):
"""
Return a masked array with elements from x or y, depending on condition.
Returns a masked array, shaped like condition, where the elements
are from `x` when `condition` is True, and from `y` otherwise.
If neither `x` nor `y` are given, the function returns a tuple of
indices where `condition` is True (the result of
``condition.nonzero()``).
Parameters
----------
condition : array_like, bool
The condition to meet. For each True element, yield the corresponding
element from `x`, otherwise from `y`.
x, y : array_like, optional
Values from which to choose. `x`, `y` and `condition` need to be
broadcastable to some shape.
Returns
-------
out : MaskedArray or tuple of ndarrays
The resulting masked array if `x` and `y` were given, otherwise
the result of ``condition.nonzero()``.
See Also
--------
numpy.where : Equivalent function in the top-level NumPy module.
Examples
--------
>>> x = np.ma.array(np.arange(9.).reshape(3, 3), mask=[[0, 1, 0],
... [1, 0, 1],
... [0, 1, 0]])
>>> print(x)
[[0.0 -- 2.0]
[-- 4.0 --]
[6.0 -- 8.0]]
>>> np.ma.where(x > 5) # return the indices where x > 5
(array([2, 2]), array([0, 2]))
>>> print(np.ma.where(x > 5, x, -3.1416))
[[-3.1416 -- -3.1416]
[-- -3.1416 --]
[6.0 -- 8.0]]
"""
# handle the single-argument case
missing = (x is _NoValue, y is _NoValue).count(True)
if missing == 1:
raise ValueError("Must provide both 'x' and 'y' or neither.")
if missing == 2:
return nonzero(condition)
# we only care if the condition is true - false or masked pick y
cf = filled(condition, False)
xd = getdata(x)
yd = getdata(y)
# we need the full arrays here for correct final dimensions
cm = getmaskarray(condition)
xm = getmaskarray(x)
ym = getmaskarray(y)
# deal with the fact that masked.dtype == float64, but we don't actually
# want to treat it as that.
if x is masked and y is not masked:
xd = np.zeros((), dtype=yd.dtype)
xm = np.ones((), dtype=ym.dtype)
elif y is masked and x is not masked:
yd = np.zeros((), dtype=xd.dtype)
ym = np.ones((), dtype=xm.dtype)
data = np.where(cf, xd, yd)
mask = np.where(cf, xm, ym)
mask = np.where(cm, np.ones((), dtype=mask.dtype), mask)
# collapse the mask, for backwards compatibility
mask = _shrink_mask(mask)
return masked_array(data, mask=mask)
def choose(indices, choices, out=None, mode='raise'):
"""
Use an index array to construct a new array from a set of choices.
    Given an array of integers and a set of n choice arrays, this function
    will create a new array that merges each of the choice arrays. Where a
    value in `indices` is i, the new array will have the value that
    choices[i] contains in the same place.
    Parameters
    ----------
    indices : ndarray of ints
This array must contain integers in ``[0, n-1]``, where n is the
number of choices.
choices : sequence of arrays
Choice arrays. The index array and all of the choices should be
broadcastable to the same shape.
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and `dtype`.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
* 'raise' : raise an error
* 'wrap' : wrap around
* 'clip' : clip to the range
Returns
-------
merged_array : array
See Also
--------
    numpy.choose : Equivalent function in the top-level NumPy module.
Examples
--------
>>> choice = np.array([[1,1,1], [2,2,2], [3,3,3]])
>>> a = np.array([2, 1, 0])
>>> np.ma.choose(a, choice)
masked_array(data = [3 2 1],
mask = False,
fill_value=999999)
"""
def fmask(x):
"Returns the filled array, or True if masked."
if x is masked:
return True
return filled(x)
def nmask(x):
"Returns the mask, True if ``masked``, False if ``nomask``."
if x is masked:
return True
return getmask(x)
# Get the indices.
c = filled(indices, 0)
# Get the masks.
masks = [nmask(x) for x in choices]
data = [fmask(x) for x in choices]
# Construct the mask
outputmask = np.choose(c, masks, mode=mode)
outputmask = make_mask(mask_or(outputmask, getmask(indices)),
copy=0, shrink=True)
# Get the choices.
d = np.choose(c, data, mode=mode, out=out).view(MaskedArray)
if out is not None:
if isinstance(out, MaskedArray):
out.__setmask__(outputmask)
return out
d.__setmask__(outputmask)
return d
def round_(a, decimals=0, out=None):
"""
Return a copy of a, rounded to 'decimals' places.
When 'decimals' is negative, it specifies the number of positions
to the left of the decimal point. The real and imaginary parts of
complex numbers are rounded separately. Nothing is done if the
array is not of float type and 'decimals' is greater than or equal
to 0.
Parameters
----------
decimals : int
Number of decimals to round to. May be negative.
out : array_like
Existing array to use for output.
If not given, returns a default copy of a.
Notes
-----
If out is given and does not have a mask attribute, the mask of a
is lost!
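    Examples
    --------
    A small sketch; masked entries pass through untouched:
    >>> import numpy.ma as ma
    >>> ma.round_(ma.array([1.11, 2.55], mask=[0, 1]), decimals=1)
    masked_array(data = [1.1 --],
                 mask = [False True],
           fill_value = 1e+20)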
"""
if out is None:
return np.round_(a, decimals, out)
else:
np.round_(getdata(a), decimals, out)
if hasattr(out, '_mask'):
out._mask = getmask(a)
return out
round = round_
# Needed by dot, so move here from extras.py. It will still be exported
# from extras.py for compatibility.
def mask_rowcols(a, axis=None):
"""
Mask rows and/or columns of a 2D array that contain masked values.
Mask whole rows and/or columns of a 2D array that contain
masked values. The masking behavior is selected using the
`axis` parameter.
- If `axis` is None, rows *and* columns are masked.
- If `axis` is 0, only rows are masked.
- If `axis` is 1 or -1, only columns are masked.
Parameters
----------
a : array_like, MaskedArray
        The array to mask. If not a MaskedArray instance (or if no array
        elements are masked), the result is a MaskedArray with `mask` set
        to `nomask` (False). Must be a 2D array.
axis : int, optional
Axis along which to perform the operation. If None, applies to a
flattened version of the array.
Returns
-------
a : MaskedArray
A modified version of the input array, masked depending on the value
of the `axis` parameter.
Raises
------
NotImplementedError
If input array `a` is not 2D.
See Also
--------
mask_rows : Mask rows of a 2D array that contain masked values.
mask_cols : Mask cols of a 2D array that contain masked values.
masked_where : Mask where a condition is met.
Notes
-----
The input array's mask is modified by this function.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.zeros((3, 3), dtype=int)
>>> a[1, 1] = 1
>>> a
array([[0, 0, 0],
[0, 1, 0],
[0, 0, 0]])
>>> a = ma.masked_equal(a, 1)
>>> a
masked_array(data =
[[0 0 0]
[0 -- 0]
[0 0 0]],
mask =
[[False False False]
[False True False]
[False False False]],
fill_value=999999)
>>> ma.mask_rowcols(a)
masked_array(data =
[[0 -- 0]
[-- -- --]
[0 -- 0]],
mask =
[[False True False]
[ True True True]
[False True False]],
fill_value=999999)
"""
a = array(a, subok=False)
if a.ndim != 2:
raise NotImplementedError("mask_rowcols works for 2D arrays only.")
m = getmask(a)
# Nothing is masked: return a
if m is nomask or not m.any():
return a
maskedval = m.nonzero()
a._mask = a._mask.copy()
if not axis:
a[np.unique(maskedval[0])] = masked
if axis in [None, 1, -1]:
a[:, np.unique(maskedval[1])] = masked
return a
# Include masked dot here to avoid import problems in getting it from
# extras.py. Note that it is not included in __all__, but rather exported
# from extras in order to avoid backward compatibility problems.
def dot(a, b, strict=False, out=None):
"""
Return the dot product of two arrays.
This function is the equivalent of `numpy.dot` that takes masked values
    into account. Note that `strict` and `out` are in different positions
    than in the method version. In order to maintain compatibility with the
corresponding method, it is recommended that the optional arguments be
treated as keyword only. At some point that may be mandatory.
.. note::
Works only with 2-D arrays at the moment.
Parameters
----------
a, b : masked_array_like
        Input arrays.
strict : bool, optional
Whether masked data are propagated (True) or set to 0 (False) for
the computation. Default is False. Propagating the mask means that
if a masked value appears in a row or column, the whole row or
column is considered masked.
out : masked_array, optional
Output argument. This must have the exact kind that would be returned
if it was not used. In particular, it must have the right type, must be
C-contiguous, and its dtype must be the dtype that would be returned
for `dot(a,b)`. This is a performance feature. Therefore, if these
conditions are not met, an exception is raised, instead of attempting
to be flexible.
.. versionadded:: 1.10.2
See Also
--------
numpy.dot : Equivalent function for ndarrays.
Examples
--------
>>> a = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[1, 0, 0], [0, 0, 0]])
>>> b = ma.array([[1, 2], [3, 4], [5, 6]], mask=[[1, 0], [0, 0], [0, 0]])
>>> np.ma.dot(a, b)
masked_array(data =
[[21 26]
[45 64]],
mask =
[[False False]
[False False]],
fill_value = 999999)
>>> np.ma.dot(a, b, strict=True)
masked_array(data =
[[-- --]
[-- 64]],
mask =
[[ True True]
[ True False]],
fill_value = 999999)
"""
# !!!: Works only with 2D arrays. There should be a way to get it to run
# with higher dimension
if strict and (a.ndim == 2) and (b.ndim == 2):
a = mask_rowcols(a, 0)
b = mask_rowcols(b, 1)
am = ~getmaskarray(a)
bm = ~getmaskarray(b)
if out is None:
d = np.dot(filled(a, 0), filled(b, 0))
m = ~np.dot(am, bm)
if d.ndim == 0:
d = np.asarray(d)
r = d.view(get_masked_subclass(a, b))
r.__setmask__(m)
return r
else:
d = np.dot(filled(a, 0), filled(b, 0), out._data)
if out.mask.shape != d.shape:
out._mask = np.empty(d.shape, MaskType)
np.dot(am, bm, out._mask)
np.logical_not(out._mask, out._mask)
return out
def inner(a, b):
"""
Returns the inner product of a and b for arrays of floating point types.
    Like the generic NumPy equivalent, the product sum is over the last
    dimension of a and b. The first argument is not conjugated.
"""
fa = filled(a, 0)
fb = filled(b, 0)
if fa.ndim == 0:
fa.shape = (1,)
if fb.ndim == 0:
fb.shape = (1,)
return np.inner(fa, fb).view(MaskedArray)
inner.__doc__ = doc_note(np.inner.__doc__,
"Masked values are replaced by 0.")
innerproduct = inner
def outer(a, b):
"maskedarray version of the numpy function."
fa = filled(a, 0).ravel()
fb = filled(b, 0).ravel()
d = np.outer(fa, fb)
ma = getmask(a)
mb = getmask(b)
if ma is nomask and mb is nomask:
return masked_array(d)
ma = getmaskarray(a)
mb = getmaskarray(b)
m = make_mask(1 - np.outer(1 - ma, 1 - mb), copy=0)
return masked_array(d, mask=m)
outer.__doc__ = doc_note(np.outer.__doc__,
"Masked values are replaced by 0.")
outerproduct = outer
def _convolve_or_correlate(f, a, v, mode, propagate_mask):
"""
Helper function for ma.correlate and ma.convolve
"""
if propagate_mask:
# results which are contributed to by either item in any pair being invalid
mask = (
f(getmaskarray(a), np.ones(np.shape(v), dtype=bool), mode=mode)
| f(np.ones(np.shape(a), dtype=bool), getmaskarray(v), mode=mode)
)
data = f(getdata(a), getdata(v), mode=mode)
else:
# results which are not contributed to by any pair of valid elements
mask = ~f(~getmaskarray(a), ~getmaskarray(v))
data = f(filled(a, 0), filled(v, 0), mode=mode)
return masked_array(data, mask=mask)
def correlate(a, v, mode='valid', propagate_mask=True):
"""
Cross-correlation of two 1-dimensional sequences.
Parameters
----------
a, v : array_like
Input sequences.
mode : {'valid', 'same', 'full'}, optional
Refer to the `np.convolve` docstring. Note that the default
is 'valid', unlike `convolve`, which uses 'full'.
propagate_mask : bool
        If True, a result element is masked if any masked element
        contributes towards it. If False, a result element is only masked
        if no non-masked elements contribute towards it.
Returns
-------
out : MaskedArray
Discrete cross-correlation of `a` and `v`.
See Also
--------
numpy.correlate : Equivalent function in the top-level NumPy module.
"""
return _convolve_or_correlate(np.correlate, a, v, mode, propagate_mask)
def convolve(a, v, mode='full', propagate_mask=True):
"""
Returns the discrete, linear convolution of two one-dimensional sequences.
Parameters
----------
a, v : array_like
Input sequences.
mode : {'valid', 'same', 'full'}, optional
Refer to the `np.convolve` docstring.
propagate_mask : bool
        If True, a result element is masked if any masked element is
        included in the sum that produces it. If False, a result element
        is only masked if no non-masked elements contribute towards it.
Returns
-------
out : MaskedArray
Discrete, linear convolution of `a` and `v`.
See Also
--------
numpy.convolve : Equivalent function in the top-level NumPy module.
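    Examples
    --------
    A small sketch of the two masking modes; with ``propagate_mask=True``
    every output touched by the masked entry is masked, while with
    ``propagate_mask=False`` the masked entry is simply treated as 0:
    >>> import numpy.ma as ma
    >>> a = ma.array([1, 1, 1], mask=[0, 1, 0])
    >>> print(ma.convolve(a, [1, 1]))
    [1 -- -- 1]
    >>> print(ma.convolve(a, [1, 1], propagate_mask=False))
    [1 1 1 1]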
"""
return _convolve_or_correlate(np.convolve, a, v, mode, propagate_mask)
def allequal(a, b, fill_value=True):
"""
Return True if all entries of a and b are equal, using
fill_value as a truth value where either or both are masked.
Parameters
----------
a, b : array_like
Input arrays to compare.
fill_value : bool, optional
Whether masked values in a or b are considered equal (True) or not
(False).
Returns
-------
y : bool
        Returns True if the two arrays are equal, False otherwise. If
        either array contains NaN, then False is returned.
See Also
--------
all, any
numpy.ma.allclose
Examples
--------
>>> a = ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1])
>>> a
masked_array(data = [10000000000.0 1e-07 --],
mask = [False False True],
fill_value=1e+20)
    >>> b = np.array([1e10, 1e-7, -42.0])
>>> b
array([ 1.00000000e+10, 1.00000000e-07, -4.20000000e+01])
>>> ma.allequal(a, b, fill_value=False)
False
>>> ma.allequal(a, b)
True
"""
m = mask_or(getmask(a), getmask(b))
if m is nomask:
x = getdata(a)
y = getdata(b)
d = umath.equal(x, y)
return d.all()
elif fill_value:
x = getdata(a)
y = getdata(b)
d = umath.equal(x, y)
dm = array(d, mask=m, copy=False)
return dm.filled(True).all(None)
else:
return False
def allclose(a, b, masked_equal=True, rtol=1e-5, atol=1e-8):
"""
Returns True if two arrays are element-wise equal within a tolerance.
This function is equivalent to `allclose` except that masked values
are treated as equal (default) or unequal, depending on the `masked_equal`
argument.
Parameters
----------
a, b : array_like
Input arrays to compare.
masked_equal : bool, optional
Whether masked values in `a` and `b` are considered equal (True) or not
(False). They are considered equal by default.
rtol : float, optional
Relative tolerance. The relative difference is equal to ``rtol * b``.
Default is 1e-5.
atol : float, optional
Absolute tolerance. The absolute difference is equal to `atol`.
Default is 1e-8.
Returns
-------
y : bool
Returns True if the two arrays are equal within the given
tolerance, False otherwise. If either array contains NaN, then
False is returned.
See Also
--------
all, any
numpy.allclose : the non-masked `allclose`.
Notes
-----
If the following equation is element-wise True, then `allclose` returns
True::
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
Return True if all elements of `a` and `b` are equal subject to
given tolerances.
Examples
--------
>>> a = ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1])
>>> a
masked_array(data = [10000000000.0 1e-07 --],
mask = [False False True],
fill_value = 1e+20)
>>> b = ma.array([1e10, 1e-8, -42.0], mask=[0, 0, 1])
>>> ma.allclose(a, b)
False
>>> a = ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1])
>>> b = ma.array([1.00001e10, 1e-9, -42.0], mask=[0, 0, 1])
>>> ma.allclose(a, b)
True
>>> ma.allclose(a, b, masked_equal=False)
False
Masked values are not compared directly.
>>> a = ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1])
>>> b = ma.array([1.00001e10, 1e-9, 42.0], mask=[0, 0, 1])
>>> ma.allclose(a, b)
True
>>> ma.allclose(a, b, masked_equal=False)
False
"""
x = masked_array(a, copy=False)
y = masked_array(b, copy=False)
# make sure y is an inexact type to avoid abs(MIN_INT); will cause
# casting of x later.
dtype = np.result_type(y, 1.)
if y.dtype != dtype:
y = masked_array(y, dtype=dtype, copy=False)
m = mask_or(getmask(x), getmask(y))
xinf = np.isinf(masked_array(x, copy=False, mask=m)).filled(False)
# If we have some infs, they should fall at the same place.
if not np.all(xinf == filled(np.isinf(y), False)):
return False
# No infs at all
if not np.any(xinf):
d = filled(less_equal(absolute(x - y), atol + rtol * absolute(y)),
masked_equal)
return np.all(d)
if not np.all(filled(x[xinf] == y[xinf], masked_equal)):
return False
x = x[~xinf]
y = y[~xinf]
d = filled(less_equal(absolute(x - y), atol + rtol * absolute(y)),
masked_equal)
return np.all(d)
def asarray(a, dtype=None, order=None):
"""
Convert the input to a masked array of the given data-type.
No copy is performed if the input is already an `ndarray`. If `a` is
a subclass of `MaskedArray`, a base class `MaskedArray` is returned.
Parameters
----------
a : array_like
Input data, in any form that can be converted to a masked array. This
includes lists, lists of tuples, tuples, tuples of tuples, tuples
of lists, ndarrays and masked arrays.
dtype : dtype, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major ('C') or column-major ('FORTRAN') memory
representation. Default is 'C'.
Returns
-------
out : MaskedArray
Masked array interpretation of `a`.
See Also
--------
asanyarray : Similar to `asarray`, but conserves subclasses.
Examples
--------
>>> x = np.arange(10.).reshape(2, 5)
>>> x
array([[ 0., 1., 2., 3., 4.],
[ 5., 6., 7., 8., 9.]])
>>> np.ma.asarray(x)
masked_array(data =
[[ 0. 1. 2. 3. 4.]
[ 5. 6. 7. 8. 9.]],
mask =
False,
fill_value = 1e+20)
>>> type(np.ma.asarray(x))
<class 'numpy.ma.core.MaskedArray'>
"""
order = order or 'C'
return masked_array(a, dtype=dtype, copy=False, keep_mask=True,
subok=False, order=order)
def asanyarray(a, dtype=None):
"""
Convert the input to a masked array, conserving subclasses.
If `a` is a subclass of `MaskedArray`, its class is conserved.
No copy is performed if the input is already an `ndarray`.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array.
dtype : dtype, optional
By default, the data-type is inferred from the input data.
Returns
-------
out : MaskedArray
MaskedArray interpretation of `a`.
See Also
--------
asarray : Similar to `asanyarray`, but does not conserve subclass.
Examples
--------
>>> x = np.arange(10.).reshape(2, 5)
>>> x
array([[ 0., 1., 2., 3., 4.],
[ 5., 6., 7., 8., 9.]])
>>> np.ma.asanyarray(x)
masked_array(data =
[[ 0. 1. 2. 3. 4.]
[ 5. 6. 7. 8. 9.]],
mask =
False,
fill_value = 1e+20)
>>> type(np.ma.asanyarray(x))
<class 'numpy.ma.core.MaskedArray'>
"""
# workaround for #8666, to preserve identity. Ideally the bottom line
# would handle this for us.
if isinstance(a, MaskedArray) and (dtype is None or dtype == a.dtype):
return a
return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True)
##############################################################################
# Pickling #
##############################################################################
def _pickle_warn(method):
# NumPy 1.15.0, 2017-12-10
warnings.warn(
"np.ma.{method} is deprecated, use pickle.{method} instead"
.format(method=method),
DeprecationWarning,
stacklevel=3)
def dump(a, F):
"""
Pickle a masked array to a file.
This is a wrapper around ``cPickle.dump``.
Parameters
----------
a : MaskedArray
The array to be pickled.
F : str or file-like object
The file to pickle `a` to. If a string, the full path to the file.
"""
_pickle_warn('dump')
if not hasattr(F, 'readline'):
with open(F, 'w') as F:
pickle.dump(a, F)
else:
pickle.dump(a, F)
def dumps(a):
"""
Return a string corresponding to the pickling of a masked array.
This is a wrapper around ``cPickle.dumps``.
Parameters
----------
a : MaskedArray
The array for which the string representation of the pickle is
returned.
"""
_pickle_warn('dumps')
return pickle.dumps(a)
def load(F):
"""
Wrapper around ``cPickle.load`` which accepts either a file-like object
or a filename.
Parameters
----------
F : str or file
The file or file name to load.
See Also
--------
dump : Pickle an array
Notes
-----
This is different from `numpy.load`, which does not use cPickle but loads
the NumPy binary .npy format.
"""
_pickle_warn('load')
if not hasattr(F, 'readline'):
with open(F, 'r') as F:
return pickle.load(F)
else:
return pickle.load(F)
def loads(strg):
"""
Load a pickle from the current string.
The result of ``cPickle.loads(strg)`` is returned.
Parameters
----------
strg : str
The string to load.
See Also
--------
dumps : Return a string corresponding to the pickling of a masked array.
"""
_pickle_warn('loads')
return pickle.loads(strg)
def fromfile(file, dtype=float, count=-1, sep=''):
raise NotImplementedError(
"fromfile() not yet implemented for a MaskedArray.")
def fromflex(fxarray):
"""
Build a masked array from a suitable flexible-type array.
The input array has to have a data-type with ``_data`` and ``_mask``
fields. This type of array is output by `MaskedArray.toflex`.
Parameters
----------
fxarray : ndarray
The structured input array, containing ``_data`` and ``_mask``
fields. If present, other fields are discarded.
Returns
-------
result : MaskedArray
The constructed masked array.
See Also
--------
MaskedArray.toflex : Build a flexible-type array from a masked array.
Examples
--------
>>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[0] + [1, 0] * 4)
>>> rec = x.toflex()
>>> rec
array([[(0, False), (1, True), (2, False)],
[(3, True), (4, False), (5, True)],
[(6, False), (7, True), (8, False)]],
dtype=[('_data', '<i4'), ('_mask', '|b1')])
>>> x2 = np.ma.fromflex(rec)
>>> x2
masked_array(data =
[[0 -- 2]
[-- 4 --]
[6 -- 8]],
mask =
[[False True False]
[ True False True]
[False True False]],
fill_value = 999999)
Extra fields can be present in the structured array but are discarded:
>>> dt = [('_data', '<i4'), ('_mask', '|b1'), ('field3', '<f4')]
>>> rec2 = np.zeros((2, 2), dtype=dt)
>>> rec2
array([[(0, False, 0.0), (0, False, 0.0)],
[(0, False, 0.0), (0, False, 0.0)]],
dtype=[('_data', '<i4'), ('_mask', '|b1'), ('field3', '<f4')])
>>> y = np.ma.fromflex(rec2)
>>> y
masked_array(data =
[[0 0]
[0 0]],
mask =
[[False False]
[False False]],
fill_value = 999999)
"""
return masked_array(fxarray['_data'], mask=fxarray['_mask'])
class _convert2ma(object):
"""
Convert functions from numpy to numpy.ma.
Parameters
----------
_methodname : string
Name of the method to transform.
"""
__doc__ = None
def __init__(self, funcname, params=None):
self._func = getattr(np, funcname)
self.__doc__ = self.getdoc()
self._extras = params or {}
def getdoc(self):
"Return the doc of the function (from the doc of the method)."
doc = getattr(self._func, '__doc__', None)
sig = get_object_signature(self._func)
if doc:
# Add the signature of the function at the beginning of the doc
if sig:
sig = "%s%s\n" % (self._func.__name__, sig)
doc = sig + doc
return doc
def __call__(self, *args, **params):
# Find the common parameters to the call and the definition
_extras = self._extras
common_params = set(params).intersection(_extras)
# Drop the common parameters from the call
for p in common_params:
_extras[p] = params.pop(p)
# Get the result
result = self._func.__call__(*args, **params).view(MaskedArray)
if "fill_value" in common_params:
result.fill_value = _extras.get("fill_value", None)
if "hardmask" in common_params:
            result._hardmask = bool(_extras.get("hardmask", False))
return result
arange = _convert2ma('arange', params=dict(fill_value=None, hardmask=False))
clip = np.clip
diff = np.diff
empty = _convert2ma('empty', params=dict(fill_value=None, hardmask=False))
empty_like = _convert2ma('empty_like')
frombuffer = _convert2ma('frombuffer')
fromfunction = _convert2ma('fromfunction')
identity = _convert2ma(
'identity', params=dict(fill_value=None, hardmask=False))
indices = np.indices
ones = _convert2ma('ones', params=dict(fill_value=None, hardmask=False))
ones_like = np.ones_like
squeeze = np.squeeze
zeros = _convert2ma('zeros', params=dict(fill_value=None, hardmask=False))
zeros_like = np.zeros_like
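# A quick illustrative sketch of the wrappers above: the wrapped
# constructors behave like their numpy counterparts but hand back masked
# arrays.
#
# >>> import numpy.ma as ma
# >>> type(ma.zeros((2, 2)))
# <class 'numpy.ma.core.MaskedArray'>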
def append(a, b, axis=None):
"""Append values to the end of an array.
.. versionadded:: 1.9.0
Parameters
----------
a : array_like
Values are appended to a copy of this array.
b : array_like
These values are appended to a copy of `a`. It must be of the
correct shape (the same shape as `a`, excluding `axis`). If `axis`
is not specified, `b` can be any shape and will be flattened
before use.
axis : int, optional
The axis along which `v` are appended. If `axis` is not given,
both `a` and `b` are flattened before use.
Returns
-------
append : MaskedArray
A copy of `a` with `b` appended to `axis`. Note that `append`
does not occur in-place: a new array is allocated and filled. If
`axis` is None, the result is a flattened array.
See Also
--------
numpy.append : Equivalent function in the top-level NumPy module.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.masked_values([1, 2, 3], 2)
>>> b = ma.masked_values([[4, 5, 6], [7, 8, 9]], 7)
>>> print(ma.append(a, b))
[1 -- 3 4 5 6 -- 8 9]
"""
return concatenate([a, b], axis)
|
{
"content_hash": "86e4711028822efbdf5ac66d5b3d3b2d",
"timestamp": "",
"source": "github",
"line_count": 8146,
"max_line_length": 94,
"avg_line_length": 31.622391357721582,
"alnum_prop": 0.5443834531592106,
"repo_name": "ryfeus/lambda-packs",
"id": "a8cf02336083dfdcf3410597f4f43cd01ef4f425",
"size": "257596",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Tensorflow_LightGBM_Scipy_nightly/source/numpy/ma/core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
}
|
"""
======================================
Sparse inverse covariance estimation
======================================
Using the GraphicalLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too correlated (limiting the largest coefficient of the
precision matrix) and that there are no small coefficients in the
precision matrix that cannot be recovered. In addition, with a small
number of observations, it is easier to recover a correlation matrix
rather than a covariance, thus we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse, the empirical
precision matrix, is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, the small
number of samples forces us to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision,
which is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimate correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphicalLasso setting the sparsity of the model is
set by internal cross-validation in the GraphicalLassoCV. As can be
seen on figure 2, the grid to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
# author: Gael Varoquaux <gael.varoquaux@inria.fr>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphicalLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
# #############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(
n_features, alpha=0.98, smallest_coef=0.4, largest_coef=0.7, random_state=prng
)
cov = linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
# #############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphicalLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
# #############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [
("Empirical", emp_cov),
("Ledoit-Wolf", lw_cov_),
("GraphicalLassoCV", cov_),
("True", cov),
]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(
this_cov, interpolation="nearest", vmin=-vmax, vmax=vmax, cmap=plt.cm.RdBu_r
)
plt.xticks(())
plt.yticks(())
plt.title("%s covariance" % name)
# plot the precisions
precs = [
("Empirical", linalg.inv(emp_cov)),
("Ledoit-Wolf", lw_prec_),
("GraphicalLasso", prec_),
("True", prec),
]
vmax = 0.9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(
np.ma.masked_equal(this_prec, 0),
interpolation="nearest",
vmin=-vmax,
vmax=vmax,
cmap=plt.cm.RdBu_r,
)
plt.xticks(())
plt.yticks(())
plt.title("%s precision" % name)
if hasattr(ax, "set_facecolor"):
ax.set_facecolor(".7")
else:
ax.set_axis_bgcolor(".7")
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([0.2, 0.15, 0.75, 0.7])
plt.plot(model.cv_results_["alphas"], model.cv_results_["mean_score"], "o-")
plt.axvline(model.alpha_, color=".5")
plt.title("Model selection")
plt.ylabel("Cross-validation score")
plt.xlabel("alpha")
plt.show()
|
{
"content_hash": "3525555ac2a772bd3c86ff4cd4f299b7",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 84,
"avg_line_length": 34.09933774834437,
"alnum_prop": 0.6801320644785396,
"repo_name": "sergeyf/scikit-learn",
"id": "8670ed4e061951125c2abf46f0d0c0f222319992",
"size": "5149",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/covariance/plot_sparse_cov.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "42335"
},
{
"name": "C++",
"bytes": "147316"
},
{
"name": "Cython",
"bytes": "718114"
},
{
"name": "Makefile",
"bytes": "1711"
},
{
"name": "Python",
"bytes": "9906683"
},
{
"name": "Shell",
"bytes": "49565"
}
],
"symlink_target": ""
}
|
import errno
import os
import re
import subprocess
import sys
# these strings will be replaced by git during git-archive
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
# these strings are filled in when 'setup.py versioneer' creates _version.py
tag_prefix = ""
parentdir_prefix = "borgweb-"
versionfile_source = "borgweb/_version.py"
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % args[0])
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % args[0])
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with "
"prefix '%s'" % (root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
def git_get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
if not keywords:
return {} # keyword-finding function failed to find keywords
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs-tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full": keywords["full"].strip()}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full": keywords["full"].strip()}
def git_parse_vcs_describe(git_describe, tag_prefix, verbose=False):
# TAG-NUM-gHEX[-dirty] or HEX[-dirty] . TAG might have hyphens.
# dirty
dirty = git_describe.endswith("-dirty")
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
dirty_suffix = ".dirty" if dirty else ""
# now we have TAG-NUM-gHEX or HEX
if "-" not in git_describe: # just HEX
return "0+untagged.g"+git_describe+dirty_suffix, dirty
# just TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
return "0+unparseable"+dirty_suffix, dirty
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
return None, dirty
tag = full_tag[len(tag_prefix):]
# distance: number of commits since tag
distance = int(mo.group(2))
# commit: short hex revision ID
commit = mo.group(3)
# now build up version string, with post-release "local version
# identifier". Our goal: TAG[+NUM.gHEX[.dirty]] . Note that if you get a
# tagged build and then dirty it, you'll get TAG+0.gHEX.dirty . So you
# can always test version.endswith(".dirty").
version = tag
if distance or dirty:
version += "+%d.g%s" % (distance, commit) + dirty_suffix
return version, dirty
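# A quick illustrative sketch of the parsing above (assuming tag_prefix ""):
#   "1.0-3-gabcdef0-dirty"  ->  ("1.0+3.gabcdef0.dirty", True)
#   "abcdef0"               ->  ("0+untagged.gabcdef0", False)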
def git_versions_from_vcs(tag_prefix, root, verbose=False):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
return {} # get_versions() will try next method
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# if there is a tag, this yields TAG-NUM-gHEX[-dirty]
# if there are no tags, this yields HEX[-dirty] (no NUM)
stdout = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long"],
cwd=root)
# --long was added in git-1.5.5
if stdout is None:
return {} # try next method
version, dirty = git_parse_vcs_describe(stdout, tag_prefix, verbose)
# build "full", which is FULLHEX[.dirty]
stdout = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if dirty:
full += ".dirty"
return {"version": version, "full": full}
def get_versions(default={"version": "0+unknown", "full": ""}, verbose=False):
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
keywords = {"refnames": git_refnames, "full": git_full}
ver = git_versions_from_keywords(keywords, tag_prefix, verbose)
if ver:
return ver
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return default
return (git_versions_from_vcs(tag_prefix, root, verbose)
or versions_from_parentdir(parentdir_prefix, root, verbose)
or default)
|
{
"content_hash": "5d7d86f4dad85c6cb43074a69115cec1",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 78,
"avg_line_length": 37.77292576419214,
"alnum_prop": 0.5942196531791908,
"repo_name": "pguth/borgweb",
"id": "006b08d571968a5344fc20ec3f845d6193fbf3e6",
"size": "9125",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "borgweb/_version.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1374"
},
{
"name": "HTML",
"bytes": "4796"
},
{
"name": "JavaScript",
"bytes": "741691"
},
{
"name": "Python",
"bytes": "59759"
}
],
"symlink_target": ""
}
|
import json
from django.http import HttpResponseBadRequest
from corehq.apps.app_manager.exceptions import (
ScheduleError,
)
from dimagi.utils.web import json_response
from corehq.apps.app_manager.dbaccessors import get_app
from corehq.apps.app_manager.models import (
FormSchedule,
)
from corehq.apps.app_manager.decorators import no_conflict_require_POST, \
require_can_edit_apps
@no_conflict_require_POST
@require_can_edit_apps
def edit_schedule_phases(request, domain, app_id, module_id):
NEW_PHASE_ID = -1
app = get_app(domain, app_id)
module = app.get_module(module_id)
phases = json.loads(request.POST.get('phases'))
changed_anchors = [(phase['id'], phase['anchor'])
for phase in phases if phase['id'] != NEW_PHASE_ID]
all_anchors = [phase['anchor'] for phase in phases]
enabled = json.loads(request.POST.get('has_schedule'))
try:
module.update_schedule_phase_anchors(changed_anchors)
module.update_schedule_phases(all_anchors)
module.has_schedule = enabled
except ScheduleError as e:
return HttpResponseBadRequest(unicode(e))
response_json = {}
app.save(response_json)
return json_response(response_json)
@no_conflict_require_POST
@require_can_edit_apps
def edit_visit_schedule(request, domain, app_id, module_id, form_id):
app = get_app(domain, app_id)
module = app.get_module(module_id)
form = module.get_form(form_id)
json_loads = json.loads(request.POST.get('schedule'))
enabled = json_loads.pop('enabled')
anchor = json_loads.pop('anchor')
schedule_form_id = json_loads.pop('schedule_form_id')
if enabled:
try:
phase, is_new_phase = module.get_or_create_schedule_phase(anchor=anchor)
except ScheduleError as e:
return HttpResponseBadRequest(unicode(e))
form.schedule_form_id = schedule_form_id
form.schedule = FormSchedule.wrap(json_loads)
phase.add_form(form)
else:
try:
form.disable_schedule()
except ScheduleError:
pass
response_json = {}
app.save(response_json)
return json_response(response_json)
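# Illustrative 'schedule' POST field for edit_visit_schedule above (a sketch;
# assumption: JSON carrying the three keys popped there plus the remaining
# FormSchedule fields, and "edd"/"f1" are hypothetical values):
#   schedule={"enabled": true, "anchor": "edd", "schedule_form_id": "f1", ...}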
def get_schedule_context(form):
from corehq.apps.app_manager.models import SchedulePhase
schedule_context = {}
module = form.get_module()
if not form.schedule:
# Forms created before the scheduler module existed don't have this property
# so we need to add it so everything works.
form.schedule = FormSchedule(enabled=False)
schedule_context.update({
'all_schedule_phase_anchors': [phase.anchor for phase in module.get_schedule_phases()],
'schedule_form_id': form.schedule_form_id,
})
if module.has_schedule:
phase = form.get_phase()
if phase is not None:
schedule_context.update({'schedule_phase': phase})
else:
schedule_context.update({'schedule_phase': SchedulePhase(anchor='')})
return schedule_context
|
{
"content_hash": "aebc1d62c5078b604fdb266be960f9ff",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 95,
"avg_line_length": 32.869565217391305,
"alnum_prop": 0.6689814814814815,
"repo_name": "qedsoftware/commcare-hq",
"id": "df401472e92fe95ae10e7c72925b56fb3fb3f602",
"size": "3024",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/app_manager/views/schedules.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "508392"
},
{
"name": "HTML",
"bytes": "2869325"
},
{
"name": "JavaScript",
"bytes": "2395360"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "125298"
},
{
"name": "Python",
"bytes": "14670713"
},
{
"name": "Shell",
"bytes": "37514"
}
],
"symlink_target": ""
}
|
"""
Test model creation with custom fields
"""
from django.db import models
from django.test import TestCase
from django_any import any_model
class MySlugField(models.SlugField):
pass
class ModelWithCustomField(models.Model):
slug = MySlugField()
class Meta:
app_label = 'django_any'
class CustomFieldsTest(TestCase):
def test_created_model_with_custom_field(self):
model = any_model(ModelWithCustomField)
self.assertEqual(type(model), ModelWithCustomField)
self.assertEqual(len(model._meta.fields), len(ModelWithCustomField._meta.local_fields))
self.assertTrue(model.slug)
self.assertTrue(isinstance(model.slug, basestring))
|
{
"content_hash": "6872a0d228beb1978f6583768026b536",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 95,
"avg_line_length": 24.964285714285715,
"alnum_prop": 0.7224606580829757,
"repo_name": "softak/webfaction_demo",
"id": "57a2a619b8d2b48d4d8882c49beb33588bb33729",
"size": "723",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vendor-local/lib/python/django_any/tests/model_customfield.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CoffeeScript",
"bytes": "33283"
},
{
"name": "JavaScript",
"bytes": "984889"
},
{
"name": "Python",
"bytes": "8055804"
},
{
"name": "Shell",
"bytes": "3065"
}
],
"symlink_target": ""
}
|
import libqtile.config
from libqtile.widget.check_updates import CheckUpdates, Popen # noqa: F401
from test.widgets.conftest import FakeBar
wrong_distro = "Barch"
good_distro = "Arch"
cmd_0_line = "export toto"  # quick "monkeypatch" simulating no output, i.e. 0 updates
cmd_1_line = "echo toto"  # quick "monkeypatch" simulating one line of output, i.e. 1 update
cmd_error = "false"
nus = "No Update Available"
# This class returns None when first polled (to simulate that the task is still running)
# and then 0 on the second call.
class MockPopen:
def __init__(self, *args, **kwargs):
self.call_count = 0
def poll(self):
if self.call_count == 0:
self.call_count += 1
return None
return 0
# Bit of an ugly hack to replicate the above functionality but for a method.
class MockSpawn:
call_count = 0
@classmethod
def call_process(cls, *args, **kwargs):
if cls.call_count == 0:
cls.call_count += 1
return "Updates"
return ""
def test_unknown_distro():
"""test an unknown distribution"""
cu = CheckUpdates(distro=wrong_distro)
text = cu.poll()
assert text == "N/A"
def test_update_available(fake_qtile, fake_window):
"""test output with update (check number of updates and color)"""
cu2 = CheckUpdates(
distro=good_distro, custom_command=cmd_1_line, colour_have_updates="#123456"
)
fakebar = FakeBar([cu2], window=fake_window)
cu2._configure(fake_qtile, fakebar)
text = cu2.poll()
assert text == "Updates: 1"
assert cu2.layout.colour == cu2.colour_have_updates
def test_no_update_available_without_no_update_string(fake_qtile, fake_window):
"""test output with no update (without dedicated string nor color)"""
cu3 = CheckUpdates(distro=good_distro, custom_command=cmd_0_line)
fakebar = FakeBar([cu3], window=fake_window)
cu3._configure(fake_qtile, fakebar)
text = cu3.poll()
assert text == ""
def test_no_update_available_with_no_update_string_and_color_no_updates(fake_qtile, fake_window):
"""test output with no update (with dedicated string and color)"""
cu4 = CheckUpdates(
distro=good_distro,
custom_command=cmd_0_line,
no_update_string=nus,
colour_no_updates="#654321",
)
fakebar = FakeBar([cu4], window=fake_window)
cu4._configure(fake_qtile, fakebar)
text = cu4.poll()
assert text == nus
assert cu4.layout.colour == cu4.colour_no_updates
def test_update_available_with_restart_indicator(monkeypatch, fake_qtile, fake_window):
"""test output with no indicator where restart needed"""
cu5 = CheckUpdates(
distro=good_distro,
custom_command=cmd_1_line,
restart_indicator="*",
)
monkeypatch.setattr("os.path.exists", lambda x: True)
fakebar = FakeBar([cu5], window=fake_window)
cu5._configure(fake_qtile, fakebar)
text = cu5.poll()
assert text == "Updates: 1*"
def test_update_available_with_execute(manager_nospawn, minimal_conf_noscreen, monkeypatch):
"""test polling after executing command"""
# Use monkeypatching to patch both Popen (for execute command) and call_process
# This class returns None when first polled (to simulate that the task is still running)
# and then 0 on the second call.
class MockPopen:
def __init__(self, *args, **kwargs):
self.call_count = 0
def poll(self):
if self.call_count == 0:
self.call_count += 1
return None
return 0
# Bit of an ugly hack to replicate the above functionality but for a method.
class MockSpawn:
call_count = 0
@classmethod
def call_process(cls, *args, **kwargs):
if cls.call_count == 0:
cls.call_count += 1
return "Updates"
return ""
cu6 = CheckUpdates(
distro=good_distro,
custom_command="dummy",
execute="dummy",
no_update_string=nus,
)
# Patch the necessary object
monkeypatch.setattr(cu6, "call_process", MockSpawn.call_process)
monkeypatch.setattr("libqtile.widget.check_updates.Popen", MockPopen)
config = minimal_conf_noscreen
config.screens = [libqtile.config.Screen(top=libqtile.bar.Bar([cu6], 10))]
manager_nospawn.start(config)
topbar = manager_nospawn.c.bar["top"]
assert topbar.info()["widgets"][0]["text"] == "Updates: 1"
# Clicking the widget triggers the execute command
topbar.fake_button_press(0, "top", 0, 0, button=1)
# The second time we poll the widget, the update process is complete
# and there are no more updates
_, result = manager_nospawn.c.widget["checkupdates"].eval("self.poll()")
assert result == nus
def test_update_process_error(fake_qtile, fake_window):
"""test output where update check gives error"""
cu7 = CheckUpdates(
distro=good_distro,
custom_command=cmd_error,
no_update_string="ERROR",
)
fakebar = FakeBar([cu7], window=fake_window)
cu7._configure(fake_qtile, fakebar)
text = cu7.poll()
assert text == "ERROR"
def test_line_truncations(fake_qtile, monkeypatch, fake_window):
"""test update count is reduced"""
# Mock output to return 5 lines of text
def mock_process(*args, **kwargs):
return "1\n2\n3\n4\n5\n"
# Fedora is set up to remove 1 from line count
cu8 = CheckUpdates(distro="Fedora")
monkeypatch.setattr(cu8, "call_process", mock_process)
fakebar = FakeBar([cu8], window=fake_window)
cu8._configure(fake_qtile, fakebar)
text = cu8.poll()
# Should have 4 updates
assert text == "Updates: 4"
|
{
"content_hash": "f013fb014d014206938e0465adedb753",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 97,
"avg_line_length": 31.635359116022098,
"alnum_prop": 0.6466992665036675,
"repo_name": "ramnes/qtile",
"id": "78c88de862724a1129044e040f7ef85b7d61bd54",
"size": "5726",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/widgets/test_check_updates.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "535"
},
{
"name": "Python",
"bytes": "2135461"
},
{
"name": "Shell",
"bytes": "8090"
}
],
"symlink_target": ""
}
|
"""
Global settings:
Those which are typically edited during a deployment are in
000_config.py & their results parsed into here. Deployers
shouldn't typically need to edit any settings here.
"""
# Keep all our configuration options off the main global variables
# Use response.s3 for one-off variables which are visible in views without explicit passing
s3.formats = Storage()
# Workaround for this Bug in Selenium with FF4:
# http://code.google.com/p/selenium/issues/detail?id=1604
s3.interactive = settings.get_ui_confirm()
s3.base_url = "%s/%s" % (settings.get_base_public_url(),
appname)
s3.download_url = "%s/default/download" % s3.base_url
###############
# Client tests
###############
# Check whether browser is Mobile & store result in session
# - commented-out until we make use of it
#if session.s3.mobile is None:
# session.s3.mobile = s3base.s3_is_mobile_client(request)
#if session.s3.browser is None:
# session.s3.browser = s3base.s3_populate_browser_compatibility(request)
##################
# Global variables
##################
# Interactive view formats
s3.interactive_view_formats = ("html", "popup", "iframe")
# Strings to i18n
messages["UNAUTHORISED"] = "Not authorised!"
messages["BADFORMAT"] = "Unsupported data format!"
messages["BADMETHOD"] = "Unsupported method!"
messages["BADRECORD"] = "Record not found!"
messages["INVALIDREQUEST"] = "Invalid request!"
messages["XLWT_ERROR"] = "xlwt module not available within the running Python - this needs installing for XLS output!"
messages["REPORTLAB_ERROR"] = "ReportLab module not available within the running Python - this needs installing for PDF output!"
# Common Labels
#messages["BREADCRUMB"] = ">> "
messages["UNKNOWN_OPT"] = "Unknown"
messages["NONE"] = "-"
messages["READ"] = settings.get_ui_read_label()
messages["UPDATE"] = settings.get_ui_update_label()
messages["DELETE"] = "Delete"
messages["COPY"] = "Copy"
messages["NOT_APPLICABLE"] = "N/A"
messages["ADD_PERSON"] = "Add Person"
messages["ADD_LOCATION"] = "Add Location"
messages["SELECT_LOCATION"] = "Select a location"
for u in messages:
if isinstance(messages[u], str):
globals()[u] = T(messages[u])
# Pass to CRUD
s3mgr.LABEL["READ"] = READ
s3mgr.LABEL["UPDATE"] = UPDATE
s3mgr.LABEL["DELETE"] = DELETE
s3mgr.LABEL["COPY"] = COPY
# To get included in <HEAD>
s3.stylesheets = []
s3.external_stylesheets = []
# To get included at the end of <BODY>
s3.scripts = []
s3.js_global = []
s3.jquery_ready = []
###########
# Languages
###########
s3.l10n_languages = settings.get_L10n_languages()
# Default strings are in US English
T.current_languages = ["en", "en-us"]
# Check if user has selected a specific language
if request.vars._language:
language = request.vars._language
session.s3.language = language
elif session.s3.language:
# Use the last-selected language
language = session.s3.language
elif auth.is_logged_in():
# Use user preference
language = auth.user.language
else:
# Use system default
language = settings.get_L10n_default_language()
#else:
# # Use what browser requests (default web2py behaviour)
# T.force(T.http_accept_language)
# IE doesn't set request.env.http_accept_language
#if language != "en":
T.force(language)
# Store for views (e.g. Ext)
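# e.g. (traced from the branches below): "vi" -> "vn", "el" -> "el_GR",
# "en" -> "en", "en-gb" -> "en_GB"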
if language.find("-") == -1:
# Ext peculiarities
if language == "vi":
s3.language = "vn"
elif language == "el":
s3.language = "el_GR"
else:
s3.language = language
else:
lang_parts = language.split("-")
s3.language = "%s_%s" % (lang_parts[0], lang_parts[1].upper())
# List of Languages which use a Right-to-Left script (Arabic, Hebrew, Farsi, Urdu)
s3_rtl_languages = ["ur", "ar"]
if T.accepted_language in s3_rtl_languages:
s3.rtl = True
else:
s3.rtl = False
######
# Auth
######
_settings = auth.settings
_settings.lock_keys = False
_settings.password_min_length = 4
_settings.expiration = 28800 # seconds
_settings.facebook = settings.get_auth_facebook()
_settings.google = settings.get_auth_google()
if settings.get_auth_openid():
# Requires http://pypi.python.org/pypi/python-openid/
try:
from gluon.contrib.login_methods.openid_auth import OpenIDAuth
openid_login_form = OpenIDAuth(auth)
from gluon.contrib.login_methods.extended_login_form import ExtendedLoginForm
extended_login_form = ExtendedLoginForm(auth, openid_login_form,
signals=["oid", "janrain_nonce"])
auth.settings.login_form = extended_login_form
except ImportError:
session.warning = T("Library support not available for OpenID")
# Allow use of LDAP accounts for login
# NB Currently this means that change password should be disabled:
#_settings.actions_disabled.append("change_password")
# (NB These are not automatically added to PR or to Authenticated role since they enter via the login() method not register())
#from gluon.contrib.login_methods.ldap_auth import ldap_auth
# Require even alternate login methods to register users 1st
#_settings.alternate_requires_registration = True
# Active Directory
#_settings.login_methods.append(ldap_auth(mode="ad", server="dc.domain.org", base_dn="ou=Users,dc=domain,dc=org"))
# or if not wanting local users at all (no passwords saved within DB):
#_settings.login_methods = [ldap_auth(mode="ad", server="dc.domain.org", base_dn="ou=Users,dc=domain,dc=org")]
# Domino
#_settings.login_methods.append(ldap_auth(mode="domino", server="domino.domain.org"))
# OpenLDAP
#_settings.login_methods.append(ldap_auth(server="directory.sahanafoundation.org", base_dn="ou=users,dc=sahanafoundation,dc=org"))
# Allow use of Email accounts for login
#_settings.login_methods.append(email_auth("smtp.gmail.com:587", "@gmail.com"))
# Require captcha verification for registration
#auth.settings.captcha = RECAPTCHA(request, public_key="PUBLIC_KEY", private_key="PRIVATE_KEY")
# Require Email Verification
_settings.registration_requires_verification = settings.get_auth_registration_requires_verification()
_settings.on_failed_authorization = URL(c="default", f="user",
args="not_authorized")
_settings.reset_password_requires_verification = True
_settings.verify_email_next = URL(c="default", f="index")
# Notify Approver of new pending user registration. Action may be required.
_settings.verify_email_onaccept = auth.s3_verify_email_onaccept
# Auth Messages
_messages = auth.messages
_messages.verify_email = "Click on the link %(url)s%(key)s to verify your email" % \
dict(url="%s/default/user/verify_email/" % s3.base_url,
key="%(key)s")
_messages.verify_email_subject = "%(system_name)s - Verify Email" % \
{"system_name" : settings.get_system_name()}
_messages.reset_password = "%s %s/default/user/reset_password/%s %s" % \
(T("Click on the link"),
s3.base_url,
"%(key)s",
T("to reset your password"))
_messages.help_mobile_phone = T("Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.")
# Require Admin approval for self-registered users
_settings.registration_requires_approval = settings.get_auth_registration_requires_approval()
_messages.registration_pending = "Registration is still pending approval from Approver (%s) - please wait until confirmation received." % \
settings.get_mail_approver()
_messages.registration_pending_approval = "Thank you for validating your email. Your user account is still pending approval by the system administrator (%s). You will get a notification by email when your account is activated." % \
settings.get_mail_approver()
_messages["approve_user"] = \
"""Your action is required to approve a New User for %(system_name)s:
%(name_format)s
Please go to %(base_url)s/admin/user to approve this user.""" \
% dict(system_name = settings.get_system_name(),
name_format = \
"""%(first_name)s %(last_name)s
%(email)s""",
base_url = s3.base_url)
_messages["new_user"] = \
"""A New User has registered for %(system_name)s:
%(name_format)s
No action is required.""" \
% dict(system_name = settings.get_system_name(),
name_format = \
"""%(first_name)s %(last_name)s
%(email)s""")
# We don't wish to clutter the groups list with 1 per user.
_settings.create_user_groups = False
# We need to allow basic logins for Webservices
_settings.allow_basic_login = True
_settings.logout_onlogout = s3_auth_on_logout
_settings.login_onaccept = s3_auth_on_login
_settings.login_next = settings.get_auth_login_next()
if settings.get_auth_registration_volunteer() and \
settings.has_module("vol"):
_settings.register_next = URL(c="vol", f="person")
# Default Language for authenticated users
_settings.table_user.language.default = settings.get_L10n_default_language()
# Languages available in User Profiles
field = _settings.table_user.language
if len(s3.l10n_languages) > 1:
field.requires = IS_IN_SET(s3.l10n_languages,
zero=None)
else:
field.default = s3.l10n_languages.keys()[0]
field.readable = False
field.writable = False
_settings.lock_keys = True
######
# Mail
######
# These settings could be made configurable as part of the Messaging Module
# - however also need to be used by Auth (order issues), DB calls are overheads
# - as easy for admin to edit source here as to edit DB (although an admin panel can be nice)
mail.settings.server = settings.get_mail_server()
mail.settings.tls = settings.get_mail_server_tls()
mail_server_login = settings.get_mail_server_login()
if mail_server_login:
mail.settings.login = mail_server_login
mail.settings.sender = settings.get_mail_sender()
# Email settings for registration verification
_settings.mailer = mail
#########
# Session
#########
# Custom Notifications
response.error = session.error
response.confirmation = session.confirmation
response.information = session.information
response.warning = session.warning
session.error = []
session.confirmation = []
session.information = []
session.warning = []
# Shortcuts for system role IDs, see modules/s3aaa.py/AuthS3
system_roles = auth.get_system_roles()
ADMIN = system_roles.ADMIN
AUTHENTICATED = system_roles.AUTHENTICATED
ANONYMOUS = system_roles.ANONYMOUS
EDITOR = system_roles.EDITOR
MAP_ADMIN = system_roles.MAP_ADMIN
ORG_ADMIN = system_roles.ORG_ADMIN
if s3.debug:
# Add the developer toolbar from modules/s3/s3utils.py
s3.toolbar = s3base.s3_dev_toolbar
######
# CRUD
######
def s3_formstyle(id, label, widget, comment, hidden=False):
"""
Provide the Sahana Eden Form Style
Label above the Inputs:
http://uxmovement.com/design-articles/faster-with-top-aligned-labels
"""
row = []
if hidden:
_class = "hide"
else:
_class = ""
# Label on the 1st row
row.append(TR(TD(label, _class="w2p_fl"), TD(""), _id=id + "1", _class=_class))
# Widget & Comment on the 2nd Row
row.append(TR(widget, TD(comment, _class="w2p_fc"), _id=id, _class=_class))
return tuple(row)
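# Illustrative return value (assumption: id="myfield__row"): two TRs, the
# label row getting _id "myfield__row1" and the widget row _id "myfield__row":
#   (TR(TD(label, _class="w2p_fl"), TD(""), _id="myfield__row1"),
#    TR(widget, TD(comment, _class="w2p_fc"), _id="myfield__row"))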
s3_formstyle_mobile = s3_formstyle
_crud = s3.crud
_crud.formstyle = s3_formstyle
_crud.submit_button = T("Save")
# Optional class for Submit buttons
#_crud.submit_style = "submit-button"
_crud.confirm_delete = T("Do you really want to delete these records?")
_crud.archive_not_delete = settings.get_security_archive_not_delete()
_crud.navigate_away_confirm = settings.get_ui_navigate_away_confirm()
# Web2py Crud
# Breaks refresh of List after Create: http://groups.google.com/group/web2py/browse_thread/thread/d5083ed08c685e34
#crud.settings.keepvalues = True
crud.messages.submit_button = s3.crud.submit_button
crud.settings.formstyle = s3.crud.formstyle
##################
# XML/JSON Formats
##################
s3mgr.crud = s3base.S3CRUD
s3mgr.search = s3base.S3Search
# Content Type Headers, default is application/xml for XML formats
# and text/x-json for JSON formats, other content types must be
# specified here:
s3mgr.content_type = Storage(
tc = "application/atom+xml", # TableCast feeds
rss = "application/rss+xml", # RSS
georss = "application/rss+xml", # GeoRSS
kml = "application/vnd.google-earth.kml+xml", # KML
)
# JSON Formats
s3mgr.json_formats = ["geojson", "s3json"]
# CSV Formats
s3mgr.csv_formats = ["hrf", "s3csv"]
s3mgr.ROWSPERPAGE = 20
#######
# Menus
#######
# Import menus and layouts
from eden.layouts import *
import eden.menus as default_menus
S3MainMenu = default_menus.S3MainMenu
S3OptionsMenu = default_menus.S3OptionsMenu
current.menu = Storage(options=None, override={})
if auth.permission.format in ("html"):
menus = "applications.%s.private.templates.%s.menus" % \
(appname, settings.get_theme())
try:
exec("import %s as deployment_menus" % menus)
except ImportError:
pass
else:
if "S3MainMenu" in deployment_menus.__dict__:
S3MainMenu = deployment_menus.S3MainMenu
if "S3OptionsMenu" in deployment_menus.__dict__:
S3OptionsMenu = deployment_menus.S3OptionsMenu
main = S3MainMenu.menu()
else:
main = None
menu = current.menu
menu["main"] = main
# Override controller menus
# @todo: replace by current.menu.override
s3_menu_dict = {}
##########
# Messages
##########
s3.messages = Messages(T)
system_name = settings.get_system_name_short()
s3.messages.confirmation_email_subject = "%s %s" % (system_name,
T("access granted"))
s3.messages.confirmation_email = "%s %s %s %s. %s." % (T("Welcome to the"),
system_name,
T("Portal at"),
s3.base_url,
T("Thanks for your assistance"))
# Valid Extensions for Image Upload fields
s3.IMAGE_EXTENSIONS = ["png", "PNG", "jpg", "JPG", "jpeg", "JPEG", "gif", "GIF", "tif", "TIF", "tiff", "TIFF", "bmp", "BMP", "raw", "RAW"]
# Default CRUD strings
ADD_RECORD = T("Add Record")
s3.crud_strings = Storage(
title_create = ADD_RECORD,
title_display = T("Record Details"),
title_list = T("Records"),
title_update = T("Edit Record"),
title_search = T("Search Records"),
title_map = T("Map"),
subtitle_create = T("Add New Record"),
label_list_button = T("List Records"),
label_create_button = ADD_RECORD,
label_delete_button = T("Delete Record"),
msg_record_created = T("Record added"),
msg_record_modified = T("Record updated"),
msg_record_deleted = T("Record deleted"),
msg_list_empty = T("No Records currently available"),
msg_match = T("Matching Records"),
msg_no_match = T("No Matching Records"),
name_nice = T("Record"),
name_nice_plural = T("Records"))
# END =========================================================================
|
{
"content_hash": "8faeae60bed69d475b7ac98cf20e6897",
"timestamp": "",
"source": "github",
"line_count": 436,
"max_line_length": 233,
"avg_line_length": 34.38073394495413,
"alnum_prop": 0.6742494996664443,
"repo_name": "anubhav929/eden",
"id": "919b755cc004cd2ecc9b5a6e0fab125c85fb9cc4",
"size": "15015",
"binary": false,
"copies": "1",
"ref": "refs/heads/eden",
"path": "models/00_settings.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "14862639"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Perl",
"bytes": "2202"
},
{
"name": "Python",
"bytes": "21998764"
},
{
"name": "Racket",
"bytes": "166"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from dp_tornado.engine.helper import Helper as dpHelper
import datetime as abs_datetime
class DateHelper(dpHelper):
def now(self, timezone=None):
return self.from_datetime(datetime=self.helper.datetime.now(timezone=timezone))
def from_datetime(self, datetime, timezone=None):
if not timezone:
return abs_datetime.datetime(
year=datetime.year,
month=datetime.month,
day=datetime.day,
tzinfo=datetime.tzinfo)
if not datetime.tzinfo:
datetime = self.helper.datetime.timezone.localize(datetime=datetime, timezone=timezone)
else:
datetime = self.helper.datetime.timezone.normalize(datetime=datetime, timezone=timezone)
return self.from_datetime(datetime=datetime)
def from_timestamp(self, timestamp, timezone=None, ms=False):
datetime = self.helper.datetime.from_timestamp(timestamp=timestamp, timezone=timezone, ms=ms)
return self.from_datetime(datetime=datetime)
def convert(
self,
auto=None,
datetime=None,
timezone=None,
timestamp=None,
yyyymmdd=None,
yyyymmddhhiiss=None,
ms=False):
datetime = self.helper.datetime.convert(
auto=auto,
datetime=datetime,
timezone=timezone,
timestamp=timestamp,
yyyymmdd=yyyymmdd,
yyyymmddhhiiss=yyyymmddhhiiss,
ms=ms)
return self.from_datetime(datetime=datetime)
def year(self, auto=None, datetime=None, timezone=None, timestamp=None, ms=False):
return self.convert(auto=auto, datetime=datetime, timezone=timezone, timestamp=timestamp, ms=ms).year
def month(self, auto=None, datetime=None, timezone=None, timestamp=None, ms=False):
return self.convert(auto=auto, datetime=datetime, timezone=timezone, timestamp=timestamp, ms=ms).month
def day(self, auto=None, datetime=None, timezone=None, timestamp=None, ms=False):
return self.convert(auto=auto, datetime=datetime, timezone=timezone, timestamp=timestamp, ms=ms).day
def weekday(self, auto=None, datetime=None, timezone=None, timestamp=None, ms=False, isoweekday=True):
datetime = self.convert(auto=auto, datetime=datetime, timezone=timezone, timestamp=timestamp, ms=ms)
return datetime.isoweekday() if isoweekday else datetime.weekday()
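    # Note: isoweekday() numbers Monday as 1 .. Sunday as 7, while
    # weekday() numbers Monday as 0 .. Sunday as 6.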
def tuple(self, auto=None, datetime=None, timezone=None, timestamp=None, ms=False):
datetime = self.convert(auto=auto, datetime=datetime, timezone=timezone, timestamp=timestamp, ms=ms)
time_set = [datetime.year, datetime.month, datetime.day]
if datetime.tzinfo:
time_set.append(self.helper.datetime.timezone.zone_from_tzinfo(datetime.tzinfo))
return time_set
def yyyymmdd(self, auto=None, datetime=None, timezone=None, timestamp=None, ms=False, concat=''):
datetime = self.convert(auto=auto, datetime=datetime, timezone=timezone, timestamp=timestamp, ms=ms)
return '%04d%s%02d%s%02d' % (datetime.year, concat, datetime.month, concat, datetime.day)
def mmdd(self, auto=None, datetime=None, timezone=None, timestamp=None, ms=False, concat=''):
datetime = self.convert(auto=auto, datetime=datetime, timezone=timezone, timestamp=timestamp, ms=ms)
return '%02d%s%02d' % (datetime.month, concat, datetime.day)
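# Illustrative formatting (assumptions: `d` is a datetime for 7 March 2015
# and the helper is reached via its module path, e.g. helper.datetime.date):
#   helper.datetime.date.yyyymmdd(datetime=d, concat='-')  -> '2015-03-07'
#   helper.datetime.date.mmdd(datetime=d)                  -> '0307'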
|
{
"content_hash": "5fc273262098b300f3b46f897efa6be4",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 110,
"avg_line_length": 45.37662337662338,
"alnum_prop": 0.6745850028620493,
"repo_name": "why2pac/dp-tornado",
"id": "e6e2dd2854fdd4c675aeeca23ebc0744c71cd7a8",
"size": "3520",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dp_tornado/helper/datetime/date.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3715"
},
{
"name": "Dockerfile",
"bytes": "2157"
},
{
"name": "HTML",
"bytes": "9880"
},
{
"name": "JavaScript",
"bytes": "41639"
},
{
"name": "Python",
"bytes": "567608"
},
{
"name": "Shell",
"bytes": "9068"
}
],
"symlink_target": ""
}
|
import json
from .address import FieldType
class JSONEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, FieldType):
return {
'key': o.key.name,
'label': o.label,
'required': o.required,
'options': o.options,
}
return super().default(o)
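# Illustrative usage (assumption: `field` is a FieldType instance):
#   json.dumps({"street": field}, cls=JSONEncoder)
#   -> '{"street": {"key": "...", "label": "...", "required": ..., "options": ...}}'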
|
{
"content_hash": "553a9ab5df2b02b6a4c5b06b36f2a265",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 39,
"avg_line_length": 25.785714285714285,
"alnum_prop": 0.5069252077562327,
"repo_name": "3DHubs/Ranch",
"id": "c36803f5b899c22f2e32fa8a345f220c3ff759e8",
"size": "361",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ranch/json.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "26644"
}
],
"symlink_target": ""
}
|
"""Setup file for data structures assignment."""
from setuptools import setup
setup(
name="datastructures",
description="python implementations",
version=0.1,
author="Regenal",
author_email="regenal@mac.com",
license="MIT",
py_modules=['LinkList',],
package_dir={'': 'src'},
    install_requires=[],
extras_require={'test': ['pytest', 'pytest-watch', 'pytest-cov', 'tox']},
entry_points={
# 'console_scripts': [
# "command = module_name:main",
# ]
}
)
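# Illustrative install commands (assumption: run from the project root):
#   pip install -e .           # the package itself
#   pip install -e ".[test]"   # plus the test extras declared above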
|
{
"content_hash": "a5dbaeb90913d12d5870b7b47d04a2ef",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 77,
"avg_line_length": 23.043478260869566,
"alnum_prop": 0.5830188679245283,
"repo_name": "regenalgrant/datastructures",
"id": "346aa2ec3221cd70fb1e3d6eb215cfdb396c5ef2",
"size": "530",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26103"
}
],
"symlink_target": ""
}
|
from rest_framework import serializers as ser
from rest_framework import exceptions
from rest_framework.exceptions import ValidationError
from modularodm import Q
from modularodm.exceptions import ValidationValueError
from framework.auth.core import Auth
from framework.exceptions import PermissionsError
from website.models import Node, User, Comment
from website.exceptions import NodeStateError
from website.util import permissions as osf_permissions
from api.base.utils import get_object_or_error, absolute_reverse
from api.base.serializers import (JSONAPISerializer, WaterbutlerLink, NodeFileHyperLinkField, IDField, TypeField,
TargetTypeField, JSONAPIListField, LinksField, RelationshipField, DevOnly,
HideIfRegistration)
from api.base.exceptions import InvalidModelValueError
class NodeTagField(ser.Field):
def to_representation(self, obj):
if obj is not None:
return obj._id
return None
def to_internal_value(self, data):
return data
class NodeSerializer(JSONAPISerializer):
# TODO: If we have to redo this implementation in any of the other serializers, subclass ChoiceField and make it
# handle blank choices properly. Currently DRF ChoiceFields ignore blank options, which is incorrect in this
# instance
filterable_fields = frozenset([
'id',
'title',
'description',
'public',
'tags',
'category',
'date_created',
'date_modified',
'registration',
'root',
'parent'
])
non_anonymized_fields = [
'id',
'title',
'description',
'category',
'date_created',
'date_modified',
'registration',
'tags',
'public',
'links',
'children',
'comments',
'contributors',
'files',
'node_links',
'parent',
'root',
'logs',
]
id = IDField(source='_id', read_only=True)
type = TypeField()
category_choices = Node.CATEGORY_MAP.keys()
category_choices_string = ', '.join(["'{}'".format(choice) for choice in category_choices])
title = ser.CharField(required=True)
description = ser.CharField(required=False, allow_blank=True, allow_null=True)
category = ser.ChoiceField(choices=category_choices, help_text="Choices: " + category_choices_string)
date_created = ser.DateTimeField(read_only=True)
date_modified = ser.DateTimeField(read_only=True)
registration = ser.BooleanField(read_only=True, source='is_registration')
fork = ser.BooleanField(read_only=True, source='is_fork')
collection = DevOnly(ser.BooleanField(read_only=True, source='is_folder'))
dashboard = ser.BooleanField(read_only=True, source='is_dashboard')
tags = JSONAPIListField(child=NodeTagField(), required=False)
# Public is only write-able by admins--see update method
public = ser.BooleanField(source='is_public', required=False,
help_text='Nodes that are made public will give read-only access '
'to everyone. Private nodes require explicit read '
'permission. Write and admin access are the same for '
'public and private nodes. Administrators on a parent '
'node have implicit read permissions for all child nodes')
links = LinksField({'html': 'get_absolute_url'})
# TODO: When we have osf_permissions.ADMIN permissions, make this writable for admins
children = RelationshipField(
related_view='nodes:node-children',
related_view_kwargs={'node_id': '<pk>'},
related_meta={'count': 'get_node_count'},
)
comments = RelationshipField(
related_view='nodes:node-comments',
related_view_kwargs={'node_id': '<pk>'},
related_meta={'unread': 'get_unread_comments_count'})
contributors = RelationshipField(
related_view='nodes:node-contributors',
related_view_kwargs={'node_id': '<pk>'},
related_meta={'count': 'get_contrib_count'},
)
files = RelationshipField(
related_view='nodes:node-providers',
related_view_kwargs={'node_id': '<pk>'}
)
forked_from = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<forked_from_id>'}
)
node_links = DevOnly(RelationshipField(
related_view='nodes:node-pointers',
related_view_kwargs={'node_id': '<pk>'},
related_meta={'count': 'get_pointers_count'},
))
parent = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<parent_node._id>'},
filter_key='parent_node'
)
registrations = DevOnly(HideIfRegistration(RelationshipField(
related_view='nodes:node-registrations',
related_view_kwargs={'node_id': '<pk>'},
related_meta={'count': 'get_registration_count'}
)))
root = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<root._id>'}
)
logs = RelationshipField(
related_view='nodes:node-logs',
related_view_kwargs={'node_id': '<pk>'},
)
class Meta:
type_ = 'nodes'
def get_absolute_url(self, obj):
return obj.absolute_url
# TODO: See if we can get the count filters into the filter rather than the serializer.
def get_user_auth(self, request):
user = request.user
if user.is_anonymous():
auth = Auth(None)
else:
auth = Auth(user)
return auth
def get_node_count(self, obj):
auth = self.get_user_auth(self.context['request'])
nodes = [node for node in obj.nodes if node.can_view(auth) and node.primary and not node.is_deleted]
return len(nodes)
def get_contrib_count(self, obj):
return len(obj.contributors)
def get_registration_count(self, obj):
auth = self.get_user_auth(self.context['request'])
registrations = [node for node in obj.node__registrations if node.can_view(auth)]
return len(registrations)
def get_pointers_count(self, obj):
return len(obj.nodes_pointer)
def get_unread_comments_count(self, obj):
auth = self.get_user_auth(self.context['request'])
user = auth.user
return Comment.find_unread(user=user, node=obj)
def create(self, validated_data):
node = Node(**validated_data)
try:
node.save()
except ValidationValueError as e:
raise InvalidModelValueError(detail=e.message)
return node
def update(self, node, validated_data):
"""Update instance with the validated data. Requires
the request to be in the serializer context.
"""
assert isinstance(node, Node), 'node must be a Node'
auth = self.get_user_auth(self.context['request'])
old_tags = set([tag._id for tag in node.tags])
if 'tags' in validated_data:
current_tags = set(validated_data.get('tags'))
del validated_data['tags']
elif self.partial:
current_tags = set(old_tags)
else:
current_tags = set()
for new_tag in (current_tags - old_tags):
node.add_tag(new_tag, auth=auth)
for deleted_tag in (old_tags - current_tags):
node.remove_tag(deleted_tag, auth=auth)
if validated_data:
try:
node.update(validated_data, auth=auth)
except ValidationValueError as e:
raise InvalidModelValueError(detail=e.message)
except PermissionsError:
raise exceptions.PermissionDenied
return node
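    # Illustrative tag diff in update() above (a minimal sketch):
    #   old_tags={'a', 'b'}, current_tags={'b', 'c'}
    #   -> node.add_tag('c') and node.remove_tag('a'); 'b' is left untouched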
class NodeDetailSerializer(NodeSerializer):
"""
Overrides NodeSerializer to make id required.
"""
id = IDField(source='_id', required=True)
class NodeContributorsSerializer(JSONAPISerializer):
""" Separate from UserSerializer due to necessity to override almost every field as read only
"""
non_anonymized_fields = ['bibliographic', 'permission']
filterable_fields = frozenset([
'id',
'bibliographic',
'permission'
])
id = IDField(source='_id', required=True)
type = TypeField()
bibliographic = ser.BooleanField(help_text='Whether the user will be included in citations for this node or not.',
default=True)
permission = ser.ChoiceField(choices=osf_permissions.PERMISSIONS, required=False, allow_null=True,
default=osf_permissions.reduce_permissions(osf_permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS),
help_text='User permission level. Must be "read", "write", or "admin". Defaults to "write".')
links = LinksField({
'self': 'get_absolute_url'
})
users = RelationshipField(
related_view='users:user-detail',
related_view_kwargs={'user_id': '<pk>'},
always_embed=True
)
class Meta:
type_ = 'contributors'
def get_absolute_url(self, obj):
node_id = self.context['request'].parser_context['kwargs']['node_id']
return absolute_reverse(
'nodes:node-contributor-detail',
kwargs={
'node_id': node_id,
'user_id': obj._id
}
)
class NodeContributorsCreateSerializer(NodeContributorsSerializer):
"""
Overrides NodeContributorsSerializer to add target_type field
"""
target_type = TargetTypeField(target_type='users')
def create(self, validated_data):
auth = Auth(self.context['request'].user)
node = self.context['view'].get_node()
contributor = get_object_or_error(User, validated_data['_id'], display_name='user')
# Node object checks for contributor existence but can still change permissions anyway
if contributor in node.contributors:
raise exceptions.ValidationError('{} is already a contributor'.format(contributor.fullname))
bibliographic = validated_data['bibliographic']
permissions = osf_permissions.expand_permissions(validated_data.get('permission')) or osf_permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS
node.add_contributor(contributor=contributor, auth=auth, visible=bibliographic, permissions=permissions, save=True)
contributor.permission = osf_permissions.reduce_permissions(node.get_permissions(contributor))
contributor.bibliographic = node.get_visible(contributor)
contributor.node_id = node._id
return contributor
class NodeContributorDetailSerializer(NodeContributorsSerializer):
"""
Overrides node contributor serializer to add additional methods
"""
def update(self, instance, validated_data):
contributor = instance
auth = Auth(self.context['request'].user)
node = self.context['view'].get_node()
visible = validated_data.get('bibliographic')
permission = validated_data.get('permission')
try:
node.update_contributor(contributor, permission, visible, auth, save=True)
except NodeStateError as e:
raise exceptions.ValidationError(e)
contributor.permission = osf_permissions.reduce_permissions(node.get_permissions(contributor))
contributor.bibliographic = node.get_visible(contributor)
contributor.node_id = node._id
return contributor
class NodeLinksSerializer(JSONAPISerializer):
id = IDField(source='_id')
type = TypeField()
target_type = TargetTypeField(target_type='nodes')
# TODO: We don't show the title because the current user may not have access to this node. We may want to conditionally
# include this field in the future.
# title = ser.CharField(read_only=True, source='node.title', help_text='The title of the node that this Node Link '
# 'points to')
target_node = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<pk>'},
always_embed=True
)
class Meta:
type_ = 'node_links'
links = LinksField({
'self': 'get_absolute_url'
})
def get_absolute_url(self, obj):
node_id = self.context['request'].parser_context['kwargs']['node_id']
return absolute_reverse(
'nodes:node-pointer-detail',
kwargs={
'node_id': node_id,
'node_link_id': obj._id
}
)
def create(self, validated_data):
request = self.context['request']
user = request.user
auth = Auth(user)
node = self.context['view'].get_node()
target_node_id = validated_data['_id']
pointer_node = Node.load(target_node_id)
if not pointer_node or pointer_node.is_folder:
raise InvalidModelValueError(
source={'pointer': '/data/relationships/node_links/data/id'},
detail='Target Node \'{}\' not found.'.format(target_node_id)
)
try:
pointer = node.add_pointer(pointer_node, auth, save=True)
return pointer
except ValueError:
raise InvalidModelValueError(
source={'pointer': '/data/relationships/node_links/data/id'},
detail='Target Node \'{}\' already pointed to by \'{}\'.'.format(target_node_id, node._id)
)
def update(self, instance, validated_data):
pass
class NodeProviderSerializer(JSONAPISerializer):
id = ser.SerializerMethodField(read_only=True)
kind = ser.CharField(read_only=True)
name = ser.CharField(read_only=True)
path = ser.CharField(read_only=True)
node = ser.CharField(source='node_id', read_only=True)
provider = ser.CharField(read_only=True)
files = NodeFileHyperLinkField(
related_view='nodes:node-files',
related_view_kwargs={'node_id': '<node_id>', 'path': '<path>', 'provider': '<provider>'},
kind='folder',
never_embed=True
)
links = LinksField({
'upload': WaterbutlerLink(),
'new_folder': WaterbutlerLink(kind='folder')
})
class Meta:
type_ = 'files'
@staticmethod
def get_id(obj):
return '{}:{}'.format(obj.node._id, obj.provider)
class NodeAlternativeCitationSerializer(JSONAPISerializer):
id = IDField(source="_id", read_only=True)
type = TypeField()
name = ser.CharField(required=True)
text = ser.CharField(required=True)
class Meta:
type_ = 'citations'
def create(self, validated_data):
errors = self.error_checker(validated_data)
if len(errors) > 0:
raise ValidationError(detail=errors)
node = self.context['view'].get_node()
auth = Auth(self.context['request']._user)
citation = node.add_citation(auth, save=True, **validated_data)
return citation
def update(self, instance, validated_data):
errors = self.error_checker(validated_data)
if len(errors) > 0:
raise ValidationError(detail=errors)
node = self.context['view'].get_node()
auth = Auth(self.context['request']._user)
instance = node.edit_citation(auth, instance, save=True, **validated_data)
return instance
def error_checker(self, data):
errors = []
name = data.get('name', None)
text = data.get('text', None)
citations = self.context['view'].get_node().alternative_citations
if not (self.instance and self.instance.name == name) and citations.find(Q('name', 'eq', name)).count() > 0:
errors.append("There is already a citation named '{}'".format(name))
if not (self.instance and self.instance.text == text):
matching_citations = citations.find(Q('text', 'eq', text))
if matching_citations.count() > 0:
names = "', '".join([str(citation.name) for citation in matching_citations])
errors.append("Citation matches '{}'".format(names))
return errors
|
{
"content_hash": "b4214616a6b9d716343dfeb328adb784",
"timestamp": "",
"source": "github",
"line_count": 449,
"max_line_length": 141,
"avg_line_length": 36.43429844097996,
"alnum_prop": 0.6205758298184486,
"repo_name": "Ghalko/osf.io",
"id": "74b0bcd2a66c65abba41f8af8fe6d05fede74837",
"size": "16359",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "api/nodes/serializers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "131914"
},
{
"name": "HTML",
"bytes": "49734"
},
{
"name": "JavaScript",
"bytes": "1365776"
},
{
"name": "Mako",
"bytes": "615025"
},
{
"name": "Python",
"bytes": "4647431"
},
{
"name": "Shell",
"bytes": "2118"
}
],
"symlink_target": ""
}
|
from pexpect import *
import os, sys
import getpass
import time
class ssh_session:
"Session with extra state including the password to be used."
def __init__(self, user, host, password=None, verbose=0):
self.user = user
self.host = host
self.verbose = verbose
self.password = password
self.keys = [
'authenticity',
'assword:',
'@@@@@@@@@@@@',
'Command not found.',
EOF,
]
self.f = open('ssh.out','w')
def __del__(self):
self.f.close()
def __repr__(self):
outl = 'class :'+self.__class__.__name__
for attr in self.__dict__:
if attr == 'password':
outl += '\n\t'+attr+' : '+'*'*len(self.password)
else:
outl += '\n\t'+attr+' : '+str(getattr(self, attr))
return outl
def __exec(self, command):
"Execute a command on the remote host. Return the output."
child = spawn(command,
#timeout=10,
)
if self.verbose:
sys.stderr.write("-> " + command + "\n")
seen = child.expect(self.keys)
self.f.write(str(child.before) + str(child.after)+'\n')
if seen == 0:
child.sendline('yes')
seen = child.expect(self.keys)
if seen == 1:
if not self.password:
self.password = getpass.getpass('Remote password: ')
child.sendline(self.password)
child.readline()
time.sleep(5)
# Added to allow the background running of remote process
if not child.isalive():
seen = child.expect(self.keys)
if seen == 2:
lines = child.readlines()
            self.f.writelines(lines)
if self.verbose:
sys.stderr.write("<- " + child.before + "|\n")
try:
self.f.write(str(child.before) + str(child.after)+'\n')
        except Exception:
pass
return child.before
def ssh(self, command):
return self.__exec("ssh -l %s %s \"%s\"" \
% (self.user,self.host,command))
def scp(self, src, dst):
return self.__exec("scp %s %s@%s:%s" \
% (src, self.user, self.host, dst))
def exists(self, file):
"Retrieve file permissions of specified remote file."
seen = self.ssh("/bin/ls -ld %s" % file)
if seen.find("No such file") > -1:
return None # File doesn't exist
else:
return seen.split()[0] # Return permission field of listing.
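# Illustrative usage (assumptions: a reachable host, valid credentials, and
# hypothetical file names):
#   s = ssh_session('deploy', 'build.example.com', verbose=1)
#   s.scp('dist.tar.gz', '/tmp/dist.tar.gz')
#   perms = s.exists('/tmp/dist.tar.gz')  # e.g. '-rw-r--r--', or None
#   s.ssh('ls -l /tmp')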
|
{
"content_hash": "32097ff938c0d0e3630e1f644072bbba",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 80,
"avg_line_length": 30.466666666666665,
"alnum_prop": 0.4762946754194019,
"repo_name": "yuzhichang/pexpect",
"id": "6d1a774812cde3d9c7821c4f72672b4d07fe4405",
"size": "2819",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/ssh_session.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "294856"
}
],
"symlink_target": ""
}
|
"""Provide a dispatcher to dispatch events.
This component implements a dispatcher to dispatch events from one or more
Sessions through callbacks.
"""
from __future__ import absolute_import
from . import internals
class EventDispatcher(object):
"""Dispatches events from one or more Sessions through callbacks
EventDispatcher objects are optionally specified when Session objects are
created. A single EventDispatcher can be shared by multiple Session
objects.
The EventDispatcher provides an event-driven interface, generating
callbacks from one or more internal threads for one or more sessions.
"""
__handle = None
def __init__(self, numDispatcherThreads=1):
"""Construct an EventDispatcher.
If 'numDispatcherThreads' is 1 (the default) then a single internal
thread is created to dispatch events. If 'numDispatcherThreads' is
greater than 1 then an internal pool of 'numDispatcherThreads' threads
is created to dispatch events. The behavior is undefined if
'numDispatcherThreads' is 0.
"""
self.__handle = internals.blpapi_EventDispatcher_create(
numDispatcherThreads)
def __del__(self):
"""Destructor."""
internals.blpapi_EventDispatcher_destroy(self.__handle)
def start(self):
"""Start generating callbacks.
Start generating callbacks for events from sessions associated with
this EventDispatcher.
"""
return internals.blpapi_EventDispatcher_start(self.__handle)
def stop(self, async=False):
"""Stop generating callbacks.
Stop generating callbacks for events from sessions associated with this
EventDispatcher. If the specified 'async' is False (the default) then
this method blocks until all current callbacks which were dispatched
through this EventDispatcher have completed. If 'async' is True, this
method returns immediately and no further callbacks will be dispatched.
Note: If stop is called with 'async' of False from within a callback
dispatched by this EventDispatcher then the 'async' parameter is
overridden to True.
"""
return internals.blpapi_EventDispatcher_stop(self.__handle, async)
def _handle(self):
"""Return the internal implementation."""
return self.__handle
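# Illustrative lifecycle (assumption: Session objects are constructed
# elsewhere with this dispatcher passed in):
#   dispatcher = EventDispatcher(numDispatcherThreads=2)
#   dispatcher.start()
#   # ... Session callbacks now run on the dispatcher's internal threads ...
#   dispatcher.stop()  # blocks until in-flight callbacks complete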
__copyright__ = """
Copyright 2012. Bloomberg Finance L.P.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions: The above
copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
|
{
"content_hash": "77bea8edb84af32dc0ecfcc313de0ef2",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 79,
"avg_line_length": 38.2967032967033,
"alnum_prop": 0.7288378766140603,
"repo_name": "anshulkgupta/viznow",
"id": "38409dc9f2f39eedbf48d3d8f29e4d9c3ee9e5d7",
"size": "3507",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Mayank/blpapi_python3.5.5/blpapi/eventdispatcher.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "684167"
},
{
"name": "CSS",
"bytes": "561059"
},
{
"name": "JavaScript",
"bytes": "658864"
},
{
"name": "PHP",
"bytes": "477"
},
{
"name": "Perl",
"bytes": "297"
},
{
"name": "Python",
"bytes": "872406"
}
],
"symlink_target": ""
}
|
import factory
reload(factory)
from factory import *
import cpp_ethereum
reload(cpp_ethereum)
from cpp_ethereum import *
#
# Windows factories
#
def win_cpp_factory(branch='master', isPullRequest=False):
factory = BuildFactory()
sed = '"C:\\Program Files (x86)\\GnuWin32\\bin\\sed.exe"'
for step in [
Git(
haltOnFailure=True,
logEnviron=False,
repourl='https://github.com/ethereum/cpp-ethereum.git',
branch=branch,
mode='full',
method='copy',
codebase='cpp-ethereum',
retry=(5, 3)
),
SetPropertyFromCommand(
haltOnFailure=True,
logEnviron=False,
name="set-protocol",
command='%s -ne "s/.*c_protocolVersion = \(.*\);/\\1/p" libethcore\Common.cpp' % sed,
property="protocol"
),
SetPropertyFromCommand(
haltOnFailure=True,
logEnviron=False,
name="set-version",
command='%s -ne "s/^set(PROJECT_VERSION \\"\(.*\)\\")$/\\1/p" CMakeLists.txt' % sed,
property="version"
),
ShellCommand(
haltOnFailure=True,
logEnviron=False,
name="dependencies",
description='dependencies',
descriptionDone='dependencies',
command=['getstuff.bat'],
workdir="build/extdep"
),
Configure(
haltOnFailure=True,
logEnviron=False,
command=["cmake", ".", "-G", "Visual Studio 12 Win64"]
),
MsBuild12(
haltOnFailure=True,
logEnviron=False,
projectfile="ethereum.sln",
config="Release",
platform="x64"
)
]: factory.addStep(step)
if not isPullRequest:
for step in [
MsBuild12(
haltOnFailure=True,
logEnviron=False,
name="installer",
projectfile="PACKAGE.vcxproj",
config="Release",
platform="x64"
),
SetProperty(
description="setting filename",
descriptionDone="set filename",
name="set-filename",
property="filename",
value=Interpolate("Ethereum-%(prop:version)s-win64-%(kw:time_string)s-%(kw:short_revision)s.exe",
time_string=get_time_string,
short_revision=get_short_revision)
),
FileUpload(
name="upload",
slavesrc=Interpolate("Ethereum-%(prop:version)s-win64.exe"),
masterdest=Interpolate("public_html/builds/%(prop:buildername)s/%(prop:filename)s"),
url=Interpolate("/builds/%(prop:buildername)s/%(prop:filename)s")
),
MasterShellCommand(
name="clean-latest-link",
description='cleaning latest link',
descriptionDone='clean latest link',
command=['rm', '-f', Interpolate("public_html/builds/%(prop:buildername)s/Ethereum-win64-latest.exe")]
),
MasterShellCommand(
haltOnFailure=True,
name="link-latest",
description='linking latest',
descriptionDone='link latest',
command=['ln', '-sf', Interpolate("%(prop:filename)s"), Interpolate("public_html/builds/%(prop:buildername)s/Ethereum-win64-latest.exe")]
)
]: factory.addStep(step)
return factory
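# Illustrative use (assumption: hooked into a buildbot master config
# elsewhere):
#   f = win_cpp_factory(branch='develop', isPullRequest=True)
#   # pull-request builds skip the installer/upload/link-latest steps above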
|
{
"content_hash": "1855a5723b4823eb6d6a54fbed85d801",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 153,
"avg_line_length": 34.53333333333333,
"alnum_prop": 0.5179260893546608,
"repo_name": "ethereum/ethereum-buildbot",
"id": "1d641eb8496188dbd27808ce062b54f0c2e67990",
"size": "3673",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "factories/cpp_ethereum_windows.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10308"
},
{
"name": "HTML",
"bytes": "8586"
},
{
"name": "Python",
"bytes": "185941"
},
{
"name": "Ruby",
"bytes": "2406"
}
],
"symlink_target": ""
}
|
import os
import requests
from flask import Flask, request, send_from_directory, Response
url_to_proxy = "https://lainon.life"
server = Flask(__name__)
@server.route("/")
def handle_root():
return send_from_directory("_site", "index.html")
@server.route("/<path:_>")
def handle_request(_):
path = request.path
if os.path.exists("_site" + path):
server.logger.info("serving locally: " + path)
return send_from_directory("_site", path[1:])
    # fall back to proxying files that do not exist locally
server.logger.info("proxying: " + path)
response = requests.get(url_to_proxy + path)
if response.status_code != 200:
# replicate the status code of the remote server
flask_response = Response()
flask_response.status_code = response.status_code
return flask_response
# send files
mime = response.headers["content-type"]
flask_response = Response(response.content, mimetype=mime)
flask_response.status_code = response.status_code
return flask_response
server.run()
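# Illustrative behaviour (assumption: the static site has been built into
# _site/):
#   GET /            -> _site/index.html
#   GET /css/a.css   -> _site/css/a.css if present, otherwise proxied from
#                       https://lainon.life/css/a.css with its status code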
|
{
"content_hash": "495a23fbe4ce7594edf62ee4d2799ed8",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 63,
"avg_line_length": 26.075,
"alnum_prop": 0.6653883029721956,
"repo_name": "barrucadu/lainonlife",
"id": "a5436a622442809a030768026a7a40b1becc52f8",
"size": "1066",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "frontend/devserver.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4609"
},
{
"name": "HTML",
"bytes": "204"
},
{
"name": "JavaScript",
"bytes": "11448"
},
{
"name": "Python",
"bytes": "19206"
},
{
"name": "Shell",
"bytes": "1775"
},
{
"name": "Smarty",
"bytes": "5513"
}
],
"symlink_target": ""
}
|
from django_seo_js import settings
from django_seo_js.backends import SelectedBackend
from django_seo_js.helpers import request_should_be_ignored
import logging
logger = logging.getLogger(__name__)
class EscapedFragmentMiddleware(SelectedBackend):
def process_request(self, request):
if not settings.ENABLED:
return
if request_should_be_ignored(request):
return
if "_escaped_fragment_" not in request.GET:
return
url = self.backend.build_absolute_uri(request)
try:
return self.backend.get_response_for_url(url, request)
except Exception as e:
logger.exception(e)
class HashBangMiddleware(EscapedFragmentMiddleware):
def __init__(self, *args, **kwargs):
        logger.info(
"Deprecation note: HashBangMiddleware has been renamed EscapedFragmentMiddleware,"
" for more clarity. Upgrade your MIDDLEWARE_CLASSES to \n"
" 'django_seo_js.middleware.EscapedFragmentMiddleware'"
" when you get a chance. HashBangMiddleware will be removed in v0.5"
)
super(HashBangMiddleware, self).__init__(*args, **kwargs)
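# Example wiring (a sketch; the exact settings layout is project-specific),
# using the middleware path named in the deprecation note above:
#
#     MIDDLEWARE_CLASSES = (
#         'django_seo_js.middleware.EscapedFragmentMiddleware',
#         # ... the rest of the middleware stack ...
#     )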
|
{
"content_hash": "83d721c8d6c68e603b6dadb8d0eb99cd",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 94,
"avg_line_length": 33.138888888888886,
"alnum_prop": 0.6630343671416596,
"repo_name": "skoczen/django-seo-js",
"id": "2ae9b6175bb101136c2d1c6c401c3083f379510f",
"size": "1193",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_seo_js/middleware/escaped_fragment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35135"
}
],
"symlink_target": ""
}
|
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'TrumpaTron'
copyright = u'2017, Josh Carlson'
author = u'Josh Carlson'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0.1'
# The full version, including alpha/beta/rc tags.
release = u'1.0.1-1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'TrumpaTrondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'TrumpaTron.tex', u'TrumpaTron Documentation',
u'Josh Carlson', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'trumpatron', u'TrumpaTron Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'TrumpaTron', u'TrumpaTron Documentation',
author, 'TrumpaTron', 'One line description of project.',
'Miscellaneous'),
]
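# To build the HTML docs with this configuration (a sketch; run from the
# directory containing this conf.py):
#
#     sphinx-build -b html . _build/html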
|
{
"content_hash": "eb2c41a82433d79f4503745b1099075b",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 78,
"avg_line_length": 29.80952380952381,
"alnum_prop": 0.6656017039403621,
"repo_name": "magneticstain/TrumpaTron",
"id": "a9d50d0e797a46eda045c485a95bd8fad9b9ac53",
"size": "4811",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33252"
},
{
"name": "Shell",
"bytes": "333"
}
],
"symlink_target": ""
}
|
from core import Dump
import json, re, psycopg2, datetime
db = psycopg2.connect("dbname=gazetteer")
cursor = db.cursor()
cursor.execute("SELECT code FROM places_featuretype;")
# NOV = novitiate ... and months occur in citation-notes a lot :(
months = "jan feb mar apr may jun jul aug sep oct nov dec".split(" ")
fcodes = [code[0] for code in cursor.fetchall()
if code[0].lower() not in months]
def extract_fcode(text):
code = fcode_match.search(text)
if code: return code.groups()[0].upper()
fcode_match = re.compile(r'\b(' + "|".join(fcodes) + r')\b', re.IGNORECASE)
ns_coords = re.compile(r'([0-9.]+)(?:\D*([0-9.]+))?(?:\D*([0-9.]+))?\D*([NS])')
ew_coords = re.compile(r'([0-9.]+)(?:\D*([0-9.]+))?(?:\D*([0-9.]+))?\D*([EW])')
def coords_to_dms(regex, text):
while type(text) is list: text = text[0]
dms = regex.search(text)
if dms:
dms = dms.groups()
else:
return None
try:
coord = float(dms[0])
if dms[1]: coord += float(dms[1]) / 60.0
if dms[2]: coord += float(dms[2]) / 3600.0
if dms[3][0] in "SW": coord = -coord
if (dms[3][0] in "EW" and -180 <= coord <= 180) or -90 <= coord <= 90:
return coord
except ValueError:
pass
return None
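# A quick illustration of the regexes and conversion above (inputs made up):
#   coords_to_dms(ns_coords, "35 30 15 N") -> 35 + 30/60 + 15/3600 ~= 35.5042
#   coords_to_dms(ew_coords, "120 15 W")   -> -(120 + 15/60)        = -120.25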
def get_labels(variants):
if type(variants) is not list:
variants = [variants]
result = []
for variant in variants:
if not variant or type(variant) is not dict: continue
labels = variant.get("variantLabel")
if not labels: continue
if type(labels) is not list: labels = [variant["variantLabel"]]
result.extend(labels)
return result
def extract_lc_auth(data_path, dump_path):
dump = Dump(dump_path + "/lc_auth/lc_auth.%04d.json.gz")
    for line in open(data_path):
auth = json.loads(line)
if "isMemberOfMADSScheme" in auth: del auth["isMemberOfMADSScheme"]
if "adminMetadata" in auth: del auth["adminMetadata"]
alt_names = []
#print "hasVariant: ", auth.get("hasVariant")
for label in get_labels(auth.get("hasVariant")):
alt_names.append({"name": label})
for label in get_labels(auth.get("hasEarlierEstablishedForm")):
alt_names.append({"name": label, "type": "historical"})
geom = fcode = None
has_source = auth.get("hasSource")
if has_source:
note = has_source.get("citation-note")
if note:
lat = coords_to_dms(ns_coords, note)
lon = coords_to_dms(ew_coords, note)
if lat and lon:
geom = {"type": "Point", "coordinates": [lon, lat]}
# search the citation-note
fcode = extract_fcode(note)
if not fcode:
source = has_source.get("citation-source", "")
fcode = extract_fcode(source)
if not fcode:
fcode = "AUTH"
uri = auth["id"]
if "authoritativeLabel" not in auth:
continue
updated = datetime.datetime.utcnow().replace(second=0, microsecond=0).isoformat()
        auth_source = {
"type" : [],
"id" : "",
"authoritativeLabel" : "",
"note" : "",
"editorialNote": ""
}
for key in auth_source.keys():
            if key in auth:
auth_source[key] = auth[key]
place = {
"name": auth["authoritativeLabel"],
"feature_code": fcode,
"alternate": alt_names,
"is_primary": True,
"updated": updated,
"source": auth_source,
"uris": [uri],
"relationships": [],
"timeframe": {},
"admin": []
}
if geom:
place["geometry"] = geom
place["centroid"] = geom["coordinates"]
else:
place["geometry"] = {}
place["centroid"] = []
dump.write(uri, place)
dump.close()
if __name__ == "__main__":
import sys
data_path, dump_path = sys.argv[1:3]
extract_lc_auth(data_path, dump_path)
|
{
"content_hash": "f7164a453f0ed4c22fe0d612ed803f9b",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 89,
"avg_line_length": 35.09166666666667,
"alnum_prop": 0.5255283780574685,
"repo_name": "LibraryOfCongress/gazetteer",
"id": "eec9c0bdcfa8137468df4ab41f81cd81de43e9e9",
"size": "4211",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "etl/parser/lc_auth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "3868"
},
{
"name": "CSS",
"bytes": "66016"
},
{
"name": "JavaScript",
"bytes": "217868"
},
{
"name": "Makefile",
"bytes": "84"
},
{
"name": "Python",
"bytes": "297091"
},
{
"name": "Shell",
"bytes": "2261"
}
],
"symlink_target": ""
}
|
""" A little script illustrating how to use a (randomly initialized)
convolutional network to play a game of Pente. """
__author__ = 'Tom Schaul, tom@idsia.ch'
from pybrain.rl.environments.twoplayergames.pente import PenteGame
from pybrain.rl.environments.twoplayergames.gomokuplayers.randomplayer import RandomGomokuPlayer
from pybrain.rl.environments.twoplayergames.gomokuplayers.moduledecision import ModuleDecidingPlayer
from pybrain.structure.networks.custom.convboard import ConvolutionalBoardNetwork
dim = 7
g = PenteGame((dim, dim))
print g
n = ConvolutionalBoardNetwork(dim, 5, 3)
p1 = ModuleDecidingPlayer(n, g)
p2 = RandomGomokuPlayer(g)
p2.color = g.WHITE
g.playToTheEnd(p1, p2)
print g
|
{
"content_hash": "a0b70aa77eb17ce7d8e683f6db3a141d",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 100,
"avg_line_length": 36.94736842105263,
"alnum_prop": 0.8076923076923077,
"repo_name": "arnaudsj/pybrain",
"id": "9532875216b5f8996ed4ab44380d79bfdf4fa57d",
"size": "724",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/rl/environments/capturegame/pente.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('goals', '0059_auto_20150727_2159'),
]
operations = [
migrations.CreateModel(
name='PackageEnrollment',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),
('accepted', models.BooleanField(default=False)),
('enrolled_on', models.DateTimeField(auto_now_add=True)),
('categories', models.ManyToManyField(to='goals.Category')),
('enrolled_by', models.ForeignKey(to=settings.AUTH_USER_MODEL, related_name='enrolled')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['enrolled_on'],
'verbose_name': 'Package Enrollment',
'verbose_name_plural': 'Package Enrollments',
},
),
]
|
{
"content_hash": "2542b800ca515667e4ee3fea563638c6",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 114,
"avg_line_length": 37.096774193548384,
"alnum_prop": 0.5826086956521739,
"repo_name": "tndatacommons/tndata_backend",
"id": "b10e7d5dcfeb53740da4126c945bedcd04905acf",
"size": "1174",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tndata_backend/goals/migrations/0060_packageenrollment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "29078"
},
{
"name": "HTML",
"bytes": "680433"
},
{
"name": "JavaScript",
"bytes": "186991"
},
{
"name": "Makefile",
"bytes": "393"
},
{
"name": "Python",
"bytes": "2023392"
},
{
"name": "Shell",
"bytes": "2282"
}
],
"symlink_target": ""
}
|
"""
Paver-enable template
Copyright (c) 2009, Damien Lebrun
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from datetime import date
import shutil
import os
import ConfigParser
from paste.script.templates import Template, var
from setuptools.command.setopt import edit_config, config_file
from pkg_resources import resource_filename
__author__ = 'Damien Lebrun <dinoboff@hotmail.com>'
YEAR = date.today().year
LICENSE_HEADER = """%(description)s
Copyright (c) %(year)s, %(author)s
All rights reserved.
"""
GPL = """
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU%(gpl_type)s General Public License as published by
the Free Software Foundation, either version %(gpl_version)s of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU%(gpl_type)s General Public License for more details.
You should have received a copy of the GNU%(gpl_type)s General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
BSD = """
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the %(org)s nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
FREE_BSD = """
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
def add_license_details(vars):
"""Populates ``vars`` with ``gpl_type``, ``gpl_version``,
``license_body`` and ``license_files``.
"""
vars.setdefault('description', '')
vars.setdefault('author', '')
vars.setdefault('author_email', '')
vars.setdefault('license_name', '')
vars['gpl_type'] = ''
vars['gpl_version'] = ''
vars['license_files'] = []
license_tmpl = ''
license = vars.get('license_name', '').strip().upper()
if license:
if license == 'BSD':
if vars.get('org'):
license_tmpl = BSD
else:
license_tmpl = FREE_BSD
elif 'GPL' in license:
license_tmpl = GPL
if license == 'GPLV2':
vars.update(gpl_version=2, license_files=('gpl-2.0',))
elif license in ('LGPL', 'LGPLV3',):
vars.update(
gpl_version=3,
gpl_type=' Lesser',
license_files=('gpl', 'lgpl',))
elif license == 'LGPLV2':
vars.update(
gpl_version=2,
gpl_type=' Lesser',
license_files=('gpl-2.0', 'lgpl-2.1',))
elif license in ('AGPL', 'AGPLV3',):
vars.update(
gpl_version=3,
gpl_type=' Affero',
license_files=('gpl', 'agpl',))
else:
vars.update(gpl_version=3, license_files=('gpl',))
vars['license_body'] = (LICENSE_HEADER + license_tmpl) % vars
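# For instance (a sketch): given vars that already carry 'year', 'author' and
# 'description', a license_name of 'GPLv2' sets gpl_version=2 and
# license_files=('gpl-2.0',), then renders LICENSE_HEADER + GPL into
# vars['license_body'].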
def copy_license(license_files, output_dir):
"""copy gpl license files to output directory"""
for file_name in license_files:
full_name = '%s.txt' % file_name
rel_path = 'paster-templates/licenses/%s' % full_name
abs_path = resource_filename(__name__, rel_path)
shutil.copyfile(abs_path, os.path.join(output_dir, full_name))
def get_default():
"""Get default author name and email to use for new package"""
config_path = config_file('user')
cp = ConfigParser.RawConfigParser()
cp.read(config_path)
    defaults = []
for option in ('author', 'author_email'):
try:
defaults.append(cp.get('paver', 'default.%s' % option))
        except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
defaults.append('')
return defaults
def save_defaults(**kw):
"""Save in ~/.pydistutils the author name and email.
To be used as default value for the next use of this template."""
options = {}
for option in ('author','author_email'):
value = kw.get(option, '')
if value:
options['default.%s' % option] = value
edit_config(config_file('user'), {'paver': options})
DEFAULT_NAME, DEFAULT_EMAIL = get_default()
class PaverTemplate(Template):
_template_dir = 'paster-templates/paver_package'
summary = "A basic paver-enabled package"
use_cheetah = True
vars = [
var('version', 'Version (like 0.1)'),
var('description', 'One-line description of the package'),
var('keywords', 'Space-separated keywords/tags'),
var('author', 'Author name', default=DEFAULT_NAME),
var('author_email', 'Author email', default=DEFAULT_EMAIL),
var('url', 'URL of homepage'),
var('license_name',
'license name - GPLv2/GPLv3/LGPLv2/LGPLv3/AGPLv3/BSD/...',
default='BSD'),
var('org', 'Organisation name (required for 3-clauses BSD).'),
]
def pre(self, command, output_dir, vars):
"""
Set extra template variables:
* "year", current year.
* "license_body", license notice of the package.
* "gpl_type", for gpl licenses
"""
vars['year'] = YEAR
add_license_details(vars)
def post(self, command, output_dir, vars):
"""Save the author, author_name and org variables in ~/.pydistutils.cfg,
And copy the gpl license if necessary."""
save_defaults(**vars)
copy_license(vars['license_files'], output_dir)
|
{
"content_hash": "d5f31b0c58e23ade2d9637a8799ddae5",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 82,
"avg_line_length": 41.19469026548673,
"alnum_prop": 0.6817400644468313,
"repo_name": "dinoboff/paver-templates",
"id": "867caa9567dec14bed9b3f9b7d51168bee2e2323",
"size": "9310",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pavertemplates/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "69265"
}
],
"symlink_target": ""
}
|
from rest_framework.permissions import BasePermission
from olympia.devhub.utils import UploadRestrictionChecker
class IsSubmissionAllowedFor(BasePermission):
"""
Like is_submission_allowed_for_request, but in Permission form for use in
the API. If the client is disallowed, a message property specifiying the
reason is set on the permission instance to be returned to the client in
the 403 response.
"""
def has_permission(self, request, view):
checker = UploadRestrictionChecker(request)
if not checker.is_submission_allowed():
self.message = checker.get_error_message()
return False
return True
def has_object_permission(self, request, view, obj):
return self.has_permission(request, view)
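# Typical DRF usage (a sketch, not taken from this codebase):
#
#     class SomeSubmissionView(APIView):
#         permission_classes = [IsSubmissionAllowedFor]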
|
{
"content_hash": "09bd375d9e986ea1bdebaeccd54be907",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 77,
"avg_line_length": 37.333333333333336,
"alnum_prop": 0.7155612244897959,
"repo_name": "psiinon/addons-server",
"id": "a69a3d7bdcc7551636545ab14454c225c5edefb4",
"size": "784",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/olympia/devhub/permissions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "752741"
},
{
"name": "Dockerfile",
"bytes": "4089"
},
{
"name": "HTML",
"bytes": "314894"
},
{
"name": "JavaScript",
"bytes": "947557"
},
{
"name": "Makefile",
"bytes": "564"
},
{
"name": "Python",
"bytes": "5192809"
},
{
"name": "Shell",
"bytes": "6712"
},
{
"name": "Smarty",
"bytes": "1418"
},
{
"name": "TSQL",
"bytes": "6926"
}
],
"symlink_target": ""
}
|
from celery.task import task
from .models import Mapping
@task
def feedmapper_sync(mapping_id):
"Grab the requested Mapping and parse it."
try:
mapping = Mapping.objects.get(pk=mapping_id)
mapping.parse()
except Mapping.DoesNotExist:
logger = feedmapper_sync.get_logger()
logger.info("feedmapper_sync failed for mapping with ID %s" % mapping_id)
|
{
"content_hash": "6373771e17dcca4d1fa2312ea2f0d250",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 81,
"avg_line_length": 28.071428571428573,
"alnum_prop": 0.6870229007633588,
"repo_name": "richleland/django-feedmapper",
"id": "dd95b992633937eebf81cb397faa33d394a049e7",
"size": "393",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "feedmapper/tasks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import sys
sys.path.append('../..')
from SALib.analyze import dgsm
from SALib.sample import finite_diff
from SALib.test_functions import Ishigami
from SALib.util import read_param_file
# Read the parameter range file
problem = read_param_file('../../SALib/test_functions/params/Ishigami.txt')
# Generate samples
param_values = finite_diff.sample(problem, 1000, delta=0.001)
# Run the "model" -- this will happen offline for external models
Y = Ishigami.evaluate(param_values)
# Perform the sensitivity analysis using the model output
# Specify which column of the output file to analyze (zero-indexed)
Si = dgsm.analyze(problem, param_values, Y, conf_level=0.95, print_to_console=False)
# Returns a dictionary with keys 'vi', 'vi_std', 'dgsm', and 'dgsm_conf'
# e.g. Si['vi'] contains the sensitivity measure for each parameter, in
# the same order as the parameter file
# For comparison, Morris mu* < sqrt(v_i)
# and total order S_tot <= dgsm, following Sobol and Kucherenko (2009)
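# A sketch of reporting the measures per parameter (key names as above):
#
#     for name, vi, measure in zip(problem['names'], Si['vi'], Si['dgsm']):
#         print('%s: vi=%.4f dgsm=%.4f' % (name, vi, measure))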
|
{
"content_hash": "639a631ed3358d03bcfe065503c91cda",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 84,
"avg_line_length": 35.93103448275862,
"alnum_prop": 0.727447216890595,
"repo_name": "t2abdulg/SALib",
"id": "a72d52a550cffc2fbc19f9b5e2a80a808a166b33",
"size": "1042",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/dgsm/dgsm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2239408"
}
],
"symlink_target": ""
}
|
"""Advent of Code 2015 - Day 12."""
import json
import numbers
from pathlib import Path
def read_input():
"""Read input file."""
return Path(__file__).with_name('input.json').read_text()
def traverse_json(data, filter=None):
"""Recursively traverse JSON structure yielding all its elements."""
if filter and filter(data):
return
yield data
# Handle nested structures like dicts/lists and yield their values, too.
if isinstance(data, dict):
for item in data.values():
yield from traverse_json(item, filter=filter)
elif isinstance(data, list):
for item in data:
yield from traverse_json(item, filter=filter)
def is_number(item):
"""Check if the item is a number."""
return isinstance(item, numbers.Number)
def is_red_dict(item):
"""Check if the item is a dict that contains a red value."""
return isinstance(item, dict) and 'red' in item.values()
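# e.g. (a sketch): is_red_dict({'a': 'red'}) is True, while
# is_red_dict(['red']) is False -- per part two, only dicts are skipped.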
def main():
"""Main entry point of puzzle solution."""
data = json.loads(read_input())
part_one = sum(filter(is_number, traverse_json(data)))
part_two = sum(filter(is_number, traverse_json(data, filter=is_red_dict)))
print('Part One: {}'.format(part_one))
print('Part Two: {}'.format(part_two))
if __name__ == '__main__':
main()
|
{
"content_hash": "66127373f2dfe02916053c87fc42fa4a",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 78,
"avg_line_length": 26.28,
"alnum_prop": 0.6415525114155252,
"repo_name": "UniqMartin/adventofcode-2015",
"id": "acc26b6e440c5fa59ca9d11d097f169cac222d2f",
"size": "1314",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "day-12/main.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "468"
},
{
"name": "Pony",
"bytes": "971"
},
{
"name": "Python",
"bytes": "28096"
},
{
"name": "Ruby",
"bytes": "53283"
}
],
"symlink_target": ""
}
|
import pytest
@pytest.fixture(scope='session')
def celery_config():
return {
'broker_url': 'amqp://',
'result_backend': 'rpc'
}
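# This fixture is consumed by celery's pytest plugin (a sketch; assumes the
# plugin is enabled and the amqp broker above is reachable):
#
#     def test_ping(celery_app, celery_worker):
#         assert celery_app.control.ping()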
|
{
"content_hash": "a621199ca7edb31a546e7b82d3155fe6",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 32,
"avg_line_length": 17.11111111111111,
"alnum_prop": 0.564935064935065,
"repo_name": "nathandaddio/puzzle_app",
"id": "1b441a284a80ac35682fbfe3a80db72d89b9aad7",
"size": "154",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "puzzle_engine/puzzle_engine/conftest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4050"
},
{
"name": "HTML",
"bytes": "1590"
},
{
"name": "JavaScript",
"bytes": "14719"
},
{
"name": "Python",
"bytes": "87389"
}
],
"symlink_target": ""
}
|
import datetime
from enum import Enum
from unittest import TestCase
from asserts import assert_true, assert_false, assert_is_none, assert_equal, \
assert_raises
from htmlgen.attribute import (
html_attribute,
boolean_html_attribute,
int_html_attribute,
float_html_attribute,
time_html_attribute,
list_html_attribute,
data_attribute,
css_class_attribute,
enum_attribute,
)
from htmlgen.element import Element
class HTMLAttributeTest(TestCase):
def test_regular(self):
class MyElement(Element):
attr = html_attribute("data-attr")
element = MyElement("div")
assert_is_none(element.attr)
assert_equal("<div></div>", str(element))
element.attr = "Foo"
assert_equal("Foo", element.attr)
assert_equal('<div data-attr="Foo"></div>', str(element))
element.attr = None
assert_is_none(element.attr)
assert_equal("<div></div>", str(element))
def test_regular_with_default(self):
class MyElement(Element):
attr = html_attribute("data-attr", default="Bar")
element = MyElement("div")
assert_equal("Bar", element.attr)
assert_equal("<div></div>", str(element))
element.attr = "Foo"
assert_equal("Foo", element.attr)
assert_equal('<div data-attr="Foo"></div>', str(element))
element.attr = "Bar"
assert_equal("Bar", element.attr)
assert_equal("<div></div>", str(element))
element.attr = None
assert_equal("Bar", element.attr)
assert_equal("<div></div>", str(element))
def test_boolean(self):
class MyElement(Element):
attr = boolean_html_attribute("data-attr")
element = MyElement("div")
assert_false(element.attr)
assert_equal("<div></div>", str(element))
element.attr = True
assert_true(element.attr)
assert_equal('<div data-attr="data-attr"></div>', str(element))
element.attr = False
assert_false(element.attr)
assert_equal("<div></div>", str(element))
def test_integer(self):
class MyElement(Element):
attr = int_html_attribute("data-attr")
element = MyElement("div")
assert_is_none(element.attr)
assert_equal("<div></div>", str(element))
element.attr = 42
assert_equal(42, element.attr)
assert_equal('<div data-attr="42"></div>', str(element))
element.attr = None
assert_is_none(element.attr)
assert_equal("<div></div>", str(element))
def test_integer_with_default(self):
class MyElement(Element):
attr = int_html_attribute("data-attr", default=42)
element = MyElement("div")
assert_equal(42, element.attr)
assert_equal("<div></div>", str(element))
element.attr = 4711
assert_equal(4711, element.attr)
assert_equal('<div data-attr="4711"></div>', str(element))
element.attr = 42
assert_equal(42, element.attr)
assert_equal("<div></div>", str(element))
element.attr = None
assert_equal(42, element.attr)
assert_equal("<div></div>", str(element))
def test_float(self):
class MyElement(Element):
attr = float_html_attribute("data-attr")
element = MyElement("div")
assert_is_none(element.attr)
assert_equal("<div></div>", str(element))
element.attr = 4.2
assert_equal(4.2, element.attr)
assert_equal('<div data-attr="4.2"></div>', str(element))
element.attr = None
assert_is_none(element.attr)
assert_equal("<div></div>", str(element))
def test_float_with_default(self):
class MyElement(Element):
attr = float_html_attribute("data-attr", default=4.2)
element = MyElement("div")
assert_equal(4.2, element.attr)
assert_equal("<div></div>", str(element))
element.attr = 47.11
assert_equal(47.11, element.attr)
assert_equal('<div data-attr="47.11"></div>', str(element))
element.attr = 4.2
assert_equal(4.2, element.attr)
assert_equal("<div></div>", str(element))
element.attr = None
assert_equal(4.2, element.attr)
assert_equal("<div></div>", str(element))
def test_time(self):
class MyElement(Element):
attr = time_html_attribute("data-time")
element = MyElement("div")
assert_is_none(element.attr)
assert_equal("<div></div>", str(element))
element.attr = datetime.time(14, 13, 9)
assert_equal(datetime.time(14, 13, 9), element.attr)
assert_equal('<div data-time="14:13:09"></div>', str(element))
element.attr = None
assert_is_none(element.attr)
assert_equal("<div></div>", str(element))
element.set_attribute("data-time", "09:33:04")
assert_equal(datetime.time(9, 33, 4), element.attr)
def test_time_with_fraction(self):
class MyElement(Element):
attr = time_html_attribute("data-time")
element = MyElement("div")
element.attr = datetime.time(14, 13, 9, 123456)
assert_equal(datetime.time(14, 13, 9, 123456), element.attr)
assert_equal('<div data-time="14:13:09.123456"></div>', str(element))
def test_time__invalid_value(self):
class MyElement(Element):
attr = time_html_attribute("data-time")
element = MyElement("div")
element.set_attribute("data-time", "INVALID")
assert_is_none(element.attr)
def test_time_with_default(self):
class MyElement(Element):
attr = time_html_attribute(
"data-attr", default=datetime.time(12, 9, 34)
)
element = MyElement("div")
assert_equal(datetime.time(12, 9, 34), element.attr)
assert_equal("<div></div>", str(element))
element.attr = datetime.time(12, 9, 34)
assert_equal(datetime.time(12, 9, 34), element.attr)
assert_equal("<div></div>", str(element))
def test_list(self):
class MyElement(Element):
attr = list_html_attribute("data-attr")
element = MyElement("div")
assert_equal([], element.attr)
element.set_attribute("data-attr", "")
assert_equal([], element.attr)
element.set_attribute("data-attr", "foo,bar")
assert_equal(["foo", "bar"], element.attr)
element.attr = []
assert_equal("<div></div>", str(element))
element.attr = ["abc", "def"]
assert_equal(["abc", "def"], element.attr)
element.attr.append("ghi")
assert_equal(["abc", "def"], element.attr)
assert_equal("abc,def", element.get_attribute("data-attr"))
assert_equal('<div data-attr="abc,def"></div>', str(element))
def test_data(self):
class MyElement(Element):
attr = data_attribute("attr")
element = MyElement("div")
assert_is_none(element.get_attribute("data-attr"))
element.attr = "foo"
assert_equal("foo", element.get_attribute("data-attr"))
element.set_attribute("data-attr", "bar")
assert_equal("bar", element.attr)
def test_data_with_default(self):
class MyElement(Element):
attr = data_attribute("attr", "def")
element = MyElement("div")
element.attr = "def"
assert_is_none(element.get_attribute("data-attr"))
def test_css_class(self):
class MyElement(Element):
attr = css_class_attribute("my-class")
element = MyElement("div")
assert_false(element.attr)
element.add_css_classes("other-class")
assert_false(element.attr)
element.add_css_classes("my-class")
assert_true(element.attr)
element.attr = False
assert_false(element.has_css_class("my-class"))
element.attr = False
assert_false(element.has_css_class("my-class"))
element.attr = True
assert_true(element.has_css_class("my-class"))
element.attr = True
assert_true(element.has_css_class("my-class"))
class TestEnum(Enum):
FOO = "foo"
BAR = "bar"
class EnumAttributeTest(TestCase):
def test_enum(self):
class MyElement(Element):
attr = enum_attribute("attr", TestEnum)
element = MyElement("div")
assert_is_none(element.attr)
assert_equal("<div></div>", str(element))
element.attr = TestEnum.BAR
assert_equal(TestEnum.BAR, element.attr)
assert_equal('<div attr="bar"></div>', str(element))
element.attr = None
assert_is_none(element.attr)
assert_equal('<div></div>', str(element))
def test_default(self):
class MyElement(Element):
attr = enum_attribute("attr", TestEnum, default=TestEnum.FOO)
element = MyElement("div")
assert_equal(TestEnum.FOO, element.attr)
assert_equal("<div></div>", str(element))
element.attr = TestEnum.BAR
assert_equal(TestEnum.BAR, element.attr)
assert_equal('<div attr="bar"></div>', str(element))
element.attr = None
assert_equal(TestEnum.FOO, element.attr)
assert_equal("<div></div>", str(element))
def test_not_an_enum(self):
with assert_raises(TypeError):
class MyElement(Element):
attr = enum_attribute("attr", "foo") # type: ignore
def test_invalid_value(self):
class MyElement(Element):
attr = enum_attribute("attr", TestEnum)
element = MyElement("div")
with assert_raises(TypeError):
element.attr = "foo" # type: ignore
|
{
"content_hash": "90542ea2575baffa902c2d4f12e3655b",
"timestamp": "",
"source": "github",
"line_count": 279,
"max_line_length": 78,
"avg_line_length": 34.89247311827957,
"alnum_prop": 0.5888032871083718,
"repo_name": "srittau/python-htmlgen",
"id": "19af34a60540cbd4ed1cd3b3595dc00133192958",
"size": "9735",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "test_htmlgen/attribute.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "179421"
}
],
"symlink_target": ""
}
|
"""pkgsinfo module tests."""
import httplib
import logging
import mox
import stubout
from google.apputils import app
from google.apputils import basetest
from tests.simian.mac.common import test
from simian.mac.munki.handlers import pkgsinfo
class MunkiPackageInfoPlistStrictTest(mox.MoxTestBase):
"""Test MunkiPackageInfoPlistStrict class."""
def setUp(self):
mox.MoxTestBase.setUp(self)
self.stubs = stubout.StubOutForTesting()
self.mpl = pkgsinfo.MunkiPackageInfoPlistStrict()
def tearDown(self):
self.mox.UnsetStubs()
self.stubs.UnsetAll()
def testInit(self):
"""Tests that __init__ added the package exists validation hook."""
self.assertTrue(
self.mpl.ValidatePackageExists in self.mpl._validation_hooks)
def testValidatePackageExists(self):
"""Tests the ValidatePackageExists() method."""
package = 'package_location'
self.mpl._plist = {'installer_item_location': package}
self.stubs.Set(
pkgsinfo.pkgs, 'PackageExists', self.mox.CreateMockAnything())
pkgsinfo.pkgs.PackageExists(package).AndReturn(True)
pkgsinfo.pkgs.PackageExists(package).AndReturn(False)
self.mox.ReplayAll()
self.assertEqual(None, self.mpl.ValidatePackageExists())
self.assertRaises(
pkgsinfo.PackageDoesNotExistError, self.mpl.ValidatePackageExists)
self.mox.VerifyAll()
class PackagesInfoTest(test.RequestHandlerTest):
"""Test PackagesInfo webapp request handler."""
def GetTestClassInstance(self):
return pkgsinfo.PackagesInfo()
def GetTestClassModule(self):
return pkgsinfo
def _MockObtainLock(self, lock, obtain=True, timeout=None):
"""Mock ObtainLock().
Args:
lock: str, lock name
obtain: bool, default True, whether to obtain it or not
timeout: int, timeout value to ObtainLock with
"""
if not hasattr(self, '_mock_obtainlock'):
self.mox.StubOutWithMock(pkgsinfo.gae_util, 'ObtainLock')
self._mock_obtainlock = True
if timeout is not None:
pkgsinfo.gae_util.ObtainLock(lock, timeout=timeout).AndReturn(obtain)
else:
pkgsinfo.gae_util.ObtainLock(lock).AndReturn(obtain)
def _MockReleaseLock(self, lock):
"""Mock ReleaseLock().
Args:
lock: str, lock name
"""
if not hasattr(self, '_mock_releaselock'):
self.mox.StubOutWithMock(pkgsinfo.gae_util, 'ReleaseLock')
self._mock_releaselock = True
pkgsinfo.gae_util.ReleaseLock(lock).AndReturn(None)
def testHash(self):
"""Test _Hash()."""
self.stubs.Set(
pkgsinfo.hashlib, 'sha256',
self.mox.CreateMock(pkgsinfo.hashlib.sha256))
s = 'foo'
h = self.mox.CreateMockAnything()
h.hexdigest().AndReturn('hexfoo')
pkgsinfo.hashlib.sha256(s).AndReturn(h)
self.mox.ReplayAll()
self.assertEqual(self.c._Hash(s), 'hexfoo')
self.mox.VerifyAll()
def testGetSuccessWithFilenameAndNoHash(self):
"""Test get() with success with a filename but no hash."""
filename = 'pkg name.dmg'
filename_quoted = 'pkg%20name.dmg'
self.MockDoAnyAuth()
self.request.get('hash').AndReturn('')
pkginfo = self.MockModelStatic('PackageInfo', 'get_by_key_name', filename)
pkginfo.plist = 'plist'
self.response.headers['Content-Type'] = 'text/xml; charset=utf-8'
self.response.out.write(pkginfo.plist).AndReturn(None)
self.mox.ReplayAll()
self.c.get(filename_quoted)
self.mox.VerifyAll()
def testGetSuccessWithFilenameAndHash(self):
"""Test get() with success when hash header is requested."""
filename = 'pkg name.dmg'
filename_quoted = 'pkg%20name.dmg'
self.MockDoAnyAuth()
self.request.get('hash').AndReturn('1')
self._MockObtainLock('pkgsinfo_%s' % filename, timeout=5.0)
pkginfo = self.MockModelStatic('PackageInfo', 'get_by_key_name', filename)
pkginfo.plist = 'plist'
self.mox.StubOutWithMock(self.c, '_Hash')
self.c._Hash(pkginfo.plist).AndReturn('hash')
self.response.headers['Content-Type'] = 'text/xml; charset=utf-8'
self.response.headers['X-Pkgsinfo-Hash'] = 'hash'
self.response.out.write(pkginfo.plist).AndReturn(None)
self._MockReleaseLock('pkgsinfo_%s' % filename)
self.mox.ReplayAll()
self.c.get(filename_quoted)
self.mox.VerifyAll()
def testGetSuccessWhenHashLockFail(self):
"""Test get() with success when hash header is requested and lock fails."""
filename = 'pkg name.dmg'
filename_quoted = 'pkg%20name.dmg'
self.MockDoAnyAuth()
self.request.get('hash').AndReturn('1')
self._MockObtainLock('pkgsinfo_%s' % filename, timeout=5.0, obtain=False)
self.response.set_status(httplib.FORBIDDEN).AndReturn(None)
self.response.out.write('Could not lock pkgsinfo').AndReturn(None)
self.mox.ReplayAll()
self.c.get(filename_quoted)
self.mox.VerifyAll()
def testGetFailAuth(self):
"""Test get() with auth failure."""
self.MockDoAnyAuth(fail=True)
self.mox.ReplayAll()
self.assertRaises(
pkgsinfo.gaeserver.base.NotAuthenticated,
self.c.get,
'x')
self.mox.VerifyAll()
def testGetFailPackageNoExist(self):
"""Test get() with failure."""
filename = 'pkgnamenotfound.dmg'
self.MockDoAnyAuth()
self.MockModelStaticBase(
'PackageInfo', 'get_by_key_name', filename).AndReturn(None)
self.request.get('hash').AndReturn(None)
self.response.set_status(httplib.NOT_FOUND).AndReturn(None)
self.mox.ReplayAll()
self.c.get(filename)
self.mox.VerifyAll()
def testGetFailPackageAndHashNoExist(self):
"""Test get() with failure."""
filename = 'pkgnamenotfound.dmg'
self.MockDoAnyAuth()
self._MockObtainLock('pkgsinfo_%s' % filename, timeout=5.0, obtain=True)
self.MockModelStaticBase(
'PackageInfo', 'get_by_key_name', filename).AndReturn(None)
self.request.get('hash').AndReturn('1')
self._MockReleaseLock('pkgsinfo_%s' % filename)
self.response.set_status(httplib.NOT_FOUND).AndReturn(None)
self.mox.ReplayAll()
self.c.get(filename)
self.mox.VerifyAll()
def testPutFailAuth(self):
"""Test put() with auth failure."""
self.MockDoMunkiAuth(
fail=True, require_level=pkgsinfo.gaeserver.LEVEL_UPLOADPKG)
self.mox.ReplayAll()
self.assertRaises(
pkgsinfo.gaeserver.NotAuthenticated,
self.c.put,
'x')
self.mox.VerifyAll()
def testGetSuccessWithQueryParams(self):
"""Test get() pkg list with success."""
install_types = ['managed_installs', 'managed_updates']
catalogs = ['stable', 'testing']
mock_pkg = self.mox.CreateMockAnything()
mock_pkg_properties = ['name', 'foo']
mock_pkg.name = 'fooname'
mock_pkg.foo = 'foofoo'
pkgs = [{'name': mock_pkg.name, 'foo': mock_pkg.foo}]
self.mox.StubOutWithMock(pkgsinfo.plist, 'GetXmlStr')
mock_user = self.mox.CreateMockAnything()
self.MockDoAnyAuth(and_return=mock_user)
mock_user.email().AndReturn('foo@example.com')
self.mox.StubOutWithMock(pkgsinfo.auth, 'IsAdminUser')
self.mox.StubOutWithMock(pkgsinfo.auth, 'IsSupportUser')
pkgsinfo.auth.IsAdminUser('foo@example.com').AndReturn(False)
pkgsinfo.auth.IsSupportUser('foo@example.com').AndReturn(True)
mock_query = self.MockModelStatic('PackageInfo', 'all')
self.request.get('filename').AndReturn('')
self.request.get_all('install_types').AndReturn(install_types)
self.request.get_all('catalogs').AndReturn(catalogs)
for t in install_types:
mock_query.filter('install_types =', t)
for t in catalogs:
mock_query.filter('catalogs =', t)
mock_iter = self.mox.CreateMockAnything()
mock_query.__iter__().AndReturn(mock_iter)
mock_iter.next().AndReturn(mock_pkg)
mock_pkg.properties().AndReturn(mock_pkg_properties)
mock_iter.next().AndRaise(StopIteration)
self.response.out.write('<?xml version="1.0" encoding="UTF-8"?>\n')
pkgsinfo.plist.GetXmlStr(pkgs).AndReturn('XML')
self.response.out.write('XML')
self.response.headers['Content-Type'] = 'text/xml; charset=utf-8'
self.mox.ReplayAll()
self.c.get(None)
self.mox.VerifyAll()
def testPutFailInputNotParseable(self):
"""Test put() with input that isn't parseable as a plist."""
filename = 'pkgname.dmg'
body = 'junk'
self.request.body = body
self.request.get('hash').AndReturn(None)
self.request.get('catalogs', None).AndReturn('anything')
self.request.get('manifests', None).AndReturn('anything')
self.request.get('install_types').AndReturn('anything')
mock_mpl = self.mox.CreateMockAnything()
self.MockDoMunkiAuth(require_level=pkgsinfo.gaeserver.LEVEL_UPLOADPKG)
self.mox.StubOutWithMock(pkgsinfo, 'MunkiPackageInfoPlistStrict')
pkgsinfo.MunkiPackageInfoPlistStrict(body).AndReturn(mock_mpl)
exc = pkgsinfo.plist.MalformedPlistError('foo error')
mock_mpl.Parse().AndRaise(exc)
self.response.set_status(httplib.BAD_REQUEST).AndReturn(None)
self.response.out.write('foo error').AndReturn(None)
self.mox.ReplayAll()
self.c.put(filename)
self.mox.VerifyAll()
def testPutFailInputMissingFields(self):
"""Test put() with input that isn't parseable as a plist."""
filename = 'pkgname.dmg'
body = 'junk'
self.request.body = body
self.request.get('hash').AndReturn(None)
self.request.get('catalogs', None).AndReturn('anything')
self.request.get('manifests', None).AndReturn('anything')
self.request.get('install_types').AndReturn('anything')
mock_mpl = self.mox.CreateMockAnything()
self.MockDoMunkiAuth(require_level=pkgsinfo.gaeserver.LEVEL_UPLOADPKG)
self.mox.StubOutWithMock(pkgsinfo, 'MunkiPackageInfoPlistStrict')
pkgsinfo.MunkiPackageInfoPlistStrict(body).AndReturn(mock_mpl)
exc = pkgsinfo.plist.InvalidPlistError('foo error')
mock_mpl.Parse().AndRaise(exc)
self.response.set_status(httplib.BAD_REQUEST).AndReturn(None)
self.response.out.write('foo error').AndReturn(None)
self.mox.ReplayAll()
self.c.put(filename)
self.mox.VerifyAll()
def testPutFailPackageNoExist(self):
"""Test put() with valid input params, but package does not exist."""
filename = 'pkgname.dmg'
body = '<fakexml>blabla</fakexml>'
self.request.body = body
self.request.get('hash').AndReturn(None)
self.request.get('catalogs', None).AndReturn('anything')
self.request.get('manifests', None).AndReturn('anything')
self.request.get('install_types').AndReturn('anything')
mock_mpl = self.mox.CreateMockAnything()
self.MockDoMunkiAuth(require_level=pkgsinfo.gaeserver.LEVEL_UPLOADPKG)
self.mox.StubOutWithMock(pkgsinfo, 'MunkiPackageInfoPlistStrict')
pkgsinfo.MunkiPackageInfoPlistStrict(body).AndReturn(mock_mpl)
exc = pkgsinfo.PackageDoesNotExistError('foo error')
mock_mpl.Parse().AndRaise(exc)
self.response.set_status(httplib.BAD_REQUEST).AndReturn(None)
self.response.out.write('foo error').AndReturn(None)
self.mox.ReplayAll()
self.c.put(filename)
self.mox.VerifyAll()
def testPutOnNonexistentPackageInfo(self):
"""Test put() with valid input params, but non-existent pkginfo."""
filename = 'pkgname.dmg'
body = '<fakexml>blabla</fakexml>'
pkgloc = '/package/location.pkg'
self.request.body = body
catalogs = ['catalog1', 'catalog2']
manifests = ['manifest1', 'manifest2']
install_types = ['type1', 'type2']
self.request.get('hash').AndReturn(None)
self.request.get('catalogs', None).AndReturn(','.join(catalogs))
self.request.get('manifests', None).AndReturn(','.join(manifests))
self.request.get('install_types').AndReturn(','.join(install_types))
mock_mpl = self.mox.CreateMockAnything()
self.MockDoMunkiAuth(require_level=pkgsinfo.gaeserver.LEVEL_UPLOADPKG)
self.mox.StubOutWithMock(pkgsinfo, 'MunkiPackageInfoPlistStrict')
pkgsinfo.MunkiPackageInfoPlistStrict(body).AndReturn(mock_mpl)
mock_mpl.Parse().AndReturn(None)
self._MockObtainLock('pkgsinfo_%s' % filename, timeout=5.0)
self.MockModelStaticBase(
'PackageInfo', 'get_by_key_name', filename).AndReturn(None)
self.response.set_status(httplib.FORBIDDEN).AndReturn(None)
self.response.out.write('Only updates supported')
self._MockReleaseLock('pkgsinfo_%s' % filename)
self.mox.ReplayAll()
self.c.put(filename)
self.mox.VerifyAll()
def testPutSuccess(self):
"""Test put() with valid input params, giving success."""
filename = 'pkg name.dmg'
filename_quoted = 'pkg%20name.dmg'
name = 'foo pkg name'
body = '<fakexml>blabla</fakexml>'
self.request.body = body
catalogs = ['catalog1', 'catalog2']
manifests = ['manifest1', 'manifest2']
install_types = ['type1', 'type2']
user = 'foouser'
self.request.get('hash').AndReturn(None)
self.request.get('catalogs', None).AndReturn(','.join(catalogs))
self.request.get('manifests', None).AndReturn(','.join(manifests))
self.request.get('install_types').AndReturn(','.join(install_types))
mock_mpl = self.mox.CreateMockAnything()
session = self.mox.CreateMockAnything()
session.uuid = user
self.MockDoMunkiAuth(
require_level=pkgsinfo.gaeserver.LEVEL_UPLOADPKG, and_return=session)
self.mox.StubOutWithMock(pkgsinfo, 'MunkiPackageInfoPlistStrict')
pkgsinfo.MunkiPackageInfoPlistStrict(body).AndReturn(mock_mpl)
mock_mpl.Parse().AndReturn(None)
self._MockObtainLock('pkgsinfo_%s' % filename, timeout=5.0)
pkginfo = self.MockModelStatic('PackageInfo', 'get_by_key_name', filename)
pkginfo.IsSafeToModify().AndReturn(True)
pkginfo.name = mock_mpl.GetPackageName().AndReturn(name)
pkginfo.put()
self._MockReleaseLock('pkgsinfo_%s' % filename)
self.mox.StubOutWithMock(pkgsinfo.models.Catalog, 'Generate')
for catalog in catalogs:
pkgsinfo.models.Catalog.Generate(catalog, delay=1).AndReturn(None)
mock_mpl.GetXml().AndReturn(body)
mock_log = self.MockModel(
'AdminPackageLog', user=user, action='pkginfo', filename=filename,
catalogs=catalogs, manifests=manifests, install_types=install_types,
plist=body)
mock_log.put().AndReturn(None)
self.mox.ReplayAll()
self.c.put(filename_quoted)
self.assertEqual(pkginfo.plist, mock_mpl)
self.assertEqual(pkginfo.name, name)
self.assertEqual(pkginfo.catalogs, catalogs)
self.assertEqual(pkginfo.manifests, manifests)
self.assertEqual(pkginfo.install_types, install_types)
self.mox.VerifyAll()
def testPutSuccessWhenManifestsIsIntentionalEmptyList(self):
"""Test put() with a manifest value that sets the manifest list to []."""
filename = 'pkg name.dmg'
filename_quoted = 'pkg%20name.dmg'
name = 'foo pkg name'
body = '<fakexml>blabla</fakexml>'
self.request.body = body
catalogs = ['catalog1', 'catalog2']
manifests = [] # this pkg is in no manifests
install_types = ['type1', 'type2']
user = 'foouser'
self.request.get('hash').AndReturn(None)
self.request.get('catalogs', None).AndReturn(','.join(catalogs))
self.request.get('manifests', None).AndReturn('') # == NO manifests
self.request.get('install_types').AndReturn(','.join(install_types))
mock_mpl = self.mox.CreateMockAnything()
session = self.mox.CreateMockAnything()
session.uuid = user
self.MockDoMunkiAuth(
require_level=pkgsinfo.gaeserver.LEVEL_UPLOADPKG, and_return=session)
self.mox.StubOutWithMock(pkgsinfo, 'MunkiPackageInfoPlistStrict')
pkgsinfo.MunkiPackageInfoPlistStrict(body).AndReturn(mock_mpl)
mock_mpl.Parse().AndReturn(None)
self._MockObtainLock('pkgsinfo_%s' % filename, timeout=5.0)
pkginfo = self.MockModelStatic('PackageInfo', 'get_by_key_name', filename)
pkginfo.IsSafeToModify().AndReturn(True)
pkginfo.name = mock_mpl.GetPackageName().AndReturn(name)
pkginfo.put()
self._MockReleaseLock('pkgsinfo_%s' % filename)
self.mox.StubOutWithMock(pkgsinfo.models.Catalog, 'Generate')
for catalog in catalogs:
pkgsinfo.models.Catalog.Generate(catalog, delay=1).AndReturn(None)
mock_mpl.GetXml().AndReturn(body)
mock_log = self.MockModel(
'AdminPackageLog', user=user, action='pkginfo', filename=filename,
catalogs=catalogs, manifests=manifests, install_types=install_types,
plist=body)
mock_log.put().AndReturn(None)
self.mox.ReplayAll()
self.c.put(filename_quoted)
self.assertEqual(pkginfo.plist, mock_mpl)
self.assertEqual(pkginfo.name, name)
self.assertEqual(pkginfo.catalogs, catalogs)
self.assertEqual(pkginfo.manifests, manifests)
self.assertEqual(pkginfo.install_types, install_types)
self.mox.VerifyAll()
def testPutSuccessWhenNoManifestsValueSpecified(self):
"""Test put() with no manifest value specified, resulting in no change."""
filename = 'pkg name.dmg'
filename_quoted = 'pkg%20name.dmg'
name = 'foo pkg name'
body = '<fakexml>blabla</fakexml>'
self.request.body = body
catalogs = ['catalog1', 'catalog2']
manifests = None
install_types = ['type1', 'type2']
user = 'foouser'
self.request.get('hash').AndReturn(None)
self.request.get('catalogs', None).AndReturn(','.join(catalogs))
self.request.get('manifests', None).AndReturn(None) # == no value provided
self.request.get('install_types').AndReturn(','.join(install_types))
mock_mpl = self.mox.CreateMockAnything()
session = self.mox.CreateMockAnything()
session.uuid = user
self.MockDoMunkiAuth(
require_level=pkgsinfo.gaeserver.LEVEL_UPLOADPKG, and_return=session)
self.mox.StubOutWithMock(pkgsinfo, 'MunkiPackageInfoPlistStrict')
pkgsinfo.MunkiPackageInfoPlistStrict(body).AndReturn(mock_mpl)
mock_mpl.Parse().AndReturn(None)
self._MockObtainLock('pkgsinfo_%s' % filename, timeout=5.0)
pkginfo = self.MockModelStatic('PackageInfo', 'get_by_key_name', filename)
pkginfo.IsSafeToModify().AndReturn(True)
pkginfo.name = mock_mpl.GetPackageName().AndReturn(name)
pkginfo.put()
self._MockReleaseLock('pkgsinfo_%s' % filename)
self.mox.StubOutWithMock(pkgsinfo.models.Catalog, 'Generate')
for catalog in catalogs:
pkgsinfo.models.Catalog.Generate(catalog, delay=1).AndReturn(None)
mock_mpl.GetXml().AndReturn(body)
mock_log = self.MockModel(
'AdminPackageLog', user=user, action='pkginfo', filename=filename,
catalogs=catalogs, manifests=pkginfo.manifests,
install_types=install_types, plist=body)
mock_log.put().AndReturn(None)
self.mox.ReplayAll()
self.c.put(filename_quoted)
self.assertEqual(pkginfo.plist, mock_mpl)
self.assertEqual(pkginfo.name, name)
self.assertEqual(pkginfo.catalogs, catalogs)
# since the tested function did not set a pkginfo.manifests value
# then this value is still MockMethod since pkginfo is a mock.
self.assertNotEqual(pkginfo.manifests, manifests)
self.assertEqual(pkginfo.install_types, install_types)
self.mox.VerifyAll()
def testPutSuccessWhenHash(self):
"""Test put() with valid input params, giving success."""
filename = 'pkg name.dmg'
filename_quoted = 'pkg%20name.dmg'
name = 'foo pkg name'
body = '<fakexml>blabla</fakexml>'
self.request.body = body
catalogs = ['catalog1', 'catalog2']
manifests = ['manifest1', 'manifest2']
install_types = ['type1', 'type2']
user = 'foouser'
self.request.get('hash').AndReturn('goodhash')
self.request.get('catalogs', None).AndReturn(','.join(catalogs))
self.request.get('manifests', None).AndReturn(','.join(manifests))
self.request.get('install_types').AndReturn(','.join(install_types))
mock_mpl = self.mox.CreateMockAnything()
session = self.mox.CreateMockAnything()
session.uuid = user
self.MockDoMunkiAuth(
require_level=pkgsinfo.gaeserver.LEVEL_UPLOADPKG, and_return=session)
self.mox.StubOutWithMock(pkgsinfo, 'MunkiPackageInfoPlistStrict')
pkgsinfo.MunkiPackageInfoPlistStrict(body).AndReturn(mock_mpl)
mock_mpl.Parse().AndReturn(None)
self._MockObtainLock('pkgsinfo_%s' % filename, timeout=5.0)
pkginfo = self.MockModelStatic('PackageInfo', 'get_by_key_name', filename)
pkginfo.IsSafeToModify().AndReturn(True)
self.mox.StubOutWithMock(self.c, '_Hash')
self.c._Hash(pkginfo.plist).AndReturn('goodhash')
pkginfo.name = mock_mpl.GetPackageName().AndReturn(name)
pkginfo.put()
self._MockReleaseLock('pkgsinfo_%s' % filename)
self.mox.StubOutWithMock(pkgsinfo.models.Catalog, 'Generate')
for catalog in catalogs:
pkgsinfo.models.Catalog.Generate(catalog, delay=1).AndReturn(None)
mock_mpl.GetXml().AndReturn(body)
mock_log = self.MockModel(
'AdminPackageLog', user=user, action='pkginfo', filename=filename,
catalogs=catalogs, manifests=manifests, install_types=install_types,
plist=body)
mock_log.put().AndReturn(None)
self.mox.ReplayAll()
self.c.put(filename_quoted)
self.assertEqual(pkginfo.plist, mock_mpl)
self.assertEqual(pkginfo.name, name)
self.assertEqual(pkginfo.catalogs, catalogs)
self.assertEqual(pkginfo.manifests, manifests)
self.assertEqual(pkginfo.install_types, install_types)
self.mox.VerifyAll()
def testPutWhenHashFail(self):
"""Test put() with valid input params, giving success."""
filename = 'pkg name.dmg'
filename_quoted = 'pkg%20name.dmg'
body = '<fakexml>blabla</fakexml>'
self.request.body = body
catalogs = ['catalog1', 'catalog2']
manifests = ['manifest1', 'manifest2']
install_types = ['type1', 'type2']
self.request.get('hash').AndReturn('goodhash')
self.request.get('catalogs', None).AndReturn(','.join(catalogs))
self.request.get('manifests', None).AndReturn(','.join(manifests))
self.request.get('install_types').AndReturn(','.join(install_types))
mock_mpl = self.mox.CreateMockAnything()
self.MockDoMunkiAuth(require_level=pkgsinfo.gaeserver.LEVEL_UPLOADPKG)
self.mox.StubOutWithMock(pkgsinfo, 'MunkiPackageInfoPlistStrict')
pkgsinfo.MunkiPackageInfoPlistStrict(body).AndReturn(mock_mpl)
mock_mpl.Parse().AndReturn(None)
self._MockObtainLock('pkgsinfo_%s' % filename, timeout=5.0)
pkginfo = self.MockModelStatic('PackageInfo', 'get_by_key_name', filename)
pkginfo.IsSafeToModify().AndReturn(True)
self.mox.StubOutWithMock(self.c, '_Hash')
pkginfo.plist = 'foo'
self.c._Hash(pkginfo.plist).AndReturn('otherhash')
self.response.set_status(httplib.CONFLICT).AndReturn(None)
self.response.out.write('Update hash does not match').AndReturn(None)
self._MockReleaseLock('pkgsinfo_%s' % filename)
self.mox.ReplayAll()
self.c.put(filename_quoted)
self.mox.VerifyAll()
def testPutWhenNotModifiableAndPkginfoChanged(self):
"""Test put() when pkginfo is not modifiable and pkginfo changed."""
filename = 'pkg name.dmg'
filename_quoted = 'pkg%20name.dmg'
name = 'foo pkg name'
body = '<fakexml>blabla</fakexml>'
pkgloc = '/package/location.pkg'
pkgdict = {'installer_item_location': pkgloc}
self.request.body = body
catalogs = ['catalog1', 'catalog2']
manifests = ['manifest1', 'manifest2']
install_types = ['type1', 'type2']
self.request.get('hash').AndReturn('goodhash')
self.request.get('catalogs', None).AndReturn(','.join(catalogs))
self.request.get('manifests', None).AndReturn(','.join(manifests))
self.request.get('install_types').AndReturn(','.join(install_types))
mock_mpl = self.mox.CreateMockAnything()
self.MockDoMunkiAuth(require_level=pkgsinfo.gaeserver.LEVEL_UPLOADPKG)
self.mox.StubOutWithMock(pkgsinfo, 'MunkiPackageInfoPlistStrict')
pkgsinfo.MunkiPackageInfoPlistStrict(body).AndReturn(mock_mpl)
mock_mpl.Parse().AndReturn(None)
self._MockObtainLock('pkgsinfo_%s' % filename, timeout=5.0)
pkginfo = self.MockModelStatic('PackageInfo', 'get_by_key_name', filename)
pkginfo.plist = 'foo'
pkginfo.IsSafeToModify().AndReturn(False)
mock_mpl.EqualIgnoringManifestsAndCatalogs(pkginfo.plist).AndReturn(
False)
self.response.set_status(httplib.FORBIDDEN).AndReturn(None)
self.response.out.write('Changes to pkginfo not allowed').AndReturn(
None)
self._MockReleaseLock('pkgsinfo_%s' % filename)
self.mox.ReplayAll()
self.c.put(filename_quoted)
self.mox.VerifyAll()
def testPutWhenNotModifiableButOnlyManifestsChanged(self):
"""Test put() when pkginfo is not modifiable but only manifests changed."""
filename = 'pkg name.dmg'
filename_quoted = 'pkg%20name.dmg'
name = 'foo pkg name'
body = '<fakexml>blabla</fakexml>'
pkgloc = '/package/location.pkg'
pkgdict = {'installer_item_location': pkgloc}
self.request.body = body
catalogs = ['catalog1', 'catalog2']
manifests = ['manifest1', 'manifest2']
install_types = ['type1', 'type2']
self.request.get('hash').AndReturn('goodhash')
self.request.get('catalogs', None).AndReturn(','.join(catalogs))
self.request.get('manifests', None).AndReturn(','.join(manifests))
self.request.get('install_types').AndReturn(','.join(install_types))
mock_mpl = self.mox.CreateMockAnything()
self.MockDoMunkiAuth(require_level=pkgsinfo.gaeserver.LEVEL_UPLOADPKG)
self.mox.StubOutWithMock(pkgsinfo, 'MunkiPackageInfoPlistStrict')
pkgsinfo.MunkiPackageInfoPlistStrict(body).AndReturn(mock_mpl)
mock_mpl.Parse().AndReturn(None)
self._MockObtainLock('pkgsinfo_%s' % filename, timeout=5.0)
pkginfo = self.MockModelStatic('PackageInfo', 'get_by_key_name', filename)
pkginfo.plist = 'foo'
pkginfo.IsSafeToModify().AndReturn(False)
mock_mpl.EqualIgnoringManifestsAndCatalogs(pkginfo.plist).AndReturn(
True)
    # The path beyond the hash check is covered by earlier tests, so bail there.
self.mox.StubOutWithMock(self.c, '_Hash')
self.c._Hash(pkginfo.plist).AndReturn('otherhash')
self.response.set_status(httplib.CONFLICT).AndReturn(None)
self.response.out.write('Update hash does not match').AndReturn(None)
self._MockReleaseLock('pkgsinfo_%s' % filename)
self.mox.ReplayAll()
self.c.put(filename_quoted)
self.mox.VerifyAll()
logging.basicConfig(filename='/dev/null')
def main(unused_argv):
test.main(unused_argv)
if __name__ == '__main__':
app.run()
|
{
"content_hash": "861e5f4bc2a0528983aa883f4a8e1393",
"timestamp": "",
"source": "github",
"line_count": 673,
"max_line_length": 79,
"avg_line_length": 39.21693907875186,
"alnum_prop": 0.7008297654681166,
"repo_name": "sillywilly42/simian",
"id": "4ae1b2a2e42dde9d6670772a6b7bf0d8c7e0d0bd",
"size": "27015",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/tests/simian/mac/munki/handlers/pkgsinfo_test.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "38117"
},
{
"name": "HTML",
"bytes": "96126"
},
{
"name": "JavaScript",
"bytes": "34481"
},
{
"name": "Makefile",
"bytes": "7246"
},
{
"name": "Python",
"bytes": "1402979"
},
{
"name": "Shell",
"bytes": "20790"
}
],
"symlink_target": ""
}
|
"""
We are given the head node root of a binary tree, where additionally every
node's value is either a 0 or a 1.
Return the same tree where every subtree (of the given tree) not containing a 1
has been removed.
(Recall that the subtree of a node X is X, plus every node that is a descendant
of X.)
Example 1:
Input: [1,null,0,0,1]
Output: [1,null,0,null,1]
Explanation:
Only the subtrees that contain a 1 are kept; all other subtrees are removed.
Example 2:
Input: [1,0,1,0,0,0,1]
Output: [1,null,1,null,1]
Example 3:
Input: [1,1,0,1,1,0,1,0]
Output: [1,1,0,1,1,null,1]
Note:
The binary tree will have at most 100 nodes.
The value of each node will only be 0 or 1.
"""
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
from typing import Tuple
class Solution:
def pruneTree(self, root: TreeNode) -> TreeNode:
root, _ = self.prune(root)
return root
def prune(self, node) -> Tuple[TreeNode, bool]:
if not node:
return None, False
node.left, contain_left = self.prune(node.left)
node.right, contain_right = self.prune(node.right)
if not contain_left and not contain_right and node.val == 0:
return None, False
return node, True
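
# A minimal demo added for illustration (not part of the original solution):
# build the tree from Example 1, [1,null,0,0,1], prune it, and print the
# surviving values in preorder. The _preorder helper is an assumption added
# here purely for the demo.
def _preorder(node):
    """Yield node values in preorder; used only by the demo below."""
    if node:
        yield node.val
        for v in _preorder(node.left):
            yield v
        for v in _preorder(node.right):
            yield v


if __name__ == '__main__':
    root = TreeNode(1)
    root.right = TreeNode(0)
    root.right.left = TreeNode(0)
    root.right.right = TreeNode(1)
    pruned = Solution().pruneTree(root)
    print(list(_preorder(pruned)))  # expected: [1, 0, 1]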
|
{
"content_hash": "8e0e8e76f401f9cff4357c99797cd52b",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 79,
"avg_line_length": 21.640625,
"alnum_prop": 0.6548736462093863,
"repo_name": "algorhythms/LeetCode",
"id": "7a969e1f03107a24ef958c95ecaa494fbc927671",
"size": "1404",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "814 Binary Tree Pruning.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1444167"
}
],
"symlink_target": ""
}
|
import os
import unittest
from argparse import Namespace
from contextlib import contextmanager
from datetime import datetime
from airflow.utils import cli, cli_action_loggers
class CliUtilTest(unittest.TestCase):
def test_metrics_build(self):
func_name = 'test'
exec_date = datetime.utcnow()
ns = Namespace(dag_id='foo', task_id='bar',
subcommand='test', execution_date=exec_date)
metrics = cli._build_metrics(func_name, ns)
expected = {'user': os.environ.get('USER'),
'sub_command': 'test',
'dag_id': 'foo',
'task_id': 'bar',
'execution_date': exec_date}
for k, v in expected.items():
self.assertEqual(v, metrics.get(k))
self.assertTrue(metrics.get('start_datetime') <= datetime.utcnow())
self.assertTrue(metrics.get('full_command'))
log_dao = metrics.get('log')
self.assertTrue(log_dao)
self.assertEqual(log_dao.dag_id, metrics.get('dag_id'))
self.assertEqual(log_dao.task_id, metrics.get('task_id'))
self.assertEqual(log_dao.execution_date, metrics.get('execution_date'))
self.assertEqual(log_dao.owner, metrics.get('user'))
def test_fail_function(self):
"""
        The wrapped function fails and the failure needs to be propagated.
:return:
"""
with self.assertRaises(NotImplementedError):
fail_func(Namespace())
def test_success_function(self):
"""
Test success function but with failing callback.
In this case, failure should not propagate.
:return:
"""
with fail_action_logger_callback():
success_func(Namespace())
@contextmanager
def fail_action_logger_callback():
"""
    Register a failing callback and revert it back when the context closes.
:return:
"""
tmp = cli_action_loggers.__pre_exec_callbacks[:]
def fail_callback(**_):
raise NotImplementedError
cli_action_loggers.register_pre_exec_callback(fail_callback)
    try:
        yield
    finally:
        cli_action_loggers.__pre_exec_callbacks = tmp
@cli.action_logging
def fail_func(_):
raise NotImplementedError
@cli.action_logging
def success_func(_):
pass
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "e52e8e1bb8b58e95626419e3226010bc",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 79,
"avg_line_length": 28.329268292682926,
"alnum_prop": 0.6121394748170469,
"repo_name": "fenglu-g/incubator-airflow",
"id": "6a411faba7ed5deecefed6d141a8a23fd8626b08",
"size": "3137",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/utils/test_cli_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12126"
},
{
"name": "Dockerfile",
"bytes": "3634"
},
{
"name": "HTML",
"bytes": "129454"
},
{
"name": "JavaScript",
"bytes": "22118"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "5852162"
},
{
"name": "Shell",
"bytes": "41793"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0002_auto_20170316_1029'),
]
operations = [
migrations.AlterField(
model_name='author',
name='name',
field=models.CharField(default='Unknow', max_length=100),
),
migrations.AlterField(
model_name='book',
name='publication_date',
field=models.DateField(blank=True, null=True, verbose_name='Publication date'),
),
]
|
{
"content_hash": "7cb54777bbb7b12c9dbed4ad84aef9ef",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 91,
"avg_line_length": 25.82608695652174,
"alnum_prop": 0.5841750841750841,
"repo_name": "jbuisine/MediaLibrary-RestFullAPI",
"id": "d23945a8e79deee4ecdadb98c130252d7796b602",
"size": "667",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/apps/core/migrations/0003_auto_20170329_1732.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "460"
},
{
"name": "HTML",
"bytes": "12364"
},
{
"name": "JavaScript",
"bytes": "4514"
},
{
"name": "Python",
"bytes": "14713"
}
],
"symlink_target": ""
}
|
from sqlalchemy.ext.declarative import declared_attr
from indico.core.db import db
from indico.core.db.sqlalchemy.principals import PrincipalPermissionsMixin
from indico.core.db.sqlalchemy.util.models import auto_table_args
from indico.util.decorators import strict_classproperty
from indico.util.string import format_repr
class TrackPrincipal(PrincipalPermissionsMixin, db.Model):
__tablename__ = 'track_principals'
principal_backref_name = 'in_track_acls'
principal_for = 'Track'
unique_columns = ('track_id',)
allow_event_roles = True
allow_category_roles = True
@declared_attr
def __table_args__(cls):
return auto_table_args(cls, schema='events')
@strict_classproperty
@staticmethod
def __auto_table_args():
return (
db.CheckConstraint('NOT read_access', 'no_read_access'),
db.CheckConstraint('NOT full_access', 'no_full_access')
)
id = db.Column(
db.Integer,
primary_key=True
)
track_id = db.Column(
db.Integer,
db.ForeignKey('events.tracks.id'),
nullable=False,
index=True
)
# relationship backrefs:
# - track (Track.acl_entries)
def __repr__(self):
return format_repr(self, 'id', 'track_id', 'principal', permissions=[])
|
{
"content_hash": "e6e9900fcd0feefb11587b22d9b4482d",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 79,
"avg_line_length": 29.155555555555555,
"alnum_prop": 0.6585365853658537,
"repo_name": "DirkHoffmann/indico",
"id": "36694fb2cea9971afd802d9214806e77f60f6bc4",
"size": "1526",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "indico/modules/events/tracks/models/principals.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "33249"
},
{
"name": "HTML",
"bytes": "1398354"
},
{
"name": "JavaScript",
"bytes": "2295843"
},
{
"name": "Mako",
"bytes": "1527"
},
{
"name": "Python",
"bytes": "5426206"
},
{
"name": "SCSS",
"bytes": "496904"
},
{
"name": "Shell",
"bytes": "3877"
},
{
"name": "TeX",
"bytes": "23435"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'SliderImage'
db.create_table('cmsplugin_nivoslider_sliderimage', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=150, blank=True)),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
('image', self.gf('django.db.models.fields.files.ImageField')(max_length=100)),
('order', self.gf('django.db.models.fields.PositiveIntegerField')(default=100)),
))
db.send_create_signal('cmsplugin_nivoslider', ['SliderImage'])
# Adding model 'SliderAlbum'
db.create_table('cmsplugin_nivoslider_slideralbum', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=150)),
))
db.send_create_signal('cmsplugin_nivoslider', ['SliderAlbum'])
# Adding M2M table for field images on 'SliderAlbum'
db.create_table('cmsplugin_nivoslider_slideralbum_images', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('slideralbum', models.ForeignKey(orm['cmsplugin_nivoslider.slideralbum'], null=False)),
('sliderimage', models.ForeignKey(orm['cmsplugin_nivoslider.sliderimage'], null=False))
))
db.create_unique('cmsplugin_nivoslider_slideralbum_images', ['slideralbum_id', 'sliderimage_id'])
# Adding model 'SliderPlugin'
db.create_table('cmsplugin_sliderplugin', (
('cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('album', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cmsplugin_nivoslider.SliderAlbum'])),
))
db.send_create_signal('cmsplugin_nivoslider', ['SliderPlugin'])
def backwards(self, orm):
# Deleting model 'SliderImage'
db.delete_table('cmsplugin_nivoslider_sliderimage')
# Deleting model 'SliderAlbum'
db.delete_table('cmsplugin_nivoslider_slideralbum')
# Removing M2M table for field images on 'SliderAlbum'
db.delete_table('cmsplugin_nivoslider_slideralbum_images')
# Deleting model 'SliderPlugin'
db.delete_table('cmsplugin_sliderplugin')
models = {
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'cmsplugin_nivoslider.slideralbum': {
'Meta': {'object_name': 'SliderAlbum'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cmsplugin_nivoslider.SliderImage']", 'symmetrical': 'False', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
'cmsplugin_nivoslider.sliderimage': {
'Meta': {'ordering': "('order', 'name')", 'object_name': 'SliderImage'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '100'})
},
'cmsplugin_nivoslider.sliderplugin': {
'Meta': {'object_name': 'SliderPlugin', 'db_table': "'cmsplugin_sliderplugin'", '_ormbases': ['cms.CMSPlugin']},
'album': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmsplugin_nivoslider.SliderAlbum']"}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['cmsplugin_nivoslider']
|
{
"content_hash": "172a2bfd543f043fed1d1cba80beafc2",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 170,
"avg_line_length": 59.76470588235294,
"alnum_prop": 0.598261154855643,
"repo_name": "samirasnoun/django_cms_gallery_image",
"id": "e8b993517f3b5b57d317992367ca852ee4a28f5b",
"size": "6114",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cmsplugin_nivoslider/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "245718"
},
{
"name": "JavaScript",
"bytes": "1060264"
},
{
"name": "Makefile",
"bytes": "2973"
},
{
"name": "PHP",
"bytes": "2274"
},
{
"name": "Python",
"bytes": "3309714"
},
{
"name": "Ruby",
"bytes": "1980"
},
{
"name": "XSLT",
"bytes": "10244"
}
],
"symlink_target": ""
}
|
import argparse
import codecs
import os
import re
import sys
from common import read_json_file
_NEWLINE_PATTERN = re.compile('[\n\r]')
def string_is_ascii(s):
try:
# This approach is better for compatibility
return all(ord(c) < 128 for c in s)
except TypeError:
return False
def load_constants(filename):
"""Read in constants file, which must be output in every language."""
  constant_defs = read_json_file(filename)
constants_text = '\n'
for key in constant_defs:
value = constant_defs[key]
value = value.replace('"', '\\"')
constants_text += u'\nBlockly.Msg["{0}"] = \"{1}\";'.format(
key, value)
return constants_text
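
# Illustration with a hypothetical constants.json entry (not the real file):
# given {"LOGIC_HUE": "210"}, load_constants returns a string along the lines
# of '\n\nBlockly.Msg["LOGIC_HUE"] = "210";' -- one JS assignment per key,
# with any double quotes in the value escaped.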
def main():
"""Generate .js files defining Blockly core and language messages."""
# Process command-line arguments.
parser = argparse.ArgumentParser(description='Convert JSON files to JS.')
parser.add_argument('--source_lang', default='en',
help='ISO 639-1 source language code')
parser.add_argument('--source_lang_file',
default=os.path.join('json', 'en.json'),
help='Path to .json file for source language')
parser.add_argument('--source_synonym_file',
default=os.path.join('json', 'synonyms.json'),
help='Path to .json file with synonym definitions')
parser.add_argument('--source_constants_file',
default=os.path.join('json', 'constants.json'),
help='Path to .json file with constant definitions')
parser.add_argument('--output_dir', default='js/',
help='relative directory for output files')
parser.add_argument('--key_file', default='keys.json',
help='relative path to input keys file')
parser.add_argument('--quiet', action='store_true', default=False,
help='do not write anything to standard output')
parser.add_argument('files', nargs='+', help='input files')
args = parser.parse_args()
if not args.output_dir.endswith(os.path.sep):
args.output_dir += os.path.sep
# Read in source language .json file, which provides any values missing
# in target languages' .json files.
source_defs = read_json_file(os.path.join(os.curdir, args.source_lang_file))
# Make sure the source file doesn't contain a newline or carriage return.
for key, value in source_defs.items():
if _NEWLINE_PATTERN.search(value):
print('ERROR: definition of {0} in {1} contained a newline character.'.
format(key, args.source_lang_file))
sys.exit(1)
sorted_keys = sorted(source_defs.keys())
# Read in synonyms file, which must be output in every language.
synonym_defs = read_json_file(os.path.join(
os.curdir, args.source_synonym_file))
# synonym_defs is also being sorted to ensure the same order is kept
synonym_text = '\n'.join([u'Blockly.Msg["{0}"] = Blockly.Msg["{1}"];'
.format(key, synonym_defs[key]) for key in sorted(synonym_defs)])
# Read in constants file, which must be output in every language.
constants_text = load_constants(os.path.join(os.curdir, args.source_constants_file))
# Create each output file.
for arg_file in args.files:
(_, filename) = os.path.split(arg_file)
target_lang = filename[:filename.index('.')]
if target_lang not in ('qqq', 'keys', 'synonyms', 'constants'):
target_defs = read_json_file(os.path.join(os.curdir, arg_file))
# Verify that keys are 'ascii'
bad_keys = [key for key in target_defs if not string_is_ascii(key)]
if bad_keys:
print(u'These keys in {0} contain non ascii characters: {1}'.format(
filename, ', '.join(bad_keys)))
# If there's a '\n' or '\r', remove it and print a warning.
for key, value in target_defs.items():
if _NEWLINE_PATTERN.search(value):
print(u'WARNING: definition of {0} in {1} contained '
'a newline character.'.
format(key, arg_file))
target_defs[key] = _NEWLINE_PATTERN.sub(' ', value)
# Output file.
outname = os.path.join(os.curdir, args.output_dir, target_lang + '.js')
with codecs.open(outname, 'w', 'utf-8') as outfile:
outfile.write(
"""// This file was automatically generated. Do not modify.
'use strict';
goog.provide('Blockly.Msg.{0}');
goog.require('Blockly.Msg');
""".format(target_lang.replace('-', '.')))
# For each key in the source language file, output the target value
# if present; otherwise, output the source language value with a
# warning comment.
for key in sorted_keys:
if key in target_defs:
value = target_defs[key]
comment = ''
del target_defs[key]
else:
value = source_defs[key]
comment = ' // untranslated'
value = value.replace('"', '\\"')
outfile.write(u'Blockly.Msg["{0}"] = "{1}";{2}\n'
.format(key, value, comment))
# Announce any keys defined only for target language.
if target_defs:
extra_keys = [key for key in target_defs if key not in synonym_defs]
synonym_keys = [key for key in target_defs if key in synonym_defs]
if not args.quiet:
if extra_keys:
print(u'These extra keys appeared in {0}: {1}'.format(
filename, ', '.join(extra_keys)))
if synonym_keys:
print(u'These synonym keys appeared in {0}: {1}'.format(
filename, ', '.join(synonym_keys)))
outfile.write(synonym_text)
outfile.write(constants_text)
if not args.quiet:
print('Created {0}.'.format(outname))
if __name__ == '__main__':
main()
|
{
"content_hash": "16aa888dbc177be1ecec9afd10bace75",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 86,
"avg_line_length": 39.054054054054056,
"alnum_prop": 0.6110726643598616,
"repo_name": "twodee/blockly",
"id": "8a93fbbc71368a0532394ea7abc9400479942b44",
"size": "6482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "i18n/create_messages.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1629"
},
{
"name": "Dart",
"bytes": "64881"
},
{
"name": "HTML",
"bytes": "89833"
},
{
"name": "JavaScript",
"bytes": "5568614"
},
{
"name": "Lua",
"bytes": "60773"
},
{
"name": "PHP",
"bytes": "82776"
},
{
"name": "Python",
"bytes": "139946"
},
{
"name": "Shell",
"bytes": "13555"
}
],
"symlink_target": ""
}
|
import os
import sys
import transaction
from sqlalchemy import engine_from_config
from pyramid.paster import get_appsettings, setup_logging
from models import DBSession, Base
def usage(argv):
cmd = os.path.basename(argv[0])
print('usage: %s <config_uri>\n'
'(example: "%s development.ini")' % (cmd, cmd))
sys.exit(1)
def main(argv=sys.argv):
if len(argv) != 2:
usage(argv)
config_uri = argv[1]
setup_logging(config_uri)
settings = get_appsettings(config_uri)
engine = engine_from_config(settings, 'sqlalchemy.')
DBSession.configure(bind=engine)
Base.metadata.create_all(engine)
|
{
"content_hash": "314e5807fe1ef10265a3b6f68bba04f5",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 57,
"avg_line_length": 23.814814814814813,
"alnum_prop": 0.6796267496111975,
"repo_name": "stxnext/intranet-open",
"id": "fdb5b8f2b10fa6a0d0cc5746db82fe08a109db2b",
"size": "643",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/intranet3/intranet3/initializedb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "209248"
},
{
"name": "JavaScript",
"bytes": "67808"
},
{
"name": "Python",
"bytes": "535298"
},
{
"name": "SQL",
"bytes": "5168"
}
],
"symlink_target": ""
}
|
import sys
import time
from django.db import connection, transaction
from django.conf import settings
from django_comments.models import Comment
from django.core.management.base import BaseCommand, CommandError
from hcomments.models import HComment
class Command(BaseCommand):
help = "Import the standard comments into hcomments"
@transaction.commit_on_success
def handle(self, *args, **options):
"""
Converts all legacy Comment objects into HComment objects.
"""
sql = """
INSERT INTO hcomments_hcomment(comment_ptr_id, parent_id, lft, rght, tree_id, level)
VALUES (%s, NULL, 1, 2, %s, 0)
"""
hcomments = dict(( (c.id, c) for c in HComment.objects.all() ))
comments = dict(( (c.id, c) for c in Comment.objects.all() if c.id not in hcomments ))
cursor = connection.cursor()
print len(comments), 'comments found'
for ix, comment in enumerate(comments.values()):
print comment.user_name + ': ', repr(comment.comment[:50])
params = (comment.id, ix+1)
cursor.execute(sql, params)
transaction.set_dirty()
|
{
"content_hash": "fc95cd5627770c2bd8c72fe40e7ac0e5",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 94,
"avg_line_length": 34.205882352941174,
"alnum_prop": 0.6423043852106621,
"repo_name": "matrixise/hcomments",
"id": "033b6b1fb19b4d17949c104ee1f5fff6a34b43c8",
"size": "1163",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hcomments/management/commands/import_standard_comments.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "2074"
},
{
"name": "JavaScript",
"bytes": "29461"
},
{
"name": "Python",
"bytes": "25372"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnitLabel
from pants.binaries.binary_util import BinaryUtil
from pants.task.console_task import ConsoleTask
from pants.util.contextutil import temporary_dir
from pants.util.process_handler import subprocess
class CountLinesOfCode(ConsoleTask):
"""Print counts of lines of code."""
@classmethod
def subsystem_dependencies(cls):
return super(CountLinesOfCode, cls).subsystem_dependencies() + (BinaryUtil.Factory,)
@classmethod
def register_options(cls, register):
super(CountLinesOfCode, cls).register_options(register)
register('--version', advanced=True, fingerprint=True, default='1.66', help='Version of cloc.')
register('--transitive', type=bool, fingerprint=True, default=True,
help='Operate on the transitive dependencies of the specified targets. '
'Unset to operate only on the specified targets.')
register('--ignored', type=bool, fingerprint=True,
help='Show information about files ignored by cloc.')
def _get_cloc_script(self):
binary_util = BinaryUtil.Factory.create()
return binary_util.select_script('scripts/cloc', self.get_options().version, 'cloc')
def console_output(self, targets):
if not self.get_options().transitive:
targets = self.context.target_roots
buildroot = get_buildroot()
with temporary_dir() as tmpdir:
# Write the paths of all files we want cloc to process to the so-called 'list file'.
list_file = os.path.join(tmpdir, 'list_file')
with open(list_file, 'w') as list_file_out:
for target in targets:
for source in target.sources_relative_to_buildroot():
list_file_out.write(os.path.join(buildroot, source))
list_file_out.write(b'\n')
report_file = os.path.join(tmpdir, 'report_file')
ignored_file = os.path.join(tmpdir, 'ignored')
cloc_script = self._get_cloc_script()
# See http://cloc.sourceforge.net/#options for cloc cmd-line options.
cmd = [cloc_script,
'--skip-uniqueness',
'--ignored={}'.format(ignored_file),
'--list-file={}'.format(list_file),
'--report-file={}'.format(report_file)]
with self.context.new_workunit(name='cloc',
labels=[WorkUnitLabel.TOOL],
cmd=' '.join(cmd)) as workunit:
result = subprocess.call(cmd,
stdout=workunit.output('stdout'),
stderr=workunit.output('stderr'))
if result != 0:
raise TaskError('{} ... exited non-zero ({}).'.format(' '.join(cmd), result))
with open(report_file, 'r') as report_file_in:
for line in report_file_in.read().split('\n'):
yield line
if self.get_options().ignored:
yield 'Ignored the following files:'
with open(ignored_file, 'r') as ignored_file_in:
for line in ignored_file_in.read().split('\n'):
yield line
|
{
"content_hash": "a1169c0b7dd0a6bf7f161759fe60ab68",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 99,
"avg_line_length": 42.81818181818182,
"alnum_prop": 0.6369426751592356,
"repo_name": "15Dkatz/pants",
"id": "911e66360b87f563df49f24252a3ca29643df989",
"size": "3444",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/python/pants/backend/graph_info/tasks/cloc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "781"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "1805"
},
{
"name": "HTML",
"bytes": "79866"
},
{
"name": "Java",
"bytes": "481460"
},
{
"name": "JavaScript",
"bytes": "35417"
},
{
"name": "Python",
"bytes": "5884798"
},
{
"name": "Rust",
"bytes": "212512"
},
{
"name": "Scala",
"bytes": "76124"
},
{
"name": "Shell",
"bytes": "67399"
},
{
"name": "Thrift",
"bytes": "2795"
}
],
"symlink_target": ""
}
|
from frysauce import app
from flask import render_template, request, flash
@app.route("/users/")
def home():
template_name = "home.html"
return render_template(template_name, users=users)
@app.route("/users/create", methods=['GET', 'POST'])
def create_user_form():
"""
Displays a form for creating a user
"""
template_name = "create_user.html"
users = []
    # request.form is only populated on POST; indexing it on a GET request
    # would raise a KeyError, so guard the reads.
    if request.method == 'POST':
        print request.form
        flash(request.form['username'])
        flash(request.form['email'])
    return render_template(template_name, users=users)
@app.route("/users/<int:id>")
def user_detail(id):
return
@app.route("/api/users/<int:id>")
def user_api_list():
return
@app.route("/api/users/<int:id>")
def user_api_detail(id):
return
|
{
"content_hash": "4c888eeef7169b38014996d9542be4a2",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 54,
"avg_line_length": 21.529411764705884,
"alnum_prop": 0.6530054644808743,
"repo_name": "chriskennard/lucky_flask",
"id": "83f50b5db224186b86a8e8d1410549fbaf0d9345",
"size": "757",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "apps/03_frysauce/frysauce/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4094"
},
{
"name": "Python",
"bytes": "2760"
}
],
"symlink_target": ""
}
|
from collections import defaultdict
class Solution(object):
    # Option 1: sliding window, O(n)
    # copied from q159
def lengthOfLongestSubstringKDistinct(self, s, k):
start = end = 0
maxLen = 0
counter = 0
dic = defaultdict(int)
for end in range(len(s)):
if dic[s[end]] == 0:
counter += 1
dic[s[end]] += 1
while counter > k:
dic[s[start]] -= 1
if dic[s[start]] == 0:
counter -= 1
start += 1
maxLen = max(maxLen, end + 1 - start)
return maxLen
s = 'eceba'
k = 2
test = Solution()
print(test.lengthOfLongestSubstringKDistinct(s, k))
|
{
"content_hash": "b0d437c3840f40b91c5eda1cb89acfd8",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 54,
"avg_line_length": 24.724137931034484,
"alnum_prop": 0.497907949790795,
"repo_name": "rx2130/Leetcode",
"id": "58d4abbe96a3b10ad5aa80e06396794042762dee",
"size": "717",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/340 Longest Substring with At Most K Distinct Characters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "277012"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Hosts'
db.create_table(u'main_hosts', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('fqdn', self.gf('django.db.models.fields.CharField')(max_length=256)),
('update_secret', self.gf('django.db.models.fields.CharField')(max_length=256)),
('last_update', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('created_by', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
))
db.send_create_signal(u'main', ['Hosts'])
def backwards(self, orm):
# Deleting model 'Hosts'
db.delete_table(u'main_hosts')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'main.hosts': {
'Meta': {'object_name': 'Hosts'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'fqdn': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'update_secret': ('django.db.models.fields.CharField', [], {'max_length': '256'})
}
}
complete_apps = ['main']
|
{
"content_hash": "7fd24209c6b4c263d1a89576d2b15496",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 187,
"avg_line_length": 64.36,
"alnum_prop": 0.5618396519577378,
"repo_name": "asmaps/nsupdate.info",
"id": "8e24b83fca166ef2d36861cc3bee61f6e375fcce",
"size": "4851",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nsupdate/main/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "428"
},
{
"name": "Python",
"bytes": "126476"
},
{
"name": "Shell",
"bytes": "5108"
}
],
"symlink_target": ""
}
|
"""some functions that may be useful for various checkers
"""
import re
import sys
import string
import astroid
from astroid import scoped_nodes
from logilab.common.compat import builtins
BUILTINS_NAME = builtins.__name__
COMP_NODE_TYPES = astroid.ListComp, astroid.SetComp, astroid.DictComp, astroid.GenExpr
PY3K = sys.version_info[0] == 3
if not PY3K:
EXCEPTIONS_MODULE = "exceptions"
else:
EXCEPTIONS_MODULE = "builtins"
class NoSuchArgumentError(Exception):
pass
def is_inside_except(node):
"""Returns true if node is inside the name of an except handler."""
current = node
while current and not isinstance(current.parent, astroid.ExceptHandler):
current = current.parent
return current and current is current.parent.name
def get_all_elements(node):
"""Recursively returns all atoms in nested lists and tuples."""
if isinstance(node, (astroid.Tuple, astroid.List)):
for child in node.elts:
for e in get_all_elements(child):
yield e
else:
yield node
def clobber_in_except(node):
"""Checks if an assignment node in an except handler clobbers an existing
variable.
Returns (True, args for W0623) if assignment clobbers an existing variable,
(False, None) otherwise.
"""
if isinstance(node, astroid.AssAttr):
return (True, (node.attrname, 'object %r' % (node.expr.as_string(),)))
elif isinstance(node, astroid.AssName):
name = node.name
if is_builtin(name):
return (True, (name, 'builtins'))
else:
stmts = node.lookup(name)[1]
if (stmts and not isinstance(stmts[0].ass_type(),
(astroid.Assign, astroid.AugAssign,
astroid.ExceptHandler))):
return (True, (name, 'outer scope (line %s)' % stmts[0].fromlineno))
return (False, None)
def safe_infer(node):
"""return the inferred value for the given node.
Return None if inference failed or if there is some ambiguity (more than
one node has been inferred)
"""
try:
inferit = node.infer()
value = next(inferit)
except astroid.InferenceError:
return
try:
next(inferit)
return # None if there is ambiguity on the inferred node
except astroid.InferenceError:
return # there is some kind of ambiguity
except StopIteration:
return value
def is_super(node):
"""return True if the node is referencing the "super" builtin function
"""
if getattr(node, 'name', None) == 'super' and \
node.root().name == BUILTINS_NAME:
return True
return False
def is_error(node):
"""return true if the function does nothing but raising an exception"""
for child_node in node.get_children():
if isinstance(child_node, astroid.Raise):
return True
return False
def is_raising(body):
"""return true if the given statement node raise an exception"""
for node in body:
if isinstance(node, astroid.Raise):
return True
return False
def is_empty(body):
"""return true if the given node does nothing but 'pass'"""
return len(body) == 1 and isinstance(body[0], astroid.Pass)
builtins = builtins.__dict__.copy()
SPECIAL_BUILTINS = ('__builtins__',) # '__path__', '__file__')
def is_builtin_object(node):
"""Returns True if the given node is an object from the __builtin__ module."""
return node and node.root().name == BUILTINS_NAME
def is_builtin(name): # was is_native_builtin
"""return true if <name> could be considered as a builtin defined by python
"""
if name in builtins:
return True
if name in SPECIAL_BUILTINS:
return True
return False
def is_defined_before(var_node):
"""return True if the variable node is defined by a parent node (list,
set, dict, or generator comprehension, lambda) or in a previous sibling
node on the same line (statement_defining ; statement_using)
"""
varname = var_node.name
_node = var_node.parent
while _node:
if isinstance(_node, COMP_NODE_TYPES):
for ass_node in _node.nodes_of_class(astroid.AssName):
if ass_node.name == varname:
return True
elif isinstance(_node, astroid.For):
for ass_node in _node.target.nodes_of_class(astroid.AssName):
if ass_node.name == varname:
return True
elif isinstance(_node, astroid.With):
for expr, ids in _node.items:
if expr.parent_of(var_node):
break
if (ids and
isinstance(ids, astroid.AssName) and
ids.name == varname):
return True
elif isinstance(_node, (astroid.Lambda, astroid.Function)):
if _node.args.is_argument(varname):
return True
if getattr(_node, 'name', None) == varname:
return True
break
elif isinstance(_node, astroid.ExceptHandler):
if isinstance(_node.name, astroid.AssName):
ass_node = _node.name
if ass_node.name == varname:
return True
_node = _node.parent
# possibly multiple statements on the same line using semi colon separator
stmt = var_node.statement()
_node = stmt.previous_sibling()
lineno = stmt.fromlineno
while _node and _node.fromlineno == lineno:
for ass_node in _node.nodes_of_class(astroid.AssName):
if ass_node.name == varname:
return True
for imp_node in _node.nodes_of_class((astroid.From, astroid.Import)):
if varname in [name[1] or name[0] for name in imp_node.names]:
return True
_node = _node.previous_sibling()
return False
def is_func_default(node):
"""return true if the given Name node is used in function default argument's
value
"""
parent = node.scope()
if isinstance(parent, astroid.Function):
for default_node in parent.args.defaults:
for default_name_node in default_node.nodes_of_class(astroid.Name):
if default_name_node is node:
return True
return False
def is_func_decorator(node):
"""return true if the name is used in function decorator"""
parent = node.parent
while parent is not None:
if isinstance(parent, astroid.Decorators):
return True
if (parent.is_statement or
isinstance(parent, astroid.Lambda) or
isinstance(parent, (scoped_nodes.ComprehensionScope,
scoped_nodes.ListComp))):
break
parent = parent.parent
return False
def is_ancestor_name(frame, node):
"""return True if `frame` is a astroid.Class node with `node` in the
subtree of its bases attribute
"""
try:
bases = frame.bases
except AttributeError:
return False
for base in bases:
if node in base.nodes_of_class(astroid.Name):
return True
return False
def assign_parent(node):
"""return the higher parent which is not an AssName, Tuple or List node
"""
while node and isinstance(node, (astroid.AssName,
astroid.Tuple,
astroid.List)):
node = node.parent
return node
def overrides_an_abstract_method(class_node, name):
"""return True if pnode is a parent of node"""
for ancestor in class_node.ancestors():
if name in ancestor and isinstance(ancestor[name], astroid.Function) and \
ancestor[name].is_abstract(pass_is_abstract=False):
return True
return False
def overrides_a_method(class_node, name):
"""return True if <name> is a method overridden from an ancestor"""
for ancestor in class_node.ancestors():
if name in ancestor and isinstance(ancestor[name], astroid.Function):
return True
return False
PYMETHODS = set(('__new__', '__init__', '__del__', '__hash__',
'__str__', '__repr__',
'__len__', '__iter__',
'__delete__', '__get__', '__set__',
'__getitem__', '__setitem__', '__delitem__', '__contains__',
'__getattribute__', '__getattr__', '__setattr__', '__delattr__',
'__call__',
'__enter__', '__exit__',
'__cmp__', '__ge__', '__gt__', '__le__', '__lt__', '__eq__',
'__nonzero__', '__neg__', '__invert__',
'__mul__', '__imul__', '__rmul__',
'__div__', '__idiv__', '__rdiv__',
'__add__', '__iadd__', '__radd__',
'__sub__', '__isub__', '__rsub__',
'__pow__', '__ipow__', '__rpow__',
'__mod__', '__imod__', '__rmod__',
'__and__', '__iand__', '__rand__',
'__or__', '__ior__', '__ror__',
'__xor__', '__ixor__', '__rxor__',
# XXX To be continued
))
def check_messages(*messages):
"""decorator to store messages that are handled by a checker method"""
def store_messages(func):
func.checks_msgs = messages
return func
return store_messages
class IncompleteFormatString(Exception):
"""A format string ended in the middle of a format specifier."""
pass
class UnsupportedFormatCharacter(Exception):
"""A format character in a format string is not one of the supported
format characters."""
def __init__(self, index):
Exception.__init__(self, index)
self.index = index
def parse_format_string(format_string):
"""Parses a format string, returning a tuple of (keys, num_args), where keys
is the set of mapping keys in the format string, and num_args is the number
of arguments required by the format string. Raises
IncompleteFormatString or UnsupportedFormatCharacter if a
parse error occurs."""
keys = set()
num_args = 0
def next_char(i):
i += 1
if i == len(format_string):
raise IncompleteFormatString
return (i, format_string[i])
i = 0
while i < len(format_string):
char = format_string[i]
if char == '%':
i, char = next_char(i)
# Parse the mapping key (optional).
key = None
if char == '(':
depth = 1
i, char = next_char(i)
key_start = i
while depth != 0:
if char == '(':
depth += 1
elif char == ')':
depth -= 1
i, char = next_char(i)
key_end = i - 1
key = format_string[key_start:key_end]
# Parse the conversion flags (optional).
while char in '#0- +':
i, char = next_char(i)
# Parse the minimum field width (optional).
if char == '*':
num_args += 1
i, char = next_char(i)
else:
while char in string.digits:
i, char = next_char(i)
# Parse the precision (optional).
if char == '.':
i, char = next_char(i)
if char == '*':
num_args += 1
i, char = next_char(i)
else:
while char in string.digits:
i, char = next_char(i)
# Parse the length modifier (optional).
if char in 'hlL':
i, char = next_char(i)
# Parse the conversion type (mandatory).
if PY3K:
flags = 'diouxXeEfFgGcrs%a'
else:
flags = 'diouxXeEfFgGcrs%'
if char not in flags:
raise UnsupportedFormatCharacter(i)
if key:
keys.add(key)
elif char != '%':
num_args += 1
i += 1
return keys, num_args
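
# Informal illustration of the contract (values checked by hand, not a
# collected doctest):
#   parse_format_string('%(name)s has %d items')  ->  ({'name'}, 1)
#   parse_format_string('%*.*f')                  ->  (set(), 3)
# '%%' consumes no argument; a string ending mid-specifier raises
# IncompleteFormatString, and an unknown conversion type raises
# UnsupportedFormatCharacter.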
def is_attr_protected(attrname):
"""return True if attribute name is protected (start with _ and some other
details), False otherwise.
"""
return attrname[0] == '_' and not attrname == '_' and not (
attrname.startswith('__') and attrname.endswith('__'))
def node_frame_class(node):
"""return klass node for a method node (or a staticmethod or a
classmethod), return null otherwise
"""
klass = node.frame()
while klass is not None and not isinstance(klass, astroid.Class):
if klass.parent is None:
klass = None
else:
klass = klass.parent.frame()
return klass
def is_super_call(expr):
"""return True if expression node is a function call and if function name
is super. Check before that you're in a method.
"""
return (isinstance(expr, astroid.CallFunc) and
isinstance(expr.func, astroid.Name) and
expr.func.name == 'super')
def is_attr_private(attrname):
"""Check that attribute name is private (at least two leading underscores,
at most one trailing underscore)
"""
regex = re.compile('^_{2,}.*[^_]+_?$')
return regex.match(attrname)
def get_argument_from_call(callfunc_node, position=None, keyword=None):
"""Returns the specified argument from a function call.
:param callfunc_node: Node representing a function call to check.
:param int position: position of the argument.
:param str keyword: the keyword of the argument.
:returns: The node representing the argument, None if the argument is not found.
:raises ValueError: if both position and keyword are None.
:raises NoSuchArgumentError: if no argument at the provided position or with
the provided keyword.
"""
if position is None and keyword is None:
raise ValueError('Must specify at least one of: position or keyword.')
try:
if position is not None and not isinstance(callfunc_node.args[position], astroid.Keyword):
return callfunc_node.args[position]
except IndexError as error:
raise NoSuchArgumentError(error)
if keyword:
for arg in callfunc_node.args:
if isinstance(arg, astroid.Keyword) and arg.arg == keyword:
return arg.value
raise NoSuchArgumentError
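
# Example with a hypothetical call node for `foo(1, bar=2)`:
#   get_argument_from_call(node, position=0)    returns the node for `1`
#   get_argument_from_call(node, keyword='bar') returns the node for `2`
# and a position past the end of the arguments raises NoSuchArgumentError.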
def inherit_from_std_ex(node):
"""
    Return true if the given class node is a subclass of
exceptions.Exception.
"""
if node.name in ('Exception', 'BaseException') \
and node.root().name == EXCEPTIONS_MODULE:
return True
return any(inherit_from_std_ex(parent)
for parent in node.ancestors(recurs=False))
def is_import_error(handler):
"""
Check if the given exception handler catches
ImportError.
:param handler: A node, representing an ExceptHandler node.
:returns: True if the handler catches ImportError, False otherwise.
"""
names = None
if isinstance(handler.type, astroid.Tuple):
names = [name for name in handler.type.elts
if isinstance(name, astroid.Name)]
elif isinstance(handler.type, astroid.Name):
names = [handler.type]
else:
# Don't try to infer that.
return
for name in names:
try:
for infered in name.infer():
if (isinstance(infered, astroid.Class) and
inherit_from_std_ex(infered) and
infered.name == 'ImportError'):
return True
except astroid.InferenceError:
continue
def has_known_bases(klass):
"""Returns true if all base classes of a class could be inferred."""
try:
return klass._all_bases_known
except AttributeError:
pass
for base in klass.bases:
result = safe_infer(base)
# TODO: check for A->B->A->B pattern in class structure too?
if (not isinstance(result, astroid.Class) or
result is klass or
not has_known_bases(result)):
klass._all_bases_known = False
return False
klass._all_bases_known = True
return True
def decorated_with_property(node):
""" Detect if the given function node is decorated with a property. """
if not node.decorators:
return False
for decorator in node.decorators.nodes:
if not isinstance(decorator, astroid.Name):
continue
try:
for infered in decorator.infer():
if isinstance(infered, astroid.Class):
if (infered.root().name == BUILTINS_NAME and
infered.name == 'property'):
return True
for ancestor in infered.ancestors():
if (ancestor.name == 'property' and
ancestor.root().name == BUILTINS_NAME):
return True
except astroid.InferenceError:
pass
|
{
"content_hash": "6051fe0bc0dd7bf67d7de59e48d2e6a3",
"timestamp": "",
"source": "github",
"line_count": 484,
"max_line_length": 98,
"avg_line_length": 35.62603305785124,
"alnum_prop": 0.5675346517427362,
"repo_name": "Titulacion-Sistemas/PythonTitulacion-EV",
"id": "f3a7d17625b4b69480be26f9336da7fd34ba5b27",
"size": "18083",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Lib/site-packages/pylint/checkers/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "2117"
},
{
"name": "C",
"bytes": "469338"
},
{
"name": "C++",
"bytes": "93276"
},
{
"name": "CSS",
"bytes": "173812"
},
{
"name": "JavaScript",
"bytes": "203291"
},
{
"name": "PowerShell",
"bytes": "8104"
},
{
"name": "Python",
"bytes": "17198855"
},
{
"name": "Shell",
"bytes": "2237"
},
{
"name": "TeX",
"bytes": "1527"
},
{
"name": "Visual Basic",
"bytes": "904"
},
{
"name": "XSLT",
"bytes": "154751"
}
],
"symlink_target": ""
}
|
import logging
import os
import sys
logging.basicConfig(level=logging.ERROR)
top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir,
os.pardir))
sys.path.insert(0, top_dir)
import taskflow.engines
from taskflow.patterns import graph_flow as gf
from taskflow.patterns import linear_flow as lf
from taskflow import task
# In this example there are complex dependencies between tasks that are used to
# perform a simple set of linear equations.
#
# As you will see below the tasks just define what they require as input
# and produce as output (named values). Then the user doesn't care about
# ordering the TASKS (in this case the tasks calculate pieces of the overall
# equation).
#
# As you will notice graph_flow resolves dependencies automatically using the
# tasks requirements and provided values and no ordering dependency has to be
# manually created.
#
# Also notice that flows of any types can be nested into a graph_flow; subflows
# dependencies will be resolved too!! Pretty cool right!
class Adder(task.Task):
def execute(self, x, y):
return x + y
flow = gf.Flow('root').add(
lf.Flow('nested_linear').add(
# x2 = y3+y4 = 12
Adder("add2", provides='x2', rebind=['y3', 'y4']),
# x1 = y1+y2 = 4
Adder("add1", provides='x1', rebind=['y1', 'y2'])
),
# x5 = x1+x3 = 20
Adder("add5", provides='x5', rebind=['x1', 'x3']),
# x3 = x1+x2 = 16
Adder("add3", provides='x3', rebind=['x1', 'x2']),
# x4 = x2+y5 = 21
Adder("add4", provides='x4', rebind=['x2', 'y5']),
# x6 = x5+x4 = 41
Adder("add6", provides='x6', rebind=['x5', 'x4']),
# x7 = x6+x6 = 82
Adder("add7", provides='x7', rebind=['x6', 'x6']))
# Provide the initial variable inputs using a storage dictionary.
store = {
"y1": 1,
"y2": 3,
"y3": 5,
"y4": 7,
"y5": 9,
}
result = taskflow.engines.run(
flow, engine_conf='serial', store=store)
print("Single threaded engine result %s" % result)
result = taskflow.engines.run(
flow, engine_conf='parallel', store=store)
print("Multi threaded engine result %s" % result)
|
{
"content_hash": "c8ccfacc3914e59c37ad43d559486f2d",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 79,
"avg_line_length": 29.4,
"alnum_prop": 0.6331065759637188,
"repo_name": "varunarya10/taskflow",
"id": "fd96d24a8e24db7ab79a2d9bf2ee851242a92603",
"size": "2867",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "taskflow/examples/graph_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "734087"
},
{
"name": "Shell",
"bytes": "1988"
}
],
"symlink_target": ""
}
|
from flanker.mime.message.utils import to_unicode
def convert_to_unicode(charset, value):
    # In case of unicode we have nothing to do.
if isinstance(value, unicode):
return value
charset = _translate_charset(charset)
return to_unicode(value, charset=charset)
def _translate_charset(charset):
"""Translates crappy charset into Python analogue (if supported).
Otherwise returns unmodified.
"""
# ev: (ticket #2819)
if "sjis" in charset.lower():
return 'shift_jis'
# cp874 looks to be an alias for windows-874
if "windows-874" == charset.lower():
return "cp874"
if 'koi8-r' in charset.lower():
return 'koi8_r'
if 'utf-8' in charset.lower() or charset.lower() == 'x-unknown':
return 'utf-8'
return charset
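
# Rough usage sketch (the charset names are just examples):
#   convert_to_unicode('x-sjis', value)      decodes via 'shift_jis'
#   convert_to_unicode('windows-874', value) decodes via 'cp874'
#   convert_to_unicode('x-unknown', value)   falls back to 'utf-8'
# Values that are already unicode are returned untouched.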
|
{
"content_hash": "2db2879c1f19f0b5fd5ba925f8deb337",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 69,
"avg_line_length": 25.15625,
"alnum_prop": 0.6459627329192547,
"repo_name": "duo-labs/isthislegit",
"id": "7ecded7ae0c841826fb4f53e3790a91ced41cbe7",
"size": "805",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dashboard/lib/flanker/mime/message/charsets.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "89903"
},
{
"name": "Dockerfile",
"bytes": "1356"
},
{
"name": "HTML",
"bytes": "39168"
},
{
"name": "JavaScript",
"bytes": "47642"
},
{
"name": "Python",
"bytes": "315671"
}
],
"symlink_target": ""
}
|
from f5_heat.resources import f5_sys_save
from heat.common import exception
from heat.common import template_format
from heat.engine.hot.template import HOTemplate20150430
from heat.engine import rsrc_defn
from heat.engine import template
import mock
import pytest
save_defn = '''
heat_template_version: 2015-04-30
description: Testing iAppService plugin
resources:
bigip_rsrc:
type: F5::BigIP::Device
properties:
ip: 10.0.0.1
username: admin
password: admin
save_rsrc:
type: F5::Sys::Save
properties:
bigip_server: bigip_rsrc
'''
versions = ('2015-04-30', '2015-04-30')
@mock.patch.object(template, 'get_version', return_value=versions)
@mock.patch.object(
template,
'get_template_class',
return_value=HOTemplate20150430
)
def mock_template(templ_vers, templ_class, test_templ=save_defn):
'''Mock a Heat template for the Kilo version.'''
templ_dict = template_format.parse(test_templ)
return templ_dict
def create_resource_definition(templ_dict):
'''Create resource definition.'''
rsrc_def = rsrc_defn.ResourceDefinition(
'test_stack',
templ_dict['resources']['save_rsrc']['type'],
properties=templ_dict['resources']['save_rsrc']['properties']
)
return rsrc_def
@pytest.fixture
def F5SysSave():
'''Instantiate the F5SysSave resource.'''
template_dict = mock_template()
rsrc_def = create_resource_definition(template_dict)
return f5_sys_save.F5SysSave(
"testing_save", rsrc_def, mock.MagicMock()
)
@pytest.fixture
def CreateSaveSideEffect(F5SysSave):
F5SysSave.get_bigip()
F5SysSave.bigip.tm.sys.config.exec_cmd.side_effect = Exception()
return F5SysSave
# Tests
def test_handle_create(F5SysSave):
create_result = F5SysSave.handle_create()
assert create_result is None
assert F5SysSave.bigip.tm.sys.config.exec_cmd.call_args == \
mock.call('save')
def test_handle_create_error(CreateSaveSideEffect):
'''Currently, test exists to satisfy 100% code coverage.'''
with pytest.raises(exception.ResourceFailure):
CreateSaveSideEffect.handle_create()
def test_handle_delete(F5SysSave):
delete_result = F5SysSave.handle_delete()
assert delete_result is True
def test_resource_mapping():
rsrc_map = f5_sys_save.resource_mapping()
assert rsrc_map == {
'F5::Sys::Save': f5_sys_save.F5SysSave
}
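# The suite above can be run with pytest from the repository root, e.g.:
#   py.test f5_heat/resources/test/test_f5_sys_save.py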
|
{
"content_hash": "2a5d6e46e9e6a99270a6e63270c4b547",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 69,
"avg_line_length": 25.442105263157895,
"alnum_prop": 0.6975589573851882,
"repo_name": "F5Networks/f5-openstack-heat-plugins",
"id": "25d01a0fb2b572d934ba2e3f03b6e032d37a8cfa",
"size": "2999",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "f5_heat/resources/test/test_f5_sys_save.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "644"
},
{
"name": "Python",
"bytes": "120753"
}
],
"symlink_target": ""
}
|
from django.utils.translation import ugettext_noop as _
LOG_STATUS_UPDATE = _('Update node status')
LOG_NODE_CREATE = _('Create compute node')
LOG_NODE_UPDATE = _('Update compute node')
LOG_DEF_UPDATE = _('Update node definition')
LOG_DEF_DELETE = _('Delete node definition')
LOG_IMG_IMPORT = _('Import image on node storage')
LOG_IMG_DELETE = _('Remove image from node storage')
LOG_NS_CREATE = _('Create node storage')
LOG_NS_UPDATE = _('Update node storage')
LOG_NS_DELETE = _('Delete node storage')
LOG_VM_HARVEST = _('Harvest servers')
LOG_NS_SNAPS_SYNC = _('Synchronize server snapshots on node storage')
|
{
"content_hash": "3d643fcd010a9d7eefc33126e3df54e3",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 69,
"avg_line_length": 29.476190476190474,
"alnum_prop": 0.7075928917609047,
"repo_name": "erigones/esdc-ce",
"id": "84b2facf879f62b3e05929d4927604acacb13707",
"size": "619",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/node/messages.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "2728"
},
{
"name": "C",
"bytes": "8581"
},
{
"name": "CSS",
"bytes": "146461"
},
{
"name": "DTrace",
"bytes": "2250"
},
{
"name": "Erlang",
"bytes": "18842"
},
{
"name": "HTML",
"bytes": "473343"
},
{
"name": "JavaScript",
"bytes": "679240"
},
{
"name": "Jinja",
"bytes": "29584"
},
{
"name": "PLpgSQL",
"bytes": "17954"
},
{
"name": "Perl",
"bytes": "93955"
},
{
"name": "Python",
"bytes": "3124524"
},
{
"name": "Ruby",
"bytes": "56"
},
{
"name": "SCSS",
"bytes": "82814"
},
{
"name": "Shell",
"bytes": "281885"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import numba.unittest_support as unittest
from numba.compiler import compile_isolated, Flags
from numba import types, utils
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")
force_pyobj_flags = Flags()
force_pyobj_flags.set("force_pyobject")
def is_in_mandelbrot(c):
i = 0
z = 0.0j
for i in range(100):
z = z ** 2 + c
if (z.real * z.real + z.imag * z.imag) >= 4:
return False
return True
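# Worked example (added for illustration): for c = 1+0j the iteration
# z -> z**2 + c yields z = 1, then z = 2, so z.real**2 + z.imag**2 == 4 >= 4
# and the point escapes (False); for c = 0j, z stays at 0 for all 100
# iterations and the point is kept (True).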
class TestMandelbrot(unittest.TestCase):
def test_mandelbrot(self):
pyfunc = is_in_mandelbrot
cr = compile_isolated(pyfunc, (types.complex64,))
cfunc = cr.entry_point
points = [0+0j, 1+0j, 0+1j, 1+1j, 0.1+0.1j]
for p in points:
self.assertEqual(cfunc(p), pyfunc(p))
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "dcbb36f1f372b80a1aca888807f42f3d",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 57,
"avg_line_length": 23.54054054054054,
"alnum_prop": 0.618828932261768,
"repo_name": "stefanseefeld/numba",
"id": "c32511aebdcf9808cddbd5e49be9b7e4223ce841",
"size": "871",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "numba/tests/test_mandelbrot.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5535"
},
{
"name": "C",
"bytes": "303376"
},
{
"name": "C++",
"bytes": "17024"
},
{
"name": "Cuda",
"bytes": "214"
},
{
"name": "HTML",
"bytes": "98846"
},
{
"name": "Jupyter Notebook",
"bytes": "110325"
},
{
"name": "Python",
"bytes": "3946372"
},
{
"name": "Shell",
"bytes": "2414"
}
],
"symlink_target": ""
}
|
from point_generator import SimplePointGenerator, ChessboardPointGenerator, HexagonPointGenerator
from edge_generator import ChessboardEdgeGenerator, CrossedChessboardEdgeGenerator, LimitedQuantityEdgeGenerator, LimitedRangeEdgeGenerator
from edge_cost_generator import DistanceEdgeCostGenerator, ThresholdDistanceFromCornerEdgeCostGenerator, TrueDistanceFromCornersEdgeCostGenerator, RandomCoefficientEdgeCostGenerator, SlantedBlockadeEdgeCostGenerator
from util import Corner
from world_generator import SimpleWorldGenerator
class AbstractWorldGeneratorFactory(object):
""" generates N-dimensional worlds """
@classmethod
def create_world_generator(cls, number_of_dimensions, *args, **kwargs):
raise NotImplementedError()
class DistanceCostWorldGeneratorFactory(AbstractWorldGeneratorFactory):
""" generates worlds with edge costs equal to distance between the points """
@classmethod
def _get_edge_cost_generator(cls):
return DistanceEdgeCostGenerator()
class CertainPointNumberWorldGeneratorFactory(AbstractWorldGeneratorFactory):
EDGE_GENERATOR_CLASS = None
@classmethod
def create_world_generator(cls, number_of_dimensions, number_of_points):
point_generator = cls._get_point_generator(number_of_dimensions, number_of_points)
edge_generator = cls.EDGE_GENERATOR_CLASS()
edge_cost_generator = cls._get_edge_cost_generator()
return SimpleWorldGenerator(number_of_dimensions, point_generator, edge_generator, edge_cost_generator, number_of_points)
class SimpleWorldGeneratorFactory(DistanceCostWorldGeneratorFactory, CertainPointNumberWorldGeneratorFactory):
EDGE_GENERATOR_CLASS = LimitedQuantityEdgeGenerator
@classmethod
def _get_point_generator(cls, number_of_dimensions, number_of_points):
return SimplePointGenerator(number_of_dimensions, 0, 100)
class SlightlyRandomizedWorldGeneratorFactory(SimpleWorldGeneratorFactory):
""" generates worlds with edge costs only slightly different than the distance between the points """
@classmethod
def _get_edge_cost_generator(cls):
return RandomCoefficientEdgeCostGenerator(0.8, 1.2)
class AbstractCustomWidthWorldGeneratorFactory(DistanceCostWorldGeneratorFactory):
EDGE_GENERATOR_CLASS = None
POINT_GENERATOR_CLASS = None
@classmethod
def _get_number_of_points(cls, number_of_dimensions, width):
return width ** number_of_dimensions
@classmethod
def create_world_generator(cls, number_of_dimensions, width):
point_generator = cls.POINT_GENERATOR_CLASS(number_of_dimensions, 0, width)
edge_generator = cls.EDGE_GENERATOR_CLASS()
edge_cost_generator = cls._get_edge_cost_generator()
return SimpleWorldGenerator(number_of_dimensions, point_generator, edge_generator, edge_cost_generator, cls._get_number_of_points(number_of_dimensions, width))
class ChessboardWorldGeneratorFactory(AbstractCustomWidthWorldGeneratorFactory):
""" generates n-dimensional raster-shaped worlds with equal edge costs equal to the distance between the points, like this:
*----*----*
| | |
| | |
| | |
| | |
*----*----*
| | |
| | |
| | |
| | |
*----*----*
"""
EDGE_GENERATOR_CLASS = ChessboardEdgeGenerator
POINT_GENERATOR_CLASS = ChessboardPointGenerator
class CrossedChessboardWorldGeneratorFactory(ChessboardWorldGeneratorFactory):
""" generates 2-dimensional raster-shaped worlds with equal edge costs equal to the distance between the points, like this:
*----*----*
|\ /|\ /|
| \/ | \/ |
| /\ | /\ |
|/ \|/ \|
*----*----*
|\ /|\ /|
| \/ | \/ |
| /\ | /\ |
|/ \|/ \|
*----*----*
"""
EDGE_GENERATOR_CLASS = CrossedChessboardEdgeGenerator
class UpperLeftCornerTrueDistanceCrossedChessboardWorldGeneratorFactory(CrossedChessboardWorldGeneratorFactory):
@classmethod
def _get_edge_cost_generator(cls):
return TrueDistanceFromCornersEdgeCostGenerator([Corner.UPPER_LEFT], exponent=2)
class UpperLeftCornerThresholdDistanceCrossedChessboardWorldGeneratorFactory(CrossedChessboardWorldGeneratorFactory):
@classmethod
def _get_edge_cost_generator(cls):
return ThresholdDistanceFromCornerEdgeCostGenerator([[Corner.UPPER_LEFT, Corner.LOWER_RIGHT]])
class UpperLeftCornerBlockadeCrossedChessboardWorldGeneratorFactory(CrossedChessboardWorldGeneratorFactory):
@classmethod
def _get_edge_cost_generator(cls):
return SlantedBlockadeEdgeCostGenerator([[Corner.UPPER_LEFT, Corner.LOWER_RIGHT]])
class AbstractHexagonWorldGeneratorFactory(AbstractCustomWidthWorldGeneratorFactory):
EDGE_GENERATOR_CLASS = LimitedRangeEdgeGenerator
POINT_GENERATOR_CLASS = HexagonPointGenerator
@classmethod
def _get_number_of_points(cls, number_of_dimensions, width):
return 9
class HexagonWorldGeneratorFactory(AbstractHexagonWorldGeneratorFactory):
@classmethod
def _get_edge_cost_generator(cls):
return RandomCoefficientEdgeCostGenerator(0.5, 2.0)
class UpperLeftCornerTrueDistanceHexagonWorldGeneratorFactory(AbstractHexagonWorldGeneratorFactory):
@classmethod
def _get_edge_cost_generator(cls):
return TrueDistanceFromCornersEdgeCostGenerator([Corner.UPPER_LEFT], exponent=2)
class UpperLeftCornerThresholdDistanceHexagonWorldGeneratorFactory(AbstractHexagonWorldGeneratorFactory):
@classmethod
def _get_edge_cost_generator(cls):
return ThresholdDistanceFromCornerEdgeCostGenerator([[Corner.UPPER_LEFT, Corner.LOWER_RIGHT]])
class UpperLeftCornerBlockadeHexagonWorldGeneratorFactory(AbstractHexagonWorldGeneratorFactory):
@classmethod
def _get_edge_cost_generator(cls):
return SlantedBlockadeEdgeCostGenerator([[Corner.UPPER_LEFT, Corner.LOWER_RIGHT]])
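# Minimal usage sketch (added for illustration): every factory ultimately
# builds a SimpleWorldGenerator from a point, an edge and an edge-cost
# generator. For example, a two-dimensional 3x3 chessboard world:
#
#   generator = ChessboardWorldGeneratorFactory.create_world_generator(2, 3)
#   # 3 ** 2 == 9 points; edge costs equal the distance between points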
|
{
"content_hash": "84ef2fea8293fbbc7365a88c593a1074",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 215,
"avg_line_length": 46.960629921259844,
"alnum_prop": 0.738430583501006,
"repo_name": "ppolewicz/ant-colony",
"id": "103485381484dfa2cd73c55ff1fec8b1aa341baa",
"size": "5964",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "antcolony/world_generator_factory.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "102557"
},
{
"name": "Shell",
"bytes": "422"
},
{
"name": "XSLT",
"bytes": "15049"
}
],
"symlink_target": ""
}
|
import logging
from django.core.management.base import BaseCommand, CommandError
from django.utils import six
from oscar.core import customisation
class Command(BaseCommand):
help = (
"Create a local version of one of Oscar's app so it can "
"be customised")
def add_arguments(self, parser):
parser.add_argument('app_label', help='The application to fork')
parser.add_argument('target_path', help='The path to copy the files to')
def handle(self, *args, **options):
# Use a stdout logger
logger = logging.getLogger(__name__)
stream = logging.StreamHandler(self.stdout)
logger.addHandler(stream)
logger.setLevel(logging.DEBUG)
app_label, folder_path = options['app_label'], options['target_path']
try:
customisation.fork_app(app_label, folder_path, logger)
except Exception as e:
raise CommandError(six.text_type(e))
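# Typical invocation (illustrative; "catalogue" is just an example app label):
#
#   python manage.py oscar_fork_app catalogue myproject/apps/
#
# which copies the named Oscar app into myproject/apps/ for local
# customisation.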
|
{
"content_hash": "b066c9a180b0b9501cbf695e792dfec8",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 80,
"avg_line_length": 32.93103448275862,
"alnum_prop": 0.6586387434554973,
"repo_name": "vicky2135/lucious",
"id": "93c9cd8255009d2e9170344830e3fe67700e0734",
"size": "955",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "src/oscar/management/commands/oscar_fork_app.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "896683"
},
{
"name": "C++",
"bytes": "52230"
},
{
"name": "CSS",
"bytes": "1169533"
},
{
"name": "HTML",
"bytes": "1104983"
},
{
"name": "JavaScript",
"bytes": "1055140"
},
{
"name": "Makefile",
"bytes": "145238"
},
{
"name": "Python",
"bytes": "55993261"
},
{
"name": "Shell",
"bytes": "40487"
}
],
"symlink_target": ""
}
|
import sys
import socket
import struct
EXTERNAL_NETID = 65535
__version__ = 'pna_parser_0.3.0-py'
# struct lengths with names
CHAR = 'c'
U_INT1 = 'B'
U_INT2 = 'H'
U_INT4 = 'I'
class PNALogParser(object):
"""This class parses a log file and returns the data contained within
that file."""
_header = {'v1a': (('start_time', U_INT4), ('end_time', U_INT4),
('nentries', U_INT4)),
'v2': (('magic0', CHAR), ('magic1', CHAR), ('magic2', CHAR),
('version', U_INT1), ('start_time', U_INT4),
('end_time', U_INT4), ('nentries', U_INT4))}
_entry = {'v1': (('local_ip', U_INT4), ('remote_ip', U_INT4),
('local_port', U_INT2), ('remote_port', U_INT2),
('packets_out', U_INT4), ('packets_in', U_INT4),
('octets_out', U_INT4), ('octets_in', U_INT4),
('begin_time', U_INT4),
('l4_protocol', U_INT1),
('first_direction', U_INT1),
('blank0', U_INT1), ('blank1', U_INT1)),
'v1a': (('local_ip', U_INT4), ('remote_ip', U_INT4),
('local_port', U_INT2), ('remote_port', U_INT2),
('local_netid', U_INT2), ('remote_netid', U_INT2),
('packets_out', U_INT4), ('packets_in', U_INT4),
('octets_out', U_INT4), ('octets_in', U_INT4),
('begin_time', U_INT4),
('l4_protocol', U_INT1),
('first_direction', U_INT1),
('blank0', U_INT1), ('blank1', U_INT1)),
'v2': (('local_ip', U_INT4), ('remote_ip', U_INT4),
('local_port', U_INT2), ('remote_port', U_INT2),
('local_netid', U_INT2), ('remote_netid', U_INT2),
('packets_out', U_INT4), ('packets_in', U_INT4),
('octets_out', U_INT4), ('octets_in', U_INT4),
('local_flags', U_INT2), ('remote_flags', U_INT2),
('begin_time', U_INT4), ('end_time', U_INT4),
('l4_protocol', U_INT1),
('first_direction', U_INT1),
('blank0', U_INT1), ('blank1', U_INT1))}
def __init__(self, filename):
        # open the file in binary mode since the log contains packed structs
        with open(filename, 'rb') as f:
self.data = f.read()
self.data_len = len(self.data)
self.position = 0
# Figure out what log version this is
if self.data[0:3] != "PNA":
self.version = 'v1a'
else:
self.version = 'v%d' % ord(self.data[3])
# Read in all the version fields
self._set_header_type()
self._set_entry_type()
# Parse the header for useful info
self.header = self.parse_header()
self.entries_seen = 0
# peek at the first entry to determine v1 or v1a
if self.version == 'v1a':
entry = self._ent_struct.unpack_from(self.data, self.position)
entry = dict(zip(self._ent_names, entry))
# if this is actually v1, the blanks are pushed to local_ip
if entry['blank0'] != 0 or entry['blank1'] != 0:
self.version = 'v1'
self._set_entry_type()
def _set_header_type(self):
header = self._header[self.version]
self._hdr_names = map(lambda x: x[0], header)
header_format = map(lambda x: x[1], header)
self._hdr_struct = struct.Struct(''.join(header_format))
def _set_entry_type(self):
entry = self._entry[self.version]
self._ent_names = map(lambda x: x[0], entry)
entry_format = map(lambda x: x[1], entry)
self._ent_struct = struct.Struct(''.join(entry_format))
def parse_header(self):
"""Parse only the header data, don't parse the file."""
# read the header data first
data = self._hdr_struct.unpack_from(self.data, self.position)
self.position += self._hdr_struct.size
return dict(zip(self._hdr_names, data))
def parse_entry(self):
"""Parse a single entry from the file."""
# read an entry
entry = self._ent_struct.unpack_from(self.data, self.position)
self.position += self._ent_struct.size
# format the entry
entry = dict(zip(self._ent_names, entry))
if self.version in ('v1', 'v1a'):
entry['l3_protocol'] = socket.IPPROTO_IPIP
entry['local_flags'] = 0
entry['remote_flags'] = 0
entry['end_time'] = self.header['end_time']
if self.version == 'v1':
# version 1 does not have netids, so mimic what was expected
entry['local_netid'] = 1
entry['remote_netid'] = EXTERNAL_NETID
return entry
def parse(self):
"""Parse all entries, building a list."""
sessions = []
while self.position < self.data_len:
sessions.append(self.parse_entry())
return sessions
def parse_cb(self, callback):
"""Parse all entries using specified callback function."""
while self.position < self.data_len:
callback(self.parse_entry())
def parse_iter(self):
"""Parse all entries using generator pattern."""
while self.position < self.data_len:
yield self.parse_entry()
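# Example usage (added for illustration; 'pna.log' is a hypothetical capture
# file):
#
#   parser = PNALogParser('pna.log')
#   for entry in parser.parse_iter():
#       print entry['local_ip'], entry['remote_ip'], entry['octets_out']
#
# parse_iter() yields one entry at a time, so it avoids building the full
# session list in memory as parse() does.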
# simple command line version
if __name__ == '__main__':
if len(sys.argv) < 2:
print 'version:', __version__
print 'usage: %s <list of files>' % sys.argv[0]
sys.exit(1)
sessions = []
for f in sys.argv[1:]:
parser = PNALogParser(f)
sessions.extend(parser.parse())
print sessions
|
{
"content_hash": "85c88bcc6ee47b73334d3a290f2e76cb",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 76,
"avg_line_length": 39.51020408163265,
"alnum_prop": 0.5084366391184573,
"repo_name": "pcrowley/PNA",
"id": "9faaf8b0e9d491a2edd4a3da9e346eb6fd531d8b",
"size": "6430",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "util/intop/parse.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "73438"
},
{
"name": "Perl",
"bytes": "974"
},
{
"name": "Python",
"bytes": "65403"
},
{
"name": "Shell",
"bytes": "8281"
}
],
"symlink_target": ""
}
|
"""Module to interact with objects in a Java Virtual Machine from a
Python Virtual Machine.
Variables that might clash with the JVM start with an underscore
(Java naming conventions do not recommend starting names with an underscore,
so clashes are unlikely).
Created on Dec 3, 2009
:author: Barthelemy Dagenais
"""
from __future__ import unicode_literals, absolute_import
from collections import deque
import logging
import os
from pydoc import pager
import select
import socket
from subprocess import Popen, PIPE
import sys
from threading import Thread, RLock
import weakref
from py4j.compat import (
range, hasattr2, basestring, CompatThread, Queue, WeakSet)
from py4j.finalizer import ThreadSafeFinalizer
from py4j import protocol as proto
from py4j.protocol import (
Py4JError, Py4JNetworkError, escape_new_line, get_command_part,
get_return_value, is_error, register_output_converter, smart_decode)
from py4j.version import __version__
class NullHandler(logging.Handler):
def emit(self, record):
pass
null_handler = NullHandler()
logging.getLogger("py4j").addHandler(null_handler)
logger = logging.getLogger("py4j.java_gateway")
BUFFER_SIZE = 4096
DEFAULT_ADDRESS = "127.0.0.1"
DEFAULT_PORT = 25333
DEFAULT_PYTHON_PROXY_PORT = 25334
DEFAULT_CALLBACK_SERVER_ACCEPT_TIMEOUT = 5
PY4J_SKIP_COLLECTIONS = "PY4J_SKIP_COLLECTIONS"
PY4J_TRUE = set(["yes", "y", "t", "true"])
def set_default_callback_accept_timeout(accept_timeout):
"""Sets default accept timeout of callback server.
TODO: Create a CallbackServer parameter for this value. Because it is only
used during testing, this is not a major issue for now.
"""
global DEFAULT_CALLBACK_SERVER_ACCEPT_TIMEOUT
DEFAULT_CALLBACK_SERVER_ACCEPT_TIMEOUT = accept_timeout
def deprecated(name, last_version, use_instead="", level=logging.DEBUG,
raise_exc=False):
if not use_instead:
msg = "{0} is deprecated and will be removed in version {1}"\
.format(name, last_version)
else:
msg = "{0} is deprecated and will be removed in version {1}. "\
"Use {2} instead."\
.format(name, last_version, use_instead)
logger.log(level, msg)
if raise_exc:
raise DeprecationWarning(msg)
def java_import(jvm_view, import_str):
"""Imports the package or class specified by `import_str` in the
jvm view namespace.
:param jvm_view: The jvm_view in which to import a class/package.
    :param import_str: The class (e.g., java.util.List) or the package
(e.g., java.io.*) to import
"""
gateway_client = jvm_view._gateway_client
command = proto.JVMVIEW_COMMAND_NAME + proto.JVM_IMPORT_SUB_COMMAND_NAME +\
jvm_view._id + "\n" + escape_new_line(import_str) + "\n" +\
proto.END_COMMAND_PART
answer = gateway_client.send_command(command)
return_value = get_return_value(answer, gateway_client, None, None)
return return_value
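# Example (added for illustration; assumes a running JavaGateway):
#
#   java_import(gateway.jvm, "java.util.*")
#   array_list = gateway.jvm.ArrayList()   # resolved via the import above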
def find_jar_path():
"""Tries to find the path where the py4j jar is located.
"""
paths = []
jar_file = "py4j{0}.jar".format(__version__)
maven_jar_file = "py4j-{0}.jar".format(__version__)
paths.append(jar_file)
# ant
paths.append(os.path.join(os.path.dirname(
os.path.realpath(__file__)), "../../../py4j-java/" + jar_file))
# maven
paths.append(os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"../../../py4j-java/target" + maven_jar_file))
paths.append(os.path.join(os.path.dirname(
os.path.realpath(__file__)), "../share/py4j/" + jar_file))
paths.append("../../../current-release/" + jar_file)
paths.append(os.path.join(sys.prefix, "share/py4j/" + jar_file))
for path in paths:
if os.path.exists(path):
return path
return ""
def launch_gateway(port=0, jarpath="", classpath="", javaopts=[],
die_on_exit=False, redirect_stdout=None,
redirect_stderr=None, daemonize_redirect=True):
"""Launch a `Gateway` in a new Java process.
The redirect parameters accept file-like objects, Queue, or deque. When
text lines are sent to the stdout or stderr of the child JVM, these lines
are redirected to the file-like object (``write(line)``), the Queue
(``put(line)``), or the deque (``appendleft(line)``).
The text line will contain a newline character.
Only text output is accepted on stdout and stderr. If you wish to
communicate with the child JVM through bytes, you need to create your own
helper function.
:param port: the port to launch the Java Gateway on. If no port is
specified then an ephemeral port is used.
:param jarpath: the path to the Py4J jar. Only necessary if the jar
was installed at a non-standard location or if Python is using
a different `sys.prefix` than the one that Py4J was installed
under.
:param classpath: the classpath used to launch the Java Gateway.
:param javaopts: an array of extra options to pass to Java (the classpath
should be specified using the `classpath` parameter, not `javaopts`.)
:param die_on_exit: if `True`, the Java gateway process will die when
this Python process exits or is killed.
:param redirect_stdout: where to redirect the JVM stdout. If None (default)
stdout is redirected to os.devnull. Otherwise accepts a
file descriptor, a queue, or a deque. Will send one line at a time
to these objects.
    :param redirect_stderr: where to redirect the JVM stderr. If None (default)
stderr is redirected to os.devnull. Otherwise accepts a
file descriptor, a queue, or a deque. Will send one line at a time to
these objects.
:param daemonize_redirect: if True, the consumer threads will be daemonized
and will not prevent the main Python process from exiting. This means
the file descriptors (stderr, stdout, redirect_stderr, redirect_stdout)
might not be properly closed. This is not usually a problem, but in
case of errors related to file descriptors, set this flag to False.
:rtype: the port number of the `Gateway` server.
"""
if not jarpath:
jarpath = find_jar_path()
# Fail if the jar does not exist.
if not os.path.exists(jarpath):
raise Py4JError("Could not find py4j jar at {0}".format(jarpath))
# Launch the server in a subprocess.
classpath = os.pathsep.join((jarpath, classpath))
command = ["java", "-classpath", classpath] + javaopts + \
["py4j.GatewayServer"]
if die_on_exit:
command.append("--die-on-broken-pipe")
command.append(str(port))
logger.debug("Launching gateway with command {0}".format(command))
# stderr redirection
if redirect_stderr is None:
stderr = open(os.devnull, "w")
elif isinstance(redirect_stderr, Queue) or\
isinstance(redirect_stderr, deque):
stderr = PIPE
else:
stderr = redirect_stderr
# we don't need this anymore
redirect_stderr = None
# stdout redirection
if redirect_stdout is None:
redirect_stdout = open(os.devnull, "w")
proc = Popen(command, stdout=PIPE, stdin=PIPE, stderr=stderr)
# Determine which port the server started on (needed to support
# ephemeral ports)
_port = int(proc.stdout.readline())
# Start consumer threads so process does not deadlock/hangs
OutputConsumer(
redirect_stdout, proc.stdout, daemon=daemonize_redirect).start()
if redirect_stderr is not None:
OutputConsumer(
redirect_stderr, proc.stderr, daemon=daemonize_redirect).start()
ProcessConsumer(proc, [redirect_stdout], daemon=daemonize_redirect).start()
return _port
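# Example (added for illustration; requires a local Java installation and a
# py4j jar discoverable by find_jar_path):
#
#   port = launch_gateway(die_on_exit=True)
#   gateway = JavaGateway(gateway_parameters=GatewayParameters(port=port))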
def get_field(java_object, field_name):
"""Retrieves the field named `field_name` from the `java_object`.
This function is useful when `auto_field=false` in a gateway or
Java object.
:param java_object: the instance containing the field
:param field_name: the name of the field to retrieve
"""
command = proto.FIELD_COMMAND_NAME + proto.FIELD_GET_SUBCOMMAND_NAME +\
java_object._target_id + "\n" + field_name + "\n" +\
proto.END_COMMAND_PART
answer = java_object._gateway_client.send_command(command)
if answer == proto.NO_MEMBER_COMMAND or is_error(answer)[0]:
raise Py4JError("no field {0} in object {1}".format(
field_name, java_object._target_id))
else:
return get_return_value(
answer, java_object._gateway_client, java_object._target_id,
field_name)
def set_field(java_object, field_name, value):
"""Sets the field named `field_name` of `java_object` to `value`.
This function is the only way to set a field because the assignment
operator in Python cannot be overloaded.
:param java_object: the instance containing the field
:param field_name: the name of the field to set
:param value: the value to assign to the field
"""
command_part = get_command_part(
value,
java_object._gateway_client.gateway_property.pool)
command = proto.FIELD_COMMAND_NAME + proto.FIELD_SET_SUBCOMMAND_NAME +\
java_object._target_id + "\n" + field_name + "\n" +\
command_part + "\n" + proto.END_COMMAND_PART
answer = java_object._gateway_client.send_command(command)
if answer == proto.NO_MEMBER_COMMAND or is_error(answer)[0]:
raise Py4JError("no field {0} in object {1}".format(
field_name, java_object._target_id))
return get_return_value(
answer, java_object._gateway_client, java_object._target_id,
field_name)
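# Example (added for illustration; assumes java_object has a public field
# named "count", a hypothetical name):
#
#   set_field(java_object, "count", 10)
#   get_field(java_object, "count")   # -> 10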
def get_method(java_object, method_name):
"""Retrieves a reference to the method of an object.
This function is useful when `auto_field=true` and an instance field has
the same name as a method. The full signature of the method is not
required: it is determined when the method is called.
:param java_object: the instance containing the method
:param method_name: the name of the method to retrieve
"""
return JavaMember(
method_name, java_object, java_object._target_id,
java_object._gateway_client)
def is_instance_of(gateway, java_object, java_class):
"""Indicates whether a java object is an instance of the provided
java_class.
:param gateway: the JavaGateway instance
:param java_object: the JavaObject instance
:param java_class: can be a string (fully qualified name), a JavaClass
        instance, or a JavaObject instance
"""
if isinstance(java_class, basestring):
param = java_class
elif isinstance(java_class, JavaClass):
param = java_class._fqn
elif isinstance(java_class, JavaObject):
param = java_class.getClass()
else:
raise Py4JError(
"java_class must be a string, a JavaClass, or a JavaObject")
return gateway.jvm.py4j.reflection.TypeUtil.isInstanceOf(
param, java_object)
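# Example (added for illustration; assumes a running JavaGateway):
#
#   array_list = gateway.jvm.java.util.ArrayList()
#   is_instance_of(gateway, array_list, "java.util.List")   # True
#   is_instance_of(gateway, array_list, gateway.jvm.java.util.Map)   # False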
def quiet_close(closable):
"""Quietly closes a closable object without throwing an exception.
:param closable: Object with a ``close`` method.
"""
if closable is None:
        # Do not attempt to close a None; that would log unnecessary exceptions.
return
try:
closable.close()
except Exception:
logger.debug("Exception while closing", exc_info=True)
def quiet_shutdown(socket_instance):
"""Quietly shuts down a socket without throwing an exception.
:param socket_instance: Socket with ``shutdown`` method.
"""
if socket_instance is None:
        # Do not attempt to shut down a None; that would log unnecessary exceptions.
return
try:
socket_instance.shutdown(socket.SHUT_RDWR)
except Exception:
logger.debug("Exception while shutting down a socket", exc_info=True)
def gateway_help(gateway_client, var, pattern=None, short_name=True,
display=True):
"""Displays a help page about a class or an object.
    :param gateway_client: The gateway client
:param var: JavaObject, JavaClass or JavaMember for which a help page
will be generated.
:param pattern: Star-pattern used to filter the members. For example
"get*Foo" may return getMyFoo, getFoo, getFooBar, but not bargetFoo.
The pattern is matched against the entire signature. To match only
the name of a method, use "methodName(*".
:param short_name: If True, only the simple name of the parameter
types and return types will be displayed. If False, the fully
qualified name of the types will be displayed.
:param display: If True, the help page is displayed in an interactive
page similar to the `help` command in Python. If False, the page is
returned as a string.
"""
if hasattr2(var, "_get_object_id"):
command = proto.HELP_COMMAND_NAME +\
proto.HELP_OBJECT_SUBCOMMAND_NAME +\
var._get_object_id() + "\n" +\
get_command_part(pattern) +\
get_command_part(short_name) +\
proto.END_COMMAND_PART
answer = gateway_client.send_command(command)
elif hasattr2(var, "_fqn"):
command = proto.HELP_COMMAND_NAME +\
proto.HELP_CLASS_SUBCOMMAND_NAME +\
var._fqn + "\n" +\
get_command_part(pattern) +\
get_command_part(short_name) +\
proto.END_COMMAND_PART
answer = gateway_client.send_command(command)
elif hasattr2(var, "container") and hasattr2(var, "name"):
if pattern is not None:
raise Py4JError("pattern should be None with var is a JavaMember")
pattern = var.name + "(*"
var = var.container
return gateway_help(
gateway_client, var, pattern, short_name=short_name,
display=display)
else:
raise Py4JError(
"var is none of Java Object, Java Class or Java Member")
help_page = get_return_value(answer, gateway_client, None, None)
if (display):
pager(help_page)
else:
return help_page
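# Example (added for illustration; given a GatewayClient and a JavaObject):
#
#   gateway_help(gateway_client, java_object, pattern="get*")
#
# displays an interactive help page listing only the members whose signature
# starts with "get".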
def _garbage_collect_object(gateway_client, target_id):
try:
ThreadSafeFinalizer.remove_finalizer(
smart_decode(gateway_client.address) +
smart_decode(gateway_client.port) +
target_id)
if target_id != proto.ENTRY_POINT_OBJECT_ID and\
target_id != proto.GATEWAY_SERVER_OBJECT_ID and\
gateway_client.is_connected:
try:
gateway_client.send_command(
proto.MEMORY_COMMAND_NAME +
proto.MEMORY_DEL_SUBCOMMAND_NAME +
target_id +
"\ne\n")
except Exception:
logger.debug("Exception while garbage collecting an object",
exc_info=True)
except Exception:
logger.debug("Exception while garbage collecting an object",
exc_info=True)
def _garbage_collect_connection(socket_instance):
"""Closes the socket if auto_delete is True and the socket is opened.
This is an acceptable practice if you know that your Python VM implements
garbage collection and closing sockets immediately is not a concern.
Otherwise, it is always better (because it is predictable) to explicitly
close the socket by calling `GatewayConnection.close()`.
"""
if socket_instance is not None:
quiet_shutdown(socket_instance)
quiet_close(socket_instance)
class OutputConsumer(CompatThread):
"""Thread that consumes output
"""
def __init__(self, redirect, stream, *args, **kwargs):
super(OutputConsumer, self).__init__(*args, **kwargs)
self.redirect = redirect
self.stream = stream
if isinstance(redirect, Queue):
self.redirect_func = self._pipe_queue
if isinstance(redirect, deque):
self.redirect_func = self._pipe_deque
if hasattr2(redirect, "write"):
self.redirect_func = self._pipe_fd
def _pipe_queue(self, line):
self.redirect.put(line)
def _pipe_deque(self, line):
self.redirect.appendleft(line)
def _pipe_fd(self, line):
self.redirect.write(line)
def run(self):
lines_iterator = iter(self.stream.readline, b"")
for line in lines_iterator:
self.redirect_func(smart_decode(line))
class ProcessConsumer(CompatThread):
"""Thread that ensures process stdout and stderr are properly closed.
"""
def __init__(self, proc, closable_list, *args, **kwargs):
super(ProcessConsumer, self).__init__(*args, **kwargs)
self.proc = proc
if closable_list:
# We don't care if it contains queues or deques, quiet_close will
# just ignore them.
self.closable_list = closable_list
else:
self.closable_list = []
def run(self):
self.proc.wait()
quiet_close(self.proc.stdout)
quiet_close(self.proc.stderr)
for closable in self.closable_list:
quiet_close(closable)
class GatewayParameters(object):
"""Wrapper class that contains all parameters that can be passed to
configure a `JavaGateway`
"""
def __init__(
self, address=DEFAULT_ADDRESS, port=DEFAULT_PORT, auto_field=False,
auto_close=True, auto_convert=False, eager_load=False,
ssl_context=None):
"""
:param address: the address to which the client will request a
            connection. If you're passing an `SSLContext` with `check_hostname=True`
then this address must match (one of) the hostname(s) in the
certificate the gateway server presents.
:param port: the port to which the client will request a connection.
Default is 25333.
:param auto_field: if `False`, each object accessed through this
gateway won"t try to lookup fields (they will be accessible only by
calling get_field). If `True`, fields will be automatically looked
up, possibly hiding methods of the same name and making method calls
less efficient.
:param auto_close: if `True`, the connections created by the client
close the socket when they are garbage collected.
:param auto_convert: if `True`, try to automatically convert Python
objects like sequences and maps to Java Objects. Default value is
`False` to improve performance and because it is still possible to
explicitly perform this conversion.
:param eager_load: if `True`, the gateway tries to connect to the JVM
by calling System.currentTimeMillis. If the gateway cannot connect to
the JVM, it shuts down itself and raises an exception.
:param ssl_context: if not None, SSL connections will be made using
this SSLContext
"""
self.address = address
self.port = port
self.auto_field = auto_field
self.auto_close = auto_close
self.auto_convert = auto_convert
self.eager_load = eager_load
self.ssl_context = ssl_context
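# Example (added for illustration): parameters for a gateway listening on a
# non-default port, with automatic Python-to-Java collection conversion:
#
#   params = GatewayParameters(port=25335, auto_convert=True)
#   gateway = JavaGateway(gateway_parameters=params)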
class CallbackServerParameters(object):
"""Wrapper class that contains all parameters that can be passed to
configure a `CallbackServer`
"""
def __init__(
self, address=DEFAULT_ADDRESS, port=DEFAULT_PYTHON_PROXY_PORT,
daemonize=False, daemonize_connections=False, eager_load=True,
ssl_context=None):
"""
:param address: the address to which the client will request a
connection
:param port: the port to which the client will request a connection.
            Default is 25334.
:param daemonize: If `True`, will set the daemon property of the server
thread to True. The callback server will exit automatically if all
the other threads exit.
:param daemonize_connections: If `True`, callback server connections
are executed in daemonized threads and will not block the exit of a
program if non daemonized threads are finished.
:param eager_load: If `True`, the callback server is automatically
started when the JavaGateway is created.
:param ssl_context: if not None, the SSLContext's certificate will be
presented to callback connections.
"""
self.address = address
self.port = port
self.daemonize = daemonize
self.daemonize_connections = daemonize_connections
self.eager_load = eager_load
self.ssl_context = ssl_context
class DummyRLock(object):
def __init__(self):
pass
def acquire(self, blocking=1):
pass
def release(self):
pass
def __enter__(self):
pass
def __exit__(self, type, value, tb):
pass
class GatewayConnectionGuard(object):
def __init__(self, client, connection):
self._client = client
self._connection = connection
def __enter__(self):
return self
def read(self, hint=-1):
return self._connection.stream.read(hint)
def __exit__(self, type, value, traceback):
if value is None:
self._client._give_back_connection(self._connection)
else:
self._connection.close()
class GatewayClient(object):
"""Responsible for managing connections to the JavaGateway.
This implementation is thread-safe and connections are created on-demand.
This means that Py4J-Python can be accessed by multiple threads and
messages are sent to and processed concurrently by the Java Gateway.
When creating a custom :class:`JavaGateway`, it is recommended to pass an
instance of :class:`GatewayClient` instead of a :class:`GatewayConnection`:
both have the same interface, but the client supports multiple threads and
connections, which is essential when using callbacks. """
def __init__(self, address=DEFAULT_ADDRESS, port=25333, auto_close=True,
gateway_property=None, ssl_context=None):
"""
:param address: the address to which the client will request a
connection
:param port: the port to which the client will request a connection.
Default is 25333.
:param auto_close: if `True`, the connections created by the client
close the socket when they are garbage collected.
:param gateway_property: used to keep gateway preferences without a
cycle with the gateway
:param ssl_context: if not None, SSL connections will be made using
this SSLContext
"""
self.address = address
self.port = port
self.is_connected = True
self.auto_close = auto_close
self.gateway_property = gateway_property
self.ssl_context = ssl_context
self.deque = deque()
def _get_connection(self):
if not self.is_connected:
raise Py4JNetworkError("Gateway is not connected.")
try:
connection = self.deque.pop()
except IndexError:
connection = self._create_connection()
return connection
def _create_connection(self):
connection = GatewayConnection(
self.address, self.port, self.auto_close, self.gateway_property,
self.ssl_context)
connection.start()
return connection
def _give_back_connection(self, connection):
try:
self.deque.append(connection)
except Exception:
logger.warning(
"Exception while giving back connection", exc_info=True)
def shutdown_gateway(self):
"""Sends a shutdown command to the gateway. This will close the
gateway server: all active connections will be closed. This may
be useful if the lifecycle of the Java program must be tied to
the Python program.
"""
connection = self._get_connection()
try:
connection.shutdown_gateway()
self.close()
self.is_connected = False
except Py4JNetworkError:
logger.debug("Error while shutting down gateway.", exc_info=True)
self.shutdown_gateway()
def send_command(self, command, retry=True, binary=False):
"""Sends a command to the JVM. This method is not intended to be
called directly by Py4J users. It is usually called by
:class:`JavaMember` instances.
:param command: the `string` command to send to the JVM. The command
must follow the Py4J protocol.
:param retry: if `True`, the GatewayClient tries to resend a message
if it fails.
:param binary: if `True`, we won't wait for a Py4J-protocol response
from the other end; we'll just return the raw connection to the
caller. The caller becomes the owner of the connection, and is
            responsible for closing the connection (or returning it to this
`GatewayClient` pool using `_give_back_connection`).
:rtype: the `string` answer received from the JVM (The answer follows
the Py4J protocol). The guarded `GatewayConnection` is also returned
if `binary` is `True`.
"""
connection = self._get_connection()
try:
response = connection.send_command(command)
if binary:
return response, self._create_connection_guard(connection)
else:
self._give_back_connection(connection)
except Py4JNetworkError:
if connection:
connection.close()
if self._should_retry(retry, connection):
                logger.info("Exception while sending command.", exc_info=True)
response = self.send_command(command, binary=binary)
else:
                logger.exception(
                    "Exception while sending command.")
response = proto.ERROR
return response
def _create_connection_guard(self, connection):
return GatewayConnectionGuard(self, connection)
def _should_retry(self, retry, connection):
return retry
def close(self):
"""Closes all currently opened connections.
This operation is not thread safe and is only a best effort strategy
to close active connections.
All connections are guaranteed to be closed only if no other thread
is accessing the client and no call is pending.
"""
size = len(self.deque)
for _ in range(0, size):
try:
connection = self.deque.pop()
quiet_close(connection)
except IndexError:
pass
class GatewayConnection(object):
"""Default gateway connection (socket based) responsible for communicating
with the Java Virtual Machine."""
def __init__(self, address=DEFAULT_ADDRESS, port=25333, auto_close=True,
gateway_property=None, ssl_context=None):
"""
:param address: the address to which the connection will be established
:param port: the port to which the connection will be established.
Default is 25333.
:param auto_close: if `True`, the connection closes the socket when it
is garbage collected.
:param gateway_property: contains gateway preferences to avoid a cycle
with gateway
:param ssl_context: if not None, SSL connections will be made using
this SSLContext
"""
self.address = address
self.port = port
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if ssl_context:
self.socket = ssl_context.wrap_socket(
self.socket, server_hostname=address)
self.is_connected = False
self.auto_close = auto_close
self.gateway_property = gateway_property
self.wr = weakref.ref(
self,
lambda wr, socket_instance=self.socket:
_garbage_collect_connection and
_garbage_collect_connection(socket_instance))
def start(self):
"""Starts the connection by connecting to the `address` and the `port`
"""
try:
self.socket.connect((self.address, self.port))
self.is_connected = True
self.stream = self.socket.makefile("rb", 0)
except Exception as e:
msg = "An error occurred while trying to connect to the Java "\
"server ({0}:{1})".format(self.address, self.port)
logger.exception(msg)
raise Py4JNetworkError(msg, e)
def close(self):
"""Closes the connection by closing the socket."""
quiet_close(self.stream)
quiet_shutdown(self.socket)
quiet_close(self.socket)
self.is_connected = False
def shutdown_gateway(self):
"""Sends a shutdown command to the gateway. This will close the gateway
server: all active connections will be closed. This may be useful
if the lifecycle of the Java program must be tied to the Python
program.
"""
if not self.is_connected:
raise Py4JError("Gateway must be connected to send shutdown cmd.")
try:
quiet_close(self.stream)
self.socket.sendall(
proto.SHUTDOWN_GATEWAY_COMMAND_NAME.encode("utf-8"))
quiet_close(self.socket)
self.is_connected = False
except Exception:
# Do nothing! Exceptions might occur anyway.
logger.debug("Exception occurred while shutting down gateway",
exc_info=True)
def send_command(self, command):
"""Sends a command to the JVM. This method is not intended to be
called directly by Py4J users: it is usually called by JavaMember
instances.
:param command: the `string` command to send to the JVM. The command
must follow the Py4J protocol.
:rtype: the `string` answer received from the JVM (The answer follows
the Py4J protocol).
"""
logger.debug("Command to send: {0}".format(command))
try:
self.socket.sendall(command.encode("utf-8"))
answer = smart_decode(self.stream.readline()[:-1])
logger.debug("Answer received: {0}".format(answer))
if answer.startswith(proto.RETURN_MESSAGE):
answer = answer[1:]
            # Happens when the other end is dead. There might be an empty
# answer before the socket raises an error.
if answer.strip() == "":
self.close()
raise Py4JError("Answer from Java side is empty")
return answer
except Exception as e:
logger.exception("Error while sending or receiving.")
raise Py4JNetworkError("Error while sending or receiving", e)
class JavaMember(object):
"""Represents a member (i.e., method) of a :class:`JavaObject`. For now,
only methods are supported. Fields are retrieved directly and are not
contained in a JavaMember.
"""
def __init__(self, name, container, target_id, gateway_client):
self.name = name
self.container = container
self.target_id = target_id
self.gateway_client = gateway_client
self.command_header = self.target_id + "\n" + self.name + "\n"
self.pool = self.gateway_client.gateway_property.pool
self.converters = self.gateway_client.converters
self._gateway_doc = None
@property
def __doc__(self):
# The __doc__ string is used by IPython/PyDev/etc to generate
# help string, therefore provide useful help
if self._gateway_doc is None:
self._gateway_doc = gateway_help(
self.gateway_client, self, display=False)
return self._gateway_doc
def _get_args(self, args):
temp_args = []
new_args = []
for arg in args:
if not isinstance(arg, JavaObject):
for converter in self.gateway_client.converters:
if converter.can_convert(arg):
temp_arg = converter.convert(arg, self.gateway_client)
temp_args.append(temp_arg)
new_args.append(temp_arg)
break
else:
new_args.append(arg)
else:
new_args.append(arg)
return (new_args, temp_args)
def _build_args(self, *args):
if self.converters is not None and len(self.converters) > 0:
(new_args, temp_args) = self._get_args(args)
else:
new_args = args
temp_args = []
args_command = "".join(
[get_command_part(arg, self.pool) for arg in new_args])
return args_command, temp_args
def stream(self, *args):
"""
Call the method using the 'binary' protocol.
:rtype: The `GatewayConnection` that the call command was sent to.
"""
args_command, temp_args = self._build_args(*args)
command = proto.STREAM_COMMAND_NAME +\
self.command_header +\
args_command +\
proto.END_COMMAND_PART
answer, connection = self.gateway_client.send_command(
command, binary=True)
# parse the return value to throw an exception if necessary
get_return_value(
answer, self.gateway_client, self.target_id, self.name)
for temp_arg in temp_args:
temp_arg._detach()
return connection
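    # Example (added for illustration; "readBytes" is a hypothetical Java
    # method that streams binary data):
    #
    #   with java_object.readBytes.stream() as conn:
    #       chunk = conn.read(4096)
    #
    # The guard returned by stream() gives the connection back to the client
    # pool on a clean exit and closes it if an exception was raised.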
def __call__(self, *args):
args_command, temp_args = self._build_args(*args)
command = proto.CALL_COMMAND_NAME +\
self.command_header +\
args_command +\
proto.END_COMMAND_PART
answer = self.gateway_client.send_command(command)
return_value = get_return_value(
answer, self.gateway_client, self.target_id, self.name)
for temp_arg in temp_args:
temp_arg._detach()
return return_value
class JavaObject(object):
"""Represents a Java object from which you can call methods or access
fields."""
def __init__(self, target_id, gateway_client):
"""
:param target_id: the identifier of the object on the JVM side. Given
by the JVM.
:param gateway_client: the gateway client used to communicate with
the JVM.
"""
self._target_id = target_id
self._gateway_client = gateway_client
self._auto_field = gateway_client.gateway_property.auto_field
self._methods = {}
self._field_names = set()
self._fully_populated = False
self._gateway_doc = None
key = smart_decode(self._gateway_client.address) +\
smart_decode(self._gateway_client.port) +\
self._target_id
value = weakref.ref(
self,
lambda wr, cc=self._gateway_client, id=self._target_id:
_garbage_collect_object and _garbage_collect_object(cc, id))
ThreadSafeFinalizer.add_finalizer(key, value)
def _detach(self):
_garbage_collect_object(self._gateway_client, self._target_id)
def _get_object_id(self):
return self._target_id
@property
def __doc__(self):
# The __doc__ string is used by IPython/PyDev/etc to generate
# help string, therefore provide useful help
if self._gateway_doc is None:
self._gateway_doc = gateway_help(
self._gateway_client, self, display=False)
return self._gateway_doc
def __getattr__(self, name):
if name == "__call__":
# Provide an explicit definition for __call__ so that a JavaMember
# does not get created for it. This serves two purposes:
# 1) IPython (and others?) stop showing incorrect help indicating
# that this is callable
# 2) A TypeError(object not callable) is raised if someone does try
# to call here
raise AttributeError
if name not in self._methods:
if (self._auto_field):
(is_field, return_value) = self._get_field(name)
if (is_field):
self._field_names.add(name)
return return_value
# Theoretically, not thread safe, but the worst case scenario is
# cache miss or double overwrite of the same method...
self._methods[name] = JavaMember(
name, self, self._target_id, self._gateway_client)
# The name is a method
return self._methods[name]
def __dir__(self):
self._populate_fields()
return list(set(self._methods.keys()) | self._field_names)
def _populate_fields(self):
# Theoretically, not thread safe, but the worst case scenario is
# cache miss or double overwrite of the same method...
if not self._fully_populated:
if self._auto_field:
command = proto.DIR_COMMAND_NAME +\
proto.DIR_FIELDS_SUBCOMMAND_NAME +\
self._target_id + "\n" +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
return_value = get_return_value(
answer, self._gateway_client, self._target_id, "__dir__")
self._field_names.update(return_value.split("\n"))
command = proto.DIR_COMMAND_NAME +\
proto.DIR_METHODS_SUBCOMMAND_NAME +\
self._target_id + "\n" +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
return_value = get_return_value(
answer, self._gateway_client, self._target_id, "__dir__")
names = return_value.split("\n")
for name in names:
if name not in self._methods:
self._methods[name] = JavaMember(
name, self, self._target_id, self._gateway_client)
self._fully_populated = True
def _get_field(self, name):
command = proto.FIELD_COMMAND_NAME +\
proto.FIELD_GET_SUBCOMMAND_NAME +\
self._target_id + "\n" +\
name + "\n" +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
if answer == proto.NO_MEMBER_COMMAND or is_error(answer)[0]:
return (False, None)
else:
return_value = get_return_value(
answer, self._gateway_client, self._target_id, name)
return (True, return_value)
def __eq__(self, other):
if other is None:
return False
elif (hasattr2(other, "_get_object_id")):
return self.equals(other)
else:
return other.__eq__(self)
def __hash__(self):
return self.hashCode()
def __str__(self):
return self.toString()
def __repr__(self):
# For now...
return "JavaObject id=" + self._target_id
class JavaClass(object):
"""A `JavaClass` represents a Java Class from which static members can be
retrieved. `JavaClass` instances are also needed to initialize an array.
Usually, `JavaClass` are not initialized using their constructor, but
they are created while accessing the `jvm` property of a gateway, e.g.,
`gateway.jvm.java.lang.String`.
"""
def __init__(self, fqn, gateway_client):
self._fqn = fqn
self._gateway_client = gateway_client
self._pool = self._gateway_client.gateway_property.pool
self._command_header = fqn + "\n"
self._converters = self._gateway_client.converters
self._gateway_doc = None
self._statics = None
@property
def __doc__(self):
# The __doc__ string is used by IPython/PyDev/etc to generate
# help string, therefore provide useful help
if self._gateway_doc is None:
self._gateway_doc = gateway_help(
self._gateway_client, self, display=False)
return self._gateway_doc
def __dir__(self):
# Theoretically, not thread safe, but the worst case scenario is
# cache miss or double overwrite of the same method...
if self._statics is None:
command = proto.DIR_COMMAND_NAME +\
proto.DIR_STATIC_SUBCOMMAND_NAME +\
self._fqn + "\n" +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
return_value = get_return_value(
answer, self._gateway_client, self._fqn, "__dir__")
self._statics = return_value.split("\n")
return self._statics[:]
def __getattr__(self, name):
if name in ["__str__", "__repr__"]:
raise AttributeError
command = proto.REFLECTION_COMMAND_NAME +\
proto.REFL_GET_MEMBER_SUB_COMMAND_NAME +\
self._fqn + "\n" +\
name + "\n" +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
if len(answer) > 1 and answer[0] == proto.SUCCESS:
if answer[1] == proto.METHOD_TYPE:
return JavaMember(
name, None, proto.STATIC_PREFIX + self._fqn,
self._gateway_client)
elif answer[1].startswith(proto.CLASS_TYPE):
return JavaClass(
self._fqn + "$" + name, self._gateway_client)
else:
return get_return_value(
answer, self._gateway_client, self._fqn, name)
else:
raise Py4JError(
"{0}.{1} does not exist in the JVM".format(self._fqn, name))
def _get_args(self, args):
temp_args = []
new_args = []
for arg in args:
if not isinstance(arg, JavaObject):
for converter in self._converters:
if converter.can_convert(arg):
temp_arg = converter.convert(arg, self._gateway_client)
temp_args.append(temp_arg)
new_args.append(temp_arg)
break
else:
new_args.append(arg)
else:
new_args.append(arg)
return (new_args, temp_args)
def __call__(self, *args):
# TODO Refactor to use a mixin shared by JavaMember and JavaClass
if self._converters is not None and len(self._converters) > 0:
(new_args, temp_args) = self._get_args(args)
else:
new_args = args
temp_args = []
args_command = "".join(
[get_command_part(arg, self._pool) for arg in new_args])
command = proto.CONSTRUCTOR_COMMAND_NAME +\
self._command_header +\
args_command +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
return_value = get_return_value(
answer, self._gateway_client, None, self._fqn)
for temp_arg in temp_args:
temp_arg._detach()
return return_value
class UserHelpAutoCompletion(object):
"""
Type a package name or a class name.
For example with a JVMView called view:
>>> o = view.Object() # create a java.lang.Object
>>> random = view.jvm.java.util.Random() # create a java.util.Random
The default JVMView is in the gateway and is called:
>>> gateway.jvm
By default, java.lang.* is available in the view. To
add additional Classes/Packages, do:
>>> from py4j.java_gateway import java_import
>>> java_import(gateway.jvm, "com.example.Class1")
>>> instance = gateway.jvm.Class1()
Package and class completions are only available for
explicitly imported Java classes. For example, if you
java_import(gateway.jvm, "com.example.Class1")
then Class1 will appear in the completions.
"""
KEY = "<package or class name>"
class JavaPackage(object):
"""A `JavaPackage` represents part of a Java package from which Java
classes can be accessed.
Usually, `JavaPackage` are not initialized using their constructor, but
they are created while accessing the `jvm` property of a gateway, e.g.,
`gateway.jvm.java.lang`.
"""
def __init__(self, fqn, gateway_client, jvm_id=None):
self._fqn = fqn
self._gateway_client = gateway_client
        if jvm_id is None:
            self._jvm_id = proto.DEFAULT_JVM_ID
        else:
            self._jvm_id = jvm_id
def __dir__(self):
return [UserHelpAutoCompletion.KEY]
def __getattr__(self, name):
if name == UserHelpAutoCompletion.KEY:
return UserHelpAutoCompletion
if name in ["__str__", "__repr__"]:
raise AttributeError
if name == "__call__":
raise Py4JError("Trying to call a package.")
new_fqn = self._fqn + "." + name
command = proto.REFLECTION_COMMAND_NAME +\
proto.REFL_GET_UNKNOWN_SUB_COMMAND_NAME +\
new_fqn + "\n" +\
self._jvm_id + "\n" +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
if answer == proto.SUCCESS_PACKAGE:
return JavaPackage(new_fqn, self._gateway_client, self._jvm_id)
elif answer.startswith(proto.SUCCESS_CLASS):
return JavaClass(
answer[proto.CLASS_FQN_START:], self._gateway_client)
else:
raise Py4JError("{0} does not exist in the JVM".format(new_fqn))
class JVMView(object):
"""A `JVMView` allows access to the Java Virtual Machine of a
`JavaGateway`.
This can be used to reference static members (fields and methods) and
to call constructors.
"""
def __init__(self, gateway_client, jvm_name, id=None, jvm_object=None):
self._gateway_client = gateway_client
self._jvm_name = jvm_name
if id is not None:
self._id = id
elif jvm_object is not None:
self._id = proto.REFERENCE_TYPE + jvm_object._get_object_id()
# So that both JVMView instances (on Python and Java) have the
# same lifecycle. Theoretically, JVMView could inherit from
# JavaObject, but I would like to avoid the use of reflection
# for regular Py4J classes.
self._jvm_object = jvm_object
self._dir_sequence_and_cache = (None, [])
def __dir__(self):
command = proto.DIR_COMMAND_NAME +\
proto.DIR_JVMVIEW_SUBCOMMAND_NAME +\
self._id + "\n" +\
get_command_part(self._dir_sequence_and_cache[0]) + "\n" +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
return_value = get_return_value(
            answer, self._gateway_client, self._id, "__dir__")
if return_value is not None:
result = return_value.split("\n")
# Theoretically, not thread safe, but the worst case scenario is
# cache miss or double overwrite of the same method...
self._dir_sequence_and_cache = (
result[0], result[1:] + [UserHelpAutoCompletion.KEY])
return self._dir_sequence_and_cache[1][:]
def __getattr__(self, name):
if name == UserHelpAutoCompletion.KEY:
return UserHelpAutoCompletion()
answer = self._gateway_client.send_command(
proto.REFLECTION_COMMAND_NAME +
proto.REFL_GET_UNKNOWN_SUB_COMMAND_NAME + name + "\n" + self._id +
"\n" + proto.END_COMMAND_PART)
if answer == proto.SUCCESS_PACKAGE:
return JavaPackage(name, self._gateway_client, jvm_id=self._id)
elif answer.startswith(proto.SUCCESS_CLASS):
return JavaClass(
answer[proto.CLASS_FQN_START:], self._gateway_client)
else:
raise Py4JError("{0} does not exist in the JVM".format(name))
class GatewayProperty(object):
"""Object shared by callbackserver, gateway, and connections.
"""
def __init__(self, auto_field, pool):
self.auto_field = auto_field
self.pool = pool
class JavaGateway(object):
"""A `JavaGateway` is the main interaction point between a Python VM and
a JVM.
* A `JavaGateway` instance is connected to a `Gateway` instance on the
Java side.
* The `entry_point` field of a `JavaGateway` instance is connected to
the `Gateway.entryPoint` instance on the Java side.
* The `java_gateway_server` field of a `JavaGateway` instance is connected
to the `GatewayServer` instance on the Java side.
* The `jvm` field of `JavaGateway` enables user to access classes, static
members (fields and methods) and call constructors.
Methods that are not defined by `JavaGateway` are always redirected to
`entry_point`. For example, ``gateway.doThat()`` is equivalent to
``gateway.entry_point.doThat()``. This is a trade-off between convenience
and potential confusion.
"""
def __init__(
self, gateway_client=None, auto_field=False,
python_proxy_port=DEFAULT_PYTHON_PROXY_PORT,
start_callback_server=False, auto_convert=False, eager_load=False,
gateway_parameters=None, callback_server_parameters=None):
"""
:param gateway_parameters: An instance of `GatewayParameters` used to
configure the various options of the gateway.
:param callback_server_parameters: An instance of
`CallbackServerParameters` used to configure various options of the
gateway server. Must be provided to start a gateway server.
            Otherwise, callbacks won't be available.
"""
self.gateway_parameters = gateway_parameters
if not gateway_parameters:
self.gateway_parameters = GatewayParameters(
auto_field=auto_field, auto_convert=auto_convert,
eager_load=eager_load)
self.callback_server_parameters = callback_server_parameters
if not callback_server_parameters:
# No parameters were provided so do not autostart callback server.
self.callback_server_parameters = CallbackServerParameters(
port=python_proxy_port, eager_load=False)
# Check for deprecation warnings
if auto_field:
deprecated("JavaGateway.auto_field", "1.0", "GatewayParameters")
if auto_convert:
deprecated("JavaGateway.auto_convert", "1.0", "GatewayParameters")
if eager_load:
deprecated("JavaGateway.eager_load", "1.0", "GatewayParameters")
if start_callback_server:
deprecated(
"JavaGateway.start_callback_server and python_proxy_port",
"1.0", "CallbackServerParameters")
self.callback_server_parameters.eager_load = True
if gateway_client:
deprecated("JavaGateway.gateway_client", "1.0",
"GatewayParameters")
else:
gateway_client = self._create_gateway_client()
self.gateway_property = self._create_gateway_property()
self._python_proxy_port = python_proxy_port
# Setup gateway client
self.set_gateway_client(gateway_client)
# Setup callback server property
self._callback_server = None
if self.gateway_parameters.eager_load:
self._eager_load()
if self.callback_server_parameters.eager_load:
self.start_callback_server(self.callback_server_parameters)
def _create_gateway_client(self):
gateway_client = GatewayClient(
address=self.gateway_parameters.address,
port=self.gateway_parameters.port,
auto_close=self.gateway_parameters.auto_close,
ssl_context=self.gateway_parameters.ssl_context)
return gateway_client
def _create_gateway_property(self):
gateway_property = GatewayProperty(
self.gateway_parameters.auto_field, PythonProxyPool())
return gateway_property
def set_gateway_client(self, gateway_client):
"""Sets the gateway client for this JavaGateway. This sets the
appropriate gateway_property and resets the main jvm view (self.jvm).
        This is for advanced usage only, and should only be called before the
        gateway is loaded.
"""
if self.gateway_parameters.auto_convert:
gateway_client.converters = proto.INPUT_CONVERTER
else:
gateway_client.converters = None
gateway_client.gateway_property = self.gateway_property
self._gateway_client = gateway_client
self.entry_point = JavaObject(
proto.ENTRY_POINT_OBJECT_ID, self._gateway_client)
self.java_gateway_server = JavaObject(
proto.GATEWAY_SERVER_OBJECT_ID, self._gateway_client)
self.jvm = JVMView(
self._gateway_client, jvm_name=proto.DEFAULT_JVM_NAME,
id=proto.DEFAULT_JVM_ID)
def __getattr__(self, name):
return self.entry_point.__getattr__(name)
def _eager_load(self):
try:
self.jvm.System.currentTimeMillis()
except Exception:
self.shutdown()
raise
def get_callback_server(self):
return self._callback_server
def start_callback_server(self, callback_server_parameters=None):
"""Starts the callback server.
:param callback_server_parameters: parameters to use to start the
server. If not provided, it will use the gateway callback server
parameters.
:rtype: Returns True if the server was started by this call or False if
it was already started (you cannot have more than one started
callback server).
"""
if self._callback_server:
return False
if not callback_server_parameters:
callback_server_parameters = self.callback_server_parameters
self._callback_server = self._create_callback_server(
callback_server_parameters)
try:
self._callback_server.start()
except Py4JNetworkError:
# Clean up ourselves before raising the exception.
self.shutdown()
self._callback_server = None
raise
return True
def _create_callback_server(self, callback_server_parameters):
callback_server = CallbackServer(
self.gateway_property.pool, self._gateway_client,
callback_server_parameters=callback_server_parameters)
return callback_server
def new_jvm_view(self, name="custom jvm"):
"""Creates a new JVM view with its own imports. A JVM view ensures
that the import made in one view does not conflict with the import
of another view.
Generally, each Python module should have its own view (to replicate
Java behavior).
:param name: Optional name of the jvm view. Does not need to be
unique, i.e., two distinct views can have the same name
(internally, they will have a distinct id).
:rtype: A JVMView instance (same class as the gateway.jvm instance).
"""
command = proto.JVMVIEW_COMMAND_NAME +\
proto.JVM_CREATE_VIEW_SUB_COMMAND_NAME +\
get_command_part(name) +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
java_object = get_return_value(answer, self._gateway_client)
return JVMView(
gateway_client=self._gateway_client, jvm_name=name,
jvm_object=java_object)
def new_array(self, java_class, *dimensions):
"""Creates a Java array of type `java_class` of `dimensions`
:param java_class: The :class:`JavaClass` instance representing the
type of the array.
:param dimensions: A list of dimensions of the array. For example
`[1,2]` would produce an `array[1][2]`.
:rtype: A :class:`JavaArray <py4j.java_collections.JavaArray>`
instance.
"""
if len(dimensions) == 0:
raise Py4JError("new arrays must have at least one dimension")
command = proto.ARRAY_COMMAND_NAME +\
proto.ARRAY_CREATE_SUB_COMMAND_NAME +\
get_command_part(java_class._fqn)
for dimension in dimensions:
command += get_command_part(dimension)
command += proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
return get_return_value(answer, self._gateway_client)
def shutdown(self, raise_exception=False):
"""Shuts down the :class:`GatewayClient` and the
:class:`CallbackServer <py4j.java_callback.CallbackServer>`.
:param raise_exception: If `True`, raise an exception if an error
occurs while shutting down (very likely with sockets).
"""
try:
self._gateway_client.shutdown_gateway()
except Exception:
if raise_exception:
raise
else:
logger.info(
"Exception while shutting down callback server",
exc_info=True)
self.shutdown_callback_server()
def shutdown_callback_server(self, raise_exception=False):
"""Shuts down the
:class:`CallbackServer <py4j.java_callback.CallbackServer>`.
:param raise_exception: If `True`, raise an exception if an error
occurs while shutting down (very likely with sockets).
"""
if self._callback_server is None:
# Nothing to shutdown
return
try:
self._callback_server.shutdown()
except Exception:
if raise_exception:
raise
else:
logger.info(
"Exception while shutting down callback server",
exc_info=True)
def restart_callback_server(self):
"""Shuts down the callback server (if started) and restarts a new one.
"""
self.shutdown_callback_server()
self._callback_server = None
self.start_callback_server(self.callback_server_parameters)
def close(self, keep_callback_server=False):
"""Closes all gateway connections. A connection will be reopened if
necessary (e.g., if a :class:`JavaMethod` is called).
:param keep_callback_server: if `True`, the callback server is not
shut down.
"""
self._gateway_client.close()
if not keep_callback_server:
self.shutdown_callback_server()
def detach(self, java_object):
"""Makes the Java Gateway dereference this object.
The equivalent of this method is called when a JavaObject instance
        is garbage collected on the Python side. This method, or gc.collect(),
        should still be invoked when memory is limited or when too many objects
        are created on the Java side.
:param java_object: The JavaObject instance to dereference (free) on
the Java side.
"""
java_object._detach()
def help(self, var, pattern=None, short_name=True, display=True):
"""Displays a help page about a class or an object.
:param var: JavaObject, JavaClass or JavaMember for which a help page
will be generated.
:param pattern: Star-pattern used to filter the members. For example
"get\*Foo" may return getMyFoo, getFoo, getFooBar, but not
bargetFoo. The pattern is matched against the entire signature.
To match only the name of a method, use "methodName(\*".
:param short_name: If True, only the simple name of the parameter
types and return types will be displayed. If False, the fully
qualified name of the types will be displayed.
:param display: If True, the help page is displayed in an interactive
page similar to the `help` command in Python. If False, the page is
returned as a string.
"""
return gateway_help(
self._gateway_client, var, pattern, short_name, display)
@classmethod
def launch_gateway(
cls, port=0, jarpath="", classpath="", javaopts=[],
die_on_exit=False, redirect_stdout=None,
redirect_stderr=None, daemonize_redirect=True):
"""Launch a `Gateway` in a new Java process and create a default
:class:`JavaGateway <py4j.java_gateway.JavaGateway>` to connect to
it.
See :func:`launch_gateway <py4j.java_gateway.launch_gateway>` for more
information about this function.
:param port: the port to launch the Java Gateway on. If no port is
specified then an ephemeral port is used.
:param jarpath: the path to the Py4J jar. Only necessary if the jar
was installed at a non-standard location or if Python is using
a different `sys.prefix` than the one that Py4J was installed
under.
:param classpath: the classpath used to launch the Java Gateway.
:param javaopts: an array of extra options to pass to Java (the
classpath should be specified using the `classpath` parameter,
not `javaopts`.)
:param die_on_exit: if `True`, the Java gateway process will die when
this Python process exits or is killed.
:param redirect_stdout: where to redirect the JVM stdout.
If None (default)
stdout is redirected to os.devnull. Otherwise accepts a
file descriptor, a queue, or a deque. Will send one line at a time
to these objects.
        :param redirect_stderr: where to redirect the JVM stderr.
If None (default)
stderr is redirected to os.devnull. Otherwise accepts a
file descriptor, a queue, or a deque. Will send one line at a time
to these objects.
:param daemonize_redirect: if True, the consumer threads will be
daemonized and will not prevent the main Python process from
exiting. This means the file descriptors (stderr, stdout,
redirect_stderr, redirect_stdout) might not be properly closed.
This is not usually a problem, but in case of errors related
to file descriptors, set this flag to False.
:rtype: a :class:`JavaGateway <py4j.java_gateway.JavaGateway>`
connected to the `Gateway` server.
"""
_port = launch_gateway(
port, jarpath, classpath, javaopts, die_on_exit,
redirect_stdout=redirect_stdout, redirect_stderr=redirect_stderr,
daemonize_redirect=daemonize_redirect)
gateway = JavaGateway(gateway_parameters=GatewayParameters(port=_port))
return gateway
# CALLBACK SPECIFIC
class CallbackServer(object):
"""The CallbackServer is responsible for receiving call back connection
requests from the JVM. Usually connections are reused on the Java side,
but there is at least one connection per concurrent thread.
"""
def __init__(
self, pool, gateway_client, port=DEFAULT_PYTHON_PROXY_PORT,
address=DEFAULT_ADDRESS, callback_server_parameters=None):
"""
        :param pool: the pool responsible for tracking Python objects passed
            to the Java side.
:param gateway_client: the gateway client used to call Java objects.
:param callback_server_parameters: An instance of
`CallbackServerParameters` used to configure various options of the
callback server.
"""
self.gateway_client = gateway_client
self.callback_server_parameters = callback_server_parameters
if not callback_server_parameters:
deprecated(
"CallbackServer.port and address", "1.0",
"CallbackServerParameters")
self.callback_server_parameters = CallbackServerParameters(
address=address, port=port)
self.port = self.callback_server_parameters.port
self.address = self.callback_server_parameters.address
self.ssl_context = self.callback_server_parameters.ssl_context
self.pool = pool
self.connections = WeakSet()
# Lock is used to isolate critical region like connection creation.
        # Some code can produce exceptions when run in parallel, but
        # they will be caught and dealt with.
self.lock = RLock()
self.is_shutdown = False
def start(self):
"""Starts the CallbackServer. This method should be called by the
client instead of run()."""
self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server_socket.setsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
self.server_socket.bind((self.address, self.port))
self._listening_address, self._listening_port = \
self.server_socket.getsockname()
except Exception as e:
msg = "An error occurred while trying to start the callback "\
"server ({0}:{1})".format(self.address, self.port)
logger.exception(msg)
raise Py4JNetworkError(msg, e)
        # Maybe the thread needs to be cleaned up?
self.thread = Thread(target=self.run)
# Default is False
self.thread.daemon = self.callback_server_parameters.daemonize
self.thread.start()
def get_listening_port(self):
"""Returns the port on which the callback server is listening to.
Different than `port` when port is 0.
"""
return self._listening_port
def get_listening_address(self):
"""Returns the address on which the callback server is listening to.
May be different than `address` if `address` was an alias (e.g.,
localhost).
"""
return self._listening_address
def run(self):
"""Starts listening and accepting connection requests.
This method is called when invoking `CallbackServer.start()`. A
CallbackServer instance is created and started automatically when
a :class:`JavaGateway <py4j.java_gateway.JavaGateway>` instance is
created.
"""
try:
with self.lock:
self.is_shutdown = False
logger.info("Callback Server Starting")
self.server_socket.listen(5)
logger.info(
"Socket listening on {0}".
format(smart_decode(self.server_socket.getsockname())))
read_list = [self.server_socket]
while not self.is_shutdown:
readable, writable, errored = select.select(
read_list, [], [], DEFAULT_CALLBACK_SERVER_ACCEPT_TIMEOUT)
if self.is_shutdown:
break
for s in readable:
socket_instance, _ = self.server_socket.accept()
if self.ssl_context:
socket_instance = self.ssl_context.wrap_socket(
socket_instance, server_side=True)
input = socket_instance.makefile("rb", 0)
connection = self._create_connection(
socket_instance, input)
with self.lock:
if not self.is_shutdown:
self.connections.add(connection)
connection.start()
else:
quiet_shutdown(connection.socket)
quiet_close(connection.socket)
except Exception:
if self.is_shutdown:
logger.info("Error while waiting for a connection.")
else:
logger.exception("Error while waiting for a connection.")
def _create_connection(self, socket_instance, stream):
connection = CallbackConnection(
self.pool, stream, socket_instance, self.gateway_client,
self.callback_server_parameters)
return connection
def shutdown(self):
"""Stops listening and accepting connection requests. All live
connections are closed.
This method can safely be called by another thread.
"""
logger.info("Callback Server Shutting Down")
with self.lock:
self.is_shutdown = True
quiet_shutdown(self.server_socket)
quiet_close(self.server_socket)
self.server_socket = None
for connection in self.connections:
quiet_shutdown(connection.socket)
quiet_close(connection.socket)
self.pool.clear()
self.thread.join()
self.thread = None
class CallbackConnection(Thread):
"""A `CallbackConnection` receives callbacks and garbage collection
requests from the Java side.
"""
def __init__(
self, pool, input, socket_instance, gateway_client,
callback_server_parameters):
super(CallbackConnection, self).__init__()
self.pool = pool
self.input = input
self.socket = socket_instance
self.gateway_client = gateway_client
self.callback_server_parameters = callback_server_parameters
if not callback_server_parameters:
self.callback_server_parameters = CallbackServerParameters()
self.daemon = self.callback_server_parameters.daemonize_connections
def run(self):
logger.info("Callback Connection ready to receive messages")
try:
while True:
command = smart_decode(self.input.readline())[:-1]
obj_id = smart_decode(self.input.readline())[:-1]
logger.info(
"Received command {0} on object id {1}".
format(command, obj_id))
if obj_id is None or len(obj_id.strip()) == 0:
break
if command == proto.CALL_PROXY_COMMAND_NAME:
return_message = self._call_proxy(obj_id, self.input)
self.socket.sendall(return_message.encode("utf-8"))
elif command == proto.GARBAGE_COLLECT_PROXY_COMMAND_NAME:
self.input.readline()
del(self.pool[obj_id])
self.socket.sendall(
proto.SUCCESS_RETURN_MESSAGE.encode("utf-8"))
else:
logger.error("Unknown command {0}".format(command))
                    # We're sending something to prevent blocking, but at
                    # this point, the protocol is broken.
self.socket.sendall(
proto.ERROR_RETURN_MESSAGE.encode("utf-8"))
except Exception:
# This is a normal exception...
logger.info(
"Error while callback connection was waiting for"
"a message", exc_info=True)
logger.info("Closing down connection")
quiet_shutdown(self.socket)
quiet_close(self.socket)
def _call_proxy(self, obj_id, input):
return_message = proto.ERROR_RETURN_MESSAGE
if obj_id in self.pool:
try:
method = smart_decode(input.readline())[:-1]
params = self._get_params(input)
return_value = getattr(self.pool[obj_id], method)(*params)
return_message = proto.RETURN_MESSAGE + proto.SUCCESS +\
get_command_part(return_value, self.pool)
except Exception:
logger.exception("There was an exception while executing the "
"Python Proxy on the Python Side.")
return return_message
def _get_params(self, input):
params = []
temp = smart_decode(input.readline())[:-1]
while temp != proto.END:
param = get_return_value("y" + temp, self.gateway_client)
params.append(param)
temp = smart_decode(input.readline())[:-1]
return params
class PythonProxyPool(object):
"""A `PythonProxyPool` manages proxies that are passed to the Java side.
A proxy is a Python class that implements a Java interface.
A proxy has an internal class named `Java` with a member named
`implements` which is a list of fully qualified names (string) of the
implemented interfaces.
The `PythonProxyPool` implements a subset of the dict interface:
`pool[id]`, `del(pool[id])`, `pool.put(proxy)`, `pool.clear()`,
`id in pool`, `len(pool)`.
The `PythonProxyPool` is thread-safe.
"""
def __init__(self):
self.lock = RLock()
self.dict = {}
self.next_id = 0
def put(self, object, force_id=None):
"""Adds a proxy to the pool.
:param object: The proxy to add to the pool.
:rtype: A unique identifier associated with the object.
"""
with self.lock:
if force_id:
id = force_id
else:
id = proto.PYTHON_PROXY_PREFIX + smart_decode(self.next_id)
self.next_id += 1
self.dict[id] = object
return id
def __getitem__(self, key):
with self.lock:
return self.dict[key]
def __delitem__(self, key):
with self.lock:
del(self.dict[key])
def clear(self):
with self.lock:
self.dict.clear()
def __contains__(self, key):
with self.lock:
return key in self.dict
def __len__(self):
with self.lock:
return len(self.dict)
# Basic registration
register_output_converter(
proto.REFERENCE_TYPE,
lambda target_id, gateway_client: JavaObject(target_id, gateway_client))
if PY4J_SKIP_COLLECTIONS not in os.environ or\
os.environ[PY4J_SKIP_COLLECTIONS].lower() not in PY4J_TRUE:
__import__("py4j.java_collections")
|
{
"content_hash": "afde791a53086a7fa6d9ffc97be867e4",
"timestamp": "",
"source": "github",
"line_count": 1979,
"max_line_length": 79,
"avg_line_length": 37.77210712481051,
"alnum_prop": 0.611376436435633,
"repo_name": "jonahkichwacoders/py4j",
"id": "c00ef43b4d5fbc33eb3ae5b2429a1947d733f2db",
"size": "74775",
"binary": false,
"copies": "1",
"ref": "refs/heads/py4j-ease",
"path": "py4j-python/src/py4j/java_gateway.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2955"
},
{
"name": "CSS",
"bytes": "6557"
},
{
"name": "HTML",
"bytes": "9855"
},
{
"name": "Java",
"bytes": "551657"
},
{
"name": "Makefile",
"bytes": "2989"
},
{
"name": "Python",
"bytes": "237609"
}
],
"symlink_target": ""
}
|
import datetime
import time
import re
import redis
from collections import defaultdict
from operator import itemgetter
from pprint import pprint
from utils import log as logging
from utils import json_functions as json
from django.db import models, IntegrityError
from django.db.models import Q, F
from django.db.models import Count
from django.conf import settings
from django.contrib.auth.models import User
from django.core.cache import cache
from django.template.defaultfilters import slugify
from mongoengine.queryset import OperationError
from mongoengine.queryset import NotUniqueError
from apps.reader.managers import UserSubscriptionManager
from apps.rss_feeds.models import Feed, MStory, DuplicateFeed
from apps.rss_feeds.tasks import NewFeeds
from apps.analyzer.models import MClassifierFeed, MClassifierAuthor, MClassifierTag, MClassifierTitle
from apps.analyzer.models import apply_classifier_titles, apply_classifier_feeds, apply_classifier_authors, apply_classifier_tags
from apps.analyzer.tfidf import tfidf
from utils.feed_functions import add_object_to_folder, chunks
class UserSubscription(models.Model):
"""
A feed which a user has subscribed to. Carries all of the cached information
about the subscription, including unread counts of the three primary scores.
Also has a dirty flag (needs_unread_recalc) which means that the unread counts
are not accurate and need to be calculated with `self.calculate_feed_scores()`.
"""
UNREAD_CUTOFF = datetime.datetime.utcnow() - datetime.timedelta(days=settings.DAYS_OF_UNREAD)
user = models.ForeignKey(User, related_name='subscriptions')
feed = models.ForeignKey(Feed, related_name='subscribers')
user_title = models.CharField(max_length=255, null=True, blank=True)
active = models.BooleanField(default=False)
last_read_date = models.DateTimeField(default=UNREAD_CUTOFF)
mark_read_date = models.DateTimeField(default=UNREAD_CUTOFF)
unread_count_neutral = models.IntegerField(default=0)
unread_count_positive = models.IntegerField(default=0)
unread_count_negative = models.IntegerField(default=0)
unread_count_updated = models.DateTimeField(default=datetime.datetime.now)
oldest_unread_story_date = models.DateTimeField(default=datetime.datetime.now)
needs_unread_recalc = models.BooleanField(default=False)
feed_opens = models.IntegerField(default=0)
is_trained = models.BooleanField(default=False)
objects = UserSubscriptionManager()
def __unicode__(self):
return '[%s (%s): %s (%s)] ' % (self.user.username, self.user.pk,
self.feed.feed_title, self.feed.pk)
class Meta:
unique_together = ("user", "feed")
def canonical(self, full=False, include_favicon=True, classifiers=None):
feed = self.feed.canonical(full=full, include_favicon=include_favicon)
feed['feed_title'] = self.user_title or feed['feed_title']
feed['ps'] = self.unread_count_positive
feed['nt'] = self.unread_count_neutral
feed['ng'] = self.unread_count_negative
feed['active'] = self.active
feed['feed_opens'] = self.feed_opens
feed['subscribed'] = True
if classifiers:
feed['classifiers'] = classifiers
return feed
def save(self, *args, **kwargs):
user_title_max = self._meta.get_field('user_title').max_length
if self.user_title and len(self.user_title) > user_title_max:
self.user_title = self.user_title[:user_title_max]
try:
super(UserSubscription, self).save(*args, **kwargs)
except IntegrityError:
duplicate_feeds = DuplicateFeed.objects.filter(duplicate_feed_id=self.feed_id)
for duplicate_feed in duplicate_feeds:
already_subscribed = UserSubscription.objects.filter(user=self.user, feed=duplicate_feed.feed)
if not already_subscribed:
self.feed = duplicate_feed.feed
super(UserSubscription, self).save(*args, **kwargs)
break
else:
if self: self.delete()
@classmethod
def subs_for_feeds(cls, user_id, feed_ids=None, read_filter="unread"):
usersubs = cls.objects
if read_filter == "unread":
usersubs = usersubs.filter(Q(unread_count_neutral__gt=0) |
Q(unread_count_positive__gt=0))
if not feed_ids:
usersubs = usersubs.filter(user=user_id,
active=True).only('feed', 'mark_read_date', 'is_trained')
else:
usersubs = usersubs.filter(user=user_id,
active=True,
feed__in=feed_ids).only('feed', 'mark_read_date', 'is_trained')
return usersubs
@classmethod
def story_hashes(cls, user_id, feed_ids=None, usersubs=None, read_filter="unread", order="newest",
include_timestamps=False, group_by_feed=True, cutoff_date=None,
across_all_feeds=True):
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
pipeline = r.pipeline()
story_hashes = {} if group_by_feed else []
if not feed_ids and not across_all_feeds:
return story_hashes
if not usersubs:
usersubs = cls.subs_for_feeds(user_id, feed_ids=feed_ids, read_filter=read_filter)
feed_ids = [sub.feed_id for sub in usersubs]
if not feed_ids:
return story_hashes
read_dates = dict((us.feed_id, int(us.mark_read_date.strftime('%s'))) for us in usersubs)
current_time = int(time.time() + 60*60*24)
if not cutoff_date:
cutoff_date = datetime.datetime.now() - datetime.timedelta(days=settings.DAYS_OF_STORY_HASHES)
unread_timestamp = int(time.mktime(cutoff_date.timetuple()))-1000
feed_counter = 0
for feed_id_group in chunks(feed_ids, 20):
pipeline = r.pipeline()
for feed_id in feed_id_group:
stories_key = 'F:%s' % feed_id
sorted_stories_key = 'zF:%s' % feed_id
read_stories_key = 'RS:%s:%s' % (user_id, feed_id)
unread_stories_key = 'U:%s:%s' % (user_id, feed_id)
unread_ranked_stories_key = 'zU:%s:%s' % (user_id, feed_id)
expire_unread_stories_key = False
max_score = current_time
if read_filter == 'unread':
# +1 for the intersection b/w zF and F, which carries an implicit score of 1.
min_score = read_dates[feed_id] + 1
pipeline.sdiffstore(unread_stories_key, stories_key, read_stories_key)
expire_unread_stories_key = True
else:
min_score = unread_timestamp
unread_stories_key = stories_key
if order == 'oldest':
byscorefunc = pipeline.zrangebyscore
else:
byscorefunc = pipeline.zrevrangebyscore
min_score, max_score = max_score, min_score
pipeline.zinterstore(unread_ranked_stories_key, [sorted_stories_key, unread_stories_key])
byscorefunc(unread_ranked_stories_key, min_score, max_score, withscores=include_timestamps)
pipeline.delete(unread_ranked_stories_key)
if expire_unread_stories_key:
pipeline.delete(unread_stories_key)
results = pipeline.execute()
for hashes in results:
if not isinstance(hashes, list): continue
if group_by_feed:
story_hashes[feed_ids[feed_counter]] = hashes
feed_counter += 1
else:
story_hashes.extend(hashes)
return story_hashes
def get_stories(self, offset=0, limit=6, order='newest', read_filter='all', withscores=False,
hashes_only=False, cutoff_date=None, default_cutoff_date=None):
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
rt = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_TEMP_POOL)
ignore_user_stories = False
stories_key = 'F:%s' % (self.feed_id)
read_stories_key = 'RS:%s:%s' % (self.user_id, self.feed_id)
unread_stories_key = 'U:%s:%s' % (self.user_id, self.feed_id)
unread_ranked_stories_key = 'z%sU:%s:%s' % ('h' if hashes_only else '',
self.user_id, self.feed_id)
if withscores or not offset or not rt.exists(unread_ranked_stories_key):
rt.delete(unread_ranked_stories_key)
if not r.exists(stories_key):
# print " ---> No stories on feed: %s" % self
return []
elif read_filter == 'all' or not r.exists(read_stories_key):
ignore_user_stories = True
unread_stories_key = stories_key
else:
r.sdiffstore(unread_stories_key, stories_key, read_stories_key)
sorted_stories_key = 'zF:%s' % (self.feed_id)
r.zinterstore(unread_ranked_stories_key, [sorted_stories_key, unread_stories_key])
if not ignore_user_stories:
r.delete(unread_stories_key)
dump = r.dump(unread_ranked_stories_key)
if dump:
pipeline = rt.pipeline()
pipeline.delete(unread_ranked_stories_key)
pipeline.restore(unread_ranked_stories_key, 1*60*60*1000, dump)
pipeline.execute()
r.delete(unread_ranked_stories_key)
current_time = int(time.time() + 60*60*24)
if not cutoff_date:
if read_filter == "unread":
cutoff_date = self.mark_read_date
elif default_cutoff_date:
cutoff_date = default_cutoff_date
else:
cutoff_date = datetime.datetime.now() - datetime.timedelta(days=settings.DAYS_OF_UNREAD)
if order == 'oldest':
byscorefunc = rt.zrangebyscore
if read_filter == 'unread':
min_score = int(time.mktime(cutoff_date.timetuple())) + 1
else:
min_score = int(time.mktime(cutoff_date.timetuple())) - 1000
max_score = current_time
else:
byscorefunc = rt.zrevrangebyscore
min_score = current_time
if read_filter == 'unread':
# +1 for the intersection b/w zF and F, which carries an implicit score of 1.
max_score = int(time.mktime(cutoff_date.timetuple())) + 1
else:
max_score = 0
if settings.DEBUG and False:
debug_stories = rt.zrevrange(unread_ranked_stories_key, 0, -1, withscores=True)
print " ---> Unread all stories (%s - %s) %s stories: %s" % (
min_score,
max_score,
len(debug_stories),
debug_stories)
story_ids = byscorefunc(unread_ranked_stories_key, min_score,
max_score, start=offset, num=500,
withscores=withscores)[:limit]
if withscores:
story_ids = [(s[0], int(s[1])) for s in story_ids]
if withscores or hashes_only:
return story_ids
elif story_ids:
story_date_order = "%sstory_date" % ('' if order == 'oldest' else '-')
mstories = MStory.objects(story_hash__in=story_ids).order_by(story_date_order)
stories = Feed.format_stories(mstories)
return stories
else:
return []
@classmethod
def feed_stories(cls, user_id, feed_ids=None, offset=0, limit=6,
order='newest', read_filter='all', usersubs=None, cutoff_date=None,
all_feed_ids=None, cache_prefix=""):
rt = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_TEMP_POOL)
across_all_feeds = False
if order == 'oldest':
range_func = rt.zrange
else:
range_func = rt.zrevrange
if feed_ids is None:
across_all_feeds = True
feed_ids = []
if not all_feed_ids:
all_feed_ids = [f for f in feed_ids]
# feeds_string = ""
feeds_string = ','.join(str(f) for f in sorted(all_feed_ids))[:30]
ranked_stories_keys = '%szU:%s:feeds:%s' % (cache_prefix, user_id, feeds_string)
unread_ranked_stories_keys = '%szhU:%s:feeds:%s' % (cache_prefix, user_id, feeds_string)
stories_cached = rt.exists(ranked_stories_keys)
unreads_cached = True if read_filter == "unread" else rt.exists(unread_ranked_stories_keys)
if offset and stories_cached and unreads_cached:
story_hashes = range_func(ranked_stories_keys, offset, limit)
if read_filter == "unread":
unread_story_hashes = story_hashes
else:
unread_story_hashes = range_func(unread_ranked_stories_keys, 0, offset+limit)
return story_hashes, unread_story_hashes
else:
rt.delete(ranked_stories_keys)
rt.delete(unread_ranked_stories_keys)
story_hashes = cls.story_hashes(user_id, feed_ids=feed_ids,
read_filter=read_filter, order=order,
include_timestamps=True,
group_by_feed=False,
usersubs=usersubs,
cutoff_date=cutoff_date,
across_all_feeds=across_all_feeds)
if not story_hashes:
return [], []
pipeline = rt.pipeline()
for story_hash_group in chunks(story_hashes, 100):
pipeline.zadd(ranked_stories_keys, **dict(story_hash_group))
pipeline.execute()
story_hashes = range_func(ranked_stories_keys, offset, limit)
if read_filter == "unread":
unread_feed_story_hashes = story_hashes
rt.zunionstore(unread_ranked_stories_keys, [ranked_stories_keys])
else:
unread_story_hashes = cls.story_hashes(user_id, feed_ids=feed_ids,
read_filter="unread", order=order,
include_timestamps=True,
group_by_feed=False,
cutoff_date=cutoff_date)
if unread_story_hashes:
for unread_story_hash_group in chunks(unread_story_hashes, 100):
rt.zadd(unread_ranked_stories_keys, **dict(unread_story_hash_group))
unread_feed_story_hashes = range_func(unread_ranked_stories_keys, offset, limit)
rt.expire(ranked_stories_keys, 60*60)
rt.expire(unread_ranked_stories_keys, 60*60)
return story_hashes, unread_feed_story_hashes
@classmethod
def add_subscription(cls, user, feed_address, folder=None, bookmarklet=False, auto_active=True,
skip_fetch=False):
feed = None
us = None
logging.user(user, "~FRAdding URL: ~SB%s (in %s) %s" % (feed_address, folder,
"~FCAUTO-ADD" if not auto_active else ""))
feed = Feed.get_feed_from_url(feed_address)
if not feed:
code = -1
if bookmarklet:
message = "This site does not have an RSS feed. Nothing is linked to from this page."
else:
message = "This address does not point to an RSS feed or a website with an RSS feed."
else:
us, subscription_created = cls.objects.get_or_create(
feed=feed,
user=user,
defaults={
'needs_unread_recalc': True,
'active': auto_active,
}
)
code = 1
message = ""
if us:
user_sub_folders_object, created = UserSubscriptionFolders.objects.get_or_create(
user=user,
defaults={'folders': '[]'}
)
if created:
user_sub_folders = []
else:
user_sub_folders = json.decode(user_sub_folders_object.folders)
user_sub_folders = add_object_to_folder(feed.pk, folder, user_sub_folders)
user_sub_folders_object.folders = json.encode(user_sub_folders)
user_sub_folders_object.save()
if auto_active or user.profile.is_premium:
us.active = True
us.save()
if not skip_fetch and feed.last_update < datetime.datetime.utcnow() - datetime.timedelta(days=1):
feed = feed.update()
from apps.social.models import MActivity
MActivity.new_feed_subscription(user_id=user.pk, feed_id=feed.pk, feed_title=feed.title)
feed.setup_feed_for_premium_subscribers()
return code, message, us
@classmethod
def feeds_with_updated_counts(cls, user, feed_ids=None, check_fetch_status=False, force=False):
feeds = {}
# Get subscriptions for user
user_subs = cls.objects.select_related('feed').filter(user=user, active=True)
        if feed_ids:
            feed_ids = [f for f in feed_ids if f and not f.startswith('river')]
            user_subs = user_subs.filter(feed__in=feed_ids)
for i, sub in enumerate(user_subs):
# Count unreads if subscription is stale.
if (force or
sub.needs_unread_recalc or
sub.unread_count_updated < user.profile.unread_cutoff or
sub.oldest_unread_story_date < user.profile.unread_cutoff):
sub = sub.calculate_feed_scores(silent=True, force=force)
if not sub: continue # TODO: Figure out the correct sub and give it a new feed_id
feed_id = sub.feed_id
feeds[feed_id] = {
'ps': sub.unread_count_positive,
'nt': sub.unread_count_neutral,
'ng': sub.unread_count_negative,
'id': feed_id,
}
if not sub.feed.fetched_once or check_fetch_status:
feeds[feed_id]['fetched_once'] = sub.feed.fetched_once
feeds[feed_id]['not_yet_fetched'] = not sub.feed.fetched_once # Legacy. Dammit.
if sub.feed.favicon_fetching:
feeds[feed_id]['favicon_fetching'] = True
if sub.feed.has_feed_exception or sub.feed.has_page_exception:
feeds[feed_id]['has_exception'] = True
feeds[feed_id]['exception_type'] = 'feed' if sub.feed.has_feed_exception else 'page'
feeds[feed_id]['feed_address'] = sub.feed.feed_address
feeds[feed_id]['exception_code'] = sub.feed.exception_code
return feeds
@classmethod
def queue_new_feeds(cls, user, new_feeds=None):
if not isinstance(user, User):
user = User.objects.get(pk=user)
if not new_feeds:
new_feeds = cls.objects.filter(user=user,
feed__fetched_once=False,
active=True).values('feed_id')
new_feeds = list(set([f['feed_id'] for f in new_feeds]))
if not new_feeds:
return
logging.user(user, "~BB~FW~SBQueueing NewFeeds: ~FC(%s) %s" % (len(new_feeds), new_feeds))
size = 4
for t in (new_feeds[pos:pos + size] for pos in xrange(0, len(new_feeds), size)):
NewFeeds.apply_async(args=(t,), queue="new_feeds")
@classmethod
def refresh_stale_feeds(cls, user, exclude_new=False):
if not isinstance(user, User):
user = User.objects.get(pk=user)
stale_cutoff = datetime.datetime.now() - datetime.timedelta(days=settings.SUBSCRIBER_EXPIRE)
# TODO: Refactor below using last_update from REDIS_FEED_UPDATE_POOL
stale_feeds = UserSubscription.objects.filter(user=user, active=True, feed__last_update__lte=stale_cutoff)
if exclude_new:
stale_feeds = stale_feeds.filter(feed__fetched_once=True)
all_feeds = UserSubscription.objects.filter(user=user, active=True)
logging.user(user, "~FG~BBRefreshing stale feeds: ~SB%s/%s" % (
stale_feeds.count(), all_feeds.count()))
for sub in stale_feeds:
sub.feed.fetched_once = False
sub.feed.save()
if stale_feeds:
stale_feeds = list(set([f.feed_id for f in stale_feeds]))
cls.queue_new_feeds(user, new_feeds=stale_feeds)
@classmethod
def identify_deleted_feed_users(cls, old_feed_id):
users = UserSubscriptionFolders.objects.filter(folders__contains=old_feed_id).only('user')
user_ids = [usf.user_id for usf in users]
        with open('utils/backups/users.txt', 'w') as f:
            f.write('\n'.join([str(u) for u in user_ids]))
return user_ids
@classmethod
def recreate_deleted_feed(cls, new_feed_id, old_feed_id=None, skip=0):
user_ids = sorted([int(u) for u in open('utils/backups/users.txt').read().split('\n') if u])
count = len(user_ids)
for i, user_id in enumerate(user_ids):
if i < skip: continue
if i % 1000 == 0:
print "\n\n ------------------------------------------------"
print "\n ---> %s/%s (%s%%)" % (i, count, round(float(i)/count))
print "\n ------------------------------------------------\n"
try:
user = User.objects.get(pk=user_id)
except User.DoesNotExist:
print " ***> %s has no account" % user_id
continue
us, created = UserSubscription.objects.get_or_create(user_id=user_id, feed_id=new_feed_id, defaults={
'needs_unread_recalc': True,
'active': True,
'is_trained': True
})
if not created:
print " ***> %s already subscribed" % user.username
try:
usf = UserSubscriptionFolders.objects.get(user_id=user_id)
usf.add_missing_feeds()
except UserSubscriptionFolders.DoesNotExist:
print " ***> %s has no USF" % user.username
# Move classifiers
if old_feed_id:
classifier_count = 0
for classifier_type in (MClassifierAuthor, MClassifierFeed, MClassifierTag, MClassifierTitle):
classifiers = classifier_type.objects.filter(user_id=user_id, feed_id=old_feed_id)
classifier_count += classifiers.count()
for classifier in classifiers:
classifier.feed_id = new_feed_id
try:
classifier.save()
except NotUniqueError:
continue
if classifier_count:
print " Moved %s classifiers for %s" % (classifier_count, user.username)
def trim_read_stories(self, r=None):
if not r:
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
read_stories_key = "RS:%s:%s" % (self.user_id, self.feed_id)
stale_story_hashes = r.sdiff(read_stories_key, "F:%s" % self.feed_id)
if not stale_story_hashes:
return
logging.user(self.user, "~FBTrimming ~FR%s~FB read stories (~SB%s~SN)..." % (len(stale_story_hashes), self.feed_id))
r.srem(read_stories_key, *stale_story_hashes)
r.srem("RS:%s" % self.feed_id, *stale_story_hashes)
@classmethod
    def trim_user_read_stories(cls, user_id):
user = User.objects.get(pk=user_id)
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
subs = UserSubscription.objects.filter(user_id=user_id).only('feed')
if not subs: return
key = "RS:%s" % user_id
feeds = [f.feed_id for f in subs]
old_rs = r.smembers(key)
old_count = len(old_rs)
if not old_count:
logging.user(user, "~FBTrimming all read stories, ~SBnone found~SN.")
return
# r.sunionstore("%s:backup" % key, key)
# r.expire("%s:backup" % key, 60*60*24)
r.sunionstore(key, *["%s:%s" % (key, f) for f in feeds])
new_rs = r.smembers(key)
missing_rs = []
missing_count = 0
feed_re = re.compile(r'(\d+):.*?')
for i, rs in enumerate(old_rs):
if i and i % 1000 == 0:
if missing_rs:
r.sadd(key, *missing_rs)
missing_count += len(missing_rs)
missing_rs = []
found = feed_re.search(rs)
if not found:
print " ---> Not found: %s" % rs
continue
rs_feed_id = found.groups()[0]
if int(rs_feed_id) not in feeds:
missing_rs.append(rs)
if missing_rs:
r.sadd(key, *missing_rs)
missing_count += len(missing_rs)
new_count = len(new_rs)
new_total = new_count + missing_count
logging.user(user, "~FBTrimming ~FR%s~FB/%s (~SB%s sub'ed ~SN+ ~SB%s unsub'ed~SN saved)" %
(old_count - new_total, old_count, new_count, missing_count))
def mark_feed_read(self, cutoff_date=None):
if (self.unread_count_negative == 0
and self.unread_count_neutral == 0
and self.unread_count_positive == 0
and not self.needs_unread_recalc):
return
recount = True
# Use the latest story to get last read time.
if cutoff_date:
cutoff_date = cutoff_date + datetime.timedelta(seconds=1)
else:
latest_story = MStory.objects(story_feed_id=self.feed.pk)\
.order_by('-story_date').only('story_date').limit(1)
if latest_story and len(latest_story) >= 1:
cutoff_date = (latest_story[0]['story_date']
+ datetime.timedelta(seconds=1))
else:
cutoff_date = datetime.datetime.utcnow()
recount = False
self.last_read_date = cutoff_date
self.mark_read_date = cutoff_date
self.oldest_unread_story_date = cutoff_date
if not recount:
self.unread_count_negative = 0
self.unread_count_positive = 0
self.unread_count_neutral = 0
self.unread_count_updated = datetime.datetime.utcnow()
self.needs_unread_recalc = False
else:
self.needs_unread_recalc = True
self.save()
return True
def mark_newer_stories_read(self, cutoff_date):
if (self.unread_count_negative == 0
and self.unread_count_neutral == 0
and self.unread_count_positive == 0
and not self.needs_unread_recalc):
return
cutoff_date = cutoff_date - datetime.timedelta(seconds=1)
story_hashes = self.get_stories(limit=500, order="newest", cutoff_date=cutoff_date,
read_filter="unread", hashes_only=True)
data = self.mark_story_ids_as_read(story_hashes, aggregated=True)
return data
def mark_story_ids_as_read(self, story_hashes, request=None, aggregated=False):
data = dict(code=0, payload=story_hashes)
if not request:
request = self.user
if not self.needs_unread_recalc:
self.needs_unread_recalc = True
self.save()
if len(story_hashes) > 1:
logging.user(request, "~FYRead %s stories in feed: %s" % (len(story_hashes), self.feed))
else:
logging.user(request, "~FYRead story in feed: %s" % (self.feed))
for story_hash in set(story_hashes):
RUserStory.mark_read(self.user_id, self.feed_id, story_hash, aggregated=aggregated)
return data
def invert_read_stories_after_unread_story(self, story, request=None):
data = dict(code=1)
if story.story_date > self.mark_read_date:
return data
        # Story is within the mark-as-read range, so move the mark_read_date
        # back and re-mark the newer stories as read.
newer_stories = MStory.objects(story_feed_id=story.story_feed_id,
story_date__gte=story.story_date,
story_date__lte=self.mark_read_date
).only('story_hash')
newer_stories = [s.story_hash for s in newer_stories]
self.mark_read_date = story.story_date - datetime.timedelta(minutes=1)
self.needs_unread_recalc = True
self.save()
# Mark stories as read only after the mark_read_date has been moved, otherwise
# these would be ignored.
data = self.mark_story_ids_as_read(newer_stories, request=request, aggregated=True)
return data
def calculate_feed_scores(self, silent=False, stories=None, force=False):
# now = datetime.datetime.strptime("2009-07-06 22:30:03", "%Y-%m-%d %H:%M:%S")
now = datetime.datetime.now()
oldest_unread_story_date = now
if self.user.profile.last_seen_on < self.user.profile.unread_cutoff and not force:
# if not silent:
# logging.info(' ---> [%s] SKIPPING Computing scores: %s (1 week+)' % (self.user, self.feed))
return self
ong = self.unread_count_negative
ont = self.unread_count_neutral
ops = self.unread_count_positive
oousd = self.oldest_unread_story_date
onur = self.needs_unread_recalc
oit = self.is_trained
# if not self.feed.fetched_once:
# if not silent:
# logging.info(' ---> [%s] NOT Computing scores: %s' % (self.user, self.feed))
# self.needs_unread_recalc = False
# self.save()
# return
feed_scores = dict(negative=0, neutral=0, positive=0)
# Two weeks in age. If mark_read_date is older, mark old stories as read.
date_delta = self.user.profile.unread_cutoff
if date_delta < self.mark_read_date:
date_delta = self.mark_read_date
else:
self.mark_read_date = date_delta
if self.is_trained:
if not stories:
stories = cache.get('S:%s' % self.feed_id)
unread_story_hashes = self.story_hashes(user_id=self.user_id, feed_ids=[self.feed_id],
usersubs=[self],
read_filter='unread', group_by_feed=False,
cutoff_date=self.user.profile.unread_cutoff)
if not stories:
stories_db = MStory.objects(story_hash__in=unread_story_hashes)
stories = Feed.format_stories(stories_db, self.feed_id)
unread_stories = []
for story in stories:
if story['story_date'] < date_delta:
continue
if story['story_hash'] in unread_story_hashes:
unread_stories.append(story)
if story['story_date'] < oldest_unread_story_date:
oldest_unread_story_date = story['story_date']
# if not silent:
# logging.info(' ---> [%s] Format stories: %s' % (self.user, datetime.datetime.now() - now))
classifier_feeds = list(MClassifierFeed.objects(user_id=self.user_id, feed_id=self.feed_id, social_user_id=0))
classifier_authors = list(MClassifierAuthor.objects(user_id=self.user_id, feed_id=self.feed_id))
classifier_titles = list(MClassifierTitle.objects(user_id=self.user_id, feed_id=self.feed_id))
classifier_tags = list(MClassifierTag.objects(user_id=self.user_id, feed_id=self.feed_id))
if (not len(classifier_feeds) and
not len(classifier_authors) and
not len(classifier_titles) and
not len(classifier_tags)):
self.is_trained = False
# if not silent:
# logging.info(' ---> [%s] Classifiers: %s (%s)' % (self.user, datetime.datetime.now() - now, classifier_feeds.count() + classifier_authors.count() + classifier_tags.count() + classifier_titles.count()))
scores = {
'feed': apply_classifier_feeds(classifier_feeds, self.feed),
}
for story in unread_stories:
scores.update({
'author' : apply_classifier_authors(classifier_authors, story),
'tags' : apply_classifier_tags(classifier_tags, story),
'title' : apply_classifier_titles(classifier_titles, story),
})
max_score = max(scores['author'], scores['tags'], scores['title'])
min_score = min(scores['author'], scores['tags'], scores['title'])
if max_score > 0:
feed_scores['positive'] += 1
elif min_score < 0:
feed_scores['negative'] += 1
else:
if scores['feed'] > 0:
feed_scores['positive'] += 1
elif scores['feed'] < 0:
feed_scores['negative'] += 1
else:
feed_scores['neutral'] += 1
else:
unread_story_hashes = self.story_hashes(user_id=self.user_id, feed_ids=[self.feed_id],
usersubs=[self],
read_filter='unread', group_by_feed=False,
include_timestamps=True,
cutoff_date=date_delta)
feed_scores['neutral'] = len(unread_story_hashes)
if feed_scores['neutral']:
oldest_unread_story_date = datetime.datetime.fromtimestamp(unread_story_hashes[-1][1])
if not silent or settings.DEBUG:
logging.user(self.user, '~FBUnread count (~SB%s~SN%s): ~SN(~FC%s~FB/~FC%s~FB/~FC%s~FB) ~SBto~SN (~FC%s~FB/~FC%s~FB/~FC%s~FB)' % (self.feed_id, '/~FMtrained~FB' if self.is_trained else '', ong, ont, ops, feed_scores['negative'], feed_scores['neutral'], feed_scores['positive']))
self.unread_count_positive = feed_scores['positive']
self.unread_count_neutral = feed_scores['neutral']
self.unread_count_negative = feed_scores['negative']
self.unread_count_updated = datetime.datetime.now()
self.oldest_unread_story_date = oldest_unread_story_date
self.needs_unread_recalc = False
update_fields = []
if self.unread_count_positive != ops: update_fields.append('unread_count_positive')
if self.unread_count_neutral != ont: update_fields.append('unread_count_neutral')
if self.unread_count_negative != ong: update_fields.append('unread_count_negative')
if self.oldest_unread_story_date != oousd: update_fields.append('oldest_unread_story_date')
if self.needs_unread_recalc != onur: update_fields.append('needs_unread_recalc')
if self.is_trained != oit: update_fields.append('is_trained')
if len(update_fields):
self.save(update_fields=update_fields)
if (self.unread_count_positive == 0 and
self.unread_count_neutral == 0):
self.mark_feed_read()
if not silent:
logging.user(self.user, '~FC~SNComputing scores: %s (~SB%s~SN/~SB%s~SN/~SB%s~SN)' % (self.feed, feed_scores['negative'], feed_scores['neutral'], feed_scores['positive']))
self.trim_read_stories()
return self
@staticmethod
def score_story(scores):
max_score = max(scores['author'], scores['tags'], scores['title'])
min_score = min(scores['author'], scores['tags'], scores['title'])
if max_score > 0:
return 1
elif min_score < 0:
return -1
return scores['feed']
def switch_feed(self, new_feed, old_feed):
# Rewrite feed in subscription folders
try:
user_sub_folders = UserSubscriptionFolders.objects.get(user=self.user)
except Exception, e:
logging.info(" *** ---> UserSubscriptionFolders error: %s" % e)
return
logging.info(" ===> %s " % self.user)
# Switch read stories
RUserStory.switch_feed(user_id=self.user_id, old_feed_id=old_feed.pk,
new_feed_id=new_feed.pk)
def switch_feed_for_classifier(model):
duplicates = model.objects(feed_id=old_feed.pk, user_id=self.user_id)
if duplicates.count():
logging.info(" ---> Switching %s %s" % (duplicates.count(), model))
for duplicate in duplicates:
duplicate.feed_id = new_feed.pk
if duplicate.social_user_id is None:
duplicate.social_user_id = 0
try:
                    duplicate.save()
except (IntegrityError, OperationError):
logging.info(" !!!!> %s already exists" % duplicate)
duplicate.delete()
switch_feed_for_classifier(MClassifierTitle)
switch_feed_for_classifier(MClassifierAuthor)
switch_feed_for_classifier(MClassifierFeed)
switch_feed_for_classifier(MClassifierTag)
# Switch to original feed for the user subscription
self.feed = new_feed
self.needs_unread_recalc = True
try:
UserSubscription.objects.get(user=self.user, feed=new_feed)
except UserSubscription.DoesNotExist:
self.save()
user_sub_folders.rewrite_feed(new_feed, old_feed)
else:
# except (IntegrityError, OperationError):
logging.info(" !!!!> %s already subscribed" % self.user)
self.delete()
return
@classmethod
def collect_orphan_feeds(cls, user):
us = cls.objects.filter(user=user)
try:
usf = UserSubscriptionFolders.objects.get(user=user)
except UserSubscriptionFolders.DoesNotExist:
return
us_feed_ids = set([sub.feed_id for sub in us])
folders = json.decode(usf.folders)
def collect_ids(folders, found_ids):
for item in folders:
# print ' --> %s' % item
if isinstance(item, int):
# print ' --> Adding feed: %s' % item
found_ids.add(item)
elif isinstance(item, dict):
# print ' --> Descending folder dict: %s' % item.values()
found_ids.update(collect_ids(item.values(), found_ids))
elif isinstance(item, list):
# print ' --> Descending folder list: %s' % len(item)
found_ids.update(collect_ids(item, found_ids))
# print ' --> Returning: %s' % found_ids
return found_ids
found_ids = collect_ids(folders, set())
diff = len(us_feed_ids) - len(found_ids)
if diff > 0:
logging.info(" ---> Collecting orphans on %s. %s feeds with %s orphans" % (user.username, len(us_feed_ids), diff))
orphan_ids = us_feed_ids - found_ids
folders.extend(list(orphan_ids))
usf.folders = json.encode(folders)
usf.save()
@classmethod
def verify_feeds_scheduled(cls, user_id):
r = redis.Redis(connection_pool=settings.REDIS_FEED_UPDATE_POOL)
user = User.objects.get(pk=user_id)
subs = cls.objects.filter(user=user)
feed_ids = [sub.feed.pk for sub in subs]
p = r.pipeline()
for feed_id in feed_ids:
p.zscore('scheduled_updates', feed_id)
p.zscore('error_feeds', feed_id)
results = p.execute()
p = r.pipeline()
for feed_id in feed_ids:
p.zscore('queued_feeds', feed_id)
        try:
            results_queued = p.execute()
        except redis.RedisError:
            results_queued = map(lambda x: False, range(len(feed_ids)))
safety_net = []
for f, feed_id in enumerate(feed_ids):
scheduled_updates = results[f*2]
error_feeds = results[f*2+1]
            queued_feeds = results_queued[f]
if not scheduled_updates and not queued_feeds and not error_feeds:
safety_net.append(feed_id)
if not safety_net: return
logging.user(user, "~FBFound ~FR%s unscheduled feeds~FB, scheduling..." % len(safety_net))
for feed_id in safety_net:
feed = Feed.get_by_id(feed_id)
feed.set_next_scheduled_update()
@classmethod
def count_subscribers_to_other_subscriptions(cls, feed_id):
# feeds = defaultdict(int)
subscribing_users = cls.objects.filter(feed=feed_id).values('user', 'feed_opens').order_by('-feed_opens')[:25]
print "Got subscribing users"
subscribing_user_ids = [sub['user'] for sub in subscribing_users]
print "Got subscribing user ids"
cofeeds = cls.objects.filter(user__in=subscribing_user_ids).values('feed').annotate(
user_count=Count('user')).order_by('-user_count')[:200]
print "Got cofeeds: %s" % len(cofeeds)
# feed_subscribers = Feed.objects.filter(pk__in=[f['feed'] for f in cofeeds]).values('pk', 'num_subscribers')
# max_local_subscribers = float(max([f['user_count'] for f in cofeeds]))
# max_total_subscribers = float(max([f['num_subscribers'] for f in feed_subscribers]))
# feed_subscribers = dict([(s['pk'], float(s['num_subscribers'])) for s in feed_subscribers])
# pctfeeds = [(f['feed'],
# f['user_count'],
# feed_subscribers[f['feed']],
# f['user_count']/max_total_subscribers,
# f['user_count']/max_local_subscribers,
# max_local_subscribers,
# max_total_subscribers) for f in cofeeds]
# print pctfeeds[:5]
# orderedpctfeeds = sorted(pctfeeds, key=lambda f: .5*f[3]+.5*f[4], reverse=True)[:8]
# pprint([(Feed.get_by_id(o[0]), o[1], o[2], o[3], o[4]) for o in orderedpctfeeds])
users_by_feeds = {}
for feed in [f['feed'] for f in cofeeds]:
users_by_feeds[feed] = [u['user'] for u in cls.objects.filter(feed=feed, user__in=subscribing_user_ids).values('user')]
print "Got users_by_feeds"
table = tfidf()
for feed in users_by_feeds.keys():
table.addDocument(feed, users_by_feeds[feed])
print "Got table"
sorted_table = sorted(table.similarities(subscribing_user_ids), key=itemgetter(1), reverse=True)[:8]
pprint([(Feed.get_by_id(o[0]), o[1]) for o in sorted_table])
return table
# return cofeeds
class RUserStory:
@classmethod
def mark_story_hashes_read(cls, user_id, story_hashes, r=None, s=None):
if not r:
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
if not s:
s = redis.Redis(connection_pool=settings.REDIS_POOL)
# if not r2:
# r2 = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL2)
p = r.pipeline()
# p2 = r2.pipeline()
feed_ids = set()
friend_ids = set()
if not isinstance(story_hashes, list):
story_hashes = [story_hashes]
for story_hash in story_hashes:
feed_id, _ = MStory.split_story_hash(story_hash)
feed_ids.add(feed_id)
# Find other social feeds with this story to update their counts
friend_key = "F:%s:F" % (user_id)
share_key = "S:%s" % (story_hash)
friends_with_shares = [int(f) for f in s.sinter(share_key, friend_key)]
friend_ids.update(friends_with_shares)
cls.mark_read(user_id, feed_id, story_hash, social_user_ids=friends_with_shares, r=p)
p.execute()
# p2.execute()
return list(feed_ids), list(friend_ids)
@classmethod
def mark_story_hash_unread(cls, user_id, story_hash, r=None, s=None):
if not r:
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
if not s:
s = redis.Redis(connection_pool=settings.REDIS_POOL)
# if not r2:
# r2 = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL2)
friend_ids = set()
feed_id, _ = MStory.split_story_hash(story_hash)
# Find other social feeds with this story to update their counts
friend_key = "F:%s:F" % (user_id)
share_key = "S:%s" % (story_hash)
friends_with_shares = [int(f) for f in s.sinter(share_key, friend_key)]
friend_ids.update(friends_with_shares)
cls.mark_unread(user_id, feed_id, story_hash, social_user_ids=friends_with_shares, r=r)
return feed_id, list(friend_ids)
@classmethod
def mark_read(cls, user_id, story_feed_id, story_hash, social_user_ids=None,
aggregated=False, r=None):
if not r:
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
# if not r2:
# r2 = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL2)
story_hash = MStory.ensure_story_hash(story_hash, story_feed_id=story_feed_id)
if not story_hash: return
def redis_commands(key):
r.sadd(key, story_hash)
# r2.sadd(key, story_hash)
r.expire(key, settings.DAYS_OF_STORY_HASHES*24*60*60)
# r2.expire(key, settings.DAYS_OF_STORY_HASHES*24*60*60)
all_read_stories_key = 'RS:%s' % (user_id)
redis_commands(all_read_stories_key)
read_story_key = 'RS:%s:%s' % (user_id, story_feed_id)
redis_commands(read_story_key)
if social_user_ids:
for social_user_id in social_user_ids:
social_read_story_key = 'RS:%s:B:%s' % (user_id, social_user_id)
redis_commands(social_read_story_key)
if not aggregated:
key = 'lRS:%s' % user_id
r.lpush(key, story_hash)
r.ltrim(key, 0, 1000)
r.expire(key, settings.DAYS_OF_STORY_HASHES*24*60*60)
@staticmethod
def story_can_be_marked_read_by_user(story, user):
message = None
if story.story_date < user.profile.unread_cutoff:
if user.profile.is_premium:
message = "Story is more than %s days old, cannot mark as unread." % (
settings.DAYS_OF_UNREAD)
elif story.story_date > user.profile.unread_cutoff_premium:
message = "Story is more than %s days old. Premiums can mark unread up to 30 days." % (
settings.DAYS_OF_UNREAD_FREE)
else:
message = "Story is more than %s days old, cannot mark as unread." % (
settings.DAYS_OF_UNREAD_FREE)
return message
@staticmethod
def mark_unread(user_id, story_feed_id, story_hash, social_user_ids=None, r=None):
if not r:
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
# r2 = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL2)
story_hash = MStory.ensure_story_hash(story_hash, story_feed_id=story_feed_id)
if not story_hash: return
def redis_commands(key):
r.srem(key, story_hash)
# r2.srem(key, story_hash)
r.expire(key, settings.DAYS_OF_STORY_HASHES*24*60*60)
# r2.expire(key, settings.DAYS_OF_STORY_HASHES*24*60*60)
all_read_stories_key = 'RS:%s' % (user_id)
redis_commands(all_read_stories_key)
read_story_key = 'RS:%s:%s' % (user_id, story_feed_id)
redis_commands(read_story_key)
read_stories_list_key = 'lRS:%s' % user_id
r.lrem(read_stories_list_key, story_hash)
if social_user_ids:
for social_user_id in social_user_ids:
social_read_story_key = 'RS:%s:B:%s' % (user_id, social_user_id)
redis_commands(social_read_story_key)
@staticmethod
def get_stories(user_id, feed_id, r=None):
if not r:
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
story_hashes = r.smembers("RS:%s:%s" % (user_id, feed_id))
return story_hashes
@staticmethod
def get_read_stories(user_id, offset=0, limit=12, order="newest"):
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
key = "lRS:%s" % user_id
if order == "oldest":
count = r.llen(key)
if offset >= count: return []
offset = max(0, count - (offset+limit))
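            # e.g. count=100, offset=0, limit=12 -> offset becomes 88, so the
            # twelve oldest hashes come from the tail of the newest-first list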
            # LRANGE's end index is inclusive, so stop at offset+limit-1
            story_hashes = r.lrange(key, offset, offset+limit-1)
elif order == "newest":
            story_hashes = r.lrange(key, offset, offset+limit-1)
return story_hashes
@classmethod
def switch_feed(cls, user_id, old_feed_id, new_feed_id):
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
# r2 = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL2)
p = r.pipeline()
# p2 = r2.pipeline()
story_hashes = cls.get_stories(user_id, old_feed_id, r=r)
for story_hash in story_hashes:
_, hash_story = MStory.split_story_hash(story_hash)
new_story_hash = "%s:%s" % (new_feed_id, hash_story)
read_feed_key = "RS:%s:%s" % (user_id, new_feed_id)
p.sadd(read_feed_key, new_story_hash)
# p2.sadd(read_feed_key, new_story_hash)
p.expire(read_feed_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
# p2.expire(read_feed_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
read_user_key = "RS:%s" % (user_id)
p.sadd(read_user_key, new_story_hash)
# p2.sadd(read_user_key, new_story_hash)
p.expire(read_user_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
# p2.expire(read_user_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
p.execute()
# p2.execute()
if len(story_hashes) > 0:
logging.info(" ---> %s read stories" % len(story_hashes))
@classmethod
def switch_hash(cls, feed_id, old_hash, new_hash):
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
# r2 = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL2)
p = r.pipeline()
# p2 = r2.pipeline()
UNREAD_CUTOFF = datetime.datetime.now() - datetime.timedelta(days=settings.DAYS_OF_STORY_HASHES)
usersubs = UserSubscription.objects.filter(feed_id=feed_id, last_read_date__gte=UNREAD_CUTOFF)
logging.info(" ---> ~SB%s usersubs~SN to switch read story hashes..." % len(usersubs))
for sub in usersubs:
rs_key = "RS:%s:%s" % (sub.user.pk, feed_id)
read = r.sismember(rs_key, old_hash)
if read:
p.sadd(rs_key, new_hash)
# p2.sadd(rs_key, new_hash)
p.expire(rs_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
# p2.expire(rs_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
read_user_key = "RS:%s" % sub.user.pk
p.sadd(read_user_key, new_hash)
# p2.sadd(read_user_key, new_hash)
p.expire(read_user_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
# p2.expire(read_user_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
p.execute()
# p2.execute()
@classmethod
def read_story_count(cls, user_id):
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
key = "RS:%s" % user_id
count = r.scard(key)
return count
class UserSubscriptionFolders(models.Model):
"""
    A JSON list of folders and feeds that a user has subscribed to. The list
is a recursive descent of feeds and folders in folders. Used to layout
the feeds and folders in the Reader's feed navigation pane.
"""
user = models.ForeignKey(User, unique=True)
folders = models.TextField(default="[]")
def __unicode__(self):
return "[%s]: %s" % (self.user, len(self.folders),)
class Meta:
verbose_name_plural = "folders"
verbose_name = "folder"
def compact(self):
folders = json.decode(self.folders)
def _compact(folder):
new_folder = []
for item in folder:
if isinstance(item, int) and item not in new_folder:
new_folder.append(item)
elif isinstance(item, dict):
for f_k, f_v in item.items():
new_folder.append({f_k: _compact(f_v)})
return new_folder
new_folders = _compact(folders)
logging.info(" ---> Compacting from %s to %s" % (folders, new_folders))
new_folders = json.encode(new_folders)
logging.info(" ---> Compacting from %s to %s" % (len(self.folders), len(new_folders)))
self.folders = new_folders
self.save()
def add_folder(self, parent_folder, folder):
if self.folders:
user_sub_folders = json.decode(self.folders)
else:
user_sub_folders = []
obj = {folder: []}
user_sub_folders = add_object_to_folder(obj, parent_folder, user_sub_folders)
self.folders = json.encode(user_sub_folders)
self.save()
def arranged_folders(self):
user_sub_folders = json.decode(self.folders)
def _arrange_folder(folder):
folder_feeds = []
folder_folders = []
for item in folder:
if isinstance(item, int):
folder_feeds.append(item)
elif isinstance(item, dict):
for f_k, f_v in item.items():
arranged_folder = _arrange_folder(f_v)
folder_folders.append({f_k: arranged_folder})
arranged_folder = folder_feeds + folder_folders
return arranged_folder
return _arrange_folder(user_sub_folders)
def flatten_folders(self, feeds=None):
folders = json.decode(self.folders)
flat_folders = {" ": []}
def _flatten_folders(items, parent_folder="", depth=0):
for item in items:
if isinstance(item, int) and ((not feeds) or (feeds and item in feeds)):
if not parent_folder:
parent_folder = ' '
if parent_folder in flat_folders:
flat_folders[parent_folder].append(item)
else:
flat_folders[parent_folder] = [item]
elif isinstance(item, dict):
for folder_name in item:
folder = item[folder_name]
flat_folder_name = "%s%s%s" % (
parent_folder if parent_folder and parent_folder != ' ' else "",
" - " if parent_folder and parent_folder != ' ' else "",
folder_name
)
flat_folders[flat_folder_name] = []
_flatten_folders(folder, flat_folder_name, depth+1)
_flatten_folders(folders)
return flat_folders
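    # e.g. folders [1, {"Tech": [2, {"Linux": [3]}]}] flatten to
    # {" ": [1], "Tech": [2], "Tech - Linux": [3]} (illustrative feed ids).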
def delete_feed(self, feed_id, in_folder, commit_delete=True):
def _find_feed_in_folders(old_folders, folder_name='', multiples_found=False, deleted=False):
new_folders = []
for k, folder in enumerate(old_folders):
if isinstance(folder, int):
if (folder == feed_id and in_folder is not None and (
(folder_name != in_folder) or
(folder_name == in_folder and deleted))):
multiples_found = True
logging.user(self.user, "~FB~SBDeleting feed, and a multiple has been found in '%s'" % (folder_name))
if (folder == feed_id and
(folder_name == in_folder or in_folder is None) and
not deleted):
logging.user(self.user, "~FBDelete feed: %s'th item: %s folders/feeds" % (
k, len(old_folders)
))
deleted = True
else:
new_folders.append(folder)
elif isinstance(folder, dict):
for f_k, f_v in folder.items():
nf, multiples_found, deleted = _find_feed_in_folders(f_v, f_k, multiples_found, deleted)
new_folders.append({f_k: nf})
return new_folders, multiples_found, deleted
user_sub_folders = self.arranged_folders()
user_sub_folders, multiples_found, deleted = _find_feed_in_folders(user_sub_folders)
self.folders = json.encode(user_sub_folders)
self.save()
if not multiples_found and deleted and commit_delete:
            user_sub = None
            try:
                user_sub = UserSubscription.objects.get(user=self.user, feed=feed_id)
            except UserSubscription.DoesNotExist:
                # UserSubscription.objects.get raises UserSubscription.DoesNotExist,
                # not Feed.DoesNotExist; fall back to any duplicate of the feed.
                duplicate_feed = DuplicateFeed.objects.filter(duplicate_feed_id=feed_id)
                if duplicate_feed:
                    try:
                        user_sub = UserSubscription.objects.get(user=self.user,
                                                                feed=duplicate_feed[0].feed)
                    except UserSubscription.DoesNotExist:
                        return
            if user_sub:
                user_sub.delete()
def delete_folder(self, folder_to_delete, in_folder, feed_ids_in_folder, commit_delete=True):
def _find_folder_in_folders(old_folders, folder_name, feeds_to_delete, deleted_folder=None):
new_folders = []
for k, folder in enumerate(old_folders):
if isinstance(folder, int):
new_folders.append(folder)
if folder in feeds_to_delete:
feeds_to_delete.remove(folder)
elif isinstance(folder, dict):
for f_k, f_v in folder.items():
if f_k == folder_to_delete and (folder_name == in_folder or in_folder is None):
logging.user(self.user, "~FBDeleting folder '~SB%s~SN' in '%s': %s" % (f_k, folder_name, folder))
deleted_folder = folder
else:
nf, feeds_to_delete, deleted_folder = _find_folder_in_folders(f_v, f_k, feeds_to_delete, deleted_folder)
new_folders.append({f_k: nf})
return new_folders, feeds_to_delete, deleted_folder
user_sub_folders = json.decode(self.folders)
user_sub_folders, feeds_to_delete, deleted_folder = _find_folder_in_folders(user_sub_folders, '', feed_ids_in_folder)
self.folders = json.encode(user_sub_folders)
self.save()
if commit_delete:
UserSubscription.objects.filter(user=self.user, feed__in=feeds_to_delete).delete()
return deleted_folder
def delete_feeds_by_folder(self, feeds_by_folder):
logging.user(self.user, "~FBDeleting ~FR~SB%s~SN feeds~FB: ~SB%s" % (
len(feeds_by_folder), feeds_by_folder))
for feed_id, in_folder in feeds_by_folder:
self.delete_feed(feed_id, in_folder)
return self
def rename_folder(self, folder_to_rename, new_folder_name, in_folder):
def _find_folder_in_folders(old_folders, folder_name):
new_folders = []
for k, folder in enumerate(old_folders):
if isinstance(folder, int):
new_folders.append(folder)
elif isinstance(folder, dict):
for f_k, f_v in folder.items():
nf = _find_folder_in_folders(f_v, f_k)
if f_k == folder_to_rename and folder_name == in_folder:
logging.user(self.user, "~FBRenaming folder '~SB%s~SN' in '%s' to: ~SB%s" % (
f_k, folder_name, new_folder_name))
f_k = new_folder_name
new_folders.append({f_k: nf})
return new_folders
user_sub_folders = json.decode(self.folders)
user_sub_folders = _find_folder_in_folders(user_sub_folders, '')
self.folders = json.encode(user_sub_folders)
self.save()
def move_feed_to_folders(self, feed_id, in_folders=None, to_folders=None):
logging.user(self.user, "~FBMoving feed '~SB%s~SN' in '%s' to: ~SB%s" % (
feed_id, in_folders, to_folders))
user_sub_folders = json.decode(self.folders)
for in_folder in in_folders:
self.delete_feed(feed_id, in_folder, commit_delete=False)
user_sub_folders = json.decode(self.folders)
for to_folder in to_folders:
user_sub_folders = add_object_to_folder(int(feed_id), to_folder, user_sub_folders)
self.folders = json.encode(user_sub_folders)
self.save()
return self
def move_feed_to_folder(self, feed_id, in_folder=None, to_folder=None):
logging.user(self.user, "~FBMoving feed '~SB%s~SN' in '%s' to: ~SB%s" % (
feed_id, in_folder, to_folder))
user_sub_folders = json.decode(self.folders)
self.delete_feed(feed_id, in_folder, commit_delete=False)
user_sub_folders = json.decode(self.folders)
user_sub_folders = add_object_to_folder(int(feed_id), to_folder, user_sub_folders)
self.folders = json.encode(user_sub_folders)
self.save()
return self
def move_folder_to_folder(self, folder_name, in_folder=None, to_folder=None):
logging.user(self.user, "~FBMoving folder '~SB%s~SN' in '%s' to: ~SB%s" % (
folder_name, in_folder, to_folder))
user_sub_folders = json.decode(self.folders)
deleted_folder = self.delete_folder(folder_name, in_folder, [], commit_delete=False)
user_sub_folders = json.decode(self.folders)
user_sub_folders = add_object_to_folder(deleted_folder, to_folder, user_sub_folders)
self.folders = json.encode(user_sub_folders)
self.save()
return self
def move_feeds_by_folder_to_folder(self, feeds_by_folder, to_folder):
logging.user(self.user, "~FBMoving ~SB%s~SN feeds to folder: ~SB%s" % (
len(feeds_by_folder), to_folder))
for feed_id, in_folder in feeds_by_folder:
self.move_feed_to_folder(feed_id, in_folder, to_folder)
return self
def rewrite_feed(self, original_feed, duplicate_feed):
def rewrite_folders(folders, original_feed, duplicate_feed):
new_folders = []
for k, folder in enumerate(folders):
if isinstance(folder, int):
if folder == duplicate_feed.pk:
# logging.info(" ===> Rewrote %s'th item: %s" % (k+1, folders))
new_folders.append(original_feed.pk)
else:
new_folders.append(folder)
elif isinstance(folder, dict):
for f_k, f_v in folder.items():
new_folders.append({f_k: rewrite_folders(f_v, original_feed, duplicate_feed)})
return new_folders
folders = json.decode(self.folders)
folders = rewrite_folders(folders, original_feed, duplicate_feed)
self.folders = json.encode(folders)
self.save()
def flat(self):
folders = json.decode(self.folders)
def _flat(folder, feeds=None):
if not feeds:
feeds = []
for item in folder:
if isinstance(item, int) and item not in feeds:
feeds.append(item)
elif isinstance(item, dict):
for f_k, f_v in item.items():
feeds.extend(_flat(f_v))
return feeds
return _flat(folders)
def feed_ids_under_folder_slug(self, slug):
folders = json.decode(self.folders)
def _feeds(folder, found=False, folder_title=None):
feeds = []
local_found = False
for item in folder:
if isinstance(item, int) and item not in feeds and found:
feeds.append(item)
elif isinstance(item, dict):
for f_k, f_v in item.items():
if slugify(f_k) == slug:
found = True
local_found = True
folder_title = f_k
found_feeds, folder_title = _feeds(f_v, found, folder_title)
feeds.extend(found_feeds)
if local_found:
found = False
local_found = False
return feeds, folder_title
return _feeds(folders)
@classmethod
def add_all_missing_feeds(cls):
usf = cls.objects.all().order_by('pk')
total = usf.count()
for i, f in enumerate(usf):
print "%s/%s: %s" % (i, total, f)
f.add_missing_feeds()
def add_missing_feeds(self):
all_feeds = self.flat()
subs = [us.feed_id for us in
UserSubscription.objects.filter(user=self.user).only('feed')]
missing_subs = set(all_feeds) - set(subs)
if missing_subs:
logging.debug(" ---> %s is missing %s subs. Adding %s..." % (
self.user, len(missing_subs), missing_subs))
for feed_id in missing_subs:
feed = Feed.get_by_id(feed_id)
if feed:
us, _ = UserSubscription.objects.get_or_create(user=self.user, feed=feed, defaults={
'needs_unread_recalc': True
})
if not us.needs_unread_recalc:
us.needs_unread_recalc = True
us.save()
missing_folder_feeds = set(subs) - set(all_feeds)
if missing_folder_feeds:
user_sub_folders = json.decode(self.folders)
logging.debug(" ---> %s is missing %s folder feeds. Adding %s..." % (
self.user, len(missing_folder_feeds), missing_folder_feeds))
for feed_id in missing_folder_feeds:
feed = Feed.get_by_id(feed_id)
if feed and feed.pk == feed_id:
user_sub_folders = add_object_to_folder(feed_id, "", user_sub_folders)
self.folders = json.encode(user_sub_folders)
self.save()
def auto_activate(self):
if self.user.profile.is_premium: return
active_count = UserSubscription.objects.filter(user=self.user, active=True).count()
if active_count: return
all_feeds = self.flat()
if not all_feeds: return
for feed in all_feeds[:64]:
try:
sub = UserSubscription.objects.get(user=self.user, feed=feed)
except UserSubscription.DoesNotExist:
continue
sub.active = True
sub.save()
if sub.feed.active_subscribers <= 0:
sub.feed.count_subscribers()
class Feature(models.Model):
"""
Simple blog-like feature board shown to all users on the home page.
"""
description = models.TextField(default="")
date = models.DateTimeField(default=datetime.datetime.now)
def __unicode__(self):
return "[%s] %s" % (self.date, self.description[:50])
class Meta:
ordering = ["-date"]
|
{
"content_hash": "e0aa03d4fbff038445eb3b4440334c46",
"timestamp": "",
"source": "github",
"line_count": 1579,
"max_line_length": 289,
"avg_line_length": 44.585180493983536,
"alnum_prop": 0.5425,
"repo_name": "epiphany27/NewsBlur",
"id": "2bbb32876f95dc59209a482d622a346703859468",
"size": "70400",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/reader/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4431"
},
{
"name": "C++",
"bytes": "2926"
},
{
"name": "CSS",
"bytes": "677370"
},
{
"name": "CoffeeScript",
"bytes": "6451"
},
{
"name": "HTML",
"bytes": "268726"
},
{
"name": "Java",
"bytes": "700898"
},
{
"name": "JavaScript",
"bytes": "1575332"
},
{
"name": "M",
"bytes": "47696"
},
{
"name": "Nginx",
"bytes": "897"
},
{
"name": "Objective-C",
"bytes": "3716549"
},
{
"name": "Perl",
"bytes": "55598"
},
{
"name": "Python",
"bytes": "2405347"
},
{
"name": "R",
"bytes": "527"
},
{
"name": "Ruby",
"bytes": "870"
},
{
"name": "Shell",
"bytes": "40018"
}
],
"symlink_target": ""
}
|
import sys
sys.path[0:0] = [""]
import unittest
from datetime import datetime
from mongoengine import *
__all__ = ("ValidatorErrorTest",)
class ValidatorErrorTest(unittest.TestCase):
def setUp(self):
connect(db='mongoenginetest')
def test_to_dict(self):
"""Ensure a ValidationError handles error to_dict correctly.
"""
error = ValidationError('root')
self.assertEqual(error.to_dict(), {})
# 1st level error schema
error.errors = {'1st': ValidationError('bad 1st'), }
self.assertTrue('1st' in error.to_dict())
self.assertEqual(error.to_dict()['1st'], 'bad 1st')
# 2nd level error schema
error.errors = {'1st': ValidationError('bad 1st', errors={
'2nd': ValidationError('bad 2nd'),
})}
self.assertTrue('1st' in error.to_dict())
self.assertTrue(isinstance(error.to_dict()['1st'], dict))
self.assertTrue('2nd' in error.to_dict()['1st'])
self.assertEqual(error.to_dict()['1st']['2nd'], 'bad 2nd')
# moar levels
error.errors = {'1st': ValidationError('bad 1st', errors={
'2nd': ValidationError('bad 2nd', errors={
'3rd': ValidationError('bad 3rd', errors={
'4th': ValidationError('Inception'),
}),
}),
})}
self.assertTrue('1st' in error.to_dict())
self.assertTrue('2nd' in error.to_dict()['1st'])
self.assertTrue('3rd' in error.to_dict()['1st']['2nd'])
self.assertTrue('4th' in error.to_dict()['1st']['2nd']['3rd'])
self.assertEqual(error.to_dict()['1st']['2nd']['3rd']['4th'],
'Inception')
self.assertEqual(error.message, "root(2nd.3rd.4th.Inception: ['1st'])")
def test_model_validation(self):
class User(Document):
username = StringField(primary_key=True)
name = StringField(required=True)
User.drop_collection()
try:
User().validate()
except ValidationError, e:
self.assertTrue("User:None" in e.message)
self.assertEqual(e.to_dict(), {
'username': 'Field is required',
'name': 'Field is required'})
user = User(username="RossC0", name="Ross").save()
user.name = None
try:
user.save()
except ValidationError, e:
self.assertTrue("User:RossC0" in e.message)
self.assertEqual(e.to_dict(), {
'name': 'Field is required'})
def test_fields_rewrite(self):
class BasePerson(Document):
name = StringField()
age = IntField()
meta = {'abstract': True}
class Person(BasePerson):
name = StringField(required=True)
p = Person(age=15)
self.assertRaises(ValidationError, p.validate)
def test_datetime_validation(self):
class DTDoc(Document):
date = DateTimeField()
dtd = DTDoc()
dtd.date = 'whatever'
self.assertRaises(ValidationError, dtd.save)
# make sure that passing a parsable datetime works
dtd = DTDoc()
dtd.date = str(datetime.utcnow())
dtd.save()
dtd.reload()
self.assertTrue(isinstance(dtd.date, datetime))
def test_embedded_document_validation(self):
"""Ensure that embedded documents may be validated.
"""
class Comment(EmbeddedDocument):
date = DateTimeField()
content = StringField(required=True)
comment = Comment()
self.assertRaises(ValidationError, comment.validate)
comment.content = 'test'
comment.validate()
comment.date = 4
self.assertRaises(ValidationError, comment.validate)
comment.date = datetime.now()
comment.validate()
self.assertEqual(comment._instance, None)
def test_embedded_db_field_validate(self):
class SubDoc(EmbeddedDocument):
val = IntField(required=True)
class Doc(Document):
id = StringField(primary_key=True)
e = EmbeddedDocumentField(SubDoc, db_field='eb')
try:
Doc(id="bad").validate()
except ValidationError, e:
self.assertTrue("SubDoc:None" in e.message)
self.assertEqual(e.to_dict(), {
"e": {'val': 'OK could not be converted to int'}})
Doc.drop_collection()
Doc(id="test", e=SubDoc(val=15)).save()
doc = Doc.objects.first()
keys = doc.to_dict().keys()
self.assertEqual(2, len(keys))
self.assertTrue('e' in keys)
self.assertTrue('id' in keys)
with self.assertRaises(ValueError):
doc.e.val = "OK"
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "6fb6aad467003579985fbb846d20ec05",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 79,
"avg_line_length": 31.044871794871796,
"alnum_prop": 0.5641131530043362,
"repo_name": "closeio/mongoengine",
"id": "9614d714f7918a84774379f91bacf9f6ecb518c9",
"size": "4867",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/document/validation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "783824"
}
],
"symlink_target": ""
}
|
import numpy as np
import sys
if len(sys.argv) < 3:
    print("")
    print("Usage:")
    print("    "+sys.argv[0]+" [file1.csv] [file2.csv]")
    print("")
    sys.exit(1)
filename_ref = sys.argv[1]
filename_sweet = sys.argv[2]
#filename_ref = 'job_benchref_solution/'+filename
data_ref = np.loadtxt(filename_ref)
#print("Reference data size:")
#print(data_ref.shape)
if "bench_" in filename_ref:
data_ref = data_ref[1:,1:]
print("")
print("data ref")
print(" + min: "+str(np.min(data_ref)))
print(" + max: "+str(np.max(data_ref)))
print("")
#filename_sweet = 'job_bench_sweet/'+filename
data_sweet = np.loadtxt(filename_sweet)
# Skip first row and col since they contain the angles
data_sweet = data_sweet[1:,1:]
#print(data_ref[0]-data_sweet[0])
#sys.exit(1)
#print(data_ref-data_sweet)
#print("Sweet data size:")
#print(data_sweet.shape)
print("")
print("data sweet")
print(" + min: "+str(np.min(data_sweet)))
print(" + max: "+str(np.max(data_sweet)))
print("")
diff = data_ref - data_sweet
if 0:
import matplotlib.pyplot as plt
plt.imshow(diff)
plt.colorbar()
plt.show()
sys.exit(1)
lmax_error = np.max(np.abs(data_ref-data_sweet))
print(" + Error Lmax: "+str(lmax_error))
if 'prog_phi' in filename_ref:
if lmax_error > 1e-5:
raise Exception("ERROR threshold too large!")
else:
if lmax_error > 1e-8:
raise Exception("ERROR threshold too large!")
|
{
"content_hash": "0a22c1a77c7ddef4c6f021c2ea9ce890",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 54,
"avg_line_length": 19.87142857142857,
"alnum_prop": 0.6455787203450755,
"repo_name": "schreiberx/sweet",
"id": "8aa53e20bad0ecabb45747463852af90a3c42d7f",
"size": "1416",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/70_program_swe_sphere_comparison_with_reference_implementation/cmp_solutions.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "133036"
},
{
"name": "C++",
"bytes": "2947985"
},
{
"name": "Fortran",
"bytes": "109460"
},
{
"name": "GLSL",
"bytes": "27428"
},
{
"name": "Makefile",
"bytes": "312"
},
{
"name": "Python",
"bytes": "2503502"
},
{
"name": "Shell",
"bytes": "490940"
},
{
"name": "TeX",
"bytes": "3093"
}
],
"symlink_target": ""
}
|
"""Generate docs for the TensorFlow Python API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import inspect
import os
import six
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
from tensorflow.tools.docs import doc_generator_visitor
from tensorflow.tools.docs import parser
from tensorflow.tools.docs import pretty_docs
from tensorflow.tools.docs import py_guide_parser
def _is_free_function(py_object, full_name, index):
"""Check if input is a free function (and not a class- or static method)."""
if not inspect.isfunction(py_object):
return False
# Static methods are functions to inspect (in 2.7), so check if the parent
# is a class. If there is no parent, it's not a function.
if '.' not in full_name:
return False
parent_name = full_name.rsplit('.', 1)[0]
if inspect.isclass(index[parent_name]):
return False
return True
def write_docs(output_dir, parser_config, duplicate_of, index, yaml_toc):
"""Write previously extracted docs to disk.
Write a docs page for each symbol in `index` to a tree of docs at
`output_dir`.
Symbols with multiple aliases will have only one page written about
them, which is referenced for all aliases.
Args:
output_dir: Directory to write documentation markdown files to. Will be
created if it doesn't exist.
parser_config: A `parser.ParserConfig` object.
duplicate_of: A `dict` mapping fully qualified names to "master" names.
Used to determine which docs pages to write.
index: A `dict` mapping fully qualified names to the corresponding Python
objects. Used to produce docs for child objects.
yaml_toc: Set to `True` to generate a "_toc.yaml" file.
"""
# Make output_dir.
try:
if not os.path.exists(output_dir):
os.makedirs(output_dir)
except OSError as e:
print('Creating output dir "%s" failed: %s' % (output_dir, e))
raise
# These dictionaries are used for table-of-contents generation below
  # They will contain, after the for-loop below:
  #  - module name (string): classes and functions the module contains (list)
module_children = {}
  #  - symbol name (string): pathname (string)
symbol_to_file = {}
# Parse and write Markdown pages, resolving cross-links (@{symbol}).
for full_name, py_object in six.iteritems(index):
if full_name in duplicate_of:
continue
# Methods and some routines are documented only as part of their class.
if not (inspect.ismodule(py_object) or
inspect.isclass(py_object) or
_is_free_function(py_object, full_name, index)):
continue
sitepath = os.path.join('api_docs/python',
parser.documentation_path(full_name)[:-3])
# For TOC, we need to store a mapping from full_name to the file
# we're generating
symbol_to_file[full_name] = sitepath
# For a module, remember the module for the table-of-contents
if inspect.ismodule(py_object):
if full_name in parser_config.tree:
module_children.setdefault(full_name, [])
# For something else that's documented,
# figure out what module it lives in
else:
subname = str(full_name)
while True:
subname = subname[:subname.rindex('.')]
if inspect.ismodule(index[subname]):
module_children.setdefault(subname, []).append(full_name)
break
print('Writing docs for %s (%r).' % (full_name, py_object))
# Generate docs for `py_object`, resolving references.
page_info = parser.docs_for_object(full_name, py_object, parser_config)
path = os.path.join(output_dir, parser.documentation_path(full_name))
directory = os.path.dirname(path)
try:
if not os.path.exists(directory):
os.makedirs(directory)
with open(path, 'w') as f:
f.write(pretty_docs.build_md_page(page_info))
except OSError as e:
print('Cannot write documentation for %s to %s: %s' % (full_name,
directory, e))
raise
if yaml_toc:
# Generate table of contents
# Put modules in alphabetical order, case-insensitive
modules = sorted(module_children.keys(), key=lambda a: a.upper())
leftnav_path = os.path.join(output_dir, '_toc.yaml')
with open(leftnav_path, 'w') as f:
# Generate header
f.write('# Automatically generated file; please do not edit\ntoc:\n')
for module in modules:
f.write(' - title: ' + module + '\n'
' section:\n' +
' - title: Overview\n' +
' path: /TARGET_DOC_ROOT/' + symbol_to_file[module] + '\n')
symbols_in_module = module_children.get(module, [])
symbols_in_module.sort(key=lambda a: a.upper())
for full_name in symbols_in_module:
f.write(' - title: ' + full_name[len(module)+1:] + '\n'
' path: /TARGET_DOC_ROOT/' +
symbol_to_file[full_name] + '\n')
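  # The generated _toc.yaml then looks roughly like this (illustrative names):
  #   toc:
  #     - title: tf.summary
  #       section:
  #       - title: Overview
  #         path: /TARGET_DOC_ROOT/api_docs/python/tf/summary
  #       - title: scalar
  #         path: /TARGET_DOC_ROOT/api_docs/python/tf/summary/scalar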
# Write a global index containing all full names with links.
with open(os.path.join(output_dir, 'index.md'), 'w') as f:
f.write(parser.generate_global_index(
'TensorFlow', index, parser_config.reference_resolver))
def add_dict_to_dict(add_from, add_to):
for key in add_from:
if key in add_to:
add_to[key].extend(add_from[key])
else:
add_to[key] = add_from[key]
# Exclude some libraries in contrib from the documentation altogether.
def _get_default_do_not_descend_map():
# TODO(wicke): Shrink this list.
return {
'': ['cli', 'lib', 'wrappers'],
'contrib': [
'compiler',
'factorization',
'grid_rnn',
'labeled_tensor',
'ndlstm',
'quantization',
'session_bundle',
'slim',
'solvers',
'specs',
'tensor_forest',
'tensorboard',
'testing',
'training',
'tfprof',
],
'contrib.bayesflow': [
'special_math', 'stochastic_gradient_estimators',
'stochastic_variables'
],
'contrib.ffmpeg': ['ffmpeg_ops'],
'contrib.graph_editor': [
'edit',
'match',
'reroute',
'subgraph',
'transform',
'select',
'util'
],
'contrib.layers': ['feature_column', 'summaries'],
'contrib.learn': [
'datasets',
'head',
'graph_actions',
'io',
'models',
'monitors',
'ops',
'preprocessing',
'utils',
],
'contrib.util': ['loader'],
}
def extract(py_modules, do_not_descend_map):
"""Extract docs from tf namespace and write them to disk."""
# Traverse the first module.
visitor = doc_generator_visitor.DocGeneratorVisitor(py_modules[0][0])
api_visitor = public_api.PublicAPIVisitor(visitor)
add_dict_to_dict(do_not_descend_map, api_visitor.do_not_descend_map)
traverse.traverse(py_modules[0][1], api_visitor)
# Traverse all py_modules after the first:
for module_name, module in py_modules[1:]:
visitor.set_root_name(module_name)
traverse.traverse(module, api_visitor)
return visitor
class _GetMarkdownTitle(py_guide_parser.PyGuideParser):
"""Extract the title from a .md file."""
def __init__(self):
self.title = None
py_guide_parser.PyGuideParser.__init__(self)
def process_title(self, _, title):
if self.title is None: # only use the first title
self.title = title
class _DocInfo(object):
"""A simple struct for holding a doc's url and title."""
def __init__(self, url, title):
self.url = url
self.title = title
def build_doc_index(src_dir):
"""Build an index from a keyword designating a doc to _DocInfo objects."""
doc_index = {}
for dirpath, _, filenames in os.walk(src_dir):
suffix = os.path.relpath(path=dirpath, start=src_dir)
for base_name in filenames:
if not base_name.endswith('.md'): continue
title_parser = _GetMarkdownTitle()
title_parser.process(os.path.join(dirpath, base_name))
key_parts = os.path.join(suffix, base_name[:-3]).split('/')
if key_parts[-1] == 'index':
key_parts = key_parts[:-1]
doc_info = _DocInfo(os.path.join(suffix, base_name), title_parser.title)
doc_index[key_parts[-1]] = doc_info
if len(key_parts) > 1:
doc_index['/'.join(key_parts[-2:])] = doc_info
return doc_index
class _GuideRef(object):
def __init__(self, base_name, title, section_title, section_tag):
self.url = 'api_guides/python/' + (
('%s#%s' % (base_name, section_tag)) if section_tag else base_name)
self.link_text = (('%s > %s' % (title, section_title))
if section_title else title)
def make_md_link(self, url_prefix):
return '[%s](%s%s)' % (self.link_text, url_prefix, self.url)
class _GenerateGuideIndex(py_guide_parser.PyGuideParser):
"""Turn guide files into an index from symbol name to a list of _GuideRefs."""
def __init__(self):
self.index = {}
py_guide_parser.PyGuideParser.__init__(self)
def process(self, full_path, base_name):
"""Index a file, reading from `full_path`, with `base_name` as the link."""
self.full_path = full_path
self.base_name = base_name
self.title = None
self.section_title = None
self.section_tag = None
py_guide_parser.PyGuideParser.process(self, full_path)
def process_title(self, _, title):
if self.title is None: # only use the first title
self.title = title
def process_section(self, _, section_title, tag):
self.section_title = section_title
self.section_tag = tag
def process_line(self, _, line):
"""Index @{symbol} references as in the current file & section."""
for match in parser.SYMBOL_REFERENCE_RE.finditer(line):
val = self.index.get(match.group(1), [])
val.append(_GuideRef(
self.base_name, self.title, self.section_title, self.section_tag))
self.index[match.group(1)] = val
def _build_guide_index(guide_src_dir):
"""Return dict: symbol name -> _GuideRef from the files in `guide_src_dir`."""
index_generator = _GenerateGuideIndex()
if os.path.exists(guide_src_dir):
for full_path, base_name in py_guide_parser.md_files_in_dir(guide_src_dir):
index_generator.process(full_path, base_name)
return index_generator.index
class _UpdateTags(py_guide_parser.PyGuideParser):
"""Rewrites a Python guide so that each section has an explicit tag."""
def process_section(self, line_number, section_title, tag):
self.replace_line(line_number, '<h2 id="%s">%s</h2>' % (tag, section_title))
EXCLUDED = set(['__init__.py', 'OWNERS', 'README.txt'])
def _other_docs(src_dir, output_dir, reference_resolver):
"""Convert all the files in `src_dir` and write results to `output_dir`."""
header = '<!-- DO NOT EDIT! Automatically generated file. -->\n'
# Iterate through all the source files and process them.
tag_updater = _UpdateTags()
for dirpath, _, filenames in os.walk(src_dir):
# How to get from `dirpath` to api_docs/python/
relative_path_to_root = os.path.relpath(
path=os.path.join(src_dir, 'api_docs/python'), start=dirpath)
# Make the directory under output_dir.
new_dir = os.path.join(output_dir,
os.path.relpath(path=dirpath, start=src_dir))
try:
if not os.path.exists(new_dir):
os.makedirs(new_dir)
except OSError as e:
print('Creating output dir "%s" failed: %s' % (new_dir, e))
raise
for base_name in filenames:
if base_name in EXCLUDED:
print('Skipping excluded file %s...' % base_name)
continue
full_in_path = os.path.join(dirpath, base_name)
suffix = os.path.relpath(path=full_in_path, start=src_dir)
full_out_path = os.path.join(output_dir, suffix)
if not base_name.endswith('.md'):
print('Copying non-md file %s...' % suffix)
open(full_out_path, 'w').write(open(full_in_path).read())
continue
if dirpath.endswith('/api_guides/python'):
print('Processing Python guide %s...' % base_name)
md_string = tag_updater.process(full_in_path)
else:
print('Processing doc %s...' % suffix)
md_string = open(full_in_path).read()
output = reference_resolver.replace_references(
md_string, relative_path_to_root)
with open(full_out_path, 'w') as f:
f.write(header + output)
print('Done.')
class DocGenerator(object):
"""Main entry point for generating docs."""
def __init__(self):
self.argument_parser = argparse.ArgumentParser()
self._py_modules = None
self._do_not_descend_map = _get_default_do_not_descend_map()
self.yaml_toc = True
def add_output_dir_argument(self):
self.argument_parser.add_argument(
'--output_dir',
type=str,
default=None,
required=True,
help='Directory to write docs to.'
)
def add_src_dir_argument(self):
self.argument_parser.add_argument(
'--src_dir',
type=str,
default=None,
required=True,
help='Directory with the source docs.'
)
def add_base_dir_argument(self, default_base_dir):
self.argument_parser.add_argument(
'--base_dir',
type=str,
default=default_base_dir,
        help='Base directory to strip from file names referenced in docs.'
)
def parse_known_args(self):
flags, _ = self.argument_parser.parse_known_args()
return flags
def add_to_do_not_descend_map(self, d):
add_dict_to_dict(d, self._do_not_descend_map)
def set_do_not_descend_map(self, d):
self._do_not_descend_map = d
def set_py_modules(self, py_modules):
self._py_modules = py_modules
def load_contrib(self):
"""Access something in contrib so tf.contrib is properly loaded."""
# Without this, it ends up hidden behind lazy loading. Requires
# that the caller has already called set_py_modules().
if self._py_modules is None:
raise RuntimeError(
'Must call set_py_modules() before running load_contrib().')
for name, module in self._py_modules:
if name == 'tf':
_ = module.contrib.__name__
return True
return False
def py_module_names(self):
if self._py_modules is None:
raise RuntimeError(
'Must call set_py_modules() before running py_module_names().')
return [name for (name, _) in self._py_modules]
def make_reference_resolver(self, visitor, doc_index):
return parser.ReferenceResolver(
duplicate_of=visitor.duplicate_of,
doc_index=doc_index, index=visitor.index,
py_module_names=self.py_module_names())
def make_parser_config(self, visitor, reference_resolver, guide_index,
base_dir):
return parser.ParserConfig(
reference_resolver=reference_resolver,
duplicates=visitor.duplicates,
tree=visitor.tree,
reverse_index=visitor.reverse_index,
guide_index=guide_index,
base_dir=base_dir)
def run_extraction(self):
return extract(self._py_modules, self._do_not_descend_map)
def build(self, flags):
"""Actually build the docs."""
doc_index = build_doc_index(flags.src_dir)
visitor = self.run_extraction()
reference_resolver = self.make_reference_resolver(visitor, doc_index)
guide_index = _build_guide_index(
os.path.join(flags.src_dir, 'api_guides/python'))
parser_config = self.make_parser_config(visitor, reference_resolver,
guide_index, flags.base_dir)
output_dir = os.path.join(flags.output_dir, 'api_docs/python')
write_docs(output_dir, parser_config, visitor.duplicate_of, visitor.index,
yaml_toc=self.yaml_toc)
_other_docs(flags.src_dir, flags.output_dir, reference_resolver)
if parser.all_errors:
print('Errors during processing:\n ' + '\n '.join(parser.all_errors))
return 1
return 0
|
{
"content_hash": "6ade7c509db3ecf2fbe1e1cbc9b152ae",
"timestamp": "",
"source": "github",
"line_count": 486,
"max_line_length": 80,
"avg_line_length": 33.257201646090536,
"alnum_prop": 0.6306378766318134,
"repo_name": "sugartom/tensorflow-alien",
"id": "1fa698628814134d5347f4843388ac421563ad22",
"size": "16852",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/tools/docs/generate_lib.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7583"
},
{
"name": "C",
"bytes": "175397"
},
{
"name": "C++",
"bytes": "21819497"
},
{
"name": "CMake",
"bytes": "131374"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "781191"
},
{
"name": "HTML",
"bytes": "558790"
},
{
"name": "Java",
"bytes": "279506"
},
{
"name": "JavaScript",
"bytes": "13406"
},
{
"name": "Jupyter Notebook",
"bytes": "1833831"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "36991"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64656"
},
{
"name": "Protocol Buffer",
"bytes": "199996"
},
{
"name": "Python",
"bytes": "18062852"
},
{
"name": "Shell",
"bytes": "325198"
},
{
"name": "TypeScript",
"bytes": "775401"
}
],
"symlink_target": ""
}
|
from io import StringIO
import os
import zipfile
import click
from click import option as opt
import cv2
import numpy as np
from tqdm import tqdm
import pandas as pd
import config
# RLE Encoding functions from https://www.kaggle.com/stainsby/fast-tested-rle
def rle_encode(mask_image):
pixels = mask_image.flatten()
pixels[0] = 0
pixels[-1] = 0
runs = np.where(pixels[1:] != pixels[:-1])[0] + 2
runs[1::2] = runs[1::2] - runs[:-1:2]
return runs
def rle_to_string(runs):
return ' '.join(str(x) for x in runs)
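# Worked example (illustrative): a flattened mask [0, 0, 1, 1, 1, 0] differs
# between neighbours at indices 1 and 4, so np.where(...) yields [1, 4] and
# adding 2 gives runs [3, 6]; the second pass turns the end position into a
# length, producing [3, 3] -- "starting at 1-indexed pixel 3, run of 3 pixels".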
@click.command()
@opt('--dirname', type=str, required=True)
@opt('--threshold', default=127)
@opt('--submission-filename', type=str, required=True)
def main(dirname, threshold, submission_filename):
predictions_parent_dir = os.path.join(config.PREDICTIONS_PATH, dirname)
submission = pd.read_csv(config.SAMPLE_SUBMISSION_PATH)
submission.set_index('img', inplace=True)
fold_dirs = os.listdir(predictions_parent_dir)
filenames = os.listdir(os.path.join(predictions_parent_dir, fold_dirs[0]))
unique_filenames = list(
sorted({filename.replace('_flip_lr', '') for filename in filenames}))
for filename in tqdm(unique_filenames):
predictions = np.zeros((len(fold_dirs) * 2, 1280, 1918))
for i, fold_dir in enumerate(fold_dirs):
img_file = os.path.join(predictions_parent_dir, fold_dir, filename)
flip_lr_img_file = os.path.join(predictions_parent_dir, fold_dir,
filename.replace('.png',
'_flip_lr.png'))
predictions[i * 2] = cv2.imread(img_file, cv2.IMREAD_UNCHANGED)
predictions[i * 2 + 1] = np.fliplr(
cv2.imread(flip_lr_img_file, cv2.IMREAD_UNCHANGED))
predictions = np.uint8(np.mean(predictions, axis=0))
predictions[predictions < threshold] = 0
predictions[predictions >= threshold] = 255
rle_mask = rle_encode(predictions)
img_name = filename.replace('.png', '.jpg')
        # assign via a (row, column) pair; chained indexing writes to a copy
        submission.loc[img_name, 'rle_mask'] = rle_to_string(rle_mask)
buffer = StringIO()
submission.reset_index(inplace=True)
submission.to_csv(buffer, index=False)
buffer.seek(0)
os.makedirs(config.SUBMISSIONS_PATH, exist_ok=True)
submission_path = os.path.join(config.SUBMISSIONS_PATH,
submission_filename + '.zip')
with zipfile.ZipFile(submission_path, mode='w',
compression=zipfile.ZIP_DEFLATED) as f:
f.writestr(submission_filename, buffer.read())
if __name__ == '__main__':
main()
|
{
"content_hash": "ea773bd8b06b5d2b6c4b8390366b16bd",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 79,
"avg_line_length": 36.067567567567565,
"alnum_prop": 0.6242038216560509,
"repo_name": "creafz/kaggle-carvana",
"id": "f41674cc0b4b58d6168659e32f48d7124f360d6b",
"size": "2669",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "make_submission.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39145"
},
{
"name": "Shell",
"bytes": "372"
}
],
"symlink_target": ""
}
|
import os
from typing import Sequence, List, Tuple, BinaryIO, cast, Optional # noqa: F401
from aptly_api.base import BaseAPIClient, AptlyAPIException
class FilesAPISection(BaseAPIClient):
def list(self, directory: Optional[str] = None) -> Sequence[str]:
if directory is None:
resp = self.do_get("api/files")
else:
resp = self.do_get("api/files/%s" % directory)
return cast(List[str], resp.json())
def upload(self, destination: str, *files: str) -> Sequence[str]:
to_upload = [] # type: List[Tuple[str, BinaryIO]]
for f in files:
if not os.path.exists(f) or not os.access(f, os.R_OK):
raise AptlyAPIException("File to upload %s can't be opened or read" % f)
fh = open(f, mode="rb")
to_upload.append((f, fh),)
try:
resp = self.do_post("api/files/%s" % destination,
files=to_upload)
except AptlyAPIException:
raise
finally:
for fn, to_close in to_upload:
if not to_close.closed:
to_close.close()
return cast(List[str], resp.json())
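    # Hedged usage sketch (URL and destination are illustrative; the base
    # client's constructor is assumed to accept the API URL):
    #   files = FilesAPISection("http://localhost:8080/")
    #   files.upload("incoming", "/tmp/pkg1.deb", "/tmp/pkg2.deb")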
def delete(self, path: Optional[str] = None) -> None:
self.do_delete("api/files/%s" % path)
|
{
"content_hash": "6ec944588317ed9b8182f3155eeac6c5",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 88,
"avg_line_length": 35.21621621621622,
"alnum_prop": 0.5610130468150422,
"repo_name": "gopythongo/aptly-api-client",
"id": "c71b9e6794230ebff48ad5e6162fee57c5e2a25f",
"size": "1527",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aptly_api/parts/files.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "81609"
}
],
"symlink_target": ""
}
|
"""Setup a regtest environment for testing and development.
This can be run from the command line.
main():
First, any existing regnet is destroyed.
A folder called regnet is created, containing folders for three nodes,
Alice, Bob, and Carol. For each node, bitcoind is started. The bitcoind nodes
are connected Alice -- Bob -- Carol.
Lightning nodes are also started for Alice, Bob, and Carol.
Once all bitcoind nodes have started up, a tuple of config.ProxySet
objects is returned, for Alice, Bob and Carol.
"""
import os
import os.path
import shutil
import subprocess
import itertools
import time
import bitcoin
import bitcoin.rpc
import config
import destroy_regnet
bitcoin.SelectParams('regtest')
BITCOIN_CONFIGURATION = """\
# Configuration for %(node)s
testnet=0
regtest=1
txindex=1
daemon=1
listen=1
relaypriority=0
rpcuser=%(node)s
rpcpassword=%(password)s
rpcport=%(rpcport)d
port=%(port)d
"""
LIGHTNING_CONFIGURATION = """\
# Lightning configuration for %(node)s
testnet=0
regtest=1
daemon=1
debug=0
rpcuser=%(node)s
rpcpassword=%(password)s
port=%(port)d
"""
def main():
"""Set up a regtest network in regnet."""
destroy_regnet.main()
ports = ((18412 + i, 18414 + i, 18416 + i)
for i in itertools.count(0, 10))
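    # With the 10-wide stride above, the generated port layout is:
    #   Alice: p2p 18412, rpc 18414, lightning 18416
    #   Bob:   p2p 18422, rpc 18424, lightning 18426
    #   Carol: p2p 18432, rpc 18434, lightning 18436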
bitcoind = os.path.abspath('bitcoind')
assert os.path.isfile(bitcoind)
lightningd = os.path.abspath('lightningd.py')
assert os.path.isfile(lightningd)
regnet_dir = os.path.abspath('regnet')
assert not os.path.exists(regnet_dir)
os.mkdir(regnet_dir)
nodes = zip(['Alice', 'Bob', 'Carol'], ports)
last_node = None
for node, ports in nodes:
port, rpcport, lport = ports
node_dir = os.path.join(regnet_dir, node)
os.mkdir(node_dir)
try:
with open(os.path.join(node_dir, 'bitcoin.conf'), 'w') as conf:
conf.write(BITCOIN_CONFIGURATION % {
'node': node,
'password': node,
'rpcport': rpcport,
'port': port,
})
#Connect in a chain
if last_node is not None:
conf.write("connect=localhost:%d\n" % last_node[1][0])
with open(os.path.join(node_dir, 'lightning.conf'), 'w') as conf:
conf.write(LIGHTNING_CONFIGURATION % {
'node': node,
'password': node,
'port': lport,
})
last_node = (node, ports)
except:
print("Failed")
shutil.rmtree(node_dir)
raise
with open(os.path.join(node_dir, 'log.txt'), 'a') as log_file:
#log_file = None
subprocess.check_call([bitcoind, "-datadir=%s" % node_dir, "-debug"],
stdin=subprocess.DEVNULL,
stdout=log_file,
stderr=subprocess.STDOUT)
subprocess.check_call([lightningd, "-datadir=%s" % node_dir],
stdin=subprocess.DEVNULL,
stdout=log_file,
stderr=subprocess.STDOUT)
def loading_wallet(proxy):
"""Check if bitcoind is still loading."""
try:
proxy.getinfo()
except bitcoin.rpc.JSONRPCException as error:
if error.error['code'] == -28:
return True
return False
time.sleep(1)
proxies = [config.collect_proxies(os.path.join(regnet_dir, node))
for node in os.listdir(regnet_dir)]
while any(loading_wallet(proxy.bit) for proxy in proxies):
time.sleep(1)
return proxies
if __name__ == "__main__":
main()
|
{
"content_hash": "857d0e04f5f696998be2880d6d79c4f8",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 81,
"avg_line_length": 29.68503937007874,
"alnum_prop": 0.5755968169761273,
"repo_name": "josephyzhou/Lightning-1",
"id": "7448be90b792bb927e14405c0f0d49d74a9d4cde",
"size": "3795",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "create_regnet.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "55736"
}
],
"symlink_target": ""
}
|
import random
from rlkit.data_management.replay_buffer import ReplayBuffer
class SplitReplayBuffer(ReplayBuffer):
"""
Split the data into a training and validation set.
"""
def __init__(
self,
train_replay_buffer: ReplayBuffer,
validation_replay_buffer: ReplayBuffer,
fraction_paths_in_train,
):
self.train_replay_buffer = train_replay_buffer
self.validation_replay_buffer = validation_replay_buffer
self.fraction_paths_in_train = fraction_paths_in_train
self.replay_buffer = self.train_replay_buffer
def add_sample(self, *args, **kwargs):
self.replay_buffer.add_sample(*args, **kwargs)
def add_path(self, path):
self.replay_buffer.add_path(path)
self._randomly_set_replay_buffer()
def num_steps_can_sample(self):
return min(
self.train_replay_buffer.num_steps_can_sample(),
self.validation_replay_buffer.num_steps_can_sample(),
)
def terminate_episode(self, *args, **kwargs):
self.replay_buffer.terminate_episode(*args, **kwargs)
self._randomly_set_replay_buffer()
def _randomly_set_replay_buffer(self):
if random.random() <= self.fraction_paths_in_train:
self.replay_buffer = self.train_replay_buffer
else:
self.replay_buffer = self.validation_replay_buffer
def get_replay_buffer(self, training=True):
if training:
return self.train_replay_buffer
else:
return self.validation_replay_buffer
def random_batch(self, batch_size):
return self.train_replay_buffer.random_batch(batch_size)
def __getattr__(self, attrname):
return getattr(self.replay_buffer, attrname)
def __getstate__(self):
# Do not save self.replay_buffer since it's a duplicate and seems to
# cause joblib recursion issues.
return dict(
train_replay_buffer=self.train_replay_buffer,
validation_replay_buffer=self.validation_replay_buffer,
fraction_paths_in_train=self.fraction_paths_in_train,
)
def __setstate__(self, d):
self.train_replay_buffer = d['train_replay_buffer']
self.validation_replay_buffer = d['validation_replay_buffer']
self.fraction_paths_in_train = d['fraction_paths_in_train']
self.replay_buffer = self.train_replay_buffer
|
{
"content_hash": "59599fd9852946897c751539b8a0bc28",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 76,
"avg_line_length": 35.30434782608695,
"alnum_prop": 0.6408045977011494,
"repo_name": "vitchyr/rlkit",
"id": "a29e96f9cdfe6ff1af561013ae43c174c4b60ed0",
"size": "2436",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rlkit/data_management/split_buffer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "3338"
},
{
"name": "Python",
"bytes": "355210"
}
],
"symlink_target": ""
}
|
from xhtml2pdf.context import pisaContext
from xhtml2pdf.default import DEFAULT_CSS
from xhtml2pdf.parser import pisaParser
from reportlab.platypus.flowables import Spacer
from reportlab.platypus.frames import Frame
from xhtml2pdf.xhtml2pdf_reportlab import PmlBaseDoc, PmlPageTemplate
from xhtml2pdf.util import pisaTempFile, getBox, PyPDF2
import cgi
import logging
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
log = logging.getLogger("xhtml2pdf")
def pisaErrorDocument(dest, c):
out = pisaTempFile(capacity=c.capacity)
out.write("<p style='background-color:red;'><strong>%d error(s) occured:</strong><p>" % c.err)
for mode, line, msg, _ in c.log:
if mode == "error":
out.write("<pre>%s in line %d: %s</pre>" % (mode, line, cgi.escape(msg)))
out.write("<p><strong>%d warning(s) occured:</strong><p>" % c.warn)
for mode, line, msg, _ in c.log:
if mode == "warning":
out.write("<p>%s in line %d: %s</p>" % (mode, line, cgi.escape(msg)))
return pisaDocument(out.getvalue(), dest, raise_exception=False)
def pisaStory(src, path=None, link_callback=None, debug=0, default_css=None,
xhtml=False, encoding=None, context=None, xml_output=None,
**kw):
# Prepare Context
if not context:
context = pisaContext(path, debug=debug)
context.pathCallback = link_callback
# Use a default set of CSS definitions to get an expected output
if default_css is None:
default_css = DEFAULT_CSS
# Parse and fill the story
pisaParser(src, context, default_css, xhtml, encoding, xml_output)
# Avoid empty documents
if not context.story:
context.story = [Spacer(1, 1)]
if context.indexing_story:
context.story.append(context.indexing_story)
# Remove anchors if they do not exist (because of a bug in Reportlab)
for frag, anchor in context.anchorFrag:
if anchor not in context.anchorName:
frag.link = None
return context
def pisaDocument(src, dest=None, path=None, link_callback=None, debug=0,
default_css=None, xhtml=False, encoding=None, xml_output=None,
raise_exception=True, capacity=100 * 1024, **kw):
log.debug("pisaDocument options:\n src = %r\n dest = %r\n path = %r\n link_callback = %r\n xhtml = %r",
src,
dest,
path,
link_callback,
xhtml)
# Prepare simple context
context = pisaContext(path, debug=debug, capacity=capacity)
context.pathCallback = link_callback
# Build story
context = pisaStory(src, path, link_callback, debug, default_css, xhtml,
encoding, context=context, xml_output=xml_output)
# Buffer PDF into memory
out = pisaTempFile(capacity=context.capacity)
doc = PmlBaseDoc(
out,
pagesize=context.pageSize,
author=context.meta["author"].strip(),
subject=context.meta["subject"].strip(),
keywords=[x.strip() for x in
context.meta["keywords"].strip().split(",") if x],
title=context.meta["title"].strip(),
showBoundary=0,
allowSplitting=1)
# Prepare templates and their frames
if "body" in context.templateList:
body = context.templateList["body"]
del context.templateList["body"]
else:
x, y, w, h = getBox("1cm 1cm -1cm -1cm", context.pageSize)
body = PmlPageTemplate(
id="body",
frames=[
Frame(x, y, w, h,
id="body",
leftPadding=0,
rightPadding=0,
bottomPadding=0,
topPadding=0)],
pagesize=context.pageSize)
doc.addPageTemplates([body] + context.templateList.values())
# Use multibuild e.g. if a TOC has to be created
if context.multiBuild:
doc.multiBuild(context.story)
else:
doc.build(context.story)
# Add watermarks
if PyPDF2:
for bgouter in context.pisaBackgroundList:
# If we have at least one background, then lets do it
if bgouter:
istream = out
output = PyPDF2.PdfFileWriter()
input1 = PyPDF2.PdfFileReader(istream)
ctr = 0
# TODO: Why do we loop over the same list again?
# see bgouter at line 137
for bg in context.pisaBackgroundList:
page = input1.getPage(ctr)
if (bg and not bg.notFound()
and (bg.mimetype == "application/pdf")):
bginput = PyPDF2.PdfFileReader(bg.getFile())
pagebg = bginput.getPage(0)
pagebg.mergePage(page)
page = pagebg
else:
log.warn(context.warning(
"Background PDF %s doesn't exist.", bg))
output.addPage(page)
ctr += 1
out = pisaTempFile(capacity=context.capacity)
output.write(out)
# data = sout.getvalue()
                # Found a background? Then leave the loop after the first occurrence
break
else:
log.warn(context.warning("pyPDF not installed!"))
# Get the resulting PDF and write it to the file object
# passed from the caller
if dest is None:
# No output file was passed - Let's use a pisaTempFile
dest = pisaTempFile(capacity=context.capacity)
context.dest = dest
data = out.getvalue() # TODO: That load all the tempfile in RAM - Why bother with a swapping tempfile then?
context.dest.write(data) # TODO: context.dest is a tempfile as well...
return context
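# Hedged usage sketch:
#   with open("out.pdf", "wb") as fh:
#       pisaDocument("<h1>Hello</h1>", dest=fh)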
|
{
"content_hash": "5cbf1b815a71adaebb7049fa21e33e53",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 112,
"avg_line_length": 36.982658959537574,
"alnum_prop": 0.6023757424195061,
"repo_name": "carschar/xhtml2pdf",
"id": "176f3eae1ea3b38c420f29749819f61b2eea49cb",
"size": "6422",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xhtml2pdf/document.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "475717"
}
],
"symlink_target": ""
}
|
import sys
import os
from array import array
# parse command line arguments
file_name = sys.argv[1]
size_in_mb = int(sys.argv[2])
# output directory, defaults to current directory
output_dir = os.environ.get("OUTPUT_DIR", os.environ["PWD"])
# output file
output_file = open(os.path.join(output_dir, file_name), 'wb')
# each loop iteration will write out 1 KB of data
for i in range(size_in_mb*1024): # loop over KB
# array of 128 doubles = 128x8 bytes = 1024 bytes = 1 KB
data = range(1024/8)
float_array = array('d', data) # array of 'double' - each double is 8 bytes
float_array.tofile(output_file)
output_file.close()
|
{
"content_hash": "222d7de949c4aaa82d1e7562accb9808",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 77,
"avg_line_length": 27.695652173913043,
"alnum_prop": 0.7048665620094191,
"repo_name": "oodt-cloud/docker",
"id": "c701f2cd9132cca3f727eebe314b3bc3cb97453a",
"size": "950",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/test-workflow/swarm/wmgr_config/pges/test-workflow/writeBin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "7551"
},
{
"name": "Shell",
"bytes": "114851"
}
],
"symlink_target": ""
}
|
"""Cloudmesh install util functions.
This file contains basic utility functions that must not need any
import from cloudmesh OR any other non-standard python
modules. Everything in this file must execute on a clean python 2.7.x
environment.
"""
import platform
from string import Template
import os
import sys
def get_system():
if is_ubuntu():
return "ubuntu"
elif is_centos():
return "centos"
elif is_osx():
return "osx"
else:
return "unsupported"
def is_ubuntu():
    """test if the platform is ubuntu"""
    (dist, version, release) = platform.dist()
    if dist.lower() == "ubuntu" and version not in ["14.04"]:
        print("WARNING: %s %s is not tested" % (dist, version))
    return dist.lower() == "ubuntu"
def is_centos():
"""test if the platform is centos"""
(dist, version, release) = platform.dist()
if dist == "centos" and version not in ["6.5"]:
print("WARNING: %s %s is not tested" % (dist, version))
return dist == "centos"
def is_osx():
osx = platform.system().lower() == 'darwin'
if osx:
os_version = platform.mac_ver()[0]
if os_version not in ['10.9.5',
'10.10',
'10.10.1',
'10.10.2',
'10.10.3']:
osx = False
print("WARNING: %s %s is not tested" % ('OSX', os_version))
return osx
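

# Hedged usage sketch, not part of the original module: a caller would
# typically dispatch on get_system() and bail out early on unknown platforms.
if __name__ == '__main__':
    system = get_system()
    if system == "unsupported":
        sys.exit("ERROR: unsupported platform")
    print("detected platform: %s" % system)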
|
{
"content_hash": "3274ef5cf827e8b39ce61ec8437f822a",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 71,
"avg_line_length": 25.714285714285715,
"alnum_prop": 0.55625,
"repo_name": "rajpushkar83/cloudmesh",
"id": "a09f362cfe61eb803bdb3a0337c6899eaa645ddf",
"size": "1440",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudmesh_install/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ActionScript",
"bytes": "15982"
},
{
"name": "CSS",
"bytes": "390396"
},
{
"name": "HTML",
"bytes": "4158355"
},
{
"name": "Java",
"bytes": "369"
},
{
"name": "JavaScript",
"bytes": "2803977"
},
{
"name": "Makefile",
"bytes": "7572"
},
{
"name": "PHP",
"bytes": "183557"
},
{
"name": "Python",
"bytes": "1736957"
},
{
"name": "Ruby",
"bytes": "10670"
},
{
"name": "Shell",
"bytes": "32263"
}
],
"symlink_target": ""
}
|
print("Importing panda3d")
import panda3d
import subprocess
import os, sys
from os.path import isfile, isdir, realpath, dirname, join
THIS_DIR = realpath(dirname(__file__)) or "."
start_cwd = os.getcwd() or "."
print("Processing meta files")
os.chdir(THIS_DIR)
try:
    # check_call (unlike call) raises on a nonzero exit status, so a failing
    # script actually reaches the except/exit below.
    subprocess.check_call([sys.executable, "process_meta_files.py"])
except Exception as msg:
print(msg)
sys.exit(-1)
os.chdir(join(THIS_DIR, "../../p3d_ecs/native/"))
print("Updating module builder ..")
try:
    subprocess.check_call([sys.executable, "update_module_builder.py"])
except Exception as msg:
print(msg)
sys.exit(-1)
print("Compiling native code")
try:
    subprocess.check_call([sys.executable, "build.py"])
except Exception as msg:
print(msg)
sys.exit(-1)
os.chdir(start_cwd)
sys.exit(0)
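

# Hedged sketch, not part of the original script: the three try/except blocks
# above share one shape and could be folded into a single helper.
def run_step(description, script):
    print(description)
    try:
        subprocess.check_call([sys.executable, script])
    except Exception as msg:
        print(msg)
        sys.exit(-1)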
|
{
"content_hash": "af386167373c07be89d507845e3002cd",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 63,
"avg_line_length": 20.289473684210527,
"alnum_prop": 0.6977950713359273,
"repo_name": "tobspr/P3D-ECS",
"id": "c5d388953a632d5169df2c56a05d12015372d55c",
"size": "804",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "p3d_ecs/scripts/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "912"
},
{
"name": "C",
"bytes": "2064"
},
{
"name": "C++",
"bytes": "237926"
},
{
"name": "CMake",
"bytes": "594"
},
{
"name": "Protocol Buffer",
"bytes": "1328"
},
{
"name": "Python",
"bytes": "97194"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
import datetime
import os
import re
import signal
import threading
import time
import uuid
from multiprocessing import Value
from unittest import mock
from unittest.mock import patch
import psutil
import pytest
from airflow import settings
from airflow.exceptions import AirflowException
from airflow.executors.sequential_executor import SequentialExecutor
from airflow.jobs.local_task_job import LocalTaskJob
from airflow.jobs.scheduler_job import SchedulerJob
from airflow.models.dagbag import DagBag
from airflow.models.serialized_dag import SerializedDagModel
from airflow.models.taskinstance import TaskInstance
from airflow.operators.empty import EmptyOperator
from airflow.operators.python import PythonOperator
from airflow.task.task_runner.standard_task_runner import StandardTaskRunner
from airflow.utils import timezone
from airflow.utils.net import get_hostname
from airflow.utils.session import create_session
from airflow.utils.state import State
from airflow.utils.timeout import timeout
from airflow.utils.types import DagRunType
from tests.test_utils import db
from tests.test_utils.asserts import assert_queries_count
from tests.test_utils.config import conf_vars
from tests.test_utils.mock_executor import MockExecutor
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
TEST_DAG_FOLDER = os.environ["AIRFLOW__CORE__DAGS_FOLDER"]
@pytest.fixture
def clear_db():
db.clear_db_dags()
db.clear_db_jobs()
db.clear_db_runs()
db.clear_db_task_fail()
yield
@pytest.fixture(scope="class")
def clear_db_class():
yield
db.clear_db_dags()
db.clear_db_jobs()
db.clear_db_runs()
db.clear_db_task_fail()
@pytest.fixture(scope="module")
def dagbag():
return DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
@pytest.mark.usefixtures("clear_db_class", "clear_db")
class TestLocalTaskJob:
@pytest.fixture(autouse=True)
def set_instance_attrs(self, dagbag):
self.dagbag = dagbag
with patch("airflow.jobs.base_job.sleep") as self.mock_base_job_sleep:
yield
def validate_ti_states(self, dag_run, ti_state_mapping, error_message):
for task_id, expected_state in ti_state_mapping.items():
task_instance = dag_run.get_task_instance(task_id=task_id)
task_instance.refresh_from_db()
assert task_instance.state == expected_state, error_message
def test_localtaskjob_essential_attr(self, dag_maker):
"""
Check whether essential attributes
of LocalTaskJob can be assigned with
proper values without intervention
"""
with dag_maker("test_localtaskjob_essential_attr"):
op1 = EmptyOperator(task_id="op1")
dr = dag_maker.create_dagrun()
ti = dr.get_task_instance(task_id=op1.task_id)
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
essential_attr = ["dag_id", "job_type", "start_date", "hostname"]
check_result_1 = [hasattr(job1, attr) for attr in essential_attr]
assert all(check_result_1)
check_result_2 = [getattr(job1, attr) is not None for attr in essential_attr]
assert all(check_result_2)
def test_localtaskjob_heartbeat(self, dag_maker):
session = settings.Session()
with dag_maker("test_localtaskjob_heartbeat"):
op1 = EmptyOperator(task_id="op1")
dr = dag_maker.create_dagrun()
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = "blablabla"
session.commit()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
ti.task = op1
ti.refresh_from_task(op1)
job1.task_runner = StandardTaskRunner(job1)
job1.task_runner.process = mock.Mock()
with pytest.raises(AirflowException):
job1.heartbeat_callback()
job1.task_runner.process.pid = 1
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.merge(ti)
session.commit()
assert ti.pid != os.getpid()
assert not ti.run_as_user
assert not job1.task_runner.run_as_user
job1.heartbeat_callback(session=None)
job1.task_runner.process.pid = 2
with pytest.raises(AirflowException):
job1.heartbeat_callback()
# Now, set the ti.pid to None and test that no error
# is raised.
ti.pid = None
session.merge(ti)
session.commit()
assert ti.pid != job1.task_runner.process.pid
assert not ti.run_as_user
assert not job1.task_runner.run_as_user
job1.heartbeat_callback()
@mock.patch("subprocess.check_call")
@mock.patch("airflow.jobs.local_task_job.psutil")
def test_localtaskjob_heartbeat_with_run_as_user(self, psutil_mock, _, dag_maker):
session = settings.Session()
with dag_maker("test_localtaskjob_heartbeat"):
op1 = EmptyOperator(task_id="op1", run_as_user="myuser")
dr = dag_maker.create_dagrun()
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.RUNNING
ti.pid = 2
ti.hostname = get_hostname()
session.commit()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
ti.task = op1
ti.refresh_from_task(op1)
job1.task_runner = StandardTaskRunner(job1)
job1.task_runner.process = mock.Mock()
job1.task_runner.process.pid = 2
        # Here, ti.pid is 2 and the parent process of ti.pid is a mock
        # (different), while the task_runner process is also 2. Should fail.
with pytest.raises(AirflowException, match="PID of job runner does not match"):
job1.heartbeat_callback()
job1.task_runner.process.pid = 1
# We make the parent process of ti.pid to equal the task_runner process id
psutil_mock.Process.return_value.ppid.return_value = 1
ti.state = State.RUNNING
ti.pid = 2
# The task_runner process id is 1, same as the parent process of ti.pid
# as seen above
assert ti.run_as_user
session.merge(ti)
session.commit()
job1.heartbeat_callback(session=None)
# Here the task_runner process id is changed to 2
# while parent process of ti.pid is kept at 1, which is different
job1.task_runner.process.pid = 2
with pytest.raises(AirflowException, match="PID of job runner does not match"):
job1.heartbeat_callback()
# Here we set the ti.pid to None and test that no error is
# raised
ti.pid = None
session.merge(ti)
session.commit()
assert ti.run_as_user
assert job1.task_runner.run_as_user == ti.run_as_user
assert ti.pid != job1.task_runner.process.pid
job1.heartbeat_callback()
@conf_vars({("core", "default_impersonation"): "testuser"})
@mock.patch("subprocess.check_call")
@mock.patch("airflow.jobs.local_task_job.psutil")
def test_localtaskjob_heartbeat_with_default_impersonation(self, psutil_mock, _, dag_maker):
session = settings.Session()
with dag_maker("test_localtaskjob_heartbeat"):
op1 = EmptyOperator(task_id="op1")
dr = dag_maker.create_dagrun()
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.RUNNING
ti.pid = 2
ti.hostname = get_hostname()
session.commit()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
ti.task = op1
ti.refresh_from_task(op1)
job1.task_runner = StandardTaskRunner(job1)
job1.task_runner.process = mock.Mock()
job1.task_runner.process.pid = 2
        # Here, ti.pid is 2 and the parent process of ti.pid is a mock
        # (different), while the task_runner process is also 2. Should fail.
with pytest.raises(AirflowException, match="PID of job runner does not match"):
job1.heartbeat_callback()
job1.task_runner.process.pid = 1
# We make the parent process of ti.pid to equal the task_runner process id
psutil_mock.Process.return_value.ppid.return_value = 1
ti.state = State.RUNNING
ti.pid = 2
# The task_runner process id is 1, same as the parent process of ti.pid
# as seen above
assert job1.task_runner.run_as_user == "testuser"
session.merge(ti)
session.commit()
job1.heartbeat_callback(session=None)
# Here the task_runner process id is changed to 2
# while parent process of ti.pid is kept at 1, which is different
job1.task_runner.process.pid = 2
with pytest.raises(AirflowException, match="PID of job runner does not match"):
job1.heartbeat_callback()
# Now, set the ti.pid to None and test that no error
# is raised.
ti.pid = None
session.merge(ti)
session.commit()
assert job1.task_runner.run_as_user == "testuser"
assert ti.run_as_user is None
assert ti.pid != job1.task_runner.process.pid
job1.heartbeat_callback()
def test_heartbeat_failed_fast(self):
"""
Test that task heartbeat will sleep when it fails fast
"""
self.mock_base_job_sleep.side_effect = time.sleep
dag_id = "test_heartbeat_failed_fast"
task_id = "test_heartbeat_failed_fast_op"
with create_session() as session:
dag_id = "test_heartbeat_failed_fast"
task_id = "test_heartbeat_failed_fast_op"
dag = self.dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
dr = dag.create_dagrun(
run_id="test_heartbeat_failed_fast_run",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session,
)
ti = dr.task_instances[0]
ti.refresh_from_task(task)
ti.state = State.QUEUED
ti.hostname = get_hostname()
ti.pid = 1
session.commit()
job = LocalTaskJob(task_instance=ti, executor=MockExecutor(do_update=False))
job.heartrate = 2
heartbeat_records = []
job.heartbeat_callback = lambda session: heartbeat_records.append(job.latest_heartbeat)
job._execute()
assert len(heartbeat_records) > 2
for i in range(1, len(heartbeat_records)):
time1 = heartbeat_records[i - 1]
time2 = heartbeat_records[i]
            # Assert that the difference is small enough
delta = (time2 - time1).total_seconds()
assert abs(delta - job.heartrate) < 0.8
def test_mark_success_no_kill(self, caplog, get_test_dag, session):
"""
Test that ensures that mark_success in the UI doesn't cause
the task to fail, and that the task exits
"""
dag = get_test_dag("test_mark_state")
dr = dag.create_dagrun(
state=State.RUNNING,
execution_date=DEFAULT_DATE,
run_type=DagRunType.SCHEDULED,
session=session,
)
task = dag.get_task(task_id="test_mark_success_no_kill")
ti = dr.get_task_instance(task.task_id)
ti.refresh_from_task(task)
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True)
with timeout(30):
job1.run()
ti.refresh_from_db()
assert State.SUCCESS == ti.state
assert (
"State of this instance has been externally set to success. Terminating instance." in caplog.text
)
def test_localtaskjob_double_trigger(self):
dag = self.dagbag.dags.get("test_localtaskjob_double_trigger")
task = dag.get_task("test_localtaskjob_double_trigger_task")
session = settings.Session()
dag.clear()
dr = dag.create_dagrun(
run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session,
)
ti = dr.get_task_instance(task_id=task.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.merge(ti)
session.commit()
ti_run = TaskInstance(task=task, run_id=dr.run_id)
ti_run.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti_run, executor=SequentialExecutor())
with patch.object(StandardTaskRunner, "start", return_value=None) as mock_method:
job1.run()
mock_method.assert_not_called()
ti = dr.get_task_instance(task_id=task.task_id, session=session)
assert ti.pid == 1
assert ti.state == State.RUNNING
session.close()
@patch.object(StandardTaskRunner, "return_code")
@mock.patch("airflow.jobs.scheduler_job.Stats.incr", autospec=True)
def test_local_task_return_code_metric(self, mock_stats_incr, mock_return_code, create_dummy_dag):
_, task = create_dummy_dag("test_localtaskjob_code")
ti_run = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti_run.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti_run, executor=SequentialExecutor())
job1.id = 95
mock_return_code.side_effect = [None, -9, None]
with timeout(10):
job1.run()
mock_stats_incr.assert_has_calls(
[
mock.call("local_task_job.task_exit.95.test_localtaskjob_code.op1.-9"),
]
)
@pytest.mark.quarantined
@patch.object(StandardTaskRunner, "return_code")
def test_localtaskjob_maintain_heart_rate(self, mock_return_code, caplog, create_dummy_dag):
_, task = create_dummy_dag("test_localtaskjob_double_trigger")
ti_run = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti_run.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti_run, executor=SequentialExecutor())
time_start = time.time()
        # This should ensure that we heartbeat only once and exit at the
        # second loop in _execute(). Although the heartbeat exits at the second
        # loop, return_code is also called by the task_runner.terminate method
        # for proper cleanup, hence the extra value after 0.
mock_return_code.side_effect = [None, 0, None]
with timeout(10):
job1.run()
assert mock_return_code.call_count == 3
time_end = time.time()
assert self.mock_base_job_sleep.call_count == 1
assert job1.state == State.SUCCESS
# Consider we have patched sleep call, it should not be sleeping to
# keep up with the heart rate in other unpatched places
#
# We already make sure patched sleep call is only called once
assert time_end - time_start < job1.heartrate
assert "Task exited with return code 0" in caplog.text
def test_mark_failure_on_failure_callback(self, caplog, get_test_dag):
"""
Test that ensures that mark_failure in the UI fails
the task, and executes on_failure_callback
"""
dag = get_test_dag("test_mark_state")
with create_session() as session:
dr = dag.create_dagrun(
state=State.RUNNING,
execution_date=DEFAULT_DATE,
run_type=DagRunType.SCHEDULED,
session=session,
)
task = dag.get_task(task_id="test_mark_failure_externally")
ti = dr.get_task_instance(task.task_id)
ti.refresh_from_task(task)
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
with timeout(30):
# This should be _much_ shorter to run.
# If you change this limit, make the timeout in the callable above bigger
job1.run()
ti.refresh_from_db()
assert ti.state == State.FAILED
assert (
"State of this instance has been externally set to failed. Terminating instance."
) in caplog.text
def test_dagrun_timeout_logged_in_task_logs(self, caplog, get_test_dag):
"""
        Test that ensures that if a running task is externally skipped (due to
        a dagrun timeout), it is logged in the task logs.
"""
dag = get_test_dag("test_mark_state")
dag.dagrun_timeout = datetime.timedelta(microseconds=1)
with create_session() as session:
dr = dag.create_dagrun(
state=State.RUNNING,
start_date=DEFAULT_DATE,
execution_date=DEFAULT_DATE,
run_type=DagRunType.SCHEDULED,
session=session,
)
task = dag.get_task(task_id="test_mark_skipped_externally")
ti = dr.get_task_instance(task.task_id)
ti.refresh_from_task(task)
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
with timeout(30):
# This should be _much_ shorter to run.
# If you change this limit, make the timeout in the callable above bigger
job1.run()
ti.refresh_from_db()
assert ti.state == State.SKIPPED
assert "DagRun timed out after " in caplog.text
def test_failure_callback_called_by_airflow_run_raw_process(self, monkeypatch, tmp_path, get_test_dag):
"""
Ensure failure callback of a task is run by the airflow run --raw process
"""
callback_file = tmp_path.joinpath("callback.txt")
callback_file.touch()
monkeypatch.setenv("AIRFLOW_CALLBACK_FILE", str(callback_file))
dag = get_test_dag("test_on_failure_callback")
with create_session() as session:
dag.create_dagrun(
state=State.RUNNING,
execution_date=DEFAULT_DATE,
run_type=DagRunType.SCHEDULED,
session=session,
)
task = dag.get_task(task_id="test_on_failure_callback_task")
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
job1.run()
ti.refresh_from_db()
assert ti.state == State.FAILED # task exits with failure state
with open(callback_file) as f:
lines = f.readlines()
assert len(lines) == 1 # invoke once
assert lines[0].startswith(ti.key.primary)
m = re.match(r"^.+pid: (\d+)$", lines[0])
assert m, "pid expected in output."
assert os.getpid() != int(m.group(1))
def test_mark_success_on_success_callback(self, caplog, get_test_dag):
"""
Test that ensures that where a task is marked success in the UI
on_success_callback gets executed
"""
dag = get_test_dag("test_mark_state")
with create_session() as session:
dr = dag.create_dagrun(
state=State.RUNNING,
execution_date=DEFAULT_DATE,
run_type=DagRunType.SCHEDULED,
session=session,
)
task = dag.get_task(task_id="test_mark_success_no_kill")
ti = dr.get_task_instance(task.task_id)
ti.refresh_from_task(task)
job = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
with timeout(30):
job.run() # This should run fast because of the return_code=None
ti.refresh_from_db()
assert (
"State of this instance has been externally set to success. Terminating instance." in caplog.text
)
@pytest.mark.parametrize("signal_type", [signal.SIGTERM, signal.SIGKILL])
def test_process_os_signal_calls_on_failure_callback(
self, monkeypatch, tmp_path, get_test_dag, signal_type
):
"""
        Test that ensures that when a task is killed with SIGKILL or SIGTERM,
        on_failure_callback is not executed by LocalTaskJob itself.
        If the task is killed via SIGKILL, it is reaped as a zombie and the
        callback is executed afterwards.
"""
callback_file = tmp_path.joinpath("callback.txt")
# callback_file will be created by the task: bash_sleep
monkeypatch.setenv("AIRFLOW_CALLBACK_FILE", str(callback_file))
dag = get_test_dag("test_on_failure_callback")
with create_session() as session:
dag.create_dagrun(
state=State.RUNNING,
execution_date=DEFAULT_DATE,
run_type=DagRunType.SCHEDULED,
session=session,
)
task = dag.get_task(task_id="bash_sleep")
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
signal_sent_status = {"sent": False}
def get_ti_current_pid(ti) -> str:
with create_session() as session:
pid = (
session.query(TaskInstance.pid)
.filter(
TaskInstance.dag_id == ti.dag_id,
TaskInstance.task_id == ti.task_id,
TaskInstance.run_id == ti.run_id,
)
.one_or_none()
)
return pid[0]
def send_signal(ti, signal_sent, sig):
while True:
task_pid = get_ti_current_pid(
ti
) # get pid from the db, which is the pid of airflow run --raw
if (
task_pid and ti.current_state() == State.RUNNING and os.path.isfile(callback_file)
): # ensure task is running before sending sig
signal_sent["sent"] = True
os.kill(task_pid, sig)
break
time.sleep(1)
thread = threading.Thread(
name="signaler",
target=send_signal,
args=(ti, signal_sent_status, signal_type),
)
thread.daemon = True
thread.start()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
job1.run()
ti.refresh_from_db()
assert signal_sent_status["sent"]
if signal_type == signal.SIGTERM:
assert ti.state == State.FAILED
with open(callback_file) as f:
lines = f.readlines()
assert len(lines) == 1
assert lines[0].startswith(ti.key.primary)
m = re.match(r"^.+pid: (\d+)$", lines[0])
assert m, "pid expected in output."
pid = int(m.group(1))
assert os.getpid() != pid # ensures callback is NOT run by LocalTaskJob
assert ti.pid == pid # ensures callback is run by airflow run --raw (TaskInstance#_run_raw_task)
elif signal_type == signal.SIGKILL:
assert (
ti.state == State.RUNNING
) # task exits with running state, will be reaped as zombie by scheduler
with open(callback_file) as f:
lines = f.readlines()
assert len(lines) == 0
@pytest.mark.parametrize(
"conf, init_state, first_run_state, second_run_state, task_ids_to_run, error_message",
[
(
{("scheduler", "schedule_after_task_execution"): "True"},
{"A": State.QUEUED, "B": State.NONE, "C": State.NONE},
{"A": State.SUCCESS, "B": State.SCHEDULED, "C": State.NONE},
{"A": State.SUCCESS, "B": State.SUCCESS, "C": State.SCHEDULED},
["A", "B"],
"A -> B -> C, with fast-follow ON when A runs, B should be QUEUED. Same for B and C.",
),
(
{("scheduler", "schedule_after_task_execution"): "False"},
{"A": State.QUEUED, "B": State.NONE, "C": State.NONE},
{"A": State.SUCCESS, "B": State.NONE, "C": State.NONE},
None,
["A", "B"],
"A -> B -> C, with fast-follow OFF, when A runs, B shouldn't be QUEUED.",
),
(
{("scheduler", "schedule_after_task_execution"): "True"},
{"D": State.QUEUED, "E": State.NONE, "F": State.NONE, "G": State.NONE},
{"D": State.SUCCESS, "E": State.NONE, "F": State.NONE, "G": State.NONE},
None,
["D", "E"],
"G -> F -> E & D -> E, when D runs but F isn't QUEUED yet, E shouldn't be QUEUED.",
),
(
{("scheduler", "schedule_after_task_execution"): "True"},
{"H": State.QUEUED, "I": State.FAILED, "J": State.NONE},
{"H": State.SUCCESS, "I": State.FAILED, "J": State.UPSTREAM_FAILED},
None,
["H", "I"],
"H -> J & I -> J, when H is QUEUED but I has FAILED, J is marked UPSTREAM_FAILED.",
),
],
)
def test_fast_follow(
self,
conf,
init_state,
first_run_state,
second_run_state,
task_ids_to_run,
error_message,
get_test_dag,
):
with conf_vars(conf):
dag = get_test_dag(
"test_dagrun_fast_follow",
)
scheduler_job = SchedulerJob(subdir=os.devnull)
scheduler_job.dagbag.bag_dag(dag, root_dag=dag)
dag_run = dag.create_dagrun(run_id="test_dagrun_fast_follow", state=State.RUNNING)
ti_by_task_id = {}
with create_session() as session:
for task_id in init_state:
ti = TaskInstance(dag.get_task(task_id), run_id=dag_run.run_id, state=init_state[task_id])
session.merge(ti)
ti_by_task_id[task_id] = ti
ti = TaskInstance(task=dag.get_task(task_ids_to_run[0]), execution_date=dag_run.execution_date)
ti.refresh_from_db()
job1 = LocalTaskJob(
task_instance=ti,
ignore_ti_state=True,
executor=SequentialExecutor(),
)
job1.task_runner = StandardTaskRunner(job1)
job1.run()
self.validate_ti_states(dag_run, first_run_state, error_message)
if second_run_state:
ti = TaskInstance(
task=dag.get_task(task_ids_to_run[1]), execution_date=dag_run.execution_date
)
ti.refresh_from_db()
job2 = LocalTaskJob(
task_instance=ti,
ignore_ti_state=True,
executor=SequentialExecutor(),
)
job2.task_runner = StandardTaskRunner(job2)
job2.run()
self.validate_ti_states(dag_run, second_run_state, error_message)
if scheduler_job.processor_agent:
scheduler_job.processor_agent.end()
@conf_vars({("scheduler", "schedule_after_task_execution"): "True"})
def test_mini_scheduler_works_with_wait_for_upstream(self, caplog, get_test_dag):
dag = get_test_dag("test_dagrun_fast_follow")
dag.catchup = False
SerializedDagModel.write_dag(dag)
dr = dag.create_dagrun(run_id="test_1", state=State.RUNNING, execution_date=DEFAULT_DATE)
dr2 = dag.create_dagrun(
run_id="test_2", state=State.RUNNING, execution_date=DEFAULT_DATE + datetime.timedelta(hours=1)
)
task_k = dag.get_task("K")
task_l = dag.get_task("L")
with create_session() as session:
ti_k = TaskInstance(task_k, run_id=dr.run_id, state=State.SUCCESS)
            ti_l = TaskInstance(task_l, run_id=dr.run_id, state=State.SUCCESS)
            ti2_k = TaskInstance(task_k, run_id=dr2.run_id, state=State.NONE)
            ti2_l = TaskInstance(task_l, run_id=dr2.run_id, state=State.NONE)
            session.merge(ti_k)
            session.merge(ti_l)
session.merge(ti2_k)
session.merge(ti2_l)
job1 = LocalTaskJob(task_instance=ti2_k, ignore_ti_state=True, executor=SequentialExecutor())
job1.task_runner = StandardTaskRunner(job1)
job1.run()
ti2_k.refresh_from_db()
ti2_l.refresh_from_db()
assert ti2_k.state == State.SUCCESS
assert ti2_l.state == State.NONE
failed_deps = list(ti2_l.get_failed_dep_statuses())
assert len(failed_deps) == 1
assert failed_deps[0].dep_name == "Previous Dagrun State"
assert not failed_deps[0].passed
@pytest.mark.quarantined
def test_process_sigterm_works_with_retries(self, caplog, dag_maker):
"""
        Test that ensures that the task runner sets tasks to retry when it
        receives a SIGTERM
"""
# use shared memory value so we can properly track value change even if
# it's been updated across processes.
retry_callback_called = Value("i", 0)
def retry_callback(context):
with retry_callback_called.get_lock():
retry_callback_called.value += 1
assert context["dag_run"].dag_id == "test_mark_failure_2"
def task_function(ti):
while not ti.pid:
time.sleep(0.1)
os.kill(psutil.Process(os.getpid()).ppid(), signal.SIGTERM)
with dag_maker(dag_id="test_mark_failure_2"):
task = PythonOperator(
task_id="test_on_failure",
python_callable=task_function,
retries=1,
on_retry_callback=retry_callback,
)
dag_maker.create_dagrun()
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
settings.engine.dispose()
with timeout(10):
job1.run()
assert retry_callback_called.value == 1
assert "Received SIGTERM. Terminating subprocesses" in caplog.text
assert "Task exited with return code 143" in caplog.text
@pytest.fixture()
def clean_db_helper():
yield
db.clear_db_jobs()
db.clear_db_runs()
@pytest.mark.usefixtures("clean_db_helper")
@mock.patch("airflow.jobs.local_task_job.get_task_runner")
def test_number_of_queries_single_loop(mock_get_task_runner, dag_maker):
codes: list[int | None] = 9 * [None] + [0]
    mock_get_task_runner.return_value.return_code.side_effect = codes  # Mock's attribute is "side_effect"; "side_effects" is silently ignored
unique_prefix = str(uuid.uuid4())
with dag_maker(dag_id=f"{unique_prefix}_test_number_of_queries"):
task = EmptyOperator(task_id="test_state_succeeded1")
dr = dag_maker.create_dagrun(run_id=unique_prefix, state=State.NONE)
ti = dr.task_instances[0]
ti.refresh_from_task(task)
job = LocalTaskJob(task_instance=ti, executor=MockExecutor())
with assert_queries_count(18):
job.run()
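

# Hedged sketch, not part of the test suite: the wiring above relies on
# Mock.side_effect handing out one value per call, which is how return_code()
# simulates a task that stays alive for nine heartbeat loops and then exits 0.
def _side_effect_demo():
    runner = mock.Mock()
    runner.return_code.side_effect = 9 * [None] + [0]
    assert [runner.return_code() for _ in range(10)] == 9 * [None] + [0]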
|
{
"content_hash": "6e5e71e6ee222f6a8c27b02173bcedb8",
"timestamp": "",
"source": "github",
"line_count": 816,
"max_line_length": 110,
"avg_line_length": 38.474264705882355,
"alnum_prop": 0.5953495779582736,
"repo_name": "apache/airflow",
"id": "ecfb750fd12f8360526ba841896aa41580ae74ef",
"size": "32182",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/jobs/test_local_task_job.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "71458"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "172957"
},
{
"name": "JavaScript",
"bytes": "143915"
},
{
"name": "Jinja",
"bytes": "38911"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "23697738"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "211306"
},
{
"name": "TypeScript",
"bytes": "521019"
}
],
"symlink_target": ""
}
|
from collections import ChainMap
import os, argparse
defaults = {
'color': 'red',
'user': 'guest'
}
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--user')
parser.add_argument('-c', '--color')
namespace = parser.parse_args()
command_line_args = {k: v for k, v in vars(namespace).items() if v}
combined = ChainMap(command_line_args, os.environ, defaults)
print('color=%s' % combined['color'])
print('user=%s' % combined['user'])
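

# Hedged demonstration, not part of the original sample: ChainMap checks its
# maps left to right, so command-line values shadow environment variables,
# which in turn shadow the hard-coded defaults.
_combined = ChainMap({'color': 'blue'},                  # command line
                     {'color': 'green', 'user': 'env'},  # environment
                     {'color': 'red', 'user': 'guest'})  # defaults
assert _combined['color'] == 'blue'  # first map wins
assert _combined['user'] == 'env'    # falls through to the second map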
|
{
"content_hash": "acc43f9e5f0700c329bb363d30e934d3",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 67,
"avg_line_length": 26.705882352941178,
"alnum_prop": 0.6740088105726872,
"repo_name": "whyDK37/py_bootstrap",
"id": "1dc5035d2c787637df116b4185f043e573c98225",
"size": "502",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "samples/commonlib/use_chainmap.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16283"
}
],
"symlink_target": ""
}
|
import time
from datetime import datetime
from openerp.osv import osv
from openerp.report import report_sxw
from openerp.tools import amount_to_text_en
class payroll_advice_report(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(payroll_advice_report, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
'get_month': self.get_month,
'convert': self.convert,
'get_detail': self.get_detail,
'get_bysal_total': self.get_bysal_total,
})
self.context = context
def get_month(self, input_date):
payslip_pool = self.pool.get('hr.payslip')
res = {
'from_name': '', 'to_name': ''
}
slip_ids = payslip_pool.search(self.cr, self.uid, [('date_from','<=',input_date), ('date_to','>=',input_date)], context=self.context)
if slip_ids:
slip = payslip_pool.browse(self.cr, self.uid, slip_ids, context=self.context)[0]
from_date = datetime.strptime(slip.date_from, '%Y-%m-%d')
to_date = datetime.strptime(slip.date_to, '%Y-%m-%d')
            res['from_name'] = from_date.strftime('%d-%B-%Y')
            res['to_name'] = to_date.strftime('%d-%B-%Y')
return res
def convert(self, amount, cur):
        return amount_to_text_en.amount_to_text(amount, 'en', cur)
def get_bysal_total(self):
return self.total_bysal
def get_detail(self, line_ids):
result = []
self.total_bysal = 0.00
for l in line_ids:
res = {}
res.update({
'name': l.employee_id.name,
'acc_no': l.name,
'ifsc_code': l.ifsc_code,
'bysal': l.bysal,
'debit_credit': l.debit_credit,
})
self.total_bysal += l.bysal
result.append(res)
return result
class wrapped_report_payroll_advice(osv.AbstractModel):
_name = 'report.l10n_in_hr_payroll.report_payrolladvice'
_inherit = 'report.abstract_report'
_template = 'l10n_in_hr_payroll.report_payrolladvice'
_wrapped_report_class = payroll_advice_report
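

# Hedged usage sketch, not part of the original report: get_detail() yields
# one row per advice line and accumulates the running total, so the RML
# template (or a quick test) consumes it roughly like this -- 'advice' is a
# hypothetical browse record carrying line_ids:
#
#   parser = payroll_advice_report(cr, uid, 'payroll.advice', context)
#   rows = parser.get_detail(advice.line_ids)
#   total_in_words = parser.convert(parser.get_bysal_total(), 'INR')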
|
{
"content_hash": "f3b63d2772a1164ea3af4866c980476c",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 141,
"avg_line_length": 38.83606557377049,
"alnum_prop": 0.5555086534402701,
"repo_name": "vileopratama/vitech",
"id": "c045772f009421b23b962b1c89c02812af827b48",
"size": "2468",
"binary": false,
"copies": "48",
"ref": "refs/heads/master",
"path": "src/addons/l10n_in_hr_payroll/report/report_payroll_advice.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "CSS",
"bytes": "2125999"
},
{
"name": "HTML",
"bytes": "252393"
},
{
"name": "Java",
"bytes": "1840167"
},
{
"name": "JavaScript",
"bytes": "6176224"
},
{
"name": "Makefile",
"bytes": "19072"
},
{
"name": "Mako",
"bytes": "7659"
},
{
"name": "NSIS",
"bytes": "16782"
},
{
"name": "Python",
"bytes": "9438805"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "22312"
},
{
"name": "Vim script",
"bytes": "406"
},
{
"name": "XSLT",
"bytes": "11489"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.contrib.auth.models import User
from pytz import timezone
class Folder(models.Model):
color = models.TextField(max_length=20,blank=True)
user = models.ForeignKey(User)
title = models.TextField()
def as_json(self):
return dict(
id=self.id,
title=self.title,
color=self.color,
)
def __str__(self):
return self.title
class Subscription(models.Model):
last_crawled = models.CharField(max_length=200)
url = models.TextField()
site_url = models.TextField()
title = models.TextField()
favicon_url = models.TextField(blank=True)
def as_json(self):
return dict(
id=self.id,
url=self.url,
title=self.title,
favicon_url=self.favicon_url
)
def __str__(self):
return self.title
class SubscriptionUserRelation(models.Model):
user = models.ForeignKey(User)
folder = models.ForeignKey(Folder)
subscription = models.ForeignKey(Subscription)
def __str__(self):
return self.user.id
class SubscriptionItem(models.Model):
content = models.TextField()
published = models.DateTimeField()
title = models.TextField()
url = models.TextField()
thumbnail_url = models.TextField(blank=True)
thumbnail_processed = models.BooleanField(default=False)
is_read = models.BooleanField(default=False)
is_favorite = models.BooleanField(default=False)
subscription = models.ForeignKey(Subscription, related_name="item")
def as_json(self):
local_timezone = timezone("America/New_York")
return dict(
url=self.url,
title=self.title,
content=self.content,
content_short=self.content[:256] + "...",
published= str(self.published),
is_read=self.is_read,
is_favorite = self.is_favorite,
subscriptionTitle = self.subscription.title,
thumbnail_url = self.thumbnail_url
)
def __str__(self):
return self.title
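

# Hedged usage sketch, not part of the original app: the as_json methods above
# let a view serialize whole querysets in one expression.
def unread_items_as_json():
    import json
    return json.dumps([item.as_json()
                       for item in SubscriptionItem.objects.filter(is_read=False)])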
|
{
"content_hash": "cb71614877a8508b1583b14bb3d4506c",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 69,
"avg_line_length": 26.213333333333335,
"alnum_prop": 0.7131230925737538,
"repo_name": "leemac/JellyfishRss",
"id": "ace4fbe97d6d8848c3ae592860aa780b9a4c0d23",
"size": "1966",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rss/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "47216"
},
{
"name": "JavaScript",
"bytes": "97149"
},
{
"name": "Python",
"bytes": "52311"
}
],
"symlink_target": ""
}
|
from django.template import RequestContext
from django.shortcuts import render_to_response, get_object_or_404, redirect
from rapidsms.models import Contact, Connection
from contact.forms import NewContactForm, FreeSearchForm
from django.core.paginator import Paginator, InvalidPage
from django.http import Http404, HttpResponseRedirect
from rapidsms_httprouter.models import STATUS_CHOICES, DIRECTION_CHOICES, Message
from rapidsms.messages.outgoing import OutgoingMessage
from django.contrib.auth.decorators import login_required
from . import forms
from .forms import ReplyForm
from rapidsms_httprouter.router import get_router
from django.forms.util import ErrorList
def add_contact(request):
    if request.method == "POST":
        contact_form = NewContactForm(request.POST)
        if contact_form.is_valid():
            contact_form.save()
    # Always return a response: a Django view that falls through and returns
    # None raises an error, so non-POST requests are redirected too.
    return HttpResponseRedirect("/contact/index/")
def new_contact(request):
new_contact_form = NewContactForm()
return render_to_response('contact/partials/new_contact.html', {'new_contact_form':new_contact_form})
@login_required
def view_message_history(request, connection_id):
"""
This view lists all (sms message) correspondence between
RapidSMS and a User
"""
direction_choices = DIRECTION_CHOICES
status_choices = STATUS_CHOICES
reply_form = ReplyForm()
connection = get_object_or_404(Connection, pk=connection_id)
if connection.contact:
messages = Message.objects.filter(connection__contact=connection.contact)
else:
messages = Message.objects.filter(connection=connection)
messages = messages.order_by('-date')
total_incoming = messages.filter(direction="I").count()
total_outgoing = messages.filter(direction="O").count()
latest_message = None
if total_incoming:
latest_message = messages.filter(direction="I").latest('date')
if request.method == 'POST':
reply_form = ReplyForm(request.POST)
if reply_form.is_valid():
if Connection.objects.filter(identity=reply_form.cleaned_data['recipient']).count():
text = reply_form.cleaned_data.get('message')
conn = Connection.objects.filter(identity=reply_form.cleaned_data['recipient'])[0]
in_response_to = reply_form.cleaned_data['in_response_to']
outgoing = OutgoingMessage(conn, text)
get_router().handle_outgoing(outgoing, in_response_to)
return redirect("/contact/%d/message_history/" % connection.pk)
else:
                reply_form.errors.setdefault('recipient', ErrorList())
reply_form.errors['recipient'].append("This number isn't in the system")
return render_to_response("contact/message_history.html", {
"messages": messages,
"stats_latest_message": latest_message,
"stats_total_incoming": total_incoming,
"stats_total_outgoing": total_outgoing,
"connection": connection,
"direction_choices": direction_choices,
"status_choices": status_choices,
"replyForm": reply_form
}, context_instance=RequestContext(request))
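

# Hedged sketch, not part of the original module: the safe pattern for
# attaching a manual validation error (used in view_message_history above) is
# to create the ErrorList before appending to it.
def add_form_error(form, field, message):
    form.errors.setdefault(field, ErrorList())
    form.errors[field].append(message)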
|
{
"content_hash": "f8be1b03dcb47bc396f542a782e1222c",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 105,
"avg_line_length": 41.93421052631579,
"alnum_prop": 0.692187009727016,
"repo_name": "unicefuganda/edtrac",
"id": "a498b15bf1626c3b542b53ba819c04c8bd9522fd",
"size": "3187",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "edtrac_project/rapidsms_contact/contact/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "277434"
},
{
"name": "JavaScript",
"bytes": "190346"
},
{
"name": "Python",
"bytes": "2621572"
},
{
"name": "Shell",
"bytes": "4755"
}
],
"symlink_target": ""
}
|
from random import randint
exec(open('parse.py').read())
exec(open('interpret.py').read())
exec(open('optimize.py').read())
exec(open('machine.py').read())
Leaf = str
Node = dict
def freshStr():
return str(randint(0,10000000))
def compileExpression(env, e, heap):
if type(e) == Node:
for label in e:
children = e[label]
if label == 'Number':
n = children[0]
heap = heap + 1
return (['set ' + str(heap) + ' ' + str(n)], heap, heap)
pass # Complete 'True', 'False', 'Variable', 'Element', and 'Plus' cases for Problem #4.
# Assumption (the original left the default as ??? for Problem #4): starting
# the heap at 5 keeps the first allocation (heap + 1) above the output
# register 5 that the 'Print' case copies into.
def compileProgram(env, s, heap = 5): # Set initial heap default address.
if type(s) == Leaf:
if s == 'End':
return (env, [], heap)
if type(s) == Node:
for label in s:
children = s[label]
if label == 'Print':
[e, p] = children
(instsE, addr, heap) = compileExpression(env, e, heap)
(env, instsP, heap) = compileProgram(env, p, heap)
return (env, instsE + copy(addr, 5) + instsP, heap)
pass # Complete 'Assign' and 'Loop' cases for Problem #4.
def compile(s):
p = tokenizeAndParse(s)
# Add calls to type checking and optimization algorithms.
(env, insts, heap) = compileProgram({}, p)
return insts
def compileAndSimulate(s):
return simulate(compile(s))
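

# Hedged illustration, not the course solution: the 'Number' case above shows
# the pattern every remaining case follows -- compile the subtrees, bump the
# heap for a fresh cell, emit instructions, and return (insts, addr, heap).
# Assuming a hypothetical machine instruction "add DST SRC1 SRC2" (machine.py
# is not shown here), a 'Plus' case could take this shape:
#
#   if label == 'Plus':
#       [e1, e2] = children
#       (insts1, addr1, heap) = compileExpression(env, e1, heap)
#       (insts2, addr2, heap) = compileExpression(env, e2, heap)
#       heap = heap + 1
#       add = 'add ' + str(heap) + ' ' + str(addr1) + ' ' + str(addr2)
#       return (insts1 + insts2 + [add], heap, heap)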
#eof
|
{
"content_hash": "1b943e48ed874f3284bc977e63fe0d78",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 92,
"avg_line_length": 28.03921568627451,
"alnum_prop": 0.5545454545454546,
"repo_name": "lapets/pylium",
"id": "df4554f9b3d2dd557c422b8b39fa52b4e8a53990",
"size": "1774",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "course/320-2015-fal/midterm/compile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "490"
},
{
"name": "Shell",
"bytes": "87"
}
],
"symlink_target": ""
}
|
"""Tests for constraints.py."""
import unittest
from compiler.front_end import attributes
from compiler.front_end import constraints
from compiler.front_end import glue
from compiler.front_end import test_util
from compiler.util import error
from compiler.util import ir_util
def _make_ir_from_emb(emb_text, name="m.emb"):
ir, unused_debug_info, errors = glue.parse_emboss_file(
name,
test_util.dict_file_reader({name: emb_text}),
stop_before_step="check_constraints")
assert not errors, repr(errors)
return ir
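

# Hedged usage note, not part of the original tests: _make_ir_from_emb parses
# an .emb snippet and stops just before the constraint-checking pass, so every
# test below follows the same shape --
#
#   ir = _make_ir_from_emb("struct Foo:\n  0 [+1] UInt:8[1] one_byte\n")
#   self.assertEqual([], constraints.check_constraints(ir))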
class ConstraintsTest(unittest.TestCase):
"""Tests constraints.check_constraints and helpers."""
def test_error_on_missing_inner_array_size(self):
ir = _make_ir_from_emb("struct Foo:\n"
" 0 [+1] UInt:8[][1] one_byte\n")
error_array = ir.module[0].type[0].structure.field[0].type.array_type
self.assertEqual([[
error.error(
"m.emb",
error_array.base_type.array_type.element_count.source_location,
"Array dimensions can only be omitted for the outermost dimension.")
]], error.filter_errors(constraints.check_constraints(ir)))
def test_no_error_on_ok_array_size(self):
ir = _make_ir_from_emb("struct Foo:\n"
" 0 [+1] UInt:8[1][1] one_byte\n")
self.assertEqual([], constraints.check_constraints(ir))
def test_no_error_on_ok_missing_outer_array_size(self):
ir = _make_ir_from_emb("struct Foo:\n"
" 0 [+1] UInt:8[1][] one_byte\n")
self.assertEqual([], constraints.check_constraints(ir))
def test_no_error_on_dynamically_sized_struct_in_dynamically_sized_field(
self):
ir = _make_ir_from_emb("struct Foo:\n"
" 0 [+1] UInt size\n"
" 1 [+size] Bar bar\n"
"struct Bar:\n"
" 0 [+1] UInt size\n"
" 1 [+size] UInt:8[] payload\n")
self.assertEqual([], constraints.check_constraints(ir))
def test_no_error_on_dynamically_sized_struct_in_statically_sized_field(self):
ir = _make_ir_from_emb("struct Foo:\n"
" 0 [+10] Bar bar\n"
"struct Bar:\n"
" 0 [+1] UInt size\n"
" 1 [+size] UInt:8[] payload\n")
self.assertEqual([], constraints.check_constraints(ir))
def test_no_error_non_fixed_size_outer_array_dimension(self):
ir = _make_ir_from_emb("struct Foo:\n"
" 0 [+1] UInt size\n"
" 1 [+size] UInt:8[1][size-1] one_byte\n")
self.assertEqual([], constraints.check_constraints(ir))
def test_error_non_fixed_size_inner_array_dimension(self):
ir = _make_ir_from_emb("struct Foo:\n"
" 0 [+1] UInt size\n"
" 1 [+size] UInt:8[size-1][1] one_byte\n")
error_array = ir.module[0].type[0].structure.field[1].type.array_type
self.assertEqual([[
error.error(
"m.emb",
error_array.base_type.array_type.element_count.source_location,
"Inner array dimensions must be constant.")
]], error.filter_errors(constraints.check_constraints(ir)))
def test_error_non_constant_inner_array_dimensions(self):
ir = _make_ir_from_emb("struct Foo:\n"
" 0 [+1] Bar[1] one_byte\n"
# There is no dynamically-sized byte-oriented type in
# the Prelude, so this test has to make its own.
"external Bar:\n"
" [is_integer: true]\n"
" [addressable_unit_size: 8]\n")
error_array = ir.module[0].type[0].structure.field[0].type.array_type
self.assertEqual([[
error.error(
"m.emb", error_array.base_type.atomic_type.source_location,
"Array elements must be fixed size.")
]], error.filter_errors(constraints.check_constraints(ir)))
def test_error_dynamically_sized_array_elements(self):
ir = _make_ir_from_emb('[$default byte_order: "LittleEndian"]\n'
"struct Foo:\n"
" 0 [+1] Bar[1] bar\n"
"struct Bar:\n"
" 0 [+1] UInt size\n"
" 1 [+size] UInt:8[] payload\n")
error_array = ir.module[0].type[0].structure.field[0].type.array_type
self.assertEqual([[
error.error(
"m.emb", error_array.base_type.atomic_type.source_location,
"Array elements must be fixed size.")
]], error.filter_errors(constraints.check_constraints(ir)))
def test_field_too_small_for_type(self):
ir = _make_ir_from_emb('[$default byte_order: "LittleEndian"]\n'
"struct Foo:\n"
" 0 [+1] Bar bar\n"
"struct Bar:\n"
" 0 [+2] UInt value\n")
error_type = ir.module[0].type[0].structure.field[0].type
self.assertEqual([[
error.error(
"m.emb", error_type.source_location,
"Fixed-size type 'Bar' cannot be placed in field of size 8 bits; "
"requires 16 bits.")
]], error.filter_errors(constraints.check_constraints(ir)))
def test_dynamically_sized_field_always_too_small_for_type(self):
ir = _make_ir_from_emb('[$default byte_order: "LittleEndian"]\n'
"struct Foo:\n"
" 0 [+1] bits:\n"
" 0 [+1] UInt x\n"
" 0 [+x] Bar bar\n"
"struct Bar:\n"
" 0 [+2] UInt value\n")
error_type = ir.module[0].type[0].structure.field[2].type
self.assertEqual([[
error.error(
"m.emb", error_type.source_location,
"Field of maximum size 8 bits cannot hold fixed-size type 'Bar', "
"which requires 16 bits.")
]], error.filter_errors(constraints.check_constraints(ir)))
def test_struct_field_too_big_for_type(self):
ir = _make_ir_from_emb("struct Foo:\n"
" 0 [+2] Byte double_byte\n"
"struct Byte:\n"
" 0 [+1] UInt b\n")
error_type = ir.module[0].type[0].structure.field[0].type
self.assertEqual([[
error.error(
"m.emb", error_type.source_location,
"Fixed-size type 'Byte' cannot be placed in field of size 16 bits; "
"requires 8 bits.")
]], error.filter_errors(constraints.check_constraints(ir)))
def test_bits_field_too_big_for_type(self):
ir = _make_ir_from_emb("struct Foo:\n"
" 0 [+9] UInt uint72\n"
' [byte_order: "LittleEndian"]\n')
error_field = ir.module[0].type[0].structure.field[0]
uint_type = ir_util.find_object(error_field.type.atomic_type.reference, ir)
uint_requirements = ir_util.get_attribute(uint_type.attribute,
attributes.STATIC_REQUIREMENTS)
self.assertEqual([[
error.error("m.emb", error_field.source_location,
"Requirements of UInt not met."),
error.note("", uint_requirements.source_location,
"Requirements specified here."),
]], error.filter_errors(constraints.check_constraints(ir)))
def test_field_type_not_allowed_in_bits(self):
ir = _make_ir_from_emb('[$default byte_order: "LittleEndian"]\n'
"bits Foo:\n"
" 0 [+16] Bar bar\n"
"external Bar:\n"
" [addressable_unit_size: 8]\n")
error_type = ir.module[0].type[0].structure.field[0].type
self.assertEqual([[
error.error(
"m.emb", error_type.source_location,
"Byte-oriented type 'Bar' cannot be used in a bits field.")
]], error.filter_errors(constraints.check_constraints(ir)))
def test_arrays_allowed_in_bits(self):
ir = _make_ir_from_emb("bits Foo:\n"
" 0 [+16] Flag[16] bar\n")
self.assertEqual([], constraints.check_constraints(ir))
def test_oversized_anonymous_bit_field(self):
ir = _make_ir_from_emb('[$default byte_order: "LittleEndian"]\n'
"struct Foo:\n"
" 0 [+4] bits:\n"
" 0 [+8] UInt field\n")
self.assertEqual([], constraints.check_constraints(ir))
def test_undersized_anonymous_bit_field(self):
ir = _make_ir_from_emb('[$default byte_order: "LittleEndian"]\n'
"struct Foo:\n"
" 0 [+1] bits:\n"
" 0 [+32] UInt field\n")
error_type = ir.module[0].type[0].structure.field[0].type
self.assertEqual([[
error.error(
"m.emb", error_type.source_location,
"Fixed-size anonymous type cannot be placed in field of size 8 "
"bits; requires 32 bits.")
]], error.filter_errors(constraints.check_constraints(ir)))
def test_reserved_field_name(self):
ir = _make_ir_from_emb('[$default byte_order: "LittleEndian"]\n'
"struct Foo:\n"
" 0 [+8] UInt restrict\n")
error_name = ir.module[0].type[0].structure.field[0].name.name
self.assertEqual([[
error.error(
"m.emb", error_name.source_location,
"C reserved word may not be used as a field name.")
]], error.filter_errors(constraints.check_constraints(ir)))
def test_reserved_type_name(self):
ir = _make_ir_from_emb("struct False:\n"
" 0 [+1] UInt foo\n")
error_name = ir.module[0].type[0].name.name
self.assertEqual([[
error.error(
"m.emb", error_name.source_location,
"Python 3 reserved word may not be used as a type name.")
]], error.filter_errors(constraints.check_constraints(ir)))
def test_reserved_enum_name(self):
ir = _make_ir_from_emb("enum Foo:\n"
" NULL = 1\n")
error_name = ir.module[0].type[0].enumeration.value[0].name.name
self.assertEqual([[
error.error(
"m.emb", error_name.source_location,
"C reserved word may not be used as an enum name.")
]], error.filter_errors(constraints.check_constraints(ir)))
def test_bits_type_in_struct_array(self):
ir = _make_ir_from_emb('[$default byte_order: "LittleEndian"]\n'
"struct Foo:\n"
" 0 [+10] UInt:8[10] array\n")
self.assertEqual([], constraints.check_constraints(ir))
def test_bits_type_in_bits_array(self):
ir = _make_ir_from_emb("bits Foo:\n"
" 0 [+10] UInt:8[10] array\n")
self.assertEqual([], constraints.check_constraints(ir))
def test_explicit_size_too_small(self):
ir = _make_ir_from_emb("bits Foo:\n"
" 0 [+0] UInt:0 zero_bit\n")
error_field = ir.module[0].type[0].structure.field[0]
uint_type = ir_util.find_object(error_field.type.atomic_type.reference, ir)
uint_requirements = ir_util.get_attribute(uint_type.attribute,
attributes.STATIC_REQUIREMENTS)
self.assertEqual([[
error.error("m.emb", error_field.source_location,
"Requirements of UInt not met."),
error.note("", uint_requirements.source_location,
"Requirements specified here."),
]], error.filter_errors(constraints.check_constraints(ir)))
def test_explicit_enumeration_size_too_small(self):
ir = _make_ir_from_emb('[$default byte_order: "BigEndian"]\n'
"bits Foo:\n"
" 0 [+0] Bar:0 zero_bit\n"
"enum Bar:\n"
" BAZ = 0\n")
error_type = ir.module[0].type[0].structure.field[0].type
self.assertEqual([[
error.error("m.emb", error_type.source_location,
"Enumeration type 'Bar' cannot be 0 bits; type 'Bar' "
"must be between 1 and 64 bits, inclusive."),
]], error.filter_errors(constraints.check_constraints(ir)))
def test_explicit_size_too_big_for_field(self):
ir = _make_ir_from_emb("bits Foo:\n"
" 0 [+8] UInt:32 thirty_two_bit\n")
error_type = ir.module[0].type[0].structure.field[0].type
self.assertEqual([[
error.error(
"m.emb", error_type.source_location,
"Fixed-size type 'UInt:32' cannot be placed in field of size 8 "
"bits; requires 32 bits.")
]], error.filter_errors(constraints.check_constraints(ir)))
def test_explicit_size_too_small_for_field(self):
ir = _make_ir_from_emb("bits Foo:\n"
" 0 [+64] UInt:32 thirty_two_bit\n")
error_type = ir.module[0].type[0].structure.field[0].type
self.assertEqual([[
error.error("m.emb", error_type.source_location,
"Fixed-size type 'UInt:32' cannot be placed in field of "
"size 64 bits; requires 32 bits.")
]], error.filter_errors(constraints.check_constraints(ir)))
def test_explicit_size_too_big(self):
ir = _make_ir_from_emb("struct Foo:\n"
" 0 [+16] UInt:128 one_twenty_eight_bit\n"
' [byte_order: "LittleEndian"]\n')
error_field = ir.module[0].type[0].structure.field[0]
uint_type = ir_util.find_object(error_field.type.atomic_type.reference, ir)
uint_requirements = ir_util.get_attribute(uint_type.attribute,
attributes.STATIC_REQUIREMENTS)
self.assertEqual([[
error.error("m.emb", error_field.source_location,
"Requirements of UInt not met."),
error.note("", uint_requirements.source_location,
"Requirements specified here."),
]], error.filter_errors(constraints.check_constraints(ir)))
def test_explicit_enumeration_size_too_big(self):
ir = _make_ir_from_emb('[$default byte_order: "BigEndian"]\n'
"struct Foo:\n"
" 0 [+9] Bar seventy_two_bit\n"
"enum Bar:\n"
" BAZ = 0\n")
error_type = ir.module[0].type[0].structure.field[0].type
self.assertEqual([[
error.error("m.emb", error_type.source_location,
"Enumeration type 'Bar' cannot be 72 bits; type 'Bar' " +
"must be between 1 and 64 bits, inclusive."),
]], error.filter_errors(constraints.check_constraints(ir)))
def test_explicit_enumeration_size_too_big_for_small_enum(self):
ir = _make_ir_from_emb('[$default byte_order: "BigEndian"]\n'
"struct Foo:\n"
" 0 [+8] Bar sixty_four_bit\n"
"enum Bar:\n"
" [maximum_bits: 63]\n"
" BAZ = 0\n")
error_type = ir.module[0].type[0].structure.field[0].type
self.assertEqual([[
error.error("m.emb", error_type.source_location,
"Enumeration type 'Bar' cannot be 64 bits; type 'Bar' " +
"must be between 1 and 63 bits, inclusive."),
]], error.filter_errors(constraints.check_constraints(ir)))
def test_explicit_size_on_fixed_size_type(self):
ir = _make_ir_from_emb("struct Foo:\n"
" 0 [+1] Byte:8 one_byte\n"
"struct Byte:\n"
" 0 [+1] UInt b\n")
self.assertEqual([], constraints.check_constraints(ir))
def test_explicit_size_too_small_on_fixed_size_type(self):
ir = _make_ir_from_emb("struct Foo:\n"
" 0 [+0] Byte:0 null_byte\n"
"struct Byte:\n"
" 0 [+1] UInt b\n")
error_type = ir.module[0].type[0].structure.field[0].type
self.assertEqual([[
error.error(
"m.emb", error_type.size_in_bits.source_location,
"Explicit size of 0 bits does not match fixed size (8 bits) of "
"type 'Byte'."),
error.note("m.emb", ir.module[0].type[1].source_location,
"Size specified here."),
]], error.filter_errors(constraints.check_constraints(ir)))
def test_explicit_size_too_big_on_fixed_size_type(self):
ir = _make_ir_from_emb("struct Foo:\n"
" 0 [+2] Byte:16 double_byte\n"
"struct Byte:\n"
" 0 [+1] UInt b\n")
error_type = ir.module[0].type[0].structure.field[0].type
self.assertEqual([[
error.error(
"m.emb", error_type.size_in_bits.source_location,
"Explicit size of 16 bits does not match fixed size (8 bits) of "
"type 'Byte'."),
error.note(
"m.emb", ir.module[0].type[1].source_location,
"Size specified here."),
]], error.filter_errors(constraints.check_constraints(ir)))
def test_explicit_size_ignored_on_variable_size_type(self):
ir = _make_ir_from_emb('[$default byte_order: "LittleEndian"]\n'
"struct Foo:\n"
" 0 [+1] UInt n\n"
" 1 [+n] UInt:8[] d\n"
"struct Bar:\n"
" 0 [+10] Foo:80 foo\n")
self.assertEqual([], constraints.check_constraints(ir))
def test_fixed_size_type_in_dynamically_sized_field(self):
ir = _make_ir_from_emb("struct Foo:\n"
" 0 [+1] UInt bar\n"
" 0 [+bar] Byte one_byte\n"
"struct Byte:\n"
" 0 [+1] UInt b\n")
self.assertEqual([], constraints.check_constraints(ir))
def test_enum_in_dynamically_sized_field(self):
ir = _make_ir_from_emb('[$default byte_order: "BigEndian"]\n'
"struct Foo:\n"
" 0 [+1] UInt bar\n"
" 0 [+bar] Baz baz\n"
"enum Baz:\n"
" QUX = 0\n")
error_type = ir.module[0].type[0].structure.field[1].type
self.assertEqual(
[[
error.error("m.emb", error_type.source_location,
"Enumeration type 'Baz' cannot be placed in a "
"dynamically-sized field.")
]],
error.filter_errors(constraints.check_constraints(ir)))
def test_enum_value_too_high(self):
ir = _make_ir_from_emb('[$default byte_order: "LittleEndian"]\n'
"enum Foo:\n"
" HIGH = 0x1_0000_0000_0000_0000\n")
error_value = ir.module[0].type[0].enumeration.value[0].value
self.assertEqual([
[error.error(
"m.emb", error_value.source_location,
# TODO(bolms): Try to print numbers like 2**64 in hex? (I.e., if a
# number is a round number in hex, but not in decimal, print in
# hex?)
"Value 18446744073709551616 is out of range for 64-bit unsigned " +
"enumeration.")]
], constraints.check_constraints(ir))
def test_enum_value_too_low(self):
ir = _make_ir_from_emb('[$default byte_order: "LittleEndian"]\n'
"enum Foo:\n"
" LOW = -0x8000_0000_0000_0001\n")
error_value = ir.module[0].type[0].enumeration.value[0].value
self.assertEqual([
[error.error(
"m.emb", error_value.source_location,
"Value -9223372036854775809 is out of range for 64-bit signed " +
"enumeration.")]
], constraints.check_constraints(ir))
def test_enum_value_too_wide(self):
ir = _make_ir_from_emb('[$default byte_order: "LittleEndian"]\n'
"enum Foo:\n"
" LOW = -1\n"
" HIGH = 0x8000_0000_0000_0000\n")
error_value = ir.module[0].type[0].enumeration.value[1].value
self.assertEqual([[
error.error(
"m.emb", error_value.source_location,
"Value 9223372036854775808 is out of range for 64-bit signed " +
"enumeration.")
]], error.filter_errors(constraints.check_constraints(ir)))
def test_enum_value_too_wide_unsigned_error_message(self):
ir = _make_ir_from_emb('[$default byte_order: "LittleEndian"]\n'
"enum Foo:\n"
" LOW = -2\n"
" LOW2 = -1\n"
" HIGH = 0x8000_0000_0000_0000\n")
error_value = ir.module[0].type[0].enumeration.value[2].value
self.assertEqual([[
error.error(
"m.emb", error_value.source_location,
"Value 9223372036854775808 is out of range for 64-bit signed " +
"enumeration.")
]], error.filter_errors(constraints.check_constraints(ir)))
def test_enum_value_too_wide_small_size_error_message(self):
ir = _make_ir_from_emb('[$default byte_order: "LittleEndian"]\n'
"enum Foo:\n"
" [maximum_bits: 8]\n"
" HIGH = 0x100\n")
error_value = ir.module[0].type[0].enumeration.value[0].value
self.assertEqual([[
error.error(
"m.emb", error_value.source_location,
"Value 256 is out of range for 8-bit unsigned enumeration.")
]], error.filter_errors(constraints.check_constraints(ir)))
def test_enum_value_too_wide_small_size_signed_error_message(self):
ir = _make_ir_from_emb('[$default byte_order: "LittleEndian"]\n'
"enum Foo:\n"
" [maximum_bits: 8]\n"
" [is_signed: true]\n"
" HIGH = 0x80\n")
error_value = ir.module[0].type[0].enumeration.value[0].value
self.assertEqual([[
error.error(
"m.emb", error_value.source_location,
"Value 128 is out of range for 8-bit signed enumeration.")
]], error.filter_errors(constraints.check_constraints(ir)))
def test_enum_value_too_wide_multiple(self):
ir = _make_ir_from_emb('[$default byte_order: "LittleEndian"]\n'
"enum Foo:\n"
" LOW = -2\n"
" LOW2 = -1\n"
" HIGH = 0x8000_0000_0000_0000\n"
" HIGH2 = 0x8000_0000_0000_0001\n")
error_value = ir.module[0].type[0].enumeration.value[2].value
error_value2 = ir.module[0].type[0].enumeration.value[3].value
self.assertEqual([
[error.error(
"m.emb", error_value.source_location,
"Value 9223372036854775808 is out of range for 64-bit signed " +
"enumeration.")],
[error.error(
"m.emb", error_value2.source_location,
"Value 9223372036854775809 is out of range for 64-bit signed " +
"enumeration.")]
], error.filter_errors(constraints.check_constraints(ir)))
def test_enum_value_too_wide_multiple_signed_error_message(self):
ir = _make_ir_from_emb('[$default byte_order: "LittleEndian"]\n'
"enum Foo:\n"
" LOW = -3\n"
" LOW2 = -2\n"
" LOW3 = -1\n"
" HIGH = 0x8000_0000_0000_0000\n"
" HIGH2 = 0x8000_0000_0000_0001\n")
error_value = ir.module[0].type[0].enumeration.value[3].value
error_value2 = ir.module[0].type[0].enumeration.value[4].value
self.assertEqual([
[error.error(
"m.emb", error_value.source_location,
"Value 9223372036854775808 is out of range for 64-bit signed "
"enumeration.")],
[error.error(
"m.emb", error_value2.source_location,
"Value 9223372036854775809 is out of range for 64-bit signed "
"enumeration.")]
], error.filter_errors(constraints.check_constraints(ir)))
def test_enum_value_mixed_error_message(self):
ir = _make_ir_from_emb('[$default byte_order: "LittleEndian"]\n'
"enum Foo:\n"
" LOW = -1\n"
" HIGH = 0x8000_0000_0000_0000\n"
" HIGH2 = 0x1_0000_0000_0000_0000\n")
error_value1 = ir.module[0].type[0].enumeration.value[1].value
error_value2 = ir.module[0].type[0].enumeration.value[2].value
self.assertEqual([
[error.error(
"m.emb", error_value1.source_location,
"Value 9223372036854775808 is out of range for 64-bit signed " +
"enumeration.")],
[error.error(
"m.emb", error_value2.source_location,
"Value 18446744073709551616 is out of range for 64-bit signed " +
"enumeration.")]
], error.filter_errors(constraints.check_constraints(ir)))
def test_enum_value_explicitly_signed_error_message(self):
ir = _make_ir_from_emb('[$default byte_order: "LittleEndian"]\n'
"enum Foo:\n"
" [is_signed: true]\n"
" HIGH = 0x8000_0000_0000_0000\n"
" HIGH2 = 0x1_0000_0000_0000_0000\n")
error_value0 = ir.module[0].type[0].enumeration.value[0].value
error_value1 = ir.module[0].type[0].enumeration.value[1].value
self.assertEqual([
[error.error(
"m.emb", error_value0.source_location,
"Value 9223372036854775808 is out of range for 64-bit signed " +
"enumeration.")],
[error.error(
"m.emb", error_value1.source_location,
"Value 18446744073709551616 is out of range for 64-bit signed " +
"enumeration.")]
], error.filter_errors(constraints.check_constraints(ir)))
def test_enum_value_explicitly_unsigned_error_message(self):
ir = _make_ir_from_emb('[$default byte_order: "LittleEndian"]\n'
"enum Foo:\n"
" [is_signed: false]\n"
" LOW = -1\n"
" HIGH = 0x8000_0000_0000_0000\n"
" HIGH2 = 0x1_0000_0000_0000_0000\n")
error_value0 = ir.module[0].type[0].enumeration.value[0].value
error_value2 = ir.module[0].type[0].enumeration.value[2].value
self.assertEqual([
[error.error(
"m.emb", error_value0.source_location,
"Value -1 is out of range for 64-bit unsigned enumeration.")],
[error.error(
"m.emb", error_value2.source_location,
"Value 18446744073709551616 is out of range for 64-bit unsigned " +
"enumeration.")]
], error.filter_errors(constraints.check_constraints(ir)))
def test_explicit_non_byte_size_array_element(self):
ir = _make_ir_from_emb('[$default byte_order: "LittleEndian"]\n'
"struct Foo:\n"
" 0 [+2] UInt:4[4] nibbles\n")
error_type = ir.module[0].type[0].structure.field[0].type.array_type
self.assertEqual([
[error.error(
"m.emb", error_type.base_type.source_location,
"Array elements in structs must have sizes which are a multiple of "
"8 bits.")]
], error.filter_errors(constraints.check_constraints(ir)))
def test_implicit_non_byte_size_array_element(self):
ir = _make_ir_from_emb('[$default byte_order: "LittleEndian"]\n'
"bits Nibble:\n"
" 0 [+4] UInt nibble\n"
"struct Foo:\n"
" 0 [+2] Nibble[4] nibbles\n")
error_type = ir.module[0].type[1].structure.field[0].type.array_type
self.assertEqual([
[error.error(
"m.emb", error_type.base_type.source_location,
"Array elements in structs must have sizes which are a multiple of "
"8 bits.")]
], error.filter_errors(constraints.check_constraints(ir)))
def test_bits_must_be_fixed_size(self):
ir = _make_ir_from_emb("bits Dynamic:\n"
" 0 [+3] UInt x\n"
" 3 [+3 * x] UInt:3[x] a\n")
error_type = ir.module[0].type[0]
self.assertEqual([
[error.error("m.emb", error_type.source_location,
"`bits` types must be fixed size.")]
], error.filter_errors(constraints.check_constraints(ir)))
def test_bits_must_be_small(self):
ir = _make_ir_from_emb("bits Big:\n"
" 0 [+64] UInt x\n"
" 64 [+1] UInt y\n")
error_type = ir.module[0].type[0]
self.assertEqual([
[error.error("m.emb", error_type.source_location,
"`bits` types must be 64 bits or smaller.")]
], error.filter_errors(constraints.check_constraints(ir)))
def test_constant_expressions_must_be_small(self):
ir = _make_ir_from_emb('[$default byte_order: "LittleEndian"]\n'
"struct Foo:\n"
" 0 [+8] UInt x\n"
" if x < 0x1_0000_0000_0000_0000:\n"
" 8 [+1] UInt y\n")
condition = ir.module[0].type[0].structure.field[1].existence_condition
error_location = condition.function.args[1].source_location
self.assertEqual([
[error.error(
"m.emb", error_location,
"Constant value {} of expression cannot fit in a 64-bit signed or "
"unsigned integer.".format(2**64))]
], error.filter_errors(constraints.check_constraints(ir)))
def test_variable_expression_out_of_range_for_uint64(self):
ir = _make_ir_from_emb('[$default byte_order: "LittleEndian"]\n'
"struct Foo:\n"
" 0 [+8] UInt x\n"
" if x + 1 < 0xffff_ffff_ffff_ffff:\n"
" 8 [+1] UInt y\n")
condition = ir.module[0].type[0].structure.field[1].existence_condition
error_location = condition.function.args[0].source_location
self.assertEqual([
[error.error(
"m.emb", error_location,
"Potential range of expression is {} to {}, which cannot fit in a "
"64-bit signed or unsigned integer.".format(1, 2**64))]
], error.filter_errors(constraints.check_constraints(ir)))
def test_variable_expression_out_of_range_for_int64(self):
ir = _make_ir_from_emb('[$default byte_order: "LittleEndian"]\n'
"struct Foo:\n"
" 0 [+8] UInt x\n"
" if x - 0x8000_0000_0000_0001 < 0:\n"
" 8 [+1] UInt y\n")
condition = ir.module[0].type[0].structure.field[1].existence_condition
error_location = condition.function.args[0].source_location
self.assertEqual([
[error.error(
"m.emb", error_location,
"Potential range of expression is {} to {}, which cannot fit in a "
"64-bit signed or unsigned integer.".format(-(2**63) - 1,
2**63 - 2))]
], error.filter_errors(constraints.check_constraints(ir)))
def test_requires_expression_out_of_range_for_uint64(self):
ir = _make_ir_from_emb('[$default byte_order: "LittleEndian"]\n'
"struct Foo:\n"
" 0 [+8] UInt x\n"
" [requires: this * 2 < 0x1_0000]\n")
attribute_list = ir.module[0].type[0].structure.field[0].attribute
error_arg = attribute_list[0].value.expression.function.args[0]
error_location = error_arg.source_location
self.assertEqual(
[[
error.error(
"m.emb", error_location,
"Potential range of expression is {} to {}, which cannot fit "
"in a 64-bit signed or unsigned integer.".format(0, 2**65-2))
]],
error.filter_errors(constraints.check_constraints(ir)))
def test_arguments_require_different_signedness_64_bits(self):
ir = _make_ir_from_emb(
'[$default byte_order: "LittleEndian"]\n'
"struct Foo:\n"
" 0 [+1] UInt x\n"
# Left side requires uint64, right side requires int64.
" if (x + 0x8000_0000_0000_0000) + (x - 0x7fff_ffff_ffff_ffff) < 10:\n"
" 1 [+1] UInt y\n")
condition = ir.module[0].type[0].structure.field[1].existence_condition
error_expression = condition.function.args[0]
error_location = error_expression.source_location
arg0_location = error_expression.function.args[0].source_location
arg1_location = error_expression.function.args[1].source_location
self.assertEqual([
[error.error(
"m.emb", error_location,
"Either all arguments to '+' and its result must fit in a 64-bit "
"unsigned integer, or all must fit in a 64-bit signed integer."),
error.note("m.emb", arg0_location,
"Requires unsigned 64-bit integer."),
error.note("m.emb", arg1_location,
"Requires signed 64-bit integer.")]
], error.filter_errors(constraints.check_constraints(ir)))
def test_return_value_requires_different_signedness_from_arguments(self):
ir = _make_ir_from_emb(
'[$default byte_order: "LittleEndian"]\n'
"struct Foo:\n"
" 0 [+1] UInt x\n"
# Both arguments require uint64; result fits in int64.
" if (x + 0x7fff_ffff_ffff_ffff) - 0x8000_0000_0000_0000 < 10:\n"
" 1 [+1] UInt y\n")
condition = ir.module[0].type[0].structure.field[1].existence_condition
error_expression = condition.function.args[0]
error_location = error_expression.source_location
arg0_location = error_expression.function.args[0].source_location
arg1_location = error_expression.function.args[1].source_location
self.assertEqual([
[error.error(
"m.emb", error_location,
"Either all arguments to '-' and its result must fit in a 64-bit "
"unsigned integer, or all must fit in a 64-bit signed integer."),
error.note("m.emb", arg0_location,
"Requires unsigned 64-bit integer."),
error.note("m.emb", arg1_location,
"Requires unsigned 64-bit integer."),
error.note("m.emb", error_location,
"Requires signed 64-bit integer.")]
], error.filter_errors(constraints.check_constraints(ir)))
def test_return_value_requires_different_signedness_from_one_argument(self):
ir = _make_ir_from_emb(
'[$default byte_order: "LittleEndian"]\n'
"struct Foo:\n"
" 0 [+1] UInt x\n"
# One argument requires uint64; result fits in int64.
" if (x + 0x7fff_ffff_ffff_fff0) - 0x7fff_ffff_ffff_ffff < 10:\n"
" 1 [+1] UInt y\n")
condition = ir.module[0].type[0].structure.field[1].existence_condition
error_expression = condition.function.args[0]
error_location = error_expression.source_location
arg0_location = error_expression.function.args[0].source_location
self.assertEqual([
[error.error(
"m.emb", error_location,
"Either all arguments to '-' and its result must fit in a 64-bit "
"unsigned integer, or all must fit in a 64-bit signed integer."),
error.note("m.emb", arg0_location,
"Requires unsigned 64-bit integer."),
error.note("m.emb", error_location,
"Requires signed 64-bit integer.")]
], error.filter_errors(constraints.check_constraints(ir)))
def test_checks_constancy_of_constant_references(self):
ir = _make_ir_from_emb("struct Foo:\n"
" 0 [+1] UInt x\n"
" let y = x\n"
" let z = Foo.y\n")
error_expression = ir.module[0].type[0].structure.field[2].read_transform
error_location = error_expression.source_location
note_field = ir.module[0].type[0].structure.field[1]
note_location = note_field.source_location
self.assertEqual([
[error.error("m.emb", error_location,
"Static references must refer to constants."),
error.note("m.emb", note_location, "y is not constant.")]
], error.filter_errors(constraints.check_constraints(ir)))
def test_checks_for_explicit_size_on_parameters(self):
ir = _make_ir_from_emb("struct Foo(y: UInt):\n"
" 0 [+1] UInt x\n")
error_parameter = ir.module[0].type[0].runtime_parameter[0]
error_location = error_parameter.physical_type_alias.source_location
self.assertEqual(
[[error.error("m.emb", error_location,
"Integer range of parameter must not be unbounded; it "
"must fit in a 64-bit signed or unsigned integer.")]],
error.filter_errors(constraints.check_constraints(ir)))
def test_checks_for_correct_explicit_size_on_parameters(self):
ir = _make_ir_from_emb("struct Foo(y: UInt:300):\n"
" 0 [+1] UInt x\n")
error_parameter = ir.module[0].type[0].runtime_parameter[0]
error_location = error_parameter.physical_type_alias.source_location
self.assertEqual(
[[error.error("m.emb", error_location,
"Potential range of parameter is 0 to {}, which cannot "
"fit in a 64-bit signed or unsigned integer.".format(
2**300-1))]],
error.filter_errors(constraints.check_constraints(ir)))
def test_checks_for_explicit_enum_size_on_parameters(self):
ir = _make_ir_from_emb("struct Foo(y: Bar:8):\n"
" 0 [+1] UInt x\n"
"enum Bar:\n"
" QUX = 1\n")
error_parameter = ir.module[0].type[0].runtime_parameter[0]
error_size = error_parameter.physical_type_alias.size_in_bits
error_location = error_size.source_location
self.assertEqual(
[[error.error(
"m.emb", error_location,
"Parameters with enum type may not have explicit size.")]],
error.filter_errors(constraints.check_constraints(ir)))
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "b20a91cdce15e95b2743a16bd90fc96f",
"timestamp": "",
"source": "github",
"line_count": 822,
"max_line_length": 80,
"avg_line_length": 47.42214111922141,
"alnum_prop": 0.5419050306559606,
"repo_name": "google/emboss",
"id": "9bf5a8e6cc51a55d5a88a8ee5262f5bf40494224",
"size": "39557",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "compiler/front_end/constraints_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1250"
},
{
"name": "C++",
"bytes": "682536"
},
{
"name": "Python",
"bytes": "954487"
},
{
"name": "Starlark",
"bytes": "33302"
},
{
"name": "Vim Script",
"bytes": "5463"
}
],
"symlink_target": ""
}
|
import tensorflow as tf
import cv2
import numpy as np
class MultilayerConvolutionalNetwork:
"""
This class manages the deep neural network
that will be used by the agent to learn
and extrapolate the state space
"""
def __init__(self, input_width, input_height, nimages, nchannels):
self.session = tf.InteractiveSession()
self.input_width = input_width
self.input_height = input_height
self.nimages = nimages
self.nchannels = nchannels
self.a = tf.placeholder("float", [None, self.nchannels])
self.y = tf.placeholder("float", [None])
self.input_image, self.y_conv, self.h_fc1, self.train_step = self.build_network()
        self.session.run(tf.global_variables_initializer())  # initialize_all_variables was renamed
self.saver = tf.train.Saver()
def weight_variable(self, shape, stddev = 0.01):
"""
        Initialize weights with a slight amount of noise to
break symmetry and prevent zero gradients
"""
initial = tf.truncated_normal(shape, stddev = stddev)
return tf.Variable(initial)
def bias_variable(self, shape, value = 0.01):
"""
        Initialize ReLU neurons with a slight positive initial
bias to avoid dead neurons
"""
initial = tf.constant(value, shape=shape)
return tf.Variable(initial)
def conv2d(self, x, W, stride = 1):
"""
        We use zero-padded convolutions; with the default stride of 1
        the output keeps the same spatial size as the input
"""
return tf.nn.conv2d(x, W, strides = [1, stride, stride, 1], padding = "SAME")
def max_pool_2x2(self, x):
"""
Our pooling is plain old max pooling over 2x2 blocks
"""
return tf.nn.max_pool(x, ksize = [1, 2, 2, 1],
strides = [1, 2, 2, 1], padding = "SAME")
def build_weights_biases(self, weights_shape):
"""
Build the weights and bias of a convolutional layer
"""
return self.weight_variable(weights_shape), \
self.bias_variable(weights_shape[-1:])
def convolve_relu_pool(self, nn_input, weights_shape, stride = 4, pool = True):
"""
Convolve the input to the network with the weight tensor,
add the bias, apply the ReLU function and finally max pool
"""
W_conv, b_conv = self.build_weights_biases(weights_shape)
h_conv = tf.nn.relu(self.conv2d(nn_input, W_conv, stride) + b_conv)
if not pool:
return h_conv
return self.max_pool_2x2(h_conv)
def build_network(self):
"""
Sets up the deep neural network
"""
        # the input is a stack of `nimages` preprocessed
        # 80x80 frames (not a single colour image)
input_image = tf.placeholder("float", [None, self.input_width,
self.input_height, self.nimages])
# create the first convolutional layers
h_pool1 = self.convolve_relu_pool(input_image, [8, 8, self.nimages, 32])
h_conv2 = self.convolve_relu_pool(h_pool1, [4, 4, 32, 64], 2, False)
h_conv3 = self.convolve_relu_pool(h_conv2, [3, 3, 64, 64], 1, False)
# create the densely connected layers
W_fc1, b_fc1 = self.build_weights_biases([5 * 5 * 64, 512])
h_conv3_flat = tf.reshape(h_conv3, [-1, 5 * 5 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_conv3_flat, W_fc1) + b_fc1)
# finally add the readout layer
W_fc2, b_fc2 = self.build_weights_biases([512, self.nchannels])
readout = tf.matmul(h_fc1, W_fc2) + b_fc2
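        # readout holds one Q-value per action; multiplying by the one-hot
        # action placeholder self.a and summing selects Q(s, a_taken), and the
        # loss below is the squared TD error against the target values self.y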
        readout_action = tf.reduce_sum(tf.multiply(readout, self.a), reduction_indices=1)  # tf.mul was renamed tf.multiply
cost_function = tf.reduce_mean(tf.square(self.y - readout_action))
train_step = tf.train.AdamOptimizer(1e-8).minimize(cost_function)
return input_image, readout, h_fc1, train_step
def train(self, value_batch, action_batch, state_batch):
"""
Does the actual training step
"""
self.train_step.run(feed_dict = {
self.y : value_batch,
self.a : action_batch,
self.input_image : state_batch
})
def save_variables(self, a_file, h_file, stack):
"""
Saves neural network weight variables for
debugging purposes
"""
readout_t = self.readout_act(stack)
a_file.write(",".join([str(x) for x in readout_t]) + '\n')
h_file.write(",".join([str(x) for x in self.h_fc1.eval(
feed_dict={self.input_image:[stack]})[0]]) + '\n')
def save_percepts(self, path, x_t1):
"""
        Writes an image array to disk so the way
        the image is compressed can be inspected visually
"""
cv2.imwrite(path, np.rot90(x_t1))
def save_network(self, directory, iteration):
"""
Saves the progress of the agent
for further use later on
"""
self.saver.save(self.session, directory + '/network', global_step = iteration)
def attempt_restore(self, directory):
"""
        Restores the latest saved checkpoint if
        available
"""
checkpoint = tf.train.get_checkpoint_state(directory)
if checkpoint and checkpoint.model_checkpoint_path:
self.saver.restore(self.session, checkpoint.model_checkpoint_path)
return checkpoint.model_checkpoint_path
def preprocess_percepts(self, x_t1_colored, reshape = True):
"""
        Shrinks the raw image array, converts it to
        black and white, and reshapes it to
        3 dimensions if needed
"""
x_t1_resized = cv2.resize(x_t1_colored, (self.input_width, self.input_height))
x_t1_greyscale = cv2.cvtColor(x_t1_resized, cv2.COLOR_BGR2GRAY)
ret, x_t1 = cv2.threshold(x_t1_greyscale, 1, 255, cv2.THRESH_BINARY)
"""
import time
timestamp = int(time.time())
cv2.imwrite("percepts/%d-color.png" % timestamp,
np.rot90(x_t1_colored))
cv2.imwrite("percepts/%d-resized.png" % timestamp,
np.rot90(x_t1_resized))
cv2.imwrite("percepts/%d-greyscale.png" % timestamp,
np.rot90(x_t1_greyscale))
cv2.imwrite("percepts/%d-bandw.png" % timestamp,
np.rot90(x_t1))
"""
if not reshape:
return x_t1
return np.reshape(x_t1, (80, 80, 1))
def readout_act(self, stack):
"""
        Evaluates the network readout (one Q-value per action)
        for a given stack of images
"""
stack = [stack] if hasattr(stack, 'shape') and len(stack.shape) == 3 else stack
return self.y_conv.eval(feed_dict = {self.input_image: stack})
def select_best_action(self, stack):
"""
Selects the action with the
highest value
"""
return np.argmax(self.readout_act(stack))
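# Illustrative usage (a sketch, not part of the original module; assumes a
# TF 1.x environment and 80x80 inputs stacked 4 frames deep):
#
#     net = MultilayerConvolutionalNetwork(80, 80, 4, 2)
#     stack = np.random.randint(0, 255, (80, 80, 4)).astype(np.float32)
#     action = net.select_best_action(stack)  # index of the highest Q-value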
def main():
print('This module should be imported')
if __name__ == "__main__":
main()
|
{
"content_hash": "585b3c002276d8c1ff107cfa9279b227",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 89,
"avg_line_length": 36.544502617801044,
"alnum_prop": 0.5851002865329513,
"repo_name": "mimoralea/king-pong",
"id": "bc2a152002e92e8d55e206713a851690baa13195",
"size": "6980",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "multicnet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34040"
}
],
"symlink_target": ""
}
|
"""
Flask-Snooze
--------------
Backend-agnostic REST API provider for Flask.
Links
`````
* `development version
<http://github.com/ahri/flask-snooze>`_
"""
from setuptools import setup
setup(
name='Flask-Snooze',
version='0.1.6',
url='http://github.com/ahri/flask-snooze',
license='MIT',
author='Adam Piper',
author_email='adam@ahri.net',
description='Backend agnostic REST API provider for Flask',
long_description=__doc__,
py_modules=['flask_snooze'],
test_suite="nose.collector",
zip_safe=False,
platforms='any',
include_package_data=True,
install_requires=[
'Flask>=0.8',
],
tests_require=[
'Flask-Testing>=0.3',
'nose>=1.1.2',
'Flask-SQLAlchemy>=0.16',
'SQLAlchemy>=0.7.8',
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
|
{
"content_hash": "3a053fc2f689894fdfb59b27b3ca922f",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 70,
"avg_line_length": 24.897959183673468,
"alnum_prop": 0.589344262295082,
"repo_name": "ahri/flask-snooze",
"id": "19b3a114fb42e3e9b2e9580964c5ccdfaf5ec4fa",
"size": "1220",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31783"
}
],
"symlink_target": ""
}
|
"""
"""
# Python 2.6 and newer support
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from future.builtins import (
bytes, dict, int, list, object, range, str,
ascii, chr, hex, input, next, oct, open,
pow, round, super,
filter, map, zip)
try:
unicode()
except NameError:
unicode = str
import sys
__python_version__ = dict()
try:
__python_version__['major'] = sys.version_info.major
except AttributeError:
__python_version__['major'] = sys.version_info[0]
try:
__python_version__['minor'] = sys.version_info.minor
except AttributeError:
__python_version__['minor'] = sys.version_info[1]
import time
import multiprocessing
from collections import defaultdict
from ..error import DeviceTimeoutError
from ..dictattraccessor import DictAttrAccessor
def receive_proc(return_queue, io, count):
# the return queue should never be empty after execution!
try:
return_queue.put_nowait(io.read(count))
except Exception as e:
return_queue.put_nowait(e)
class Device(object):
"""
"""
DEFAULT_ENCODING = 'ascii'
DEFAULT_RECV_COUNT = -1
SEND_TERMINATION = '\n'
def __init__(self, uuid=None, name=None, make=None, model=None, version=None, timeout=None):
object.__init__(self)
self._stdio = None
self.timeout = timeout
self._uuid = uuid
self.name = name
self.make = make
self.model = model
self.version = version
self.state = DictAttrAccessor(dict_=defaultdict(type(None)))
def send(self, message, encoding=DEFAULT_ENCODING):
"""
"""
# TODO: store termination in bytes, then add to encoded message right before send.
if not message.endswith(self.SEND_TERMINATION):
message += self.SEND_TERMINATION
if encoding:
self.stdio.write(message.encode(encoding))
else:
self.stdio.write(message)
def receive(self, count=DEFAULT_RECV_COUNT, encoding=DEFAULT_ENCODING):
"""
"""
# TODO: multiprocess approach will pickle io objects... The goal is to use a Manager instead in final implementation.
receive_queue = multiprocessing.Queue()
t_reader = multiprocessing.Process(target=receive_proc, args=(receive_queue, self.stdio, count))
t_reader.start()
t_reader.join(timeout=self.timeout)
if t_reader.is_alive():
# the read timed out!
t_reader.terminate()
t_reader.join()
raise DeviceTimeoutError(uuid=self._uuid,
name=self.name,
make=self.make,
model=self.model,
version=self.version,
message='device timed out during receive.')
received = receive_queue.get_nowait()
if isinstance(received, Exception):
raise received
if encoding:
return received.decode(encoding=encoding)
else:
return bytes(received)
@property
def stdio(self):
return self._stdio
@stdio.setter
def stdio(self, io):
self._stdio = io
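# Minimal usage sketch (illustrative, not part of the original module; note
# that the relative imports above mean this file cannot run as a script):
#
#     import io
#     dev = Device(name='demo', timeout=1.0)
#     dev.stdio = io.BytesIO()
#     dev.send('IDN?')
#     dev.stdio.getvalue()  # -> b'IDN?\n'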
|
{
"content_hash": "d0221114c6640022d74ca7279eac0c3b",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 125,
"avg_line_length": 31.40740740740741,
"alnum_prop": 0.5787146226415094,
"repo_name": "sfinucane/deviceutils",
"id": "92d45d5492d34a39cc4caa8241a0a8cbb3a7e253",
"size": "3414",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deviceutils/device/device.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "42599"
}
],
"symlink_target": ""
}
|
class Polinomio(list):
def sum(self, other):
'''Return the sum of itself and other polynomial'''
if len(self) > len(other):
sol = self[:]
for x in range(len(other)): sol[x] += other[x]
else:
sol = other[:]
for x in range(len(self)): sol[x] += self[x]
return sol
def product(self, other):
'''Return the product of itself and other polynomial'''
sol = [0]*(len(self)+len(other)-1)
for pos1, val1 in enumerate(self):
for pos2, val2 in enumerate(other):
sol[pos1+pos2] += val1*val2
return sol
def divide_by(self, other):
        '''Return a tuple (remainder, quotient) from synthetic division by a monic linear divisor'''
if len(other) > 2:
raise PolynomialTooLongError(0)
sol = self[:]
carry = 0
divisor = -other[0]
for x in reversed(range(len(sol))):
sol[x] = carry + sol[x]
carry = sol[x]*divisor
return sol[0], sol[1:]
class PolynomialTooLongError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
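if __name__ == '__main__':
    # Minimal usage sketch (illustrative, not part of the original module).
    # Coefficients are stored lowest degree first: [1, 2] means 1 + 2x.
    p = Polinomio([1, 2])   # 1 + 2x
    q = Polinomio([3, 4])   # 3 + 4x
    print(p.sum(q))         # [4, 6]     -> 4 + 6x
    print(p.product(q))     # [3, 10, 8] -> 3 + 10x + 8x^2
    rem, quot = Polinomio([2, 3, 1]).divide_by(Polinomio([1, 1]))
    print(rem, quot)        # 0 [2, 1]   -> (2 + 3x + x^2) / (1 + x) = 2 + x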
|
{
"content_hash": "9cb9baa67b8a71d455880c33f46b4b48",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 63,
"avg_line_length": 30.128205128205128,
"alnum_prop": 0.5285106382978724,
"repo_name": "vicmagv/pis2-actividad02",
"id": "364ec3e3daf8829fce64724769f7fc2c968c9faa",
"size": "1175",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/polinomio.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "17229"
},
{
"name": "Python",
"bytes": "2130"
}
],
"symlink_target": ""
}
|
import os.path
import signal
import streams_property
from ducktape.services.service import Service
from ducktape.utils.util import wait_until
from kafkatest.directory_layout.kafka_path import KafkaPathResolverMixin
from kafkatest.services.kafka import KafkaConfig
from kafkatest.services.monitor.jmx import JmxMixin
from kafkatest.version import LATEST_0_10_0, LATEST_0_10_1, LATEST_0_10_2, LATEST_0_11_0, LATEST_1_0, LATEST_1_1
STATE_DIR = "state.dir"
class StreamsTestBaseService(KafkaPathResolverMixin, JmxMixin, Service):
"""Base class for Streams Test services providing some common settings and functionality"""
PERSISTENT_ROOT = "/mnt/streams"
# The log file contains normal log4j logs written using a file appender. stdout and stderr are handled separately
CONFIG_FILE = os.path.join(PERSISTENT_ROOT, "streams.properties")
LOG_FILE = os.path.join(PERSISTENT_ROOT, "streams.log")
STDOUT_FILE = os.path.join(PERSISTENT_ROOT, "streams.stdout")
STDERR_FILE = os.path.join(PERSISTENT_ROOT, "streams.stderr")
JMX_LOG_FILE = os.path.join(PERSISTENT_ROOT, "jmx_tool.log")
JMX_ERR_FILE = os.path.join(PERSISTENT_ROOT, "jmx_tool.err.log")
LOG4J_CONFIG_FILE = os.path.join(PERSISTENT_ROOT, "tools-log4j.properties")
PID_FILE = os.path.join(PERSISTENT_ROOT, "streams.pid")
CLEAN_NODE_ENABLED = True
logs = {
"streams_log": {
"path": LOG_FILE,
"collect_default": True},
"streams_stdout": {
"path": STDOUT_FILE,
"collect_default": True},
"streams_stderr": {
"path": STDERR_FILE,
"collect_default": True},
"streams_log.1": {
"path": LOG_FILE + ".1",
"collect_default": True},
"streams_stdout.1": {
"path": STDOUT_FILE + ".1",
"collect_default": True},
"streams_stderr.1": {
"path": STDERR_FILE + ".1",
"collect_default": True},
"streams_log.2": {
"path": LOG_FILE + ".2",
"collect_default": True},
"streams_stdout.2": {
"path": STDOUT_FILE + ".2",
"collect_default": True},
"streams_stderr.2": {
"path": STDERR_FILE + ".2",
"collect_default": True},
"streams_log.3": {
"path": LOG_FILE + ".3",
"collect_default": True},
"streams_stdout.3": {
"path": STDOUT_FILE + ".3",
"collect_default": True},
"streams_stderr.3": {
"path": STDERR_FILE + ".3",
"collect_default": True},
"streams_log.0-1": {
"path": LOG_FILE + ".0-1",
"collect_default": True},
"streams_stdout.0-1": {
"path": STDOUT_FILE + ".0-1",
"collect_default": True},
"streams_stderr.0-1": {
"path": STDERR_FILE + ".0-1",
"collect_default": True},
"streams_log.0-2": {
"path": LOG_FILE + ".0-2",
"collect_default": True},
"streams_stdout.0-2": {
"path": STDOUT_FILE + ".0-2",
"collect_default": True},
"streams_stderr.0-2": {
"path": STDERR_FILE + ".0-2",
"collect_default": True},
"streams_log.0-3": {
"path": LOG_FILE + ".0-3",
"collect_default": True},
"streams_stdout.0-3": {
"path": STDOUT_FILE + ".0-3",
"collect_default": True},
"streams_stderr.0-3": {
"path": STDERR_FILE + ".0-3",
"collect_default": True},
"streams_log.0-4": {
"path": LOG_FILE + ".0-4",
"collect_default": True},
"streams_stdout.0-4": {
"path": STDOUT_FILE + ".0-4",
"collect_default": True},
"streams_stderr.0-4": {
"path": STDERR_FILE + ".0-4",
"collect_default": True},
"streams_log.0-5": {
"path": LOG_FILE + ".0-5",
"collect_default": True},
"streams_stdout.0-5": {
"path": STDOUT_FILE + ".0-5",
"collect_default": True},
"streams_stderr.0-5": {
"path": STDERR_FILE + ".0-5",
"collect_default": True},
"streams_log.0-6": {
"path": LOG_FILE + ".0-6",
"collect_default": True},
"streams_stdout.0-6": {
"path": STDOUT_FILE + ".0-6",
"collect_default": True},
"streams_stderr.0-6": {
"path": STDERR_FILE + ".0-6",
"collect_default": True},
"streams_log.1-1": {
"path": LOG_FILE + ".1-1",
"collect_default": True},
"streams_stdout.1-1": {
"path": STDOUT_FILE + ".1-1",
"collect_default": True},
"streams_stderr.1-1": {
"path": STDERR_FILE + ".1-1",
"collect_default": True},
"streams_log.1-2": {
"path": LOG_FILE + ".1-2",
"collect_default": True},
"streams_stdout.1-2": {
"path": STDOUT_FILE + ".1-2",
"collect_default": True},
"streams_stderr.1-2": {
"path": STDERR_FILE + ".1-2",
"collect_default": True},
"streams_log.1-3": {
"path": LOG_FILE + ".1-3",
"collect_default": True},
"streams_stdout.1-3": {
"path": STDOUT_FILE + ".1-3",
"collect_default": True},
"streams_stderr.1-3": {
"path": STDERR_FILE + ".1-3",
"collect_default": True},
"streams_log.1-4": {
"path": LOG_FILE + ".1-4",
"collect_default": True},
"streams_stdout.1-4": {
"path": STDOUT_FILE + ".1-4",
"collect_default": True},
"streams_stderr.1-4": {
"path": STDERR_FILE + ".1-4",
"collect_default": True},
"streams_log.1-5": {
"path": LOG_FILE + ".1-5",
"collect_default": True},
"streams_stdout.1-5": {
"path": STDOUT_FILE + ".1-5",
"collect_default": True},
"streams_stderr.1-5": {
"path": STDERR_FILE + ".1-5",
"collect_default": True},
"streams_log.1-6": {
"path": LOG_FILE + ".1-6",
"collect_default": True},
"streams_stdout.1-6": {
"path": STDOUT_FILE + ".1-6",
"collect_default": True},
"streams_stderr.1-6": {
"path": STDERR_FILE + ".1-6",
"collect_default": True},
"jmx_log": {
"path": JMX_LOG_FILE,
"collect_default": True},
"jmx_err": {
"path": JMX_ERR_FILE,
"collect_default": True},
}
def __init__(self, test_context, kafka, streams_class_name, user_test_args1, user_test_args2=None, user_test_args3=None, user_test_args4=None):
Service.__init__(self, test_context, num_nodes=1)
self.kafka = kafka
self.args = {'streams_class_name': streams_class_name,
'user_test_args1': user_test_args1,
'user_test_args2': user_test_args2,
'user_test_args3': user_test_args3,
'user_test_args4': user_test_args4}
self.log_level = "DEBUG"
@property
def node(self):
return self.nodes[0]
def pids(self, node):
try:
return [pid for pid in node.account.ssh_capture("cat " + self.PID_FILE, callback=int)]
except:
return []
def stop_nodes(self, clean_shutdown=True):
for node in self.nodes:
self.stop_node(node, clean_shutdown)
def stop_node(self, node, clean_shutdown=True):
self.logger.info((clean_shutdown and "Cleanly" or "Forcibly") + " stopping Streams Test on " + str(node.account))
pids = self.pids(node)
sig = signal.SIGTERM if clean_shutdown else signal.SIGKILL
for pid in pids:
node.account.signal(pid, sig, allow_fail=True)
if clean_shutdown:
for pid in pids:
wait_until(lambda: not node.account.alive(pid), timeout_sec=120, err_msg="Streams Test process on " + str(node.account) + " took too long to exit")
node.account.ssh("rm -f " + self.PID_FILE, allow_fail=False)
def restart(self):
# We don't want to do any clean up here, just restart the process.
for node in self.nodes:
self.logger.info("Restarting Kafka Streams on " + str(node.account))
self.stop_node(node)
self.start_node(node)
def abortThenRestart(self):
# We don't want to do any clean up here, just abort then restart the process. The running service is killed immediately.
for node in self.nodes:
self.logger.info("Aborting Kafka Streams on " + str(node.account))
self.stop_node(node, False)
self.logger.info("Restarting Kafka Streams on " + str(node.account))
self.start_node(node)
def wait(self, timeout_sec=1440):
for node in self.nodes:
self.wait_node(node, timeout_sec)
def wait_node(self, node, timeout_sec=None):
for pid in self.pids(node):
wait_until(lambda: not node.account.alive(pid), timeout_sec=timeout_sec, err_msg="Streams Test process on " + str(node.account) + " took too long to exit")
def clean_node(self, node):
node.account.kill_process("streams", clean_shutdown=False, allow_fail=True)
if self.CLEAN_NODE_ENABLED:
node.account.ssh("rm -rf " + self.PERSISTENT_ROOT, allow_fail=False)
def start_cmd(self, node):
args = self.args.copy()
args['config_file'] = self.CONFIG_FILE
args['stdout'] = self.STDOUT_FILE
args['stderr'] = self.STDERR_FILE
args['pidfile'] = self.PID_FILE
args['log4j'] = self.LOG4J_CONFIG_FILE
args['kafka_run_class'] = self.path.script("kafka-run-class.sh", node)
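        # Run the test class in the background; `echo $! >&3` routes the
        # shell's last background PID through file descriptor 3 into the pid
        # file, while stdout and stderr are appended to their own log files.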
cmd = "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%(log4j)s\"; " \
"INCLUDE_TEST_JARS=true %(kafka_run_class)s %(streams_class_name)s " \
" %(config_file)s %(user_test_args1)s %(user_test_args2)s %(user_test_args3)s" \
" %(user_test_args4)s & echo $! >&3 ) 1>> %(stdout)s 2>> %(stderr)s 3> %(pidfile)s" % args
self.logger.info("Executing streams cmd: " + cmd)
return cmd
def prop_file(self):
cfg = KafkaConfig(**{streams_property.STATE_DIR: self.PERSISTENT_ROOT, streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers()})
return cfg.render()
def start_node(self, node):
node.account.mkdirs(self.PERSISTENT_ROOT)
prop_file = self.prop_file()
node.account.create_file(self.CONFIG_FILE, prop_file)
node.account.create_file(self.LOG4J_CONFIG_FILE, self.render('tools_log4j.properties', log_file=self.LOG_FILE))
self.logger.info("Starting StreamsTest process on " + str(node.account))
with node.account.monitor_log(self.STDOUT_FILE) as monitor:
node.account.ssh(self.start_cmd(node))
monitor.wait_until('StreamsTest instance started', timeout_sec=60, err_msg="Never saw message indicating StreamsTest finished startup on " + str(node.account))
if len(self.pids(node)) == 0:
raise RuntimeError("No process ids recorded")
class StreamsSmokeTestBaseService(StreamsTestBaseService):
"""Base class for Streams Smoke Test services providing some common settings and functionality"""
def __init__(self, test_context, kafka, command):
super(StreamsSmokeTestBaseService, self).__init__(test_context,
kafka,
"org.apache.kafka.streams.tests.StreamsSmokeTest",
command)
class StreamsEosTestBaseService(StreamsTestBaseService):
"""Base class for Streams EOS Test services providing some common settings and functionality"""
clean_node_enabled = True
def __init__(self, test_context, kafka, command):
super(StreamsEosTestBaseService, self).__init__(test_context,
kafka,
"org.apache.kafka.streams.tests.StreamsEosTest",
command)
def clean_node(self, node):
if self.clean_node_enabled:
super(StreamsEosTestBaseService, self).clean_node(node)
class StreamsSmokeTestDriverService(StreamsSmokeTestBaseService):
def __init__(self, test_context, kafka):
super(StreamsSmokeTestDriverService, self).__init__(test_context, kafka, "run")
self.DISABLE_AUTO_TERMINATE = ""
def disable_auto_terminate(self):
self.DISABLE_AUTO_TERMINATE = "disableAutoTerminate"
def start_cmd(self, node):
args = self.args.copy()
args['config_file'] = self.CONFIG_FILE
args['stdout'] = self.STDOUT_FILE
args['stderr'] = self.STDERR_FILE
args['pidfile'] = self.PID_FILE
args['log4j'] = self.LOG4J_CONFIG_FILE
args['disable_auto_terminate'] = self.DISABLE_AUTO_TERMINATE
args['kafka_run_class'] = self.path.script("kafka-run-class.sh", node)
cmd = "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%(log4j)s\"; " \
"INCLUDE_TEST_JARS=true %(kafka_run_class)s %(streams_class_name)s " \
" %(config_file)s %(user_test_args1)s %(disable_auto_terminate)s" \
" & echo $! >&3 ) 1>> %(stdout)s 2>> %(stderr)s 3> %(pidfile)s" % args
return cmd
class StreamsSmokeTestJobRunnerService(StreamsSmokeTestBaseService):
def __init__(self, test_context, kafka):
super(StreamsSmokeTestJobRunnerService, self).__init__(test_context, kafka, "process")
class StreamsEosTestDriverService(StreamsEosTestBaseService):
def __init__(self, test_context, kafka):
super(StreamsEosTestDriverService, self).__init__(test_context, kafka, "run")
class StreamsEosTestJobRunnerService(StreamsEosTestBaseService):
def __init__(self, test_context, kafka):
super(StreamsEosTestJobRunnerService, self).__init__(test_context, kafka, "process")
class StreamsComplexEosTestJobRunnerService(StreamsEosTestBaseService):
def __init__(self, test_context, kafka):
super(StreamsComplexEosTestJobRunnerService, self).__init__(test_context, kafka, "process-complex")
class StreamsEosTestVerifyRunnerService(StreamsEosTestBaseService):
def __init__(self, test_context, kafka):
super(StreamsEosTestVerifyRunnerService, self).__init__(test_context, kafka, "verify")
class StreamsComplexEosTestVerifyRunnerService(StreamsEosTestBaseService):
def __init__(self, test_context, kafka):
super(StreamsComplexEosTestVerifyRunnerService, self).__init__(test_context, kafka, "verify-complex")
class StreamsSmokeTestShutdownDeadlockService(StreamsSmokeTestBaseService):
def __init__(self, test_context, kafka):
super(StreamsSmokeTestShutdownDeadlockService, self).__init__(test_context, kafka, "close-deadlock-test")
class StreamsBrokerCompatibilityService(StreamsTestBaseService):
def __init__(self, test_context, kafka, eosEnabled):
super(StreamsBrokerCompatibilityService, self).__init__(test_context,
kafka,
"org.apache.kafka.streams.tests.BrokerCompatibilityTest",
eosEnabled)
class StreamsBrokerDownResilienceService(StreamsTestBaseService):
def __init__(self, test_context, kafka, configs):
super(StreamsBrokerDownResilienceService, self).__init__(test_context,
kafka,
"org.apache.kafka.streams.tests.StreamsBrokerDownResilienceTest",
configs)
def start_cmd(self, node):
args = self.args.copy()
args['config_file'] = self.CONFIG_FILE
args['stdout'] = self.STDOUT_FILE
args['stderr'] = self.STDERR_FILE
args['pidfile'] = self.PID_FILE
args['log4j'] = self.LOG4J_CONFIG_FILE
args['kafka_run_class'] = self.path.script("kafka-run-class.sh", node)
cmd = "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%(log4j)s\"; " \
"INCLUDE_TEST_JARS=true %(kafka_run_class)s %(streams_class_name)s " \
" %(config_file)s %(user_test_args1)s %(user_test_args2)s %(user_test_args3)s" \
" %(user_test_args4)s & echo $! >&3 ) 1>> %(stdout)s 2>> %(stderr)s 3> %(pidfile)s" % args
self.logger.info("Executing: " + cmd)
return cmd
class StreamsStandbyTaskService(StreamsTestBaseService):
def __init__(self, test_context, kafka, configs):
super(StreamsStandbyTaskService, self).__init__(test_context,
kafka,
"org.apache.kafka.streams.tests.StreamsStandByReplicaTest",
configs)
class StreamsUpgradeTestJobRunnerService(StreamsTestBaseService):
def __init__(self, test_context, kafka):
super(StreamsUpgradeTestJobRunnerService, self).__init__(test_context,
kafka,
"org.apache.kafka.streams.tests.StreamsUpgradeTest",
"")
self.UPGRADE_FROM = None
self.UPGRADE_TO = None
def set_version(self, kafka_streams_version):
self.KAFKA_STREAMS_VERSION = kafka_streams_version
def set_upgrade_from(self, upgrade_from):
self.UPGRADE_FROM = upgrade_from
def set_upgrade_to(self, upgrade_to):
self.UPGRADE_TO = upgrade_to
def prop_file(self):
properties = {streams_property.STATE_DIR: self.PERSISTENT_ROOT,
streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers()}
if self.UPGRADE_FROM is not None:
properties['upgrade.from'] = self.UPGRADE_FROM
if self.UPGRADE_TO == "future_version":
properties['test.future.metadata'] = "any_value"
cfg = KafkaConfig(**properties)
return cfg.render()
def start_cmd(self, node):
args = self.args.copy()
if self.KAFKA_STREAMS_VERSION in [str(LATEST_0_10_0), str(LATEST_0_10_1), str(LATEST_0_10_2), str(LATEST_0_11_0), str(LATEST_1_0), str(LATEST_1_1)]:
args['kafka'] = self.kafka.bootstrap_servers()
else:
args['kafka'] = ""
if self.KAFKA_STREAMS_VERSION == str(LATEST_0_10_0) or self.KAFKA_STREAMS_VERSION == str(LATEST_0_10_1):
args['zk'] = self.kafka.zk.connect_setting()
else:
args['zk'] = ""
args['config_file'] = self.CONFIG_FILE
args['stdout'] = self.STDOUT_FILE
args['stderr'] = self.STDERR_FILE
args['pidfile'] = self.PID_FILE
args['log4j'] = self.LOG4J_CONFIG_FILE
args['version'] = self.KAFKA_STREAMS_VERSION
args['kafka_run_class'] = self.path.script("kafka-run-class.sh", node)
cmd = "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%(log4j)s\"; " \
"INCLUDE_TEST_JARS=true UPGRADE_KAFKA_STREAMS_TEST_VERSION=%(version)s " \
" %(kafka_run_class)s %(streams_class_name)s %(kafka)s %(zk)s %(config_file)s " \
" & echo $! >&3 ) 1>> %(stdout)s 2>> %(stderr)s 3> %(pidfile)s" % args
self.logger.info("Executing: " + cmd)
return cmd
|
{
"content_hash": "71f6164007126c673d47144db068af42",
"timestamp": "",
"source": "github",
"line_count": 466,
"max_line_length": 171,
"avg_line_length": 43,
"alnum_prop": 0.5600359317297136,
"repo_name": "Ishiihara/kafka",
"id": "1d8ed270cc5eb81aa9e4e28602a46132f16b71b9",
"size": "20819",
"binary": false,
"copies": "4",
"ref": "refs/heads/trunk",
"path": "tests/kafkatest/services/streams.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "27517"
},
{
"name": "HTML",
"bytes": "5440"
},
{
"name": "Java",
"bytes": "13930177"
},
{
"name": "Python",
"bytes": "748277"
},
{
"name": "Scala",
"bytes": "5935927"
},
{
"name": "Shell",
"bytes": "93819"
},
{
"name": "XSLT",
"bytes": "7116"
}
],
"symlink_target": ""
}
|
import ai
import time
import safe_path_generator as SPG
try:
import RPi.GPIO as GPIO
import motors
import input
import magnet
TEST_MODE = False
except ImportError:
TEST_MODE = True
import sys
import pygame
pygame.mixer.init()
COLOR_AI = 1
COORDS = [(0,0), (0,3), (0,6), (1,1), (1,3), (1,5), (2,2), (2,3), (2,4), (3,0), (3,1), (3,2), (3,4), (3,5), (3,6), (4,2), (4,3), (4,4), (5,1), (5,3), (5,5), (6,0), (6,3), (6,6)]
order_arr = [[8, 2, 0, 4, 6, 5, 1, 3, 7].index(x) for x in range(9)] # which base field is accessed when (the first at time 8, the second at time 2)
COORDS.extend([(-0.22+(6.44/8*x), -0.64) for x in order_arr]) # BASE_AI from 24 - 32
COORDS.extend([(6.22-(6.44/8*x), 6.64) for x in order_arr]) # BASE_PLAYER from 33 - 41
shutdownPin = 11
ledPin = 3
old_board = [0] * 24
pieces_player = 9
pieces_taken = 0
pieces_ai = 9
# Returns the updated board index referring to a specific position in the base instead of the base in general
def resolve_base(_i, base_color):
if _i != -1: # no effect on normal fields
return _i
if base_color == COLOR_AI:
return 24 + 9 - pieces_ai
else:
# list: where to put taken pieces first in player base (such that all pieces can be placed in the worst case)
return 33 + [2, 4, 6, 5, 3, 1, 0][pieces_taken]
def resolve(_i, context_board, base_color):
global pieces_player, pieces_ai
if _i == -1: # -1 corresponds to a position in the base
return COORDS[resolve_base(_i, base_color)], base_color
else:
return COORDS[_i], context_board[_i]
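# Illustrative example (not part of the original module): with all 9 AI pieces
# still in storage, resolve(-1, board, COLOR_AI) yields COORDS[24], the first
# AI base slot, while a normal field index such as 5 yields COORDS[5] together
# with the piece colour currently stored at board[5].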
def getShortSafePath(_board, start, target):
if target == -1 or start == -1:
raise RuntimeError('Position in base must be resolved first')
start_pos = COORDS[start]
target_pos = COORDS[target]
safe_path = [start_pos]
safe_path.extend(SPG.generate(start, target)) # list of tuples, first = start, last = target
safe_path.append(target_pos)
print('safe path from %i to %i is: %s' % (start, target, str(safe_path)))
return safe_path
def reset():
global pieces_player, pieces_ai
# Resets all pieces to the base, clears any records of previous boards
time.sleep(2)
print('Resetting the board')
_board = input.readBoard()
    for _i, val in enumerate(_board):
        if val != 0:
            _pos, _ = resolve(_i, _board, COLOR_AI)
            if val == COLOR_AI:
                pieces_ai += 1
            else:
                pieces_player += 1
            # resolve the base slot after updating the counters, and pass
            # board indices (not coordinates) to getShortSafePath
            _dest = resolve_base(-1, val)
            motors.goTo(_pos[0], _pos[1])
            magnet.turnOn(val)
            _path = getShortSafePath(_board, _i, _dest)
for p in _path:
motors.goTo(p[0], p[1])
magnet.turnOff()
time.sleep(0.5)
running = True
def shutdown(channel=0, full=True):
if running:
return
print("Shutting down...")
input.shutdown()
motors.shutdown()
magnet.shutdown()
GPIO.output(ledPin, GPIO.LOW)
if full:
sys.exit(1)
else:
sys.exit(0)
def count(_board, _color):
num = 0
for val in _board:
if val == _color:
num += 1
return num
# Plays a sound asynchronously; pygame continues playback in the background
def play_sound(_path):
print('playing sound: %s' % _path)
pygame.mixer.music.load(_path)
pygame.mixer.music.play()
if __name__ == "__main__" and not TEST_MODE:
try:
GPIO.setup(ledPin, GPIO.OUT)
GPIO.output(ledPin, GPIO.HIGH)
GPIO.setup(shutdownPin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.add_event_detect(shutdownPin, GPIO.RISING, callback=shutdown)
print('Resetting motors...')
motors.reset()
while True:
running = False
print('Checking board...')
board = input.readBoard()
should_move = False
if old_board != board:
# A move is over when a stone has been moved -> same number of pieces in phase 2/3
# or when a stone has been placed -> number of player pieces increased by 1
# If a mill has just been closed (new stone isInMill), after a COLOR_AI piece has been removed
# If one color now only has two pieces left, the game is over -> clean board
num_ai_old = count(old_board, COLOR_AI)
num_ai = count(board, COLOR_AI)
num_pl_old = count(old_board, -COLOR_AI)
num_pl = count(board, -COLOR_AI)
pl_move_dest = -1
for i, v in enumerate(board):
if old_board[i] == 0 and v == -COLOR_AI: # player piece added
pl_move_dest = i
if num_ai < 3 and pieces_ai == 0: # AI just lost
print('Suppose I\'ve lost :(')
play_sound('../sounds/clap.wav')
reset()
continue
                if ai.isInMill(board, pl_move_dest): # If a mill was closed, the move ends once num_ai has decreased by 1
should_move = num_ai == num_ai_old-1
if should_move:
print('Found closed mill on field %i, AI piece removed' % pl_move_dest)
else: # otherwise, both players must have the same number of pieces as before
phase_one = pieces_player != 0 and pieces_ai != 0
should_move = ((not phase_one) and num_pl == num_pl_old and num_ai == num_ai_old) \
or (phase_one and num_pl == num_pl_old+1 and num_ai == num_ai_old)
if should_move:
print('Found move to %i without any closed mills' % pl_move_dest)
if should_move:
running = True
print('board: ' + str(board))
play_sound('../sounds/ping.wav')
print('calculating move... p_ai=%i p_pl=%i' % (pieces_ai, pieces_player))
if pieces_player > 0: # ASSUME player uses all pieces from their storage before playing normally
pieces_player -= 1
old_board, moves = ai.calcMove(board, COLOR_AI, pieces_ai, pieces_player)
for move in moves:
start = move[0]
dest = move[1]
print('move: ', move[0], 'to', move[1])
# resolve coords of start and dest & color of piece
c1, color = resolve(start, board, COLOR_AI) # can only move pieces out of own base
c2, _ = resolve(dest, board, COLOR_AI) # can only put pieces in opponents base
# move piece from start to dest
motors.goTo(c1[0], c1[1])
magnet.turnOn(color)
if start == -1 or count(board, COLOR_AI) <= 3 or dest == -1:
path = getShortSafePath(board, resolve_base(start, COLOR_AI), resolve_base(dest, -COLOR_AI))
else:
path = [c1, c2]
for pos in path:
motors.goTo(pos[0], pos[1])
magnet.turnOff()
if dest == -1: # after piece has been moved to the player's base, we can update pieces_taken
pieces_taken += 1
time.sleep(0.5)
motors.goTo(0, 0)
motors.reset()
if pieces_ai > 0: # ASSUME AI uses all pieces from its storage before playing normally
pieces_ai -= 1
#old_board = input.readBoard()
print("board: " + str(old_board))
if count(old_board, -COLOR_AI) < 3 and pieces_player == 0: # AI just won
play_sound('../sounds/fanfare.wav')
reset()
continue
running = False
else:
time.sleep(1)
except KeyboardInterrupt:
shutdown(full=False)
|
{
"content_hash": "8c62bd1be391da72bde5621730d0edbc",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 198,
"avg_line_length": 40.843434343434346,
"alnum_prop": 0.5291208111784346,
"repo_name": "S7uXN37/NineMensMorrisBoard",
"id": "301c3e408c423ad7f482709823fe3e0cf0540488",
"size": "8109",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "board/mills.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "83408"
},
{
"name": "Shell",
"bytes": "1037"
}
],
"symlink_target": ""
}
|
"""Adamax for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend_config
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.optimizers.Adamax')
class Adamax(optimizer_v2.OptimizerV2):
"""Optimizer that implements the Adamax algorithm.
It is a variant of Adam based on the infinity norm.
Default parameters follow those provided in the paper.
  Adamax is sometimes superior to Adam, especially in models with embeddings.
References
see Section 7 of [Kingma et al., 2014](http://arxiv.org/abs/1412.6980)
([pdf](http://arxiv.org/pdf/1412.6980.pdf)).
"""
def __init__(self,
learning_rate=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-7,
name='Adamax',
**kwargs):
"""Construct a new Adamax optimizer.
Initialization:
```
m_0 <- 0 (Initialize initial 1st moment vector)
v_0 <- 0 (Initialize the exponentially weighted infinity norm)
t <- 0 (Initialize timestep)
```
The update rule for `variable` with gradient `g` uses an optimization
described at the end of section 7.1 of the paper:
```
t <- t + 1
m_t <- beta1 * m_{t-1} + (1 - beta1) * g
v_t <- max(beta2 * v_{t-1}, abs(g))
variable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon)
```
Similar to AdamOptimizer, the epsilon is added for numerical stability
(especially to get rid of division by zero when v_t = 0).
    In contrast to AdamOptimizer, the sparse implementation of this algorithm
    (used when the gradient is an IndexedSlices object, typically because of
    `tf.gather` or an embedding lookup in the forward pass) only updates
    variable slices and corresponding `m_t`, `v_t` terms when that part of
    the variable was used in the forward pass. This means that the sparse
    behavior differs from the dense behavior (similar to some momentum
    implementations which ignore momentum unless a variable slice was actually
    used).
Args:
learning_rate: A Tensor or a floating point value. The learning rate.
beta_1: A float value or a constant float tensor. The exponential decay
rate for the 1st moment estimates.
beta_2: A float value or a constant float tensor. The exponential decay
rate for the exponentially weighted infinity norm.
epsilon: A small constant for numerical stability.
name: Optional name for the operations created when applying gradients.
Defaults to "Adamax".
**kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`,
`decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip
gradients by value, `decay` is included for backward compatibility to
allow time inverse decay of learning rate. `lr` is included for backward
compatibility, recommended to use `learning_rate` instead.
"""
super(Adamax, self).__init__(name, **kwargs)
self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
self._set_hyper('decay', self._initial_decay)
self._set_hyper('beta_1', beta_1)
self._set_hyper('beta_2', beta_2)
self.epsilon = epsilon or backend_config.epsilon()
def _create_slots(self, var_list):
# Separate for-loops to respect the ordering of slot variables from v1.
for var in var_list:
self.add_slot(var, 'm') # Create slots for the first moments.
for var in var_list:
self.add_slot(var, 'v') # Create slots for the second moments.
def _resource_apply_dense(self, grad, var):
var_dtype = var.dtype.base_dtype
lr_t = self._decayed_lr(var_dtype)
m = self.get_slot(var, 'm')
v = self.get_slot(var, 'v')
beta_1_t = self._get_hyper('beta_1', var_dtype)
beta_2_t = self._get_hyper('beta_2', var_dtype)
local_step = math_ops.cast(self.iterations + 1, var_dtype)
beta_1_power = math_ops.pow(beta_1_t, local_step)
return training_ops.resource_apply_ada_max(
var.handle,
m.handle,
v.handle,
beta_1_power,
lr_t,
beta_1_t,
beta_2_t,
ops.convert_to_tensor(self.epsilon, var_dtype),
grad,
use_locking=self._use_locking)
def _resource_apply_sparse(self, grad, var, indices):
var_dtype = var.dtype.base_dtype
lr_t = self._decayed_lr(var_dtype)
beta_1_t = self._get_hyper('beta_1', var_dtype)
beta_2_t = self._get_hyper('beta_2', var_dtype)
local_step = math_ops.cast(self.iterations + 1, var_dtype)
beta_1_power = math_ops.pow(beta_1_t, local_step)
epsilon_t = ops.convert_to_tensor(self.epsilon, var_dtype)
# m_t = beta1 * m + (1 - beta1) * g_t
m = self.get_slot(var, 'm')
m_slice = array_ops.gather(m, indices)
m_t_slice = m_slice * beta_1_t + grad * (1 - beta_1_t)
with ops.control_dependencies([m_t_slice]):
m_t = self._resource_scatter_update(m, indices, m_t_slice)
# u_t = max(beta2 * u, abs(g_t))
v = self.get_slot(var, 'v')
v_slice = array_ops.gather(v, indices)
v_t_slice = math_ops.maximum(v_slice * beta_2_t, math_ops.abs(grad))
with ops.control_dependencies([v_t_slice]):
v_t = self._resource_scatter_update(v, indices, v_t_slice)
# theta_t = theta - lr / (1 - beta1^t) * m_t / u_t
var_slice = -lr_t / (1 - beta_1_power) * (
m_t_slice / (v_t_slice + epsilon_t))
with ops.control_dependencies([var_slice]):
var_update = self._resource_scatter_add(var, indices, var_slice)
return control_flow_ops.group(*[var_update, m_t, v_t])
def get_config(self):
config = super(Adamax, self).get_config()
config.update({
'learning_rate': self._serialize_hyperparameter('learning_rate'),
'decay': self._serialize_hyperparameter('decay'),
'beta_1': self._serialize_hyperparameter('beta_1'),
'beta_2': self._serialize_hyperparameter('beta_2'),
'epsilon': self.epsilon,
})
return config
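# A minimal NumPy reference of one dense Adamax step (an illustrative sketch,
# not part of the TensorFlow implementation; all names below are local to
# this example):
def _adamax_step_reference(var, g, m, v, t,
                           lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7):
  import numpy as np
  m = beta_1 * m + (1 - beta_1) * g        # biased first moment estimate
  v = np.maximum(beta_2 * v, np.abs(g))    # exponentially weighted infinity norm
  var = var - lr / (1 - beta_1 ** t) * m / (v + epsilon)
  return var, m, v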
|
{
"content_hash": "bc9e1f975b92c5d59dd66a521ce2f125",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 80,
"avg_line_length": 40.379746835443036,
"alnum_prop": 0.6542319749216301,
"repo_name": "ghchinoy/tensorflow",
"id": "6ea4a09fa4e4d21d65ea63c1482d8aaf7913fe7b",
"size": "7070",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/optimizer_v2/adamax.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3568"
},
{
"name": "Batchfile",
"bytes": "15317"
},
{
"name": "C",
"bytes": "699905"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "67022491"
},
{
"name": "CMake",
"bytes": "206499"
},
{
"name": "Dockerfile",
"bytes": "73602"
},
{
"name": "Go",
"bytes": "1585039"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "836400"
},
{
"name": "Jupyter Notebook",
"bytes": "1665583"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "98194"
},
{
"name": "Objective-C",
"bytes": "94022"
},
{
"name": "Objective-C++",
"bytes": "175222"
},
{
"name": "PHP",
"bytes": "17600"
},
{
"name": "Pascal",
"bytes": "3239"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "48407007"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "4733"
},
{
"name": "Shell",
"bytes": "476920"
},
{
"name": "Smarty",
"bytes": "27495"
},
{
"name": "Swift",
"bytes": "56155"
}
],
"symlink_target": ""
}
|
"""
dxf_helper.py
---------------
Manipulate DXF templates as plain text files rather
than strings inside a JSON blob
"""
import os
import json
import numpy as np
def get_json(file_name='../templates/dxf.json'):
"""
Load the JSON blob into native objects
"""
with open(file_name, 'r') as f:
t = json.load(f)
return t
def write_json(template, file_name='../templates/dxf.json'):
"""
Write a native object to a JSON blob
"""
with open(file_name, 'w') as f:
json.dump(template, f, indent=4)
def replace_whitespace(text, SAFE_SPACE='|<^>|', insert=True, reformat=False):
"""
    Replace non-strippable whitespace in a string with a safe space
"""
if insert:
# replace whitespace with safe space chr
args = (' ', SAFE_SPACE)
else:
# replace safe space chr with whitespace
args = (SAFE_SPACE, ' ')
lines = [line.strip().replace(*args)
for line in str.splitlines(text)]
# remove any blank lines
if any(len(L) == 0 for L in lines):
shaped = np.reshape(lines, (-1, 2))
mask = np.ones(len(shaped), dtype=bool)
for i, v in enumerate(shaped[:, 1]):
if len(v) == 0:
mask[i] = False
lines = shaped[mask].ravel()
if reformat:
for i in range(len(lines)):
cur = lines[i].strip()
if cur.startswith('$$'):
lines[i] = cur[1:]
elif cur.startswith('${'):
lines[i] = cur[1:]
elif cur.startswith('$'):
lines[i] = '{' + cur[1:] + '}'
return '\n'.join(lines)
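# Illustrative sketch (not part of the original CLI): replace_whitespace
# round-trips cleanly, since the safe space marker is reversible.
def _replace_whitespace_example():
    text = 'first line\nsecond  line'
    protected = replace_whitespace(text, insert=True)
    assert ' ' not in protected  # spaces are hidden behind SAFE_SPACE
    assert replace_whitespace(protected, insert=False) == text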
def write_files(template, destination='./dxf'):
"""
For a dict, write each value to destination/key
"""
    os.makedirs(destination, exist_ok=True)
for key, value in template.items():
with open(os.path.join(destination, key), 'w') as f:
f.write(replace_whitespace(value, reformat=True, insert=False))
def read_files(path):
"""
    For a directory full of files, retrieve them
    as a dict mapping file_name to text
"""
template = {}
for file_name in os.listdir(path):
# skip emacs buffers
if '~' in file_name:
continue
with open(os.path.join(path, file_name), 'r') as f:
template[file_name] = replace_whitespace(
f.read(), reformat=False, insert=True)
return template
if __name__ == '__main__':
import sys
# dump files to JSON template
if 'dump' in sys.argv:
t = read_files('dxf')
write_json(t)
elif 'read' in sys.argv:
# dump JSON to files for editing
t = get_json()
write_files(t)
else:
print("run with 'read_json' to dump JSON to files")
print("Or 'dump_json' to dump files to JSON")
|
{
"content_hash": "6d5f737c826b569ea9dbfbe17dee6493",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 78,
"avg_line_length": 26.742857142857144,
"alnum_prop": 0.5548433048433048,
"repo_name": "mikedh/trimesh",
"id": "a13ad13a2ec08069a78652960e25c57e51d91b4d",
"size": "2808",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/helpers/dxfhelp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2855"
},
{
"name": "HTML",
"bytes": "580"
},
{
"name": "JavaScript",
"bytes": "5887"
},
{
"name": "Makefile",
"bytes": "1862"
},
{
"name": "Python",
"bytes": "2142314"
},
{
"name": "Shell",
"bytes": "5161"
}
],
"symlink_target": ""
}
|
from rest_framework import viewsets, permissions
from rest_framework.response import Response
from .models import Post
from .serializers import PostSerializer
from .permissions import IsAuthorOfPost
class PostViewSet(viewsets.ModelViewSet):
queryset = Post.objects.order_by('-created_at')
serializer_class = PostSerializer
def get_permissions(self):
if self.request.method in permissions.SAFE_METHODS:
return (permissions.AllowAny(),)
return (permissions.IsAuthenticated(), IsAuthorOfPost(),)
    def perform_create(self, serializer):
        # Save once with the author set; calling the default
        # perform_create afterwards would trigger a second save.
        serializer.save(author=self.request.user)
class AccountPostsViewSet(viewsets.ViewSet):
queryset = Post.objects.select_related('author').all()
serializer_class = PostSerializer
def list(self, request, account_username=None):
queryset = self.queryset.filter(author__username=account_username)
serializer = self.serializer_class(queryset, many=True)
return Response(serializer.data)
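# Hypothetical sketch (the real IsAuthorOfPost lives in .permissions and is
# not shown here): an object-level permission like this only needs to check
# that the post's author is the requesting user.
class _IsAuthorOfPostSketch(permissions.BasePermission):
    def has_object_permission(self, request, view, obj):
        # Grant write access only to the post's author.
        return obj.author == request.user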
|
{
"content_hash": "e6be656d8f5e71cdccad446c6de5a4dc",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 74,
"avg_line_length": 33.4375,
"alnum_prop": 0.7355140186915888,
"repo_name": "tomaszzacharczuk/social-website-django-angular",
"id": "afa2095d3a4d40dad3de02c312989abe99c5a412",
"size": "1070",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "social_website_django_angular/posts/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "278"
},
{
"name": "HTML",
"bytes": "11452"
},
{
"name": "Python",
"bytes": "40226"
},
{
"name": "Shell",
"bytes": "85"
}
],
"symlink_target": ""
}
|
"""Tests for Fritz!Tools button platform."""
from unittest.mock import patch
import pytest
from homeassistant.components.button import DOMAIN as BUTTON_DOMAIN, SERVICE_PRESS
from homeassistant.components.fritz.const import DOMAIN
from homeassistant.config_entries import ConfigEntryState
from homeassistant.const import ATTR_ENTITY_ID, STATE_UNKNOWN
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
from .const import MOCK_USER_DATA
from tests.common import MockConfigEntry
async def test_button_setup(hass: HomeAssistant, fc_class_mock, fh_class_mock):
"""Test setup of Fritz!Tools buttons."""
entry = MockConfigEntry(domain=DOMAIN, data=MOCK_USER_DATA)
entry.add_to_hass(hass)
assert await async_setup_component(hass, DOMAIN, {})
await hass.async_block_till_done()
assert entry.state == ConfigEntryState.LOADED
buttons = hass.states.async_all(BUTTON_DOMAIN)
assert len(buttons) == 4
for button in buttons:
assert button.state == STATE_UNKNOWN
@pytest.mark.parametrize(
"entity_id, wrapper_method",
[
("button.mock_title_firmware_update", "async_trigger_firmware_update"),
("button.mock_title_reboot", "async_trigger_reboot"),
("button.mock_title_reconnect", "async_trigger_reconnect"),
("button.mock_title_cleanup", "async_trigger_cleanup"),
],
)
async def test_buttons(
hass: HomeAssistant,
entity_id: str,
wrapper_method: str,
fc_class_mock,
fh_class_mock,
):
"""Test Fritz!Tools buttons."""
entry = MockConfigEntry(domain=DOMAIN, data=MOCK_USER_DATA)
entry.add_to_hass(hass)
assert await async_setup_component(hass, DOMAIN, {})
await hass.async_block_till_done()
assert entry.state == ConfigEntryState.LOADED
button = hass.states.get(entity_id)
assert button
assert button.state == STATE_UNKNOWN
with patch(
f"homeassistant.components.fritz.common.AvmWrapper.{wrapper_method}"
) as mock_press_action:
await hass.services.async_call(
BUTTON_DOMAIN,
SERVICE_PRESS,
{ATTR_ENTITY_ID: entity_id},
blocking=True,
)
await hass.async_block_till_done()
mock_press_action.assert_called_once()
button = hass.states.get(entity_id)
assert button.state != STATE_UNKNOWN
|
{
"content_hash": "9571e443b62fb3fdafdee98bda1fb97c",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 82,
"avg_line_length": 31.76,
"alnum_prop": 0.6943744752308985,
"repo_name": "toddeye/home-assistant",
"id": "a2bd61327318fa951a5aa58af936096f78b409d7",
"size": "2382",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "tests/components/fritz/test_button.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3005"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "47414832"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
"""Generate an updated requirements_all.txt."""
import difflib
import importlib
import os
from pathlib import Path
import pkgutil
import re
import sys
from homeassistant.util.yaml.loader import load_yaml
from script.hassfest.model import Integration
COMMENT_REQUIREMENTS = (
"Adafruit_BBIO",
"avea", # depends on bluepy
"avion",
"beacontools",
"beewi_smartclim", # depends on bluepy
"blinkt",
"bluepy",
"bme680",
"decora",
"decora_wifi",
"envirophat",
"evdev",
"face_recognition",
"i2csense",
"opencv-python-headless",
"pybluez",
"pycups",
"PySwitchbot",
"pySwitchmate",
"python-eq3bt",
"python-gammu",
"python-lirc",
"pyuserinput",
"raspihats",
"rpi-rf",
"RPi.GPIO",
"smbus-cffi",
"tensorflow",
"tf-models-official",
"VL53L1X2",
)
IGNORE_PIN = ("colorlog>2.1,<3", "urllib3")
URL_PIN = (
"https://developers.home-assistant.io/docs/"
"creating_platform_code_review.html#1-requirements"
)
CONSTRAINT_PATH = os.path.join(
os.path.dirname(__file__), "../homeassistant/package_constraints.txt"
)
CONSTRAINT_BASE = """
pycryptodome>=3.6.6
# Constrain urllib3 to ensure we deal with CVE-2019-11236 & CVE-2019-11324
urllib3>=1.24.3
# Constrain H11 to ensure we get a new enough version to support non-rfc line endings
h11>=0.12.0
# Constrain httplib2 to protect against GHSA-93xj-8mrv-444m
# https://github.com/advisories/GHSA-93xj-8mrv-444m
httplib2>=0.19.0
# gRPC 1.32+ currently causes issues on ARMv7, see:
# https://github.com/home-assistant/core/issues/40148
grpcio==1.31.0
# This is a old unmaintained library and is replaced with pycryptodome
pycrypto==1000000000.0.0
# To remove reliance on typing
btlewrap>=0.0.10
# This overrides a built-in Python package
enum34==1000000000.0.0
typing==1000000000.0.0
uuid==1000000000.0.0
"""
IGNORE_PRE_COMMIT_HOOK_ID = (
"check-executables-have-shebangs",
"check-json",
"no-commit-to-branch",
"prettier",
"python-typing-update",
)
def has_tests(module: str):
"""Test if a module has tests.
Module format: homeassistant.components.hue
Test if exists: tests/components/hue
"""
path = Path(module.replace(".", "/").replace("homeassistant", "tests"))
if not path.exists():
return False
if not path.is_dir():
return True
# Dev environments might have stale directories around
# from removed tests. Check for that.
content = [f.name for f in path.glob("*")]
# Directories need to contain more than `__pycache__`
# to exist in Git and so be seen by CI.
return content != ["__pycache__"]
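# Worked example: has_tests("homeassistant.components.hue") is True when
# tests/components/hue exists and contains more than just __pycache__;
# a module with no counterpart under tests/ returns False.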
def explore_module(package, explore_children):
"""Explore the modules."""
module = importlib.import_module(package)
found = []
if not hasattr(module, "__path__"):
return found
for _, name, _ in pkgutil.iter_modules(module.__path__, f"{package}."):
found.append(name)
if explore_children:
found.extend(explore_module(name, False))
return found
def core_requirements():
"""Gather core requirements out of setup.py."""
reqs_raw = re.search(
r"REQUIRES = \[(.*?)\]", Path("setup.py").read_text(), re.S
).group(1)
return [x[1] for x in re.findall(r"(['\"])(.*?)\1", reqs_raw)]
def gather_recursive_requirements(domain, seen=None):
"""Recursively gather requirements from a module."""
if seen is None:
seen = set()
seen.add(domain)
integration = Integration(Path(f"homeassistant/components/{domain}"))
integration.load_manifest()
reqs = set(integration.requirements)
for dep_domain in integration.dependencies:
reqs.update(gather_recursive_requirements(dep_domain, seen))
return reqs
def comment_requirement(req):
"""Comment out requirement. Some don't install on all systems."""
return any(ign.lower() in req.lower() for ign in COMMENT_REQUIREMENTS)
def gather_modules():
"""Collect the information."""
reqs = {}
errors = []
gather_requirements_from_manifests(errors, reqs)
gather_requirements_from_modules(errors, reqs)
for key in reqs:
reqs[key] = sorted(reqs[key], key=lambda name: (len(name.split(".")), name))
if errors:
print("******* ERROR")
print("Errors while importing: ", ", ".join(errors))
return None
return reqs
def gather_requirements_from_manifests(errors, reqs):
"""Gather all of the requirements from manifests."""
integrations = Integration.load_dir(Path("homeassistant/components"))
for domain in sorted(integrations):
integration = integrations[domain]
if not integration.manifest:
errors.append(f"The manifest for integration {domain} is invalid.")
continue
if integration.disabled:
continue
process_requirements(
errors, integration.requirements, f"homeassistant.components.{domain}", reqs
)
def gather_requirements_from_modules(errors, reqs):
"""Collect the requirements from the modules directly."""
for package in sorted(
explore_module("homeassistant.scripts", True)
+ explore_module("homeassistant.auth", True)
):
try:
module = importlib.import_module(package)
except ImportError as err:
print(f"{package.replace('.', '/')}.py: {err}")
errors.append(package)
continue
if getattr(module, "REQUIREMENTS", None):
process_requirements(errors, module.REQUIREMENTS, package, reqs)
def process_requirements(errors, module_requirements, package, reqs):
"""Process all of the requirements."""
for req in module_requirements:
if "://" in req:
errors.append(f"{package}[Only pypi dependencies are allowed: {req}]")
if req.partition("==")[1] == "" and req not in IGNORE_PIN:
errors.append(f"{package}[Please pin requirement {req}, see {URL_PIN}]")
reqs.setdefault(req, []).append(package)
def generate_requirements_list(reqs):
"""Generate a pip file based on requirements."""
output = []
for pkg, requirements in sorted(reqs.items(), key=lambda item: item[0]):
for req in sorted(requirements):
output.append(f"\n# {req}")
if comment_requirement(pkg):
output.append(f"\n# {pkg}\n")
else:
output.append(f"\n{pkg}\n")
return "".join(output)
def requirements_output(reqs):
"""Generate output for requirements."""
output = [
"-c homeassistant/package_constraints.txt\n",
"\n",
"# Home Assistant Core\n",
]
output.append("\n".join(core_requirements()))
output.append("\n")
return "".join(output)
def requirements_all_output(reqs):
"""Generate output for requirements_all."""
output = [
"# Home Assistant Core, full dependency set\n",
"-r requirements.txt\n",
]
output.append(generate_requirements_list(reqs))
return "".join(output)
def requirements_test_all_output(reqs):
"""Generate output for test_requirements."""
output = [
"# Home Assistant tests, full dependency set\n",
f"# Automatically generated by {Path(__file__).name}, do not edit\n",
"\n",
"-r requirements_test.txt\n",
]
filtered = {
requirement: modules
for requirement, modules in reqs.items()
if any(
# Always install requirements that are not part of integrations
not mdl.startswith("homeassistant.components.") or
# Install tests for integrations that have tests
has_tests(mdl)
for mdl in modules
)
}
output.append(generate_requirements_list(filtered))
return "".join(output)
def requirements_pre_commit_output():
"""Generate output for pre-commit dependencies."""
source = ".pre-commit-config.yaml"
pre_commit_conf = load_yaml(source)
reqs = []
for repo in (x for x in pre_commit_conf["repos"] if x.get("rev")):
for hook in repo["hooks"]:
if hook["id"] not in IGNORE_PRE_COMMIT_HOOK_ID:
reqs.append(f"{hook['id']}=={repo['rev'].lstrip('v')}")
reqs.extend(x for x in hook.get("additional_dependencies", ()))
output = [
f"# Automatically generated "
f"from {source} by {Path(__file__).name}, do not edit",
"",
]
output.extend(sorted(reqs))
return "\n".join(output) + "\n"
def gather_constraints():
"""Construct output for constraint file."""
return (
"\n".join(
sorted(
{
*core_requirements(),
*gather_recursive_requirements("default_config"),
*gather_recursive_requirements("mqtt"),
}
)
+ [""]
)
+ CONSTRAINT_BASE
)
def diff_file(filename, content):
"""Diff a file."""
return list(
difflib.context_diff(
[f"{line}\n" for line in Path(filename).read_text().split("\n")],
[f"{line}\n" for line in content.split("\n")],
filename,
"generated",
)
)
def main(validate):
"""Run the script."""
if not os.path.isfile("requirements_all.txt"):
print("Run this from HA root dir")
return 1
data = gather_modules()
if data is None:
return 1
reqs_file = requirements_output(data)
reqs_all_file = requirements_all_output(data)
reqs_test_all_file = requirements_test_all_output(data)
reqs_pre_commit_file = requirements_pre_commit_output()
constraints = gather_constraints()
files = (
("requirements.txt", reqs_file),
("requirements_all.txt", reqs_all_file),
("requirements_test_pre_commit.txt", reqs_pre_commit_file),
("requirements_test_all.txt", reqs_test_all_file),
("homeassistant/package_constraints.txt", constraints),
)
if validate:
errors = []
for filename, content in files:
diff = diff_file(filename, content)
if diff:
errors.append("".join(diff))
if errors:
print("ERROR - FOUND THE FOLLOWING DIFFERENCES")
print()
print()
print("\n\n".join(errors))
print()
print("Please run python3 -m script.gen_requirements_all")
return 1
return 0
for filename, content in files:
Path(filename).write_text(content)
return 0
if __name__ == "__main__":
_VAL = sys.argv[-1] == "validate"
sys.exit(main(_VAL))
|
{
"content_hash": "f090ec488738f2179ee97a19e97d97ec",
"timestamp": "",
"source": "github",
"line_count": 391,
"max_line_length": 88,
"avg_line_length": 27.406649616368288,
"alnum_prop": 0.6092758491974617,
"repo_name": "kennedyshead/home-assistant",
"id": "4fd96cb1b04b4b6c57d97909088d359be5d626c3",
"size": "10739",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "script/gen_requirements_all.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "33970989"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import numpy as np
import sys
import six
from six.moves import map
from six.moves import range
class sparsearray(dict):
def __init__(self,input_array=[[]]):
#import pdb;pdb.set_trace()
dict.__init__(self)
a = np.atleast_2d(input_array)
self.dtype = a.dtype
self.ndim = a.ndim
self._shape = None
if not a.size:
return
it = np.nditer(a, flags=['multi_index'])
while not it.finished:
index = tuple([i+1 for i in it.multi_index])
self.setdefault(index,it[0].item())
it.iternext()
@property
def shape(self):
#import pdb;pdb.set_trace()
if self._shape is None:
s = [0] * self.ndim
for key in self.keys():
for i,k in enumerate(key):
s[i] = max(k,s[i])
self._shape = tuple(s)
return self._shape
def todense(self):
a = np.zeros(self.shape,dtype=self.dtype)
for key,value in six.iteritems(self):
key = tuple([i-1 for i in key])
a.__setitem__(key,value)
return a
def __str__(self):
return str(self.todense())
def __repr__(self):
return repr(self.todense())
def copy(self):
#return copy.copy(self)
return self.todense()
def __setitem__(self,index,value):
if np.isscalar(value):
for key in self.iterkeys(index):
dict.__setitem__(self,key,value)
self._shape = None
else:
raise NotImplementedError
def __getslice__(self,i,j):
if j == sys.maxsize:
j = None
return self.__getitem__(slice(i,j,None))
def __getitem__(self,index):
try:
#a = [dict.__getitem__(self,key) for key in self.iterkeys(index)]
a = [self.get(key,0) for key in self.iterkeys(index)]
if len(a) == 1:
return a[0]
except ValueError:
raise IndexError # out of bound rhs indexing
#return a
#return sparsearray([a])
return np.array(a)
def iterkeys(self,index):
#import pdb; pdb.set_trace()
if not isinstance(index,tuple) and self.shape[0] == 1:
index = (1,index)
if isinstance(index, int):
key = np.unravel_index(index-1, self.shape, order='F')
yield tuple(k+1 for k in key)
elif isinstance(index,slice):
index = list(range((index.start or 1)-1,
index.stop or np.prod(self.shape),
index.step or 1))
for key in np.transpose(np.unravel_index(index, self.shape, order='F')): # 0-based
yield tuple(k+1 for k in key)
elif isinstance(index,(list,np.ndarray)):
index = np.asarray(index)-1
for key in np.transpose(np.unravel_index(index, self.shape, order='F')):
yield tuple(k+1 for k in key)
else:
assert isinstance(index,tuple),index.__class__
indices = [] # 1-based
for i,ix in enumerate(index):
if isinstance(ix,slice):
indices.append(np.arange((ix.start or 1),
(ix.stop or self.shape[i]) + 1,
ix.step or 1,
dtype=int))
else:
indices.append(np.asarray(ix))
assert len(index) == 2
indices[0].shape = (-1,1)
for key in np.broadcast(*indices):
yield tuple(map(int,key))
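# Illustrative usage sketch: keys are 1-based (row, column) tuples, MATLAB
# style, and todense() recovers the equivalent ndarray.
def _sparsearray_example():
    a = sparsearray([[1, 0], [0, 2]])
    assert a.shape == (2, 2)
    assert a[1, 1] == 1 and a[2, 2] == 2
    assert (a.todense() == np.array([[1, 0], [0, 2]])).all()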
|
{
"content_hash": "e694137e80581924ce58dc2aa8149ad1",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 94,
"avg_line_length": 34.388888888888886,
"alnum_prop": 0.501884760366182,
"repo_name": "metamorph-inc/smop",
"id": "e527a723224f073d51a21bc31480ed5ec045ad51",
"size": "3714",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smop/sparsearray.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "16951"
},
{
"name": "MATLAB",
"bytes": "84215"
},
{
"name": "Makefile",
"bytes": "1477"
},
{
"name": "Python",
"bytes": "349509"
},
{
"name": "Shell",
"bytes": "187"
}
],
"symlink_target": ""
}
|
from chainer.backends import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
class SquaredDifference(function_node.FunctionNode):
"""Squared difference of input variables."""
def check_type_forward(self, in_types):
type_check.argname(in_types, ('x1', 'x2'))
type_check.expect(
in_types[0].dtype.kind == 'f',
in_types[0].dtype == in_types[1].dtype,
in_types[0].shape == in_types[1].shape
)
def forward(self, inputs):
self.retain_inputs((0, 1))
xp = cuda.get_array_module(*inputs)
x1, x2 = inputs
difference = x1 - x2
y = xp.square(difference)
return utils.force_array(y, dtype=x1.dtype),
def backward(self, indexes, grads):
gy, = grads
x1, x2 = self.get_retained_inputs()
difference = x1 - x2
gx = gy * 2 * difference
return gx, -gx
def squared_difference(x1, x2):
"""Squared difference of input variables.
Args:
x1 (~chainer.Variable): Input variables to be compared.
x2 (~chainer.Variable): Input variables to be compared.
Returns:
~chainer.Variable: ``(x1 - x2) ** 2`` element-wise.
"""
return SquaredDifference().apply((x1, x2))[0]
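# Minimal usage sketch (raw numpy arrays are accepted as inputs):
def _squared_difference_example():
    import numpy as np
    x1 = np.array([1., 2.], dtype=np.float32)
    x2 = np.array([3., 1.], dtype=np.float32)
    y = squared_difference(x1, x2)
    assert np.allclose(y.array, [4., 1.])  # (1-3)**2 == 4, (2-1)**2 == 1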
|
{
"content_hash": "52681ce802c462a893fa42b77544a6ed",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 63,
"avg_line_length": 29.681818181818183,
"alnum_prop": 0.6041347626339969,
"repo_name": "rezoo/chainer",
"id": "0f04c5c0de0ca960a187b3f8793907af9be805b7",
"size": "1306",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chainer/functions/math/squared_difference.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3368"
},
{
"name": "Dockerfile",
"bytes": "1238"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "4367165"
}
],
"symlink_target": ""
}
|
"""Functions for working with features in a raster dataset."""
import logging
import numpy as np
from rasterio._features import _shapes, _sieve, _rasterize, _bounds
from rasterio.dtypes import validate_dtype, can_cast_dtype, get_minimum_dtype
from rasterio.env import ensure_env
from rasterio.transform import IDENTITY, guard_transform
log = logging.getLogger(__name__)
@ensure_env
def geometry_mask(
geometries,
out_shape,
transform,
all_touched=False,
invert=False):
"""Create a mask from shapes.
By default, mask is intended for use as a
numpy mask, where pixels that overlap shapes are False.
Parameters
----------
geometries : iterable over geometries (GeoJSON-like objects)
out_shape : tuple or list
Shape of output numpy ndarray.
transform : Affine transformation object
Transformation from pixel coordinates of `image` to the
coordinate system of the input `shapes`. See the `transform`
property of dataset objects.
all_touched : boolean, optional
If True, all pixels touched by geometries will be burned in. If
false, only pixels whose center is within the polygon or that
are selected by Bresenham's line algorithm will be burned in.
invert: boolean, optional
If True, mask will be True for pixels that overlap shapes.
False by default.
Returns
-------
out : numpy ndarray of type 'bool'
Result
"""
fill, mask_value = (0, 1) if invert else (1, 0)
return rasterize(
geometries,
out_shape=out_shape,
transform=transform,
all_touched=all_touched,
fill=fill,
default_value=mask_value).astype('bool')
@ensure_env
def shapes(image, mask=None, connectivity=4, transform=IDENTITY):
"""Yield (polygon, value for each set of adjacent pixels of the same value.
Parameters
----------
image : numpy ndarray or rasterio Band object
(RasterReader, bidx namedtuple).
Data type must be one of rasterio.int16, rasterio.int32,
rasterio.uint8, rasterio.uint16, or rasterio.float32.
mask : numpy ndarray or rasterio Band object, optional
Values of False or 0 will be excluded from feature generation
Must evaluate to bool (rasterio.bool_ or rasterio.uint8)
connectivity : int, optional
Use 4 or 8 pixel connectivity for grouping pixels into features
transform : Affine transformation, optional
If not provided, feature coordinates will be generated based on pixel
coordinates
Yields
-------
tuple
A pair of (polygon, value) for each feature found in the image.
Polygons are GeoJSON-like dicts and the values are the associated value
from the image, in the data type of the image.
Note: due to floating point precision issues, values returned from a
floating point image may not exactly match the original values.
Notes
-----
The amount of memory used by this algorithm is proportional to the number
and complexity of polygons produced. This algorithm is most appropriate
for simple thematic data. Data with high pixel-to-pixel variability, such
as imagery, may produce one polygon per pixel and consume large amounts of
memory.
"""
transform = guard_transform(transform)
for s, v in _shapes(image, mask, connectivity, transform.to_gdal()):
yield s, v
@ensure_env
def sieve(image, size, out=None, mask=None, connectivity=4):
"""Replace small polygons in `image` with value of their largest neighbor.
Polygons are found for each set of neighboring pixels of the same value.
Parameters
----------
image : numpy ndarray or rasterio Band object
(RasterReader, bidx namedtuple)
Must be of type rasterio.int16, rasterio.int32, rasterio.uint8,
rasterio.uint16, or rasterio.float32
size : int
minimum polygon size (number of pixels) to retain.
out : numpy ndarray, optional
Array of same shape and data type as `image` in which to store results.
mask : numpy ndarray or rasterio Band object, optional
Values of False or 0 will be excluded from feature generation
Must evaluate to bool (rasterio.bool_ or rasterio.uint8)
connectivity : int, optional
Use 4 or 8 pixel connectivity for grouping pixels into features
Returns
-------
out : numpy ndarray
Result
Notes
-----
GDAL only supports values that can be cast to 32-bit integers for this
operation.
The amount of memory used by this algorithm is proportional to the number
and complexity of polygons found in the image. This algorithm is most
appropriate for simple thematic data. Data with high pixel-to-pixel
variability, such as imagery, may produce one polygon per pixel and consume
large amounts of memory.
"""
if out is None:
out = np.zeros(image.shape, image.dtype)
_sieve(image, size, out, mask, connectivity)
return out
@ensure_env
def rasterize(
shapes,
out_shape=None,
fill=0,
out=None,
transform=IDENTITY,
all_touched=False,
default_value=1,
dtype=None):
"""Return an image array with input geometries burned in.
Parameters
----------
shapes : iterable of (geometry, value) pairs or iterable over
geometries. `geometry` can either be an object that implements
the geo interface or GeoJSON-like object.
out_shape : tuple or list with 2 integers
Shape of output numpy ndarray.
fill : int or float, optional
Used as fill value for all areas not covered by input
geometries.
out : numpy ndarray, optional
Array of same shape and data type as `image` in which to store
results.
transform : Affine transformation object, optional
Transformation from pixel coordinates of `image` to the
coordinate system of the input `shapes`. See the `transform`
property of dataset objects.
all_touched : boolean, optional
If True, all pixels touched by geometries will be burned in. If
false, only pixels whose center is within the polygon or that
are selected by Bresenham's line algorithm will be burned in.
default_value : int or float, optional
Used as value for all geometries, if not provided in `shapes`.
dtype : rasterio or numpy data type, optional
Used as data type for results, if `out` is not provided.
Returns
-------
out : numpy ndarray
Results
Notes
-----
Valid data types for `fill`, `default_value`, `out`, `dtype` and
shape values are rasterio.int16, rasterio.int32, rasterio.uint8,
rasterio.uint16, rasterio.uint32, rasterio.float32,
rasterio.float64.
"""
valid_dtypes = (
'int16', 'int32', 'uint8', 'uint16', 'uint32', 'float32', 'float64'
)
def format_invalid_dtype(param):
return '{0} dtype must be one of: {1}'.format(
param, ', '.join(valid_dtypes)
)
def format_cast_error(param, dtype):
return '{0} cannot be cast to specified dtype: {1}'.format(param, dtype)
if fill != 0:
fill_array = np.array([fill])
if not validate_dtype(fill_array, valid_dtypes):
raise ValueError(format_invalid_dtype('fill'))
if dtype is not None and not can_cast_dtype(fill_array, dtype):
raise ValueError(format_cast_error('fill', dtype))
if default_value != 1:
default_value_array = np.array([default_value])
if not validate_dtype(default_value_array, valid_dtypes):
raise ValueError(format_invalid_dtype('default_value'))
if dtype is not None and not can_cast_dtype(default_value_array, dtype):
            raise ValueError(format_cast_error('default_value', dtype))
if dtype is not None and np.dtype(dtype).name not in valid_dtypes:
raise ValueError(format_invalid_dtype('dtype'))
valid_shapes = []
shape_values = []
for index, item in enumerate(shapes):
if isinstance(item, (tuple, list)):
geom, value = item
else:
geom = item
value = default_value
geom = getattr(geom, '__geo_interface__', None) or geom
if 'type' in geom or 'coordinates' in geom:
valid_shapes.append((geom, value))
shape_values.append(value)
else:
raise ValueError(
'Invalid geometry object at index {0}'.format(index)
)
if not valid_shapes:
raise ValueError('No valid geometry objects found for rasterize')
shape_values = np.array(shape_values)
if not validate_dtype(shape_values, valid_dtypes):
raise ValueError(format_invalid_dtype('shape values'))
if dtype is None:
dtype = get_minimum_dtype(np.append(shape_values, fill))
elif not can_cast_dtype(shape_values, dtype):
raise ValueError(format_cast_error('shape values', dtype))
if out is not None:
if np.dtype(out.dtype).name not in valid_dtypes:
raise ValueError(format_invalid_dtype('out'))
if not can_cast_dtype(shape_values, out.dtype):
raise ValueError(format_cast_error('shape values', out.dtype.name))
elif out_shape is not None:
if len(out_shape) != 2:
raise ValueError('Invalid out_shape, must be 2D')
out = np.empty(out_shape, dtype=dtype)
out.fill(fill)
else:
raise ValueError('Either an out_shape or image must be provided')
transform = guard_transform(transform)
_rasterize(valid_shapes, out, transform.to_gdal(), all_touched)
return out
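# Minimal usage sketch (identity transform, so shape coordinates are pixel
# coordinates): burn a 2x2 square into a 4x4 array.
def _rasterize_example():
    square = {
        'type': 'Polygon',
        'coordinates': [[(0, 0), (0, 2), (2, 2), (2, 0), (0, 0)]],
    }
    out = rasterize([(square, 1)], out_shape=(4, 4))
    # out[:2, :2] == 1 (pixel centers inside the square), elsewhere fill=0
    return out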
def bounds(geometry, north_up=True):
"""Return a (left, bottom, right, top) bounding box.
From Fiona 1.4.8. Modified to return bbox from geometry if available.
Parameters
----------
geometry: GeoJSON-like feature, feature collection, or geometry.
Returns
-------
tuple
Bounding box: (left, bottom, right, top)
"""
if 'bbox' in geometry:
return tuple(geometry['bbox'])
geom = geometry.get('geometry') or geometry
return _bounds(geom, north_up=north_up)
|
{
"content_hash": "e3d16d44a23a48ef4f56cfbe6dad3ca3",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 80,
"avg_line_length": 33.811074918566774,
"alnum_prop": 0.6535645472061657,
"repo_name": "brendan-ward/rasterio",
"id": "4a5c672c8dac506be1da613a6bb108790e90e0a3",
"size": "10380",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rasterio/features.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "34752"
},
{
"name": "Makefile",
"bytes": "539"
},
{
"name": "Python",
"bytes": "828986"
},
{
"name": "Shell",
"bytes": "2942"
}
],
"symlink_target": ""
}
|
import functools
from stravalib import model
from stravalib.client import BatchedResultsIterator
from stravalib.tests.functional import FunctionalTestBase
class ResultIteratorTest(FunctionalTestBase):
def setUp(self):
super(ResultIteratorTest, self).setUp()
self.protocol = self.client.protocol
def test_limit_call(self):
""" Test setting the limit in method call. """
result_fetcher = functools.partial(self.protocol.get, '/athlete/activities')
results = BatchedResultsIterator(entity=model.Activity, result_fetcher=result_fetcher, limit=10, per_page=2)
results = list(results)
self.assertEquals(10, len(results))
def test_multiple_iterator_calls(self):
""" Test multiple calls of the iterator. """
result_fetcher = functools.partial(self.protocol.get, '/athlete/activities')
results = BatchedResultsIterator(entity=model.Activity, result_fetcher=result_fetcher, limit=10, per_page=2)
results.limit = 10
results1 = list(results)
results2 = list(results)
self.assertEquals(10, len(results1))
self.assertEquals(len(results1), len(results2))
def test_limit_iterator(self):
""" Test setting the limit on the iterator. """
result_fetcher = functools.partial(self.protocol.get, '/athlete/activities')
results = BatchedResultsIterator(entity=model.Activity, result_fetcher=result_fetcher, limit=10, per_page=2)
results.limit = 10
results = list(results)
self.assertEquals(10, len(results))
# TODO: use a mock here to figure out how many calls are happening under the hood.
def test_empty(self):
""" Test iterating over empty results. """
        # Specify two things that we happen to know will return 0 results
def pretend_fetcher(page, per_page):
return []
ri = BatchedResultsIterator(entity=model.Shoe, result_fetcher=pretend_fetcher)
results = list(ri)
self.assertEquals(0, len(results))
|
{
"content_hash": "8edbc4d084c84189e5f3fc08fb23e095",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 116,
"avg_line_length": 39.38181818181818,
"alnum_prop": 0.644506001846722,
"repo_name": "Wisees/stravalib",
"id": "d994b37d7fb825c499086cea6e5ea1db97828911",
"size": "2166",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "stravalib/tests/functional/test_result_iterator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "176548"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from math import asin, cos, degrees, log, pi, radians, sin, sinh, sqrt
import numpy as np
import auttitude as at
from auttitude.io import dcos_line, sphere_line
# maybe add option to build grid from projecting regular
# plane grid to sphere
class SphericalGrid(object):
"""
    This class represents a quasi-regular spherical grid with a given distance
between nodes.
Creates a hemi-spherical counting grid by tesselation.
Parameters:
node_spacing: Distance between nodes in degrees.
"""
def __init__(self, node_spacing=2.5):
nodes = [(0., 90.)]
spacing = radians(node_spacing)
for phi in np.arange(node_spacing, 90., node_spacing):
azimuth_spacing = degrees(2 * asin(
(sin(spacing / 2) / sin(radians(phi)))))
for theta in np.linspace(0., 360. - azimuth_spacing, int(360. // azimuth_spacing)):
nodes.append((theta + phi + node_spacing / 2, 90. - phi))
nodes.append((theta - 180 + phi + node_spacing / 2, phi - 90.))
for theta in np.arange(0., 360., node_spacing):
nodes.append(((theta + 90. + node_spacing / 2) % 360., 0.))
self.node_attitudes = nodes
self.grid = dcos_line(np.array(nodes))
@staticmethod
def optimize_k(data):
"""Optimizes the value of K from the data, using Diggle and Fisher (88)
method."""
from scipy.optimize import minimize_scalar
def obj(k): # objective function to be minimized
W = np.exp(k*(np.abs(np.dot(data, np.transpose(data)))))\
* (k/(4*pi*sinh(k+1e-9)))
np.fill_diagonal(W, 0.)
return -np.log(W.sum(axis=0)).sum()
return minimize_scalar(obj).x
def count_fisher(self, data, k=None):
"""Performs axial data counting as in Robin and Jowett (1986).
Will estimate an appropriate k if not given."""
if k is None:
            k = self.optimize_k(data)  # It's a better estimate than R&J 86.
return self.count(
data,
lambda nodes, data, k: np.exp(
k * (np.abs(np.dot(nodes, np.transpose(data))) - 1)),
k)
def count_kamb(self, data, theta=None):
"""Performs data counting as in Robin and Jowett (1986) based on
        Kamb (1956). Will estimate an appropriate counting angle theta
        if not given."""
if theta is None:
theta = (len(data) - 1.0) / (len(data) + 1.0)
else:
theta = cos(radians(theta))
return self.count(
data,
lambda nodes, data, theta: np.where(
np.abs(np.dot(nodes, np.transpose(data))) >= theta, 1, 0),
theta)
def count(self, data, function, *args, **kwargs):
"""Generic counting grid method that accepts a function which
receives the grid (or a node in the grid), the data and the additional
arguments and keyword arguments passed to this method."""
try: # Try calculating directly with numpy arrays
return function(self.grid, data, *args, **kwargs).sum(axis=1)
except (MemoryError, ValueError):
result = np.zeros(self.grid.shape[0])
for i, input_node in enumerate(self.grid):
result[i] = function(input_node, data, *args, **kwargs).sum()
return result
class CircularGrid(object):
def __init__(self, spacing=1., offset=0., **kwargs):
self.spacing = spacing
self.grid = self.build_grid(spacing, offset)
def build_grid(self, spacing, offset=0., from_=0., to_=2 * pi):
s = radians(spacing)
o = radians(offset)
theta_range = np.arange(o, 2 * pi + o, s)
theta_range = theta_range[np.logical_and(theta_range >= from_,
theta_range <= to_)]
return np.array((np.sin(theta_range), np.cos(theta_range))).T
def cdis(self, data, nodes=None, axial=False):
nodes = self.grid if nodes is None else nodes
d = np.clip(
np.dot(nodes, np.transpose(data)) / np.linalg.norm(data, axis=1),
-1, 1)
if axial:
d = np.abs(d)
return d
def count(self,
data,
aperture=None,
axial=False,
spacing=None,
offset=0,
nodes=None,
data_weight=None):
aperture = radians(aperture) / 2. if aperture is not None else radians(
self.spacing) / 2.
if nodes is None:
nodes = self.grid if spacing is None else self.build_grid(
spacing, offset)
spacing = radians(
self.spacing) / 2 if spacing is None else radians(spacing) / 2
c = cos(aperture)
n = data.shape[0]
data_weight = np.ones(n) if data_weight is None else data_weight
return np.where(self.cdis(data, nodes, axial=axial) >= c,\
data_weight, 0.).sum(axis=1)[:,None]/data_weight.sum()
def count_munro(self,
data,
weight=.9,
aperture=10.,
axial=False,
spacing=None,
offset=0,
nodes=None,
data_weight=None):
spacing = 1 if spacing is None else spacing
if nodes is None:
nodes = self.grid if spacing is None else self.build_grid(
spacing, offset)
d = self.cdis(data, nodes, axial=axial)
aperture = radians(aperture) / 2. if aperture is not None else radians(
self.spacing) / 2.
c = cos(aperture)
theta = np.arccos(d) * pi / aperture
data_weight = np.ones(
data.shape[0]) if data_weight is None else data_weight
upscale = 1. + 2. * np.power(
weight, np.arange(0., aperture, radians(spacing))).sum()
return (np.where(d >= c, data_weight, 0) * np.power(weight, theta)
).sum(axis=1)[:, None] * upscale / data_weight.sum()
class CircularStatistics(object):
# pylint: disable=too-many-instance-attributes
def __init__(self, data): # Should this really be built by default?
n = len(data)
self.resultant_vector = at.datamodels.Vector(np.sum(data, axis=0))
self.mean_resultant_vector = self.resultant_vector / n
self.mean_vector = self.resultant_vector / self.resultant_vector.length
self.resultant_length = self.resultant_vector.length
self.mean_resultant_length = self.resultant_length / n
self.resultant_vector_attitude = self.resultant_vector.attitude
self.circular_variance = 1 - self.mean_resultant_length
self.circular_standard_deviation = sqrt(
-2 * log(1 - self.circular_variance))
# self.circular_mean_direction_axial, self.circular_confidence_axial =\
# self.estimate_circular_confidence(axial=True)
# self.circular_mean_direction, self.circular_confidence =\
# self.estimate_circular_confidence(axial=False)
self.fisher_k = (n - 1) / (n - self.resultant_length)
direction_tensor = np.dot(np.transpose(data), data) / n
eigenvalues, eigenvectors = np.linalg.eigh(direction_tensor)
eigenvalues_order = (-eigenvalues).argsort()
self.eigenvalues = eigenvalues[eigenvalues_order]
self.eigenvectors = [
at.datamodels.Vector(eigenvector)
for eigenvector in eigenvectors[:, eigenvalues_order].T
]
self.eigenvectors_attitude = sphere_line(self.eigenvectors)
class SphericalStatistics(object):
# pylint: disable=too-many-instance-attributes
def __init__(self, data): # Should this really be built by default?
n = len(data)
self.resultant_vector = at.datamodels.Vector(np.sum(data, axis=0))
self.mean_resultant_vector = self.resultant_vector / n
self.mean_vector = self.resultant_vector / self.resultant_vector.length
self.resultant_length = self.resultant_vector.length
self.mean_resultant_length = self.resultant_length / n
self.resultant_vector_attitude = self.resultant_vector.attitude
self.fisher_k = (n - 1) / (n - self.resultant_length)
direction_tensor = np.dot(np.transpose(data), data) / n
eigenvalues, eigenvectors = np.linalg.eigh(direction_tensor)
eigenvalues_order = (-eigenvalues).argsort()
lambda1, lambda2, lambda3 = self.eigenvalues =\
eigenvalues[eigenvalues_order]
lambda_sum = eigenvalues.sum()
self.eigenvectors = [
at.datamodels.Vector(eigenvector)
for eigenvector in eigenvectors[:, eigenvalues_order].T
]
self.eigenvectors_attitude = sphere_line(self.eigenvectors)
# Check for divide by zero on stats?
# From Vollmer 1990
self.vollmer_P = (lambda1 - lambda2) / lambda_sum
self.vollmer_G = 2 * (lambda2 - lambda3) / lambda_sum
self.vollmer_R = 3 * lambda3 / lambda_sum
self.vollmer_classification = ("point", "girdle", "random")[np.argmax((
self.vollmer_P, self.vollmer_G, self.vollmer_R))]
self.vollmer_B = self.vollmer_P + self.vollmer_G
self.vollmer_C = log(lambda1 / lambda3)
# From Woodcock 1977
self.woodcock_Kx = log(lambda2 / lambda3)
self.woodcock_Ky = log(lambda1 / lambda2)
self.woodcock_C = log(lambda1 / lambda3)
self.woodcock_K = self.woodcock_Ky / self.woodcock_Kx
def sample_fisher(mean_vector, kappa, n):
"""Samples n vectors from von Mises-Fisher distribution."""
mean_vector = at.datamodels.Vector(mean_vector)
direction_vector = mean_vector.direction_vector
dip_vector = mean_vector.dip_vector
theta_sample = np.random.uniform(0, 2 * pi, n)
alpha_sample = np.random.vonmises(0, kappa / 2., n) # Why?
return at.datamodels.VectorSet(
((direction_vector[:, None] * np.cos(theta_sample) +
dip_vector[:, None] * np.sin(theta_sample)) * np.sin(alpha_sample) +
mean_vector[:, None] * np.cos(alpha_sample)).T)
def sample_uniform(n):
"""Sample n vectors for the uniform distribution on the sphere."""
samples = np.random.normal(size=(n, 3))
return at.datamodels.VectorSet(
samples / np.linalg.norm(samples, axis=1)[:, None])
DEFAULT_GRID = SphericalGrid(node_spacing=2.5)
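# Minimal usage sketch: count a uniform random sample on the default grid
# (node counts should come out roughly flat for uniform data).
def _counting_example():
    data = sample_uniform(500)
    return DEFAULT_GRID.count_kamb(data)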
|
{
"content_hash": "f795e8506ba873ce93b13f97c27b63ab",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 95,
"avg_line_length": 40.4367816091954,
"alnum_prop": 0.593992798938791,
"repo_name": "endarthur/autti",
"id": "6ae4c5c4de05eedf9e40c157337dd60f7ac76778",
"size": "10596",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "auttitude/stats.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "65939"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from tapi_server.models.base_model_ import Model
from tapi_server import util
class TapiOamGetoamserviceInput(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, service_id=None): # noqa: E501
"""TapiOamGetoamserviceInput - a model defined in OpenAPI
:param service_id: The service_id of this TapiOamGetoamserviceInput. # noqa: E501
:type service_id: str
"""
self.openapi_types = {
'service_id': str
}
self.attribute_map = {
'service_id': 'service-id'
}
self._service_id = service_id
@classmethod
def from_dict(cls, dikt) -> 'TapiOamGetoamserviceInput':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The tapi.oam.getoamservice.Input of this TapiOamGetoamserviceInput. # noqa: E501
:rtype: TapiOamGetoamserviceInput
"""
return util.deserialize_model(dikt, cls)
@property
def service_id(self):
"""Gets the service_id of this TapiOamGetoamserviceInput.
none # noqa: E501
:return: The service_id of this TapiOamGetoamserviceInput.
:rtype: str
"""
return self._service_id
@service_id.setter
def service_id(self, service_id):
"""Sets the service_id of this TapiOamGetoamserviceInput.
none # noqa: E501
:param service_id: The service_id of this TapiOamGetoamserviceInput.
:type service_id: str
"""
self._service_id = service_id
|
{
"content_hash": "e93b3bed15ddc3a308b8e40496d52420",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 98,
"avg_line_length": 28.0625,
"alnum_prop": 0.6269487750556793,
"repo_name": "OpenNetworkingFoundation/ONFOpenTransport",
"id": "a91b822eb64aea1e53c0852193eeb17ac4379399",
"size": "1813",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "RI/flask_server/tapi_server/models/tapi_oam_getoamservice_input.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "D",
"bytes": "2562"
}
],
"symlink_target": ""
}
|
import sys
from cplex.callbacks import HeuristicCallback
from docplex.mp.callbacks.cb_mixin import *
class RoundDown(ModelCallbackMixin, HeuristicCallback):
def __init__(self, env):
HeuristicCallback.__init__(self, env)
ModelCallbackMixin.__init__(self)
@print_called('--> calling my_round_down callback... #{0}')
def __call__(self):
feas = self.get_feasibilities()
var_indices = [j for j, f in enumerate(feas) if f == self.feasibility_status.feasible]
if var_indices:
# this shows how to get back to the DOcplex variable from the index
# but is not necessary for the logic.
dvars = [self.index_to_var(v) for v in var_indices]
print('* rounded vars = [{0}]'.format(', '.join([v.name for v in dvars[:3]])))
# -- calling set-solution in cplex callback class
self.set_solution([var_indices, [0.0] * len(var_indices)])
def try_heuristic_cb_on_model(mdl):
mdl.register_callback(RoundDown)
# tweak cplex parameters
mdl.parameters.mip.tolerances.mipgap = 1.0e-6
mdl.parameters.mip.strategy.search = 0
s = mdl.solve()
assert s is not None
mdl.report()
return s
def try_heuristic_cb_on_file(filename):
from docplex.mp.model_reader import ModelReader
mdl = ModelReader.read(filename)
if mdl:
return try_heuristic_cb_on_model(mdl)
if __name__ == "__main__":
if len(sys.argv) < 2:
data_file = "data/location.lp"
expected = 499
elif len(sys.argv) == 2:
data_file = sys.argv[1]
expected = None
else:
print("Usage: heuristic_callback.py filename")
print(" filename Name of a file, with .mps, .lp, or .sav")
print(" extension, and a possible, additional .gz")
print(" extension")
sys.exit(-1)
s = try_heuristic_cb_on_file(data_file)
if expected:
assert abs(s.objective_value - expected) <= 1
|
{
"content_hash": "d81f010d21144fe15cf308ed1538804a",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 94,
"avg_line_length": 32.65573770491803,
"alnum_prop": 0.6099397590361446,
"repo_name": "IBMDecisionOptimization/docplex-examples",
"id": "f8912846a9d37cd545e3d0b38e7859257dbb27ce",
"size": "2564",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/mp/callbacks/heuristic_callback.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from django.contrib import admin
from .models import Movies, Directors, ScheduledMovies
admin.site.register(Movies)
admin.site.register(Directors)
admin.site.register(ScheduledMovies)
|
{
"content_hash": "d5a4d8fde8351617daadc26d25f78c74",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 54,
"avg_line_length": 30.833333333333332,
"alnum_prop": 0.8378378378378378,
"repo_name": "HenriqueLR/movie-book",
"id": "e333f46a37acabd1c495eb220881680d08d74a65",
"size": "203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/core/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1557"
},
{
"name": "HTML",
"bytes": "28219"
},
{
"name": "JavaScript",
"bytes": "348978"
},
{
"name": "Makefile",
"bytes": "271"
},
{
"name": "Python",
"bytes": "13638"
}
],
"symlink_target": ""
}
|
import unittest
from pywps import Process, Service, WPS, OWS
from pywps.app.basic import xpath_ns
from tests.common import client_for
import lxml.etree
class ExceptionsTest(unittest.TestCase):
def setUp(self):
self.client = client_for(Service(processes=[]))
def test_invalid_parameter_value(self):
resp = self.client.get('?service=wms')
exception_el = resp.xpath('/ows:ExceptionReport/ows:Exception')[0]
assert exception_el.attrib['exceptionCode'] == 'InvalidParameterValue'
assert resp.status_code == 400
assert resp.headers['Content-Type'] == 'text/xml'
def test_missing_parameter_value(self):
resp = self.client.get()
exception_el = resp.xpath('/ows:ExceptionReport/ows:Exception')[0]
assert exception_el.attrib['exceptionCode'] == 'MissingParameterValue'
assert resp.status_code == 400
assert resp.headers['Content-Type'] == 'text/xml'
def test_missing_request(self):
resp = self.client.get("?service=wps")
exception_el = resp.xpath('/ows:ExceptionReport/ows:Exception/ows:ExceptionText')[0]
# should mention something about a request
assert 'request' in exception_el.text
assert resp.headers['Content-Type'] == 'text/xml'
def test_bad_request(self):
resp = self.client.get("?service=wps&request=xyz")
exception_el = resp.xpath('/ows:ExceptionReport/ows:Exception')[0]
assert exception_el.attrib['exceptionCode'] == 'OperationNotSupported'
assert resp.headers['Content-Type'] == 'text/xml'
def load_tests(loader=None, tests=None, pattern=None):
if not loader:
loader = unittest.TestLoader()
suite_list = [
loader.loadTestsFromTestCase(ExceptionsTest),
]
return unittest.TestSuite(suite_list)
|
{
"content_hash": "effabe8eae4f7b9f5df75f6b8b83a99b",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 92,
"avg_line_length": 38.659574468085104,
"alnum_prop": 0.6741882223445239,
"repo_name": "ricardogsilva/PyWPS",
"id": "0ba46191007a259cd66ef3e00334b15076fa9546",
"size": "1817",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "219754"
}
],
"symlink_target": ""
}
|
"""Useful functions for testing."""
from typing import Optional
import numpy as np
import pyspiel
def random_playout(state: pyspiel.State, seed: Optional[int] = None):
"""Plays random actions until the state is terminal."""
rng = np.random.RandomState(seed)
while not state.is_terminal():
state.apply_action(rng.choice(state.legal_actions()))
return state
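# Minimal usage sketch (tic_tac_toe ships with standard OpenSpiel builds):
def _random_playout_example():
  game = pyspiel.load_game("tic_tac_toe")
  final_state = random_playout(game.new_initial_state(), seed=0)
  assert final_state.is_terminal()
  return final_state.returns()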
|
{
"content_hash": "8fd2afc3f81d341d4be8010cf3b31f44",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 69,
"avg_line_length": 28.53846153846154,
"alnum_prop": 0.7331536388140162,
"repo_name": "deepmind/open_spiel",
"id": "64f77d9d78facb8f06086684d77c79205255442f",
"size": "966",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "open_spiel/python/test_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "6640"
},
{
"name": "C++",
"bytes": "4649139"
},
{
"name": "CMake",
"bytes": "78467"
},
{
"name": "Go",
"bytes": "18010"
},
{
"name": "Julia",
"bytes": "16727"
},
{
"name": "Jupyter Notebook",
"bytes": "148663"
},
{
"name": "Python",
"bytes": "2823600"
},
{
"name": "Rust",
"bytes": "18562"
},
{
"name": "Shell",
"bytes": "51087"
}
],
"symlink_target": ""
}
|
import argparse
import os
import os.path
import tables
import sys
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'filenames', nargs='*', type=str, help='Files to fix.')
args = parser.parse_args()
for filename in args.filenames:
with tables.open_file(filename) as fileh:
repeat = fileh.root.repeat[0]
components = filename.split('.')
components[-2] = str(repeat)
            fixed_filename = '.'.join(components)
if os.path.exists(fixed_filename):
sys.stderr.write(
'{} already exists.{}'.format(fixed_filename, os.linesep))
else:
os.rename(filename, fixed_filename)
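# Worked example: with repeat == 7, 'results.0.h5' is renamed to
# 'results.7.h5' (the second-to-last dot-separated component is replaced
# by the repeat number stored in the file).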
|
{
"content_hash": "c57ad8e52f5c26e72cdc455a9177dccd",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 74,
"avg_line_length": 28.68,
"alnum_prop": 0.599721059972106,
"repo_name": "jgosmann/plume",
"id": "ff662a3678a14f3b31564aa7957d3928be474664",
"size": "740",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plume/fix-filenumbering.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "1206"
},
{
"name": "Python",
"bytes": "151360"
},
{
"name": "Shell",
"bytes": "342"
}
],
"symlink_target": ""
}
|
import math
import random
from pandac.PandaModules import Vec3
from direct.showbase import PythonUtil
from direct.directnotify import DirectNotifyGlobal
from direct.task.Task import Task
from direct.interval.FunctionInterval import Wait
from direct.interval.IntervalGlobal import Func, LerpFunc, LerpPosInterval, LerpHprInterval, LerpFunctionInterval
from direct.interval.MetaInterval import Sequence, Parallel
from direct.showbase.PythonUtil import bound as clamp
from direct.distributed.ClockDelta import globalClockDelta
from otp.otpbase import OTPGlobals
from toontown.minigame.OrthoDrive import OrthoDrive
from toontown.minigame.OrthoWalk import OrthoWalk
from toontown.toonbase import TTLocalizer
from CogdoFlyingCollisions import CogdoFlyingCollisions
from CogdoFlyingPlayer import CogdoFlyingPlayer
from CogdoFlyingGuiManager import CogdoFlyingGuiManager
from CogdoFlyingInputManager import CogdoFlyingInputManager
from CogdoFlyingCameraManager import CogdoFlyingCameraManager
from CogdoFlyingObjects import CogdoFlyingPlatform, CogdoFlyingGatherable
from CogdoFlyingLegalEagle import CogdoFlyingLegalEagle
import CogdoFlyingGameGlobals as Globals
class CogdoFlyingLocalPlayer(CogdoFlyingPlayer):
notify = DirectNotifyGlobal.directNotify.newCategory('CogdoFlyingLocalPlayer')
BroadcastPosTask = 'CogdoFlyingLocalPlayerBroadcastPos'
PlayWaitingMusicEventName = 'PlayWaitingMusicEvent'
RanOutOfTimeEventName = 'RanOutOfTimeEvent'
PropStates = PythonUtil.Enum(('Normal', 'Overdrive', 'Off'))
def __init__(self, toon, game, level, guiMgr):
CogdoFlyingPlayer.__init__(self, toon)
self.defaultTransitions = {'Inactive': ['FreeFly', 'Running'],
'FreeFly': ['Inactive',
'OutOfTime',
'Death',
'FlyingUp',
'Running',
'HitWhileFlying',
'InWhirlwind'],
'FlyingUp': ['Inactive',
'OutOfTime',
'Death',
'FreeFly',
'Running',
'HitWhileFlying',
'InWhirlwind'],
'InWhirlwind': ['Inactive',
'OutOfTime',
'Death',
'FreeFly',
'HitWhileFlying'],
'HitWhileFlying': ['Inactive',
'OutOfTime',
'Death',
'FreeFly',
'InWhirlwind'],
'Death': ['Inactive', 'OutOfTime', 'Spawn'],
'Running': ['Inactive',
'OutOfTime',
'FreeFly',
'FlyingUp',
'Refuel',
'WaitingForWin',
'HitWhileRunning'],
'HitWhileRunning': ['Inactive',
'OutOfTime',
'Death',
'Running',
'FreeFly'],
'Spawn': ['Inactive',
'OutOfTime',
'Running',
'WaitingForWin'],
'OutOfTime': ['Inactive', 'Spawn'],
'WaitingForWin': ['Inactive', 'Win'],
'Win': ['Inactive']}
self.game = game
self._level = level
self._guiMgr = guiMgr
self._inputMgr = CogdoFlyingInputManager()
self._cameraMgr = CogdoFlyingCameraManager(camera, render, self, self._level)
self.velocity = Vec3(0.0, 0.0, 0.0)
self.instantaneousVelocity = Vec3(0.0, 0.0, 0.0)
self.controlVelocity = Vec3(0.0, 0.0, 0.0)
self.fanVelocity = Vec3(0.0, 0.0, 0.0)
self.activeFans = []
self.fansStillHavingEffect = []
self.fanIndex2ToonVelocity = {}
self.legalEagleInterestRequest = {}
self.activeWhirlwind = None
self.oldPos = Vec3(0.0, 0.0, 0.0)
self.checkpointPlatform = None
self.isHeadInCeiling = False
self.isToonOnFloor = False
self.fuel = 0.0
self.score = 0
self.postSpawnState = 'Running'
self.didTimeRunOut = False
self.hasPressedCtrlYet = False
self.hasPickedUpFirstPropeller = False
self.surfacePoint = None
self.legalEagleHitting = False
self.propState = None
self.broadcastPeriod = Globals.AI.BroadcastPeriod
self.initSfx()
self.initLocalPlayerIntervals()
self.initCollisions()
self.initOrthoWalker()
self.playerNumber = -1
self.fuel = 0.0
self._guiMgr.setFuel(self.fuel)
self.setCheckpointPlatform(self._level.startPlatform)
def initSfx(self):
audioMgr = base.cogdoGameAudioMgr
self._deathSfx = audioMgr.createSfx('death')
self._hitByWhirlwindSfx = audioMgr.createSfx('toonInWhirlwind')
self._bladeBreakSfx = audioMgr.createSfx('bladeBreak')
self._collideSfx = audioMgr.createSfx('collide')
self._toonHitSfx = audioMgr.createSfx('toonHit')
self._getMemoSfx = audioMgr.createSfx('getMemo')
self._getLaffSfx = audioMgr.createSfx('getLaff')
self._getRedTapeSfx = audioMgr.createSfx('getRedTape')
self._refuelSfx = audioMgr.createSfx('refuel')
self._fanSfx = audioMgr.createSfx('fan')
self._invulDebuffSfx = audioMgr.createSfx('invulDebuff')
self._invulBuffSfx = audioMgr.createSfx('invulBuff')
self._winSfx = audioMgr.createSfx('win')
self._loseSfx = audioMgr.createSfx('lose')
self._refuelSpinSfx = audioMgr.createSfx('refuelSpin')
self._propellerSfx = audioMgr.createSfx('propeller', self.toon)
def destroySfx(self):
del self._deathSfx
del self._hitByWhirlwindSfx
del self._bladeBreakSfx
del self._collideSfx
del self._toonHitSfx
del self._propellerSfx
del self._getMemoSfx
del self._getLaffSfx
del self._refuelSfx
del self._fanSfx
del self._invulBuffSfx
del self._invulDebuffSfx
del self._getRedTapeSfx
del self._refuelSpinSfx
def setPlayerNumber(self, num):
self.playerNumber = num
def getPlayerNumber(self):
return self.playerNumber
def initOrthoWalker(self):
orthoDrive = OrthoDrive(9.778, maxFrameMove=0.5, wantSound=True)
self.orthoWalk = OrthoWalk(orthoDrive, broadcast=False, collisions=False, broadcastPeriod=Globals.AI.BroadcastPeriod)
def initLocalPlayerIntervals(self):
        self.coolDownAfterHitInterval = Sequence(
            Wait(Globals.Gameplay.HitCooldownTime),
            Func(self.setEnemyHitting, False),
            name='coolDownAfterHitInterval-%i' % self.toon.doId)
        self.deathInterval = Sequence(
            Func(self.resetVelocities),
            Parallel(
                Parallel(
                    Func(self._deathSfx.play),
                    LerpHprInterval(self.toon, 1.0, Vec3(720, 0, 0)),
                    LerpFunctionInterval(self.toon.setScale, fromData=1.0, toData=0.1, duration=1.0),
                    self.toon.posInterval(0.5, Vec3(0, 0, -25), other=self.toon)),
                Sequence(
                    Wait(0.5),
                    Func(base.transitions.irisOut))),
            Func(self.toon.stash),
            Wait(1.0),
            Func(self.toonSpawnFunc),
            name='%s.deathInterval' % self.__class__.__name__)
        self.outOfTimeInterval = Sequence(
            Func(messenger.send, CogdoFlyingLocalPlayer.PlayWaitingMusicEventName),
            Func(self._loseSfx.play),
            Func(base.transitions.irisOut),
            Wait(1.0),
            Func(self.resetVelocities),
            Func(self._guiMgr.setMessage, '', transition=None),
            Func(self.toon.stash),
            Func(self.toonSpawnFunc),
            name='%s.outOfTimeInterval' % self.__class__.__name__)
        self.spawnInterval = Sequence(
            Func(self.resetToonFunc),
            Func(self._cameraMgr.update, 0.0),
            Func(self._level.update),
            Func(self.toon.cnode.broadcastPosHprFull),
            Func(base.transitions.irisIn),
            Wait(0.5),
            Func(self.toon.setAnimState, 'TeleportIn'),
            Func(self.toon.unstash),
            Wait(1.5),
            Func(self.requestPostSpawnState),
            name='%s.spawnInterval' % self.__class__.__name__)
        self.waitingForWinInterval = Sequence(
            Func(self._guiMgr.setMessage, TTLocalizer.CogdoFlyingGameWaiting % '.'),
            Wait(1.5),
            Func(self._guiMgr.setMessage, TTLocalizer.CogdoFlyingGameWaiting % '..'),
            Wait(1.5),
            Func(self._guiMgr.setMessage, TTLocalizer.CogdoFlyingGameWaiting % '...'),
            Wait(1.5),
            name='%s.waitingForWinInterval' % self.__class__.__name__)
        self.waitingForWinSeq = Sequence(
            Func(self.setWaitingForWinState),
            Wait(4.0),
            Func(self.removeAllMemos),
            Wait(2.0),
            Func(self.game.distGame.d_sendRequestAction, Globals.AI.GameActions.LandOnWinPlatform, 0),
            Func(self.playWaitingForWinInterval),
            name='%s.waitingForWinSeq' % self.__class__.__name__)
        self.winInterval = Sequence(
            Func(self._guiMgr.setMessage, ''),
            Wait(4.0),
            Func(self.game.distGame.d_sendRequestAction, Globals.AI.GameActions.WinStateFinished, 0),
            name='%s.winInterval' % self.__class__.__name__)
        self.goSadSequence = Sequence(
            Wait(2.5),
            Func(base.transitions.irisOut, 1.5),
            name='%s.goSadSequence' % self.__class__.__name__)
        self.introGuiSeq = Sequence(
            Wait(0.5),
            Parallel(
                Func(self._guiMgr.setTemporaryMessage, TTLocalizer.CogdoFlyingGameMinimapIntro, duration=5.0),
                Sequence(
                    Wait(1.0),
                    Func(self._guiMgr.presentProgressGui))),
            Wait(5.0),
            Func(self._guiMgr.setMessage, TTLocalizer.CogdoFlyingGamePickUpAPropeller),
            name='%s.introGuiSeq' % self.__class__.__name__)
return
def goSad(self):
self.goSadSequence.start()
def setWaitingForWinState(self):
if self.didTimeRunOut:
self.toon.b_setAnimState('Sad')
self._guiMgr.setMessage(TTLocalizer.CogdoFlyingGameOutOfTime, transition='blink')
else:
self._winSfx.play()
messenger.send(CogdoFlyingLocalPlayer.PlayWaitingMusicEventName)
self.toon.b_setAnimState('victory')
self._guiMgr.setMessage(TTLocalizer.CogdoFlyingGameYouMadeIt)
def removeAllMemos(self):
if self.didTimeRunOut:
messenger.send(CogdoFlyingLocalPlayer.RanOutOfTimeEventName)
def playWaitingForWinInterval(self):
if not self.game.distGame.isSinglePlayer():
self.waitingForWinInterval.loop()
def resetToonFunc(self):
self.resetToon(resetFuel=self.hasPickedUpFirstPropeller)
def _loopPropellerSfx(self, playRate = 1.0, volume = 1.0):
        self._propellerSfx.loop(playRate=playRate, volume=volume)
def initCollisions(self):
avatarRadius = 2.0
reach = 4.0
self.flyerCollisions = CogdoFlyingCollisions()
self.flyerCollisions.setWallBitMask(OTPGlobals.WallBitmask)
self.flyerCollisions.setFloorBitMask(OTPGlobals.FloorBitmask)
self.flyerCollisions.initializeCollisions(base.cTrav, self.toon, avatarRadius, OTPGlobals.FloorOffset, reach)
self.flyerCollisions.setCollisionsActive(0)
floorColl = CogdoFlyingPlatform.FloorCollName
ceilingColl = CogdoFlyingPlatform.CeilingCollName
self.accept('Flyer.cHeadCollSphere-enter-%s' % ceilingColl, self.__handleHeadCollisionIntoCeiling)
self.accept('Flyer.cHeadCollSphere-exit-%s' % ceilingColl, self.__handleHeadCollisionExitCeiling)
self.accept('Flyer.cFloorEventSphere-exit-%s' % floorColl, self.__handleEventCollisionExitFloor)
self.accept('Flyer.cRayNode-enter-%s' % floorColl, self.__handleRayCollisionEnterFloor)
self.accept('Flyer.cRayNode-again-%s' % floorColl, self.__handleRayCollisionAgainFloor)
def enable(self):
CogdoFlyingPlayer.enable(self)
self.toon.hideName()
def disable(self):
CogdoFlyingPlayer.disable(self)
def isLegalEagleInterestRequestSent(self, index):
if index in self.legalEagleInterestRequest:
return True
else:
return False
def setLegalEagleInterestRequest(self, index):
if index not in self.legalEagleInterestRequest:
self.legalEagleInterestRequest[index] = True
else:
            CogdoFlyingLocalPlayer.notify.warning('Attempting to set a legal eagle interest request when one already exists:%s' % index)
def clearLegalEagleInterestRequest(self, index):
if index in self.legalEagleInterestRequest:
del self.legalEagleInterestRequest[index]
def setBackpackState(self, state):
if state == self.backpackState:
return
CogdoFlyingPlayer.setBackpackState(self, state)
if state in Globals.Gameplay.BackpackStates:
if state == Globals.Gameplay.BackpackStates.Normal:
messenger.send(CogdoFlyingGuiManager.ClearMessageDisplayEventName)
elif state == Globals.Gameplay.BackpackStates.Targeted:
messenger.send(CogdoFlyingGuiManager.EagleTargetingLocalPlayerEventName)
elif state == Globals.Gameplay.BackpackStates.Attacked:
messenger.send(CogdoFlyingGuiManager.EagleAttackingLocalPlayerEventName)
def requestPostSpawnState(self):
self.request(self.postSpawnState)
def toonSpawnFunc(self):
self.game.distGame.b_toonSpawn(self.toon.doId)
def __handleHeadCollisionIntoCeiling(self, collEntry):
self.isHeadInCeiling = True
self.surfacePoint = self.toon.getPos()
self._collideSfx.play()
if self.controlVelocity[2] > 0.0:
self.controlVelocity[2] = -self.controlVelocity[2] / 2.0
def __handleHeadCollisionExitCeiling(self, collEntry):
self.isHeadInCeiling = False
self.surfacePoint = None
return
def landOnPlatform(self, collEntry):
surfacePoint = collEntry.getSurfacePoint(render)
intoNodePath = collEntry.getIntoNodePath()
platform = CogdoFlyingPlatform.getFromNode(intoNodePath)
if platform is not None:
if not platform.isStartOrEndPlatform():
taskMgr.doMethodLater(0.5, self.delayedLandOnPlatform, 'delayedLandOnPlatform', extraArgs=[platform])
elif platform.isEndPlatform():
taskMgr.doMethodLater(1.0, self.delayedLandOnWinPlatform, 'delayedLandOnWinPlatform', extraArgs=[platform])
self.isToonOnFloor = True
self.controlVelocity = Vec3(0.0, 0.0, 0.0)
self.toon.setPos(render, surfacePoint)
self.toon.setHpr(0, 0, 0)
self.request('Running')
return
def __handleRayCollisionEnterFloor(self, collEntry):
fromNodePath = collEntry.getFromNodePath()
intoNodePath = collEntry.getIntoNodePath()
intoName = intoNodePath.getName()
fromName = fromNodePath.getName()
toonPos = self.toon.getPos(render)
collPos = collEntry.getSurfacePoint(render)
if toonPos.getZ() < collPos.getZ() + Globals.Gameplay.RayPlatformCollisionThreshold:
if not self.isToonOnFloor and self.state in ['FreeFly', 'FlyingUp']:
self.landOnPlatform(collEntry)
def __handleRayCollisionAgainFloor(self, collEntry):
fromNodePath = collEntry.getFromNodePath()
intoNodePath = collEntry.getIntoNodePath()
intoName = intoNodePath.getName()
fromName = fromNodePath.getName()
toonPos = self.toon.getPos(render)
collPos = collEntry.getSurfacePoint(render)
if toonPos.getZ() < collPos.getZ() + Globals.Gameplay.RayPlatformCollisionThreshold:
if not self.isToonOnFloor and self.state in ['FreeFly', 'FlyingUp']:
self.landOnPlatform(collEntry)
def __handleEventCollisionExitFloor(self, collEntry):
fromNodePath = collEntry.getFromNodePath()
intoNodePath = collEntry.getIntoNodePath()
intoName = intoNodePath.getName()
fromName = fromNodePath.getName()
if self.isToonOnFloor:
self.notify.debug('~~~Exit Floor:%s -> %s' % (intoName, fromName))
self.isToonOnFloor = False
taskMgr.remove('delayedLandOnPlatform')
taskMgr.remove('delayedLandOnWinPlatform')
if self.state not in ['FlyingUp', 'Spawn']:
self.notify.debug('Exited floor')
self.request('FreeFly')
def delayedLandOnPlatform(self, platform):
self.setCheckpointPlatform(platform)
return Task.done
def delayedLandOnWinPlatform(self, platform):
self.setCheckpointPlatform(self._level.endPlatform)
self.request('WaitingForWin')
return Task.done
def handleTimerExpired(self):
if self.state not in ['WaitingForWin', 'Win']:
self.setCheckpointPlatform(self._level.endPlatform)
self.postSpawnState = 'WaitingForWin'
self.didTimeRunOut = True
if self.state not in ['Death']:
self.request('OutOfTime')
def ready(self):
self.resetToon(resetFuel=False)
self._cameraMgr.enable()
self._cameraMgr.update()
def start(self):
CogdoFlyingPlayer.start(self)
self.toon.collisionsOff()
self.flyerCollisions.setAvatar(self.toon)
self.flyerCollisions.setCollisionsActive(1)
self._levelBounds = self._level.getBounds()
self.introGuiSeq.start()
self.request('Running')
def exit(self):
self.request('Inactive')
CogdoFlyingPlayer.exit(self)
self._cameraMgr.disable()
self.flyerCollisions.setCollisionsActive(0)
self.flyerCollisions.setAvatar(None)
taskMgr.remove('delayedLandOnFuelPlatform')
taskMgr.remove('delayedLandOnWinPlatform')
self.ignoreAll()
return
def unload(self):
self.toon.showName()
self.toon.collisionsOn()
self._destroyEventIval()
self._destroyEnemyHitIval()
CogdoFlyingPlayer.unload(self)
self._fanSfx.stop()
self.flyerCollisions.deleteCollisions()
del self.flyerCollisions
self.ignoreAll()
taskMgr.remove('delayedLandOnPlatform')
taskMgr.remove('delayedLandOnWinPlatform')
self.checkpointPlatform = None
self._cameraMgr.disable()
del self._cameraMgr
del self.game
self._inputMgr.destroy()
del self._inputMgr
self.introGuiSeq.clearToInitial()
del self.introGuiSeq
if self.goSadSequence:
self.goSadSequence.clearToInitial()
del self.goSadSequence
if self.coolDownAfterHitInterval:
self.coolDownAfterHitInterval.clearToInitial()
del self.coolDownAfterHitInterval
if self.deathInterval:
self.deathInterval.clearToInitial()
del self.deathInterval
if self.spawnInterval:
self.spawnInterval.clearToInitial()
del self.spawnInterval
if self.outOfTimeInterval:
self.outOfTimeInterval.clearToInitial()
del self.outOfTimeInterval
if self.winInterval:
self.winInterval.clearToInitial()
del self.winInterval
if self.waitingForWinInterval:
self.waitingForWinInterval.clearToInitial()
del self.waitingForWinInterval
if self.waitingForWinSeq:
self.waitingForWinSeq.clearToInitial()
del self.waitingForWinSeq
del self.activeFans[:]
del self.fansStillHavingEffect[:]
self.fanIndex2ToonVelocity.clear()
self.orthoWalk.stop()
self.orthoWalk.destroy()
del self.orthoWalk
self.destroySfx()
return
def setCheckpointPlatform(self, platform):
self.checkpointPlatform = platform
def resetVelocities(self):
self.fanVelocity = Vec3(0.0, 0.0, 0.0)
self.controlVelocity = Vec3(0.0, 0.0, 0.0)
self.velocity = Vec3(0.0, 0.0, 0.0)
def resetToon(self, resetFuel = True):
CogdoFlyingPlayer.resetToon(self)
self.resetVelocities()
del self.activeFans[:]
del self.fansStillHavingEffect[:]
self.fanIndex2ToonVelocity.clear()
self._fanSfx.stop()
spawnPos = self.checkpointPlatform.getSpawnPosForPlayer(self.getPlayerNumber(), render)
self.activeWhirlwind = None
self.toon.setPos(render, spawnPos)
self.toon.setHpr(render, 0, 0, 0)
if resetFuel:
self.resetFuel()
self.isHeadInCeiling = False
self.isToonOnFloor = True
return
def activateFlyingBroadcast(self):
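        # Clear remote smoothing, send our position once, then start a task that
        # rebroadcasts full pos/hpr every broadcastPeriod seconds while flying.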
self.timeSinceLastPosBroadcast = 0.0
self.lastPosBroadcast = self.toon.getPos()
self.lastHprBroadcast = self.toon.getHpr()
toon = self.toon
toon.d_clearSmoothing()
toon.sendCurrentPosition()
taskMgr.remove(self.BroadcastPosTask)
taskMgr.add(self.doBroadcast, self.BroadcastPosTask)
def shutdownFlyingBroadcast(self):
taskMgr.remove(self.BroadcastPosTask)
def doBroadcast(self, task):
dt = globalClock.getDt()
self.timeSinceLastPosBroadcast += dt
if self.timeSinceLastPosBroadcast >= self.broadcastPeriod:
self.timeSinceLastPosBroadcast = 0.0
self.toon.cnode.broadcastPosHprFull()
return Task.cont
def died(self, timestamp):
self.request('Death')
def spawn(self, timestamp):
self.request('Spawn')
def updateToonFlyingState(self, dt):
leftPressed = self._inputMgr.arrowKeys.leftPressed()
rightPressed = self._inputMgr.arrowKeys.rightPressed()
upPressed = self._inputMgr.arrowKeys.upPressed()
downPressed = self._inputMgr.arrowKeys.downPressed()
jumpPressed = self._inputMgr.arrowKeys.jumpPressed()
if not self.hasPressedCtrlYet and jumpPressed and self.isFuelLeft():
self.hasPressedCtrlYet = True
messenger.send(CogdoFlyingGuiManager.FirstPressOfCtrlEventName)
if jumpPressed and self.isFuelLeft():
            if self.state == 'FreeFly' and not self.isInTransition():
self.notify.debug('FreeFly -> FlyingUp')
self.request('FlyingUp')
            elif self.state == 'FlyingUp' and not self.isInTransition():
self.notify.debug('FlyingUp -> FreeFly')
self.request('FreeFly')
if leftPressed and not rightPressed:
self.toon.setH(self.toon, Globals.Gameplay.ToonTurning['turningSpeed'] * dt)
            maxAngle = Globals.Gameplay.ToonTurning['maxTurningAngle']
            if self.toon.getH() > maxAngle:
                self.toon.setH(maxAngle)
elif rightPressed and not leftPressed:
self.toon.setH(self.toon, -1.0 * Globals.Gameplay.ToonTurning['turningSpeed'] * dt)
            minAngle = -1.0 * Globals.Gameplay.ToonTurning['maxTurningAngle']
            if self.toon.getH() < minAngle:
                self.toon.setH(minAngle)
def updateControlVelocity(self, dt):
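        # Integrate arrow-key/ctrl input into controlVelocity (x = turning, y = forward/back,
        # z = climb/fall), then damp idle axes and clamp each axis to its configured maximum.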
leftPressed = self._inputMgr.arrowKeys.leftPressed()
rightPressed = self._inputMgr.arrowKeys.rightPressed()
upPressed = self._inputMgr.arrowKeys.upPressed()
downPressed = self._inputMgr.arrowKeys.downPressed()
jumpPressed = self._inputMgr.arrowKeys.jumpPressed()
if leftPressed:
self.controlVelocity[0] -= Globals.Gameplay.ToonAcceleration['turning'] * dt
if rightPressed:
self.controlVelocity[0] += Globals.Gameplay.ToonAcceleration['turning'] * dt
if upPressed:
self.controlVelocity[1] += Globals.Gameplay.ToonAcceleration['forward'] * dt
if downPressed:
self.controlVelocity[2] -= Globals.Gameplay.ToonAcceleration['activeDropDown'] * dt
self.controlVelocity[1] -= Globals.Gameplay.ToonAcceleration['activeDropBack'] * dt
if jumpPressed and self.isFuelLeft():
self.controlVelocity[2] += Globals.Gameplay.ToonAcceleration['boostUp'] * dt
minVal = -Globals.Gameplay.ToonVelMax['turning']
maxVal = Globals.Gameplay.ToonVelMax['turning']
        if (not leftPressed and not rightPressed) or self.controlVelocity[0] > maxVal or self.controlVelocity[0] < minVal:
x = self.dampenVelocityVal(self.controlVelocity[0], 'turning', 'turning', minVal, maxVal, dt)
self.controlVelocity[0] = x
minVal = -Globals.Gameplay.ToonVelMax['backward']
maxVal = Globals.Gameplay.ToonVelMax['forward']
        if (not upPressed and not downPressed) or self.controlVelocity[1] > maxVal or self.controlVelocity[1] < minVal:
y = self.dampenVelocityVal(self.controlVelocity[1], 'backward', 'forward', minVal, maxVal, dt)
self.controlVelocity[1] = y
if self.isFuelLeft():
minVal = -Globals.Gameplay.ToonVelMax['fall']
else:
minVal = -Globals.Gameplay.ToonVelMax['fallNoFuel']
maxVal = Globals.Gameplay.ToonVelMax['boost']
if self.controlVelocity[2] > minVal:
if (not self._inputMgr.arrowKeys.jumpPressed() or not self.isFuelLeft()) and not self.isToonOnFloor:
self.controlVelocity[2] -= Globals.Gameplay.ToonAcceleration['fall'] * dt
if self.controlVelocity[2] < 0.0 and self.isToonOnFloor:
self.controlVelocity[2] = 0.0
minVal = -Globals.Gameplay.ToonVelMax['turning']
maxVal = Globals.Gameplay.ToonVelMax['turning']
self.controlVelocity[0] = clamp(self.controlVelocity[0], minVal, maxVal)
minVal = -Globals.Gameplay.ToonVelMax['backward']
maxVal = Globals.Gameplay.ToonVelMax['forward']
self.controlVelocity[1] = clamp(self.controlVelocity[1], minVal, maxVal)
if self.isFuelLeft():
minVal = -Globals.Gameplay.ToonVelMax['fall']
else:
minVal = -Globals.Gameplay.ToonVelMax['fallNoFuel']
maxVal = Globals.Gameplay.ToonVelMax['boost']
self.controlVelocity[2] = clamp(self.controlVelocity[2], minVal, maxVal)
def updateFanVelocity(self, dt):
fanHeight = Globals.Gameplay.FanCollisionTubeHeight
        minPower = Globals.Gameplay.FanMinPower
        maxPower = Globals.Gameplay.FanMaxPower
        powerRange = maxPower - minPower
for fan in self.activeFans:
blowVec = fan.getBlowDirection()
blowVec *= Globals.Gameplay.ToonAcceleration['fan'] * dt
if Globals.Gameplay.UseVariableFanPower:
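                # Fan power falls off linearly with distance inside the collision tube:
                # full FanMaxPower right at the fan, FanMinPower at the far end (clamped beyond).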
distance = fan.model.getDistance(self.toon)
                power = math.fabs(distance / fanHeight - 1.0) * powerRange + minPower
                power = clamp(power, minPower, maxPower)
blowVec *= power
fanVelocity = self.fanIndex2ToonVelocity[fan.index]
fanVelocity += blowVec
removeList = []
for fan in self.fansStillHavingEffect:
if fan not in self.activeFans:
blowVec = fan.getBlowDirection()
blowVec *= Globals.Gameplay.ToonDeceleration['fan'] * dt
fanVelocity = Vec3(self.fanIndex2ToonVelocity[fan.index])
lastLen = fanVelocity.length()
fanVelocity -= blowVec
if fanVelocity.length() > lastLen:
removeList.append(fan)
else:
self.fanIndex2ToonVelocity[fan.index] = fanVelocity
for fan in removeList:
self.fansStillHavingEffect.remove(fan)
del self.fanIndex2ToonVelocity[fan.index]
self.fanVelocity = Vec3(0.0, 0.0, 0.0)
for fan in self.fansStillHavingEffect:
self.fanVelocity += self.fanIndex2ToonVelocity[fan.index]
minVal = -Globals.Gameplay.ToonVelMax['fan']
maxVal = Globals.Gameplay.ToonVelMax['fan']
self.fanVelocity[0] = clamp(self.fanVelocity[0], minVal, maxVal)
self.fanVelocity[1] = clamp(self.fanVelocity[1], minVal, maxVal)
self.fanVelocity[2] = clamp(self.fanVelocity[2], minVal, maxVal)
def dampenVelocityVal(self, velocityVal, typeNeg, typePos, minVal, maxVal, dt):
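        # Decay a single velocity component toward zero using the per-direction
        # deceleration constants, clamping so the damping never overshoots past zero.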
if velocityVal > 0.0:
velocityVal -= Globals.Gameplay.ToonDeceleration[typePos] * dt
velocityVal = clamp(velocityVal, 0.0, maxVal)
elif velocityVal < 0.0:
velocityVal += Globals.Gameplay.ToonDeceleration[typeNeg] * dt
velocityVal = clamp(velocityVal, minVal, 0.0)
return velocityVal
def allowFuelDeath(self):
if Globals.Gameplay.DoesToonDieWithFuel:
return True
else:
return not self.isFuelLeft()
def updateToonPos(self, dt):
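        # Before the first propeller pickup the toon may only pace the start platform;
        # afterwards, advance by the combined control+fan velocity and clamp to level bounds.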
toonWorldY = self.toon.getY(render)
        if not self.hasPickedUpFirstPropeller:
if toonWorldY > -7.6:
self.toon.setY(-7.6)
elif toonWorldY < -35.0:
self.toon.setY(-35.0)
return
self.velocity = self.controlVelocity + self.fanVelocity
vel = self.velocity * dt
self.toon.setPos(self.toon, vel[0], vel[1], vel[2])
toonPos = self.toon.getPos()
if Globals.Dev.DisableDeath:
pass
elif toonPos[2] < 0.0 and self.state in ['FreeFly', 'FlyingUp'] and self.allowFuelDeath():
self.postSpawnState = 'Running'
self.game.distGame.b_toonDied(self.toon.doId)
if toonPos[2] > self._levelBounds[2][1]:
self.controlVelocity[2] = 0.0
self.fanVelocity[2] = 0.0
        toonPos = Vec3(
            clamp(toonPos[0], self._levelBounds[0][0], self._levelBounds[0][1]),
            clamp(toonPos[1], self._levelBounds[1][0], self._levelBounds[1][1]),
            clamp(toonPos[2], self._levelBounds[2][0], self._levelBounds[2][1]))
if self.isHeadInCeiling and toonPos[2] > self.surfacePoint[2]:
toonPos[2] = self.surfacePoint[2]
self.toon.setPos(toonPos)
if self.toon.getY(render) < -10:
self.toon.setY(-10.0)
def printFanInfo(self, string):
if len(self.fanIndex2ToonVelocity) > 0:
self.notify.info('==AFTER %s==' % string)
self.notify.info('Fan velocity:%s' % self.fanVelocity)
if len(self.activeFans) > 0:
self.notify.info('%s' % self.activeFans)
if len(self.fanIndex2ToonVelocity) > 0:
self.notify.info('%s' % self.fanIndex2ToonVelocity)
if len(self.fansStillHavingEffect) > 0:
self.notify.info('%s' % self.fansStillHavingEffect)
def resetFuel(self):
self.setFuel(Globals.Gameplay.FuelNormalAmt)
def isFuelLeft(self):
return self.fuel > 0.0
def setFuel(self, fuel):
self.fuel = fuel
self._guiMgr.setFuel(fuel)
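        # Map the continuous fuel level onto the coarse FuelStates thresholds and
        # broadcast blade updates whenever the state changes.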
if self.fuel <= 0.0:
fuelState = Globals.Gameplay.FuelStates.FuelEmpty
elif self.fuel < Globals.Gameplay.FuelVeryLowAmt:
fuelState = Globals.Gameplay.FuelStates.FuelVeryLow
elif self.fuel < Globals.Gameplay.FuelLowAmt:
fuelState = Globals.Gameplay.FuelStates.FuelLow
else:
fuelState = Globals.Gameplay.FuelStates.FuelNormal
if fuelState > self.fuelState:
self.game.distGame.b_toonSetBlades(self.toon.doId, fuelState)
if fuelState < self.fuelState:
if self.state in ['FlyingUp', 'FreeFly', 'Running']:
self.game.distGame.b_toonBladeLost(self.toon.doId)
def resetBlades(self):
CogdoFlyingPlayer.resetBlades(self)
self._guiMgr.resetBlades()
def setBlades(self, fuelState):
CogdoFlyingPlayer.setBlades(self, fuelState)
self._guiMgr.setBlades(fuelState)
def bladeLost(self):
CogdoFlyingPlayer.bladeLost(self)
self._bladeBreakSfx.play(volume=0.35)
self._guiMgr.bladeLost()
def updateFuel(self, dt):
if Globals.Dev.InfiniteFuel:
self.setFuel(Globals.Gameplay.FuelNormalAmt)
elif self.state in Globals.Gameplay.DepleteFuelStates and self.fuel > 0.0:
self.setFuel(self.fuel - Globals.Gameplay.FuelBurnRate * dt)
elif self.fuel < 0.0:
self.setFuel(0.0)
    def update(self, dt = 0.0):
        # Guard against the dt == 0.0 default to avoid a divide-by-zero.
        if dt > 0.0:
            self.instantaneousVelocity = (self.toon.getPos() - self.oldPos) / dt
        self.oldPos = self.toon.getPos()
self.updateFuel(dt)
if self.isFlying():
self.updateToonFlyingState(dt)
if self.state in ['FreeFly', 'FlyingUp', 'Death']:
self.updateControlVelocity(dt)
self.updateFanVelocity(dt)
self.updateToonPos(dt)
self._cameraMgr.update(dt)
def isFlying(self):
if self.state in ['FreeFly', 'FlyingUp']:
return True
else:
return False
def pressedControlWhileRunning(self):
if self.isFuelLeft() and self.state == 'Running':
self.notify.debug('Pressed Control and have fuel')
self.request('FlyingUp')
else:
self.ignore('control')
self.ignore('lcontrol')
self.acceptOnce('control', self.pressedControlWhileRunning)
self.acceptOnce('lcontrol', self.pressedControlWhileRunning)
def setPropellerState(self, propState):
if not self.hasPickedUpFirstPropeller:
propState = CogdoFlyingLocalPlayer.PropStates.Off
if self.propState != propState:
oldState = self.propState
self.propState = propState
if self.propState == CogdoFlyingLocalPlayer.PropStates.Normal:
if not self.propellerSpinLerp.isPlaying():
self.propellerSpinLerp.loop()
self.setPropellerSpinRate(Globals.Gameplay.NormalPropSpeed)
self._guiMgr.setPropellerSpinRate(Globals.Gameplay.NormalPropSpeed)
self._loopPropellerSfx(playRate=0.7, volume=0.8)
elif self.propState == CogdoFlyingLocalPlayer.PropStates.Overdrive:
if not self.propellerSpinLerp.isPlaying():
self.propellerSpinLerp.loop()
self.setPropellerSpinRate(Globals.Gameplay.OverdrivePropSpeed)
self._guiMgr.setPropellerSpinRate(Globals.Gameplay.OverdrivePropSpeed)
self._loopPropellerSfx(playRate=1.1)
elif self.propState == CogdoFlyingLocalPlayer.PropStates.Off:
self.propellerSpinLerp.pause()
self._propellerSfx.stop()
def enterInactive(self):
CogdoFlyingLocalPlayer.notify.info("enter%s: '%s' -> '%s'" % (self.newState, self.oldState, self.newState))
self._inputMgr.disable()
self.setPropellerState(CogdoFlyingLocalPlayer.PropStates.Off)
self.shutdownFlyingBroadcast()
    def filterInactive(self, request, args):
        if request == self.state:
            return None
        return self.defaultFilter(request, args)
def exitInactive(self):
CogdoFlyingLocalPlayer.notify.debug("exit%s: '%s' -> '%s'" % (self.oldState, self.oldState, self.newState))
self._inputMgr.enable()
self.activateFlyingBroadcast()
def enterSpawn(self):
CogdoFlyingLocalPlayer.notify.info("enter%s: '%s' -> '%s'" % (self.newState, self.oldState, self.newState))
self.toon.b_setAnimState('Happy', 1.0)
self.setPropellerState(CogdoFlyingLocalPlayer.PropStates.Normal)
self.spawnInterval.start()
    def filterSpawn(self, request, args):
        if request == self.state:
            return None
        return self.defaultFilter(request, args)
def exitSpawn(self):
CogdoFlyingLocalPlayer.notify.debug("exit%s: '%s' -> '%s'" % (self.oldState, self.oldState, self.newState))
def enterFreeFly(self):
CogdoFlyingLocalPlayer.notify.info("enter%s: '%s' -> '%s'" % (self.newState, self.oldState, self.newState))
self.setPropellerState(CogdoFlyingLocalPlayer.PropStates.Normal)
if self.oldState in ['Running', 'HitWhileRunning']:
self.toon.jumpStart()
self.toon.setHpr(render, 0, 0, 0)
    def filterFreeFly(self, request, args):
        if request == self.state:
            return None
        return self.defaultFilter(request, args)
def exitFreeFly(self):
CogdoFlyingLocalPlayer.notify.debug("exit%s: '%s' -> '%s'" % (self.oldState, self.oldState, self.newState))
def enterFlyingUp(self):
CogdoFlyingLocalPlayer.notify.info("enter%s: '%s' -> '%s'" % (self.newState, self.oldState, self.newState))
self.setPropellerState(CogdoFlyingLocalPlayer.PropStates.Overdrive)
if self.oldState in ['Running']:
self.toon.jumpStart()
self.toon.setHpr(render, 0, 0, 0)
    def filterFlyingUp(self, request, args):
        if request == self.state:
            return None
        return self.defaultFilter(request, args)
def exitFlyingUp(self):
CogdoFlyingLocalPlayer.notify.debug("exit%s: '%s' -> '%s'" % (self.oldState, self.oldState, self.newState))
def enterHitWhileFlying(self, elapsedTime = 0.0):
CogdoFlyingLocalPlayer.notify.info("enter%s: '%s' -> '%s'" % (self.newState, self.oldState, self.newState))
self.setEnemyHitting(True)
self._toonHitSfx.play()
self.startHitFlyingToonInterval()
self.setPropellerState(CogdoFlyingLocalPlayer.PropStates.Normal)
    def filterHitWhileFlying(self, request, args):
        if request == self.state:
            return None
        return self.defaultFilter(request, args)
def exitHitWhileFlying(self):
CogdoFlyingLocalPlayer.notify.debug("exit%s: '%s' -> '%s'" % (self.oldState, self.oldState, self.newState))
self.enemyHitIval.clearToInitial()
self.coolDownAfterHitInterval.clearToInitial()
self.coolDownAfterHitInterval.start()
def enterInWhirlwind(self, elapsedTime = 0.0):
CogdoFlyingLocalPlayer.notify.info("enter%s: '%s' -> '%s'" % (self.newState, self.oldState, self.newState))
self._hitByWhirlwindSfx.play()
self.startHitByWhirlwindInterval()
self.setPropellerState(CogdoFlyingLocalPlayer.PropStates.Normal)
    def filterInWhirlwind(self, request, args):
        if request == self.state:
            return None
        return self.defaultFilter(request, args)
def exitInWhirlwind(self):
CogdoFlyingLocalPlayer.notify.debug("exit%s: '%s' -> '%s'" % (self.oldState, self.oldState, self.newState))
self.eventIval.clearToInitial()
def enterHitWhileRunning(self, elapsedTime = 0.0):
CogdoFlyingLocalPlayer.notify.info("enter%s: '%s' -> '%s'" % (self.newState, self.oldState, self.newState))
self.setEnemyHitting(True)
self._toonHitSfx.play()
self.toon.b_setAnimState('FallDown')
self.startHitRunningToonInterval()
self.setPropellerState(CogdoFlyingLocalPlayer.PropStates.Normal)
    def filterHitWhileRunning(self, request, args):
        if request == self.state:
            return None
        return self.defaultFilter(request, args)
def exitHitWhileRunning(self):
CogdoFlyingLocalPlayer.notify.debug("exit%s: '%s' -> '%s'" % (self.oldState, self.oldState, self.newState))
self.enemyHitIval.clearToInitial()
self.coolDownAfterHitInterval.clearToInitial()
self.coolDownAfterHitInterval.start()
def enterRunning(self):
CogdoFlyingLocalPlayer.notify.info("enter%s: '%s' -> '%s'" % (self.newState, self.oldState, self.newState))
self.toon.b_setAnimState('Happy', 1.0)
if self.oldState not in ['Spawn', 'HitWhileRunning', 'Inactive']:
self.toon.jumpHardLand()
self._collideSfx.play()
self.orthoWalk.start()
self.setPropellerState(CogdoFlyingLocalPlayer.PropStates.Normal)
self.ignore('control')
self.ignore('lcontrol')
self.acceptOnce('control', self.pressedControlWhileRunning)
self.acceptOnce('lcontrol', self.pressedControlWhileRunning)
    def filterRunning(self, request, args):
        if request == self.state:
            return None
        return self.defaultFilter(request, args)
def exitRunning(self):
CogdoFlyingLocalPlayer.notify.debug("exit%s: '%s' -> '%s'" % (self.oldState, self.oldState, self.newState))
self.orthoWalk.stop()
self.ignore('control')
self.ignore('lcontrol')
def enterOutOfTime(self):
CogdoFlyingLocalPlayer.notify.info("enter%s: '%s' -> '%s'" % (self.newState, self.oldState, self.newState))
if self.spawnInterval.isPlaying():
self.spawnInterval.clearToInitial()
self.ignoreAll()
self.introGuiSeq.clearToInitial()
self.setPropellerState(CogdoFlyingLocalPlayer.PropStates.Off)
if not Globals.Dev.NoLegalEagleAttacks:
for eagle in self.legalEaglesTargeting:
messenger.send(CogdoFlyingLegalEagle.RequestRemoveTargetEventName, [eagle.index])
taskMgr.remove('delayedLandOnPlatform')
taskMgr.remove('delayedLandOnWinPlatform')
self.outOfTimeInterval.start()
    def filterOutOfTime(self, request, args):
        if request == self.state:
            return None
        return self.defaultFilter(request, args)
def exitOutOfTime(self):
CogdoFlyingLocalPlayer.notify.debug("exit%s: '%s' -> '%s'" % (self.oldState, self.oldState, self.newState))
def enterDeath(self):
CogdoFlyingLocalPlayer.notify.info("enter%s: '%s' -> '%s'" % (self.newState, self.oldState, self.newState))
self.propellerSmoke.stop()
self.deathInterval.start()
self.toon.b_setAnimState('jumpAirborne', 1.0)
self.setPropellerState(CogdoFlyingLocalPlayer.PropStates.Off)
if not Globals.Dev.NoLegalEagleAttacks:
for eagle in self.legalEaglesTargeting:
messenger.send(CogdoFlyingLegalEagle.RequestRemoveTargetEventName, [eagle.index])
    def filterDeath(self, request, args):
        if request == self.state:
            return None
        return self.defaultFilter(request, args)
def exitDeath(self):
CogdoFlyingLocalPlayer.notify.debug("exit%s: '%s' -> '%s'" % (self.oldState, self.oldState, self.newState))
self.deathInterval.clearToInitial()
def enterWaitingForWin(self):
CogdoFlyingLocalPlayer.notify.info("enter%s: '%s' -> '%s'" % (self.newState, self.oldState, self.newState))
self.resetFuel()
self._guiMgr.hideRefuelGui()
self.waitingForWinSeq.start()
self.setPropellerState(CogdoFlyingLocalPlayer.PropStates.Normal)
if not Globals.Dev.NoLegalEagleAttacks:
self.game.forceClearLegalEagleInterestInToon(self.toon.doId)
    def filterWaitingForWin(self, request, args):
        if request == self.state:
            return None
        return self.defaultFilter(request, args)
def exitWaitingForWin(self):
CogdoFlyingLocalPlayer.notify.debug("exit%s: '%s' -> '%s'" % (self.oldState, self.oldState, self.newState))
self.waitingForWinSeq.finish()
self.waitingForWinInterval.clearToInitial()
def enterWin(self):
CogdoFlyingLocalPlayer.notify.info("enter%s: '%s' -> '%s'" % (self.newState, self.oldState, self.newState))
self._guiMgr.stopTimer()
self.winInterval.start()
self.setPropellerState(CogdoFlyingLocalPlayer.PropStates.Normal)
    def filterWin(self, request, args):
        if request == self.state:
            return None
        return self.defaultFilter(request, args)
def exitWin(self):
CogdoFlyingLocalPlayer.notify.debug("exit%s: '%s' -> '%s'" % (self.oldState, self.oldState, self.newState))
def _destroyEventIval(self):
if hasattr(self, 'eventIval'):
self.eventIval.clearToInitial()
del self.eventIval
def startEventIval(self, ival):
self._destroyEventIval()
self.eventIval = ival
self.eventIval.start()
def _destroyEnemyHitIval(self):
if hasattr(self, 'enemyHitIval'):
self.enemyHitIval.clearToInitial()
del self.enemyHitIval
def startEnemyHitIval(self, ival):
self._destroyEnemyHitIval()
self.enemyHitIval = ival
self.enemyHitIval.start()
def isEnemyHitting(self):
return self.legalEagleHitting
def setEnemyHitting(self, value):
self.legalEagleHitting = value
def shouldLegalEagleBeInFrame(self):
if not self.isLegalEagleTarget():
return False
else:
index = len(self.legalEaglesTargeting) - 1
eagle = self.legalEaglesTargeting[index]
return eagle.shouldBeInFrame()
def startHitRunningToonInterval(self):
dur = self.toon.getDuration('slip-backward')
self.startEnemyHitIval(Sequence(Wait(dur), Func(self.request, 'Running'), name='hitByLegalEagleIval-%i' % self.toon.doId))
def startHitFlyingToonInterval(self):
hitByEnemyPos = self.toon.getPos(render)
collVec = hitByEnemyPos - self.collPos
collVec[2] = 0.0
collVec.normalize()
collVec *= Globals.Gameplay.HitKnockbackDist
def spinPlayer(t, rand):
if rand == 0:
self.toon.setH(-(t * 720.0))
else:
self.toon.setH(t * 720.0)
direction = random.randint(0, 1)
        self.startEnemyHitIval(Sequence(
            Parallel(
                LerpFunc(spinPlayer, fromData=0.0, toData=1.0, duration=Globals.Gameplay.HitKnockbackTime, blendType='easeInOut', extraArgs=[direction]),
                LerpPosInterval(self.toon, duration=Globals.Gameplay.HitKnockbackTime, pos=hitByEnemyPos + collVec, blendType='easeOut')),
            Func(self.request, 'FreeFly'),
            name='hitByLegalEagleIval-%i' % self.toon.doId))
def startHitByWhirlwindInterval(self):
def spinPlayer(t):
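            # Carry the toon around the whirlwind on a radius-2 circle (t sweeps 0 -> 1,
            # i.e. 1260 degrees of spin) while forcing a small upward velocity.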
self.controlVelocity[2] = 1.0
angle = math.radians(t * (720.0 * 2 - 180))
            self.toon.setPos(
                self.activeWhirlwind.model.getX(self.game.level.root) + math.cos(angle) * 2,
                self.activeWhirlwind.model.getY(self.game.level.root) + math.sin(angle) * 2,
                self.toon.getZ())
def movePlayerBack(t):
self.toon.setY(self.activeWhirlwind.model.getY(self.game.level.root) - t * Globals.Gameplay.WhirlwindMoveBackDist)
        self.startEventIval(Sequence(
            Func(self._cameraMgr.freeze),
            Func(self.activeWhirlwind.disable),
            LerpFunc(spinPlayer, fromData=0.0, toData=1.0, duration=Globals.Gameplay.WhirlwindSpinTime),
            LerpFunc(movePlayerBack, fromData=0.0, toData=1.0, duration=Globals.Gameplay.WhirlwindMoveBackTime, blendType='easeOut'),
            Func(self.activeWhirlwind.enable),
            Func(self._cameraMgr.unfreeze),
            Func(self.request, 'FreeFly'),
            name='spinPlayerIval-%i' % self.toon.doId))
def handleEnterWhirlwind(self, whirlwind):
self.activeWhirlwind = whirlwind
self.request('InWhirlwind')
def handleEnterEnemyHit(self, enemy, collPos):
self.collPos = collPos
if self.state in ['FlyingUp', 'FreeFly']:
self.request('HitWhileFlying')
elif self.state in ['Running']:
self.request('HitWhileRunning')
def handleEnterFan(self, fan):
if fan in self.activeFans:
return
if len(self.activeFans) == 0:
self._fanSfx.loop()
self.activeFans.append(fan)
if fan.index not in self.fanIndex2ToonVelocity:
self.fanIndex2ToonVelocity[fan.index] = Vec3(0.0, 0.0, 0.0)
if fan not in self.fansStillHavingEffect:
self.fansStillHavingEffect.append(fan)
def handleExitFan(self, fan):
if fan in self.activeFans:
self.activeFans.remove(fan)
if len(self.activeFans) == 0:
self._fanSfx.stop()
def handleDebuffPowerup(self, pickupType, elapsedTime):
self._invulDebuffSfx.play()
CogdoFlyingPlayer.handleDebuffPowerup(self, pickupType, elapsedTime)
messenger.send(CogdoFlyingGuiManager.ClearMessageDisplayEventName)
def handleEnterGatherable(self, gatherable, elapsedTime):
CogdoFlyingPlayer.handleEnterGatherable(self, gatherable, elapsedTime)
if gatherable.type == Globals.Level.GatherableTypes.Memo:
self.handleEnterMemo(gatherable)
elif gatherable.type == Globals.Level.GatherableTypes.Propeller:
self.handleEnterPropeller(gatherable)
elif gatherable.type == Globals.Level.GatherableTypes.LaffPowerup:
self._getLaffSfx.play()
elif gatherable.type == Globals.Level.GatherableTypes.InvulPowerup:
self._getRedTapeSfx.play()
messenger.send(CogdoFlyingGuiManager.InvulnerableEventName)
def handleEnterMemo(self, gatherable):
self.score += 1
if self.score == 1:
self._guiMgr.presentMemoGui()
self._guiMgr.setTemporaryMessage(TTLocalizer.CogdoFlyingGameMemoIntro, 4.0)
self._guiMgr.setMemoCount(self.score)
self._getMemoSfx.play()
def handleEnterPropeller(self, gatherable):
if self.fuel < 1.0:
if not self.hasPickedUpFirstPropeller:
messenger.send(CogdoFlyingGuiManager.PickedUpFirstPropellerEventName)
self.introGuiSeq.clearToInitial()
self.hasPickedUpFirstPropeller = True
self.setPropellerState(CogdoFlyingLocalPlayer.PropStates.Normal)
self.setFuel(1.0)
self._guiMgr.update()
self._refuelSfx.play()
self._refuelSpinSfx.play(volume=0.15)
|
{
"content_hash": "6951bec5e3201f3c4d0f3c2a5b0f08ff",
"timestamp": "",
"source": "github",
"line_count": 1099,
"max_line_length": 470,
"avg_line_length": 45.06369426751592,
"alnum_prop": 0.6493891973750631,
"repo_name": "ToontownUprising/src",
"id": "17e078bbf614c635f396c7ddc4b2948d535baee3",
"size": "49525",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "toontown/cogdominium/CogdoFlyingLocalPlayer.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "36"
},
{
"name": "Python",
"bytes": "16244807"
},
{
"name": "Shell",
"bytes": "11615"
}
],
"symlink_target": ""
}
|
from django.db.models import Q
from cyder.base.constants import IP_TYPE_4, IP_TYPE_6
from cyder.cydns.address_record.models import AddressRecord
from cyder.cydns.cname.models import CNAME
from cyder.cydns.mx.models import MX
from cyder.cydns.nameserver.models import Nameserver
from cyder.cydns.ptr.models import PTR
from cyder.cydns.srv.models import SRV
from cyder.cydns.txt.models import TXT
from cyder.cydns.sshfp.models import SSHFP
from cyder.cydns.view.models import View
from cyder.cydhcp.interface.static_intr.models import StaticInterface
from cyder.cydhcp.range.models import Range
from gettext import gettext as _
from cyder.core.utils import fail_mail
DEFAULT_TTL = 3600
def render_soa_only(soa, root_domain):
kwargs = {
'root_domain': root_domain.name,
'primary': soa.primary,
'contact': soa.contact,
'refresh': soa.refresh,
'retry': soa.retry,
'expire': soa.expire,
'minimum': soa.minimum,
'ttl': soa.ttl,
}
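    # The doubled braces leave a literal '{serial}' placeholder in the rendered
    # string so the build step can substitute the real serial later.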
BUILD_STR = _("{root_domain}. {ttl} IN SOA {primary}. {contact}. (\n"
"\t\t{{serial}} ; Serial\n"
"\t\t{refresh} ; Refresh\n"
"\t\t{retry} ; Retry\n"
"\t\t{expire} ; Expire\n"
"\t\t{minimum} ; Minimum\n"
")\n\n".format(**kwargs))
return BUILD_STR
def render_rdtype(rdtype_set, **kwargs):
    if len(rdtype_set) == 0:
        return ""
    # Pop the rendering flag first so it is not forwarded to bind_render_record.
    sort = kwargs.pop('sort', True)
    rdtype_set = map(lambda obj: obj.bind_render_record(**kwargs), rdtype_set)
    rdtype_set = (r.strip() for r in rdtype_set if r.strip())
    if sort:
        rdtype_set = sorted(rdtype_set, key=lambda s: s.lower())
    return "\n".join(rdtype_set) + "\n"
def _render_forward_zone(default_ttl, nameserver_set, mx_set,
addressrecord_set, interface_set, cname_set, srv_set,
txt_set, sshfp_set, range_set):
BUILD_STR = ""
BUILD_STR += render_rdtype(nameserver_set)
BUILD_STR += render_rdtype(mx_set)
BUILD_STR += render_rdtype(txt_set)
BUILD_STR += render_rdtype(sshfp_set)
BUILD_STR += render_rdtype(srv_set)
BUILD_STR += render_rdtype(cname_set)
BUILD_STR += render_rdtype(interface_set, rdtype='A')
BUILD_STR += render_rdtype(addressrecord_set)
BUILD_STR += render_rdtype(range_set, sort=False)
return BUILD_STR
def render_forward_zone(view, mega_filter):
data = _render_forward_zone(
default_ttl=DEFAULT_TTL,
nameserver_set=Nameserver.objects
.filter(mega_filter)
.filter(views__name=view.name).order_by('server'),
mx_set=MX.objects
.filter(mega_filter)
.filter(views__name=view.name).order_by('server'),
addressrecord_set=AddressRecord.objects
.filter(mega_filter).filter(views__name=view.name)
.order_by('pk', 'ip_type', 'fqdn', 'ip_upper', 'ip_lower'),
interface_set=StaticInterface.objects
.filter(mega_filter, dns_enabled=True)
.filter(views__name=view.name)
.order_by('pk', 'ip_type', 'fqdn', 'ip_upper', 'ip_lower'),
cname_set=CNAME.objects
.filter(mega_filter)
.filter(views__name=view.name)
.order_by('fqdn'),
srv_set=SRV.objects
.filter(mega_filter)
.filter(views__name=view.name)
.order_by('pk', 'fqdn'),
txt_set=TXT.objects
.filter(mega_filter)
.filter(views__name=view.name)
.order_by('pk', 'fqdn'),
sshfp_set=SSHFP.objects
.filter(mega_filter)
.filter(views__name=view.name)
.order_by('pk', 'fqdn'),
range_set=Range.objects
.filter(mega_filter)
.filter(views__name=view.name)
.order_by('start_upper', 'start_lower'),
)
return data
def _render_reverse_zone(default_ttl, nameserver_set, interface_set,
ptr_set, range_set):
BUILD_STR = ''
BUILD_STR += render_rdtype(nameserver_set)
BUILD_STR += render_rdtype(ptr_set)
BUILD_STR += render_rdtype(interface_set, reverse=True, rdtype='PTR')
BUILD_STR += render_rdtype(range_set, reverse=True)
return BUILD_STR
def render_reverse_zone(view, domain_mega_filter, rdomain_mega_filter,
range_set, ip_type=IP_TYPE_4):
data = _render_reverse_zone(
default_ttl=DEFAULT_TTL,
nameserver_set=Nameserver.objects.filter(domain_mega_filter).filter(
views__name=view.name).order_by('server'),
interface_set=(
StaticInterface.objects
.filter(rdomain_mega_filter, dns_enabled=True)
.filter(views__name=view.name)
.order_by('pk', 'ip_type', 'label', 'ip_upper', 'ip_lower')),
ptr_set=PTR.objects.filter(rdomain_mega_filter).filter(
views__name=view.name).order_by('pk', 'ip_upper',
'ip_lower'),
range_set=range_set
)
return data
def build_zone_data(view, root_domain, soa, logf):
"""
This function does the heavy lifting of building a zone. It coordinates
getting all of the data out of the db into BIND format.
:param soa: The SOA corresponding to the zone being built.
:type soa: SOA
:param root_domain: The root domain of this zone.
:type root_domain: str
:returns public_file_path: The path to the zone file in the STAGEING
dir
:type public_file_path: str
:returns public_data: The data that should be written to
public_file_path
:type public_data: str
:returns view_zone_file: The path to the zone file in the STAGEING dir
:type view_zone_file: str
:param view_data: The data that should be written to view_zone_file
:type view_data: str
"""
ztype = 'reverse' if root_domain.is_reverse else 'forward'
if (soa.has_record_set(view=view, exclude_ns=True) and
not root_domain.nameserver_set.filter(views=view).exists()):
msg = ("The {0} zone has at least one record in the {1} view, but "
"there are no nameservers in that view. A zone file for {1} "
"won't be built. Use the search string 'zone=:{0} view=:{1}' "
"to find the troublesome record(s)"
.format(root_domain, view.name))
fail_mail(msg, subject="Record(s) without NS records can't be built")
logf(msg)
return ''
domains = soa.domain_set.all().order_by('name')
# Build the mega filter!
domain_mega_filter = Q(domain=root_domain)
for domain in domains:
domain_mega_filter = domain_mega_filter | Q(domain=domain)
rdomain_mega_filter = Q(reverse_domain=root_domain)
for reverse_domain in domains:
rdomain_mega_filter = rdomain_mega_filter | Q(
reverse_domain=reverse_domain)
soa_data = render_soa_only(soa=soa, root_domain=root_domain)
if root_domain.ip_type == '4':
range_set = (root_domain.get_related_ranges()
.filter(views__name=view.name)
.order_by('start_upper', 'start_lower'))
else:
range_set = []
try:
if ztype == "forward":
view_data = render_forward_zone(view, domain_mega_filter)
else:
ip_type = (IP_TYPE_6 if root_domain.name.endswith('ip6.arpa')
else IP_TYPE_4)
view_data = render_reverse_zone(
view, domain_mega_filter, rdomain_mega_filter, ip_type=ip_type,
range_set=range_set)
except View.DoesNotExist:
view_data = ""
if view_data:
view_data = soa_data + view_data
return view_data
|
{
"content_hash": "6f122baa397ada3a7d26be4d8dbdeaf1",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 79,
"avg_line_length": 34.90582959641256,
"alnum_prop": 0.5996916752312436,
"repo_name": "drkitty/cyder",
"id": "7395b994095aaa4644df73512c6956757da1399e",
"size": "7784",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "cyder/cydns/cybind/zone_builder.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "22868"
},
{
"name": "HTML",
"bytes": "54595"
},
{
"name": "JavaScript",
"bytes": "214688"
},
{
"name": "Makefile",
"bytes": "2375"
},
{
"name": "Puppet",
"bytes": "6422"
},
{
"name": "Python",
"bytes": "1955522"
},
{
"name": "Shell",
"bytes": "9416"
}
],
"symlink_target": ""
}
|
from starcluster.clustersetup import ClusterSetup
from starcluster.logger import log
class QoRTsInstaller(ClusterSetup):
def run(self, nodes, master, user, user_shell, volumes):
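        # Fetch the QoRTs release zip on every node, then write an environment-modules
        # file that prepends the install directory to PATH.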
for node in nodes:
log.info("Installing QoRTs 0.3.17 on %s" % (node.alias))
node.ssh.execute('wget -c -P /opt/software/qorts https://github.com/hartleys/QoRTs/releases/download/v0.3.17/QoRTs_0.3.17.zip')
node.ssh.execute('cd /opt/software/qorts && unzip QoRTs_0.3.17.zip')
node.ssh.execute('chmod +x /opt/software/qorts/QoRTs_0.3.17/QoRTs.jar')
node.ssh.execute('mkdir -p /usr/local/Modules/applications/qorts/;touch /usr/local/Modules/applications/qorts/0.3.17')
node.ssh.execute('echo "#%Module" >> /usr/local/Modules/applications/qorts/0.3.17')
node.ssh.execute('echo "set root /opt/software/qorts/QoRTs_0.3.17" >> /usr/local/Modules/applications/qorts/0.3.17')
node.ssh.execute('echo -e "prepend-path\tPATH\t\$root" >> /usr/local/Modules/applications/qorts/0.3.17')
|
{
"content_hash": "b26b123820a17d6ef6546413b124ad76",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 130,
"avg_line_length": 61.5,
"alnum_prop": 0.7235772357723578,
"repo_name": "meissnert/StarCluster-Plugins",
"id": "e23caba8bd46bdbcedb9bf4c8409e9187fb19ca8",
"size": "984",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qorts_0_3_17.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "152051"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import argparse
import getpass
import json
import os
import yaml
from elasticsearch.client import Elasticsearch
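# Gather Elasticsearch connection settings (from config.yaml, CLI flags, or
# interactive prompts), create the elastalert writeback index with its doc-type
# mappings, and optionally copy documents over from an existing index.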
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--host', help='Elasticsearch host')
parser.add_argument('--port', type=int, help='Elasticsearch port')
parser.add_argument('--url-prefix', help='Elasticsearch URL prefix')
parser.add_argument('--no-auth', action='store_const', const=True, help='Suppress prompt for basic auth')
parser.add_argument('--ssl', action='store_const', const=True, help='Use SSL')
parser.add_argument('--no-ssl', action='store_const', const=True, help='Do not use SSL')
parser.add_argument('--index', help='Index name to create')
parser.add_argument('--old-index', help='Old index name to copy')
args = parser.parse_args()
if os.path.isfile('../config.yaml'):
filename = '../config.yaml'
elif os.path.isfile('config.yaml'):
filename = 'config.yaml'
else:
filename = ''
username = None
password = None
use_ssl = None
url_prefix = None
http_auth = None
if filename:
with open(filename) as config_file:
data = yaml.load(config_file)
host = data.get('es_host')
port = data.get('es_port')
username = data.get('es_username')
password = data.get('es_password')
url_prefix = data.get('es_url_prefix', '')
use_ssl = data.get('use_ssl')
else:
host = args.host if args.host else raw_input('Enter elasticsearch host: ')
port = args.port if args.port else int(raw_input('Enter elasticsearch port: '))
use_ssl = (args.ssl if args.ssl is not None
else args.no_ssl if args.no_ssl is not None
else raw_input('Use SSL? t/f: ').lower() in ('t', 'true'))
if args.no_auth is None:
username = raw_input('Enter optional basic-auth username: ')
password = getpass.getpass('Enter optional basic-auth password: ')
url_prefix = (args.url_prefix if args.url_prefix is not None
else raw_input('Enter optional Elasticsearch URL prefix: '))
if username and password:
http_auth = username + ':' + password
es = Elasticsearch(host=host, port=port, use_ssl=use_ssl, http_auth=http_auth, url_prefix=url_prefix)
silence_mapping = {'silence': {'properties': {'rule_name': {'index': 'not_analyzed', 'type': 'string'},
'until': {'type': 'date', 'format': 'dateOptionalTime'}}}}
ess_mapping = {'elastalert_status': {'properties': {'rule_name': {'index': 'not_analyzed', 'type': 'string'},
'@timestamp': {'format': 'dateOptionalTime', 'type': 'date'}}}}
es_mapping = {'elastalert': {'properties': {'rule_name': {'index': 'not_analyzed', 'type': 'string'},
'match_body': {'enabled': False, 'type': 'object'},
'aggregate_id': {'index': 'not_analyzed', 'type': 'string'}}}}
error_mapping = {'elastalert_error': {'properties': {'data': {'type': 'object', 'enabled': False}}}}
index = args.index if args.index is not None else raw_input('New index name? (Default elastalert_status) ')
if not index:
index = 'elastalert_status'
old_index = (args.old_index if args.old_index is not None
else raw_input('Name of existing index to copy? (Default None) '))
res = None
if old_index:
print('Downloading existing data...')
res = es.search(index=old_index, body={}, size=500000)
print('Got %s documents' % (len(res['hits']['hits'])))
es.indices.create(index)
es.indices.put_mapping(index=index, doc_type='elastalert', body=es_mapping)
es.indices.put_mapping(index=index, doc_type='elastalert_status', body=ess_mapping)
es.indices.put_mapping(index=index, doc_type='silence', body=silence_mapping)
es.indices.put_mapping(index=index, doc_type='elastalert_error', body=error_mapping)
print('New index %s created' % (index))
if res:
bulk = ''.join(['%s\n%s\n' % (json.dumps({'create': {'_type': doc['_type'], '_index': index}}),
json.dumps(doc['_source'])) for doc in res['hits']['hits']])
print('Uploading data...')
es.bulk(body=bulk, index=index)
print('Done!')
if __name__ == '__main__':
main()
|
{
"content_hash": "f9b9e1e43816616975e3339c51424d54",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 119,
"avg_line_length": 45.02970297029703,
"alnum_prop": 0.5883905013192612,
"repo_name": "thomdixon/elastalert",
"id": "eeba7f1c165fdc45f56edbe99c4a5e99f515e373",
"size": "4594",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "elastalert/create_index.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "365"
},
{
"name": "Python",
"bytes": "225195"
}
],
"symlink_target": ""
}
|
from Crypto.PublicKey import RSA
from Crypto import Random
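# Minimal PyCrypto RSA demo. Note that key.encrypt()/key.decrypt() here perform
# raw ("textbook") RSA with no padding; real applications should use a padded
# scheme such as Crypto.Cipher.PKCS1_OAEP instead.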
def gen_key(nbits=1024):
random_generator = Random.new().read
key = RSA.generate(nbits, random_generator)
return key
def check_key(key):
print key.can_encrypt()
print key.can_sign()
print key.has_private()
def get_pubk(key):
return key.publickey()
def encrypt(public_key, text, random):
return public_key.encrypt(text, random)
def decrypt(key, enc_data):
    return key.decrypt(enc_data)
if __name__ == '__main__':
key = gen_key()
check_key(key)
public_key = get_pubk(key)
text = 'abcdefgh'
enc_data = encrypt(public_key, text, random=32)
print enc_data
    dec_data = decrypt(key, enc_data)
print dec_data
|
{
"content_hash": "fe650828f2a110055adc225da83d93c8",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 51,
"avg_line_length": 17.75609756097561,
"alnum_prop": 0.6510989010989011,
"repo_name": "ksmaheshkumar/My-Gray-Hacker-Resources",
"id": "55055f369c710760e23417347e2660cdbc48886f",
"size": "752",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Cryptography/Public_Key/RSA.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "42918"
},
{
"name": "C",
"bytes": "1779951"
},
{
"name": "C++",
"bytes": "1387885"
},
{
"name": "Go",
"bytes": "472"
},
{
"name": "HTML",
"bytes": "267097"
},
{
"name": "Java",
"bytes": "15090"
},
{
"name": "JavaScript",
"bytes": "25669"
},
{
"name": "Lua",
"bytes": "4863"
},
{
"name": "Makefile",
"bytes": "409888"
},
{
"name": "PHP",
"bytes": "10941"
},
{
"name": "Perl",
"bytes": "22496"
},
{
"name": "Python",
"bytes": "505447"
},
{
"name": "Ruby",
"bytes": "2338"
},
{
"name": "Shell",
"bytes": "7846"
}
],
"symlink_target": ""
}
|