text
stringlengths 4
1.02M
| meta
dict |
|---|---|
"""
NOTE: concatenation seems very slow
"""
import treeano
import treeano.nodes as tn
@treeano.register_node("partition_axis")
class PartitionAxisNode(treeano.NodeImpl):
"""
node that returns a fraction of the input tensor
rough explanation:
x.shape == (4, 8, 12, 16, 20)
y = partition_axis(x, split_idx=2, num_splits=4, channel_axis=3)
=>
y == x[:, :, :, 8:12, :]
"""
hyperparameter_names = ("split_idx",
"num_splits",
"channel_axis")
def compute_output(self, network, in_vw):
# FIXME make default in terms of batch axis
channel_axis = network.find_hyperparameter(["channel_axis"], 1)
split_idx = network.find_hyperparameter(["split_idx"])
num_splits = network.find_hyperparameter(["num_splits"])
var = in_vw.variable
shape = in_vw.shape
num_channels = shape[channel_axis]
start_idx = (num_channels * split_idx) // num_splits
end_idx = num_channels * (split_idx + 1) // num_splits
new_shape = list(shape)
new_shape[channel_axis] = end_idx - start_idx
new_shape = tuple(new_shape)
idx = tuple([slice(None) for _ in range(channel_axis)]
+ [slice(start_idx, end_idx)])
network.create_vw(
"default",
variable=var[idx],
shape=new_shape,
tags={"output"},
)
def MultiPool2DNode(name, **kwargs):
# TODO tests
# TODO make a node that verifies hyperparameters
return tn.HyperparameterNode(
name,
tn.ConcatenateNode(
name + "_concat",
[tn.SequentialNode(name + "_seq0",
[PartitionAxisNode(name + "_part0",
split_idx=0,
num_splits=2),
tn.MaxPool2DNode(name + "_max",
ignore_border=True)]),
tn.SequentialNode(name + "_seq1",
[PartitionAxisNode(name + "_part1",
split_idx=1,
num_splits=2),
tn.MeanPool2DNode(name + "_mean")])]),
**kwargs)
|
{
"content_hash": "66850e06417a87a6a054543099a031a1",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 71,
"avg_line_length": 33.394366197183096,
"alnum_prop": 0.48629270350063264,
"repo_name": "nsauder/treeano",
"id": "9f712635743c3695f75c0e47abb0155db4205593",
"size": "2371",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "treeano/sandbox/nodes/partition_axis.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1121"
},
{
"name": "JavaScript",
"bytes": "16041"
},
{
"name": "Python",
"bytes": "628343"
}
],
"symlink_target": ""
}
|
from traits.api import List
import math
from pychron.hardware.polyinomial_mapper import BaseMapper
from pychron.hardware.core.core_device import CoreDevice
from pychron.hardware.gauges.base_controller import BaseGaugeController
from pychron.hardware.labjack.base_u3_lv import BaseU3LV
PARAMS = {
"mbar": (6.8, 11.33),
"ubar": (5.0, 8.333),
"torr": (6.875, 11.46),
"mtorr": (5.075, 8.458),
"micron": (5.075, 8.458),
"Pa": (5.6, 9.333),
"kPa": (7.4, 12.33),
}
class PressureMapper(BaseMapper):
def map_measured(self, v):
c, d = PARAMS[self.units]
return 10 ** (1.667 * v - d)
def map_output(self, v):
c, d = PARAMS[self.units]
return c + 0.6 * math.log10(v)
class U3GaugeController(BaseU3LV, BaseGaugeController, CoreDevice):
poly_mappers = List
def load_additional_args(self, config):
BaseU3LV.load_additional_args(self, config)
self.poly_mappers.append(self.mapper_factory(config, "Conversion1"))
self.poly_mappers.append(self.mapper_factory(config, "Conversion2"))
self._load_gauges(config)
return True
def mapper_factory(self, config, section):
mapper = PressureMapper()
units = self.config_get(config, section, "units")
mapper.units = units
return mapper
def _read_pressure(self, gauge, *args, **kw):
idx = self.gauges.index(gauge)
v = self.read_adc_channel(idx)
return self.poly_mappers[idx].map_measured(v)
# ============= EOF =============================================
|
{
"content_hash": "7d1b4ba1e9d586083978650d1bdd8d54",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 76,
"avg_line_length": 28.6,
"alnum_prop": 0.6179275270184361,
"repo_name": "NMGRL/pychron",
"id": "299e3314769416a13512b372e5f7b1d503dcdb98",
"size": "2301",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "pychron/hardware/labjack/u3_gauge_controller.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "128"
},
{
"name": "C++",
"bytes": "3706"
},
{
"name": "CSS",
"bytes": "263"
},
{
"name": "Cython",
"bytes": "1692"
},
{
"name": "Fortran",
"bytes": "455875"
},
{
"name": "HTML",
"bytes": "46796"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Processing",
"bytes": "11421"
},
{
"name": "Python",
"bytes": "10773692"
},
{
"name": "Shell",
"bytes": "1003"
}
],
"symlink_target": ""
}
|
import nltk
from nltk import PerceptronTagger
class PosTagger:
def __init__(self, sentence):
"""
Args:
sentence:
"""
self.sentence = sentence
self.tagger = PosTagger.get_tagger()
def pos_tag(self):
"""
Returns:
"""
tokens = nltk.word_tokenize(self.sentence)
pos_tagged_tokens = self.tagger.tag(tokens)
return pos_tagged_tokens
@staticmethod
def get_tagger():
"""
Returns:
"""
return PerceptronTagger()
|
{
"content_hash": "419fb170f9343bfb44c3148f1d4b5096",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 51,
"avg_line_length": 17.46875,
"alnum_prop": 0.5277280858676208,
"repo_name": "yardstick17/extract_phrase",
"id": "fc6e5f512229e0a4a8b4fd59b6100c136a9b0c1e",
"size": "606",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nlp/pos_tagger.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20045"
},
{
"name": "Shell",
"bytes": "147"
}
],
"symlink_target": ""
}
|
from imp import load_source
from getpass import getpass
from base64 import b64encode, b64decode
import logging
import os, sys
import ConfigParser
# Default config path; may be overridden with a "config=<path>" CLI argument.
CONFIG = 'config/config.ini'
for v in sys.argv:
    if 'config' in v and '=' in v:
        # BUGFIX: split('=')[1:] assigned a *list* of fragments instead of
        # the path string; split once so paths containing '=' stay intact.
        CONFIG = v.split('=', 1)[1]

config_parser = ConfigParser.RawConfigParser()
config_parser.read(CONFIG)

# Expose every [DEFAULT] option as an upper-cased key.
defaults = {}
for k, v in config_parser.items('DEFAULT'):
    defaults[k.upper()] = v

HOST_IP = defaults['RACKHD_HOST']
HOST_PORT = defaults['RACKHD_PORT']
HOST_PORT_AUTH = defaults['RACKHD_PORT_AUTH']
HTTPD_PORT = defaults['RACKHD_HTTPD_PORT']
# File holding the base64-obfuscated credentials (see set_b64_cred below).
CRED_FILE = '.passwd'

# Global logger setup: CRITICAL < ERROR < WARNING < INFO < DEBUG
LOGFORMAT = '%(asctime)s:%(name)s:%(levelname)s - %(message)s'
# Map the level names allowed in the config file to logging constants.
LOGLEVELS = {
    'CRITICAL': logging.CRITICAL,
    'ERROR': logging.ERROR,
    'WARNING': logging.WARNING,
    'INFO': logging.INFO,
    'DEBUG': logging.DEBUG
}
LOGGER_LVL = defaults['RACKHD_TEST_LOGLVL']
logging.basicConfig(level=LOGLEVELS[LOGGER_LVL], format=LOGFORMAT)
# Obfuscate credentials
def set_b64_cred(cred):
out = ''
with open(CRED_FILE,'a+') as file:
for (k, v) in cred.items():
new_v = b64encode(v)
out += '{0}="{1}"\n'.format(k, new_v)
file.write(out)
# Unobfuscate credentials
def get_b64_cred(req):
creds = load_source('creds',CRED_FILE)
rsp = []
for key in req:
rsp.append(b64decode(getattr(creds, key)))
return rsp
def get_cred(service):
if service == 'bmc':
return get_b64_cred(["BMC_USER", "BMC_PASS"])
elif service == 'redfish':
return get_b64_cred(["REDFISH_USER", "REDFISH_PASS"])
else:
return None
def get_bmc_cred():
return get_cred('bmc')
# Initial cred file to log bmc password information if it doesn't exist
if os.path.isfile(CRED_FILE) is False:
bmc_user = raw_input('BMC username: ')
bmc_pass = getpass('BMC password: ')
redfish_user = raw_input('Redfish username: ')
redfish_pass = getpass('Redfish password: ')
creds = {
"BMC_USER":bmc_user,
"BMC_PASS":bmc_pass,
"REDFISH_USER":redfish_user,
"REDFISH_PASS":redfish_pass
}
set_b64_cred(creds)
|
{
"content_hash": "37e627ee4b513ebd630186692db1c6ea",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 71,
"avg_line_length": 27.658227848101266,
"alnum_prop": 0.6411899313501144,
"repo_name": "jlongever/RackHD",
"id": "e4d31907b0eae50f11dc7906ad7037f510b7eb12",
"size": "2185",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "test/config/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "696"
},
{
"name": "Python",
"bytes": "282018"
},
{
"name": "Ruby",
"bytes": "5638"
},
{
"name": "Shell",
"bytes": "24821"
}
],
"symlink_target": ""
}
|
from distutils.core import setup
import py2exe
setup(console=['vs_tool_agent.py'])
|
{
"content_hash": "a8a20ba791c5c707d6ae8c7c76a8ef6f",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 37,
"avg_line_length": 22.75,
"alnum_prop": 0.7142857142857143,
"repo_name": "freedot/uts",
"id": "c10b3ae1d3fee1a88743b4a7ff1b6cc7b5b4cf65",
"size": "91",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "uts_tool/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "851"
},
{
"name": "C",
"bytes": "20036200"
},
{
"name": "C#",
"bytes": "64135"
},
{
"name": "C++",
"bytes": "530867"
},
{
"name": "CSS",
"bytes": "17578"
},
{
"name": "CoffeeScript",
"bytes": "2058"
},
{
"name": "HTML",
"bytes": "7698"
},
{
"name": "JavaScript",
"bytes": "317509"
},
{
"name": "Makefile",
"bytes": "7576"
},
{
"name": "Objective-C",
"bytes": "1924"
},
{
"name": "Python",
"bytes": "148757"
},
{
"name": "TypeScript",
"bytes": "32108"
}
],
"symlink_target": ""
}
|
# Copyright (C) Vladimir Prus 2002. Permission to copy, use, modify, sell and
# distribute this software is granted provided this copyright notice appears in
# all copies. This software is provided "as is" without express or implied
# warranty, and with no claim as to its suitability for any purpose.
import hashlib
from b2.util.utility import *
import property, feature
import b2.build.feature
from b2.exceptions import *
from b2.build.property import get_abbreviated_paths
from b2.util.sequence import unique
from b2.util.set import difference
from b2.util import cached, abbreviate_dashed
from b2.manager import get_manager
def reset ():
""" Clear the module state. This is mainly for testing purposes.
"""
global __cache
# A cache of property sets
# TODO: use a map of weak refs?
__cache = {}
reset ()
def create (raw_properties = []):
""" Creates a new 'PropertySet' instance for the given raw properties,
or returns an already existing one.
"""
# FIXME: propagate to callers.
if len(raw_properties) > 0 and isinstance(raw_properties[0], property.Property):
x = raw_properties
else:
x = [property.create_from_string(ps) for ps in raw_properties]
x.sort()
x = unique(x, stable=True)
# FIXME: can we do better, e.g. by directly computing
# hash value of the list?
key = tuple(x)
if not __cache.has_key (key):
__cache [key] = PropertySet(x)
return __cache [key]
def create_with_validation (raw_properties):
""" Creates new 'PropertySet' instances after checking
that all properties are valid and converting implicit
properties into gristed form.
"""
properties = [property.create_from_string(s) for s in raw_properties]
property.validate(properties)
return create(properties)
def empty ():
""" Returns PropertySet with empty set of properties.
"""
return create ()
def create_from_user_input(raw_properties, jamfile_module, location):
"""Creates a property-set from the input given by the user, in the
context of 'jamfile-module' at 'location'"""
properties = property.create_from_strings(raw_properties, True)
properties = property.translate_paths(properties, location)
properties = property.translate_indirect(properties, jamfile_module)
project_id = get_manager().projects().attributeDefault(jamfile_module, 'id', None)
if not project_id:
project_id = os.path.abspath(location)
properties = property.translate_dependencies(properties, project_id, location)
properties = property.expand_subfeatures_in_conditions(properties)
return create(properties)
def refine_from_user_input(parent_requirements, specification, jamfile_module,
location):
"""Refines requirements with requirements provided by the user.
Specially handles "-<property>value" syntax in specification
to remove given requirements.
- parent-requirements -- property-set object with requirements
to refine
- specification -- string list of requirements provided by the use
- project-module -- the module to which context indirect features
will be bound.
- location -- the path to which path features are relative."""
if not specification:
return parent_requirements
add_requirements = []
remove_requirements = []
for r in specification:
if r[0] == '-':
remove_requirements.append(r[1:])
else:
add_requirements.append(r)
if remove_requirements:
# Need to create property set, so that path features
# and indirect features are translated just like they
# are in project requirements.
ps = create_from_user_input(remove_requirements,
jamfile_module, location)
parent_requirements = create(difference(parent_requirements.all(),
ps.all()))
specification = add_requirements
requirements = create_from_user_input(specification,
jamfile_module, location)
return parent_requirements.refine(requirements)
class PropertySet:
""" Class for storing a set of properties.
- there's 1<->1 correspondence between identity and value. No
two instances of the class are equal. To maintain this property,
the 'PropertySet.create' rule should be used to create new instances.
Instances are immutable.
- each property is classified with regard to it's effect on build
results. Incidental properties have no effect on build results, from
Boost.Build point of view. Others are either free, or non-free, which we
call 'base'. Each property belong to exactly one of those categories and
it's possible to get list of properties in each category.
In addition, it's possible to get list of properties with specific
attribute.
- several operations, like and refine and as_path are provided. They all use
caching whenever possible.
"""
def __init__ (self, properties = []):
raw_properties = []
for p in properties:
raw_properties.append(p.to_raw())
self.all_ = properties
self.all_raw_ = raw_properties
self.all_set_ = set(properties)
self.incidental_ = []
self.free_ = []
self.base_ = []
self.dependency_ = []
self.non_dependency_ = []
self.conditional_ = []
self.non_conditional_ = []
self.propagated_ = []
self.link_incompatible = []
# A cache of refined properties.
self.refined_ = {}
# A cache of property sets created by adding properties to this one.
self.added_ = {}
# Cache for the default properties.
self.defaults_ = None
# Cache for the expanded properties.
self.expanded_ = None
# Cache for the expanded composite properties
self.composites_ = None
# Cache for property set with expanded subfeatures
self.subfeatures_ = None
# Cache for the property set containing propagated properties.
self.propagated_ps_ = None
# A map of features to its values.
self.feature_map_ = None
# A tuple (target path, is relative to build directory)
self.target_path_ = None
self.as_path_ = None
# A cache for already evaluated sets.
self.evaluated_ = {}
for p in raw_properties:
if not get_grist (p):
raise BaseException ("Invalid property: '%s'" % p)
att = feature.attributes (get_grist (p))
if 'propagated' in att:
self.propagated_.append (p)
if 'link_incompatible' in att:
self.link_incompatible.append (p)
for p in properties:
# A feature can be both incidental and free,
# in which case we add it to incidental.
if p.feature().incidental():
self.incidental_.append(p)
elif p.feature().free():
self.free_.append(p)
else:
self.base_.append(p)
if p.condition():
self.conditional_.append(p)
else:
self.non_conditional_.append(p)
if p.feature().dependency():
self.dependency_.append (p)
else:
self.non_dependency_.append (p)
def all(self):
return self.all_
def raw (self):
""" Returns the list of stored properties.
"""
return self.all_raw_
def __str__(self):
return ' '.join(str(p) for p in self.all_)
def base (self):
""" Returns properties that are neither incidental nor free.
"""
return self.base_
def free (self):
""" Returns free properties which are not dependency properties.
"""
return self.free_
def non_free(self):
return self.base_ + self.incidental_
def dependency (self):
""" Returns dependency properties.
"""
return self.dependency_
def non_dependency (self):
""" Returns properties that are not dependencies.
"""
return self.non_dependency_
def conditional (self):
""" Returns conditional properties.
"""
return self.conditional_
def non_conditional (self):
""" Returns properties that are not conditional.
"""
return self.non_conditional_
def incidental (self):
""" Returns incidental properties.
"""
return self.incidental_
def refine (self, requirements):
""" Refines this set's properties using the requirements passed as an argument.
"""
assert isinstance(requirements, PropertySet)
if not self.refined_.has_key (requirements):
r = property.refine(self.all_, requirements.all_)
self.refined_[requirements] = create(r)
return self.refined_[requirements]
def expand (self):
if not self.expanded_:
expanded = feature.expand(self.all_)
self.expanded_ = create(expanded)
return self.expanded_
def expand_subfeatures(self):
if not self.subfeatures_:
self.subfeatures_ = create(feature.expand_subfeatures(self.all_))
return self.subfeatures_
def evaluate_conditionals(self, context=None):
if not context:
context = self
if not self.evaluated_.has_key(context):
# FIXME: figure why the call messes up first parameter
self.evaluated_[context] = create(
property.evaluate_conditionals_in_context(self.all(), context))
return self.evaluated_[context]
def propagated (self):
if not self.propagated_ps_:
self.propagated_ps_ = create (self.propagated_)
return self.propagated_ps_
def add_defaults (self):
# FIXME: this caching is invalidated when new features
# are declare inside non-root Jamfiles.
if not self.defaults_:
expanded = feature.add_defaults(self.all_)
self.defaults_ = create(expanded)
return self.defaults_
def as_path (self):
if not self.as_path_:
def path_order (p1, p2):
i1 = p1.feature().implicit()
i2 = p2.feature().implicit()
if i1 != i2:
return i2 - i1
else:
return cmp(p1.feature().name(), p2.feature().name())
# trim redundancy
properties = feature.minimize(self.base_)
# sort according to path_order
properties.sort (path_order)
components = []
for p in properties:
if p.feature().implicit():
components.append(p.value())
else:
value = p.feature().name() + "-" + p.value()
if property.get_abbreviated_paths():
value = abbreviate_dashed(value)
components.append(value)
self.as_path_ = '/'.join (components)
return self.as_path_
def target_path (self):
""" Computes the target path that should be used for
target with these properties.
Returns a tuple of
- the computed path
- if the path is relative to build directory, a value of
'true'.
"""
if not self.target_path_:
# The <location> feature can be used to explicitly
# change the location of generated targets
l = self.get ('<location>')
if l:
computed = l[0]
is_relative = False
else:
p = self.as_path()
if hash_maybe:
p = hash_maybe(p)
# Really, an ugly hack. Boost regression test system requires
# specific target paths, and it seems that changing it to handle
# other directory layout is really hard. For that reason,
# we teach V2 to do the things regression system requires.
# The value o '<location-prefix>' is predended to the path.
prefix = self.get ('<location-prefix>')
if prefix:
if len (prefix) > 1:
raise AlreadyDefined ("Two <location-prefix> properties specified: '%s'" % prefix)
computed = os.path.join(prefix[0], p)
else:
computed = p
if not computed:
computed = "."
is_relative = True
self.target_path_ = (computed, is_relative)
return self.target_path_
def add (self, ps):
""" Creates a new property set containing the properties in this one,
plus the ones of the property set passed as argument.
"""
if not self.added_.has_key(ps):
self.added_[ps] = create(self.all_ + ps.all())
return self.added_[ps]
def add_raw (self, properties):
""" Creates a new property set containing the properties in this one,
plus the ones passed as argument.
"""
return self.add (create (properties))
def get (self, feature):
""" Returns all values of 'feature'.
"""
if type(feature) == type([]):
feature = feature[0]
if not isinstance(feature, b2.build.feature.Feature):
feature = b2.build.feature.get(feature)
if not self.feature_map_:
self.feature_map_ = {}
for v in self.all_:
if not self.feature_map_.has_key(v.feature()):
self.feature_map_[v.feature()] = []
self.feature_map_[v.feature()].append(v.value())
return self.feature_map_.get(feature, [])
@cached
def get_properties(self, feature):
"""Returns all contained properties associated with 'feature'"""
if not isinstance(feature, b2.build.feature.Feature):
feature = b2.build.feature.get(feature)
result = []
for p in self.all_:
if p.feature() == feature:
result.append(p)
return result
def __contains__(self, item):
return item in self.all_set_
def hash(p):
m = hashlib.md5()
m.update(p)
return m.hexdigest()
hash_maybe = hash if "--hash" in bjam.variable("ARGV") else None
|
{
"content_hash": "08c78df213165fb6e21c660c5ef63fd9",
"timestamp": "",
"source": "github",
"line_count": 462,
"max_line_length": 106,
"avg_line_length": 32.984848484848484,
"alnum_prop": 0.5673600629962596,
"repo_name": "ycsoft/FatCat-Server",
"id": "8c498de4d04774b1cc1e57f3a8782748cb1b9fe8",
"size": "15282",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "LIBS/boost_1_58_0/tools/build/src/build/property_set.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "195345"
},
{
"name": "Batchfile",
"bytes": "32367"
},
{
"name": "C",
"bytes": "9529739"
},
{
"name": "C#",
"bytes": "41850"
},
{
"name": "C++",
"bytes": "175536080"
},
{
"name": "CMake",
"bytes": "14812"
},
{
"name": "CSS",
"bytes": "282447"
},
{
"name": "Cuda",
"bytes": "26521"
},
{
"name": "FORTRAN",
"bytes": "1856"
},
{
"name": "Groff",
"bytes": "6163"
},
{
"name": "HTML",
"bytes": "148956564"
},
{
"name": "JavaScript",
"bytes": "174868"
},
{
"name": "Lex",
"bytes": "1290"
},
{
"name": "Makefile",
"bytes": "1045258"
},
{
"name": "Max",
"bytes": "37424"
},
{
"name": "Objective-C",
"bytes": "34644"
},
{
"name": "Objective-C++",
"bytes": "246"
},
{
"name": "PHP",
"bytes": "60249"
},
{
"name": "Perl",
"bytes": "37297"
},
{
"name": "Perl6",
"bytes": "2130"
},
{
"name": "Python",
"bytes": "1717781"
},
{
"name": "QML",
"bytes": "613"
},
{
"name": "QMake",
"bytes": "9450"
},
{
"name": "Rebol",
"bytes": "372"
},
{
"name": "Shell",
"bytes": "372652"
},
{
"name": "Tcl",
"bytes": "1205"
},
{
"name": "TeX",
"bytes": "13819"
},
{
"name": "XSLT",
"bytes": "564356"
},
{
"name": "Yacc",
"bytes": "19612"
}
],
"symlink_target": ""
}
|
import ddt
from poppy.model.helpers import origin
from tests.unit import base
@ddt.ddt
class TestOrigin(base.TestCase):
def test_origin(self):
origin_url = 'www.mywebsite.com'
port = 443
ssl = True
myorigin = origin.Origin(origin_url, port, ssl)
# test all properties
# origin
self.assertEqual(myorigin.origin, origin_url)
self.assertRaises(
AttributeError, setattr, myorigin, 'origin', origin_url)
# port
self.assertEqual(myorigin.port, port)
myorigin.port = 80
self.assertEqual(myorigin.port, 80)
# ssl
self.assertEqual(myorigin.ssl, ssl)
myorigin.ssl = True
self.assertEqual(myorigin.ssl, True)
# rules
self.assertEqual(myorigin.rules, [])
self.assertRaises(AttributeError, setattr, myorigin, 'rules', [])
|
{
"content_hash": "f70d1fe776e81bd20f0f544c48d3274b",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 73,
"avg_line_length": 25.314285714285713,
"alnum_prop": 0.618510158013544,
"repo_name": "amitgandhinz/cdn",
"id": "09bc647988c66f2b8cfa8d2904a053dd3fc5ff1f",
"size": "1472",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/model/helpers/test_origin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "303888"
},
{
"name": "Shell",
"bytes": "4071"
}
],
"symlink_target": ""
}
|
from os import path
from setuptools import setup
from distutils.command.clean import clean
from distutils.dir_util import remove_tree
import apitestcase
class CustomCleanCommand(clean):
"""
Customized clean method that removes 'dist' and 'build' directories.
"""
def run(self):
clean.run(self)
if path.exists('dist'):
remove_tree('dist')
if path.exists('build'):
remove_tree('build')
setup(
name="apitestcase",
description="An integration test suite for HTTP APIs",
url="https://github.com/bramwelt/apitestcase",
version=".".join(apitestcase.__version__),
author="Trevor Bramwell",
author_email="trevor@bramwell.net",
packages=['apitestcase'],
license="MIT",
install_requires=['requests'],
long_description=open("README.rst").read(),
test_suite="test",
cmdclass={'clean': CustomCleanCommand},
)
|
{
"content_hash": "6bcb4df0cade3538651c472f1e32e074",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 72,
"avg_line_length": 26.823529411764707,
"alnum_prop": 0.6600877192982456,
"repo_name": "bramwelt/apitestcase",
"id": "d5d6459a4559f95dcc83039bf51c9ab068be46a3",
"size": "912",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4101"
}
],
"symlink_target": ""
}
|
import functools
import logging
import json
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
logging.basicConfig(level=logging.DEBUG)
url = 'https://review.opendev.org/projects/'
# This is what a project looks like
'''
"openstack-attic/akanda": {
"id": "openstack-attic%2Fakanda",
"state": "READ_ONLY"
},
'''
def is_in_wanted_namespace(proj):
# only interested in openstack or x namespace (e.g. not retired
# stackforge, etc)
if proj.startswith('stackforge/') or \
proj.startswith('stackforge-attic/'):
return False
else:
return True
# Check if this project has a plugin file
def has_devstack_plugin(session, proj):
# Don't link in the deb packaging repos
if "openstack/deb-" in proj:
return False
r = session.get("https://opendev.org/%s/raw/branch/master/devstack/plugin.sh" % proj)
return r.status_code == 200
logging.debug("Getting project list from %s" % url)
r = requests.get(url)
projects = sorted(filter(is_in_wanted_namespace, json.loads(r.text[4:])))
logging.debug("Found %d projects" % len(projects))
s = requests.Session()
# sometimes gitea gives us a 500 error; retry sanely
# https://stackoverflow.com/a/35636367
retries = Retry(total=3, backoff_factor=1,
status_forcelist=[ 500 ])
s.mount('https://', HTTPAdapter(max_retries=retries))
found_plugins = filter(functools.partial(has_devstack_plugin, s), projects)
for project in found_plugins:
print(project)
|
{
"content_hash": "d308163a47daf206b488cc331809885a",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 89,
"avg_line_length": 29.056603773584907,
"alnum_prop": 0.7006493506493506,
"repo_name": "dtroyer/devstack",
"id": "d39b8018ae61f46c53654c0ead6845b4bdc12b93",
"size": "2503",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/generate-devstack-plugins-list.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2490"
},
{
"name": "Python",
"bytes": "44574"
},
{
"name": "Shell",
"bytes": "775213"
}
],
"symlink_target": ""
}
|
import asyncio
import pytest
import ray
from ray.experimental.serve.policy import (
RandomPolicyQueue, RandomPolicyQueueActor, RoundRobinPolicyQueueActor,
PowerOfTwoPolicyQueueActor, FixedPackingPolicyQueueActor)
pytestmark = pytest.mark.asyncio
def make_task_runner_mock():
@ray.remote(num_cpus=0)
class TaskRunnerMock:
def __init__(self):
self.query = None
self.queries = []
async def _ray_serve_call(self, request_item):
self.query = request_item
self.queries.append(request_item)
return "DONE"
def get_recent_call(self):
return self.query
def get_all_calls(self):
return self.queries
return TaskRunnerMock.remote()
@pytest.fixture(scope="session")
def task_runner_mock_actor():
yield make_task_runner_mock()
async def test_single_prod_cons_queue(serve_instance, task_runner_mock_actor):
q = RandomPolicyQueueActor.remote()
q.link.remote("svc", "backend")
q.dequeue_request.remote("backend", task_runner_mock_actor)
# Make sure we get the request result back
result = await q.enqueue_request.remote("svc", 1, "kwargs", None)
assert result == "DONE"
# Make sure it's the right request
got_work = await task_runner_mock_actor.get_recent_call.remote()
assert got_work.request_args == 1
assert got_work.request_kwargs == "kwargs"
async def test_slo(serve_instance, task_runner_mock_actor):
q = RandomPolicyQueueActor.remote()
await q.link.remote("svc", "backend")
all_request_sent = []
for i in range(10):
slo_ms = 1000 - 100 * i
all_request_sent.append(
q.enqueue_request.remote(
"svc", i, "kwargs", None, request_slo_ms=slo_ms))
for i in range(10):
await q.dequeue_request.remote("backend", task_runner_mock_actor)
await asyncio.gather(*all_request_sent)
i_should_be = 9
all_calls = await task_runner_mock_actor.get_all_calls.remote()
all_calls = all_calls[-10:]
for call in all_calls:
assert call.request_args == i_should_be
i_should_be -= 1
async def test_alter_backend(serve_instance, task_runner_mock_actor):
q = RandomPolicyQueueActor.remote()
await q.set_traffic.remote("svc", {"backend-1": 1})
await q.dequeue_request.remote("backend-1", task_runner_mock_actor)
await q.enqueue_request.remote("svc", 1, "kwargs", None)
got_work = await task_runner_mock_actor.get_recent_call.remote()
assert got_work.request_args == 1
await q.set_traffic.remote("svc", {"backend-2": 1})
await q.dequeue_request.remote("backend-2", task_runner_mock_actor)
await q.enqueue_request.remote("svc", 2, "kwargs", None)
got_work = await task_runner_mock_actor.get_recent_call.remote()
assert got_work.request_args == 2
async def test_split_traffic_random(serve_instance, task_runner_mock_actor):
q = RandomPolicyQueueActor.remote()
await q.set_traffic.remote("svc", {"backend-1": 0.5, "backend-2": 0.5})
runner_1, runner_2 = [make_task_runner_mock() for _ in range(2)]
for _ in range(20):
await q.dequeue_request.remote("backend-1", runner_1)
await q.dequeue_request.remote("backend-2", runner_2)
# assume 50% split, the probability of all 20 requests goes to a
# single queue is 0.5^20 ~ 1-6
for _ in range(20):
await q.enqueue_request.remote("svc", 1, "kwargs", None)
got_work = [
await runner.get_recent_call.remote()
for runner in (runner_1, runner_2)
]
assert [g.request_args for g in got_work] == [1, 1]
async def test_round_robin(serve_instance, task_runner_mock_actor):
q = RoundRobinPolicyQueueActor.remote()
await q.set_traffic.remote("svc", {"backend-1": 0.5, "backend-2": 0.5})
runner_1, runner_2 = [make_task_runner_mock() for _ in range(2)]
# NOTE: this is the only difference between the
# test_split_traffic_random and test_round_robin
for _ in range(10):
await q.dequeue_request.remote("backend-1", runner_1)
await q.dequeue_request.remote("backend-2", runner_2)
for _ in range(20):
await q.enqueue_request.remote("svc", 1, "kwargs", None)
got_work = [
await runner.get_recent_call.remote()
for runner in (runner_1, runner_2)
]
assert [g.request_args for g in got_work] == [1, 1]
async def test_fixed_packing(serve_instance):
packing_num = 4
q = FixedPackingPolicyQueueActor.remote(packing_num=packing_num)
await q.set_traffic.remote("svc", {"backend-1": 0.5, "backend-2": 0.5})
runner_1, runner_2 = (make_task_runner_mock() for _ in range(2))
# both the backends will get equal number of queries
# as it is packed round robin
for _ in range(packing_num):
await q.dequeue_request.remote("backend-1", runner_1)
await q.dequeue_request.remote("backend-2", runner_2)
for backend, runner in zip(["1", "2"], [runner_1, runner_2]):
for _ in range(packing_num):
input_value = "should-go-to-backend-{}".format(backend)
await q.enqueue_request.remote("svc", input_value, "kwargs", None)
all_calls = await runner.get_all_calls.remote()
for call in all_calls:
assert call.request_args == input_value
async def test_power_of_two_choices(serve_instance):
    """Power-of-two-choices policy prefers the backend with the shorter
    queue, so the late requests flow to the newly added (empty) backend."""
    q = PowerOfTwoPolicyQueueActor.remote()
    enqueue_futures = []
    # First, fill the queue for backend-1 with 3 requests
    await q.set_traffic.remote("svc", {"backend-1": 1.0})
    for _ in range(3):
        future = q.enqueue_request.remote("svc", "1", "", None)
        enqueue_futures.append(future)
    # Then, add a new backend, this backend should be filled next
    await q.set_traffic.remote("svc", {"backend-1": 0.5, "backend-2": 0.5})
    for _ in range(2):
        future = q.enqueue_request.remote("svc", "2", "", None)
        enqueue_futures.append(future)
    runner_1, runner_2 = (make_task_runner_mock() for _ in range(2))
    for _ in range(3):
        await q.dequeue_request.remote("backend-1", runner_1)
        await q.dequeue_request.remote("backend-2", runner_2)
    # Wait until every enqueued request has actually been routed.
    await asyncio.gather(*enqueue_futures)
    # backend-1 took the 3 early requests; the 2 later ones went to backend-2.
    assert len(await runner_1.get_all_calls.remote()) == 3
    assert len(await runner_2.get_all_calls.remote()) == 2
async def test_queue_remove_replicas(serve_instance):
    """Removing a replica must also drop its pending worker-queue entries."""
    runner = make_task_runner_mock()
    queue = RandomPolicyQueue()
    await queue.dequeue_request("backend", runner)
    await queue.remove_and_destory_replica("backend", runner)
    assert queue.worker_queues["backend"].qsize() == 0
|
{
"content_hash": "615c328f42b4e27d21a2ca988959fc77",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 78,
"avg_line_length": 34.97894736842105,
"alnum_prop": 0.6477580499548601,
"repo_name": "stephanie-wang/ray",
"id": "ce9ab25311bf3ba4a50230b5de3478706889db9b",
"size": "6646",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ray/experimental/serve/tests/test_queue.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "29882"
},
{
"name": "C++",
"bytes": "2149909"
},
{
"name": "CSS",
"bytes": "8025"
},
{
"name": "Dockerfile",
"bytes": "5499"
},
{
"name": "Go",
"bytes": "28481"
},
{
"name": "HTML",
"bytes": "30435"
},
{
"name": "Java",
"bytes": "738348"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "1965"
},
{
"name": "Python",
"bytes": "4058862"
},
{
"name": "Shell",
"bytes": "88736"
},
{
"name": "Starlark",
"bytes": "121207"
},
{
"name": "TypeScript",
"bytes": "64161"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from datetime import timedelta
from database import db
from pytz import timezone
from pytz import utc
from yelp_beans.models import MeetingSpec
from yelp_beans.models import MeetingSubscription
def filter_subscriptions_by_user_data(subscriptions, user):
    """Return the subset of *subscriptions* whose rules approve *user*."""
    def _evaluate(subscription):
        # Load the rule set attached to this subscription and run it
        # against the user's metadata.
        rules = MeetingSubscription.query.filter(
            MeetingSubscription.id == subscription['id']).one().user_rules
        return apply_rules(user, subscription, rules)

    outcomes = (_evaluate(subscription) for subscription in subscriptions)
    return [outcome for outcome in outcomes if outcome is not None]
def apply_rules(user, subscription, subscription_rules):
    """
    Apply logic to rules set for each subscription. In a way this authorizes who can
    see the subscription. Rules can be combined in two ways: all rules must
    match ('all') or at least one rule must match ('any').

    user: models.User()
    subscription: Union[models.MeetingSubscription(), Dict[str, Any]]
    subscription_rules: models.Rule()

    Returns the subscription when the user is approved, otherwise None.
    """
    if isinstance(subscription, dict):
        rule_logic_str = subscription.get('rule_logic')
    else:
        rule_logic_str = subscription.rule_logic

    if rule_logic_str == 'any':
        rule_logic = any
    elif rule_logic_str == 'all':
        rule_logic = all
    else:
        # No recognized rule logic: the subscription is open to everyone.
        return subscription

    # Previously duplicated in both branches above; a subscription that
    # declares rule logic must actually define rules.
    assert subscription_rules, "You created logic for rules but don't have any rules!"

    results = set()
    for rule in subscription_rules:
        user_value = user.meta_data[rule.name]
        # isinstance (rather than `type(...) is list`) also accepts
        # list subclasses, which is a backward-compatible generalization.
        if isinstance(user_value, list):
            results.add(rule.value in user_value)
        else:
            results.add(user_value == rule.value)
    if rule_logic(results):
        return subscription
    return None
def merge_subscriptions_with_preferences(user):
    """Return every subscription as a dict, marking the date slots the
    given user has subscribed to as active."""
    # Set of (subscription_id, datetime_id) pairs the user opted into.
    chosen = {
        (pref.subscription_id, pref.preference_id)
        for pref in user.subscription_preferences
    }
    merged = []
    for subscription in MeetingSubscription.query.all():
        entry = {
            'id': subscription.id,
            'title': subscription.title,
            'office': subscription.office,
            'location': subscription.location,
            'size': subscription.size,
            'timezone': subscription.timezone,
            'rule_logic': subscription.rule_logic,
            'datetime': get_subscription_dates(subscription),
        }
        for date in entry['datetime']:
            if (entry['id'], date['id']) in chosen:
                date['active'] = True
        merged.append(entry)
    return merged
def get_subscription_dates(subscription):
    """Return the subscription's date slots as dicts, sorted by ISO date
    string so the frontend receives them in order. All slots start inactive."""
    dates = []
    for entry in subscription.datetime:
        dates.append({
            'id': entry.id,
            # Stored datetimes are naive UTC; attach tzinfo before formatting.
            'date': entry.datetime.replace(tzinfo=utc).isoformat(),
            'active': False,
        })
    dates.sort(key=lambda item: item['date'])
    return dates
def get_specs_from_subscription(subscription):
    """Build one MeetingSpec per weekly slot of *subscription* for the
    current week.

    Returns ``(week_start, specs)``: ``week_start`` is midnight of this
    week's Monday in the subscription's timezone, ``specs`` are unsaved
    MeetingSpec rows with UTC datetimes.
    """
    specs = []
    for subscription_datetime in subscription.datetime:
        subscription_tz = timezone(subscription.timezone)
        # Midnight on Monday of the current week, in the subscription's zone.
        week_start = datetime.now(subscription_tz) - timedelta(days=datetime.now(subscription_tz).weekday())
        week_start = week_start.replace(
            hour=0, minute=0, second=0, microsecond=0)
        # Stored datetimes are naive UTC; localize, then view in local time.
        subscription_dt = subscription_datetime.datetime.replace(tzinfo=utc).astimezone(subscription_tz)
        # Walk forward from Monday to the weekday this slot meets on.
        week_iter = week_start
        while week_iter.weekday() != subscription_dt.weekday():
            week_iter += timedelta(days=1)
        meeting_datetime = week_iter.replace(
            hour=subscription_dt.hour, minute=subscription_dt.minute
        ).astimezone(utc)
        specs.append(MeetingSpec(meeting_subscription=subscription, datetime=meeting_datetime))
    # NOTE(review): week_start is taken from the last loop iteration and is
    # unbound if subscription.datetime is empty — confirm callers guarantee
    # at least one slot.
    return week_start, specs
def store_specs_from_subscription(subscription, week_start, specs):
    """
    Idempotent function to store meeting specs for this week.
    """
    # If specs already exist for this week, do nothing (returns None).
    existing = MeetingSpec.query.filter(
        MeetingSpec.meeting_subscription_id == subscription.id,
        MeetingSpec.datetime > week_start
    ).all()
    if existing:
        return None
    db.session.add_all(specs)
    db.session.commit()
    return specs
|
{
"content_hash": "412421478411dd6804ea4124ef5dd2df",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 108,
"avg_line_length": 34.093525179856115,
"alnum_prop": 0.6524583245410425,
"repo_name": "Yelp/beans",
"id": "b30f0c1e44b953f6c1ea2f4be5449ef187864629",
"size": "4739",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/yelp_beans/logic/subscription.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "779"
},
{
"name": "Dockerfile",
"bytes": "561"
},
{
"name": "HTML",
"bytes": "11535"
},
{
"name": "JavaScript",
"bytes": "17852"
},
{
"name": "Makefile",
"bytes": "2602"
},
{
"name": "Python",
"bytes": "158790"
}
],
"symlink_target": ""
}
|
import os
import jinja2
# Templates live in ../views relative to this module.
template_dir = os.path.join(os.path.dirname(__file__), '../views')
# Autoescaping is enabled so template variables are HTML-escaped by default.
jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir),
                               autoescape=True)
def render_str(template, **params):
    """Render the named Jinja2 template with the given keyword parameters."""
    return jinja_env.get_template(template).render(params)
|
{
"content_hash": "99aba4dd7f427c806ddd9259b70b5459",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 76,
"avg_line_length": 27.083333333333332,
"alnum_prop": 0.6584615384615384,
"repo_name": "gnodar01/basic-blog",
"id": "fb5485087cac020d6392dc710d6891baf10fab01",
"size": "325",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "util/jinjaenv.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3808"
},
{
"name": "HTML",
"bytes": "6583"
},
{
"name": "JavaScript",
"bytes": "1534"
},
{
"name": "Python",
"bytes": "22404"
}
],
"symlink_target": ""
}
|
import inspect
import sys
from aatest import OperationError
from aatest.events import EV_PROTOCOL_RESPONSE
from aatest.events import NoSuchEvent
from saml2.samlp import NameIDPolicy
__author__ = 'roland'
def set_name_id(oper, args):
    """Copy the NameID of the latest response's assertion into req_args."""
    latest_response = oper.conv.protocol_response[-1]
    oper.req_args["name_id"] = latest_response.assertion.subject.name_id
def set_name_id_policy(oper, args):
    """Build a NameIDPolicy from *args* and store it in req_args."""
    policy = NameIDPolicy(**args)
    oper.req_args["name_id_policy"] = policy
def set_user_credentials(oper, args):
    """Install the test user's login and password on the entity under test."""
    entity = oper.conv.entity
    entity.user = args["user"]
    entity.passwd = args["password"]
def setup_logout(oper, args):
    """Prime req_args for a LogoutRequest from the last protocol response."""
    try:
        resp = oper.conv.events.last_item(EV_PROTOCOL_RESPONSE)
    except NoSuchEvent:
        raise OperationError(
            "No session to log out from found in previous responses")
    subject = resp.assertion.subject
    oper.req_args.update({
        "name_id": subject.name_id,
        "entity_id": resp.assertion.issuer.text,
        "reason": 'tired',
    })
def set_message_param(oper, args):
    """Merge *args* into the operation's outgoing message parameters."""
    for key, value in args.items():
        oper.msg_param[key] = value
def set_entity_id(oper, args):
    """Record the conversation's entity id under req_args['entityid']."""
    entity_id = oper.conv.entity_id
    oper.req_args['entityid'] = entity_id
def factory(name):
    """Look up a set-up function by name in this module; fall back to the
    generic aatest factory when no local function matches."""
    candidate = getattr(sys.modules[__name__], name, None)
    if inspect.isfunction(candidate):
        return candidate
    from aatest.func import factory as aafactory
    return aafactory(name)
|
{
"content_hash": "d99a844c51bd4ff250517082639d6eb1",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 86,
"avg_line_length": 26.641509433962263,
"alnum_prop": 0.6855524079320113,
"repo_name": "identinetics/saml2test2",
"id": "8e6fbeced4bde5daec1e58672fb72ba6b016f216",
"size": "1412",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/saml2test/idp_test/func.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "3130"
},
{
"name": "HTML",
"bytes": "17791"
},
{
"name": "JavaScript",
"bytes": "746"
},
{
"name": "Mako",
"bytes": "28696"
},
{
"name": "Python",
"bytes": "310296"
},
{
"name": "Shell",
"bytes": "263"
}
],
"symlink_target": ""
}
|
'''OpenGL extension SGIX.flush_raster
Automatically generated by the get_gl_extensions script, do not edit!
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_SGIX_flush_raster'
_DEPRECATED = False
# ctypes wrapper for the extension entry point; takes no arguments and
# returns nothing (see doc string below).
glFlushRasterSGIX = platform.createExtensionFunction(
'glFlushRasterSGIX',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(),
doc='glFlushRasterSGIX() -> None',
argNames=(),
deprecated=_DEPRECATED,
)
def glInitFlushRasterSGIX():
    '''Return boolean indicating whether this extension is available'''
    # Checks the runtime GL extension registry for EXTENSION_NAME.
    return extensions.hasGLExtension( EXTENSION_NAME )
|
{
"content_hash": "1268a2aac365807d34ed0c39e8982ad5",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 71,
"avg_line_length": 27.36,
"alnum_prop": 0.7821637426900585,
"repo_name": "Universal-Model-Converter/UMC3.0a",
"id": "2b09528c715c0191eebb743b4be71caa1d2ca531",
"size": "684",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "data/Python/x86/Lib/site-packages/OpenGL/raw/GL/SGIX/flush_raster.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "226"
},
{
"name": "C",
"bytes": "1082640"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "3621086"
},
{
"name": "CSS",
"bytes": "6226"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "FORTRAN",
"bytes": "7795"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "Groff",
"bytes": "5943"
},
{
"name": "HTML",
"bytes": "1196266"
},
{
"name": "Java",
"bytes": "5793"
},
{
"name": "Makefile",
"bytes": "1109"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "33351557"
},
{
"name": "R",
"bytes": "1370"
},
{
"name": "Shell",
"bytes": "6931"
},
{
"name": "Tcl",
"bytes": "2084458"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
}
|
import sys
try:
import cx_Oracle
print "Oracle python drivier is ok!"
except Exception, e:
print e
sys.exit(1)
finally:
sys.exit(1)
|
{
"content_hash": "6ce3ce520bb5b778a56e2516c93b99c6",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 40,
"avg_line_length": 10.533333333333333,
"alnum_prop": 0.6265822784810127,
"repo_name": "stanxii/indexRiver",
"id": "d5f5444e509ad267575ed6de1d42fdfe663ef658",
"size": "194",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "data-lepus/lepus/test_driver_oracle.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "13"
},
{
"name": "Batchfile",
"bytes": "32238"
},
{
"name": "C",
"bytes": "4288378"
},
{
"name": "C++",
"bytes": "242214"
},
{
"name": "CMake",
"bytes": "64332"
},
{
"name": "CSS",
"bytes": "435207"
},
{
"name": "Go",
"bytes": "428010"
},
{
"name": "HTML",
"bytes": "1256491"
},
{
"name": "JavaScript",
"bytes": "9911998"
},
{
"name": "Lua",
"bytes": "18908"
},
{
"name": "M4",
"bytes": "67130"
},
{
"name": "Makefile",
"bytes": "180149"
},
{
"name": "Nginx",
"bytes": "22262"
},
{
"name": "PHP",
"bytes": "3073045"
},
{
"name": "Pascal",
"bytes": "52142"
},
{
"name": "Perl",
"bytes": "36460"
},
{
"name": "Python",
"bytes": "238213"
},
{
"name": "Roff",
"bytes": "1031238"
},
{
"name": "SaltStack",
"bytes": "64969"
},
{
"name": "Scheme",
"bytes": "867"
},
{
"name": "Shell",
"bytes": "972490"
},
{
"name": "Smarty",
"bytes": "3562"
}
],
"symlink_target": ""
}
|
import types
class Wrapper(object):
    """Transparent delegating proxy.

    Any attribute not found on the wrapper itself is looked up on the
    wrapped object, so the proxy can stand in for it almost anywhere.
    """

    def __init__(self, obj):
        self._obj = obj

    def __getattr__(self, attr):
        # __getattr__ only runs after normal lookup fails, so self._obj
        # itself never recurses through here.
        try:
            # Bound methods are returned directly; the original wrapped them
            # in a redundant pass-through closure (which also shadowed the
            # builtin `callable`) with no behavioral gain.
            return getattr(self._obj, attr)
        except AttributeError:
            # Include the attribute name for a useful error message
            # (the original raised a bare AttributeError).
            raise AttributeError(attr)
|
{
"content_hash": "d3d16c3daa10acfff4eace89583e0789",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 55,
"avg_line_length": 26.85,
"alnum_prop": 0.4767225325884544,
"repo_name": "ActiveState/code",
"id": "8bd002dbd8802d0d7002fe65b80aeacf95194066",
"size": "537",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/577215_Object_Wrapper/recipe-577215.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
}
|
import weakref
from .anyQt import QtGui, QtCore
class PSpyderhive(object):
    """Qt panel that lets the user pick a parent ("spyderhive") class from a
    combo box and reports the selection back to its logical parent."""

    def __init__(self, parent, parentwidget):
        # Weak ref so this GUI panel does not keep the controller alive.
        self._parent = weakref.ref(parent)
        pw = parentwidget.wrapwidget()
        self.widget = QtGui.QWidget(pw)
        pw.setMinimumSize(800, 50)
        pw.setTitleBarWidget(QtGui.QWidget())
        self.widget.setMinimumSize(800, 50)  # kludge :(
        xp = QtGui.QSizePolicy.Expanding
        policy = QtGui.QSizePolicy(xp, xp)
        self.widget.setSizePolicy(policy)
        layout = QtGui.QFormLayout()
        self.l_spyderhive = QtGui.QLabel("Parent class")
        self.w_spyderhive = QtGui.QComboBox()
        layout.addRow(self.l_spyderhive, self.w_spyderhive)
        self.widget.setLayout(layout)
        policy = QtGui.QFormLayout.AllNonFixedFieldsGrow
        layout.setFieldGrowthPolicy(policy)
        self.w_spyderhive.currentIndexChanged.connect(self.update)
        self.widget.show()

    def set_candidates(self, candidates):
        # A blank entry is prepended, so candidate i lives at combo index i+1.
        self.candidates = list(candidates)
        self.w_spyderhive.clear()
        self.w_spyderhive.addItems([""] + candidates)

    def set_spyderhive(self, spyderhive):
        if not spyderhive:
            # BUG FIX: the original referenced `index` before assignment in
            # this branch (NameError). Index 0 is the blank entry prepended
            # in set_candidates.
            self.w_spyderhive.setCurrentIndex(0)
        else:
            index = self.candidates.index(spyderhive)
            self.w_spyderhive.setCurrentIndex(index + 1)

    def update(self, index):
        # Combo index is offset by one because of the blank first entry.
        # NOTE(review): index 0 (blank selection) maps to candidates[-1]
        # here — possible off-by-one for an empty selection; confirm.
        self._parent().gui_sets_spyderhive(self.candidates[index - 1])
|
{
"content_hash": "5712fe2adee52b0fc8bbf4695d363881",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 70,
"avg_line_length": 35.41860465116279,
"alnum_prop": 0.6506894287590282,
"repo_name": "agoose77/hivesystem",
"id": "27cacfe1209bc662d142a11a2dafab9774e58d79",
"size": "1523",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hiveguilib/PQt/PSpyderhive.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "2491478"
},
{
"name": "Shell",
"bytes": "1164"
}
],
"symlink_target": ""
}
|
'''
The thin runner is used to manage the salt thin systems.
Salt Thin is a transport-less version of Salt that can be used to run routines
in a standalone way. This runner has tools which generate the standalone salt
system for easy consumption.
'''
# Import Salt libs
import salt.utils.thin
def generate(extra_mods='', overwrite=False):
    '''
    Generate the salt-thin tarball and print the location of the tarball

    Extra modules to bundle (e.g. mako) may be supplied as a comma delimited
    string, and an existing output file can be overwritten by force.

    CLI Example:

    .. code-block:: bash

        salt-run thin.generate
        salt-run thin.generate mako
        salt-run thin.generate mako,wempy 1
        salt-run thin.generate overwrite=1
    '''
    tarball_path = salt.utils.thin.gen_thin(
        __opts__['cachedir'], extra_mods, overwrite)
    print(tarball_path)
|
{
"content_hash": "e4c1f0b238ec49c43f3d3bf3baa50c73",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 80,
"avg_line_length": 31.142857142857142,
"alnum_prop": 0.7087155963302753,
"repo_name": "MadeiraCloud/salt",
"id": "02bdc82212abaef32c8d580682cc59003db15d6c",
"size": "896",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sources/salt/runners/thin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "10058"
},
{
"name": "Makefile",
"bytes": "1815"
},
{
"name": "Python",
"bytes": "4530204"
},
{
"name": "Shell",
"bytes": "169676"
}
],
"symlink_target": ""
}
|
from server.devices.producers import CogenerationUnit, PeakLoadBoiler
class SimulatedCogenerationUnit(CogenerationUnit):
    """The simulation of a cogeneration unit"""

    def __init__(self, device_id, env):
        super(SimulatedCogenerationUnit, self).__init__(device_id, env)
        #: Saves the last powered off time to ensure minimal_off_time
        self.off_time = self.env.now
        #: Device can have fixed workload without internal control. Be aware of overheating!
        self.overwrite_workload = None
        """Efficiency is reached only on maximum workload
        at minimum workload the efficiency is decreased by 15 %"""
        self.max_efficiency_loss = 0.15

    def step(self):
        """Calculates new workload, produce and consume energy for the last time-step"""
        if self.running:
            presumable_workload = self.calculate_new_workload()
            self.set_workload(presumable_workload)
            self.consume_and_produce_energy()
            # Feed the step's energy output into the meter and heat storage.
            self.power_meter.add_energy(self.get_electrical_energy_production())
            self.heat_storage.add_energy(self.get_thermal_energy_production())
        else:
            # NOTE(review): current_*_production is not reset here, so the
            # previous step's values remain readable while off — confirm
            # this is intended.
            self._workload = 0.0

    def calculate_new_workload(self):
        """Selects right operating mode for workload calculation"""
        if self.overwrite_workload is not None:
            # Externally forced workload overrides internal control.
            calculated_workload = float(self.overwrite_workload)
        elif self.off_time > self.env.now:
            # Still inside the enforced off period after the last shutdown.
            calculated_workload = 0.0
        elif self.thermal_driven:
            calculated_workload = self.get_calculated_workload_thermal()
        else:
            calculated_workload = self.get_calculated_workload_electric()
        return calculated_workload

    def get_electrical_energy_production(self):
        """Returns produced electrical energy in kWh during current time-step"""
        # power (kW) * step length (h)
        return self.current_electrical_production * (self.env.step_size / 3600.0)

    def get_thermal_energy_production(self):
        """Returns produced thermal energy in kWh during current time-step"""
        return self.current_thermal_production * (self.env.step_size / 3600.0)

    def get_efficiency_loss_factor(self):
        """Computes efficiency loss on modulation and returns left efficiency in percent [0,1]"""
        # Loss shrinks linearly from max_efficiency_loss at minimal workload
        # to zero at full workload.
        if self._workload == self.config['minimal_workload']:
            return 1.0 - self.max_efficiency_loss
        relative_workload = (self._workload - self.config['minimal_workload']) \
            / (1.0 - self.config['minimal_workload'])
        return 1.0 - self.max_efficiency_loss * (1.0 - relative_workload)

    def get_calculated_workload_thermal(self):
        """Returns workload for thermal driven mode"""
        max_thermal_power = self.config['thermal_efficiency'] * self.config['max_gas_input']
        # Lowest effective thermal output, accounting for modulation loss.
        min_thermal_power = max_thermal_power * self.config['minimal_workload'] \
            * (1.0 - self.max_efficiency_loss)
        demand = self.heat_storage.get_required_energy()
        # Run at least at the minimal effective power, at most at 100 %.
        relative_demand = max(demand, min_thermal_power) / max_thermal_power
        return min(relative_demand, 1.0)

    def get_calculated_workload_electric(self):
        """Returns workload for electrical driven mode"""
        # Do not produce once the storage reached its target temperature.
        if self.heat_storage.get_temperature() >= self.heat_storage.target_temperature:
            return 0.0
        max_electric_power = self.config['electrical_efficiency'] * self.config['max_gas_input']
        min_electric_power = max_electric_power * self.config['minimal_workload'] \
            * (1.0 - self.max_efficiency_loss)
        demand = self.power_meter.current_power_consum
        relative_demand = max(demand, min_electric_power) / max_electric_power
        return min(relative_demand, 1.0)

    def set_workload(self, calculated_workload):
        """Sets given workload, detects power-ons and tracks operating time

        :param float calculated_workload: new workload for the next time-step
        """
        old_workload = self._workload
        # make sure that config['minimal_workload'] <= workload <= 1.0 or workload = 0
        if calculated_workload >= self.config['minimal_workload']:
            # detect if power has been turned on
            if old_workload == 0:
                self.power_on_count += 1
            self.total_hours_of_operation += self.env.step_size / 3600.0
            # check range because of external overwrite_workload
            self._workload = max(min(calculated_workload, 1.0), 0.0)
        else:
            self._workload = 0.0
            # Start the enforced cool-down once, at the moment of shutdown.
            if self.off_time <= self.env.now:
                self.off_time = self.env.now + self.config['minimal_off_time']

    def consume_and_produce_energy(self):
        """Updates currently consumed and produced energy"""
        self.current_gas_consumption = self._workload * self.config['max_gas_input']
        self.current_electrical_production = self.current_gas_consumption * \
            self.config['electrical_efficiency'] * self.get_efficiency_loss_factor()
        self.current_thermal_production = self.current_gas_consumption * \
            self.config['thermal_efficiency'] * self.get_efficiency_loss_factor()
        # Accumulate totals in kWh (power * hours).
        self.total_gas_consumption += self.current_gas_consumption * \
            (self.env.step_size / 3600.0)
        self.total_thermal_production += self.current_thermal_production * \
            (self.env.step_size / 3600.0)
        self.total_electrical_production += self.current_electrical_production * \
            (self.env.step_size / 3600.0)
class SimulatedPeakLoadBoiler(PeakLoadBoiler):
    """Simulated peak load boiler that tops up the heat storage on demand."""

    def __init__(self, device_id, env):
        super(SimulatedPeakLoadBoiler, self).__init__(device_id, env)
        # Timestamp before which the boiler must stay off (3 min cool-down).
        self.off_time = self.env.now
        # Externally forced workload; disables internal control when set.
        # Be aware of overheating!
        self.overwrite_workload = None

    def step(self):
        """Advance the simulation by one time-step."""
        if not self.running:
            self._workload = 0.0
            return
        self.calculate_workload()
        self.consume_and_produce_energy()
        self.heat_storage.add_energy(self.get_thermal_energy_production())

    def calculate_workload(self):
        """Toggle the boiler on/off; track power-ons and operating hours."""
        step_hours = self.env.step_size / 3600.0
        if self.overwrite_workload is not None:
            self._workload = float(self.overwrite_workload)
            self.total_hours_of_operation += step_hours
            return
        if self.heat_storage.undersupplied() and self.off_time <= self.env.now:
            # Storage needs heat and the cool-down has elapsed: run flat out.
            if self._workload == 0.0:
                self.power_on_count += 1
            self.total_hours_of_operation += step_hours
            self._workload = 1.0
        elif self.current_thermal_production >= self.heat_storage.get_required_energy():
            # Demand satisfied: switch off and start the 3-minute cool-down.
            self._workload = 0.0
            if self.off_time <= self.env.now:
                self.off_time = self.env.now + 3 * 60.0

    def consume_and_produce_energy(self):
        """Update current and cumulative gas consumption / heat production."""
        step_hours = self.env.step_size / 3600.0
        self.current_gas_consumption = self._workload * self.config['max_gas_input']
        self.current_thermal_production = (
            self.current_gas_consumption * self.config['thermal_efficiency'])
        self.total_gas_consumption += self.current_gas_consumption * step_hours
        self.total_thermal_production += self.current_thermal_production * step_hours

    def get_thermal_energy_production(self):
        """Thermal energy in kWh produced during the current time-step."""
        return self.current_thermal_production * (self.env.step_size / 3600.0)
|
{
"content_hash": "6effb7c8ba2b9406b7b7d4f5f10b8952",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 103,
"avg_line_length": 48.208333333333336,
"alnum_prop": 0.6402024941350785,
"repo_name": "SEC-i/ecoControl",
"id": "64fb61a029ecaecccae6903e2c0a0967303391ca",
"size": "8099",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/forecasting/simulation/devices/producers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "866"
},
{
"name": "JavaScript",
"bytes": "88082"
},
{
"name": "Python",
"bytes": "410030"
},
{
"name": "Shell",
"bytes": "8843"
}
],
"symlink_target": ""
}
|
import numpy as np
import random
# I implemented Schmidhuber's "Compressed Network Search" but didn't use it.
# Needed for the compress/decompress functions.
#from scipy.fftpack import dct
import json
import sys
import config
from env import make_env
import time
import os
from gym.wrappers import Monitor
from nn import sigmoid, relu, passthru, softmax, sample, RNNModel
np.set_printoptions(precision=3, threshold=20, linewidth=200)

# Defaults; both values are overridden from JSON config files just below.
PEEK_PROB = 0.0
SIMPLE_MODE = False

with open("peek_prob.json") as f:
  PEEK_PROB = json.load(f)
with open("simple_mode.json") as f:
  SIMPLE_MODE = json.load(f)

# Evaluation / rendering switches used when running this module directly.
final_mode = False
render_mode = True
RENDER_DELAY = False
record_video = False
MEAN_MODE = False

record_rgb = False

# imageio is only needed when frames are dumped as RGB images.
if record_rgb:
  import imageio
def compress_2d(w, shape=None):
  """2-D DCT-II compression: keep the top-left `shape` block of coefficients.

  NOTE: requires `dct` (scipy.fftpack); that import is currently commented
  out at the top of the file because this code path is unused.
  """
  keep = shape if shape else w.shape
  coeffs = dct(dct(w, axis=0, type=2, norm='ortho'), axis=1, type=2, norm='ortho')
  return coeffs[0:keep[0], 0:keep[1]]
def decompress_2d(c, shape):
  """Inverse of compress_2d: zero-pad coefficients to `shape`, then apply
  the 2-D DCT-III (inverse of the orthonormal DCT-II)."""
  padded = np.zeros(shape)
  padded[0:c.shape[0], 0:c.shape[1]] = c
  return dct(dct(padded.T, type=3, norm='ortho').T, type=3, norm='ortho')
def compress_1d(w, shape=None, axis=0):
  """1-D DCT-II along `axis`; keep the leading block given by `shape`
  (defaults to the full input shape, i.e. no truncation)."""
  keep = shape if shape else w.shape
  coeffs = dct(w, axis=axis, type=2, norm='ortho')
  return coeffs[0:keep[0], 0:keep[1]]
def decompress_1d(c, shape, axis=0):
  """Inverse of compress_1d: zero-pad to `shape`, then DCT-III along `axis`."""
  padded = np.zeros(shape)
  padded[0:c.shape[0], 0:c.shape[1]] = c
  return dct(padded, axis=axis, type=3, norm='ortho')
def make_model(game):
  """Instantiate the model class matching the game's configuration flags.

  Dispatch order: rnn_mode wins over experimental_mode; plain Model is the
  fallback. Can be extended in the future.
  """
  if game.rnn_mode:
    return RNNModel(game)
  if game.experimental_mode:
    return CustomModel(game)
  return Model(game)
# LSTM in a few lines of numpy
class LSTMCell:
  '''Numpy LSTM cell used for inference only.

  `weight` is the concatenated input+recurrent kernel of shape
  (input_size + hidden_size, 4 * hidden_size); `bias` has shape
  (4 * hidden_size,). Gate order in the packed matrix is i, g, f, o.
  '''
  def __init__(self, input_size, weight, bias, forget_bias=1.0, dropout_keep_prob=0.5, train_mode=True):
    self.input_size=input_size
    self.W_full=weight # np.concatenate((Wxh, Whh), axis=0)
    self.bias=bias
    # BUG FIX: previously hard-coded to 1.0, silently ignoring the
    # forget_bias argument (default behavior is unchanged).
    self.forget_bias=forget_bias
    self.dropout_keep_prob = dropout_keep_prob
    self.train_mode = train_mode
    # hidden size is recovered from the packed bias vector (4 gates).
    self.hidden_size = int(bias.shape[0]/4)

  def __call__(self, x, h, c):
    '''One step; returns (new_h, new_c) for input x and state (h, c).'''
    concat = np.concatenate((x, h), axis=1)
    hidden = np.matmul(concat, self.W_full)+self.bias
    i, g, f, o = np.split(hidden, 4, axis=1)
    i = sigmoid(i)
    g = np.tanh(g)
    f = sigmoid(f+self.forget_bias)
    o = sigmoid(o)
    if self.train_mode:
      # Sample a dropout mask over the candidate activations.
      # BUG FIX: astype(np.int) -> astype(int); the np.int alias was
      # removed in NumPy 1.24 and crashed here on modern NumPy.
      mask = np.array(np.random.rand(self.hidden_size) < self.dropout_keep_prob).astype(int)
      d_g = np.multiply(mask, g)
    else:
      # Inference: scale by the keep probability instead of sampling.
      d_g = self.dropout_keep_prob * g
    new_c = np.multiply(c, f) + np.multiply(d_g, i)
    new_h = np.multiply(np.tanh(new_c), o)
    return new_h, new_c

  def set_dropout_keep_prob(self, dropout_keep_prob=0.5, train_mode=True):
    '''Reconfigure dropout behavior without rebuilding the cell.'''
    self.dropout_keep_prob = dropout_keep_prob
    self.train_mode = train_mode
class RNNWorldModel:
  ''' deterministic LSTM model for cart-pole swing up task '''
  def __init__(self, obs_size=5, action_size=1, hidden_size=20, dropout_keep_prob=0.5, train_mode=True, predict_future=False):
    self.obs_size = obs_size
    self.action_size = action_size
    self.hidden_size = hidden_size
    self.predict_future = predict_future
    if self.predict_future:
      self.shapes = [ (self.obs_size + self.action_size + self.hidden_size, 4*self.hidden_size), # LSTM weights
                      (self.hidden_size, 2) # predict next observation
                    ]
    else:
      self.shapes = [ (self.obs_size + self.action_size + self.hidden_size, 4*self.hidden_size), # LSTM weights
                    ]
    self.dropout_keep_prob = dropout_keep_prob
    self.train_mode = train_mode
    self.weight = []
    self.bias = []
    self.param_count = 0
    idx = 0
    for shape in self.shapes:
      self.weight.append(np.zeros(shape=shape))
      self.bias.append(np.zeros(shape=shape[1]))
      # NOTE(review): np.product is a deprecated alias of np.prod
      # (removed in NumPy 2.0).
      self.param_count += (np.product(shape) + shape[1])
      idx += 1
    # Learned initial hidden/cell state; included in the parameter vector.
    self.init_h = np.zeros((1, self.hidden_size))
    self.init_c = np.zeros((1, self.hidden_size))
    self.h = self.init_h
    self.c = self.init_c
    self.param_count += 2*self.hidden_size
    # NOTE(review): input_size here is obs_size-1+action_size, while
    # set_model_params below passes obs_size+action_size. LSTMCell stores
    # but never reads input_size, so the inconsistency is currently
    # harmless — confirm which is intended.
    self.lstm = LSTMCell(self.obs_size-1 + self.action_size, self.weight[0], self.bias[0], dropout_keep_prob=self.dropout_keep_prob, train_mode=train_mode)

  def set_dropout_keep_prob(self, dropout_keep_prob=0.5, train_mode=True):
    # Propagate the dropout setting into the wrapped LSTM cell.
    self.dropout_keep_prob = dropout_keep_prob
    self.train_mode = train_mode
    self.lstm.set_dropout_keep_prob(dropout_keep_prob=self.dropout_keep_prob, train_mode=train_mode)

  def get_state(self):
    # Concatenated (h, c), shape (1, 2*hidden_size).
    return np.concatenate([self.h, self.c], axis=1)

  def reset_state(self):
    self.h = self.init_h
    self.c = self.init_c

  def update(self, obs, action):
    # Advance the LSTM one step on the concatenated [obs, action] input.
    total_obs = np.concatenate([obs.flatten(), action.flatten()]).reshape((1, self.obs_size+self.action_size))
    self.h, self.c = self.lstm(total_obs, self.h, self.c)

  def set_model_params(self, model_params):
    # Unpack the flat parameter vector: per-shape (weights, bias) chunks
    # first, then the learned initial hidden and cell states.
    pointer = 0
    for i in range(len(self.shapes)):
      w_shape = self.shapes[i]
      b_shape = self.shapes[i][1]
      s_w = np.product(w_shape)
      s = s_w + b_shape
      chunk = np.array(model_params[pointer:pointer+s])
      self.weight[i] = chunk[:s_w].reshape(w_shape)
      self.bias[i] = chunk[s_w:].reshape(b_shape)
      pointer += s
    # rnn states
    s = self.hidden_size
    self.init_h = model_params[pointer:pointer+s].reshape((1, self.hidden_size))
    pointer += s
    self.init_c = model_params[pointer:pointer+s].reshape((1, self.hidden_size))
    self.reset_state()
    # Rebuild the cell so it references the freshly assigned weights.
    self.lstm = LSTMCell(self.obs_size + self.action_size, self.weight[0], self.bias[0], dropout_keep_prob=self.dropout_keep_prob, train_mode=self.train_mode)

  def load_model(self, filename):
    # Load a JSON file whose first element is the flat parameter vector.
    with open(filename) as f:
      data = json.load(f)
    print('loading file %s' % (filename))
    self.data = data
    model_params = np.array(data[0]) # assuming other stuff is in data
    self.set_model_params(model_params)

  def get_random_model_params(self, stdev=0.1):
    # Gaussian initialization of the full parameter vector.
    return np.random.randn(self.param_count)*stdev
class RNNModel:
  ''' learning the best feed forward model for cartpole-swingup '''
  # NOTE(review): this shadows the RNNModel imported from `nn` at the top
  # of the file; later references resolve to this class — confirm intended.
  def __init__(self, game):
    self.env_name = game.env_name
    self.layer_1 = game.layers[0]
    self.layer_2 = game.layers[1]
    self.world_hidden_size = self.layer_1
    self.agent_hidden_size = self.layer_2
    self.rnn_mode = True
    self.experimental_mode = False
    self.input_size = game.input_size
    self.output_size = game.output_size
    self.render_mode = False
    # Keep-probability 1.0 disables dropout in the world model.
    self.dropout_keep_prob = 1.0
    self.world_model = RNNWorldModel(obs_size=self.input_size, action_size=self.output_size, hidden_size=self.world_hidden_size, dropout_keep_prob=self.dropout_keep_prob, predict_future=False)
    # The agent sees the raw observation plus the world model's (h, c) state.
    self.agent = Agent(layer_1=self.agent_hidden_size, layer_2=0, input_size=self.input_size+self.world_hidden_size*2, output_size=self.output_size)
    self.param_count = self.world_model.param_count + self.agent.param_count

  def reset(self):
    ''' solve for best weights for agent, aka the inner-loop '''
    self.world_model.reset_state()

  def make_env(self, seed=-1, render_mode=False):
    # Create the gym environment this model is evaluated in.
    self.render_mode = render_mode
    self.env = make_env(self.env_name, seed=seed, render_mode=render_mode)

  def get_action(self, obs):
    # Act on [obs, world-model state], then advance the world model with
    # the chosen action.
    total_obs = np.concatenate([np.array(obs).flatten(), self.world_model.get_state().flatten()])
    action = self.agent.get_action(total_obs)
    self.world_model.update(obs, action)
    return action

  def set_model_params(self, model_params):
    # Flat parameter layout: world model first, then the agent.
    world_model_params = model_params[:self.world_model.param_count]
    agent_model_params = model_params[self.world_model.param_count:self.world_model.param_count+self.agent.param_count]
    self.world_model.set_model_params(world_model_params)
    self.agent.set_model_params(agent_model_params)

  def load_model(self, filename):
    # Load a JSON file whose first element is the flat parameter vector.
    with open(filename) as f:
      data = json.load(f)
    print('loading file %s' % (filename))
    self.data = data
    model_params = np.array(data[0]) # assuming other stuff is in data
    self.set_model_params(model_params)

  def get_random_model_params(self, stdev=0.1):
    # Gaussian initialization of the full parameter vector.
    return np.random.randn(self.param_count)*stdev
class Agent:
    """Small feedforward policy acting on a world-model-augmented input.

    With ``layer_2 == 0`` the topology is input -> layer_1 -> output;
    otherwise input -> layer_1 -> layer_2 -> output.  Every layer uses
    tanh, so outputs are bounded in [-1, 1] (assumption carried over
    from the original implementation).
    """

    def __init__(self, layer_1=10, layer_2=5, input_size=5+20*2, output_size=1):
        self.layer_1 = layer_1
        self.layer_2 = layer_2
        self.input_size = input_size
        self.output_size = output_size  # action space
        if layer_2 == 0:
            self.shapes = [(self.input_size, self.layer_1),
                           (self.layer_1, self.output_size)]
        else:
            self.shapes = [(self.input_size, self.layer_1),
                           (self.layer_1, self.layer_2),
                           (self.layer_2, self.output_size)]
        self.activations = [np.tanh, np.tanh, np.tanh]
        self.weight = []
        self.bias = []
        self.param_count = 0
        for n_in, n_out in self.shapes:
            self.weight.append(np.zeros(shape=(n_in, n_out)))
            self.bias.append(np.zeros(shape=n_out))
            self.param_count += n_in * n_out + n_out

    def get_action(self, x):
        """Forward pass; returns the activation of the last layer."""
        h = np.array(x).flatten()
        for w, b, act in zip(self.weight, self.bias, self.activations):
            h = act(np.matmul(h, w) + b)
        return h

    def set_model_params(self, model_params):
        """Unpack a flat vector layer by layer: weights first, then bias."""
        cursor = 0
        for i, (n_in, n_out) in enumerate(self.shapes):
            n_w = n_in * n_out
            block = np.array(model_params[cursor:cursor + n_w + n_out])
            self.weight[i] = block[:n_w].reshape((n_in, n_out))
            self.bias[i] = block[n_w:].reshape(n_out)
            cursor += n_w + n_out

    def load_model(self, filename):
        """Load parameters from a JSON checkpoint file."""
        with open(filename) as fh:
            payload = json.load(fh)
        print('loading file %s' % (filename))
        self.data = payload
        self.set_model_params(np.array(payload[0]))

    def get_random_model_params(self, stdev=0.1):
        """Sample a random flat parameter vector ~ N(0, stdev^2)."""
        return np.random.randn(self.param_count) * stdev
def _clip(x, lo=0.0, hi=1.0):
return np.minimum(np.maximum(x, lo), hi)
class SimpleWorldModel:
    """Deterministic world model for the cart-pole swing-up task.

    A two-layer network (tanh hidden layer, then `passthru` output --
    `passthru` is defined elsewhere in this module, presumably identity)
    that predicts the rate of change of the observation; the next
    observation is obs + prediction * dt (an Euler step).
    """
    def __init__(self, obs_size=16, action_size=3, hidden_size=10):
        self.obs_size = obs_size
        self.action_size = action_size
        self.hidden_size = hidden_size
        # (obs + action) -> hidden -> obs-sized delta
        self.shapes = [ (self.obs_size + self.action_size, self.hidden_size),
                        (self.hidden_size, self.obs_size)]
        self.weight = []
        self.bias = []
        self.param_count = 0
        self.dt = 1.0 / 50.0 # 50 fps
        # first-layer activations from the most recent forward pass
        self.hidden_state = np.zeros(self.hidden_size)
        idx = 0
        for shape in self.shapes:
            self.weight.append(np.zeros(shape=shape))
            self.bias.append(np.zeros(shape=shape[1]))
            self.param_count += (np.product(shape) + shape[1])
            idx += 1

    def reset(self):
        # clear the saved hidden state for a new episode
        self.hidden_state = np.zeros(self.hidden_size)

    def predict_next_obs(self, obs, action):
        """Predict the next observation via a residual Euler step.

        Also refreshes ``self.hidden_state`` as a side effect.
        """
        obs = np.array(obs).flatten()
        # remap the raw 3-component action before feeding the network
        new_action = np.array( [0.0, 0.0, 0.0] )
        new_action[0] = _clip(action[0], lo=-1.0, hi=+1.0)
        new_action[1] = _clip(action[1], lo=-1.0, hi=+1.0)
        # NOTE(review): the next line immediately overwrites the clipped
        # value above with an UNCLIPPED rescale of action[1]; it looks
        # like (new_action[1]+1.0)/2.0 was intended -- confirm before
        # changing, since "fixing" it alters model behavior.
        new_action[1] = (action[1]+1.0) / 2.0
        new_action[2] = _clip(action[2])
        h = np.concatenate([obs, new_action.flatten()])
        activations = [np.tanh, passthru]  # passthru defined elsewhere in this module
        num_layers = 2
        for i in range(num_layers):
            w = self.weight[i]
            b = self.bias[i]
            h = np.matmul(h, w) + b
            h = activations[i](h)
            if (i == 0): # save the hidden state
                self.hidden_state = h.flatten()
        prediction = obs + h.flatten() * self.dt # residual
        return prediction

    def set_model_params(self, model_params):
        # unpack the flat vector layer by layer: weights first, then bias
        pointer = 0
        for i in range(len(self.shapes)):
            w_shape = self.shapes[i]
            b_shape = self.shapes[i][1]
            s_w = np.product(w_shape)
            s = s_w + b_shape
            chunk = np.array(model_params[pointer:pointer+s])
            self.weight[i] = chunk[:s_w].reshape(w_shape)
            self.bias[i] = chunk[s_w:].reshape(b_shape)
            pointer += s

    def load_model(self, filename):
        # load a JSON checkpoint; element 0 is the flat parameter vector
        with open(filename) as f:
            data = json.load(f)
        print('loading file %s' % (filename))
        self.data = data
        model_params = np.array(data[0]) # assuming other stuff is in data
        self.set_model_params(model_params)

    def get_random_model_params(self, stdev=0.1):
        # random flat parameter vector ~ N(0, stdev^2)
        return np.random.randn(self.param_count)*stdev
class CustomModel:
    """World-model + agent combo for vae_racing ("learning to predict").

    With probability ``1 - peek_prob`` the agent acts on the world
    model's *prediction* of the current observation rather than the real
    one.  ``PEEK_PROB`` / ``SIMPLE_MODE`` / ``record_rgb`` are
    module-level globals defined elsewhere in this file.
    """
    def __init__(self, game):
        self.output_noise = game.output_noise
        self.env_name = game.env_name
        self.world_hidden_size = game.layers[0]
        self.agent_hidden_size = game.layers[1]
        self.rnn_mode = False # in the future will be useful
        self.experimental_mode = True
        self.peek_prob = PEEK_PROB
        self.simple_mode = SIMPLE_MODE
        # peek == 1 means the CURRENT step saw the real observation;
        # peek_next is the decision already made for the next step
        self.peek_next = 1
        self.peek = 1
        self.counter = 0  # frame counter, used to name dumped images
        self.input_size = game.input_size # observation size
        self.output_size = game.output_size # action size
        self.render_mode = False
        self.world_model = SimpleWorldModel(obs_size=self.input_size, action_size=self.output_size, hidden_size=self.world_hidden_size)
        agent_input_size = self.input_size+self.world_hidden_size
        if self.simple_mode:
            # simple mode: agent sees only the raw observation
            agent_input_size = self.input_size
        self.agent = Agent(layer_1=self.agent_hidden_size, layer_2=0, input_size=agent_input_size, output_size=self.output_size)
        self.param_count = self.world_model.param_count + self.agent.param_count
        self.prev_action = np.zeros(self.output_size)
        self.prev_prediction = None
        #self.temp_obs = np.zeros(16)
        #self.temp_predict = np.zeros(16)

    def reset(self):
        # new episode: forget the last prediction and start by peeking
        self.prev_prediction = None
        self.peek_next = 1
        self.peek = 1
        self.counter = 0
        self.world_model.reset()

    def make_env(self, seed=-1, render_mode=False):
        # make_env is a module-level factory defined elsewhere
        self.render_mode = render_mode
        self.env = make_env(self.env_name, seed=seed, render_mode=render_mode)

    def get_action(self, real_obs, t=0, mean_mode=False):
        """Choose an action; may substitute the model's own prediction
        for the real observation when no "peek" was granted."""
        obs = real_obs
        use_prediction = False
        self.counter += 1 # for tracking frames in case we want to dump out rgb images
        if (self.prev_prediction is not None) and (self.peek_next == 0):
            obs = self.prev_prediction
            use_prediction = True
        if record_rgb:
            # debug visualization: decode latent vectors through the VAE
            # and dump real / predicted / reconstructed frames to disk
            video_path = "learning_vae_racing_dump"
            video_path_orig = "learning_vae_racing_dump/orig"
            if not os.path.exists(video_path):
                os.makedirs(video_path)
            if not os.path.exists(video_path_orig):
                os.makedirs(video_path_orig)
            img = self.env.vae.decode(obs.reshape(1, self.env.z_size)) * 255.
            img = np.round(img).astype(np.uint8)
            img = img.reshape(64, 64, 3)
            orig_img = img
            real_img = np.round(self.env.real_frame * 255.).astype(np.uint8).reshape(64, 64, 3)
            extension = ".real.png"
            if use_prediction:
                extension = ".predict.png"
            #img = 255-img
            video_extension = ".truth.png"
            #total_img = np.concatenate([real_img, img], axis=1)
            imageio.imwrite(os.path.join(video_path_orig,(format(self.counter, "05d")+extension)), orig_img)
            imageio.imwrite(os.path.join(video_path,("b_"+format(self.counter, "05d")+video_extension)), real_img)
            recon_img = self.env.vae.decode(self.env.real_z.reshape(1, self.env.z_size)) * 255.
            recon_img = np.round(recon_img).astype(np.uint8)
            recon_img = recon_img.reshape(64, 64, 3)
            imageio.imwrite(os.path.join(video_path,("a_"+format(self.counter, "05d")+video_extension)), recon_img)
        #if (self.prev_prediction is not None) and render_mode:
        #print("use_prediction", use_prediction)
        #print("rms change real_obs", np.sqrt(np.mean(np.square(np.array(real_obs) - self.temp_obs))))
        #print("rms change prediction", np.sqrt(np.mean(np.square(np.array(self.prev_prediction) - self.temp_predict))))
        #self.temp_obs = real_obs
        #self.temp_predict = self.prev_prediction
        if self.simple_mode:
            agent_obs = obs
        else:
            prev_hidden = self.world_model.hidden_state.flatten()
            agent_obs = np.concatenate([obs.flatten(), prev_hidden]) # use previous hidden state
        action = self.agent.get_action(agent_obs)
        # decide now whether the NEXT step is allowed to see the real obs
        self.peek = self.peek_next
        self.peek_next = 0
        if (np.random.rand() < self.peek_prob):
            self.peek_next = 1
        self.prev_prediction = self.world_model.predict_next_obs(obs, action) # update hidden state, and predict next frame
        return action

    def set_model_params(self, model_params):
        # split the flat vector: world model parameters first, then agent
        world_params = model_params[:self.world_model.param_count]
        agent_params = model_params[self.world_model.param_count:self.world_model.param_count+self.agent.param_count]
        assert len(world_params) == self.world_model.param_count, "inconsistent world model params"
        assert len(agent_params) == self.agent.param_count, "inconsistent agent params"
        self.world_model.set_model_params(world_params)
        self.agent.set_model_params(agent_params)

    def load_model(self, filename):
        # load a JSON checkpoint; element 0 is the flat parameter vector
        with open(filename) as f:
            data = json.load(f)
        print('loading file %s' % (filename))
        self.data = data
        model_params = np.array(data[0]) # assuming other stuff is in data
        self.set_model_params(model_params)

    def get_random_model_params(self, stdev=0.1):
        # random flat parameter vector ~ N(0, stdev^2)
        return np.random.randn(self.param_count)*stdev
class Model:
    """Simple feedforward policy network.

    Layer sizes come from ``game.layers``; any layer can carry learned
    gaussian output noise (``game.output_noise``), and the input can be
    augmented with a sinusoid-free time signal t / time_factor when
    ``game.time_factor > 0``.  ``relu`` / ``sigmoid`` / ``softmax`` /
    ``passthru`` / ``sample`` / ``make_env`` are defined elsewhere in
    this module.
    """

    def __init__(self, game):
        self.output_noise = game.output_noise
        self.env_name = game.env_name
        self.layer_1 = game.layers[0]
        self.layer_2 = game.layers[1]
        self.rnn_mode = False  # in the future will be useful
        self.experimental_mode = False
        self.time_input = 0  # 1 => append a time signal to the observation
        self.sigma_bias = game.noise_bias  # bias in stdev of output
        self.sigma_factor = 0.5  # multiplicative in stdev of output
        if game.time_factor > 0:
            self.time_factor = float(game.time_factor)
            self.time_input = 1
        self.input_size = game.input_size
        self.output_size = game.output_size
        in_dim = self.input_size + self.time_input
        if self.layer_2 > 0:
            self.shapes = [(in_dim, self.layer_1),
                           (self.layer_1, self.layer_2),
                           (self.layer_2, self.output_size)]
        elif self.layer_1 == 0 and self.layer_2 == 0:
            # no hidden layers: a single linear map
            self.shapes = [(in_dim, self.output_size)]
        elif self.layer_1 > 0 and self.layer_2 == 0:
            self.shapes = [(in_dim, self.layer_1),
                           (self.layer_1, self.output_size)]
        else:
            assert False, "invalid layer_2"
        self.sample_output = False
        if game.activation == 'relu':
            self.activations = [relu, relu, passthru]
        elif game.activation == 'sigmoid':
            self.activations = [np.tanh, np.tanh, sigmoid]
        elif game.activation == 'softmax':
            self.activations = [np.tanh, np.tanh, softmax]
            self.sample_output = True  # output treated as a distribution
        elif game.activation == 'passthru':
            self.activations = [np.tanh, np.tanh, passthru]
        else:
            self.activations = [np.tanh, np.tanh, np.tanh]
        self.weight = []
        self.bias = []
        self.bias_log_std = []
        self.bias_std = []
        self.param_count = 0
        for idx, shape in enumerate(self.shapes):
            self.weight.append(np.zeros(shape=shape))
            self.bias.append(np.zeros(shape=shape[1]))
            self.param_count += (np.product(shape) + shape[1])
            if self.output_noise[idx]:
                # one learned log-std per output unit of this layer
                self.param_count += shape[1]
                log_std = np.zeros(shape=shape[1])
                self.bias_log_std.append(log_std)
                self.bias_std.append(
                    np.exp(self.sigma_factor * log_std + self.sigma_bias))
        self.render_mode = False

    def make_env(self, seed=-1, render_mode=False):
        """Instantiate the gym environment (make_env defined elsewhere)."""
        self.render_mode = render_mode
        self.env = make_env(self.env_name, seed=seed, render_mode=render_mode)

    def get_action(self, x, t=0, mean_mode=False):
        """Run the network on observation ``x``.

        ``mean_mode=True`` suppresses the learned output noise.
        """
        h = np.array(x).flatten()
        if self.time_input == 1:
            h = np.concatenate([h, [float(t) / self.time_factor]])
        for i, (w, b) in enumerate(zip(self.weight, self.bias)):
            h = np.matmul(h, w) + b
            if (self.output_noise[i] and (not mean_mode)):
                # inject gaussian noise before the activation
                h += np.random.randn(self.shapes[i][1]) * self.bias_std[i]
            h = self.activations[i](h)
        if self.sample_output:
            h = sample(h)  # sample() defined elsewhere in this module
        return h

    def set_model_params(self, model_params):
        """Unpack a flat vector: per layer weights, bias, then (optional)
        log-std parameters for noisy layers."""
        pointer = 0
        for i, w_shape in enumerate(self.shapes):
            b_shape = w_shape[1]
            n_w = np.product(w_shape)
            n_chunk = n_w + b_shape
            chunk = np.array(model_params[pointer:pointer + n_chunk])
            self.weight[i] = chunk[:n_w].reshape(w_shape)
            self.bias[i] = chunk[n_w:].reshape(b_shape)
            # (legacy Z_H_HIDDEN checkpoints stored the bias first:
            #   self.weight[i] = chunk[s_b:].reshape(w_shape)
            #   self.bias[i] = chunk[:s_b].reshape(b_shape))
            pointer += n_chunk
            if self.output_noise[i]:
                self.bias_log_std[i] = np.array(
                    model_params[pointer:pointer + b_shape])
                self.bias_std[i] = np.exp(
                    self.sigma_factor * self.bias_log_std[i] + self.sigma_bias)
                if self.render_mode:
                    print("bias_std, layer", i, self.bias_std[i])
                pointer += b_shape

    def load_model(self, filename):
        """Load parameters from a JSON checkpoint file."""
        with open(filename) as fh:
            payload = json.load(fh)
        print('loading file %s' % (filename))
        self.data = payload
        self.set_model_params(np.array(payload[0]))

    def get_random_model_params(self, stdev=0.1):
        """Sample a random flat parameter vector ~ N(0, stdev^2)."""
        return np.random.randn(self.param_count) * stdev
def evaluate(model):
    """Average episode reward over 100 runs, per the evaluation rules.

    The environment is seeded with 0 once up front so scores are
    comparable across calls.
    """
    model.env.seed(0)
    n_trials = 100
    total_reward = 0.0
    for _ in range(n_trials):
        reward, t = simulate(model, train_mode=False, render_mode=False,
                             num_episode=1)
        total_reward += reward[0]
    return total_reward / float(n_trials)
def compress_input_dct(obs):
    """Compress an HxWxC uint8 image to a flat 8x8 DCT representation.

    Each channel is scaled to [0, 1], DCT-compressed down to 8x8 via
    ``compress_2d`` (defined elsewhere), and the per-channel results are
    averaged before flattening.
    """
    new_obs = np.zeros((8, 8))
    for i in range(obs.shape[2]):
        # BUG FIX: was `new_obs = +compress_2d(...)` -- a unary plus that
        # overwrote the accumulator on every channel instead of summing.
        # The divide-by-channel-count below only makes sense for a sum.
        new_obs += compress_2d(obs[:, :, i] / 255., shape=(8, 8))
    new_obs /= float(obs.shape[2])
    return new_obs.flatten()
def simulate(model, train_mode=False, render_mode=True, num_episode=5, seed=-1, max_len=-1):
    """Roll out ``model`` in its environment for ``num_episode`` episodes.

    Returns ``(reward_list, t_list)``: per-episode total reward and the
    step index reached.  ``RENDER_DELAY`` and ``MEAN_MODE`` are
    module-level flags defined elsewhere in this file.
    """
    reward_list = []
    t_list = []
    is_biped = (model.env_name.find("BipedalWalker") >= 0)
    orig_mode = True # hack for bipedhard's reward augmentation during training (set to false for hack)
    if is_biped:
        orig_mode = False
    dct_compress_mode = False  # hard-disabled; see compress_input_dct
    max_episode_length = 1000
    if train_mode and max_len > 0:
        # training may cap episodes below the default 1000 steps
        if max_len < max_episode_length:
            max_episode_length = max_len
    if (seed >= 0):
        # seed python, numpy and the environment together for determinism
        random.seed(seed)
        np.random.seed(seed)
        model.env.seed(seed)
    for episode in range(num_episode):
        # both RNN and experimental models carry state across steps
        if model.rnn_mode:
            model.reset()
        if model.experimental_mode:
            model.reset()
        obs = model.env.reset()
        if dct_compress_mode and obs is not None:
            obs = compress_input_dct(obs)
        if obs is None:
            # some envs return None on reset; substitute a zero observation
            obs = np.zeros(model.input_size)
        total_reward = 0.0
        stumbled = False # hack for bipedhard's reward augmentation during training. turned off.
        reward_threshold = 300 # consider we have won if we got more than this
        num_glimpse = 0  # counts steps where the model saw the real obs
        for t in range(max_episode_length):
            if render_mode:
                model.env.render("human")
                if RENDER_DELAY:
                    time.sleep(0.01)
            if model.rnn_mode:
                action = model.get_action(obs)
            else:
                if MEAN_MODE:
                    # evaluation uses the mean action; training keeps noise
                    action = model.get_action(obs, t=t, mean_mode=(not train_mode))
                else:
                    action = model.get_action(obs, t=t, mean_mode=False)
            prev_obs = obs
            obs, reward, done, info = model.env.step(action)
            if model.experimental_mode: # augment reward with prob
                num_glimpse += model.peek
            if dct_compress_mode:
                obs = compress_input_dct(obs)
            if train_mode and reward == -100 and (not orig_mode):
                # hack for bipedhard's reward augmentation during training. turned off.
                reward = 0
                stumbled = True
            if (render_mode):
                pass
                #print("action", action, "step reward", reward)
                #print("step reward", reward)
            total_reward += reward
            if done:
                if train_mode and (not stumbled) and (total_reward > reward_threshold) and (not orig_mode):
                    # hack for bipedhard's reward augmentation during training. turned off.
                    total_reward += 100
                break
        if render_mode:
            print("reward", total_reward, "timesteps", t)
            if model.experimental_mode:
                print("percent glimpse", float(num_glimpse)/float(t+1.0))
        reward_list.append(total_reward)
        t_list.append(t)
    return reward_list, t_list
def main():
    """CLI entry: ``python model.py gamename [model.json] [seed]``.

    Loads a checkpoint (or a random parameter vector when none is given)
    and runs it.  Behavior is further controlled by the module-level
    flags ``final_mode``, ``render_mode`` and ``record_video`` defined
    elsewhere in this file.
    """
    global RENDER_DELAY
    assert len(sys.argv) > 1, 'python model.py gamename path_to_mode.json'
    gamename = sys.argv[1]
    if gamename.startswith("bullet"):
        # bullet envs get the per-frame sleep in simulate()
        RENDER_DELAY = True
    use_model = False
    game = config.games[gamename]
    if len(sys.argv) > 2:
        use_model = True
        filename = sys.argv[2]
        print("filename", filename)
    the_seed = 0
    if len(sys.argv) > 3:
        the_seed = int(sys.argv[3])
        print("seed", the_seed)
    model = make_model(game)
    print('model size', model.param_count)
    model.make_env(render_mode=render_mode)
    if use_model:
        model.load_model(filename)
    else:
        # no checkpoint supplied: evaluate a random parameter vector
        params = model.get_random_model_params(stdev=0.5)
        model.set_model_params(params)
    if final_mode:
        # 100-episode evaluation with distinct, deterministic seeds
        rewards = []
        for i in range(100):
            reward, steps_taken = simulate(model, train_mode=False, render_mode=False, num_episode=1, seed=the_seed+i)
            print(i, reward)
            rewards.append(reward[0])
        print("seed", the_seed, "average_reward", np.mean(rewards), "standard_deviation", np.std(rewards))
    else:
        if record_video:
            model.env = Monitor(model.env, directory='/tmp/'+gamename,video_callable=lambda episode_id: True, write_upon_reset=True, force=True)
        for i in range(1):
            reward, steps_taken = simulate(model,
                train_mode=False, render_mode=render_mode, num_episode=1, seed=the_seed+i)
            print ("terminal reward", reward, "average steps taken", np.mean(steps_taken)+1)
# Script entry point: `python model.py gamename [path_to_model.json] [seed]`.
if __name__ == "__main__":
    main()
|
{
"content_hash": "baba7174b0d8a5bb92d74f60dd23aaeb",
"timestamp": "",
"source": "github",
"line_count": 815,
"max_line_length": 192,
"avg_line_length": 32.7239263803681,
"alnum_prop": 0.6376452943382077,
"repo_name": "google/brain-tokyo-workshop",
"id": "ca479e429cbe5ff2d53629dba541578e9644ef7b",
"size": "26670",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "learntopredict/carracing/model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "671"
},
{
"name": "HTML",
"bytes": "1031"
},
{
"name": "Jupyter Notebook",
"bytes": "47079538"
},
{
"name": "Python",
"bytes": "1037153"
},
{
"name": "Shell",
"bytes": "6053"
}
],
"symlink_target": ""
}
|
from rqalpha.events import EVENT, Event
from rqalpha.utils import run_when_strategy_not_hold
from rqalpha.utils.logger import user_system_log
from rqalpha.utils.i18n import gettext as _
from rqalpha.utils.exception import ModifyExceptionFromType
from rqalpha.execution_context import ExecutionContext
from rqalpha.const import EXECUTION_PHASE, EXC_TYPE
from rqalpha.environment import Environment
class Strategy(object):
    """Wires user-defined strategy callbacks into the rqalpha event bus.

    ``scope`` is the user strategy's namespace; the optional functions
    (init / before_trading / handle_bar / handle_tick / after_trading)
    are looked up by name and registered only when actually defined.
    """

    def __init__(self, event_bus, scope, ucontext):
        self._user_context = ucontext
        self._current_universe = set()
        self._init = scope.get('init', None)
        self._handle_bar = scope.get('handle_bar', None)
        self._handle_tick = scope.get('handle_tick', None)
        func_before_trading = scope.get('before_trading', None)
        if func_before_trading is not None and func_before_trading.__code__.co_argcount > 1:
            # legacy signature before_trading(context, bar_dict): adapt to
            # the one-argument form and warn the user
            self._before_trading = lambda context: func_before_trading(context, None)
            user_system_log.warn(_(u"deprecated parameter[bar_dict] in before_trading function."))
        else:
            self._before_trading = func_before_trading
        self._after_trading = scope.get('after_trading', None)
        # register only the callbacks the user actually defined
        if self._before_trading is not None:
            event_bus.add_listener(EVENT.BEFORE_TRADING, self.before_trading)
        if self._handle_bar is not None:
            event_bus.add_listener(EVENT.BAR, self.handle_bar)
        if self._handle_tick is not None:
            event_bus.add_listener(EVENT.TICK, self.handle_tick)
        if self._after_trading is not None:
            event_bus.add_listener(EVENT.AFTER_TRADING, self.after_trading)
        self._before_day_trading = scope.get('before_day_trading', None)
        self._before_night_trading = scope.get('before_night_trading', None)
        if self._before_day_trading is not None:
            user_system_log.warn(_(u"[deprecated] before_day_trading is no longer used. use before_trading instead."))
        if self._before_night_trading is not None:
            user_system_log.warn(_(u"[deprecated] before_night_trading is no longer used. use before_trading instead."))
        # when resuming a paper-trading session, optionally force a
        # catch-up before_trading run before the first bar/tick
        self._force_run_before_trading = Environment.get_instance().config.extra.force_run_init_when_pt_resume

    @property
    def user_context(self):
        return self._user_context

    def init(self):
        """Run the user's init() (if any), then publish POST_USER_INIT."""
        if not self._init:
            return
        with ExecutionContext(EXECUTION_PHASE.ON_INIT):
            with ModifyExceptionFromType(EXC_TYPE.USER_EXC):
                self._init(self._user_context)
        Environment.get_instance().event_bus.publish_event(Event(EVENT.POST_USER_INIT))

    @run_when_strategy_not_hold
    def before_trading(self, event):
        # running once clears any pending forced catch-up
        self._force_run_before_trading = False
        with ExecutionContext(EXECUTION_PHASE.BEFORE_TRADING):
            with ModifyExceptionFromType(EXC_TYPE.USER_EXC):
                self._before_trading(self._user_context)

    @run_when_strategy_not_hold
    def handle_bar(self, event):
        # if a forced before_trading is still pending, run it INSTEAD of
        # processing this bar (the bar event is consumed by the catch-up)
        if self._force_run_before_trading and (self._before_trading is not None):
            self.before_trading(event)
        else:
            bar_dict = event.bar_dict
            with ExecutionContext(EXECUTION_PHASE.ON_BAR):
                with ModifyExceptionFromType(EXC_TYPE.USER_EXC):
                    self._handle_bar(self._user_context, bar_dict)

    @run_when_strategy_not_hold
    def handle_tick(self, event):
        # same catch-up logic as handle_bar, for tick-level strategies
        if self._force_run_before_trading and (self._before_trading is not None):
            self.before_trading(event)
        else:
            tick = event.tick
            with ExecutionContext(EXECUTION_PHASE.ON_TICK):
                with ModifyExceptionFromType(EXC_TYPE.USER_EXC):
                    self._handle_tick(self._user_context, tick)

    @run_when_strategy_not_hold
    def after_trading(self, event):
        with ExecutionContext(EXECUTION_PHASE.AFTER_TRADING):
            with ModifyExceptionFromType(EXC_TYPE.USER_EXC):
                self._after_trading(self._user_context)
|
{
"content_hash": "59bb6f276fe41eac9b49d3f81135e933",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 120,
"avg_line_length": 45.39325842696629,
"alnum_prop": 0.6574257425742575,
"repo_name": "xclxxl414/rqalpha",
"id": "76b61ae9764fb1f10ba59de30a5082017127db4f",
"size": "4645",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rqalpha/core/strategy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2333905"
},
{
"name": "Python",
"bytes": "2597438"
},
{
"name": "Shell",
"bytes": "1154"
}
],
"symlink_target": ""
}
|
"""This example gets Sites under MCM requiring review."""
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
  """Print every MCM site that is still awaiting review."""
  site_service = client.GetService('SiteService', version='v202211')
  # Select only the sites whose approval is pending.
  statement = ad_manager.StatementBuilder(version='v202211').Where(
      "approvalStatus = 'REQUIRES_REVIEW'")
  # Page through the result set until a page comes back empty.
  while True:
    response = site_service.getSitesByStatement(statement.ToStatement())
    page = response['results'] if 'results' in response else None
    if not page:
      break
    for site in page:
      # Print out some information for each site.
      print('Site with Id %d and URL "%s" was found.'
            % (site['id'], site['url']))
    statement.offset += statement.limit
  print('\nNumber of results found: %s' % response['totalResultSetSize'])
# Script entry point: build an Ad Manager client from stored credentials
# and run the example.
if __name__ == '__main__':
  # Initialize client object.
  ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
  main(ad_manager_client)
|
{
"content_hash": "d789ebfe70151e82b389ae42b33ae990",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 78,
"avg_line_length": 34.5,
"alnum_prop": 0.6947996589940324,
"repo_name": "googleads/googleads-python-lib",
"id": "17eddde1bd88e9f428a2b16cdb04f1302a2332a6",
"size": "1773",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/ad_manager/v202211/site_service/get_sites_requiring_approval.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "403821"
}
],
"symlink_target": ""
}
|
"""
Tests for seqmagick.transform
"""
from cStringIO import StringIO
import functools
import logging
import unittest
from Bio import Alphabet, SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
from seqmagick import transform
logging.basicConfig(level=logging.FATAL)
def _alignment_record(sequence):
    """Wrap a gapped-DNA string in an anonymous SeqRecord."""
    gapped_dna = Alphabet.Gapped(Alphabet.generic_dna)
    return SeqRecord(Seq(sequence, alphabet=gapped_dna))
def seqrecord(sequence_id, sequence_text, alphabet=Alphabet.generic_dna,
              description=None):
    """Shortcut to build a SeqRecord, optionally with a description."""
    rec = SeqRecord(Seq(sequence_text, alphabet), id=sequence_id)
    if description:
        rec.description = description
    return rec
class PatternReplaceTestCase(unittest.TestCase):
    """Tests for transform.name_replace (regex rewriting of record IDs)."""

    def create_sequences(self):
        # returns fresh records on every call so tests can build an
        # independent "expected" copy
        return [seqrecord('test_sequence_1', 'ACTGT'),
                seqrecord('test_REPLACE_2', 'ACTGT'),
                seqrecord('other_sequence', 'ATGAG'), ]

    def setUp(self):
        super(PatternReplaceTestCase, self).setUp()
        self.sequences = self.create_sequences()

    def tearDown(self):
        super(PatternReplaceTestCase, self).tearDown()

    # from http://stackoverflow.com/questions/13923072/shortening-fasta-header-perl
    def test_pattern_replace_anchored_transform_id(self):
        sequences = [seqrecord('gi|351517969|ref|NW_003613580.1|', 'CAGTC',
                               description='gi|351517969|ref|NW_003613580.1| Cricetulus griseus unplaced genomic scaffold'),
                     seqrecord('gi|351517969|ref|NW_003613580.1|', 'CAGTC',
                               description='gi|351517969|ref|NW_003613580.1|'),
                     seqrecord('gi|351517969|ref|NW_003613580.1|', 'CAGTC')]
        # capture the identifier after three groups of pipe-separated characters
        transformed = list(transform.name_replace(sequences, r'^(?:[^|]+\|){3}([^|]+)\|', r'\1'))
        self.assertEqual(str(sequences[0].seq), str(transformed[0].seq))
        self.assertEqual('NW_003613580.1', transformed[0].id)
        self.assertEqual('NW_003613580.1 Cricetulus griseus unplaced genomic scaffold', transformed[0].description)
        self.assertEqual(str(sequences[1].seq), str(transformed[1].seq))
        self.assertEqual('NW_003613580.1', transformed[1].id)
        self.assertEqual('NW_003613580.1', transformed[1].description)
        self.assertEqual(str(sequences[2].seq), str(transformed[2].seq))
        self.assertEqual('NW_003613580.1', transformed[2].id)
        self.assertEqual('<unknown description>', transformed[2].description)

    # from http://stackoverflow.com/questions/15155728/modifying-fasta-headers-with-unix-command-line-tools
    def test_pattern_replace_anchored_id_from_description(self):
        sequences = [seqrecord('hg19_ct_UserTrack_3545_691', 'GATGG',
                               description='hg19_ct_UserTrack_3545_691 range=chr1:8121498-8121502 5\'pad=0 3\'pad=0 strand=+ repeatMasking=none')]
        transformed = next(transform.name_replace(sequences, r'^\S+ range=(\S+)', r'\1'))
        self.assertEqual(str(sequences[0].seq), str(transformed.seq))
        self.assertEqual('chr1:8121498-8121502', transformed.id)
        self.assertEqual('chr1:8121498-8121502 5\'pad=0 3\'pad=0 strand=+ repeatMasking=none', transformed.description)

    # from http://stackoverflow.com/questions/23280240/how-to-rename-fasta-file-headers-using-sed
    def test_pattern_replace_anchored_add_to_description(self):
        sequences = [seqrecord('Bra000001', 'CTTAT', description='Bra000001')]
        transformed = next(transform.name_replace(sequences, r'^(Bra\d+)$', r'\1 Brassica rapa'))
        self.assertEqual(str(sequences[0].seq), str(transformed.seq))
        self.assertEqual('Bra000001', transformed.id)
        self.assertEqual('Bra000001 Brassica rapa', transformed.description)

    def test_pattern_replace_anchored_remove_from_description(self):
        sequences = [seqrecord('Bra000001', 'CTTAT', description='Bra000001 Brassica rapa')]
        transformed = next(transform.name_replace(sequences, r' .*$', ''))
        self.assertEqual(str(sequences[0].seq), str(transformed.seq))
        self.assertEqual('Bra000001', transformed.id)
        self.assertEqual('Bra000001', transformed.description)

    def test_pattern_replace_anchored_nomatch(self):
        # '^hello$' anchors to the whole description, which is longer
        sequences = [seqrecord('hello', 'A', description='hello friend')]
        transformed = next(transform.name_replace(sequences, r'^hello$', 'bye'))
        self.assertEqual(str(sequences[0].seq), str(transformed.seq))
        self.assertEqual('hello', transformed.id)
        self.assertEqual('hello friend', transformed.description)

    def test_pattern_replace_anchored_match(self):
        sequences = [seqrecord('hello', 'A', description='hello friend'),
                     seqrecord('hello', 'A')]
        transformed = list(transform.name_replace(sequences, r'^hello\b', 'bye'))
        self.assertEqual(str(sequences[0].seq), str(transformed[0].seq))
        self.assertEqual('bye', transformed[0].id)
        self.assertEqual('bye friend', transformed[0].description)
        self.assertEqual(str(sequences[1].seq), str(transformed[1].seq))
        self.assertEqual('bye', transformed[1].id)
        self.assertEqual('<unknown description>', transformed[1].description)

    def test_pattern_replace_none(self):
        result = transform.name_replace(self.sequences, 'ZZZ', 'MATCH')
        result = list(result)
        self.assertEqual(self.sequences, result)

    def test_pattern_replace_static(self):
        result = transform.name_replace(self.sequences, '_REPLACE_',
                                        '_DONE_')
        result = list(result)
        expected = self.create_sequences()
        expected[1].id = 'test_DONE_2'
        # NOTE(review): `expected` is built but never asserted -- the check
        # below compares the input list against the result, which likely
        # only verifies object identity.  Probably meant
        # assertEqual(expected, result); confirm SeqRecord equality
        # semantics before changing, since that may make the test fail.
        self.assertEqual(self.sequences, result)

    def test_pattern_replace_case_insensitive(self):
        """
        Substitutions are case insensitive
        """
        result = transform.name_replace(self.sequences, '_replace_',
                                        '_DONE_')
        result = list(result)
        expected = self.create_sequences()
        expected[1].id = 'test_DONE_2'
        # NOTE(review): same unused-`expected` caveat as
        # test_pattern_replace_static above.
        self.assertEqual(self.sequences, result)

    def test_pattern_replace_group(self):
        """
        Make sure capturing groups work
        """
        result = transform.name_replace(self.sequences, '_(repl)ace_',
                                        '_DONE-\\1_')
        result = list(result)
        expected = self.create_sequences()
        expected[1].id = 'test_DONE-repl_2'
        # NOTE(review): same unused-`expected` caveat as
        # test_pattern_replace_static above.
        self.assertEqual(self.sequences, result)
class SqueezeTestCase(unittest.TestCase):
    """Tests for transform.squeeze / transform.gap_proportion."""

    def setUp(self):
        super(SqueezeTestCase, self).setUp()
        # columns 2 and 5 are all-gap; column 0 has 2/3 gaps, column 4 has 1/3
        self.sequences = [
            seqrecord('sequence_1', 'AC-G--'),
            seqrecord('sequence_2', '-C-GT-'),
            seqrecord('sequence_3', '-T-AG-'),
        ]

    def test_gap_proportion(self):
        actual = transform.gap_proportion(self.sequences)
        self.assertEqual([2./3, 0.0, 1.0, 0.0, 1./3, 1.0], actual)

    def test_basic_squeeze(self):
        # threshold 1.0 removes exactly the all-gap columns (2 and 5)
        result = list(transform.squeeze(self.sequences, 1.0))
        self.assertEqual([4, 4, 4], [len(i) for i in result])
        self.assertEqual([i.id for i in self.sequences], [i.id for i in result])
        expected = [
            seqrecord('sequence_1', 'ACG-'),
            seqrecord('sequence_2', '-CGT'),
            seqrecord('sequence_3', '-TAG'),
        ]
        self.assertEqual([str(i.seq) for i in expected],
                         [str(i.seq) for i in result])

    def test_squeeze_none(self):
        """
        Threshold of 1.01 exceeds any possible gap proportion, so nothing
        should be squeezed.  (Docstring previously said 0.001, which did
        not match the code.)
        """
        result = list(transform.squeeze(self.sequences, 1.01))
        self.assertEqual([str(i.seq) for i in self.sequences],
                         [str(i.seq) for i in result])
class SeqPatternTestCase(unittest.TestCase):
    """Tests for transform.seq_include / transform.seq_exclude."""

    def setUp(self):
        super(SeqPatternTestCase, self).setUp()
        self.sequences = [
            seqrecord('sequence_1', 'AC-G--'),
            seqrecord('sequence_2', '-C-GT-'),
            seqrecord('sequence_3', '-T-AG-'),
        ]
        # (regex, records whose sequence text matches it)
        self.tests = [('^$', []),
                      ('.*', self.sequences),
                      ('^AC', [self.sequences[0]]),
                      ('^ac', []),
                      ('^ac(?i)', [self.sequences[0]])]

    def test_include(self):
        # Fixed: removed a dead `result = transform.seq_include(...)` call
        # whose value was immediately overwritten inside the loop.
        for regex, expected in self.tests:
            result = list(transform.seq_include(self.sequences, regex))
            self.assertEqual(expected, result)

    def test_exclude(self):
        # Fixed: removed the same dead statement as in test_include.
        for regex, expected_include in self.tests:
            expected = [i for i in self.sequences if i not in expected_include]
            result = list(transform.seq_exclude(self.sequences, regex))
            self.assertEqual(expected, result)
class HeadTestCase(unittest.TestCase):
    """
    Test for transform.head
    """

    def setUp(self):
        # records of increasing length so id and seq both discriminate
        self.sequences = [seqrecord('sequence{0}'.format(i), 'A' * (i + 1))
                          for i in xrange(100)]

    def _assert_same_records(self, expected, actual):
        # compare by id and by sequence text
        self.assertEqual([s.id for s in expected], [r.id for r in actual])
        self.assertEqual([str(s.seq) for s in expected],
                         [str(r.seq) for r in actual])

    def test_zero(self):
        self.assertEqual([], list(transform.head(self.sequences, '0')))

    def test_more_seqs_than_available(self):
        """
        Requesting more records than exist returns everything.
        """
        result = list(transform.head(self.sequences, '10000'))
        self._assert_same_records(self.sequences, result)

    def test_values(self):
        """
        Every prefix length from 0..len yields exactly that prefix.
        """
        for h in xrange(len(self.sequences) + 1):
            result = list(transform.head(self.sequences, str(h)))
            self.assertEqual(h, len(result))
            self._assert_same_records(self.sequences[:h], result)

    def test_minus_zero(self):
        """
        Test that -0 returns all sequences
        """
        result = list(transform.head(self.sequences, '-0'))
        self._assert_same_records(self.sequences, result)

    def test_minus_values(self):
        """
        '-h' drops the last h records.
        """
        for h in xrange(1, len(self.sequences) + 1):
            result = list(transform.head(self.sequences, str(-h)))
            self.assertEqual(h, len(self.sequences) - len(result))
            self._assert_same_records(self.sequences[:-h], result)
class TailTestCase(unittest.TestCase):
    """Tests for transform.tail (last N records, or '+N' to skip N-1)."""

    def setUp(self):
        self.records = [
            seqrecord('sequence_1', 'AC-G--'),
            seqrecord('sequence_2', '-C-GT-'),
            seqrecord('sequence_3', '-T-AG-'),
        ]

    def _do_test(self, size):
        # Helper: tail(size) must equal a plain negative slice of the input.
        actual = list(transform.tail(self.records, str(size)))
        expected = self.records[-size:]
        self.assertEqual([e.id for e in expected], [a.id for a in actual])
        self.assertEqual([str(e.seq) for e in expected], [str(a.seq) for a in actual])

    def test_tail_1(self):
        self._do_test(1)

    def test_tail_2(self):
        self._do_test(2)

    def test_tail_3(self):
        self._do_test(3)

    def test_plus_zero(self):
        """
        Test that +0 returns all sequences
        """
        result = list(transform.tail(self.records, '+0'))
        self.assertEqual([s.id for s in self.records],
                         [r.id for r in result])
        self.assertEqual([str(s.seq) for s in self.records],
                         [str(r.seq) for r in result])

    def test_plus_values(self):
        """
        Try specifying some plus values ('+N' starts output at record N).
        range replaces the Python-2-only xrange; identical in a for loop.
        """
        for h in range(1, len(self.records) + 1):
            result = list(transform.tail(self.records, '+{}'.format(h)))
            self.assertEqual(len(self.records) + 1 - h, len(result))
            self.assertEqual([s.id for s in self.records[h-1:]],
                             [r.id for r in result])
            self.assertEqual([str(s.seq) for s in self.records[h-1:]],
                             [str(r.seq) for r in result])
class IsolateRegionTestCase(unittest.TestCase):
    """Tests for transform.isolate_region, which gaps out every column
    outside the [start, end) window."""

    def setUp(self):
        self.sequences = [_alignment_record('--A--ACTGGACGTATTC-CCCC'),
                          _alignment_record('--AGCACTGGA---ATTC-CCCC')]

    def test_no_isolation(self):
        """Isolating the full alignment width is a no-op."""
        result = list(transform.isolate_region(self.sequences, 0,
                                               len(self.sequences[0])))
        self.assertEqual(self.sequences, result)

    def test_single_loc(self):
        """A one-column window keeps only that column's characters."""
        start = 2
        end = 3
        result = list(transform.isolate_region(self.sequences, start, end))
        for seq in result:
            self.assertEqual('--A--------------------', str(seq.seq))

    def test_middle(self):
        """An interior window gaps out both flanks."""
        expected = ['--A--ACTGGA------------', '--AGCACTGGA------------']
        start = 1
        end = 11
        actual = list(transform.isolate_region(self.sequences, start, end))
        actual = [str(s.seq) for s in actual]
        self.assertEqual(expected, actual)

    def test_invalid(self):
        # start >= end must raise once the generator is advanced.  The
        # builtin next() replaces the Python-2-only .next bound method,
        # keeping the test 2/3 compatible with identical behavior.
        self.assertRaises(ValueError, lambda: next(
            transform.isolate_region(self.sequences, 5, 5)))
        self.assertRaises(ValueError, lambda: next(
            transform.isolate_region(self.sequences, 10, 5)))
class MinUngapLengthTestCase(unittest.TestCase):
    """Tests for transform.min_ungap_length_discard."""

    def setUp(self):
        fixtures = ('--AAC--', 'AAAA...', '-------', 'ACGRAGT')
        self.sequences = [_alignment_record(s) for s in fixtures]

    def test_none_pass(self):
        """A threshold longer than any sequence discards every record."""
        survivors = list(transform.min_ungap_length_discard(self.sequences, 8))
        self.assertEqual([], survivors)

    def test_all_pass(self):
        """A zero threshold keeps every record."""
        survivors = list(transform.min_ungap_length_discard(self.sequences, 0))
        self.assertEqual(self.sequences, survivors)

    def test_partial(self):
        """Only records with at least 4 ungapped characters survive."""
        survivors = transform.min_ungap_length_discard(self.sequences, 4)
        self.assertEqual([self.sequences[1], self.sequences[3]],
                         list(survivors))
class IncludeExcludeMixIn(object):
    """
    Shared fixture for the include/exclude-by-id tests: a file-like handle
    listing three record ids and five sequence records (two of which carry
    descriptions and are NOT named in the handle).
    """
    def setUp(self):
        # The handle names sequenceid1, sequenceid2 and sequenceid4 only;
        # sequenceid3 and 'test' are deliberately absent.
        ids = """sequenceid1
sequenceid2
sequenceid4
"""
        self.handle = StringIO(ids)
        self.sequences = [seqrecord("sequenceid1", "AAA"),
                          seqrecord("sequenceid2", "BBB"),
                          seqrecord("sequenceid3", "CCC"),
                          seqrecord("sequenceid4", "DDD",
                                    description='sequence id 4'),
                          seqrecord("test", "EEE",
                                    description='test sequence'), ]
class IncludeFromFileTestCase(IncludeExcludeMixIn, unittest.TestCase):
    """include_from_file keeps only records whose id appears in the handle."""

    def test_filter(self):
        wanted = [self.sequences[i] for i in (0, 1, 3)]
        kept = list(transform.include_from_file(self.sequences, self.handle))
        self.assertEqual(3, len(kept))
        self.assertEqual(wanted, kept)
class ExcludeFromFileTestCase(IncludeExcludeMixIn, unittest.TestCase):
    """exclude_from_file drops records whose id appears in the handle."""

    def test_filter(self):
        wanted = [self.sequences[2], self.sequences[4]]
        kept = list(transform.exclude_from_file(self.sequences, self.handle))
        self.assertEqual(2, len(kept))
        self.assertEqual(wanted, kept)
class NameIncludeTestCase(IncludeExcludeMixIn, unittest.TestCase):
    """name_include keeps records whose id or description matches a regex."""

    def test_filter_id(self):
        expected = self.sequences[:2]
        kept = list(transform.name_include(self.sequences, r'sequenceid[12]'))
        self.assertEqual(2, len(kept))
        self.assertEqual(expected, kept)

    def test_filter_description(self):
        expected = self.sequences[3:]
        kept = list(transform.name_include(self.sequences,
                                           r'sequence id 4|test seq'))
        self.assertEqual(2, len(kept))
        self.assertEqual(expected, kept)
class NameExcludeTestCase(IncludeExcludeMixIn, unittest.TestCase):
    """name_exclude drops records whose id or description matches a regex."""

    def test_filter_id(self):
        expected = self.sequences[2:]
        actual = list(transform.name_exclude(self.sequences, r'sequenceid[12]'))
        self.assertEqual(3, len(actual))
        self.assertEqual(expected, actual)

    def test_filter_description(self):
        expected = self.sequences[:3]
        actual = list(transform.name_exclude(self.sequences, r'sequence id 4|test seq'))
        # Length assertion added for consistency with every other filter
        # test in this module (the equality check below already implies it,
        # but the sibling tests all assert the count explicitly).
        self.assertEqual(3, len(actual))
        self.assertEqual(expected, actual)
class CutTestCase(unittest.TestCase):
    """Tests for transform._cut_sequences slicing behaviour."""

    def setUp(self):
        fixtures = [("sequenceid1", "ABC"), ("sequenceid2", "BCD"),
                    ("sequence id 4", "DEF"), ("test sequence", "EFG")]
        self.sequences = [SeqRecord(Seq(seq), id=name)
                          for name, seq in fixtures]

    def _cut(self, window):
        # Helper: apply the slice and return the resulting sequence strings.
        return [str(record.seq)
                for record in transform._cut_sequences(self.sequences, window)]

    def test_no_sequences(self):
        """An empty slice leaves every record with length zero."""
        for record in transform._cut_sequences(self.sequences, slice(0, 0)):
            self.assertEqual(0, len(record))

    def test_full_sequence(self):
        """A full-width slice is a no-op."""
        self.assertEqual(['ABC', 'BCD', 'DEF', 'EFG'], self._cut(slice(0, 3)))

    def test_cut_sequences(self):
        """Partial slices trim each record identically."""
        self.assertEqual(['AB', 'BC', 'DE', 'EF'], self._cut(slice(0, 2)))
        self.assertEqual(['BC', 'CD', 'EF', 'FG'], self._cut(slice(1, None)))
class CodonWarningTableTestCase(unittest.TestCase):
    """CodonWarningTable yields 'X' and logs a warning for unknown codons."""

    def warn(self, *args, **kwargs):
        # Stand-in for logging.warn that records every call it receives.
        self.warnings.append((args, kwargs))

    def setUp(self):
        self.warnings = []
        self.warning_dict = transform.CodonWarningTable({'UUU': 'F'})
        # Monkey-patch transform's logger so warnings land in self.warnings.
        self.old_warn = transform.logging.warn
        transform.logging.warn = self.warn

    def tearDown(self):
        # Restore the real logging.warn.
        transform.logging.warn = self.old_warn

    def test_nowarn(self):
        """A known codon translates normally and logs nothing."""
        self.assertEqual('F', self.warning_dict['UUU'])
        self.assertEqual([], self.warnings)

    def test_warn(self):
        """An unknown codon yields 'X' and logs exactly one warning."""
        codon = 'UU-'
        self.assertEqual('X', self.warning_dict[codon])
        self.assertEqual([(("Unknown Codon: %s", codon), {})], self.warnings)
class TranslateTestCase(unittest.TestCase):
    """Tests for transform.translate in the DNA-to-protein modes."""

    def _translated(self, dna, mode):
        # Helper: translate a single record and return its protein strings.
        records = [seqrecord('A', dna)]
        return [str(r.seq) for r in transform.translate(records, mode)]

    def test_dna_protein_nogap(self):
        """Plain translation keeps the stop symbol '*'."""
        self.assertEqual(['FL*'], self._translated('TTTTTATAA', 'dna2protein'))

    def test_dna_protein_nogap_stop(self):
        """The 'stop' mode truncates at the stop codon."""
        self.assertEqual(['FL'],
                         self._translated('TTTTTATAA', 'dna2proteinstop'))

    def test_dna_protein_gap(self):
        """A gapped codon translates to 'X'."""
        self.assertEqual(['FX*'], self._translated('TTTTT-TAA', 'dna2protein'))

    def test_dna_protein_gap_stop(self):
        """A fully gapped codon becomes '-' and 'stop' mode still truncates."""
        self.assertEqual(['-L'],
                         self._translated('---TTATAA', 'dna2proteinstop'))
class UngapSequencesTestCase(unittest.TestCase):
    """ungap_sequences strips both '.' and '-' gap characters."""

    def _check(self, raw_seqs):
        # Helper: build records from the raw strings and assert the
        # gap-stripped results.
        records = [SeqRecord(Seq(s), id='s{0}'.format(i + 1))
                   for i, s in enumerate(raw_seqs)]
        ungapped = transform.ungap_sequences(records)
        self.assertEqual(["AAA", "AG", "A"], [str(r.seq) for r in ungapped])

    def test_dot_gap(self):
        self._check(["AAA", "A.G", ".A."])

    def test_dash_gap(self):
        self._check(["AAA", "A-G", "-A-"])
# Name Modification functions
class IdModifyMixin(object):
    """
    Mixin to ease testing name prefix and suffix.

    Subclasses provide three class attributes: ``initial_fasta`` (input),
    ``target_fasta`` (expected output) and ``modify_fn`` (the record
    transformation under test).
    """
    def setUp(self):
        self.input_fp = StringIO(self.initial_fasta)
        self.output_fp = StringIO()

    def test_modify(self):
        # Parse the fixture, run the subclass's modify_fn over the records,
        # re-serialize, and compare against the expected FASTA text.
        records = SeqIO.parse(self.input_fp, 'fasta')
        records = self.__class__.modify_fn(records)
        SeqIO.write(records, self.output_fp, 'fasta')
        self.assertEqual(self.target_fasta, self.output_fp.getvalue().strip())
class NamePrefixTestCase(IdModifyMixin, unittest.TestCase):
    """name_insert_prefix must prepend 'pre.' to every record id."""
    # Input fixture: one simple id and one pipe-delimited GenBank-style id.
    initial_fasta = """>seq1
ACGT
>gi|260674|gb|S52561.1| {long terminal repeat} [human immunodeficiency virus type]
ACGT"""
    target_fasta = """>pre.seq1
ACGT
>pre.gi|260674|gb|S52561.1| {long terminal repeat} [human immunodeficiency virus type]
ACGT"""
    modify_fn = functools.partial(transform.name_insert_prefix, prefix="pre.")
class NameSuffixTestCase(IdModifyMixin, unittest.TestCase):
    """name_append_suffix must append '.post' to every record id
    (before the description, for ids that carry one)."""
    initial_fasta = """>seq1
ACGT
>gi|260674|gb|S52561.1| {long terminal repeat} [human immunodeficiency virus type]
ACGT"""
    target_fasta = """>seq1.post
ACGT
>gi|260674|gb|S52561.1|.post {long terminal repeat} [human immunodeficiency virus type]
ACGT"""
    modify_fn = functools.partial(transform.name_append_suffix, suffix=".post")
class MultiCutTestCase(unittest.TestCase):
    """multi_cut_sequences concatenates the requested slices of each record."""

    def setUp(self):
        self.inputs = [seqrecord("Sequence 1", "ACGT--TCAGA")]

    def test_multicut(self):
        windows = [slice(None, 2), slice(8, None)]
        cut = transform.multi_cut_sequences(self.inputs, windows)
        self.assertEqual(['ACAGA'], [str(record.seq) for record in cut])
class MultiMaskSequences(unittest.TestCase):
    """multi_mask_sequences replaces each masked range with '-' characters."""

    def setUp(self):
        fixtures = [("sequenceid1", "AAA"), ("sequenceid2", "BBB"),
                    ("sequence id 4", "DDDD"), ("test sequence", "EEE")]
        self.sequences = [SeqRecord(Seq(seq), id=name)
                          for name, seq in fixtures]

    def test_mask_whole(self):
        """A mask wider than any record blanks each record entirely."""
        masked = list(transform.multi_mask_sequences(self.sequences,
                                                     [slice(0, 200)]))
        self.assertEqual(len(self.sequences), len(masked))
        for original, result in zip(self.sequences, masked):
            self.assertEqual(original.id, result.id)
            self.assertEqual('-' * len(original), str(result.seq))

    def test_mask(self):
        """A one-column mask blanks only that column of each record."""
        masked = list(transform.multi_mask_sequences(self.sequences,
                                                     [slice(1, 2)]))
        self.assertEqual(len(self.sequences), len(masked))
        self.assertEqual(['A-A', 'B-B', 'D-DD', 'E-E'],
                         [str(r.seq) for r in masked])
class RecordBufferTestCase(unittest.TestCase):
    """_record_buffer lets a one-shot record iterator be replayed."""

    def setUp(self):
        self.sequences = [SeqRecord(Seq("AAA"), id="s1"),
                          SeqRecord(Seq("A-G"), id="s2"),
                          SeqRecord(Seq("-A-"), id="s3")]
        self.seq_iter = iter(self.sequences)

    def _compare(self, records):
        # Every field of every record must round-trip unchanged.
        self.assertEqual(len(self.sequences), len(records))
        for expected, actual in zip(self.sequences, records):
            self.assertEqual(expected.id, actual.id)
            self.assertEqual(expected.description, actual.description)
            self.assertEqual(str(expected.seq), str(actual.seq))

    def test_single_pass(self):
        with transform._record_buffer(self.seq_iter) as iter_f:
            self._compare(list(iter_f()))

    def test_multi_pass(self):
        # The buffered factory may be consumed more than once.
        with transform._record_buffer(self.seq_iter) as iter_f:
            self._compare(list(iter_f()))
            self._compare(list(iter_f()))
class DropColumnsTestCase(unittest.TestCase):
    """drop_columns removes the alignment columns named by the slices."""

    def setUp(self):
        self.sequences = [SeqRecord(Seq("AAA"), id="s1"),
                          SeqRecord(Seq("A-G"), id="s2"),
                          SeqRecord(Seq("-A-"), id="s3")]

    def _ids(self, records):
        return [record.id for record in records]

    def test_basic(self):
        """Dropping everything after column 0 keeps one character each."""
        result = list(transform.drop_columns(self.sequences,
                                             [slice(1, None)]))
        self.assertEqual(self._ids(self.sequences), self._ids(result))
        self.assertEqual(['A', 'A', '-'], [str(r.seq) for r in result])

    def test_multi(self):
        """Multiple slices drop their union, keeping only column 1."""
        result = list(transform.drop_columns(self.sequences,
                                             [slice(0, 1), slice(2, None)]))
        self.assertEqual(self._ids(self.sequences), self._ids(result))
        self.assertEqual(['A', '-', 'A'], [str(r.seq) for r in result])
class DashesCleanupTestCase(unittest.TestCase):
    """dashes_cleanup normalizes odd gap-like characters to '-'."""

    def setUp(self):
        fixtures = [("s1", "A~-.?~GT"), ("s2", "A-GGGG?-"),
                    ("s3", "-A-:ACA-"), ("s4", "ACTGGTCA")]
        self.sequences = [SeqRecord(Seq(seq), id=name)
                          for name, seq in fixtures]

    def test_basic(self):
        cleaned = [(record.id, str(record.seq))
                   for record in transform.dashes_cleanup(self.sequences)]
        self.assertEqual(
            [('s1', 'A-----GT'),
             ('s2', 'A-GGGG--'),
             ('s3', '-A--ACA-'),
             ('s4', 'ACTGGTCA')], cleaned)
|
{
"content_hash": "48d0eb101b596564dad4797762ad70fe",
"timestamp": "",
"source": "github",
"line_count": 671,
"max_line_length": 146,
"avg_line_length": 38.14157973174367,
"alnum_prop": 0.5971163990153557,
"repo_name": "theboocock/ancient_dna_pipeline",
"id": "f3b6dd79f0dad988ff490578b2152c2a309e1c59",
"size": "25593",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/seqmagick/seqmagick/test/test_transform.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "494754"
},
{
"name": "C++",
"bytes": "276798"
},
{
"name": "Groff",
"bytes": "25164"
},
{
"name": "Java",
"bytes": "36594"
},
{
"name": "JavaScript",
"bytes": "22549"
},
{
"name": "Makefile",
"bytes": "9447"
},
{
"name": "Perl",
"bytes": "33073"
},
{
"name": "Python",
"bytes": "342459"
},
{
"name": "R",
"bytes": "12143"
},
{
"name": "Shell",
"bytes": "46220"
}
],
"symlink_target": ""
}
|
import sys
from ete3 import Tree
from collections import defaultdict
def bissect_tree(treeObj):
    # Split the tree at the first qualifying branch longer than the
    # module-level global `length_cutoff` (defined below, before first call).
    # Returns (detached_subtree, remainder) on a split, or the sentinel
    # (treeObj, 'Zilch') when no branch qualifies.
    subtree1 = ''
    subtree2 = ''
    for node in treeObj.traverse("postorder"):
        if node.dist > length_cutoff: #split the tree here
            if node.is_leaf():
                # Never detach a single leaf.
                continue
            elif len(node) < 4: #edit this to change the minimum number of sequences in the smaller subtree
                continue
            else:
                # NOTE(review): ete3's detach() appears to remove the node
                # from treeObj in place, so subtree2 is the mutated original
                # minus subtree1 — confirm against the ete3 documentation.
                subtree1 = node.detach()
                subtree2 = treeObj
                return (subtree1, subtree2)
    return (treeObj, 'Zilch')
def get_leaf_names(treeObj):
    """Return the names of the tree's leaves, in iteration order."""
    return [leaf.name for leaf in treeObj]
# Script body: argv[1] is the branch-length cutoff, argv[2] is a file of
# newick trees (one per line, UFBoot-style).  Each tree is repeatedly split
# on long branches, the resulting fragments are grouped across bootstrap
# replicates by identical taxon sets, and each group is written to its own
# .boot file.
length_cutoff = float(sys.argv[1])
#tree = Tree(sys.argv[2])
tree_bits = [] #holds the contents of the ufboot-esque files to print out at end
ufbootfile = open(sys.argv[2])
count_lines = 0
for line in ufbootfile:
    count_lines += 1
    tree = Tree(line.rstrip())
    trees_done = []
    trees_todo = []
    trees_todo.append(tree)
    # Keep splitting until bissect_tree reports no more long branches.
    while len(trees_todo) > 0:
        (s1, s2) = bissect_tree(trees_todo.pop())
        if s2 == 'Zilch': #no problem branches
            trees_done.append(s1)
        else:
            trees_done.append(s1) #This relies on postorder and levelorder (gradually go deeper into tree), to prevent an infinite loop when cutting
            trees_todo.append(s2)
    if len(tree_bits) == 0: #first bootstrap, so just write the bits
        for t in trees_done:
            tree_bits.append([t])
    else:
        #check to see if compatible with existing tree bits from earlier bootstraps
        # Fragments with the same leaf-name set as an existing group join it;
        # otherwise they start a new group.
        for t in trees_done:
            matched = 0
            tree_names = get_leaf_names(t)
            #for element in tree_bits:
            for i in range(len(tree_bits)):
                element_taxa = get_leaf_names(tree_bits[i][0])
                if set(tree_names) == set(element_taxa): #they are compatible
                    tree_bits[i].append(t)
                    matched = 1
            if matched == 0:
                tree_bits.append([t])
#Write out the results
counter = 0
#for tree in trees_done:
#    tree.write(outfile=sys.argv[2][:-4] + "_" + str(counter) + ".tre")
#    counter += 1
# One output file per taxon-set group; assumes the input name ends with a
# 7-character extension such as '.ufboot' (stripped by [:-7]).
for trees in tree_bits:
    outfile_name = sys.argv[2][:-7] + "_" + str(counter) + ".boot"
    counter += 1
    outh = open(outfile_name, "w")
    for t in trees:
        outh.write(t.write() + "\n")
    outh.close()
|
{
"content_hash": "48d0ee152a0eb4a808bbf5d711ea06db",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 148,
"avg_line_length": 32.473684210526315,
"alnum_prop": 0.5753646677471637,
"repo_name": "Tancata/phylo",
"id": "ff2354287529e3309106528546d4b9e09f8d7eba",
"size": "2927",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cut_bootstrapped_tree_on_long_branch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "155099"
},
{
"name": "Shell",
"bytes": "1600"
}
],
"symlink_target": ""
}
|
import unittest
from dart.model.event import Event
from dart.model.event import EventData
from dart.model.exception import DartValidationException
from dart.schema.base import default_and_validate
from dart.schema.event import event_schema
class TestEventSchema(unittest.TestCase):
    """Validation tests for the event JSON schema."""

    def test_event_schema(self):
        """Defaulting fills in a state for an event created with state=None."""
        event = Event(data=EventData('test-event', state=None))
        before = event.to_dict()
        event = default_and_validate(event, event_schema())
        # state should be defaulted to INACTIVE
        self.assertNotEqual(before, event.to_dict())

    def test_event_schema_invalid(self):
        """A missing event name must raise DartValidationException."""
        with self.assertRaises(DartValidationException) as context:
            event = Event(data=EventData(None))
            # should fail because the name is missing
            default_and_validate(event, event_schema())
        self.assertTrue(isinstance(context.exception, DartValidationException))
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
{
"content_hash": "08fcf764c869ce14dd3a4b3023cec909",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 79,
"avg_line_length": 32.29032258064516,
"alnum_prop": 0.6823176823176823,
"repo_name": "RetailMeNotSandbox/dart",
"id": "3326f0a11d9e08f4a96c5858445e403b8c1f6f28",
"size": "1001",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/dart/test/schema/test_event.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "103727"
},
{
"name": "HTML",
"bytes": "67636"
},
{
"name": "JavaScript",
"bytes": "2762304"
},
{
"name": "Nginx",
"bytes": "996"
},
{
"name": "PLpgSQL",
"bytes": "1475"
},
{
"name": "Python",
"bytes": "1025954"
},
{
"name": "Ruby",
"bytes": "5523"
},
{
"name": "Shell",
"bytes": "3100"
}
],
"symlink_target": ""
}
|
import argparse
import json
import os
import sys
from . import TimeoutError, run_script
def main():
    """Parse the command line and run the requested build/test actions."""
    parser = argparse.ArgumentParser(description='Run tests.')
    parser.add_argument('action', nargs='+', choices=['build', 'test'])
    parser.add_argument('--workspace', required=True, help='Workspace to build and test.')
    parser.add_argument('--scheme', required=True, help='Scheme to build and test.')
    parser.add_argument('--target', help='Test target.')
    parser.add_argument('--retries', type=int, default=4, help='The maximum number of times to retry a set of tests without progress.')
    parser.add_argument('--timeout', type=int, default=120, help='The number of seconds to wait without output before failing a test run.')
    parser.add_argument('--partition', type=int, default=0, help='The partition index to run.')
    parser.add_argument('--partition-count', dest='partition_count', type=int, default=1, help='The total number of partitions.')
    parser.add_argument('--devices', default='iPhone 5,9.0;iPad 2,9.0')
    args = parser.parse_args()

    xctool_path = '/usr/local/bin/xctool'
    build_path = os.path.abspath('build')
    try:
        os.makedirs(build_path)
    except OSError:
        # Narrowed from a bare except: only ignore the directory already
        # existing (or similar filesystem errors); a bare except would also
        # swallow KeyboardInterrupt and SystemExit.
        pass

    for action in args.action:
        if action == 'build':
            build_tests(xctool_path=xctool_path, workspace=args.workspace, scheme=args.scheme, build_path=build_path, timeout=args.timeout)
        elif action == 'test':
            if not args.target:
                print_message('Target is required when testing')
                exit(1)
            run_tests(xctool_path=xctool_path, workspace=args.workspace, scheme=args.scheme, target=args.target, build_path=build_path, partition=args.partition, partition_count=args.partition_count, devices=parse_devices(args.devices), retries=args.retries, timeout=args.timeout)
def print_message(message):
    # Print a banner-framed status line and flush immediately so CI log
    # output streams in real time.
    message = 'xctool-runner: ' + message
    print '=' * len(message)
    print message
    print '=' * len(message)
    sys.stdout.flush()
def parse_devices(string):
    """Parse 'Name,Version;Name,Version' specs into device-description dicts.

    Each dict carries the xcodebuild destination string, a human-readable
    description, and the raw name/version fields.
    """
    devices = []
    for spec in string.split(';'):
        name, version = spec.split(',', 1)
        devices.append(dict(
            destination='platform=iOS Simulator,OS={version},name={name}'.format(version=version, name=name),
            description='{name} / iOS {version}'.format(version=version, name=name),
            name=name,
            version=version,
        ))
    return devices
def build_tests(xctool_path, workspace, scheme, build_path, timeout):
    # Build the scheme's test bundles with xctool into build_path.
    # Exits the whole process (status 1) on a build failure or a timeout.
    print_message('Building tests')
    try:
        script = '{xctool_path} -workspace "{workspace}" -scheme "{scheme}" -sdk iphonesimulator CONFIGURATION_BUILD_DIR="{build_path}" -derivedDataPath="{build_path}" build-tests -reporter pretty'.format(
            xctool_path=xctool_path,
            workspace=workspace,
            scheme=scheme,
            build_path=build_path,
        )
        script_result, _ = run_script(script, timeout)
        if script_result != 0:
            print_message('Failed to build tests')
            exit(1)
    except TimeoutError:
        # Raised by run_script when no output appears for `timeout` seconds.
        print_message('Timed out building tests')
        exit(1)
def get_all_tests(xctool_path, workspace, scheme, build_path, target, timeout):
    # List all tests in `target` without running them, by asking xctool for
    # a json-stream report and collecting its 'begin-test' events.
    # Returns a list of {'class_name', 'method_name'} dicts; exits on error.
    print_message('Listing tests')
    stream_json_path = os.path.join(build_path, 'stream.json')
    try:
        script = '{xctool_path} -workspace "{workspace}" -scheme "{scheme}" -sdk iphonesimulator CONFIGURATION_BUILD_DIR="{build_path}" -derivedDataPath="{build_path}" run-tests -listTestsOnly -only {target} -reporter pretty -reporter json-stream:{stream_json_path}'.format(
            xctool_path=xctool_path,
            workspace=workspace,
            scheme=scheme,
            build_path=build_path,
            target=target,
            stream_json_path=stream_json_path,
        )
        script_result, _ = run_script(script, timeout)
        if script_result != 0:
            print_message('Failed to list tests')
            exit(1)
    except TimeoutError:
        print_message('Timed out listing tests')
        exit(1)

    # The json-stream reporter writes one JSON event per line.
    tests = []
    with open(stream_json_path) as f:
        for line in f.readlines():
            event = json.loads(line)
            if event['event'] == 'begin-test':
                tests.append(dict(
                    class_name=event['className'],
                    method_name=event['methodName'],
                ))
    return tests
def get_partitions(elements, count):
    """Split `elements` into `count` contiguous, nearly-equal partitions.

    Boundaries are computed by rounding i * len(elements) / count, so the
    partition sizes differ by at most one and cover the list exactly.
    range replaces the Python-2-only xrange; identical in a for loop and
    compatible with both Python 2 and 3.
    """
    partitions = []
    division = float(len(elements)) / float(count)
    for i in range(0, count):
        start = int(round(division * float(i)))
        end = int(round(division * float(i + 1)))
        partition = elements[start:end]
        partitions.append(partition)
    return partitions
def run_tests(xctool_path, workspace, scheme, build_path, target, partition, partition_count, devices, retries, timeout):
    # Run this partition's share of the suite on every device.  Failed tests
    # are retried; the attempt counter resets whenever an attempt makes
    # progress (some test newly passes) and the run aborts after `retries`
    # consecutive no-progress attempts.  Exits 1 on persistent failure.
    tests = get_all_tests(xctool_path=xctool_path, workspace=workspace, scheme=scheme, build_path=build_path, target=target, timeout=timeout)
    print_message('Got list of tests')
    partitions = get_partitions(tests, partition_count)
    partitioned_tests = partitions[partition]
    # Show the full test list, marking this partition's tests with '>'.
    for test in tests:
        marker = '>' if test in partitioned_tests else ' '
        print '\t{marker} {class_name}.{method_name}'.format(marker=marker, class_name=test['class_name'], method_name=test['method_name'])
    for device in devices:
        attempt = 1
        remaining_tests = partitioned_tests
        while remaining_tests and attempt <= retries + 1:
            attempt_description = 'attempt {attempt}'.format(attempt=attempt)
            print_message('Running {test_count} test(s) on {device_description} ({attempt_description})'.format(test_count=len(remaining_tests), device_description=device['description'], attempt_description=attempt_description))
            for test in remaining_tests:
                print '\t> {class_name}.{method_name}'.format(class_name=test['class_name'], method_name=test['method_name'])
            # Remove any stale json-stream report from a previous attempt.
            stream_json_path = os.path.join(build_path, 'stream.json')
            try:
                os.remove(stream_json_path)
            except:
                pass
            try:
                script = '{xctool_path} -workspace "{workspace}" -scheme "{scheme}" -sdk iphonesimulator -destination "{destination}" CONFIGURATION_BUILD_DIR="{build_path}" -derivedDataPath="{build_path}" run-tests -freshSimulator -resetSimulator -only {target} -reporter pretty -reporter json-stream:{stream_json_path}'.format(
                    xctool_path=xctool_path,
                    workspace=workspace,
                    scheme=scheme,
                    destination=device['destination'],
                    build_path=build_path,
                    target='{target}:{tests}'.format(target=target, tests=','.join(['{}/{}'.format(test['class_name'], test['method_name']) for test in remaining_tests])),
                    stream_json_path=stream_json_path,
                )
                run_script(script, timeout)
            except TimeoutError:
                # A timeout is not fatal here: whatever passed before the
                # hang is still credited below via the json-stream report.
                print_message('Timed out running tests')
            # A test "failed" unless the report shows it explicitly passed.
            failed_tests = list(remaining_tests)
            with open(stream_json_path) as f:
                for line in f.readlines():
                    event = json.loads(line)
                    if event['event'] == 'end-test' and event['succeeded'] == True:
                        failed_tests.remove(dict(
                            class_name=event['className'],
                            method_name=event['methodName'],
                        ))
            if failed_tests:
                print_message('{failure_count} of {test_count} test(s) FAILED on {device_description} ({attempt_description})'.format(failure_count=len(failed_tests), test_count=len(remaining_tests), device_description=device['description'], attempt_description=attempt_description))
                if len(failed_tests) < len(remaining_tests):
                    # Progress was made, so the retry budget resets.
                    attempt = 1
                else:
                    attempt += 1
            remaining_tests = failed_tests
        if remaining_tests:
            print_message('Tests FAILED on {device_description} too many times without progress'.format(device_description=device['description']))
            exit(1)
        print_message('Tests PASSED on {device_description}'.format(device_description=device['description']))
    print_message('All tests PASSED on all devices')
|
{
"content_hash": "ee0dfd83c85c48121a94e31c2fea0395",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 328,
"avg_line_length": 42.72636815920398,
"alnum_prop": 0.6137633907778295,
"repo_name": "CrossWaterBridge/python-xctool-runner",
"id": "dda0ea94d6c49cdaf263c558769fa4e4ee7369e5",
"size": "8588",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xctool_runner/__main__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11368"
}
],
"symlink_target": ""
}
|
import copy
import mock
from openstackclient.common import exceptions
from openstackclient.network.v2 import network
from openstackclient.tests import fakes
from openstackclient.tests.identity.v2_0 import fakes as identity_fakes_v2
from openstackclient.tests.identity.v3 import fakes as identity_fakes_v3
from openstackclient.tests.network import common
# Neutron resource keys used in request/response bodies.
RESOURCE = 'network'
RESOURCES = 'networks'
# Canned identifiers for the fake network record below.
FAKE_ID = 'iditty'
FAKE_NAME = 'noo'
FAKE_PROJECT = 'yaa'
# A fake network as the Neutron API would return it.
RECORD = {
    'id': FAKE_ID,
    'name': FAKE_NAME,
    'admin_state_up': True,
    'router:external': True,
    'status': 'ACTIVE',
    'subnets': ['a', 'b'],
    'tenant_id': FAKE_PROJECT,
}
# Short column set expected from the default list output.
COLUMNS = ['ID', 'Name', 'Subnets']
RESPONSE = {RESOURCE: copy.deepcopy(RECORD)}
# (column names, values) expected after the client filters RECORD:
# admin_state_up -> 'UP', router:external -> 'External', subnets joined.
FILTERED = [
    (
        'id',
        'name',
        'project_id',
        'router_type',
        'state',
        'status',
        'subnets',
    ),
    (
        FAKE_ID,
        FAKE_NAME,
        FAKE_PROJECT,
        'External',
        'UP',
        'ACTIVE',
        'a, b',
    ),
]
class TestCreateNetwork(common.TestNetworkBase):
    """Tests for the CreateNetwork command: argument parsing, the request
    body sent to create_network, and the filtered output rows."""

    def test_create_no_options(self):
        """Name only: admin_state_up defaults to True, nothing else sent."""
        arglist = [
            FAKE_NAME,
        ]
        verifylist = [
            ('name', FAKE_NAME),
            ('admin_state', True),
            ('shared', None),
            ('project', None),
        ]
        mocker = mock.Mock(return_value=copy.deepcopy(RESPONSE))
        self.app.client_manager.network.create_network = mocker
        cmd = network.CreateNetwork(self.app, self.namespace)

        parsed_args = self.check_parser(cmd, arglist, verifylist)
        result = list(cmd.take_action(parsed_args))

        mocker.assert_called_with({
            RESOURCE: {
                'admin_state_up': True,
                'name': FAKE_NAME,
            }
        })
        self.assertEqual(FILTERED, result)

    def test_create_all_options(self):
        """--disable/--share/--project with a v3 identity: the project name
        is resolved to its id and sent as tenant_id."""
        arglist = [
            "--disable",
            "--share",
            "--project", identity_fakes_v3.project_name,
            "--project-domain", identity_fakes_v3.domain_name,
            FAKE_NAME,
        ]
        verifylist = [
            ('admin_state', False),
            ('shared', True),
            ('project', identity_fakes_v3.project_name),
            ('project_domain', identity_fakes_v3.domain_name),
            ('name', FAKE_NAME),
        ]
        mocker = mock.Mock(return_value=copy.deepcopy(RESPONSE))
        self.app.client_manager.network.create_network = mocker
        # Stub a v3 identity client whose project/domain lookups succeed.
        identity_client = identity_fakes_v3.FakeIdentityv3Client(
            endpoint=fakes.AUTH_URL,
            token=fakes.AUTH_TOKEN,
        )
        self.app.client_manager.identity = identity_client
        self.projects_mock = self.app.client_manager.identity.projects
        self.projects_mock.get.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes_v3.PROJECT),
            loaded=True,
        )
        self.domains_mock = self.app.client_manager.identity.domains
        self.domains_mock.get.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes_v3.DOMAIN),
            loaded=True,
        )
        cmd = network.CreateNetwork(self.app, self.namespace)

        parsed_args = self.check_parser(cmd, arglist, verifylist)
        result = list(cmd.take_action(parsed_args))

        mocker.assert_called_with({
            RESOURCE: {
                'admin_state_up': False,
                'name': FAKE_NAME,
                'shared': True,
                'tenant_id': identity_fakes_v3.project_id,
            }
        })
        self.assertEqual(FILTERED, result)

    def test_create_other_options(self):
        """--enable/--no-share: explicit flags land in the request body."""
        arglist = [
            "--enable",
            "--no-share",
            FAKE_NAME,
        ]
        verifylist = [
            ('admin_state', True),
            ('shared', False),
            ('name', FAKE_NAME),
        ]
        mocker = mock.Mock(return_value=copy.deepcopy(RESPONSE))
        self.app.client_manager.network.create_network = mocker
        cmd = network.CreateNetwork(self.app, self.namespace)

        parsed_args = self.check_parser(cmd, arglist, verifylist)
        result = list(cmd.take_action(parsed_args))

        mocker.assert_called_with({
            RESOURCE: {
                'admin_state_up': True,
                'name': FAKE_NAME,
                'shared': False,
            }
        })
        self.assertEqual(FILTERED, result)

    def test_create_with_project_identityv2(self):
        """--project with a v2 identity: tenants (not projects) is queried."""
        arglist = [
            "--project", identity_fakes_v2.project_name,
            FAKE_NAME,
        ]
        verifylist = [
            ('admin_state', True),
            ('shared', None),
            ('name', FAKE_NAME),
            ('project', identity_fakes_v2.project_name),
        ]
        mocker = mock.Mock(return_value=copy.deepcopy(RESPONSE))
        self.app.client_manager.network.create_network = mocker
        identity_client = identity_fakes_v2.FakeIdentityv2Client(
            endpoint=fakes.AUTH_URL,
            token=fakes.AUTH_TOKEN,
        )
        self.app.client_manager.identity = identity_client
        self.projects_mock = self.app.client_manager.identity.tenants
        self.projects_mock.get.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes_v2.PROJECT),
            loaded=True,
        )
        cmd = network.CreateNetwork(self.app, self.namespace)

        parsed_args = self.check_parser(cmd, arglist, verifylist)
        result = list(cmd.take_action(parsed_args))

        mocker.assert_called_with({
            RESOURCE: {
                'admin_state_up': True,
                'name': FAKE_NAME,
                'tenant_id': identity_fakes_v2.project_id,
            }
        })
        self.assertEqual(FILTERED, result)

    def test_create_with_domain_identityv2(self):
        """--project-domain against a v2 identity client must raise
        AttributeError (presumably because the v2 client has no domain
        support — confirm against the command implementation)."""
        arglist = [
            "--project", identity_fakes_v3.project_name,
            "--project-domain", identity_fakes_v3.domain_name,
            FAKE_NAME,
        ]
        verifylist = [
            ('admin_state', True),
            ('shared', None),
            ('project', identity_fakes_v3.project_name),
            ('project_domain', identity_fakes_v3.domain_name),
            ('name', FAKE_NAME),
        ]
        mocker = mock.Mock(return_value=copy.deepcopy(RESPONSE))
        self.app.client_manager.network.create_network = mocker
        identity_client = identity_fakes_v2.FakeIdentityv2Client(
            endpoint=fakes.AUTH_URL,
            token=fakes.AUTH_TOKEN,
        )
        self.app.client_manager.identity = identity_client
        self.projects_mock = self.app.client_manager.identity.tenants
        self.projects_mock.get.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes_v2.PROJECT),
            loaded=True,
        )
        cmd = network.CreateNetwork(self.app, self.namespace)

        parsed_args = self.check_parser(cmd, arglist, verifylist)
        self.assertRaises(
            AttributeError,
            cmd.take_action,
            parsed_args,
        )
class TestDeleteNetwork(common.TestNetworkBase):
    """Tests for the DeleteNetwork command."""

    def test_delete(self):
        """The command resolves the name via list_networks, then deletes
        by id and returns None."""
        arglist = [
            FAKE_NAME,
        ]
        verifylist = [
            ('networks', [FAKE_NAME]),
        ]
        list_mock = mock.Mock(
            return_value={RESOURCES: [copy.deepcopy(RECORD)]})
        self.app.client_manager.network.list_networks = list_mock
        delete_mock = mock.Mock(return_value=None)
        self.app.client_manager.network.delete_network = delete_mock
        cmd = network.DeleteNetwork(self.app, self.namespace)

        parsed_args = self.check_parser(cmd, arglist, verifylist)
        result = cmd.take_action(parsed_args)

        delete_mock.assert_called_with(FAKE_ID)
        self.assertEqual(None, result)
@mock.patch(
    'openstackclient.api.network_v2.APIv2.network_list'
)
class TestListNetwork(common.TestNetworkBase):
    """Tests for the ListNetwork command.  The class-level patch injects
    the network_list mock (n_mock) into every test method."""

    def setUp(self):
        super(TestListNetwork, self).setUp()

        # Get the command object to test
        self.cmd = network.ListNetwork(self.app, self.namespace)

        # Two identical fake networks to exercise multi-row output.
        self.NETWORK_LIST = [
            copy.deepcopy(RECORD),
            copy.deepcopy(RECORD),
        ]

    def test_network_list_no_options(self, n_mock):
        """Default listing: short columns, external filter off."""
        n_mock.return_value = self.NETWORK_LIST

        arglist = []
        verifylist = [
            ('external', False),
            ('long', False),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        # DisplayCommandBase.take_action() returns two tuples
        columns, data = self.cmd.take_action(parsed_args)

        # Set expected values
        n_mock.assert_called_with(
            external=False,
        )

        self.assertEqual(tuple(COLUMNS), columns)
        datalist = [
            (FAKE_ID, FAKE_NAME, 'a, b'),
            (FAKE_ID, FAKE_NAME, 'a, b'),
        ]
        self.assertEqual(datalist, list(data))

    def test_list_external(self, n_mock):
        """--external is forwarded to the API call."""
        n_mock.return_value = self.NETWORK_LIST

        arglist = [
            '--external',
        ]
        verifylist = [
            ('external', True),
            ('long', False),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        # DisplayCommandBase.take_action() returns two tuples
        columns, data = self.cmd.take_action(parsed_args)

        # Set expected values
        n_mock.assert_called_with(
            external=True,
        )

        self.assertEqual(tuple(COLUMNS), columns)
        datalist = [
            (FAKE_ID, FAKE_NAME, 'a, b'),
            (FAKE_ID, FAKE_NAME, 'a, b'),
        ]
        self.assertEqual(datalist, list(data))

    def test_network_list_long(self, n_mock):
        """--long expands to the full column set with filtered values."""
        n_mock.return_value = self.NETWORK_LIST

        arglist = [
            '--long',
        ]
        verifylist = [
            ('long', True),
            ('external', False),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        # DisplayCommandBase.take_action() returns two tuples
        columns, data = self.cmd.take_action(parsed_args)

        # Set expected values
        n_mock.assert_called_with(
            external=False,
        )

        collist = (
            'ID',
            'Name',
            'Status',
            'Project',
            'State',
            'Shared',
            'Subnets',
            'Network Type',
            'Router Type',
        )
        self.assertEqual(columns, collist)
        # RECORD has no 'shared' or network-type keys, so those render ''.
        dataitem = (
            FAKE_ID,
            FAKE_NAME,
            'ACTIVE',
            FAKE_PROJECT,
            'UP',
            '',
            'a, b',
            '',
            'External',
        )
        datalist = [
            dataitem,
            dataitem,
        ]
        self.assertEqual(list(data), datalist)
class TestSetNetwork(common.TestNetworkBase):
    """Tests for the ``network set`` command."""

    def test_set_this(self):
        """Affirmative flags map onto the expected update request body."""
        arglist = [
            FAKE_NAME,
            '--enable',
            '--name', 'noob',
            '--share',
        ]
        verifylist = [
            ('identifier', FAKE_NAME),
            ('admin_state', True),
            ('name', 'noob'),
            ('shared', True),
        ]
        lister = mock.Mock(return_value={RESOURCES: [copy.deepcopy(RECORD)]})
        self.app.client_manager.network.list_networks = lister
        mocker = mock.Mock(return_value=None)
        self.app.client_manager.network.update_network = mocker
        cmd = network.SetNetwork(self.app, self.namespace)

        parsed_args = self.check_parser(cmd, arglist, verifylist)
        result = cmd.take_action(parsed_args)

        exp = {'admin_state_up': True, 'name': 'noob', 'shared': True}
        exp_record = {RESOURCE: exp}
        mocker.assert_called_with(FAKE_ID, exp_record)
        # assertIsNone is the idiomatic form of assertEqual(None, ...).
        self.assertIsNone(result)

    def test_set_that(self):
        """Negative flags map onto the expected update request body."""
        arglist = [
            FAKE_NAME,
            '--disable',
            '--no-share',
        ]
        verifylist = [
            ('identifier', FAKE_NAME),
            ('admin_state', False),
            ('shared', False),
        ]
        lister = mock.Mock(return_value={RESOURCES: [copy.deepcopy(RECORD)]})
        self.app.client_manager.network.list_networks = lister
        mocker = mock.Mock(return_value=None)
        self.app.client_manager.network.update_network = mocker
        cmd = network.SetNetwork(self.app, self.namespace)

        parsed_args = self.check_parser(cmd, arglist, verifylist)
        result = cmd.take_action(parsed_args)

        exp = {'admin_state_up': False, 'shared': False}
        exp_record = {RESOURCE: exp}
        mocker.assert_called_with(FAKE_ID, exp_record)
        self.assertIsNone(result)

    def test_set_nothing(self):
        """With nothing to change, the command raises CommandError."""
        arglist = [FAKE_NAME, ]
        verifylist = [('identifier', FAKE_NAME), ]
        lister = mock.Mock(return_value={RESOURCES: [copy.deepcopy(RECORD)]})
        self.app.client_manager.network.list_networks = lister
        mocker = mock.Mock(return_value=None)
        self.app.client_manager.network.update_network = mocker
        cmd = network.SetNetwork(self.app, self.namespace)

        parsed_args = self.check_parser(cmd, arglist, verifylist)
        self.assertRaises(exceptions.CommandError, cmd.take_action,
                          parsed_args)
@mock.patch(
    'openstackclient.api.network_v2.APIv2.find_attr'
)
class TestShowNetwork(common.TestNetworkBase):
    """Tests for ``network show``.

    The class-level patch injects the mocked ``find_attr`` API call into
    every test method as ``n_mock``.
    """

    def setUp(self):
        super(TestShowNetwork, self).setUp()

        # Get the command object to test
        self.cmd = network.ShowNetwork(self.app, self.namespace)
        self.NETWORK_ITEM = copy.deepcopy(RECORD)

    def test_show_no_options(self, n_mock):
        """Show by name resolves via find_attr and emits filtered fields."""
        arglist = [FAKE_NAME]
        verifylist = [('identifier', FAKE_NAME)]
        n_mock.return_value = copy.deepcopy(RECORD)
        self.cmd = network.ShowNetwork(self.app, self.namespace)

        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = list(self.cmd.take_action(parsed_args))

        n_mock.assert_called_with('networks', FAKE_NAME)
        self.assertEqual(FILTERED, result)

    def test_show_all_options(self, n_mock):
        """Same behavior as the no-options case: show takes no extras."""
        arglist = [
            FAKE_NAME,
        ]
        verifylist = [
            ('identifier', FAKE_NAME),
        ]
        n_mock.return_value = copy.deepcopy(RECORD)
        self.cmd = network.ShowNetwork(self.app, self.namespace)

        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = list(self.cmd.take_action(parsed_args))

        n_mock.assert_called_with('networks', FAKE_NAME)
        self.assertEqual(FILTERED, result)
|
{
"content_hash": "2f31fabc4074d851e85bf9d6ac592181",
"timestamp": "",
"source": "github",
"line_count": 470,
"max_line_length": 77,
"avg_line_length": 31.327659574468086,
"alnum_prop": 0.5602417821244227,
"repo_name": "sjsucohort6/openstack",
"id": "55062594f0693a583969afbc04e217aacff42bf5",
"size": "15290",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/venv/lib/python2.7/site-packages/openstackclient/tests/network/v2/test_network.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "410"
},
{
"name": "CSS",
"bytes": "144982"
},
{
"name": "FreeMarker",
"bytes": "14104"
},
{
"name": "HTML",
"bytes": "8308"
},
{
"name": "Java",
"bytes": "243125"
},
{
"name": "JavaScript",
"bytes": "1493715"
},
{
"name": "Python",
"bytes": "16921939"
},
{
"name": "Shell",
"bytes": "13926"
}
],
"symlink_target": ""
}
|
"""
Generate header file with macros defining MicroPython version info.
This script works with Python 2.6, 2.7, 3.3 and 3.4.
"""
from __future__ import print_function
import sys
import os
import datetime
import subprocess
def get_version_info_from_git():
    """Return (git_tag, git_hash) for the working tree, or None on failure.

    git_tag comes from ``git describe``; git_hash is the short HEAD hash,
    suffixed with "-dirty" when modified or staged files exist.  Returns
    None when subprocess support, the git binary, or a repository is
    unavailable, so the caller can fall back to another source.
    """
    # Python 2.6 doesn't have check_output, so check for that
    try:
        subprocess.check_output
        subprocess.check_call
    except AttributeError:
        return None
    # Note: git describe doesn't work if no tag is available
    try:
        git_tag = subprocess.check_output(
            ["git", "describe", "--tags", "--dirty", "--always", "--match", "v[1-9].*"],
            stderr=subprocess.STDOUT,
            universal_newlines=True,
        ).strip()
    except subprocess.CalledProcessError as er:
        if er.returncode == 128:
            # git exit code of 128 means no repository found
            return None
        git_tag = ""
    except OSError:
        # git binary not present on PATH.
        return None
    try:
        git_hash = subprocess.check_output(
            ["git", "rev-parse", "--short", "HEAD"],
            stderr=subprocess.STDOUT,
            universal_newlines=True,
        ).strip()
    except subprocess.CalledProcessError:
        git_hash = "unknown"
    except OSError:
        return None
    try:
        # Check if there are any modified files.
        subprocess.check_call(
            ["git", "diff", "--no-ext-diff", "--quiet", "--exit-code"], stderr=subprocess.STDOUT
        )
        # Check if there are any staged files.
        subprocess.check_call(
            ["git", "diff-index", "--cached", "--quiet", "HEAD", "--"], stderr=subprocess.STDOUT
        )
    except subprocess.CalledProcessError:
        # Either diff exited non-zero: the tree differs from HEAD.
        git_hash += "-dirty"
    except OSError:
        return None
    return git_tag, git_hash
def get_version_info_from_docs_conf():
    """Fall back to docs/conf.py for version info when git is unavailable.

    Returns ("v<version>", "<no hash>") parsed from the Sphinx config's
    ``version = release = '...'`` line, or None when no such line exists.
    """
    prefix = "version = release = '"
    conf_py = os.path.join(os.path.dirname(sys.argv[0]), "..", "docs", "conf.py")
    with open(conf_py) as f:
        for raw_line in f:
            if raw_line.startswith(prefix):
                ver = raw_line.strip()[len(prefix):].rstrip("'")
                return "v" + ver, "<no hash>"
    return None
def make_version_header(filename):
    """Write a C header defining MICROPY_GIT_TAG/GIT_HASH/BUILD_DATE macros.

    The file is rewritten only when its contents would change, so build
    systems don't see a spurious timestamp bump on every run.
    """
    # Get version info using git, with fallback to docs/conf.py
    info = get_version_info_from_git()
    if info is None:
        info = get_version_info_from_docs_conf()
    # NOTE(review): if both sources return None this unpack raises
    # TypeError -- presumably acceptable for this build script; confirm.
    git_tag, git_hash = info
    build_date = datetime.date.today()
    if "SOURCE_DATE_EPOCH" in os.environ:
        # Honour the reproducible-builds.org SOURCE_DATE_EPOCH override.
        build_date = datetime.datetime.utcfromtimestamp(
            int(os.environ["SOURCE_DATE_EPOCH"])
        ).date()
    # Generate the file with the git and version info
    file_data = """\
// This file was generated by py/makeversionhdr.py
#define MICROPY_GIT_TAG "%s"
#define MICROPY_GIT_HASH "%s"
#define MICROPY_BUILD_DATE "%s"
""" % (
        git_tag,
        git_hash,
        build_date.strftime("%Y-%m-%d"),
    )
    # Check if the file contents changed from last time
    write_file = True
    if os.path.isfile(filename):
        with open(filename, "r") as f:
            existing_data = f.read()
        if existing_data == file_data:
            write_file = False
    # Only write the file if we need to
    if write_file:
        print("GEN %s" % filename)
        with open(filename, "w") as f:
            f.write(file_data)
if __name__ == "__main__":
make_version_header(sys.argv[1])
|
{
"content_hash": "e22937aae147db3e05d276f73cb358b7",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 96,
"avg_line_length": 29.11111111111111,
"alnum_prop": 0.5807398708162067,
"repo_name": "bvernoux/micropython",
"id": "54b7fa9ab7d2a5096855ec4feb05f9f2896dca19",
"size": "3406",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "py/makeversionhdr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "50694"
},
{
"name": "C",
"bytes": "19869126"
},
{
"name": "C++",
"bytes": "2489380"
},
{
"name": "HTML",
"bytes": "84456"
},
{
"name": "Makefile",
"bytes": "49218"
},
{
"name": "Objective-C",
"bytes": "8382"
},
{
"name": "Python",
"bytes": "856777"
},
{
"name": "Shell",
"bytes": "6229"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
import subprocess
from abc import abstractmethod, abstractproperty
from contextlib import contextmanager
from six import string_types
from twitter.common.collections import maybe_list
from pants.base.build_environment import get_buildroot
from pants.util.contextutil import environment_as
from pants.util.dirutil import relativize_paths
from pants.util.meta import AbstractClass
logger = logging.getLogger(__name__)
class Executor(AbstractClass):
  """Executes java programs."""

  @staticmethod
  def _scrub_args(classpath, main, jvm_options, args, cwd):
    # Normalizes the public arguments: coerces scalars/None to lists and
    # rejects an empty or non-string main class name.
    classpath = maybe_list(classpath)
    if not isinstance(main, string_types) or not main:
      raise ValueError('A non-empty main classname is required, given: {}'.format(main))
    jvm_options = maybe_list(jvm_options or ())
    args = maybe_list(args or ())
    return classpath, main, jvm_options, args, cwd

  class Error(Exception):
    """Indicates an error launching a java program."""

  class InvalidDistribution(ValueError):
    """Indicates an invalid Distribution was used to construct this runner."""

  class Runner(object):
    """A re-usable executor that can run a configured java command line."""

    @abstractproperty
    def executor(self):
      """Returns the executor this runner uses to run itself."""

    @property
    def cmd(self):
      """Returns a string representation of the command that will be run."""
      return ' '.join(self.command)

    @abstractproperty
    def command(self):
      """Returns a copy of the command line that will be run as a list of command line tokens."""

    @abstractmethod
    def run(self, stdout=None, stderr=None, cwd=None):
      """Runs the configured java command.

      If there is a problem executing tha java program subclasses should raise Executor.Error.
      Its guaranteed that all arguments are valid as documented in `execute`

      :param stdout: An optional stream to pump stdout to; defaults to `sys.stdout`.
      :param stderr: An optional stream to pump stderr to; defaults to `sys.stderr`.
      :param string cwd: optionally set the working directory
      """

    @abstractmethod
    def kill(self):
      """Terminates the java command."""
      raise NotImplementedError

  def __init__(self, distribution):
    """Constructs an Executor that can be used to launch java programs.

    :param distribution: a validated java distribution to use when launching java programs.
    """
    # Duck-typed check: any object exposing `java` and `validate` is
    # accepted as a distribution.
    if not hasattr(distribution, 'java') or not hasattr(distribution, 'validate'):
      raise self.InvalidDistribution('A valid distribution is required, given: {}'
                                     .format(distribution))
    distribution.validate()
    self._distribution = distribution

  @property
  def distribution(self):
    """Returns the `Distribution` this executor runs via."""
    return self._distribution

  def runner(self, classpath, main, jvm_options=None, args=None, cwd=None):
    """Returns an `Executor.Runner` for the given java command."""
    return self._runner(*self._scrub_args(classpath, main, jvm_options, args, cwd=cwd))

  def execute(self, classpath, main, jvm_options=None, args=None, stdout=None, stderr=None,
              cwd=None):
    """Launches the java program defined by the classpath and main.

    :param list classpath: the classpath for the java program
    :param string main: the fully qualified class name of the java program's entry point
    :param list jvm_options: an optional sequence of options for the underlying jvm
    :param list args: an optional sequence of args to pass to the java program
    :param string cwd: optionally set the working directory

    Returns the exit code of the java program.
    Raises Executor.Error if there was a problem launching java itself.
    """
    executor = self.runner(classpath=classpath, main=main, jvm_options=jvm_options, args=args,
                           cwd=cwd)
    return executor.run(stdout=stdout, stderr=stderr, cwd=cwd)

  @abstractmethod
  def _runner(self, classpath, main, jvm_options, args, cwd=None):
    """Subclasses should return a `Runner` that can execute the given java main."""

  def _create_command(self, classpath, main, jvm_options, args, cwd=None):
    # Assembles argv as: java [jvm_options] -cp <classpath> <main> [args].
    cmd = [self._distribution.java]
    cmd.extend(jvm_options)
    if cwd:
      # Relativize classpath entries against the working directory when
      # one was given.
      classpath = relativize_paths(classpath, cwd)
    cmd.extend(['-cp', os.pathsep.join(classpath), main])
    cmd.extend(args)
    return cmd
class CommandLineGrabber(Executor):
  """Doesn't actually execute anything, just captures the cmd line."""

  def __init__(self, distribution):
    super(CommandLineGrabber, self).__init__(distribution=distribution)
    self._command = None  # Initialized when we run something.

  def _runner(self, classpath, main, jvm_options, args, cwd=None):
    self._command = self._create_command(classpath, main, jvm_options, args, cwd=cwd)

    class Runner(self.Runner):
      # NOTE: `_` stands in for the inner `self` so the enclosing method's
      # `self` (the CommandLineGrabber) stays visible inside these bodies.
      @property
      def executor(_):
        return self

      @property
      def command(_):
        return list(self._command)

      def run(_, stdout=None, stderr=None, cwd=None):
        # No-op: report success without spawning anything.
        return 0

    return Runner()

  @property
  def cmd(self):
    # NOTE(review): unlike Executor.Runner.cmd, this returns the raw list
    # (or None before _runner runs) rather than a joined string -- confirm
    # callers expect that.
    return self._command

  def kill(self):
    # Nothing to kill; no process is ever spawned.
    pass
class SubprocessExecutor(Executor):
  """Executes java programs by launching a jvm in a subprocess."""

  _SCRUBBED_ENV = {
      # We attempt to control the classpath for correctness, caching and invalidation reasons and
      # allowing CLASSPATH to influence would be a hermeticity leak
      'CLASSPATH': None,

      # We attempt to control jvm options and give user's explicit control in some cases as well.
      # In all cases we want predictable behavior - pants defaults, repo defaults, or user tweaks
      # specified on the command line.  In addition cli options can affect outputs; ie: class debug
      # info, target classfile version, etc - all breaking hermeticity.
      '_JAVA_OPTIONS': None,
      'JAVA_TOOL_OPTIONS': None
  }

  @classmethod
  @contextmanager
  def _maybe_scrubbed_env(cls):
    """Context manager that unsets env vars which would break jvm hermeticity."""
    for env_var in cls._SCRUBBED_ENV:
      value = os.getenv(env_var)
      if value:
        # logger.warning replaces the deprecated logger.warn alias; lazy
        # %-style args defer formatting until the record is emitted.
        logger.warning('Scrubbing %s=%s', env_var, value)
    with environment_as(**cls._SCRUBBED_ENV):
      yield

  def __init__(self, distribution):
    super(SubprocessExecutor, self).__init__(distribution=distribution)
    self._buildroot = get_buildroot()
    self._process = None

  def _create_command(self, classpath, main, jvm_options, args, cwd=None):
    # Default the working directory to the build root so relativized
    # classpaths resolve consistently.
    cwd = cwd or self._buildroot
    return super(SubprocessExecutor, self)._create_command(classpath, main, jvm_options,
                                                           args, cwd=cwd)

  def _runner(self, classpath, main, jvm_options, args, cwd=None):
    command = self._create_command(classpath, main, jvm_options, args, cwd=cwd)

    class Runner(self.Runner):
      # `_` stands in for the inner `self` so the enclosing executor's
      # `self` stays visible inside these bodies.
      @property
      def executor(_):
        return self

      @property
      def command(_):
        return list(command)

      def run(_, stdout=None, stderr=None, cwd=None):
        return self._spawn(command, stdout=stdout, stderr=stderr, cwd=cwd).wait()

    return Runner()

  def spawn(self, classpath, main, jvm_options=None, args=None, cwd=None, **subprocess_args):
    """Spawns the java program passing any extra subprocess kwargs on to subprocess.Popen.

    Returns the Popen process object handle to the spawned java program subprocess.
    """
    cmd = self._create_command(*self._scrub_args(classpath, main, jvm_options, args, cwd=cwd))
    return self._spawn(cmd, cwd, **subprocess_args)

  def kill(self):
    """Terminates the spawned jvm subprocess, if one is running."""
    if self._process is not None:
      self._process.kill()

  def _spawn(self, cmd, cwd=None, **subprocess_args):
    # Launches `cmd` with a scrubbed environment and remembers the handle
    # so kill() can terminate it later.
    with self._maybe_scrubbed_env():
      cwd = cwd or self._buildroot
      logger.debug('Executing: {cmd} args={args} at cwd={cwd}'
                   .format(cmd=' '.join(cmd), args=subprocess_args, cwd=cwd))
      try:
        self._process = subprocess.Popen(cmd, cwd=cwd, **subprocess_args)
        return self._process
      except OSError as e:
        raise self.Error('Problem executing {0}: {1}'.format(self._distribution.java, e))
|
{
"content_hash": "35683349710978aac60d51f6795dcdc8",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 99,
"avg_line_length": 36.334782608695654,
"alnum_prop": 0.6817039607514659,
"repo_name": "qma/pants",
"id": "4590686fd9a6cc776d15a0b9369e12113c88ad50",
"size": "8504",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/python/pants/java/executor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "781"
},
{
"name": "CSS",
"bytes": "11572"
},
{
"name": "Cucumber",
"bytes": "919"
},
{
"name": "GAP",
"bytes": "2459"
},
{
"name": "Go",
"bytes": "1437"
},
{
"name": "HTML",
"bytes": "64029"
},
{
"name": "Java",
"bytes": "315576"
},
{
"name": "JavaScript",
"bytes": "28962"
},
{
"name": "Protocol Buffer",
"bytes": "3783"
},
{
"name": "Python",
"bytes": "4166893"
},
{
"name": "Scala",
"bytes": "85457"
},
{
"name": "Shell",
"bytes": "49622"
},
{
"name": "Thrift",
"bytes": "2898"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
from unittest import mock
import numpy as np
from dials.algorithms.symmetry.cosym import observers
def test_SymmetryAnalysisObserver():
    """The observer should emit tables only after a symmetry analysis ran."""
    # setup script
    script = mock.Mock()

    # test when no symmetry analysis has been performed
    script._symmetry_analysis = None
    observer = observers.SymmetryAnalysisObserver()
    observer.update(script)
    d = observer.make_tables()
    # With no analysis attached, the section is present but empty.
    assert d == {"symmetry_analysis": {}}

    # Attach a fake analysis whose as_dict() mimics the real structure.
    script._symmetry_analysis = mock.Mock()
    script._symmetry_analysis.sym_ops_table = mock.Mock()
    script._symmetry_analysis.subgroups_table = mock.Mock()
    script._symmetry_analysis.as_dict = mock.Mock(
        return_value={
            "subgroup_scores": [
                {
                    "patterson_group": "-P 1",
                    "unit_cell": (10, 10, 10, 90, 90, 90),
                    "cb_op": "x,y,z",
                    "likelihood": 0.9,
                    "confidence": 0.9,
                    "stars": "*",
                    "z_cc_net": 1,
                    "z_cc_for": 2,
                    "z_cc_against": 3,
                    "max_angular_difference": 0.2,
                }
            ],
            "sym_op_scores": [
                {
                    "cc": 0.99,
                    "operator": "x,y,z",
                    "likelihood": 0.99,
                    "stars": "**",
                    "z_cc": 10,
                }
            ],
        }
    )
    # test the observer
    observer = observers.SymmetryAnalysisObserver()
    observer.update(script)
    d = observer.make_tables()
    assert "symmetry_analysis" in d
    assert set(d["symmetry_analysis"]) == {
        "summary_table",
        "subgroups_table",
        "sym_ops_table",
    }
def test_CosymClusterAnalysisObserver():
    """The cluster-analysis observer should expose cosym plot data."""
    n_datasets = 4
    rij = np.random.rand(n_datasets * n_datasets).reshape(n_datasets, n_datasets)
    coordinates = np.random.rand(n_datasets * 2).reshape(n_datasets, 2)

    # Fake cosym script carrying only the attributes the observer reads.
    fake_script = mock.Mock()
    fake_script.target = mock.Mock()
    fake_script.target.rij_matrix = rij
    fake_script.coords = coordinates
    fake_script.cluster_labels = np.zeros(n_datasets)

    observer = observers.CosymClusterAnalysisObserver()
    observer.update(fake_script)
    plots = observer.make_plots()
    assert "cosym_graphs" in plots
def test_CosymHTMLGenerator():
    # Placeholder: HTML generation is not currently exercised.  The disabled
    # code below shows the intended check (report written to test.html).
    pass
    # script = mock.Mock()
    # script.params.output.html = "test.html"

    ## Test that CosymHTMLGenerator works if all data is empty.
    # observer = observers.CosymHTMLGenerator()
    # observer.make_html(script)
    # assert os.path.exists("test.html")
|
{
"content_hash": "41e2b7ce78499312aa3baf734ddb7ead",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 63,
"avg_line_length": 28.633333333333333,
"alnum_prop": 0.5440434613892122,
"repo_name": "dials/dials",
"id": "34cbaa2b44010083a2a3c45205d91b1209e9b5f3",
"size": "2577",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/algorithms/symmetry/cosym/test_observers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "379"
},
{
"name": "C++",
"bytes": "1758129"
},
{
"name": "CMake",
"bytes": "34388"
},
{
"name": "Dockerfile",
"bytes": "329"
},
{
"name": "Gherkin",
"bytes": "400"
},
{
"name": "HTML",
"bytes": "25033"
},
{
"name": "Makefile",
"bytes": "76"
},
{
"name": "Python",
"bytes": "6147100"
},
{
"name": "Shell",
"bytes": "6419"
}
],
"symlink_target": ""
}
|
import json
# save info from common.print_info()
last_info = None


def output(video_extractor, pretty_print=True):
    """Print a VideoExtractor's metadata to stdout as JSON.

    With pretty_print (the default) the JSON is indented with sorted keys;
    otherwise it is emitted compactly in insertion order.
    """
    ve = video_extractor
    out = {
        'url': ve.url,
        'title': ve.title,
        'site': ve.name,
        'streams': ve.streams,
    }
    if pretty_print:
        print(json.dumps(out, indent=4, sort_keys=True, ensure_ascii=False))
    else:
        print(json.dumps(out))
# a fake VideoExtractor object to save info
class VideoExtractor(object):
    # Intentionally empty: instances serve as plain attribute bags (name,
    # title, url, streams) populated by print_info()/download_urls().
    pass
def print_info(site_info=None, title=None, type=None, size=None):
    """Record site/title info in the module-level last_info for download_urls().

    `type` and `size` are accepted for caller compatibility but unused here
    (`type` shadows the builtin; kept because it is part of the signature).
    """
    global last_info
    # create a VideoExtractor and save info for download_urls()
    ve = VideoExtractor()
    ve.name = site_info
    ve.title = title
    ve.url = None
    last_info = ve
def download_urls(urls=None, title=None, ext=None, total_size=None, refer=None):
    """Attach a default stream to the last print_info() record and emit JSON.

    Expects print_info() to have been called first so last_info is set.
    """
    ve = last_info
    # Describe the single '__default__' stream for this download.
    stream = {
        'container': ext,
        'size': total_size,
        'src': urls,
    }
    if refer:
        stream['refer'] = refer
    stream['video_profile'] = '__default__'
    ve.streams = {'__default__': stream}
    output(ve)
|
{
"content_hash": "8c9ad35e31823193d531631860d68200",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 80,
"avg_line_length": 26.181818181818183,
"alnum_prop": 0.6284722222222222,
"repo_name": "betaY/crawler",
"id": "86a42abccbe3fa17008b337ffbe3a6e0cec2dc48",
"size": "1153",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "you-get-master/src/you_get/json_output.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "48285"
},
{
"name": "C++",
"bytes": "34013"
},
{
"name": "CSS",
"bytes": "25363"
},
{
"name": "Groff",
"bytes": "8197"
},
{
"name": "HTML",
"bytes": "127658"
},
{
"name": "Java",
"bytes": "9625"
},
{
"name": "JavaScript",
"bytes": "16351"
},
{
"name": "Makefile",
"bytes": "342463"
},
{
"name": "Prolog",
"bytes": "917"
},
{
"name": "Python",
"bytes": "417869"
},
{
"name": "QMake",
"bytes": "1059"
},
{
"name": "Shell",
"bytes": "8246"
}
],
"symlink_target": ""
}
|
from markdown import markdown
import bleach
import re
from werkzeug.exceptions import NotFound
from . import consts as c
def md2html(md: str):
    """Render untrusted Markdown to sanitized, linkified HTML.

    Disallowed tags are stripped (not escaped) and bare URLs are turned
    into links.
    """
    allowed_tags = (
        'a', 'abbr', 'acronym', 'b', 'blockquote', 'code', 'em', 'i', 'li',
        'ol', 'pre', 'strong', 'ul', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'p',
        'table', 'tr', 'td', 'thead', 'tbody', 'th', 'sub', 'sup', 'del',
    )
    rendered = markdown(md, output_format='html')
    sanitized = bleach.clean(rendered, tags=allowed_tags, strip=True)
    return bleach.linkify(sanitized)
def check_pid(pid):
    """Return True if *pid* is a valid project identifier.

    A valid identifier consists solely of ASCII letters, digits, ``-`` and
    ``_``, and is shorter than ``c.PID_MAX_LENGTH``.
    """
    return _pid_check_re.match(pid) is not None and len(pid) < c.PID_MAX_LENGTH


# \Z (not $) anchors at the true end of the string: with $ the pattern
# also matched ids carrying a trailing newline, e.g. "abc\n".
_pid_check_re = re.compile(r'^[-A-Za-z0-9_]+\Z')
def check_pid_or_404(pid):
    """Abort with 404 Not Found when *pid* fails validation."""
    if check_pid(pid):
        return
    raise NotFound
|
{
"content_hash": "c4348289ed90158965605fe0ef06334a",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 119,
"avg_line_length": 32.391304347826086,
"alnum_prop": 0.5865771812080537,
"repo_name": "Einbert-Xeride/tsscp",
"id": "3fcf57a78c3cdb081a87fe39f1490b8e39729d04",
"size": "745",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tsscp/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "116"
},
{
"name": "HTML",
"bytes": "6701"
},
{
"name": "Python",
"bytes": "14733"
}
],
"symlink_target": ""
}
|
def extractTranslatingZeTianJi(item):
    """Parse a feed item's title into a 'Ze Tian Ji' release message.

    Returns None for preview posts and for titles carrying neither a
    chapter nor a volume number.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if 'preview' in item['title'].lower():
        return None
    if not (chp or vol):
        return None
    return buildReleaseMessageWithType(item, 'Ze Tian Ji ', vol, chp, frag=frag, postfix=postfix)
|
{
"content_hash": "a6d285e026c18a07a8dd8e7d80276ef6",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 94,
"avg_line_length": 36.625,
"alnum_prop": 0.7235494880546075,
"repo_name": "fake-name/ReadableWebProxy",
"id": "fea5fa92291ed547056a215ae9da2ec73afc4444",
"size": "293",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractTranslatingZeTianJi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
}
|
from functools import wraps
class exception_guard(object):
    """Guard against the given exception and raise a different exception."""

    def __init__(self, catchable, throwable=RuntimeError):
        # catchable: exception class (or tuple of classes) to intercept.
        # throwable: exception class or instance to raise instead, or None
        # to silently suppress the caught exception.
        if is_exception_class(catchable):
            self._catchable = catchable
        else:
            raise TypeError('catchable must be one or more exception types')
        if throwable is None or is_exception(throwable):
            self._throwable = throwable
        else:
            raise TypeError('throwable must be None or an exception')

    def throw(self, cause):
        """Throw an exception from the given cause."""
        throwable = self._throwable
        assert throwable is not None
        self._raisefrom(throwable, cause)

    def _raisefrom(self, exception, cause):
        # Raise `exception` chained from `cause`, working on both Python 2
        # and 3.
        # "raise ... from ..." syntax only supported in Python 3.
        assert cause is not None  # "raise ... from None" is not supported.
        if isinstance(exception, BaseException):
            # We're given an exception instance, so just use it as-is.
            pass
        else:
            # We're given an exception class, so instantiate it with a
            # helpful error message.
            assert issubclass(exception, BaseException)
            name = type(cause).__name__
            message = 'guard triggered by %s exception' % name
            exception = exception(message)
        try:
            # exec() keeps this module importable on Python 2, where the
            # "raise ... from ..." syntax would be a compile-time
            # SyntaxError; the SyntaxError surfaces here at exec time
            # instead.
            exec("raise exception from cause", globals(), locals())
        except SyntaxError:
            # Python too old. Fall back to a simple raise, without cause.
            raise exception

    # === Context manager special methods ===

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Only react to the configured exception type(s); any other
        # exception propagates unchanged (implicit falsy return).
        if exc_type is not None and issubclass(exc_type, self._catchable):
            if self._throwable is None:
                # Suppress the exception.
                return True
            else:
                self.throw(exc_value)

    # === Use exception_guard as a decorator ===

    def __call__(self, function):
        # Decorator form: wrap `function` so the same catch/translate or
        # catch/suppress policy applies to every call.
        catchable = self._catchable
        suppress_exception = (self._throwable is None)
        @wraps(function)
        def inner(*args, **kwargs):
            try:
                result = function(*args, **kwargs)
            except catchable as error:
                if suppress_exception:
                    # Swallow the exception; the call returns None.
                    return
                else:
                    self.throw(error)
            else:
                return result
        return inner
# Two helper functions.

def is_exception(obj):
    """Return whether obj is an exception.

    >>> is_exception(ValueError) # An exception class.
    True
    >>> is_exception(ValueError()) # An exception instance.
    True
    >>> is_exception(float)
    False

    """
    # Instances are exceptions; otherwise obj must be a class deriving
    # from BaseException (the isinstance(obj, type) test avoids the
    # TypeError issubclass raises for non-class arguments).
    if isinstance(obj, BaseException):
        return True
    return isinstance(obj, type) and issubclass(obj, BaseException)
def is_exception_class(obj):
    """Return whether obj is an exception class, or a non-empty tuple of
    exception classes.

    >>> is_exception_class(ValueError)
    True
    >>> is_exception_class(float)
    False
    >>> is_exception_class(ValueError()) # An instance, not a class.
    False
    >>> is_exception_class((ValueError, KeyError))
    True

    """
    try:
        if isinstance(obj, tuple):
            # bool(obj) ensures the empty tuple yields False rather than
            # leaking the tuple itself as the falsy return value.
            return bool(obj) and all(issubclass(X, BaseException) for X in obj)
        return issubclass(obj, BaseException)
    except TypeError:
        # issubclass raises TypeError for non-class arguments/members.
        return False
|
{
"content_hash": "8b86013444ac76796b02ee8cef66b9bb",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 76,
"avg_line_length": 32.018348623853214,
"alnum_prop": 0.5865329512893983,
"repo_name": "ActiveState/code",
"id": "cefd71130b44100692420538276babfc190654c3",
"size": "3490",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/580808_Guard_against_exceptiwrong/recipe-580808.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
}
|
"""
Deal with the part of a Tx that specifies where the Bitcoin goes to.
The MIT License (MIT)
Copyright (c) 2013 by Richard Kiss
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import decimal
from ..serialize.bitcoin_streamer import parse_struct, stream_struct
from .script.tools import disassemble
COIN_FACTOR = decimal.Decimal(100000000)  # satoshis per BTC


class TxOut(object):
    """
    The part of a Tx that specifies where the Bitcoin goes to.
    """
    def __init__(self, coin_value, script):
        # coin_value is stored in satoshis (1 BTC == COIN_FACTOR satoshis).
        self.coin_value = int(coin_value)
        self.script = script

    def stream(self, f):
        """Write this TxOut to binary stream *f* (uint64 value + script blob)."""
        stream_struct("QS", f, self.coin_value, self.script)

    @classmethod
    def parse(cls, f):
        """Read a TxOut from binary stream *f*.

        The first parameter is named ``cls`` (not ``self``) per the
        classmethod convention.
        """
        return cls(*parse_struct("QS", f))

    def __str__(self):
        return 'TxOut<%s "%s">' % (decimal.Decimal(self.coin_value)/COIN_FACTOR, disassemble(self.script))
|
{
"content_hash": "ce14495d4d41f511c03cf8ab67413cdc",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 106,
"avg_line_length": 36.53846153846154,
"alnum_prop": 0.72,
"repo_name": "ohmannomma/proofofexistence",
"id": "ec70d039ae00bbd2ebf2d9802eb2eafdb7c5f16b",
"size": "1925",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pycoin/tx/TxOut.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6668"
},
{
"name": "HTML",
"bytes": "21179"
},
{
"name": "JavaScript",
"bytes": "59156"
},
{
"name": "Python",
"bytes": "567526"
}
],
"symlink_target": ""
}
|
"""Test case runner."""
import os
import sys
import unittest
# Directory containing this runner; unittest discovery is rooted here.
TESTS_DIR = os.path.dirname(os.path.abspath(__file__))


def main():
    """Run unittest discovery over TESTS_DIR and return its result."""
    # Equivalent to invoking: <prog> discover <TESTS_DIR> <original args...>
    sys.argv[1:1] = ['discover', TESTS_DIR]
    return unittest.main()
if __name__ == '__main__':
sys.exit(main())
|
{
"content_hash": "5efe9ce41ff1ea666f5f751ea5bed42f",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 54,
"avg_line_length": 15.882352941176471,
"alnum_prop": 0.6407407407407407,
"repo_name": "nicko96/Chrome-Infra",
"id": "c6fee4d607ecb122dd22045b2c8dbf1717f3710e",
"size": "868",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "appengine/chromium_rietveld/tests/run_tests.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "100398"
},
{
"name": "Go",
"bytes": "648467"
},
{
"name": "HTML",
"bytes": "7323317"
},
{
"name": "JavaScript",
"bytes": "913960"
},
{
"name": "Makefile",
"bytes": "11281"
},
{
"name": "Protocol Buffer",
"bytes": "2730"
},
{
"name": "Python",
"bytes": "4034630"
},
{
"name": "Shell",
"bytes": "21687"
}
],
"symlink_target": ""
}
|
import logging
from skybase import config as sky_cfg
from skybase.skytask import SkyTask
from skybase import skytask
from skybase.utils.logger import Logger
from skybase.planet import Planet
from skybase.utils import simple_error_format
import skybase.actions.route
def route_ping_add_arguments(parser):
    """Attach the ``route ping`` command-line options to *parser*."""
    option_specs = [
        (('-m', '--mode'),
         dict(dest='exec_mode',
              action='store',
              choices={'local', 'restapi'},
              default='restapi',
              help='execution mode (default REST api)')),
        (('-p', '--planet'),
         dict(dest='planet_name',
              action='store',
              default=sky_cfg.DEFAULT_PLANET,
              help='planet name')),
        (('-n', '--count'),
         dict(dest='count',
              action='store',
              default=1,
              help='number of times to repeat')),
        (('-s', '--sleep'),
         dict(dest='sleep_interval',
              action='store',
              default=0,
              help='interval to sleep before returning')),
    ]
    for flags, options in option_specs:
        parser.add_argument(*flags, **options)
class Ping(SkyTask):
    """SkyTask implementing the ``route.ping`` command."""

    def __init__(self, all_args=None, runner_cfg=None):
        SkyTask.__init__(self, all_args, runner_cfg)
        self.logger = Logger(logging.getLogger(__name__), logging.INFO)
        self.name = 'route.ping'
        self.args = all_args
        self.runner_cfg = runner_cfg

    def preflight_check(self):
        """Validate that the target planet can be instantiated.

        Returns self.preflight_check_result -- presumably initialized by
        the SkyTask base class (not visible here; confirm).
        """
        # initialize results container
        preflight_result = []
        # instantiate planet
        try:
            self.planet = Planet(self.args.get('planet_name'))
        except Exception as e:
            # NOTE(review): skybase.exceptions is referenced below but only
            # skybase.actions.route is imported at the top of this module --
            # verify skybase.exceptions is importable at runtime.
            self.preflight_check_result.status = 'FAIL'
            preflight_result.append(skybase.exceptions.SkyBaseValidationError('planet init: {0}'.format(simple_error_format(e))))
        self.preflight_check_result.set_output(preflight_result)
        return self.preflight_check_result

    def execute(self):
        """Invoke the ping action --count times and collect the raw results."""
        # TODO: create standard result object
        result_dict = dict(
            data=dict(),
            type=skytask.output_format_raw,
            status=None
        )
        # ping worker number of times indicated by --count
        for n in range(int(self.args.get('count'))):
            ping_N = 'ping_{0}'.format(n)
            result_dict['data'][ping_N] = skybase.actions.route.ping(**self.args)
        # TODO: result.status will be derived from all action status values.
        self.result.status = sky_cfg.API_STATUS_SUCCESS
        self.result.format = skytask.output_format_json
        self.result.output = result_dict['data']
        return self.result
|
{
"content_hash": "90e4741239d5abc25a3b5d753d83ec9b",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 129,
"avg_line_length": 27.892473118279568,
"alnum_prop": 0.6033153430994603,
"repo_name": "lithiumtech/skybase.io",
"id": "5c2665addb260b88c0d4841af78584f75de69f3a",
"size": "2594",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skybase/skytask/route/ping.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "394577"
},
{
"name": "Ruby",
"bytes": "548"
},
{
"name": "Shell",
"bytes": "3105"
}
],
"symlink_target": ""
}
|
"""
numpy.ma : a package to handle missing or invalid values.
This package was initially written for numarray by Paul F. Dubois
at Lawrence Livermore National Laboratory.
In 2006, the package was completely rewritten by Pierre Gerard-Marchant
(University of Georgia) to make the MaskedArray class a subclass of ndarray,
and to improve support of structured arrays.
Copyright 1999, 2000, 2001 Regents of the University of California.
Released for unlimited redistribution.
* Adapted for numpy_core 2005 by Travis Oliphant and (mainly) Paul Dubois.
* Subclassing of the base `ndarray` 2006 by Pierre Gerard-Marchant
(pgmdevlist_AT_gmail_DOT_com)
* Improvements suggested by Reggie Dugard (reggie_AT_merfinllc_DOT_com)
.. moduleauthor:: Pierre Gerard-Marchant
"""
# pylint: disable-msg=E1002
from __future__ import division, absolute_import, print_function
import sys
import warnings
from functools import reduce
import numpy as np
import numpy.core.umath as umath
import numpy.core.numerictypes as ntypes
from numpy import ndarray, amax, amin, iscomplexobj, bool_, _NoValue
from numpy import array as narray
from numpy.lib.function_base import angle
from numpy.compat import (
getargspec, formatargspec, long, basestring, unicode, bytes, sixu
)
from numpy import expand_dims as n_expand_dims
if sys.version_info[0] >= 3:
import pickle
else:
import cPickle as pickle
# Public API exported by ``from numpy.ma import *``.
__all__ = [
    'MAError', 'MaskError', 'MaskType', 'MaskedArray', 'abs', 'absolute',
    'add', 'all', 'allclose', 'allequal', 'alltrue', 'amax', 'amin',
    'angle', 'anom', 'anomalies', 'any', 'append', 'arange', 'arccos',
    'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh',
    'argmax', 'argmin', 'argsort', 'around', 'array', 'asanyarray',
    'asarray', 'bitwise_and', 'bitwise_or', 'bitwise_xor', 'bool_', 'ceil',
    'choose', 'clip', 'common_fill_value', 'compress', 'compressed',
    'concatenate', 'conjugate', 'copy', 'cos', 'cosh', 'count', 'cumprod',
    'cumsum', 'default_fill_value', 'diag', 'diagonal', 'diff', 'divide',
    'dump', 'dumps', 'empty', 'empty_like', 'equal', 'exp', 'expand_dims',
    'fabs', 'filled', 'fix_invalid', 'flatten_mask',
    'flatten_structured_array', 'floor', 'floor_divide', 'fmod',
    'frombuffer', 'fromflex', 'fromfunction', 'getdata', 'getmask',
    'getmaskarray', 'greater', 'greater_equal', 'harden_mask', 'hypot',
    'identity', 'ids', 'indices', 'inner', 'innerproduct', 'isMA',
    'isMaskedArray', 'is_mask', 'is_masked', 'isarray', 'left_shift',
    'less', 'less_equal', 'load', 'loads', 'log', 'log10', 'log2',
    'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'make_mask',
    'make_mask_descr', 'make_mask_none', 'mask_or', 'masked',
    'masked_array', 'masked_equal', 'masked_greater',
    'masked_greater_equal', 'masked_inside', 'masked_invalid',
    'masked_less', 'masked_less_equal', 'masked_not_equal',
    'masked_object', 'masked_outside', 'masked_print_option',
    'masked_singleton', 'masked_values', 'masked_where', 'max', 'maximum',
    'maximum_fill_value', 'mean', 'min', 'minimum', 'minimum_fill_value',
    'mod', 'multiply', 'mvoid', 'ndim', 'negative', 'nomask', 'nonzero',
    'not_equal', 'ones', 'outer', 'outerproduct', 'power', 'prod',
    'product', 'ptp', 'put', 'putmask', 'rank', 'ravel', 'remainder',
    'repeat', 'reshape', 'resize', 'right_shift', 'round', 'round_',
    'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'soften_mask',
    'sometrue', 'sort', 'sqrt', 'squeeze', 'std', 'subtract', 'sum',
    'swapaxes', 'take', 'tan', 'tanh', 'trace', 'transpose', 'true_divide',
    'var', 'where', 'zeros',
    ]
# Scalar type used for mask elements, and the singleton standing for
# "this array carries no mask at all".
MaskType = np.bool_
nomask = MaskType(0)
class MaskedArrayFutureWarning(FutureWarning):
    """Warning category for MaskedArray behaviors scheduled to change."""
    pass
def doc_note(initialdoc, note):
    """
    Adds a Notes section to an existing docstring.

    Returns None when `initialdoc` is None, `initialdoc` unchanged when
    `note` is None, and otherwise the docstring with `note` appended
    under a "Notes" heading.
    """
    if initialdoc is None:
        return
    if note is None:
        return initialdoc
    # Template applied to (initialdoc, note); a runtime literal, left intact.
    newdoc = """
%s
Notes
-----
%s
"""
    return newdoc % (initialdoc, note)
def get_object_signature(obj):
    """
    Return the argument signature of `obj` as a string.

    Falls back to the empty string when `obj` cannot be introspected.
    """
    try:
        return formatargspec(*getargspec(obj))
    except TypeError:
        return ''
###############################################################################
# Exceptions #
###############################################################################
class MAError(Exception):
    """
    Base class for all masked-array related errors.
    """
    pass
class MaskError(MAError):
    """
    Error raised for mask-specific problems (a subclass of MAError).
    """
    pass
###############################################################################
# Filling options #
###############################################################################
# b: boolean - c: complex - f: floats - i: integer - O: object - S: string
default_filler = {'b': True,
'c': 1.e20 + 0.0j,
'f': 1.e20,
'i': 999999,
'O': '?',
'S': b'N/A',
'u': 999999,
'V': '???',
'U': sixu('N/A')
}
# Add datetime64 and timedelta64 types
for v in ["Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps",
"fs", "as"]:
default_filler["M8[" + v + "]"] = np.datetime64("NaT", v)
default_filler["m8[" + v + "]"] = np.timedelta64("NaT", v)
max_filler = ntypes._minvals
max_filler.update([(k, -np.inf) for k in [np.float32, np.float64]])
min_filler = ntypes._maxvals
min_filler.update([(k, +np.inf) for k in [np.float32, np.float64]])
if 'float128' in ntypes.typeDict:
max_filler.update([(np.float128, -np.inf)])
min_filler.update([(np.float128, +np.inf)])
def default_fill_value(obj):
    """
    Return the default fill value for the argument object.

    The value depends on the datatype of the input array or scalar:
    bool -> True, int -> 999999, float -> 1.e20, complex -> 1.e20+0j,
    object -> '?', string -> 'N/A'.

    Parameters
    ----------
    obj : ndarray, dtype or scalar
        The array data-type or scalar for which the default fill value
        is returned.

    Returns
    -------
    fill_value : scalar
        The default fill value.

    Examples
    --------
    >>> np.ma.default_fill_value(1)
    999999
    >>> np.ma.default_fill_value(np.dtype(complex))
    (1e+20+0j)
    """
    # Array-likes: defer to the dtype-based validation helper.
    if hasattr(obj, 'dtype'):
        return _check_fill_value(None, obj.dtype)
    if isinstance(obj, np.dtype):
        if obj.subdtype:
            return default_filler.get(obj.subdtype[0].kind, '?')
        # Datetime/timedelta dtypes are keyed by their full type string.
        if obj.kind in 'Mm':
            return default_filler.get(obj.str[1:], '?')
        return default_filler.get(obj.kind, '?')
    # Scalars: dispatch on the Python type (order matters — bool is an
    # int subclass and would hit the 'i' branch, as in the original).
    if isinstance(obj, float):
        return default_filler['f']
    if isinstance(obj, (int, long)):
        return default_filler['i']
    if isinstance(obj, bytes):
        return default_filler['S']
    if isinstance(obj, unicode):
        return default_filler['U']
    if isinstance(obj, complex):
        return default_filler['c']
    return default_filler['O']
def _recursive_extremum_fill_value(ndtype, extremum):
names = ndtype.names
if names:
deflist = []
for name in names:
fval = _recursive_extremum_fill_value(ndtype[name], extremum)
deflist.append(fval)
return tuple(deflist)
return extremum[ndtype]
def minimum_fill_value(obj):
    """
    Return the maximum value representable by the dtype of `obj`.

    Useful as a fill value when taking the minimum of an array with a
    given dtype (masked entries then never win the comparison).

    Parameters
    ----------
    obj : ndarray or dtype
        An object that can be queried for its numeric type.

    Returns
    -------
    val : scalar
        The maximum representable value.

    Raises
    ------
    TypeError
        If `obj` isn't a suitable numeric type.

    See Also
    --------
    maximum_fill_value : The inverse function.
    set_fill_value : Set the filling value of a masked array.
    MaskedArray.fill_value : Return current fill value.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> ma.minimum_fill_value(np.int8())
    127
    >>> ma.minimum_fill_value(np.array([1, 2, 3], dtype=np.float32))
    inf
    """
    if hasattr(obj, 'dtype'):
        return _recursive_extremum_fill_value(obj.dtype, min_filler)
    if isinstance(obj, float):
        return min_filler[ntypes.typeDict['float_']]
    if isinstance(obj, int):
        return min_filler[ntypes.typeDict['int_']]
    if isinstance(obj, long):
        return min_filler[ntypes.typeDict['uint']]
    if isinstance(obj, np.dtype):
        return min_filler[obj]
    raise TypeError("Unsuitable type for calculating minimum.")
def maximum_fill_value(obj):
    """
    Return the minimum value representable by the dtype of `obj`.

    Useful as a fill value when taking the maximum of an array with a
    given dtype (masked entries then never win the comparison).

    Parameters
    ----------
    obj : {ndarray, dtype}
        An object that can be queried for its numeric type.

    Returns
    -------
    val : scalar
        The minimum representable value.

    Raises
    ------
    TypeError
        If `obj` isn't a suitable numeric type.

    See Also
    --------
    minimum_fill_value : The inverse function.
    set_fill_value : Set the filling value of a masked array.
    MaskedArray.fill_value : Return current fill value.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> ma.maximum_fill_value(np.int8())
    -128
    >>> ma.maximum_fill_value(np.array([1, 2, 3], dtype=np.float32))
    -inf
    """
    if hasattr(obj, 'dtype'):
        return _recursive_extremum_fill_value(obj.dtype, max_filler)
    if isinstance(obj, float):
        return max_filler[ntypes.typeDict['float_']]
    if isinstance(obj, int):
        return max_filler[ntypes.typeDict['int_']]
    if isinstance(obj, long):
        return max_filler[ntypes.typeDict['uint']]
    if isinstance(obj, np.dtype):
        return max_filler[obj]
    raise TypeError("Unsuitable type for calculating maximum.")
def _recursive_set_default_fill_value(dtypedescr):
    """Build a tuple of default fill values matching a dtype descr list,
    recursing into nested (structured) field descriptions."""
    return tuple(
        tuple(_recursive_set_default_fill_value(ftype))
        if isinstance(ftype, list)
        else default_fill_value(np.dtype(ftype))
        for ftype in (descr[1] for descr in dtypedescr)
    )
def _recursive_set_fill_value(fillvalue, dtypedescr):
fillvalue = np.resize(fillvalue, len(dtypedescr))
output_value = []
for (fval, descr) in zip(fillvalue, dtypedescr):
cdtype = descr[1]
if isinstance(cdtype, list):
output_value.append(tuple(_recursive_set_fill_value(fval, cdtype)))
else:
output_value.append(np.array(fval, dtype=cdtype).item())
return tuple(output_value)
def _check_fill_value(fill_value, ndtype):
    """
    Private function validating the given `fill_value` for the given dtype.

    If fill_value is None, it is set to the default corresponding to the dtype
    if this latter is standard (no fields). If the datatype is flexible (named
    fields), fill_value is set to a tuple whose elements are the default fill
    values corresponding to each field.

    If fill_value is not None, its value is forced to the given dtype.
    """
    ndtype = np.dtype(ndtype)
    fields = ndtype.fields
    if fill_value is None:
        # No value supplied: fall back to the per-dtype default (one default
        # per field for structured dtypes).
        if fields:
            descr = ndtype.descr
            fill_value = np.array(_recursive_set_default_fill_value(descr),
                                  dtype=ndtype,)
        else:
            fill_value = default_fill_value(ndtype)
    elif fields:
        # Structured dtype: coerce the supplied value field by field.
        fdtype = [(_[0], _[1]) for _ in ndtype.descr]
        if isinstance(fill_value, (ndarray, np.void)):
            try:
                fill_value = np.array(fill_value, copy=False, dtype=fdtype)
            except ValueError:
                err_msg = "Unable to transform %s to dtype %s"
                raise ValueError(err_msg % (fill_value, fdtype))
        else:
            descr = ndtype.descr
            fill_value = np.asarray(fill_value, dtype=object)
            fill_value = np.array(_recursive_set_fill_value(fill_value, descr),
                                  dtype=ndtype)
    else:
        # Scalar dtype: a string fill value only makes sense for
        # object/string/void/unicode dtypes.
        if isinstance(fill_value, basestring) and (ndtype.char not in 'OSVU'):
            err_msg = "Cannot set fill value of string with array of dtype %s"
            raise TypeError(err_msg % ndtype)
        else:
            # In case we want to convert 1e20 to int.
            try:
                fill_value = np.array(fill_value, copy=False, dtype=ndtype)
            except OverflowError:
                # Raise TypeError instead of OverflowError. OverflowError
                # is seldom used, and the real problem here is that the
                # passed fill_value is not compatible with the ndtype.
                err_msg = "Fill value %s overflows dtype %s"
                raise TypeError(err_msg % (fill_value, ndtype))
    return np.array(fill_value)
def set_fill_value(a, fill_value):
    """
    Set the filling value of `a` in place, if `a` is a masked array.

    If `a` is not a masked array the function returns silently, without
    doing anything.

    Parameters
    ----------
    a : array_like
        Input array.
    fill_value : dtype
        Filling value. A consistency test is performed to make sure
        the value is compatible with the dtype of `a`.

    Returns
    -------
    None

    See Also
    --------
    maximum_fill_value : Return the default fill value for a dtype.
    MaskedArray.fill_value : Return current fill value.
    MaskedArray.set_fill_value : Equivalent method.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = ma.masked_where(np.arange(5) < 3, np.arange(5))
    >>> ma.set_fill_value(a, -999)
    >>> a.fill_value
    -999
    """
    # Silently ignore anything that is not a masked array.
    if not isinstance(a, MaskedArray):
        return
    a.set_fill_value(fill_value)
def get_fill_value(a):
    """
    Return the fill value of `a` if it is a masked array; otherwise
    return the default fill value for its type.
    """
    if isinstance(a, MaskedArray):
        return a.fill_value
    return default_fill_value(a)
def common_fill_value(a, b):
    """
    Return the fill value shared by masked arrays `a` and `b`, or None.

    If ``a.fill_value == b.fill_value`` the common value is returned,
    otherwise None.

    Parameters
    ----------
    a, b : MaskedArray
        The masked arrays for which to compare fill values.

    Returns
    -------
    fill_value : scalar or None
        The common fill value, or None.

    Examples
    --------
    >>> x = np.ma.array([0, 1.], fill_value=3)
    >>> y = np.ma.array([0, 1.], fill_value=3)
    >>> np.ma.common_fill_value(x, y)
    3.0
    """
    fv_a = get_fill_value(a)
    fv_b = get_fill_value(b)
    return fv_a if fv_a == fv_b else None
def filled(a, fill_value=None):
    """
    Return `a` as an ndarray with masked data replaced by a fill value.

    If `a` is not a `MaskedArray` (more precisely, has no ``filled``
    method), `a` itself is returned for ndarrays and a new ndarray is
    built otherwise. If `fill_value` is None, ``a.fill_value`` is used.

    Parameters
    ----------
    a : MaskedArray or array_like
        An input object.
    fill_value : scalar, optional
        Filling value. Default is None.

    Returns
    -------
    a : ndarray
        The filled array.

    See Also
    --------
    compressed
    """
    if hasattr(a, 'filled'):
        return a.filled(fill_value)
    if isinstance(a, ndarray):
        # Should we check for contiguity ? and a.flags['CONTIGUOUS']:
        return a
    if isinstance(a, dict):
        # Wrap dicts as 0-d object arrays rather than letting np.array guess.
        return np.array(a, 'O')
    return np.array(a)
def get_masked_subclass(*arrays):
    """
    Return the youngest MaskedArray subclass among the given arrays.

    In case of siblings, the first listed takes over. MaskedConstant is
    never returned; it is replaced by plain MaskedArray.
    """
    if len(arrays) == 1:
        only = arrays[0]
        rcls = type(only) if isinstance(only, MaskedArray) else MaskedArray
    else:
        klasses = [type(a) for a in arrays]
        rcls = klasses[0]
        if not issubclass(rcls, MaskedArray):
            rcls = MaskedArray
        # Walk the remaining classes, keeping the most derived one.
        for klass in klasses[1:]:
            if issubclass(klass, rcls):
                rcls = klass
    # Don't return MaskedConstant as result: revert to MaskedArray.
    if rcls.__name__ == 'MaskedConstant':
        return MaskedArray
    return rcls
def getdata(a, subok=True):
    """
    Return the data of a masked array as an ndarray.

    Return ``a._data`` if `a` exposes it (as ``MaskedArray`` does), else
    convert `a` to an ndarray or subclass thereof (depending on `subok`).

    Parameters
    ----------
    a : array_like
        Input ``MaskedArray``, alternatively a ndarray or a subclass thereof.
    subok : bool
        Whether to force the output to be a `pure` ndarray (False) or to
        return a subclass of ndarray if appropriate (True, default).

    See Also
    --------
    getmask : Return the mask of a masked array, or nomask.
    getmaskarray : Return the mask of a masked array, or full array of False.
    """
    try:
        data = a._data
    except AttributeError:
        data = np.array(a, copy=False, subok=subok)
    return data if subok else data.view(ndarray)
# Backward-compatible alias.
get_data = getdata
def fix_invalid(a, mask=nomask, copy=True, fill_value=None):
    """
    Return input with invalid data masked and replaced by a fill value.

    Invalid data means values of `nan`, `inf`, etc.

    Parameters
    ----------
    a : array_like
        Input array, a (subclass of) ndarray.
    mask : sequence, optional
        Mask. Must be convertible to an array of booleans with the same
        shape as `data`. True indicates a masked (i.e. invalid) data.
    copy : bool, optional
        Whether to use a copy of `a` (True) or to fix `a` in place (False).
        Default is True.
    fill_value : scalar, optional
        Value used for fixing invalid data. Default is None, in which case
        the ``a.fill_value`` is used.

    Returns
    -------
    b : MaskedArray
        The input array with invalid entries fixed.

    Notes
    -----
    A copy is performed by default.
    """
    a = masked_array(a, copy=copy, mask=mask, subok=True)
    invalid = ~np.isfinite(a._data)
    if not invalid.any():
        return a
    # Mask the invalid entries and overwrite them so the data buffer no
    # longer carries non-finite values.
    a._mask |= invalid
    a._data[invalid] = a.fill_value if fill_value is None else fill_value
    return a
###############################################################################
# Ufuncs #
###############################################################################
# Registries populated by the _Masked*Operation wrappers below: map each
# wrapped ufunc to its valid-input domain checker and to the fill value(s)
# substituted for masked entries before the ufunc is applied.
ufunc_domain = {}
ufunc_fills = {}
class _DomainCheckInterval:
"""
Define a valid interval, so that :
``domain_check_interval(a,b)(x) == True`` where
``x < a`` or ``x > b``.
"""
def __init__(self, a, b):
"domain_check_interval(a,b)(x) = true where x < a or y > b"
if (a > b):
(a, b) = (b, a)
self.a = a
self.b = b
def __call__(self, x):
"Execute the call behavior."
return umath.logical_or(umath.greater(x, self.b),
umath.less(x, self.a))
class _DomainTan:
"""
Define a valid interval for the `tan` function, so that:
``domain_tan(eps) = True`` where ``abs(cos(x)) < eps``
"""
def __init__(self, eps):
"domain_tan(eps) = true where abs(cos(x)) < eps)"
self.eps = eps
def __call__(self, x):
"Executes the call behavior."
return umath.less(umath.absolute(umath.cos(x)), self.eps)
class _DomainSafeDivide:
"""
Define a domain for safe division.
"""
def __init__(self, tolerance=None):
self.tolerance = tolerance
def __call__(self, a, b):
# Delay the selection of the tolerance to here in order to reduce numpy
# import times. The calculation of these parameters is a substantial
# component of numpy's import time.
if self.tolerance is None:
self.tolerance = np.finfo(float).tiny
# don't call ma ufuncs from __array_wrap__ which would fail for scalars
a, b = np.asarray(a), np.asarray(b)
return umath.absolute(a) * self.tolerance >= umath.absolute(b)
class _DomainGreater:
"""
DomainGreater(v)(x) is True where x <= v.
"""
def __init__(self, critical_value):
"DomainGreater(v)(x) = true where x <= v"
self.critical_value = critical_value
def __call__(self, x):
"Executes the call behavior."
return umath.less_equal(x, self.critical_value)
class _DomainGreaterEqual:
"""
DomainGreaterEqual(v)(x) is True where x < v.
"""
def __init__(self, critical_value):
"DomainGreaterEqual(v)(x) = true where x < v"
self.critical_value = critical_value
def __call__(self, x):
"Executes the call behavior."
return umath.less(x, self.critical_value)
class _MaskedUnaryOperation:
    """
    Defines masked version of unary operations, where invalid values are
    pre-masked.

    Parameters
    ----------
    mufunc : callable
        The function for which to define a masked version. Made available
        as ``_MaskedUnaryOperation.f``.
    fill : scalar, optional
        Filling value, default is 0.
    domain : class instance
        Domain for the function. Should be one of the ``_Domain*``
        classes. Default is None.
    """
    def __init__(self, mufunc, fill=0, domain=None):
        # Record the wrapped ufunc plus its fill/domain, mirror its
        # docstring/name, and register it in the module-level tables.
        self.f = mufunc
        self.fill = fill
        self.domain = domain
        self.__doc__ = getattr(mufunc, "__doc__", str(mufunc))
        self.__name__ = getattr(mufunc, "__name__", str(mufunc))
        ufunc_domain[mufunc] = domain
        ufunc_fills[mufunc] = fill
    def __call__(self, a, *args, **kwargs):
        """
        Execute the call behavior: apply the ufunc to the data of `a` and
        combine the input mask with any domain/invalid-result mask.
        """
        d = getdata(a)
        # Deal with domain
        if self.domain is not None:
            # Case 1.1. : Domained function
            with np.errstate(divide='ignore', invalid='ignore'):
                result = self.f(d, *args, **kwargs)
            # Make a mask
            m = ~umath.isfinite(result)
            m |= self.domain(d)
            m |= getmask(a)
        else:
            # Case 1.2. : Function without a domain
            # Get the result and the mask
            result = self.f(d, *args, **kwargs)
            m = getmask(a)
        if not result.ndim:
            # Case 2.1. : The result is scalarscalar
            if m:
                return masked
            return result
        if m is not nomask:
            # Case 2.2. The result is an array
            # We need to fill the invalid data back w/ the input Now,
            # that's plain silly: in C, we would just skip the element and
            # keep the original, but we do have to do it that way in Python
            # In case result has a lower dtype than the inputs (as in
            # equal)
            try:
                np.copyto(result, d, where=m)
            except TypeError:
                # Best effort only: some result dtypes cannot receive the
                # input values; the mask still hides those entries.
                pass
        # Transform to
        masked_result = result.view(get_masked_subclass(a))
        masked_result._mask = m
        masked_result._update_from(a)
        return masked_result
    def __str__(self):
        # Human-readable description of the wrapper.
        return "Masked version of %s. [Invalid values are masked]" % str(self.f)
class _MaskedBinaryOperation:
    """
    Define masked version of binary operations, where invalid
    values are pre-masked.

    Parameters
    ----------
    mbfunc : function
        The function for which to define a masked version. Made available
        as ``_MaskedBinaryOperation.f``.
    domain : class instance
        Default domain for the function. Should be one of the ``_Domain*``
        classes. Default is None.
    fillx : scalar, optional
        Filling value for the first argument, default is 0.
    filly : scalar, optional
        Filling value for the second argument, default is 0.
    """
    def __init__(self, mbfunc, fillx=0, filly=0):
        """
        abfunc(fillx, filly) must be defined.
        abfunc(x, filly) = x for all x to enable reduce.
        """
        self.f = mbfunc
        self.fillx = fillx
        self.filly = filly
        self.__doc__ = getattr(mbfunc, "__doc__", str(mbfunc))
        self.__name__ = getattr(mbfunc, "__name__", str(mbfunc))
        ufunc_domain[mbfunc] = None
        ufunc_fills[mbfunc] = (fillx, filly)
    def __call__(self, a, b, *args, **kwargs):
        """
        Execute the call behavior: apply the ufunc to both data arrays and
        OR the input masks into the result's mask.
        """
        # Get the data, as ndarray
        (da, db) = (getdata(a), getdata(b))
        # Get the result
        # NOTE(review): np.errstate() with a separate np.seterr call works
        # (errstate restores the previous state on exit), but passing the
        # kwargs to errstate directly is the idiomatic form.
        with np.errstate():
            np.seterr(divide='ignore', invalid='ignore')
            result = self.f(da, db, *args, **kwargs)
        # Get the mask for the result
        (ma, mb) = (getmask(a), getmask(b))
        if ma is nomask:
            if mb is nomask:
                m = nomask
            else:
                m = umath.logical_or(getmaskarray(a), mb)
        elif mb is nomask:
            m = umath.logical_or(ma, getmaskarray(b))
        else:
            m = umath.logical_or(ma, mb)
        # Case 1. : scalar
        if not result.ndim:
            if m:
                return masked
            return result
        # Case 2. : array
        # Revert result to da where masked
        if m is not nomask and m.any():
            # any errors, just abort; impossible to guarantee masked values
            try:
                np.copyto(result, 0, casting='unsafe', where=m)
                # avoid using "*" since this may be overlaid
                masked_da = umath.multiply(m, da)
                # only add back if it can be cast safely
                if np.can_cast(masked_da.dtype, result.dtype, casting='safe'):
                    result += masked_da
            except:
                # NOTE(review): bare except is deliberate best-effort here,
                # but it would also hide unrelated failures.
                pass
        # Transforms to a (subclass of) MaskedArray
        masked_result = result.view(get_masked_subclass(a, b))
        masked_result._mask = m
        if isinstance(a, MaskedArray):
            masked_result._update_from(a)
        elif isinstance(b, MaskedArray):
            masked_result._update_from(b)
        return masked_result
    def reduce(self, target, axis=0, dtype=None):
        """
        Reduce `target` along the given `axis`, filling masked entries
        with ``self.filly`` and AND-ing the mask along the axis.
        """
        tclass = get_masked_subclass(target)
        m = getmask(target)
        t = filled(target, self.filly)
        if t.shape == ():
            # Promote 0-d input (and its mask) to 1-d so reduce is defined.
            t = t.reshape(1)
            if m is not nomask:
                m = make_mask(m, copy=1)
                m.shape = (1,)
        if m is nomask:
            tr = self.f.reduce(t, axis)
            mr = nomask
        else:
            tr = self.f.reduce(t, axis, dtype=dtype or t.dtype)
            mr = umath.logical_and.reduce(m, axis)
        if not tr.shape:
            if mr:
                return masked
            else:
                return tr
        masked_tr = tr.view(tclass)
        masked_tr._mask = mr
        return masked_tr
    def outer(self, a, b):
        """
        Return the function applied to the outer product of a and b.
        """
        (da, db) = (getdata(a), getdata(b))
        d = self.f.outer(da, db)
        ma = getmask(a)
        mb = getmask(b)
        if ma is nomask and mb is nomask:
            m = nomask
        else:
            ma = getmaskarray(a)
            mb = getmaskarray(b)
            # Outer OR of the masks: a cell is masked if either input is.
            m = umath.logical_or.outer(ma, mb)
        if (not m.ndim) and m:
            return masked
        if m is not nomask:
            np.copyto(d, da, where=m)
        if not d.shape:
            return d
        masked_d = d.view(get_masked_subclass(a, b))
        masked_d._mask = m
        return masked_d
    def accumulate(self, target, axis=0):
        """Accumulate `target` along `axis` after filling with y fill
        value.
        """
        tclass = get_masked_subclass(target)
        t = filled(target, self.filly)
        result = self.f.accumulate(t, axis)
        # NOTE(review): no mask is propagated here — the accumulated result
        # is returned unmasked by design of the original implementation.
        masked_result = result.view(tclass)
        return masked_result
    def __str__(self):
        return "Masked version of " + str(self.f)
class _DomainedBinaryOperation:
    """
    Define binary operations that have a domain, like divide.

    They have no reduce, outer or accumulate.

    Parameters
    ----------
    mbfunc : function
        The function for which to define a masked version. Made available
        as ``_DomainedBinaryOperation.f``.
    domain : class instance
        Default domain for the function. Should be one of the ``_Domain*``
        classes.
    fillx : scalar, optional
        Filling value for the first argument, default is 0.
    filly : scalar, optional
        Filling value for the second argument, default is 0.
    """
    def __init__(self, dbfunc, domain, fillx=0, filly=0):
        """abfunc(fillx, filly) must be defined.

        abfunc(x, filly) = x for all x to enable reduce.
        """
        self.f = dbfunc
        self.domain = domain
        self.fillx = fillx
        self.filly = filly
        self.__doc__ = getattr(dbfunc, "__doc__", str(dbfunc))
        self.__name__ = getattr(dbfunc, "__name__", str(dbfunc))
        ufunc_domain[dbfunc] = domain
        ufunc_fills[dbfunc] = (fillx, filly)
    def __call__(self, a, b, *args, **kwargs):
        "Execute the call behavior."
        # Get the data
        (da, db) = (getdata(a), getdata(b))
        # Get the result
        with np.errstate(divide='ignore', invalid='ignore'):
            result = self.f(da, db, *args, **kwargs)
        # Get the mask as a combination of the source masks and invalid
        m = ~umath.isfinite(result)
        m |= getmask(a)
        m |= getmask(b)
        # Apply the domain
        domain = ufunc_domain.get(self.f, None)
        if domain is not None:
            m |= filled(domain(da, db), True)
        # Take care of the scalar case first
        if (not m.ndim):
            if m:
                return masked
            else:
                return result
        # When the mask is True, put back da if possible
        # any errors, just abort; impossible to guarantee masked values
        try:
            np.copyto(result, 0, casting='unsafe', where=m)
            # avoid using "*" since this may be overlaid
            masked_da = umath.multiply(m, da)
            # only add back if it can be cast safely
            if np.can_cast(masked_da.dtype, result.dtype, casting='safe'):
                result += masked_da
        except:
            # NOTE(review): deliberate best-effort; masked entries are hidden
            # by the mask regardless of what the data buffer holds.
            pass
        # Transforms to a (subclass of) MaskedArray
        masked_result = result.view(get_masked_subclass(a, b))
        masked_result._mask = m
        if isinstance(a, MaskedArray):
            masked_result._update_from(a)
        elif isinstance(b, MaskedArray):
            masked_result._update_from(b)
        return masked_result
    def __str__(self):
        return "Masked version of " + str(self.f)
# Module-level masked ufuncs, built from the wrapper classes above.
# Unary ufuncs
exp = _MaskedUnaryOperation(umath.exp)
conjugate = _MaskedUnaryOperation(umath.conjugate)
sin = _MaskedUnaryOperation(umath.sin)
cos = _MaskedUnaryOperation(umath.cos)
tan = _MaskedUnaryOperation(umath.tan)
arctan = _MaskedUnaryOperation(umath.arctan)
arcsinh = _MaskedUnaryOperation(umath.arcsinh)
sinh = _MaskedUnaryOperation(umath.sinh)
cosh = _MaskedUnaryOperation(umath.cosh)
tanh = _MaskedUnaryOperation(umath.tanh)
abs = absolute = _MaskedUnaryOperation(umath.absolute)
# Rebinds the `angle` imported from numpy.lib.function_base with its
# masked wrapper.
angle = _MaskedUnaryOperation(angle)  # from numpy.lib.function_base
fabs = _MaskedUnaryOperation(umath.fabs)
negative = _MaskedUnaryOperation(umath.negative)
floor = _MaskedUnaryOperation(umath.floor)
ceil = _MaskedUnaryOperation(umath.ceil)
around = _MaskedUnaryOperation(np.round_)
logical_not = _MaskedUnaryOperation(umath.logical_not)
# Domained unary ufuncs
sqrt = _MaskedUnaryOperation(umath.sqrt, 0.0,
                             _DomainGreaterEqual(0.0))
log = _MaskedUnaryOperation(umath.log, 1.0,
                            _DomainGreater(0.0))
log2 = _MaskedUnaryOperation(umath.log2, 1.0,
                             _DomainGreater(0.0))
log10 = _MaskedUnaryOperation(umath.log10, 1.0,
                              _DomainGreater(0.0))
# NOTE: rebinds the plain masked `tan` defined above; this domained
# version is the one that remains visible.
tan = _MaskedUnaryOperation(umath.tan, 0.0,
                            _DomainTan(1e-35))
arcsin = _MaskedUnaryOperation(umath.arcsin, 0.0,
                               _DomainCheckInterval(-1.0, 1.0))
arccos = _MaskedUnaryOperation(umath.arccos, 0.0,
                               _DomainCheckInterval(-1.0, 1.0))
arccosh = _MaskedUnaryOperation(umath.arccosh, 1.0,
                                _DomainGreaterEqual(1.0))
arctanh = _MaskedUnaryOperation(umath.arctanh, 0.0,
                                _DomainCheckInterval(-1.0 + 1e-15, 1.0 - 1e-15))
# Binary ufuncs
add = _MaskedBinaryOperation(umath.add)
subtract = _MaskedBinaryOperation(umath.subtract)
multiply = _MaskedBinaryOperation(umath.multiply, 1, 1)
arctan2 = _MaskedBinaryOperation(umath.arctan2, 0.0, 1.0)
# Comparisons: reduction has no meaning for these, so disable .reduce.
equal = _MaskedBinaryOperation(umath.equal)
equal.reduce = None
not_equal = _MaskedBinaryOperation(umath.not_equal)
not_equal.reduce = None
less_equal = _MaskedBinaryOperation(umath.less_equal)
less_equal.reduce = None
greater_equal = _MaskedBinaryOperation(umath.greater_equal)
greater_equal.reduce = None
less = _MaskedBinaryOperation(umath.less)
less.reduce = None
greater = _MaskedBinaryOperation(umath.greater)
greater.reduce = None
logical_and = _MaskedBinaryOperation(umath.logical_and)
alltrue = _MaskedBinaryOperation(umath.logical_and, 1, 1).reduce
logical_or = _MaskedBinaryOperation(umath.logical_or)
sometrue = logical_or.reduce
logical_xor = _MaskedBinaryOperation(umath.logical_xor)
bitwise_and = _MaskedBinaryOperation(umath.bitwise_and)
bitwise_or = _MaskedBinaryOperation(umath.bitwise_or)
bitwise_xor = _MaskedBinaryOperation(umath.bitwise_xor)
hypot = _MaskedBinaryOperation(umath.hypot)
# Domained binary ufuncs
divide = _DomainedBinaryOperation(umath.divide, _DomainSafeDivide(), 0, 1)
true_divide = _DomainedBinaryOperation(umath.true_divide,
                                       _DomainSafeDivide(), 0, 1)
floor_divide = _DomainedBinaryOperation(umath.floor_divide,
                                        _DomainSafeDivide(), 0, 1)
remainder = _DomainedBinaryOperation(umath.remainder,
                                     _DomainSafeDivide(), 0, 1)
fmod = _DomainedBinaryOperation(umath.fmod, _DomainSafeDivide(), 0, 1)
mod = _DomainedBinaryOperation(umath.mod, _DomainSafeDivide(), 0, 1)
###############################################################################
# Mask creation functions #
###############################################################################
def _recursive_make_descr(datatype, newtype=bool_):
"Private function allowing recursion in make_descr."
# Do we have some name fields ?
if datatype.names:
descr = []
for name in datatype.names:
field = datatype.fields[name]
if len(field) == 3:
# Prepend the title to the name
name = (field[-1], name)
descr.append((name, _recursive_make_descr(field[0], newtype)))
return descr
# Is this some kind of composite a la (np.float,2)
elif datatype.subdtype:
mdescr = list(datatype.subdtype)
mdescr[0] = _recursive_make_descr(datatype.subdtype[0], newtype)
return tuple(mdescr)
else:
return newtype
def make_mask_descr(ndtype):
    """
    Construct a dtype description list from a given dtype.

    Returns a new dtype object, with the type of all fields in `ndtype` to a
    boolean type. Field names are not altered.

    Parameters
    ----------
    ndtype : dtype
        The dtype to convert.

    Returns
    -------
    result : dtype
        A dtype that looks like `ndtype`, the type of all fields is boolean.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> dtype = np.dtype({'names':['foo', 'bar'],
                          'formats':[np.float32, np.int64]})
    >>> dtype
    dtype([('foo', '<f4'), ('bar', '<i8')])
    >>> ma.make_mask_descr(dtype)
    dtype([('foo', '|b1'), ('bar', '|b1')])
    >>> ma.make_mask_descr(np.float32)
    <type 'numpy.bool_'>

    """
    # Make sure we do have a dtype
    if not isinstance(ndtype, np.dtype):
        ndtype = np.dtype(ndtype)
    # Use the np.bool_ scalar type: the bare ``np.bool`` alias of the builtin
    # is deprecated (and was removed in numpy 1.24), while ``np.bool_`` is
    # the actual boolean scalar type intended here.
    return np.dtype(_recursive_make_descr(ndtype, np.bool_))
def getmask(a):
    """
    Return the mask of a masked array, or nomask.

    Return the mask of `a` as an ndarray if `a` is a `MaskedArray` and the
    mask is not `nomask`, else return `nomask`. To guarantee a full array
    of booleans of the same shape as a, use `getmaskarray`.

    Parameters
    ----------
    a : array_like
        Input `MaskedArray` for which the mask is required.

    See Also
    --------
    getdata : Return the data of a masked array as an ndarray.
    getmaskarray : Return the mask of a masked array, or full array of False.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = ma.masked_equal([[1, 2], [3, 4]], 2)
    >>> ma.getmask(a)
    array([[False,  True],
           [False, False]], dtype=bool)

    Result when mask == `nomask`

    >>> b = ma.masked_array([[1, 2], [3, 4]])
    >>> ma.getmask(b) is ma.nomask
    True

    """
    # EAFP: anything without a ``_mask`` attribute is treated as unmasked.
    try:
        return a._mask
    except AttributeError:
        return nomask
# Backward-compatible alias for `getmask`.
get_mask = getmask
def getmaskarray(arr):
    """
    Return the mask of a masked array, or full boolean array of False.

    Return the mask of `arr` as an ndarray if `arr` is a `MaskedArray` and
    the mask is not `nomask`, else return a full boolean array of False of
    the same shape as `arr`.

    Parameters
    ----------
    arr : array_like
        Input `MaskedArray` for which the mask is required.

    See Also
    --------
    getmask : Return the mask of a masked array, or nomask.
    getdata : Return the data of a masked array as an ndarray.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = ma.masked_equal([[1,2],[3,4]], 2)
    >>> a
    masked_array(data =
     [[1 --]
     [3 4]],
          mask =
     [[False  True]
     [False False]],
          fill_value=999999)
    >>> ma.getmaskarray(a)
    array([[False,  True],
           [False, False]], dtype=bool)

    Result when mask == ``nomask``

    >>> b = ma.masked_array([[1,2],[3,4]])
    >>> b
    masked_array(data =
     [[1 2]
     [3 4]],
          mask =
     False,
          fill_value=999999)
    >>> ma.getmaskarray(b)
    array([[False, False],
           [False, False]], dtype=bool)

    """
    mask = getmask(arr)
    if mask is nomask:
        # No per-element mask stored: synthesize a full array of False
        # matching the shape (and field structure, if any) of `arr`.
        mask = make_mask_none(np.shape(arr), getattr(arr, 'dtype', None))
    return mask
def is_mask(m):
    """
    Return True if m is a valid, standard mask.

    This function does not check the contents of the input, only that the
    type is MaskType. In particular, this function returns False if the
    mask has a flexible dtype.

    Parameters
    ----------
    m : array_like
        Array to test.

    Returns
    -------
    result : bool
        True if `m.dtype.type` is MaskType, False otherwise.

    See Also
    --------
    isMaskedArray : Test whether input is an instance of MaskedArray.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> m = ma.masked_equal([0, 1, 0, 2, 3], 0)
    >>> ma.is_mask(m)
    False
    >>> ma.is_mask(m.mask)
    True

    Input must be an ndarray (or have similar attributes)
    for it to be considered a valid mask.

    >>> ma.is_mask([False, True, False])
    False
    >>> ma.is_mask(np.array([False, True, False]))
    True

    Arrays with complex (structured) dtypes don't return True.

    >>> dtype = np.dtype({'names':['monty', 'pithon'],
                          'formats':[np.bool, np.bool]})
    >>> m = np.array([(True, False), (False, True)], dtype=dtype)
    >>> ma.is_mask(m)
    False

    """
    # Objects without a dtype (lists, scalars, None, ...) are never masks.
    result = False
    try:
        result = m.dtype.type is MaskType
    except AttributeError:
        pass
    return result
def make_mask(m, copy=False, shrink=True, dtype=MaskType):
    """
    Create a boolean mask from an array.

    Return `m` as a boolean mask, creating a copy if necessary or requested.
    The function can accept any sequence that is convertible to integers,
    or ``nomask``. Does not require that contents must be 0s and 1s, values
    of 0 are interpreted as False, everything else as True.

    Parameters
    ----------
    m : array_like
        Potential mask.
    copy : bool, optional
        Whether to return a copy of `m` (True) or `m` itself (False).
    shrink : bool, optional
        Whether to shrink `m` to ``nomask`` if all its values are False.
    dtype : dtype, optional
        Data-type of the output mask. By default, the output mask has a
        dtype of MaskType (bool). If the dtype is flexible, each field has
        a boolean dtype. This is ignored when `m` is ``nomask``, in which
        case ``nomask`` is always returned.

    Returns
    -------
    result : ndarray
        A boolean mask derived from `m`.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> ma.make_mask([1, 0, 2, -3])
    array([ True, False,  True,  True], dtype=bool)

    Effect of the `shrink` parameter.

    >>> m = np.zeros(4)
    >>> ma.make_mask(m)
    False
    >>> ma.make_mask(m, shrink=False)
    array([False, False, False, False], dtype=bool)

    """
    if m is nomask:
        return nomask
    if isinstance(m, ndarray):
        # We won't return after this point to make sure we can shrink the mask
        # Fill the mask in case there are missing data
        filled_m = filled(m, True)
        # Make sure the requested dtype is a valid mask dtype.
        target = make_mask_descr(dtype)
        if filled_m.dtype == target:
            result = filled_m.copy() if copy else filled_m
        else:
            result = np.array(filled_m, dtype=target, copy=copy)
    else:
        result = np.array(filled(m, True), dtype=MaskType)
    # Bas les masques !  Collapse an all-False, non-structured mask to nomask.
    if shrink and not result.dtype.names and not result.any():
        return nomask
    return result
def make_mask_none(newshape, dtype=None):
    """
    Return a boolean mask of the given shape, filled with False.

    This function returns a boolean ndarray with all entries False, that can
    be used in common mask manipulations. If a complex dtype is specified,
    the type of each field is converted to a boolean type.

    Parameters
    ----------
    newshape : tuple
        A tuple indicating the shape of the mask.
    dtype : {None, dtype}, optional
        If None, use a MaskType instance. Otherwise, use a new datatype with
        the same fields as `dtype`, converted to boolean types.

    Returns
    -------
    result : ndarray
        An ndarray of appropriate shape and dtype, filled with False.

    See Also
    --------
    make_mask : Create a boolean mask from an array.
    make_mask_descr : Construct a dtype description list from a given dtype.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> ma.make_mask_none((3,))
    array([False, False, False], dtype=bool)

    Defining a more complex dtype.

    >>> dtype = np.dtype({'names':['foo', 'bar'],
                          'formats':[np.float32, np.int]})
    >>> ma.make_mask_none((3,), dtype=dtype)
    array([(False, False), (False, False), (False, False)],
          dtype=[('foo', '|b1'), ('bar', '|b1')])

    """
    # A plain MaskType mask unless a (possibly structured) dtype is given.
    mask_dtype = MaskType if dtype is None else make_mask_descr(dtype)
    return np.zeros(newshape, dtype=mask_dtype)
def mask_or(m1, m2, copy=False, shrink=True):
    """
    Combine two masks with the ``logical_or`` operator.

    The result may be a view on `m1` or `m2` if the other is `nomask`
    (i.e. False).

    Parameters
    ----------
    m1, m2 : array_like
        Input masks.
    copy : bool, optional
        If copy is False and one of the inputs is `nomask`, return a view
        of the other input mask. Defaults to False.
    shrink : bool, optional
        Whether to shrink the output to `nomask` if all its values are
        False. Defaults to True.

    Returns
    -------
    mask : output mask
        The result masks values that are masked in either `m1` or `m2`.

    Raises
    ------
    ValueError
        If `m1` and `m2` have different flexible dtypes.

    Examples
    --------
    >>> m1 = np.ma.make_mask([0, 1, 1, 0])
    >>> m2 = np.ma.make_mask([1, 0, 0, 0])
    >>> np.ma.mask_or(m1, m2)
    array([ True,  True,  True, False], dtype=bool)

    """

    def _recursive_mask_or(m1, m2, newmask):
        # Fill `newmask` in place with the field-wise OR of two structured
        # masks, recursing into nested structured fields.
        names = m1.dtype.names
        for name in names:
            current1 = m1[name]
            if current1.dtype.names:
                _recursive_mask_or(current1, m2[name], newmask[name])
            else:
                # Third argument is the output buffer: write the OR in place.
                umath.logical_or(current1, m2[name], newmask[name])
        return

    # If either input is nomask/False, the result is built from the other,
    # honoring `copy`/`shrink`.
    if (m1 is nomask) or (m1 is False):
        dtype = getattr(m2, 'dtype', MaskType)
        return make_mask(m2, copy=copy, shrink=shrink, dtype=dtype)
    if (m2 is nomask) or (m2 is False):
        dtype = getattr(m1, 'dtype', MaskType)
        return make_mask(m1, copy=copy, shrink=shrink, dtype=dtype)
    # Same object and already a valid mask: nothing to combine.
    if m1 is m2 and is_mask(m1):
        return m1
    (dtype1, dtype2) = (getattr(m1, 'dtype', None), getattr(m2, 'dtype', None))
    if (dtype1 != dtype2):
        raise ValueError("Incompatible dtypes '%s'<>'%s'" % (dtype1, dtype2))
    if dtype1.names:
        # Structured masks: combine field by field into a fresh buffer.
        newmask = np.empty_like(m1)
        _recursive_mask_or(m1, m2, newmask)
        return newmask
    return make_mask(umath.logical_or(m1, m2), copy=copy, shrink=shrink)
def flatten_mask(mask):
    """
    Returns a completely flattened version of the mask, where nested fields
    are collapsed.

    Parameters
    ----------
    mask : array_like
        Input array, which will be interpreted as booleans.

    Returns
    -------
    flattened_mask : ndarray of bools
        The flattened input.

    Examples
    --------
    >>> mask = np.array([0, 0, 1], dtype=np.bool)
    >>> flatten_mask(mask)
    array([False, False,  True], dtype=bool)

    >>> mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)])
    >>> flatten_mask(mask)
    array([False, False, False,  True], dtype=bool)

    >>> mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])]
    >>> mask = np.array([(0, (0, 0)), (0, (0, 1))], dtype=mdtype)
    >>> flatten_mask(mask)
    array([False, False, False, False, False,  True], dtype=bool)

    """

    def _descend(m):
        # Expand a structured mask into a (possibly nested) list of
        # per-field flattened masks; plain masks pass through unchanged.
        names = m.dtype.names
        if names:
            return [flatten_mask(m[name]) for name in names]
        return m

    def _walk(seq):
        # Depth-first generator over the scalar leaves of a nested sequence.
        try:
            for item in seq:
                if hasattr(item, '__iter__'):
                    for leaf in _walk(item):
                        yield leaf
                else:
                    yield item
        except TypeError:
            # `seq` itself is a scalar.
            yield seq

    mask = np.asarray(mask)
    return np.array(list(_walk(_descend(mask))), dtype=bool)
def _check_mask_axis(mask, axis):
    "Check whether there are masked values along the given axis"
    # nomask in, nomask out; otherwise True where the axis is fully masked.
    if mask is nomask:
        return nomask
    return mask.all(axis=axis)
###############################################################################
# Masking functions #
###############################################################################
def masked_where(condition, a, copy=True):
    """
    Mask an array where a condition is met.

    Return `a` as an array masked where `condition` is True.
    Any masked values of `a` or `condition` are also masked in the output.

    Parameters
    ----------
    condition : array_like
        Masking condition.  When `condition` tests floating point values for
        equality, consider using ``masked_values`` instead.
    a : array_like
        Array to mask.
    copy : bool
        If True (default) make a copy of `a` in the result.  If False modify
        `a` in place and return a view.

    Returns
    -------
    result : MaskedArray
        The result of masking `a` where `condition` is True.

    See Also
    --------
    masked_values : Mask using floating point equality.
    masked_equal : Mask where equal to a given value.
    masked_not_equal : Mask where `not` equal to a given value.
    masked_less_equal : Mask where less than or equal to a given value.
    masked_greater_equal : Mask where greater than or equal to a given value.
    masked_less : Mask where less than a given value.
    masked_greater : Mask where greater than a given value.
    masked_inside : Mask inside a given interval.
    masked_outside : Mask outside a given interval.
    masked_invalid : Mask invalid values (NaNs or infs).

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = np.arange(4)
    >>> ma.masked_where(a <= 2, a)
    masked_array(data = [-- -- -- 3],
          mask = [ True  True  True False],
          fill_value=999999)

    Mask array `b` conditional on `a`.

    >>> b = ['a', 'b', 'c', 'd']
    >>> ma.masked_where(a == 2, b)
    masked_array(data = [a b -- d],
          mask = [False False  True False],
          fill_value=N/A)

    Effect of the `copy` argument.

    >>> c = ma.masked_where(a <= 2, a)
    >>> c[0] = 99
    >>> a
    array([0, 1, 2, 3])
    >>> c = ma.masked_where(a <= 2, a, copy=False)
    >>> c[0] = 99
    >>> a
    array([99,  1,  2,  3])

    When `condition` or `a` contain masked values.

    >>> a = ma.masked_where(np.arange(4) == 2, np.arange(4))
    >>> b = ma.masked_where(np.arange(4) == 0, np.arange(4))
    >>> ma.masked_where(a == 3, b)
    masked_array(data = [-- 1 -- --],
          mask = [ True False  True  True],
          fill_value=999999)

    """
    # Make sure that condition is a valid standard-type mask.
    cond = make_mask(condition)
    a = np.array(a, copy=copy, subok=True)

    (cshape, ashape) = (cond.shape, a.shape)
    if cshape and cshape != ashape:
        # Fixed spelling of the user-facing message ("Inconsistant" ->
        # "Inconsistent"); the IndexError type is kept for compatibility.
        raise IndexError("Inconsistent shape between the condition and the"
                         " input (got %s and %s)" % (cshape, ashape))
    if hasattr(a, '_mask'):
        # Already masked: merge the new condition with the existing mask
        # and preserve the input's (sub)class.
        cond = mask_or(cond, a._mask)
        cls = type(a)
    else:
        cls = MaskedArray
    result = a.view(cls)
    # Assign to *.mask so that structured masks are handled correctly.
    result.mask = cond
    return result
def masked_greater(x, value, copy=True):
    """
    Mask an array where greater than a given value.

    This function is a shortcut to ``masked_where``, with
    `condition` = (x > value).

    See Also
    --------
    masked_where : Mask where a condition is met.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = np.arange(4)
    >>> ma.masked_greater(a, 2)
    masked_array(data = [0 1 2 --],
          mask = [False False False  True],
          fill_value=999999)

    """
    condition = greater(x, value)
    return masked_where(condition, x, copy=copy)
def masked_greater_equal(x, value, copy=True):
    """
    Mask an array where greater than or equal to a given value.

    This function is a shortcut to ``masked_where``, with
    `condition` = (x >= value).

    See Also
    --------
    masked_where : Mask where a condition is met.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = np.arange(4)
    >>> ma.masked_greater_equal(a, 2)
    masked_array(data = [0 1 -- --],
          mask = [False False  True  True],
          fill_value=999999)

    """
    condition = greater_equal(x, value)
    return masked_where(condition, x, copy=copy)
def masked_less(x, value, copy=True):
    """
    Mask an array where less than a given value.

    This function is a shortcut to ``masked_where``, with
    `condition` = (x < value).

    See Also
    --------
    masked_where : Mask where a condition is met.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = np.arange(4)
    >>> ma.masked_less(a, 2)
    masked_array(data = [-- -- 2 3],
          mask = [ True  True False False],
          fill_value=999999)

    """
    condition = less(x, value)
    return masked_where(condition, x, copy=copy)
def masked_less_equal(x, value, copy=True):
    """
    Mask an array where less than or equal to a given value.

    This function is a shortcut to ``masked_where``, with
    `condition` = (x <= value).

    See Also
    --------
    masked_where : Mask where a condition is met.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = np.arange(4)
    >>> ma.masked_less_equal(a, 2)
    masked_array(data = [-- -- -- 3],
          mask = [ True  True  True False],
          fill_value=999999)

    """
    condition = less_equal(x, value)
    return masked_where(condition, x, copy=copy)
def masked_not_equal(x, value, copy=True):
    """
    Mask an array where `not` equal to a given value.

    This function is a shortcut to ``masked_where``, with
    `condition` = (x != value).

    See Also
    --------
    masked_where : Mask where a condition is met.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = np.arange(4)
    >>> ma.masked_not_equal(a, 2)
    masked_array(data = [-- -- 2 --],
          mask = [ True  True False  True],
          fill_value=999999)

    """
    condition = not_equal(x, value)
    return masked_where(condition, x, copy=copy)
def masked_equal(x, value, copy=True):
    """
    Mask an array where equal to a given value.

    This function is a shortcut to ``masked_where``, with
    `condition` = (x == value).  For floating point arrays,
    consider using ``masked_values(x, value)``.

    See Also
    --------
    masked_where : Mask where a condition is met.
    masked_values : Mask using floating point equality.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = np.arange(4)
    >>> ma.masked_equal(a, 2)
    masked_array(data = [0 1 -- 3],
          mask = [False False  True False],
          fill_value=999999)

    """
    result = masked_where(equal(x, value), x, copy=copy)
    # Use the masked-out value as the fill value so round-tripping with
    # `filled` restores the original data.
    result.fill_value = value
    return result
def masked_inside(x, v1, v2, copy=True):
    """
    Mask an array inside a given interval.

    Shortcut to ``masked_where``, where `condition` is True for `x` inside
    the interval [v1,v2] (v1 <= x <= v2).  The boundaries `v1` and `v2`
    can be given in either order.

    See Also
    --------
    masked_where : Mask where a condition is met.

    Notes
    -----
    The array `x` is prefilled with its filling value.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1]
    >>> ma.masked_inside(x, -0.3, 0.3)
    masked_array(data = [0.31 1.2 -- -- -0.4 -1.1],
          mask = [False False  True  True False False],
          fill_value=1e+20)

    """
    # Normalize the interval so v1 is the lower bound.
    if v2 < v1:
        v1, v2 = v2, v1
    data = filled(x)
    return masked_where((data >= v1) & (data <= v2), x, copy=copy)
def masked_outside(x, v1, v2, copy=True):
    """
    Mask an array outside a given interval.

    Shortcut to ``masked_where``, where `condition` is True for `x` outside
    the interval [v1,v2] (x < v1)|(x > v2).
    The boundaries `v1` and `v2` can be given in either order.

    See Also
    --------
    masked_where : Mask where a condition is met.

    Notes
    -----
    The array `x` is prefilled with its filling value.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1]
    >>> ma.masked_outside(x, -0.3, 0.3)
    masked_array(data = [-- -- 0.01 0.2 -- --],
          mask = [ True  True False False  True  True],
          fill_value=1e+20)

    """
    # Normalize the interval so v1 is the lower bound.
    if v2 < v1:
        v1, v2 = v2, v1
    data = filled(x)
    return masked_where((data < v1) | (data > v2), x, copy=copy)
def masked_object(x, value, copy=True, shrink=True):
    """
    Mask the array `x` where the data are exactly equal to value.

    This function is similar to `masked_values`, but only suitable
    for object arrays: for floating point, use `masked_values` instead.

    Parameters
    ----------
    x : array_like
        Array to mask
    value : object
        Comparison value
    copy : {True, False}, optional
        Whether to return a copy of `x`.
    shrink : {True, False}, optional
        Whether to collapse a mask full of False to nomask

    Returns
    -------
    result : MaskedArray
        The result of masking `x` where equal to `value`.

    See Also
    --------
    masked_where : Mask where a condition is met.
    masked_equal : Mask where equal to a given value (integers).
    masked_values : Mask using floating point equality.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> food = np.array(['green_eggs', 'ham'], dtype=object)
    >>> # don't eat spoiled food
    >>> eat = ma.masked_object(food, 'green_eggs')
    >>> print(eat)
    [-- ham]

    Note that `mask` is set to ``nomask`` if possible.

    >>> fresh_food = np.array(['cheese', 'ham', 'pineapple'], dtype=object)
    >>> ma.masked_object(fresh_food, 'green_eggs')
    masked_array(data = [cheese ham pineapple],
          mask = False,
          fill_value=?)

    """
    # Reuse an existing mask when the input is already masked.
    if isMaskedArray(x):
        data, prior_mask = x._data, x._mask
    else:
        data, prior_mask = np.asarray(x), nomask
    condition = umath.equal(data, value)
    combined = mask_or(prior_mask, make_mask(condition, shrink=shrink))
    return masked_array(x, mask=combined, copy=copy, fill_value=value)
def masked_values(x, value, rtol=1e-5, atol=1e-8, copy=True, shrink=True):
    """
    Mask using floating point equality.

    Return a MaskedArray, masked where the data in array `x` are approximately
    equal to `value`, i.e. where the following condition is True

    (abs(x - value) <= atol+rtol*abs(value))

    The fill_value is set to `value` and the mask is set to ``nomask`` if
    possible.  For integers, consider using ``masked_equal``.

    Parameters
    ----------
    x : array_like
        Array to mask.
    value : float
        Masking value.
    rtol : float, optional
        Tolerance parameter.
    atol : float, optional
        Tolerance parameter (1e-8).
    copy : bool, optional
        Whether to return a copy of `x`.
    shrink : bool, optional
        Whether to collapse a mask full of False to ``nomask``.

    Returns
    -------
    result : MaskedArray
        The result of masking `x` where approximately equal to `value`.

    See Also
    --------
    masked_where : Mask where a condition is met.
    masked_equal : Mask where equal to a given value (integers).

    Examples
    --------
    >>> import numpy.ma as ma
    >>> x = np.array([1, 1.1, 2, 1.1, 3])
    >>> ma.masked_values(x, 1.1)
    masked_array(data = [1.0 -- 2.0 -- 3.0],
          mask = [False  True False  True False],
          fill_value=1.1)

    Note that `mask` is set to ``nomask`` if possible.

    >>> ma.masked_values(x, 1.5)
    masked_array(data = [ 1.   1.1  2.   1.1  3. ],
          mask = False,
          fill_value=1.5)

    For integers, the fill value will be different in general to the
    result of ``masked_equal``.

    >>> x = np.arange(5)
    >>> ma.masked_values(x, 2)
    masked_array(data = [0 1 -- 3 4],
          mask = [False False  True False False],
          fill_value=2)
    >>> ma.masked_equal(x, 2)
    masked_array(data = [0 1 -- 3 4],
          mask = [False False  True False False],
          fill_value=999999)

    """
    data = filled(x, value)
    if issubclass(data.dtype.type, np.floating):
        # Approximate float equality: |data - value| <= atol + rtol*|value|.
        absdiff = umath.absolute(data - value)
        condition = umath.less_equal(absdiff,
                                     atol + rtol * umath.absolute(value))
        prior_mask = getattr(x, '_mask', nomask)
    else:
        # Exact comparison for non-float data; any prior mask is dropped.
        condition = umath.equal(data, value)
        prior_mask = nomask
    new_mask = mask_or(prior_mask, make_mask(condition, shrink=shrink),
                       shrink=shrink)
    return masked_array(data, mask=new_mask, copy=copy, fill_value=value)
def masked_invalid(a, copy=True):
    """
    Mask an array where invalid values occur (NaNs or infs).

    This function is a shortcut to ``masked_where``, with
    `condition` = ~(np.isfinite(a)). Any pre-existing mask is conserved.
    Only applies to arrays with a dtype where NaNs or infs make sense
    (i.e. floating point types), but accepts any array_like object.

    See Also
    --------
    masked_where : Mask where a condition is met.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = np.arange(5, dtype=np.float)
    >>> a[2] = np.NaN
    >>> a[3] = np.PINF
    >>> ma.masked_invalid(a)
    masked_array(data = [0.0 1.0 -- -- 4.0],
          mask = [False False  True  True False],
          fill_value=1e+20)

    """
    arr = np.array(a, copy=copy, subok=True)
    existing = getattr(arr, '_mask', None)
    if existing is None:
        # Plain ndarray input: build the mask from scratch.
        invalid = ~np.isfinite(arr)
        cls = MaskedArray
    else:
        # Already masked: combine with the pre-existing mask and keep the
        # input's (sub)class.
        invalid = ~np.isfinite(getdata(arr))
        if existing is not nomask:
            invalid |= existing
        cls = type(arr)
    result = arr.view(cls)
    result._mask = invalid
    return result
###############################################################################
# Printing options #
###############################################################################
class _MaskedPrintOption:
"""
Handle the string used to represent missing data in a masked array.
"""
def __init__(self, display):
"""
Create the masked_print_option object.
"""
self._display = display
self._enabled = True
def display(self):
"""
Display the string to print for masked values.
"""
return self._display
def set_display(self, s):
"""
Set the string to print for masked values.
"""
self._display = s
def enabled(self):
"""
Is the use of the display value enabled?
"""
return self._enabled
def enable(self, shrink=1):
"""
Set the enabling shrink to `shrink`.
"""
self._enabled = shrink
def __str__(self):
return str(self._display)
__repr__ = __str__
# if you single index into a masked location you get this object.
# Module-level singleton controlling how masked entries print ('--').
masked_print_option = _MaskedPrintOption('--')
def _recursive_printoption(result, mask, printopt):
"""
Puts printoptions in result where mask is True.
Private function allowing for recursion
"""
names = result.dtype.names
for name in names:
(curdata, curmask) = (result[name], mask[name])
if curdata.dtype.names:
_recursive_printoption(curdata, curmask, printopt)
else:
np.copyto(curdata, printopt, where=curmask)
return
_print_templates = dict(long_std="""\
masked_%(name)s(data =
%(data)s,
%(nlen)s mask =
%(mask)s,
%(nlen)s fill_value = %(fill)s)
""",
short_std="""\
masked_%(name)s(data = %(data)s,
%(nlen)s mask = %(mask)s,
%(nlen)s fill_value = %(fill)s)
""",
long_flx="""\
masked_%(name)s(data =
%(data)s,
%(nlen)s mask =
%(mask)s,
%(nlen)s fill_value = %(fill)s,
%(nlen)s dtype = %(dtype)s)
""",
short_flx="""\
masked_%(name)s(data = %(data)s,
%(nlen)s mask = %(mask)s,
%(nlen)s fill_value = %(fill)s,
%(nlen)s dtype = %(dtype)s)
""")
###############################################################################
# MaskedArray class #
###############################################################################
def _recursive_filled(a, mask, fill_value):
"""
Recursively fill `a` with `fill_value`.
"""
names = a.dtype.names
for name in names:
current = a[name]
if current.dtype.names:
_recursive_filled(current, mask[name], fill_value[name])
else:
np.copyto(current, fill_value[name], where=mask[name])
def flatten_structured_array(a):
    """
    Flatten a structured array.

    The data type of the output is chosen such that it can represent all of
    the (nested) fields.

    Parameters
    ----------
    a : structured array

    Returns
    -------
    output : masked array or ndarray
        A flattened masked array if the input is a masked array, otherwise a
        standard ndarray.

    Examples
    --------
    >>> ndtype = [('a', int), ('b', float)]
    >>> a = np.array([(1, 1), (2, 2)], dtype=ndtype)
    >>> flatten_structured_array(a)
    array([[1., 1.],
           [2., 2.]])

    """

    def _leaves(iterable):
        "Yield the scalar leaves of a nested iterable, depth first."
        for item in iter(iterable):
            if hasattr(item, '__iter__'):
                for leaf in _leaves(item):
                    yield leaf
            else:
                yield item

    a = np.asanyarray(a)
    original_shape = a.shape
    a = a.ravel()
    if isinstance(a, MaskedArray):
        # Flatten data and mask records in parallel so they stay aligned.
        out = np.array([tuple(_leaves(rec.item())) for rec in a._data])
        out = out.view(MaskedArray)
        out._mask = np.array([tuple(_leaves(rec.item()))
                              for rec in getmaskarray(a)])
    else:
        out = np.array([tuple(_leaves(rec.item())) for rec in a])
    if len(original_shape) > 1:
        # Restore the original leading dimensions before the flat record axis.
        newshape = list(out.shape)
        newshape[0] = original_shape
        out.shape = tuple(_leaves(newshape))
    return out
def _arraymethod(funcname, onmask=True):
    """
    Return a class method wrapper around a basic array method.

    Creates a class method which returns a masked array, where the new
    ``_data`` array is the output of the corresponding basic method called
    on the original ``_data``.

    If `onmask` is True, the new mask is the output of the method called
    on the initial mask. Otherwise, the new mask is just a reference
    to the initial mask.

    Parameters
    ----------
    funcname : str
        Name of the function to apply on data.
    onmask : bool
        Whether the mask must be processed also (True) or left
        alone (False). Default is True. Make available as `_onmask`
        attribute.

    Returns
    -------
    method : instancemethod
        Class method wrapper of the specified basic array method.

    """
    def wrapped_method(self, *args, **params):
        # Apply the underlying ndarray method to the data part.
        result = getattr(self._data, funcname)(*args, **params)
        result = result.view(type(self))
        result._update_from(self)
        mask = self._mask
        if result.ndim:
            if not onmask:
                # Keep a reference to the original mask untouched.
                result.__setmask__(mask)
            elif mask is not nomask:
                # Apply the same transformation to the mask so it stays
                # aligned with the transformed data.
                result.__setmask__(getattr(mask, funcname)(*args, **params))
        else:
            # 0-d result: collapse to the `masked` singleton when the
            # (non-structured) mask says everything is masked.
            if mask.ndim and (not mask.dtype.names and mask.all()):
                return masked
        return result
    # Inherit the documentation from the wrapped ndarray/numpy function.
    methdoc = getattr(ndarray, funcname, None) or getattr(np, funcname, None)
    if methdoc is not None:
        wrapped_method.__doc__ = methdoc.__doc__
    wrapped_method.__name__ = funcname
    return wrapped_method
class MaskedIterator(object):
    """
    Flat iterator object to iterate over masked arrays.

    A `MaskedIterator` iterator is returned by ``x.flat`` for any masked array
    `x`. It allows iterating over the array as if it were a 1-D array,
    either in a for-loop or by calling its `next` method.

    Iteration is done in C-contiguous style, with the last index varying the
    fastest. The iterator can also be indexed using basic slicing or
    advanced indexing.

    See Also
    --------
    MaskedArray.flat : Return a flat iterator over an array.
    MaskedArray.flatten : Returns a flattened copy of an array.

    Notes
    -----
    `MaskedIterator` is not exported by the `ma` module. Instead of
    instantiating a `MaskedIterator` directly, use `MaskedArray.flat`.

    Examples
    --------
    >>> x = np.ma.array(np.arange(6).reshape(2, 3))
    >>> fl = x.flat
    >>> type(fl)
    <class 'numpy.ma.core.MaskedIterator'>
    >>> for item in fl:
    ...     print(item)
    ...
    0
    1
    2
    3
    4
    5

    Extracting more than a single element b indexing the `MaskedIterator`
    returns a masked array:

    >>> fl[2:4]
    masked_array(data = [2 3],
                 mask = False,
           fill_value = 999999)

    """
    def __init__(self, ma):
        # Keep the data and (optional) mask flat iterators in lockstep.
        self.ma = ma
        self.dataiter = ma._data.flat
        if ma._mask is nomask:
            self.maskiter = None
        else:
            self.maskiter = ma._mask.flat

    def __iter__(self):
        return self

    def __getitem__(self, indx):
        result = self.dataiter.__getitem__(indx).view(type(self.ma))
        if self.maskiter is not None:
            _mask = self.maskiter.__getitem__(indx)
            if isinstance(_mask, ndarray):
                # set shape to match that of data; this is needed for matrices
                _mask.shape = result.shape
                result._mask = _mask
            elif isinstance(_mask, np.void):
                # Structured scalar: wrap data+mask together in an mvoid.
                return mvoid(result, mask=_mask, hardmask=self.ma._hardmask)
            elif _mask:  # Just a scalar, masked
                return masked
        return result

    # This won't work if ravel makes a copy
    def __setitem__(self, index, value):
        self.dataiter[index] = getdata(value)
        if self.maskiter is not None:
            self.maskiter[index] = getmaskarray(value)

    def __next__(self):
        """
        Return the next value, or raise StopIteration.

        Examples
        --------
        >>> x = np.ma.array([3, 2], mask=[0, 1])
        >>> fl = x.flat
        >>> fl.next()
        3
        >>> fl.next()
        masked_array(data = --,
                     mask = True,
               fill_value = 1e+20)
        >>> fl.next()
        Traceback (most recent call last):
          File "<stdin>", line 1, in <module>
          File "/home/ralf/python/numpy/numpy/ma/core.py", line 2243, in next
            d = self.dataiter.next()
        StopIteration

        """
        d = next(self.dataiter)
        if self.maskiter is not None:
            # Advance the mask iterator in step with the data iterator.
            m = next(self.maskiter)
            if isinstance(m, np.void):
                return mvoid(d, mask=m, hardmask=self.ma._hardmask)
            elif m:  # Just a scalar, masked
                return masked
        return d
    # Python 2 iterator protocol alias.
    next = __next__
class MaskedArray(ndarray):
"""
An array class with possibly masked values.
Masked values of True exclude the corresponding element from any
computation.
Construction::
x = MaskedArray(data, mask=nomask, dtype=None, copy=False, subok=True,
ndmin=0, fill_value=None, keep_mask=True, hard_mask=None,
shrink=True, order=None)
Parameters
----------
data : array_like
Input data.
mask : sequence, optional
Mask. Must be convertible to an array of booleans with the same
shape as `data`. True indicates a masked (i.e. invalid) data.
dtype : dtype, optional
Data type of the output.
If `dtype` is None, the type of the data argument (``data.dtype``)
is used. If `dtype` is not None and different from ``data.dtype``,
a copy is performed.
copy : bool, optional
Whether to copy the input data (True), or to use a reference instead.
Default is False.
subok : bool, optional
Whether to return a subclass of `MaskedArray` if possible (True) or a
plain `MaskedArray`. Default is True.
ndmin : int, optional
Minimum number of dimensions. Default is 0.
fill_value : scalar, optional
Value used to fill in the masked values when necessary.
If None, a default based on the data-type is used.
keep_mask : bool, optional
Whether to combine `mask` with the mask of the input data, if any
(True), or to use only `mask` for the output (False). Default is True.
hard_mask : bool, optional
Whether to use a hard mask or not. With a hard mask, masked values
cannot be unmasked. Default is False.
shrink : bool, optional
Whether to force compression of an empty mask. Default is True.
order : {'C', 'F', 'A'}, optional
Specify the order of the array. If order is 'C', then the array
will be in C-contiguous order (last-index varies the fastest).
If order is 'F', then the returned array will be in
Fortran-contiguous order (first-index varies the fastest).
If order is 'A' (default), then the returned array may be
in any order (either C-, Fortran-contiguous, or even discontiguous),
unless a copy is required, in which case it will be C-contiguous.
"""
# Give MaskedArray precedence over plain ndarrays in mixed binary ops.
__array_priority__ = 15

_defaultmask = nomask
_defaulthardmask = False
_baseclass = ndarray

# Maximum number of elements per axis used when printing an array.
_print_width = 100
def __new__(cls, data=None, mask=nomask, dtype=None, copy=False,
            subok=True, ndmin=0, fill_value=None, keep_mask=True,
            hard_mask=None, shrink=True, order=None, **options):
    """
    Create a new masked array from scratch.

    Builds the data array, derives/merges the mask (honoring `keep_mask`
    and `shrink`), and installs fill value and hardness attributes.

    Notes
    -----
    A masked array can also be created by taking a .view(MaskedArray).

    """
    # Process data.
    _data = np.array(data, dtype=dtype, copy=copy,
                     order=order, subok=True, ndmin=ndmin)
    _baseclass = getattr(data, '_baseclass', type(_data))
    # Check that we're not erasing the mask.
    if isinstance(data, MaskedArray) and (data.shape != _data.shape):
        copy = True

    # Careful, cls might not always be MaskedArray.
    if not isinstance(data, cls) or not subok:
        _data = ndarray.view(_data, cls)
    else:
        _data = ndarray.view(_data, type(data))
    # Backwards compatibility w/ numpy.core.ma.
    if hasattr(data, '_mask') and not isinstance(data, ndarray):
        _data._mask = data._mask
        # FIXME _sharedmask is never used.
        _sharedmask = True
    # Process mask.
    # Number of named fields (or zero if none)
    names_ = _data.dtype.names or ()
    # Type of the mask: structured dtypes get a matching structured
    # bool mask dtype, everything else the plain MaskType (bool).
    if names_:
        mdtype = make_mask_descr(_data.dtype)
    else:
        mdtype = MaskType

    if mask is nomask:
        # Case 1. : no mask in input.
        # Erase the current mask ?
        if not keep_mask:
            # With a reduced version
            if shrink:
                _data._mask = nomask
            # With full version
            else:
                _data._mask = np.zeros(_data.shape, dtype=mdtype)
        # Check whether we missed something
        elif isinstance(data, (tuple, list)):
            try:
                # If data is a sequence of masked array
                mask = np.array([getmaskarray(m) for m in data],
                                dtype=mdtype)
            except ValueError:
                # If data is nested
                mask = nomask
            # Force shrinking of the mask if needed (and possible)
            if (mdtype == MaskType) and mask.any():
                _data._mask = mask
                _data._sharedmask = False
        else:
            if copy:
                _data._mask = _data._mask.copy()
                _data._sharedmask = False
                # Reset the shape of the original mask
                if getmask(data) is not nomask:
                    data._mask.shape = data.shape
            else:
                _data._sharedmask = True
    else:
        # Case 2. : With a mask in input.
        # If mask is boolean, create an array of True or False
        if mask is True and mdtype == MaskType:
            mask = np.ones(_data.shape, dtype=mdtype)
        elif mask is False and mdtype == MaskType:
            mask = np.zeros(_data.shape, dtype=mdtype)
        else:
            # Read the mask with the current mdtype
            try:
                mask = np.array(mask, copy=copy, dtype=mdtype)
            # Or assume it's a sequence of bool/int
            except TypeError:
                mask = np.array([tuple([m] * len(mdtype)) for m in mask],
                                dtype=mdtype)
        # Make sure the mask and the data have the same shape
        if mask.shape != _data.shape:
            (nd, nm) = (_data.size, mask.size)
            if nm == 1:
                # Scalar mask: broadcast it over the data shape.
                mask = np.resize(mask, _data.shape)
            elif nm == nd:
                mask = np.reshape(mask, _data.shape)
            else:
                msg = "Mask and data not compatible: data size is %i, " + \
                      "mask size is %i."
                raise MaskError(msg % (nd, nm))
            copy = True
        # Set the mask to the new value
        if _data._mask is nomask:
            _data._mask = mask
            _data._sharedmask = not copy
        else:
            if not keep_mask:
                _data._mask = mask
                _data._sharedmask = not copy
            else:
                if names_:
                    def _recursive_or(a, b):
                        "do a|=b on each field of a, recursively"
                        for name in a.dtype.names:
                            (af, bf) = (a[name], b[name])
                            if af.dtype.names:
                                _recursive_or(af, bf)
                            else:
                                af |= bf
                        return
                    _recursive_or(_data._mask, mask)
                else:
                    _data._mask = np.logical_or(mask, _data._mask)
                _data._sharedmask = False
    # Update fill_value.
    if fill_value is None:
        fill_value = getattr(data, '_fill_value', None)
    # But don't run the check unless we have something to check.
    if fill_value is not None:
        _data._fill_value = _check_fill_value(fill_value, _data.dtype)
    # Process extra options ..
    if hard_mask is None:
        _data._hardmask = getattr(data, '_hardmask', False)
    else:
        _data._hardmask = hard_mask
    _data._baseclass = _baseclass
    return _data
def _update_from(self, obj):
    """
    Copies some attributes of obj to self.

    Propagates fill value, mask hardness/sharing flags, base class and
    the `_optinfo` dictionary from `obj` (typically the array `self` was
    derived from). The mask itself is NOT copied here.
    """
    if obj is not None and isinstance(obj, ndarray):
        _baseclass = type(obj)
    else:
        _baseclass = ndarray
    # We need to copy the _basedict to avoid backward propagation
    _optinfo = {}
    _optinfo.update(getattr(obj, '_optinfo', {}))
    _optinfo.update(getattr(obj, '_basedict', {}))
    if not isinstance(obj, MaskedArray):
        # Non-MaskedArray sources: carry over any plain instance attrs.
        _optinfo.update(getattr(obj, '__dict__', {}))
    _dict = dict(_fill_value=getattr(obj, '_fill_value', None),
                 _hardmask=getattr(obj, '_hardmask', False),
                 _sharedmask=getattr(obj, '_sharedmask', False),
                 _isfield=getattr(obj, '_isfield', False),
                 _baseclass=getattr(obj, '_baseclass', _baseclass),
                 _optinfo=_optinfo,
                 _basedict=_optinfo)
    self.__dict__.update(_dict)
    self.__dict__.update(_optinfo)
    return
def __array_finalize__(self, obj):
    """
    Finalizes the masked array.

    Called by numpy whenever a new MaskedArray instance comes into
    existence (view, slice, ufunc result, templating...); decides what
    mask the new instance gets, based on heuristics described below.
    """
    # Get main attributes.
    self._update_from(obj)

    # We have to decide how to initialize self.mask, based on
    # obj.mask. This is very difficult.  There might be some
    # correspondence between the elements in the array we are being
    # created from (= obj) and us. Or there might not. This method can
    # be called in all kinds of places for all kinds of reasons -- could
    # be empty_like, could be slicing, could be a ufunc, could be a view.
    # The numpy subclassing interface simply doesn't give us any way
    # to know, which means that at best this method will be based on
    # guesswork and heuristics. To make things worse, there isn't even any
    # clear consensus about what the desired behavior is. For instance,
    # most users think that np.empty_like(marr) -- which goes via this
    # method -- should return a masked array with an empty mask (see
    # gh-3404 and linked discussions), but others disagree, and they have
    # existing code which depends on empty_like returning an array that
    # matches the input mask.
    #
    # Historically our algorithm was: if the template object mask had the
    # same *number of elements* as us, then we used *it's mask object
    # itself* as our mask, so that writes to us would also write to the
    # original array. This is horribly broken in multiple ways.
    #
    # Now what we do instead is, if the template object mask has the same
    # number of elements as us, and we do not have the same base pointer
    # as the template object (b/c views like arr[...] should keep the same
    # mask), then we make a copy of the template object mask and use
    # that. This is also horribly broken but somewhat less so. Maybe.
    if isinstance(obj, ndarray):
        # XX: This looks like a bug -- shouldn't it check self.dtype
        # instead?
        if obj.dtype.names:
            _mask = getattr(obj, '_mask',
                            make_mask_none(obj.shape, obj.dtype))
        else:
            _mask = getattr(obj, '_mask', nomask)

        # If self and obj point to exactly the same data, then probably
        # self is a simple view of obj (e.g., self = obj[...]), so they
        # should share the same mask. (This isn't 100% reliable, e.g. self
        # could be the first row of obj, or have strange strides, but as a
        # heuristic it's not bad.) In all other cases, we make a copy of
        # the mask, so that future modifications to 'self' do not end up
        # side-effecting 'obj' as well.
        if (obj.__array_interface__["data"][0]
                != self.__array_interface__["data"][0]):
            _mask = _mask.copy()
    else:
        # Templating from a non-ndarray (e.g. a scalar): start unmasked.
        _mask = nomask
    self._mask = _mask
    # Finalize the mask
    if self._mask is not nomask:
        try:
            self._mask.shape = self.shape
        except ValueError:
            self._mask = nomask
        except (TypeError, AttributeError):
            # When _mask.shape is not writable (because it's a void)
            pass
    # Finalize the fill_value for structured arrays
    if self.dtype.names:
        if self._fill_value is None:
            self._fill_value = _check_fill_value(None, self.dtype)
    return
def __array_wrap__(self, obj, context=None):
    """
    Special hook for ufuncs.

    Wraps the numpy array and sets the mask according to context: the
    result mask is the OR of the argument masks, plus any entries where
    the ufunc's domain (e.g. division by zero) was violated.
    """
    result = obj.view(type(self))
    result._update_from(self)

    if context is not None:
        result._mask = result._mask.copy()
        (func, args, _) = context
        m = reduce(mask_or, [getmaskarray(arg) for arg in args])
        # Get the domain mask
        domain = ufunc_domain.get(func, None)
        if domain is not None:
            # Take the domain, and make sure it's a ndarray
            if len(args) > 2:
                d = filled(reduce(domain, args), True)
            else:
                d = filled(domain(*args), True)
            # Fill the result where the domain is wrong
            try:
                # Binary domain: take the last value
                fill_value = ufunc_fills[func][-1]
            except TypeError:
                # Unary domain: just use this one
                fill_value = ufunc_fills[func]
            except KeyError:
                # Domain not recognized, use fill_value instead
                fill_value = self.fill_value
            result = result.copy()
            np.copyto(result, fill_value, where=d)

            # Update the mask
            if m is nomask:
                if d is not nomask:
                    m = d
            else:
                # Don't modify inplace, we risk back-propagation
                m = (m | d)

        # Make sure the mask has the proper size
        if result.shape == () and m:
            # 0-d fully-masked result collapses to the masked singleton.
            return masked
        else:
            result._mask = m
            result._sharedmask = False

    return result
def view(self, dtype=None, type=None, fill_value=None):
    """
    Return a view of the MaskedArray data

    Parameters
    ----------
    dtype : data-type or ndarray sub-class, optional
        Data-type descriptor of the returned view, e.g., float32 or int16.
        The default, None, results in the view having the same data-type
        as `a`. As with ``ndarray.view``, dtype can also be specified as
        an ndarray sub-class, which then specifies the type of the
        returned object (this is equivalent to setting the ``type``
        parameter).
    type : Python type, optional
        Type of the returned view, e.g., ndarray or matrix.  Again, the
        default None results in type preservation.
    fill_value : scalar, optional
        Fill value to use for the view; see Notes for the interaction
        with `dtype` when it is not given.

    Notes
    -----
    ``a.view()`` is used two different ways:

    ``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view
    of the array's memory with a different data-type.  This can cause a
    reinterpretation of the bytes of memory.

    ``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just
    returns an instance of `ndarray_subclass` that looks at the same array
    (same shape, dtype, etc.)  This does not cause a reinterpretation of the
    memory.

    If `fill_value` is not specified, but `dtype` is specified (and is not
    an ndarray sub-class), the `fill_value` of the MaskedArray will be
    reset. If neither `fill_value` nor `dtype` are specified (or if
    `dtype` is an ndarray sub-class), then the fill value is preserved.
    Finally, if `fill_value` is specified, but `dtype` is not, the fill
    value is set to the specified value.

    For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of
    bytes per entry than the previous dtype (for example, converting a
    regular array to a structured array), then the behavior of the view
    cannot be predicted just from the superficial appearance of ``a`` (shown
    by ``print(a)``). It also depends on exactly how ``a`` is stored in
    memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus
    defined as a slice or transpose, etc., the view may give different
    results.
    """

    if dtype is None:
        if type is None:
            output = ndarray.view(self)
        else:
            output = ndarray.view(self, type)
    elif type is None:
        try:
            if issubclass(dtype, ndarray):
                # dtype is really an ndarray sub-class: treat it as `type`.
                output = ndarray.view(self, dtype)
                dtype = None
            else:
                output = ndarray.view(self, dtype)
        except TypeError:
            output = ndarray.view(self, dtype)
    else:
        output = ndarray.view(self, dtype, type)

    # also make the mask be a view (so attr changes to the view's
    # mask do no affect original object's mask)
    # (especially important to avoid affecting np.masked singleton)
    if (getattr(output, '_mask', nomask) is not nomask):
        output._mask = output._mask.view()

    # Make sure to reset the _fill_value if needed
    if getattr(output, '_fill_value', None) is not None:
        if fill_value is None:
            if dtype is None:
                pass  # leave _fill_value as is
            else:
                # dtype changed without a new fill value: the old one may
                # be invalid for the new dtype, so drop it.
                output._fill_value = None
        else:
            output.fill_value = fill_value
    return output
# NOTE: the former ``view.__doc__ = ndarray.view.__doc__`` assignment was
# removed: it immediately discarded the MaskedArray-specific docstring
# defined above (including the fill_value semantics).
def astype(self, newtype):
    """
    Returns a copy of the MaskedArray cast to given newtype.

    Returns
    -------
    output : MaskedArray
        A copy of self cast to input newtype.
        The returned record shape matches self.shape.

    Examples
    --------
    >>> x = np.ma.array([[1,2,3.1],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
    >>> print(x)
    [[1.0 -- 3.1]
     [-- 5.0 --]
     [7.0 -- 9.0]]
    >>> print(x.astype(int32))
    [[1 -- 3]
     [-- 5 --]
     [7 -- 9]]

    """
    newtype = np.dtype(newtype)
    output = self._data.astype(newtype).view(type(self))
    output._update_from(self)
    names = output.dtype.names
    if names is None:
        output._mask = self._mask.astype(bool)
    else:
        # Structured target: build a matching per-field boolean mask.
        if self._mask is nomask:
            output._mask = nomask
        else:
            output._mask = self._mask.astype([(n, bool) for n in names])
    # Don't check _fill_value if it's None, that'll speed things up
    if self._fill_value is not None:
        output._fill_value = _check_fill_value(self._fill_value, newtype)
    return output
def __getitem__(self, indx):
    """
    x.__getitem__(y) <==> x[y]

    Return the item described by i, as a masked array.  Masked scalars
    come back as the `masked` singleton, masked records as `mvoid`, and
    anything array-like as a MaskedArray sharing this array's mask view.
    """
    dout = self.data[indx]
    # We could directly use ndarray.__getitem__ on self.
    # But then we would have to modify __array_finalize__ to prevent the
    # mask of being reshaped if it hasn't been set up properly yet
    # So it's easier to stick to the current version
    _mask = self._mask

    # Did we extract a single item?
    if not getattr(dout, 'ndim', False):
        # A record
        if isinstance(dout, np.void):
            mask = _mask[indx]
            # We should always re-cast to mvoid, otherwise users can
            # change masks on rows that already have masked values, but not
            # on rows that have no masked values, which is inconsistent.
            dout = mvoid(dout, mask=mask, hardmask=self._hardmask)
        # Just a scalar
        elif _mask is not nomask and _mask[indx]:
            return masked
    elif self.dtype.type is np.object_ and self.dtype is not dout.dtype:
        # self contains an object array of arrays (yes, that happens).
        # If masked, turn into a MaskedArray, with everything masked.
        if _mask is not nomask and _mask[indx]:
            return MaskedArray(dout, mask=True)
    else:
        # Force dout to MA
        dout = dout.view(type(self))
        # Inherit attributes from self
        dout._update_from(self)
        # Check the fill_value
        if isinstance(indx, basestring):
            # Field access on a structured array.
            if self._fill_value is not None:
                dout._fill_value = self._fill_value[indx]

                # If we're indexing a multidimensional field in a
                # structured array (such as dtype("(2,)i2,(2,)i1")),
                # dimensionality goes up (M[field].ndim == M.ndim +
                # len(M.dtype[field].shape)).  That's fine for
                # M[field] but problematic for M[field].fill_value
                # which should have shape () to avoid breaking several
                # methods. There is no great way out, so set to
                # first element. See issue #6723.
                if dout._fill_value.ndim > 0:
                    if not (dout._fill_value ==
                            dout._fill_value.flat[0]).all():
                        warnings.warn(
                            "Upon accessing multidimensional field "
                            "{indx:s}, need to keep dimensionality "
                            "of fill_value at 0. Discarding "
                            "heterogeneous fill_value and setting "
                            "all to {fv!s}.".format(indx=indx,
                                                    fv=dout._fill_value[0]))
                    dout._fill_value = dout._fill_value.flat[0]
            dout._isfield = True
        # Update the mask if needed
        if _mask is not nomask:
            dout._mask = _mask[indx]
            # set shape to match that of data; this is needed for matrices
            dout._mask.shape = dout.shape
            dout._sharedmask = True
            # Note: Don't try to check for m.any(), that'll take too long
    return dout
def __setitem__(self, indx, value):
    """
    x.__setitem__(i, y) <==> x[i]=y

    Set item described by index. If value is masked, masks those
    locations.  Under a hard mask, already-masked entries are never
    overwritten.
    """
    if self is masked:
        raise MaskError('Cannot alter the masked element.')
    _data = self._data
    _mask = self._mask
    if isinstance(indx, basestring):
        # Field assignment on a structured array.
        _data[indx] = value
        if _mask is nomask:
            self._mask = _mask = make_mask_none(self.shape, self.dtype)
        _mask[indx] = getmask(value)
        return

    _dtype = _data.dtype
    nbfields = len(_dtype.names or ())

    if value is masked:
        # The mask wasn't set: create a full version.
        if _mask is nomask:
            _mask = self._mask = make_mask_none(self.shape, _dtype)
        # Now, set the mask to its value.
        if nbfields:
            _mask[indx] = tuple([True] * nbfields)
        else:
            _mask[indx] = True
        if not self._isfield:
            self._sharedmask = False
        return

    # Get the _data part of the new value
    dval = value
    # Get the _mask part of the new value
    mval = getattr(value, '_mask', nomask)
    if nbfields and mval is nomask:
        mval = tuple([False] * nbfields)
    if _mask is nomask:
        # Set the data, then the mask
        _data[indx] = dval
        if mval is not nomask:
            _mask = self._mask = make_mask_none(self.shape, _dtype)
            _mask[indx] = mval
    elif not self._hardmask:
        # Unshare the mask if necessary to avoid propagation
        # We want to remove the unshare logic from this place in the
        # future. Note that _sharedmask has lots of false positives.
        if not self._isfield:
            if self._sharedmask and not (
                    # If no one else holds a reference (we have two
                    # references (_mask and self._mask) -- add one for
                    # getrefcount) and the array owns its own data
                    # copying the mask should do nothing.
                    (sys.getrefcount(_mask) == 3) and _mask.flags.owndata):
                # 2016.01.15 -- v1.11.0
                warnings.warn(
                    "setting an item on a masked array which has a shared "
                    "mask will not copy the mask and also change the "
                    "original mask array in the future.\n"
                    "Check the NumPy 1.11 release notes for more "
                    "information.",
                    MaskedArrayFutureWarning, stacklevel=2)
                self.unshare_mask()
                _mask = self._mask
        # Set the data, then the mask
        _data[indx] = dval
        _mask[indx] = mval
    elif hasattr(indx, 'dtype') and (indx.dtype == MaskType):
        # Hard mask + boolean index: drop the masked positions from
        # the index so they are left untouched.
        indx = indx * umath.logical_not(_mask)
        _data[indx] = dval
    else:
        if nbfields:
            err_msg = "Flexible 'hard' masks are not yet supported."
            raise NotImplementedError(err_msg)
        mindx = mask_or(_mask[indx], mval, copy=True)
        dindx = self._data[indx]
        if dindx.size > 1:
            np.copyto(dindx, dval, where=~mindx)
        elif mindx is nomask:
            dindx = dval
        _data[indx] = dindx
        _mask[indx] = mindx
    return
def __setattr__(self, attr, value):
    # Delegate the actual assignment, then keep the mask dtype/shape in
    # sync when the array's dtype is changed in place.
    super(MaskedArray, self).__setattr__(attr, value)
    if attr == 'dtype' and self._mask is not nomask:
        self._mask = self._mask.view(make_mask_descr(value), ndarray)
        # Try to reset the shape of the mask (if we don't have a void)
        # This raises a ValueError if the dtype change won't work
        # (deliberately not caught below).
        try:
            self._mask.shape = self.shape
        except (AttributeError, TypeError):
            pass
def __getslice__(self, i, j):
    """
    x.__getslice__(i, j) <==> x[i:j]

    Python 2 slice protocol: return the slice described by (i, j).
    The use of negative indices is not supported.
    """
    # Route through __getitem__ so all masked-element handling lives there.
    sl = slice(i, j)
    return self.__getitem__(sl)
def __setslice__(self, i, j, value):
    """
    x.__setslice__(i, j, value) <==> x[i:j]=value

    Python 2 slice protocol: set the slice (i,j) of a to value. If value
    is masked, mask those locations.
    """
    # Route through __setitem__ so all mask bookkeeping lives there.
    sl = slice(i, j)
    self.__setitem__(sl, value)
def __setmask__(self, mask, copy=False):
    """
    Set the mask.

    Accepts the `masked` singleton (mask everything), a scalar, or any
    array-like convertible to the mask dtype.  Under a hard mask the new
    mask is OR-ed in; under a soft mask it replaces the old one.
    """
    idtype = self.dtype
    current_mask = self._mask
    if mask is masked:
        mask = True

    if (current_mask is nomask):
        # Make sure the mask is set
        # Just don't do anything if there's nothing to do.
        if mask is nomask:
            return
        current_mask = self._mask = make_mask_none(self.shape, idtype)

    if idtype.names is None:
        # No named fields.
        # Hardmask: don't unmask the data
        if self._hardmask:
            current_mask |= mask
        # Softmask: set everything to False
        # If it's obviously a compatible scalar, use a quick update
        # method.
        elif isinstance(mask, (int, float, np.bool_, np.number)):
            current_mask[...] = mask
        # Otherwise fall back to the slower, general purpose way.
        else:
            current_mask.flat = mask
    else:
        # Named fields w/
        mdtype = current_mask.dtype
        mask = np.array(mask, copy=False)
        # Mask is a singleton
        if not mask.ndim:
            # It's a boolean : make a record
            if mask.dtype.kind == 'b':
                mask = np.array(tuple([mask.item()] * len(mdtype)),
                                dtype=mdtype)
            # It's a record: make sure the dtype is correct
            else:
                mask = mask.astype(mdtype)
        # Mask is a sequence
        else:
            # Make sure the new mask is a ndarray with the proper dtype
            try:
                mask = np.array(mask, copy=copy, dtype=mdtype)
            # Or assume it's a sequence of bool/int
            except TypeError:
                mask = np.array([tuple([m] * len(mdtype)) for m in mask],
                                dtype=mdtype)
        # Hardmask: don't unmask the data
        if self._hardmask:
            for n in idtype.names:
                current_mask[n] |= mask[n]
        # Softmask: set everything to False
        # If it's obviously a compatible scalar, use a quick update
        # method.
        elif isinstance(mask, (int, float, np.bool_, np.number)):
            current_mask[...] = mask
        # Otherwise fall back to the slower, general purpose way.
        else:
            current_mask.flat = mask
    # Reshape if needed
    if current_mask.shape:
        current_mask.shape = self.shape
    return

# Backwards-compatible alias.
_set_mask = __setmask__
def _get_mask(self):
    """Return the current mask (`nomask` when no mask is set).
    """
    # We could try to force a reshape, but that wouldn't work in some
    # cases.
    return self._mask

mask = property(fget=_get_mask, fset=__setmask__, doc="Mask")
def _get_recordmask(self):
    """
    Return the mask of the records.

    A record is masked when all the fields are masked.  For
    non-structured arrays this is just the plain mask.
    """
    _mask = self._mask.view(ndarray)
    if _mask.dtype.names is None:
        return _mask
    return np.all(flatten_structured_array(_mask), axis=-1)
def _set_recordmask(self):
    """
    Setting the record-wise mask is not implemented; always raises.
    """
    raise NotImplementedError("Coming soon: setting the mask per records!")

recordmask = property(fget=_get_recordmask)
def harden_mask(self):
    """
    Force the mask to hard.

    A hard mask prevents masked entries from being unmasked by later
    assignments; see the `hardmask` property.  Returns `self` so the
    call can be chained.

    See Also
    --------
    hardmask

    """
    self._hardmask = True
    return self
def soften_mask(self):
    """
    Force the mask to soft.

    A soft mask allows masked entries to be unmasked by later
    assignments; see the `hardmask` property.  Returns `self` so the
    call can be chained.

    See Also
    --------
    hardmask

    """
    self._hardmask = False
    return self

hardmask = property(fget=lambda self: self._hardmask,
                    doc="Hardness of the mask")
def unshare_mask(self):
    """
    Copy the mask and set the sharedmask flag to False.

    Whether the mask is shared between masked arrays can be seen from
    the `sharedmask` property. `unshare_mask` ensures the mask is not
    shared.  A copy of the mask is only made if it was shared.  Returns
    `self`.

    See Also
    --------
    sharedmask

    """
    # Guard clause: nothing to do for an already-private mask.
    if not self._sharedmask:
        return self
    self._mask = self._mask.copy()
    self._sharedmask = False
    return self

sharedmask = property(fget=lambda self: self._sharedmask,
                      doc="Share status of the mask (read-only).")
def shrink_mask(self):
    """
    Reduce a mask to nomask when possible.

    Parameters
    ----------
    None

    Returns
    -------
    result : MaskedArray
        `self`, with its mask replaced by `nomask` if no entry was
        masked.

    Examples
    --------
    >>> x = np.ma.array([[1,2 ], [3, 4]], mask=[0]*4)
    >>> x.mask
    array([[False, False],
           [False, False]], dtype=bool)
    >>> x.shrink_mask()
    >>> x.mask
    False

    """
    m = self._mask
    # Only collapse a real (non-scalar) mask with no True entries.
    if m.ndim and not m.any():
        self._mask = nomask
    return self

baseclass = property(fget=lambda self: self._baseclass,
                     doc="Class of the underlying data (read-only).")
def _get_data(self):
    """Return the current data, as a view of the original
    underlying data.
    """
    # View as _baseclass (usually ndarray) so the mask machinery is
    # bypassed entirely.
    return ndarray.view(self, self._baseclass)

_data = property(fget=_get_data)
data = property(fget=_get_data)
def _get_flat(self):
    "Return a flat iterator (a MaskedIterator over data and mask)."
    return MaskedIterator(self)

def _set_flat(self, value):
    "Set a flattened version of self to value."
    # Assign through a raveled view so writes reach the original array.
    self.ravel()[:] = value

flat = property(fget=_get_flat, fset=_set_flat,
                doc="Flat version of the array.")
def get_fill_value(self):
    """
    Return the filling value of the masked array, creating the
    dtype-dependent default lazily on first access.

    Returns
    -------
    fill_value : scalar
        The filling value.

    Examples
    --------
    >>> for dt in [np.int32, np.int64, np.float64, np.complex128]:
    ...     np.ma.array([0, 1], dtype=dt).get_fill_value()
    ...
    999999
    999999
    1e+20
    (1e+20+0j)

    >>> x = np.ma.array([0, 1.], fill_value=-np.inf)
    >>> x.get_fill_value()
    -inf

    """
    if self._fill_value is None:
        self._fill_value = _check_fill_value(None, self.dtype)
    # [()] extracts the scalar from the 0-d fill-value array.
    return self._fill_value[()]
def set_fill_value(self, value=None):
    """
    Set the filling value of the masked array.

    Parameters
    ----------
    value : scalar, optional
        The new filling value. Default is None, in which case a default
        based on the data type is used.

    See Also
    --------
    ma.set_fill_value : Equivalent function.

    Examples
    --------
    >>> x = np.ma.array([0, 1.], fill_value=-np.inf)
    >>> x.fill_value
    -inf
    >>> x.set_fill_value(np.pi)
    >>> x.fill_value
    3.1415926535897931

    Reset to default:

    >>> x.set_fill_value()
    >>> x.fill_value
    1e+20

    """
    target = _check_fill_value(value, self.dtype)
    _fill_value = self._fill_value
    if _fill_value is None:
        # Create the attribute if it was undefined
        self._fill_value = target
    else:
        # Don't overwrite the attribute, just fill it (for propagation)
        _fill_value[()] = target

fill_value = property(fget=get_fill_value, fset=set_fill_value,
                      doc="Filling value.")
def filled(self, fill_value=None):
    """
    Return a copy of self, with masked values filled with a given value.
    **However**, if there are no masked values to fill, self will be
    returned instead as an ndarray.

    Parameters
    ----------
    fill_value : scalar, optional
        The value to use for invalid entries (None by default).
        If None, the `fill_value` attribute of the array is used instead.

    Returns
    -------
    filled_array : ndarray
        A copy of ``self`` with invalid entries replaced by *fill_value*
        (be it the function argument or the attribute of ``self``), or
        ``self`` itself as an ndarray if there are no invalid entries to
        be replaced.

    Notes
    -----
    The result is **not** a MaskedArray!

    Examples
    --------
    >>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999)
    >>> x.filled()
    array([1, 2, -999, 4, -999])
    >>> type(x.filled())
    <type 'numpy.ndarray'>

    Subclassing is preserved. This means that if the data part of the masked
    array is a matrix, `filled` returns a matrix:

    >>> x = np.ma.array(np.matrix([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]])
    >>> x.filled()
    matrix([[     1, 999999],
            [999999,      4]])

    """
    m = self._mask
    if m is nomask:
        return self._data

    if fill_value is None:
        fill_value = self.fill_value
    else:
        fill_value = _check_fill_value(fill_value, self.dtype)

    if self is masked_singleton:
        return np.asanyarray(fill_value)

    if m.dtype.names:
        # Structured array: fill each field recursively.
        result = self._data.copy('K')
        _recursive_filled(result, self._mask, fill_value)
    elif not m.any():
        return self._data
    else:
        result = self._data.copy('K')
        try:
            np.copyto(result, fill_value, where=m)
        except (TypeError, AttributeError):
            # fill_value incompatible with the dtype: fall back to an
            # object-dtype result.
            fill_value = narray(fill_value, dtype=object)
            d = result.astype(object)
            result = np.choose(m, (d, fill_value))
        except IndexError:
            # ok, if scalar
            if self._data.shape:
                raise
            elif m:
                result = np.array(fill_value, dtype=self.dtype)
            else:
                result = self._data
    return result
def compressed(self):
    """
    Return all the non-masked data as a 1-D array.

    Returns
    -------
    data : ndarray
        A new `ndarray` holding the non-masked data is returned.

    Notes
    -----
    The result is **not** a MaskedArray!

    Examples
    --------
    >>> x = np.ma.array(np.arange(5), mask=[0]*2 + [1]*3)
    >>> x.compressed()
    array([0, 1])
    >>> type(x.compressed())
    <type 'numpy.ndarray'>

    """
    # ndarray.ravel (not self.ravel) deliberately bypasses any subclass
    # override on the underlying data.
    flat_data = ndarray.ravel(self._data)
    mask = self._mask
    if mask is nomask:
        return flat_data
    keep = np.logical_not(ndarray.ravel(mask))
    return flat_data.compress(keep)
def compress(self, condition, axis=None, out=None):
    """
    Return `a` where condition is ``True``.

    If condition is a `MaskedArray`, missing values are considered
    as ``False``.

    Parameters
    ----------
    condition : var
        Boolean 1-d array selecting which entries to return. If len(condition)
        is less than the size of a along the axis, then output is truncated
        to length of condition array.
    axis : {None, int}, optional
        Axis along which the operation must be performed.
    out : {None, ndarray}, optional
        Alternative output array in which to place the result. It must have
        the same shape as the expected output but the type will be cast if
        necessary.

    Returns
    -------
    result : MaskedArray
        A :class:`MaskedArray` object.

    Notes
    -----
    Please note the difference with :meth:`compressed` !
    The output of :meth:`compress` has a mask, the output of
    :meth:`compressed` does not.

    Examples
    --------
    >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
    >>> print(x)
    [[1 -- 3]
     [-- 5 --]
     [7 -- 9]]
    >>> x.compress([1, 0, 1])
    masked_array(data = [1 3],
          mask = [False False],
          fill_value=999999)

    >>> x.compress([1, 0, 1], axis=1)
    masked_array(data =
     [[1 3]
     [-- --]
     [7 9]],
          mask =
     [[False False]
     [ True  True]
     [False False]],
          fill_value=999999)

    """
    # Get the basic components
    (_data, _mask) = (self._data, self._mask)

    # Force the condition to a regular ndarray and forget the missing
    # values.
    condition = np.array(condition, copy=False, subok=False)

    # Compress data and mask with the same condition so they stay aligned.
    _new = _data.compress(condition, axis=axis, out=out).view(type(self))
    _new._update_from(self)
    if _mask is not nomask:
        _new._mask = _mask.compress(condition, axis=axis)
    return _new
def __str__(self):
    """
    String representation.

    Masked entries are rendered with the global `masked_print_option`
    (``--`` by default); when printing is disabled, masked entries show
    the fill value instead.
    """
    if masked_print_option.enabled():
        f = masked_print_option
        if self is masked:
            return str(f)
        m = self._mask
        if m is nomask:
            res = self._data
        else:
            if m.shape == () and m.itemsize==len(m.dtype):
                # 0-d array: handle structured and plain scalars inline.
                if m.dtype.names:
                    m = m.view((bool, len(m.dtype)))
                    if m.any():
                        return str(tuple((f if _m else _d) for _d, _m in
                                         zip(self._data.tolist(), m)))
                    else:
                        return str(self._data)
                elif m:
                    return str(f)
                else:
                    return str(self._data)
            # convert to object array to make filled work
            names = self.dtype.names
            if names is None:
                data = self._data
                mask = m
                # For big arrays, to avoid a costly conversion to the
                # object dtype, extract the corners before the conversion.
                for axis in range(self.ndim):
                    if data.shape[axis] > self._print_width:
                        ind = self._print_width // 2
                        arr = np.split(data, (ind, -ind), axis=axis)
                        data = np.concatenate((arr[0], arr[2]), axis=axis)
                        arr = np.split(mask, (ind, -ind), axis=axis)
                        mask = np.concatenate((arr[0], arr[2]), axis=axis)
                res = data.astype("O")
                res.view(ndarray)[mask] = f
            else:
                rdtype = _recursive_make_descr(self.dtype, "O")
                res = self._data.astype(rdtype)
                _recursive_printoption(res, m, f)
    else:
        res = self.filled(self.fill_value)
    return str(res)
def __repr__(self):
    """
    Literal string representation.

    Chooses among the short/long, standard/flexible templates depending
    on the array's dimensionality and whether its dtype is structured.
    """
    n = len(self.shape)
    if self._baseclass is np.ndarray:
        name = 'array'
    else:
        name = self._baseclass.__name__

    parameters = dict(name=name, nlen=" " * len(name),
                      data=str(self), mask=str(self._mask),
                      fill=str(self.fill_value), dtype=str(self.dtype))
    if self.dtype.names:
        if n <= 1:
            return _print_templates['short_flx'] % parameters
        return _print_templates['long_flx'] % parameters
    elif n <= 1:
        return _print_templates['short_std'] % parameters
    return _print_templates['long_std'] % parameters
def _delegate_binop(self, other):
    """
    Decide whether a binary op should return NotImplemented so that
    `other` (a higher-priority non-ndarray operand) handles it instead.
    """
    # This emulates the logic in
    # multiarray/number.c:PyArray_GenericBinaryFunction
    if isinstance(other, np.ndarray) or hasattr(other, "__numpy_ufunc__"):
        return False
    other_priority = getattr(other, "__array_priority__", -1000000)
    return self.__array_priority__ < other_priority
    def __eq__(self, other):
        """
        Check whether other equals self elementwise.
        """
        # Comparing the masked singleton itself always yields masked.
        if self is masked:
            return masked
        omask = getattr(other, '_mask', nomask)
        if omask is nomask:
            # `other` carries no mask: fill our masked entries with 0 so the
            # raw ndarray comparison is well defined, then re-apply our mask.
            check = self.filled(0).__eq__(other)
            try:
                check = check.view(type(self))
                check._mask = self._mask
            except AttributeError:
                # Dang, we have a bool instead of an array: return the bool
                return check
        else:
            # Both operands may be masked: compare the 0-filled data and mask
            # the result wherever either operand was masked.
            odata = filled(other, 0)
            check = self.filled(0).__eq__(odata).view(type(self))
            if self._mask is nomask:
                check._mask = omask
            else:
                mask = mask_or(self._mask, omask)
                if mask.dtype.names:
                    # Structured dtype: collapse the per-field mask into one
                    # boolean per record.
                    if mask.size > 1:
                        axis = 1
                    else:
                        axis = None
                    try:
                        mask = mask.view((bool_, len(self.dtype))).all(axis)
                    except ValueError:
                        # Nested/irregular fields can't be viewed as a flat
                        # bool array: fall back to an explicit Python loop.
                        mask = np.all([[f[n].all() for n in mask.dtype.names]
                                       for f in mask], axis=axis)
                check._mask = mask
        # NOTE(review): this body is a near-duplicate of __ne__ below; a
        # shared private helper would remove the duplication.
        return check
    def __ne__(self, other):
        """
        Check whether other doesn't equal self elementwise
        """
        # Comparing the masked singleton itself always yields masked.
        if self is masked:
            return masked
        omask = getattr(other, '_mask', nomask)
        if omask is nomask:
            # `other` carries no mask: compare against our 0-filled data,
            # then re-apply our own mask to the result.
            check = self.filled(0).__ne__(other)
            try:
                check = check.view(type(self))
                check._mask = self._mask
            except AttributeError:
                # In case check is a boolean (or a numpy.bool)
                return check
        else:
            # Both operands may be masked: compare the 0-filled data and mask
            # the result wherever either operand was masked.
            odata = filled(other, 0)
            check = self.filled(0).__ne__(odata).view(type(self))
            if self._mask is nomask:
                check._mask = omask
            else:
                mask = mask_or(self._mask, omask)
                if mask.dtype.names:
                    # Structured dtype: collapse the per-field mask into one
                    # boolean per record.
                    if mask.size > 1:
                        axis = 1
                    else:
                        axis = None
                    try:
                        mask = mask.view((bool_, len(self.dtype))).all(axis)
                    except ValueError:
                        # Nested/irregular fields can't be viewed as a flat
                        # bool array: fall back to an explicit Python loop.
                        mask = np.all([[f[n].all() for n in mask.dtype.names]
                                       for f in mask], axis=axis)
                check._mask = mask
        # NOTE(review): near-duplicate of __eq__ above; a shared helper
        # would remove the duplication.
        return check
def __add__(self, other):
"""
Add self to other, and return a new masked array.
"""
if self._delegate_binop(other):
return NotImplemented
return add(self, other)
def __radd__(self, other):
"""
Add other to self, and return a new masked array.
"""
# In analogy with __rsub__ and __rdiv__, use original order:
# we get here from `other + self`.
return add(other, self)
def __sub__(self, other):
"""
Subtract other from self, and return a new masked array.
"""
if self._delegate_binop(other):
return NotImplemented
return subtract(self, other)
def __rsub__(self, other):
"""
Subtract self from other, and return a new masked array.
"""
return subtract(other, self)
def __mul__(self, other):
"Multiply self by other, and return a new masked array."
if self._delegate_binop(other):
return NotImplemented
return multiply(self, other)
def __rmul__(self, other):
"""
Multiply other by self, and return a new masked array.
"""
# In analogy with __rsub__ and __rdiv__, use original order:
# we get here from `other * self`.
return multiply(other, self)
def __div__(self, other):
"""
Divide other into self, and return a new masked array.
"""
if self._delegate_binop(other):
return NotImplemented
return divide(self, other)
def __truediv__(self, other):
"""
Divide other into self, and return a new masked array.
"""
if self._delegate_binop(other):
return NotImplemented
return true_divide(self, other)
def __rtruediv__(self, other):
"""
Divide self into other, and return a new masked array.
"""
return true_divide(other, self)
def __floordiv__(self, other):
"""
Divide other into self, and return a new masked array.
"""
if self._delegate_binop(other):
return NotImplemented
return floor_divide(self, other)
def __rfloordiv__(self, other):
"""
Divide self into other, and return a new masked array.
"""
return floor_divide(other, self)
def __pow__(self, other):
"""
Raise self to the power other, masking the potential NaNs/Infs
"""
if self._delegate_binop(other):
return NotImplemented
return power(self, other)
def __rpow__(self, other):
"""
Raise other to the power self, masking the potential NaNs/Infs
"""
return power(other, self)
def __iadd__(self, other):
"""
Add other to self in-place.
"""
m = getmask(other)
if self._mask is nomask:
if m is not nomask and m.any():
self._mask = make_mask_none(self.shape, self.dtype)
self._mask += m
else:
if m is not nomask:
self._mask += m
self._data.__iadd__(np.where(self._mask, self.dtype.type(0),
getdata(other)))
return self
def __isub__(self, other):
"""
Subtract other from self in-place.
"""
m = getmask(other)
if self._mask is nomask:
if m is not nomask and m.any():
self._mask = make_mask_none(self.shape, self.dtype)
self._mask += m
elif m is not nomask:
self._mask += m
self._data.__isub__(np.where(self._mask, self.dtype.type(0),
getdata(other)))
return self
def __imul__(self, other):
"""
Multiply self by other in-place.
"""
m = getmask(other)
if self._mask is nomask:
if m is not nomask and m.any():
self._mask = make_mask_none(self.shape, self.dtype)
self._mask += m
elif m is not nomask:
self._mask += m
self._data.__imul__(np.where(self._mask, self.dtype.type(1),
getdata(other)))
return self
def __idiv__(self, other):
"""
Divide self by other in-place.
"""
other_data = getdata(other)
dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
other_mask = getmask(other)
new_mask = mask_or(other_mask, dom_mask)
# The following 3 lines control the domain filling
if dom_mask.any():
(_, fval) = ufunc_fills[np.divide]
other_data = np.where(dom_mask, fval, other_data)
self._mask |= new_mask
self._data.__idiv__(np.where(self._mask, self.dtype.type(1),
other_data))
return self
def __ifloordiv__(self, other):
"""
Floor divide self by other in-place.
"""
other_data = getdata(other)
dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
other_mask = getmask(other)
new_mask = mask_or(other_mask, dom_mask)
# The following 3 lines control the domain filling
if dom_mask.any():
(_, fval) = ufunc_fills[np.floor_divide]
other_data = np.where(dom_mask, fval, other_data)
self._mask |= new_mask
self._data.__ifloordiv__(np.where(self._mask, self.dtype.type(1),
other_data))
return self
def __itruediv__(self, other):
"""
True divide self by other in-place.
"""
other_data = getdata(other)
dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
other_mask = getmask(other)
new_mask = mask_or(other_mask, dom_mask)
# The following 3 lines control the domain filling
if dom_mask.any():
(_, fval) = ufunc_fills[np.true_divide]
other_data = np.where(dom_mask, fval, other_data)
self._mask |= new_mask
self._data.__itruediv__(np.where(self._mask, self.dtype.type(1),
other_data))
return self
    def __ipow__(self, other):
        """
        Raise self to the power other, in place.
        """
        other_data = getdata(other)
        other_mask = getmask(other)
        with np.errstate(divide='ignore', invalid='ignore'):
            # Masked positions are raised to the power 1, leaving their
            # data untouched.
            self._data.__ipow__(np.where(self._mask, self.dtype.type(1),
                                         other_data))
        # Mask (and fill) any NaN/Inf the operation produced.
        invalid = np.logical_not(np.isfinite(self._data))
        if invalid.any():
            if self._mask is not nomask:
                self._mask |= invalid
            else:
                self._mask = invalid
            np.copyto(self._data, self.fill_value, where=invalid)
        # Finally, fold in the other operand's mask.
        new_mask = mask_or(other_mask, invalid)
        self._mask = mask_or(self._mask, new_mask)
        return self
def __float__(self):
"""
Convert to float.
"""
if self.size > 1:
raise TypeError("Only length-1 arrays can be converted "
"to Python scalars")
elif self._mask:
warnings.warn("Warning: converting a masked element to nan.")
return np.nan
return float(self.item())
def __int__(self):
"""
Convert to int.
"""
if self.size > 1:
raise TypeError("Only length-1 arrays can be converted "
"to Python scalars")
elif self._mask:
raise MaskError('Cannot convert masked element to a Python int.')
return int(self.item())
def get_imag(self):
"""
Return the imaginary part of the masked array.
The returned array is a view on the imaginary part of the `MaskedArray`
whose `get_imag` method is called.
Parameters
----------
None
Returns
-------
result : MaskedArray
The imaginary part of the masked array.
See Also
--------
get_real, real, imag
Examples
--------
>>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False])
>>> x.get_imag()
masked_array(data = [1.0 -- 1.6],
mask = [False True False],
fill_value = 1e+20)
"""
result = self._data.imag.view(type(self))
result.__setmask__(self._mask)
return result
imag = property(fget=get_imag, doc="Imaginary part.")
def get_real(self):
"""
Return the real part of the masked array.
The returned array is a view on the real part of the `MaskedArray`
whose `get_real` method is called.
Parameters
----------
None
Returns
-------
result : MaskedArray
The real part of the masked array.
See Also
--------
get_imag, real, imag
Examples
--------
>>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False])
>>> x.get_real()
masked_array(data = [1.0 -- 3.45],
mask = [False True False],
fill_value = 1e+20)
"""
result = self._data.real.view(type(self))
result.__setmask__(self._mask)
return result
real = property(fget=get_real, doc="Real part")
def count(self, axis=None):
"""
Count the non-masked elements of the array along the given axis.
Parameters
----------
axis : int, optional
Axis along which to count the non-masked elements. If `axis` is
`None`, all non-masked elements are counted.
Returns
-------
result : int or ndarray
If `axis` is `None`, an integer count is returned. When `axis` is
not `None`, an array with shape determined by the lengths of the
remaining axes, is returned.
See Also
--------
count_masked : Count masked elements in array or along a given axis.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.arange(6).reshape((2, 3))
>>> a[1, :] = ma.masked
>>> a
masked_array(data =
[[0 1 2]
[-- -- --]],
mask =
[[False False False]
[ True True True]],
fill_value = 999999)
>>> a.count()
3
When the `axis` keyword is specified an array of appropriate size is
returned.
>>> a.count(axis=0)
array([1, 1, 1])
>>> a.count(axis=1)
array([3, 0])
"""
m = self._mask
s = self.shape
if m is nomask:
if axis is None:
return self.size
else:
n = s[axis]
t = list(s)
del t[axis]
return np.full(t, n, dtype=np.intp)
n1 = np.size(m, axis)
n2 = np.sum(m, axis=axis, dtype=np.intp)
if axis is None:
return (n1 - n2)
else:
return narray(n1 - n2)
flatten = _arraymethod('flatten')
def ravel(self, order='C'):
"""
Returns a 1D version of self, as a view.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
The elements of `a` are read using this index order. 'C' means to
index the elements in C-like order, with the last axis index
changing fastest, back to the first axis index changing slowest.
'F' means to index the elements in Fortran-like index order, with
the first index changing fastest, and the last index changing
slowest. Note that the 'C' and 'F' options take no account of the
memory layout of the underlying array, and only refer to the order
of axis indexing. 'A' means to read the elements in Fortran-like
index order if `m` is Fortran *contiguous* in memory, C-like order
otherwise. 'K' means to read the elements in the order they occur
in memory, except for reversing the data when strides are negative.
By default, 'C' index order is used.
Returns
-------
MaskedArray
Output view is of shape ``(self.size,)`` (or
``(np.ma.product(self.shape),)``).
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> print(x)
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
>>> print(x.ravel())
[1 -- 3 -- 5 -- 7 -- 9]
"""
r = ndarray.ravel(self._data, order=order).view(type(self))
r._update_from(self)
if self._mask is not nomask:
r._mask = ndarray.ravel(self._mask, order=order).reshape(r.shape)
else:
r._mask = nomask
return r
repeat = _arraymethod('repeat')
def reshape(self, *s, **kwargs):
"""
Give a new shape to the array without changing its data.
Returns a masked array containing the same data, but with a new shape.
The result is a view on the original array; if this is not possible, a
ValueError is raised.
Parameters
----------
shape : int or tuple of ints
The new shape should be compatible with the original shape. If an
integer is supplied, then the result will be a 1-D array of that
length.
order : {'C', 'F'}, optional
Determines whether the array data should be viewed as in C
(row-major) or FORTRAN (column-major) order.
Returns
-------
reshaped_array : array
A new view on the array.
See Also
--------
reshape : Equivalent function in the masked array module.
numpy.ndarray.reshape : Equivalent method on ndarray object.
numpy.reshape : Equivalent function in the NumPy module.
Notes
-----
The reshaping operation cannot guarantee that a copy will not be made,
to modify the shape in place, use ``a.shape = s``
Examples
--------
>>> x = np.ma.array([[1,2],[3,4]], mask=[1,0,0,1])
>>> print(x)
[[-- 2]
[3 --]]
>>> x = x.reshape((4,1))
>>> print(x)
[[--]
[2]
[3]
[--]]
"""
kwargs.update(order=kwargs.get('order', 'C'))
result = self._data.reshape(*s, **kwargs).view(type(self))
result._update_from(self)
mask = self._mask
if mask is not nomask:
result._mask = mask.reshape(*s, **kwargs)
return result
def resize(self, newshape, refcheck=True, order=False):
"""
.. warning::
This method does nothing, except raise a ValueError exception. A
masked array does not own its data and therefore cannot safely be
resized in place. Use the `numpy.ma.resize` function instead.
This method is difficult to implement safely and may be deprecated in
future releases of NumPy.
"""
# Note : the 'order' keyword looks broken, let's just drop it
errmsg = "A masked array does not own its data "\
"and therefore cannot be resized.\n" \
"Use the numpy.ma.resize function instead."
raise ValueError(errmsg)
    def put(self, indices, values, mode='raise'):
        """
        Set storage-indexed locations to corresponding values.

        Sets self._data.flat[n] = values[n] for each n in indices.
        If `values` is shorter than `indices` then it will repeat.
        If `values` has some masked values, the initial mask is updated
        in consequence, else the corresponding values are unmasked.

        Parameters
        ----------
        indices : 1-D array_like
            Target indices, interpreted as integers.
        values : array_like
            Values to place in self._data copy at target indices.
        mode : {'raise', 'wrap', 'clip'}, optional
            Specifies how out-of-bounds indices will behave.
            'raise' : raise an error.
            'wrap' : wrap around.
            'clip' : clip to the range.

        Notes
        -----
        `values` can be a scalar or length 1 array.

        Examples
        --------
        >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
        >>> print(x)
        [[1 -- 3]
         [-- 5 --]
         [7 -- 9]]
        >>> x.put([0,4,8],[10,20,30])
        >>> print(x)
        [[10 -- 3]
         [-- 20 --]
         [7 -- 30]]

        >>> x.put(4,999)
        >>> print(x)
        [[10 -- 3]
         [-- 999 --]
         [7 -- 30]]
        """
        # Hard mask: get rid of the values/indices that fall on masked data
        # (a hard mask must never be overwritten by assignment).
        if self._hardmask and self._mask is not nomask:
            mask = self._mask[indices]
            indices = narray(indices, copy=False)
            values = narray(values, copy=False, subok=True)
            # Broadcast `values` against `indices` before filtering, so the
            # two stay aligned when the masked slots are dropped.
            values.resize(indices.shape)
            indices = indices[~mask]
            values = values[~mask]
        self._data.put(indices, values, mode=mode)
        # Short-circuit if neither self nor values are masked.
        if self._mask is nomask and getmask(values) is nomask:
            return
        # Update the mask: assigned unmasked values unmask their slots,
        # masked values propagate their mask.
        m = getmaskarray(self).copy()
        if getmask(values) is nomask:
            m.put(indices, False, mode=mode)
        else:
            m.put(indices, values._mask, mode=mode)
        # Collapse an all-False mask back to nomask.
        m = make_mask(m, copy=False, shrink=True)
        self._mask = m
        return
def ids(self):
"""
Return the addresses of the data and mask areas.
Parameters
----------
None
Examples
--------
>>> x = np.ma.array([1, 2, 3], mask=[0, 1, 1])
>>> x.ids()
(166670640, 166659832)
If the array has no mask, the address of `nomask` is returned. This address
is typically not close to the data in memory:
>>> x = np.ma.array([1, 2, 3])
>>> x.ids()
(166691080, 3083169284L)
"""
if self._mask is nomask:
return (self.ctypes.data, id(nomask))
return (self.ctypes.data, self._mask.ctypes.data)
def iscontiguous(self):
"""
Return a boolean indicating whether the data is contiguous.
Parameters
----------
None
Examples
--------
>>> x = np.ma.array([1, 2, 3])
>>> x.iscontiguous()
True
`iscontiguous` returns one of the flags of the masked array:
>>> x.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : True
OWNDATA : False
WRITEABLE : True
ALIGNED : True
UPDATEIFCOPY : False
"""
return self.flags['CONTIGUOUS']
def all(self, axis=None, out=None):
"""
Check if all of the elements of `a` are true.
Performs a :func:`logical_and` over the given axis and returns the result.
Masked values are considered as True during computation.
For convenience, the output array is masked where ALL the values along the
current axis are masked: if the output would have been a scalar and that
all the values are masked, then the output is `masked`.
Parameters
----------
axis : {None, integer}
Axis to perform the operation over.
If None, perform over flattened array.
out : {None, array}, optional
Array into which the result can be placed. Its type is preserved
and it must be of the right shape to hold the output.
See Also
--------
all : equivalent function
Examples
--------
>>> np.ma.array([1,2,3]).all()
True
>>> a = np.ma.array([1,2,3], mask=True)
>>> (a.all() is np.ma.masked)
True
"""
mask = _check_mask_axis(self._mask, axis)
if out is None:
d = self.filled(True).all(axis=axis).view(type(self))
if d.ndim:
d.__setmask__(mask)
elif mask:
return masked
return d
self.filled(True).all(axis=axis, out=out)
if isinstance(out, MaskedArray):
if out.ndim or mask:
out.__setmask__(mask)
return out
def any(self, axis=None, out=None):
"""
Check if any of the elements of `a` are true.
Performs a logical_or over the given axis and returns the result.
Masked values are considered as False during computation.
Parameters
----------
axis : {None, integer}
Axis to perform the operation over.
If None, perform over flattened array and return a scalar.
out : {None, array}, optional
Array into which the result can be placed. Its type is preserved
and it must be of the right shape to hold the output.
See Also
--------
any : equivalent function
"""
mask = _check_mask_axis(self._mask, axis)
if out is None:
d = self.filled(False).any(axis=axis).view(type(self))
if d.ndim:
d.__setmask__(mask)
elif mask:
d = masked
return d
self.filled(False).any(axis=axis, out=out)
if isinstance(out, MaskedArray):
if out.ndim or mask:
out.__setmask__(mask)
return out
def nonzero(self):
"""
Return the indices of unmasked elements that are not zero.
Returns a tuple of arrays, one for each dimension, containing the
indices of the non-zero elements in that dimension. The corresponding
non-zero values can be obtained with::
a[a.nonzero()]
To group the indices by element, rather than dimension, use
instead::
np.transpose(a.nonzero())
The result of this is always a 2d array, with a row for each non-zero
element.
Parameters
----------
None
Returns
-------
tuple_of_arrays : tuple
Indices of elements that are non-zero.
See Also
--------
numpy.nonzero :
Function operating on ndarrays.
flatnonzero :
Return indices that are non-zero in the flattened version of the input
array.
ndarray.nonzero :
Equivalent ndarray method.
count_nonzero :
Counts the number of non-zero elements in the input array.
Examples
--------
>>> import numpy.ma as ma
>>> x = ma.array(np.eye(3))
>>> x
masked_array(data =
[[ 1. 0. 0.]
[ 0. 1. 0.]
[ 0. 0. 1.]],
mask =
False,
fill_value=1e+20)
>>> x.nonzero()
(array([0, 1, 2]), array([0, 1, 2]))
Masked elements are ignored.
>>> x[1, 1] = ma.masked
>>> x
masked_array(data =
[[1.0 0.0 0.0]
[0.0 -- 0.0]
[0.0 0.0 1.0]],
mask =
[[False False False]
[False True False]
[False False False]],
fill_value=1e+20)
>>> x.nonzero()
(array([0, 2]), array([0, 2]))
Indices can also be grouped by element.
>>> np.transpose(x.nonzero())
array([[0, 0],
[2, 2]])
A common use for ``nonzero`` is to find the indices of an array, where
a condition is True. Given an array `a`, the condition `a` > 3 is a
boolean array and since False is interpreted as 0, ma.nonzero(a > 3)
yields the indices of the `a` where the condition is true.
>>> a = ma.array([[1,2,3],[4,5,6],[7,8,9]])
>>> a > 3
masked_array(data =
[[False False False]
[ True True True]
[ True True True]],
mask =
False,
fill_value=999999)
>>> ma.nonzero(a > 3)
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
The ``nonzero`` method of the condition array can also be called.
>>> (a > 3).nonzero()
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
"""
return narray(self.filled(0), copy=False).nonzero()
    def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):
        """
        (this docstring should be overwritten)
        """
        #!!!: implement out + test!
        m = self._mask
        if m is nomask:
            # No mask: defer to the plain ndarray implementation.
            result = super(MaskedArray, self).trace(offset=offset, axis1=axis1,
                                                    axis2=axis2, out=out)
            return result.astype(dtype)
        else:
            # Masked case: extract the diagonal and sum it with masked
            # entries filled with 0.
            D = self.diagonal(offset=offset, axis1=axis1, axis2=axis2)
            return D.astype(dtype).filled(0).sum(axis=None, out=out)
    # The placeholder docstring above is replaced by ndarray's.
    trace.__doc__ = ndarray.trace.__doc__
def dot(self, b, out=None, strict=False):
"""
a.dot(b, out=None)
Masked dot product of two arrays. Note that `out` and `strict` are
located in different positions than in `ma.dot`. In order to
maintain compatibility with the functional version, it is
recommended that the optional arguments be treated as keyword only.
At some point that may be mandatory.
.. versionadded:: 1.10.0
Parameters
----------
b : masked_array_like
Inputs array.
out : masked_array, optional
Output argument. This must have the exact kind that would be
returned if it was not used. In particular, it must have the
right type, must be C-contiguous, and its dtype must be the
dtype that would be returned for `ma.dot(a,b)`. This is a
performance feature. Therefore, if these conditions are not
met, an exception is raised, instead of attempting to be
flexible.
strict : bool, optional
Whether masked data are propagated (True) or set to 0 (False)
for the computation. Default is False. Propagating the mask
means that if a masked value appears in a row or column, the
whole row or column is considered masked.
.. versionadded:: 1.10.2
See Also
--------
numpy.ma.dot : equivalent function
"""
return dot(self, b, out=out, strict=strict)
def sum(self, axis=None, dtype=None, out=None):
"""
Return the sum of the array elements over the given axis.
Masked elements are set to 0 internally.
Parameters
----------
axis : {None, -1, int}, optional
Axis along which the sum is computed. The default
(`axis` = None) is to compute over the flattened array.
dtype : {None, dtype}, optional
Determines the type of the returned array and of the accumulator
where the elements are summed. If dtype has the value None and
the type of a is an integer type of precision less than the default
platform integer, then the default platform integer precision is
used. Otherwise, the dtype is the same as that of a.
out : {None, ndarray}, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary.
Returns
-------
sum_along_axis : MaskedArray or scalar
An array with the same shape as self, with the specified
axis removed. If self is a 0-d array, or if `axis` is None, a scalar
is returned. If an output array is specified, a reference to
`out` is returned.
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> print(x)
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
>>> print(x.sum())
25
>>> print(x.sum(axis=1))
[4 5 16]
>>> print(x.sum(axis=0))
[8 5 12]
>>> print(type(x.sum(axis=0, dtype=np.int64)[0]))
<type 'numpy.int64'>
"""
_mask = self._mask
newmask = _check_mask_axis(_mask, axis)
# No explicit output
if out is None:
result = self.filled(0).sum(axis, dtype=dtype)
rndim = getattr(result, 'ndim', 0)
if rndim:
result = result.view(type(self))
result.__setmask__(newmask)
elif newmask:
result = masked
return result
# Explicit output
result = self.filled(0).sum(axis, dtype=dtype, out=out)
if isinstance(out, MaskedArray):
outmask = getattr(out, '_mask', nomask)
if (outmask is nomask):
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
return out
def cumsum(self, axis=None, dtype=None, out=None):
"""
Return the cumulative sum of the elements along the given axis.
The cumulative sum is calculated over the flattened array by
default, otherwise over the specified axis.
Masked values are set to 0 internally during the computation.
However, their position is saved, and the result will be masked at
the same locations.
Parameters
----------
axis : {None, -1, int}, optional
Axis along which the sum is computed. The default (`axis` = None) is to
compute over the flattened array. `axis` may be negative, in which case
it counts from the last to the first axis.
dtype : {None, dtype}, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults
to the dtype of `a`, unless `a` has an integer dtype with a
precision less than that of the default platform integer. In
that case, the default platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary.
Returns
-------
cumsum : ndarray.
A new array holding the result is returned unless ``out`` is
specified, in which case a reference to ``out`` is returned.
Notes
-----
The mask is lost if `out` is not a valid :class:`MaskedArray` !
Arithmetic is modular when using integer types, and no error is
raised on overflow.
Examples
--------
>>> marr = np.ma.array(np.arange(10), mask=[0,0,0,1,1,1,0,0,0,0])
>>> print(marr.cumsum())
[0 1 3 -- -- -- 9 16 24 33]
"""
result = self.filled(0).cumsum(axis=axis, dtype=dtype, out=out)
if out is not None:
if isinstance(out, MaskedArray):
out.__setmask__(self.mask)
return out
result = result.view(type(self))
result.__setmask__(self._mask)
return result
def prod(self, axis=None, dtype=None, out=None):
"""
Return the product of the array elements over the given axis.
Masked elements are set to 1 internally for computation.
Parameters
----------
axis : {None, int}, optional
Axis over which the product is taken. If None is used, then the
product is over all the array elements.
dtype : {None, dtype}, optional
Determines the type of the returned array and of the accumulator
where the elements are multiplied. If ``dtype`` has the value ``None``
and the type of a is an integer type of precision less than the default
platform integer, then the default platform integer precision is
used. Otherwise, the dtype is the same as that of a.
out : {None, array}, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type will be cast if
necessary.
Returns
-------
product_along_axis : {array, scalar}, see dtype parameter above.
Returns an array whose shape is the same as a with the specified
axis removed. Returns a 0d array when a is 1d or axis=None.
Returns a reference to the specified output array if specified.
See Also
--------
prod : equivalent function
Notes
-----
Arithmetic is modular when using integer types, and no error is raised
on overflow.
Examples
--------
>>> np.prod([1.,2.])
2.0
>>> np.prod([1.,2.], dtype=np.int32)
2
>>> np.prod([[1.,2.],[3.,4.]])
24.0
>>> np.prod([[1.,2.],[3.,4.]], axis=1)
array([ 2., 12.])
"""
_mask = self._mask
newmask = _check_mask_axis(_mask, axis)
# No explicit output
if out is None:
result = self.filled(1).prod(axis, dtype=dtype)
rndim = getattr(result, 'ndim', 0)
if rndim:
result = result.view(type(self))
result.__setmask__(newmask)
elif newmask:
result = masked
return result
# Explicit output
result = self.filled(1).prod(axis, dtype=dtype, out=out)
if isinstance(out, MaskedArray):
outmask = getattr(out, '_mask', nomask)
if (outmask is nomask):
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
return out
product = prod
def cumprod(self, axis=None, dtype=None, out=None):
"""
Return the cumulative product of the elements along the given axis.
The cumulative product is taken over the flattened array by
default, otherwise over the specified axis.
Masked values are set to 1 internally during the computation.
However, their position is saved, and the result will be masked at
the same locations.
Parameters
----------
axis : {None, -1, int}, optional
Axis along which the product is computed. The default
(`axis` = None) is to compute over the flattened array.
dtype : {None, dtype}, optional
Determines the type of the returned array and of the accumulator
where the elements are multiplied. If ``dtype`` has the value ``None``
and the type of ``a`` is an integer type of precision less than the
default platform integer, then the default platform integer precision
is used. Otherwise, the dtype is the same as that of ``a``.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary.
Returns
-------
cumprod : ndarray
A new array holding the result is returned unless out is specified,
in which case a reference to out is returned.
Notes
-----
The mask is lost if `out` is not a valid MaskedArray !
Arithmetic is modular when using integer types, and no error is
raised on overflow.
"""
result = self.filled(1).cumprod(axis=axis, dtype=dtype, out=out)
if out is not None:
if isinstance(out, MaskedArray):
out.__setmask__(self._mask)
return out
result = result.view(type(self))
result.__setmask__(self._mask)
return result
    def mean(self, axis=None, dtype=None, out=None):
        """
        Returns the average of the array elements.

        Masked entries are ignored.
        The average is taken over the flattened array by default, otherwise over
        the specified axis. Refer to `numpy.mean` for the full documentation.

        Parameters
        ----------
        a : array_like
            Array containing numbers whose mean is desired. If `a` is not an
            array, a conversion is attempted.
        axis : int, optional
            Axis along which the means are computed. The default is to compute
            the mean of the flattened array.
        dtype : dtype, optional
            Type to use in computing the mean. For integer inputs, the default
            is float64; for floating point, inputs it is the same as the input
            dtype.
        out : ndarray, optional
            Alternative output array in which to place the result. It must have
            the same shape as the expected output but the type will be cast if
            necessary.

        Returns
        -------
        mean : ndarray, see dtype parameter above
            If `out=None`, returns a new array containing the mean values,
            otherwise a reference to the output array is returned.

        See Also
        --------
        numpy.ma.mean : Equivalent function.
        numpy.mean : Equivalent function on non-masked arrays.
        numpy.ma.average: Weighted average.

        Examples
        --------
        >>> a = np.ma.array([1,2,3], mask=[False, False, True])
        >>> a
        masked_array(data = [1 2 --],
                     mask = [False False  True],
               fill_value = 999999)
        >>> a.mean()
        1.5

        """
        if self._mask is nomask:
            # No mask: plain ndarray mean on the data.
            result = super(MaskedArray, self).mean(axis=axis, dtype=dtype)
        else:
            # Mean of the unmasked entries only: sum / count.
            dsum = self.sum(axis=axis, dtype=dtype)
            cnt = self.count(axis=axis)
            # A scalar count of 0 means everything is masked.
            if cnt.shape == () and (cnt == 0):
                result = masked
            else:
                result = dsum * 1. / cnt
        if out is not None:
            out.flat = result
            if isinstance(out, MaskedArray):
                outmask = getattr(out, '_mask', nomask)
                if (outmask is nomask):
                    outmask = out._mask = make_mask_none(out.shape)
                # Propagate the result's mask into the output array.
                outmask.flat = getattr(result, '_mask', nomask)
            return out
        return result
    def anom(self, axis=None, dtype=None):
        """
        Compute the anomalies (deviations from the arithmetic mean)
        along the given axis.

        Returns an array of anomalies, with the same shape as the input and
        where the arithmetic mean is computed along the given axis.

        Parameters
        ----------
        axis : int, optional
            Axis over which the anomalies are taken.
            The default is to use the mean of the flattened array as reference.
        dtype : dtype, optional
            Type to use in computing the variance. For arrays of integer type
             the default is float32; for arrays of float types it is the same as
             the array type.

        See Also
        --------
        mean : Compute the mean of the array.

        Examples
        --------
        >>> a = np.ma.array([1,2,3])
        >>> a.anom()
        masked_array(data = [-1.  0.  1.],
                     mask = False,
               fill_value = 1e+20)

        """
        m = self.mean(axis, dtype)
        if m is masked:
            return m

        # NOTE(review): `not axis` is True for both axis=None and axis=0,
        # so axis=0 takes the plain-subtraction branch; numpy's broadcasting
        # aligns the trailing axes there, presumably making it equivalent to
        # the expand_dims branch — confirm for exotic subclasses.
        if not axis:
            return (self - m)
        else:
            return (self - expand_dims(m, axis))
    def var(self, axis=None, dtype=None, out=None, ddof=0):
        ""
        # Easy case: nomask, business as usual
        if self._mask is nomask:
            return self._data.var(axis=axis, dtype=dtype, out=out, ddof=ddof)
        # Some data are masked, yay!
        # Denominator: number of unmasked entries per slice, minus the
        # delta degrees of freedom.
        cnt = self.count(axis=axis) - ddof
        danom = self.anom(axis=axis, dtype=dtype)
        if iscomplexobj(self):
            danom = umath.absolute(danom) ** 2
        else:
            danom *= danom
        dvar = divide(danom.sum(axis), cnt).view(type(self))
        # Apply the mask if it's not a scalar
        if dvar.ndim:
            # Slices that are fully masked, or whose count net of ddof is
            # non-positive, are invalid.
            dvar._mask = mask_or(self._mask.all(axis), (cnt <= 0))
            dvar._update_from(self)
        elif getattr(dvar, '_mask', False):
            # Make sure that masked is returned when the scalar is masked.
            dvar = masked
            if out is not None:
                if isinstance(out, MaskedArray):
                    out.flat = 0
                    out.__setmask__(True)
                elif out.dtype.kind in 'biu':
                    # Integer/boolean outputs have no way to represent an
                    # invalid entry, so refuse rather than lose the mask.
                    errmsg = "Masked data information would be lost in one or "\
                             "more location."
                    raise MaskError(errmsg)
                else:
                    out.flat = np.nan
                return out
        # In case we have an explicit output
        if out is not None:
            # Set the data
            out.flat = dvar
            # Set the mask if needed
            if isinstance(out, MaskedArray):
                out.__setmask__(dvar.mask)
            return out
        return dvar
    var.__doc__ = np.var.__doc__
def std(self, axis=None, dtype=None, out=None, ddof=0):
""
dvar = self.var(axis=axis, dtype=dtype, out=out, ddof=ddof)
if dvar is not masked:
if out is not None:
np.power(out, 0.5, out=out, casting='unsafe')
return out
dvar = sqrt(dvar)
return dvar
std.__doc__ = np.std.__doc__
def round(self, decimals=0, out=None):
"""
Return an array rounded a to the given number of decimals.
Refer to `numpy.around` for full documentation.
See Also
--------
numpy.around : equivalent function
"""
result = self._data.round(decimals=decimals, out=out).view(type(self))
if result.ndim > 0:
result._mask = self._mask
result._update_from(self)
elif self._mask:
# Return masked when the scalar is masked
result = masked
# No explicit output: we're done
if out is None:
return result
if isinstance(out, MaskedArray):
out.__setmask__(self._mask)
return out
round.__doc__ = ndarray.round.__doc__
def argsort(self, axis=None, kind='quicksort', order=None, fill_value=None):
"""
Return an ndarray of indices that sort the array along the
specified axis. Masked values are filled beforehand to
`fill_value`.
Parameters
----------
axis : int, optional
Axis along which to sort. The default is -1 (last axis).
If None, the flattened array is used.
fill_value : var, optional
Value used to fill the array before sorting.
The default is the `fill_value` attribute of the input array.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
order : list, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. Not all fields need be
specified.
Returns
-------
index_array : ndarray, int
Array of indices that sort `a` along the specified axis.
In other words, ``a[index_array]`` yields a sorted `a`.
See Also
--------
sort : Describes sorting algorithms used.
lexsort : Indirect stable sort with multiple keys.
ndarray.sort : Inplace sort.
Notes
-----
See `sort` for notes on the different sorting algorithms.
Examples
--------
>>> a = np.ma.array([3,2,1], mask=[False, False, True])
>>> a
masked_array(data = [3 2 --],
mask = [False False True],
fill_value = 999999)
>>> a.argsort()
array([1, 0, 2])
"""
if fill_value is None:
fill_value = default_fill_value(self)
d = self.filled(fill_value).view(ndarray)
return d.argsort(axis=axis, kind=kind, order=order)
def argmin(self, axis=None, fill_value=None, out=None):
"""
Return array of indices to the minimum values along the given axis.
Parameters
----------
axis : {None, integer}
If None, the index is into the flattened array, otherwise along
the specified axis
fill_value : {var}, optional
Value used to fill in the masked values. If None, the output of
minimum_fill_value(self._data) is used instead.
out : {None, array}, optional
Array into which the result can be placed. Its type is preserved
and it must be of the right shape to hold the output.
Returns
-------
ndarray or scalar
If multi-dimension input, returns a new ndarray of indices to the
minimum values along the given axis. Otherwise, returns a scalar
of index to the minimum values along the given axis.
Examples
--------
>>> x = np.ma.array(arange(4), mask=[1,1,0,0])
>>> x.shape = (2,2)
>>> print(x)
[[-- --]
[2 3]]
>>> print(x.argmin(axis=0, fill_value=-1))
[0 0]
>>> print(x.argmin(axis=0, fill_value=9))
[1 1]
"""
if fill_value is None:
fill_value = minimum_fill_value(self)
d = self.filled(fill_value).view(ndarray)
return d.argmin(axis, out=out)
def argmax(self, axis=None, fill_value=None, out=None):
"""
Returns array of indices of the maximum values along the given axis.
Masked values are treated as if they had the value fill_value.
Parameters
----------
axis : {None, integer}
If None, the index is into the flattened array, otherwise along
the specified axis
fill_value : {var}, optional
Value used to fill in the masked values. If None, the output of
maximum_fill_value(self._data) is used instead.
out : {None, array}, optional
Array into which the result can be placed. Its type is preserved
and it must be of the right shape to hold the output.
Returns
-------
index_array : {integer_array}
Examples
--------
>>> a = np.arange(6).reshape(2,3)
>>> a.argmax()
5
>>> a.argmax(0)
array([1, 1, 1])
>>> a.argmax(1)
array([2, 2])
"""
if fill_value is None:
fill_value = maximum_fill_value(self._data)
d = self.filled(fill_value).view(ndarray)
return d.argmax(axis, out=out)
    def sort(self, axis=-1, kind='quicksort', order=None,
             endwith=True, fill_value=None):
        """
        Sort the array, in-place
        Parameters
        ----------
        a : array_like
            Array to be sorted.
        axis : int, optional
            Axis along which to sort. If None, the array is flattened before
            sorting. The default is -1, which sorts along the last axis.
        kind : {'quicksort', 'mergesort', 'heapsort'}, optional
            Sorting algorithm. Default is 'quicksort'.
        order : list, optional
            When `a` is a structured array, this argument specifies which fields
            to compare first, second, and so on. This list does not need to
            include all of the fields.
        endwith : {True, False}, optional
            Whether missing values (if any) should be forced in the upper indices
            (at the end of the array) (True) or lower indices (at the beginning).
            When the array contains unmasked values of the largest (or smallest if
            False) representable value of the datatype the ordering of these values
            and the masked values is undefined. To enforce the masked values are
            at the end (beginning) in this case one must sort the mask.
        fill_value : {var}, optional
            Value used internally for the masked values.
            If ``fill_value`` is not None, it supersedes ``endwith``.
        Returns
        -------
        sorted_array : ndarray
            Array of the same type and shape as `a`.
        See Also
        --------
        ndarray.sort : Method to sort an array in-place.
        argsort : Indirect sort.
        lexsort : Indirect stable sort on multiple keys.
        searchsorted : Find elements in a sorted array.
        Notes
        -----
        See ``sort`` for notes on the different sorting algorithms.
        Examples
        --------
        >>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0])
        >>> # Default
        >>> a.sort()
        >>> print(a)
        [1 3 5 -- --]
        >>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0])
        >>> # Put missing values in the front
        >>> a.sort(endwith=False)
        >>> print(a)
        [-- -- 1 3 5]
        >>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0])
        >>> # fill_value takes over endwith
        >>> a.sort(endwith=False, fill_value=3)
        >>> print(a)
        [1 -- -- 3 5]
        """
        if self._mask is nomask:
            # No masked values: delegate to the plain in-place ndarray sort.
            ndarray.sort(self, axis=axis, kind=kind, order=order)
        else:
            if self is masked:
                return self
            if fill_value is None:
                if endwith:
                    # nan > inf
                    if np.issubdtype(self.dtype, np.floating):
                        filler = np.nan
                    else:
                        filler = minimum_fill_value(self)
                else:
                    filler = maximum_fill_value(self)
            else:
                filler = fill_value
            # Indirect sort of the filled data; masked entries end up
            # wherever `filler` sorts to.
            sidx = self.filled(filler).argsort(axis=axis, kind=kind,
                                               order=order)
            # save meshgrid memory for 1d arrays
            if self.ndim == 1:
                idx = sidx
            else:
                idx = np.meshgrid(*[np.arange(x) for x in self.shape], sparse=True,
                                  indexing='ij')
                idx[axis] = sidx
            # Reorder data and mask through the same fancy index, then
            # write both back in place (the arrays keep their identity).
            tmp_mask = self._mask[idx].flat
            tmp_data = self._data[idx].flat
            self._data.flat = tmp_data
            self._mask.flat = tmp_mask
        return
    def min(self, axis=None, out=None, fill_value=None):
        """
        Return the minimum along a given axis.
        Parameters
        ----------
        axis : {None, int}, optional
            Axis along which to operate. By default, ``axis`` is None and the
            flattened input is used.
        out : array_like, optional
            Alternative output array in which to place the result. Must be of
            the same shape and buffer length as the expected output.
        fill_value : {var}, optional
            Value used to fill in the masked values.
            If None, use the output of `minimum_fill_value`.
        Returns
        -------
        amin : array_like
            New array holding the result.
            If ``out`` was specified, ``out`` is returned.
        See Also
        --------
        minimum_fill_value
            Returns the minimum filling value for a given datatype.
        """
        _mask = self._mask
        # Mask of the reduced result: True where a whole slice is masked.
        newmask = _check_mask_axis(_mask, axis)
        if fill_value is None:
            fill_value = minimum_fill_value(self)
        # No explicit output
        if out is None:
            result = self.filled(fill_value).min(
                axis=axis, out=out).view(type(self))
            if result.ndim:
                # Set the mask
                result.__setmask__(newmask)
                # Get rid of Infs
                if newmask.ndim:
                    np.copyto(result, result.fill_value, where=newmask)
            elif newmask:
                # 0-d result of a fully masked reduction: the singleton.
                result = masked
            return result
        # Explicit output
        result = self.filled(fill_value).min(axis=axis, out=out)
        if isinstance(out, MaskedArray):
            outmask = getattr(out, '_mask', nomask)
            if (outmask is nomask):
                outmask = out._mask = make_mask_none(out.shape)
            outmask.flat = newmask
        else:
            if out.dtype.kind in 'biu':
                # Integer/boolean outputs cannot represent invalid entries.
                errmsg = "Masked data information would be lost in one or more"\
                         " location."
                raise MaskError(errmsg)
            np.copyto(out, np.nan, where=newmask)
        return out
def mini(self, axis=None):
"""
Return the array minimum along the specified axis.
Parameters
----------
axis : int, optional
The axis along which to find the minima. Default is None, in which case
the minimum value in the whole array is returned.
Returns
-------
min : scalar or MaskedArray
If `axis` is None, the result is a scalar. Otherwise, if `axis` is
given and the array is at least 2-D, the result is a masked array with
dimension one smaller than the array on which `mini` is called.
Examples
--------
>>> x = np.ma.array(np.arange(6), mask=[0 ,1, 0, 0, 0 ,1]).reshape(3, 2)
>>> print(x)
[[0 --]
[2 3]
[4 --]]
>>> x.mini()
0
>>> x.mini(axis=0)
masked_array(data = [0 3],
mask = [False False],
fill_value = 999999)
>>> print(x.mini(axis=1))
[0 2 4]
"""
if axis is None:
return minimum(self)
else:
return minimum.reduce(self, axis)
    def max(self, axis=None, out=None, fill_value=None):
        """
        Return the maximum along a given axis.
        Parameters
        ----------
        axis : {None, int}, optional
            Axis along which to operate. By default, ``axis`` is None and the
            flattened input is used.
        out : array_like, optional
            Alternative output array in which to place the result. Must
            be of the same shape and buffer length as the expected output.
        fill_value : {var}, optional
            Value used to fill in the masked values.
            If None, use the output of maximum_fill_value().
        Returns
        -------
        amax : array_like
            New array holding the result.
            If ``out`` was specified, ``out`` is returned.
        See Also
        --------
        maximum_fill_value
            Returns the maximum filling value for a given datatype.
        """
        _mask = self._mask
        # Mask of the reduced result: True where a whole slice is masked.
        newmask = _check_mask_axis(_mask, axis)
        if fill_value is None:
            fill_value = maximum_fill_value(self)
        # No explicit output
        if out is None:
            result = self.filled(fill_value).max(
                axis=axis, out=out).view(type(self))
            if result.ndim:
                # Set the mask
                result.__setmask__(newmask)
                # Get rid of Infs
                if newmask.ndim:
                    np.copyto(result, result.fill_value, where=newmask)
            elif newmask:
                # 0-d result of a fully masked reduction: the singleton.
                result = masked
            return result
        # Explicit output
        result = self.filled(fill_value).max(axis=axis, out=out)
        if isinstance(out, MaskedArray):
            outmask = getattr(out, '_mask', nomask)
            if (outmask is nomask):
                outmask = out._mask = make_mask_none(out.shape)
            outmask.flat = newmask
        else:
            if out.dtype.kind in 'biu':
                # Integer/boolean outputs cannot represent invalid entries.
                errmsg = "Masked data information would be lost in one or more"\
                         " location."
                raise MaskError(errmsg)
            np.copyto(out, np.nan, where=newmask)
        return out
    def ptp(self, axis=None, out=None, fill_value=None):
        """
        Return (maximum - minimum) along the given dimension
        (i.e. peak-to-peak value).
        Parameters
        ----------
        axis : {None, int}, optional
            Axis along which to find the peaks. If None (default) the
            flattened array is used.
        out : {None, array_like}, optional
            Alternative output array in which to place the result. It must
            have the same shape and buffer length as the expected output
            but the type will be cast if necessary.
        fill_value : {var}, optional
            Value used to fill in the masked values.
        Returns
        -------
        ptp : ndarray.
            A new array holding the result, unless ``out`` was
            specified, in which case a reference to ``out`` is returned.
        """
        if out is None:
            # In-place subtraction keeps the array returned by max() as
            # the result object.
            result = self.max(axis=axis, fill_value=fill_value)
            result -= self.min(axis=axis, fill_value=fill_value)
            return result
        out.flat = self.max(axis=axis, out=out, fill_value=fill_value)
        min_value = self.min(axis=axis, fill_value=fill_value)
        # 'unsafe' permits writing the difference into an integer `out`.
        np.subtract(out, min_value, out=out, casting='unsafe')
        return out
    def take(self, indices, axis=None, out=None, mode='raise'):
        """
        Take entries at the given `indices` along `axis`.
        The data and the mask are indexed with the same (filled) indices;
        entries addressed through a masked index come out masked in the
        result. Returns a masked array, or fills and returns `out`.
        """
        (_data, _mask) = (self._data, self._mask)
        cls = type(self)
        # Make sure the indices are not masked
        maskindices = getattr(indices, '_mask', nomask)
        if maskindices is not nomask:
            indices = indices.filled(0)
        # Get the data
        if out is None:
            out = _data.take(indices, axis=axis, mode=mode).view(cls)
        else:
            np.take(_data, indices, axis=axis, mode=mode, out=out)
        # Get the mask
        if isinstance(out, MaskedArray):
            if _mask is nomask:
                outmask = maskindices
            else:
                # Positions reached through a masked index are masked too.
                outmask = _mask.take(indices, axis=axis, mode=mode)
                outmask |= maskindices
            out.__setmask__(outmask)
        return out
# Array methods
copy = _arraymethod('copy')
diagonal = _arraymethod('diagonal')
transpose = _arraymethod('transpose')
T = property(fget=lambda self: self.transpose())
swapaxes = _arraymethod('swapaxes')
clip = _arraymethod('clip', onmask=False)
copy = _arraymethod('copy')
squeeze = _arraymethod('squeeze')
    def tolist(self, fill_value=None):
        """
        Return the data portion of the masked array as a hierarchical Python list.
        Data items are converted to the nearest compatible Python type.
        Masked values are converted to `fill_value`. If `fill_value` is None,
        the corresponding entries in the output list will be ``None``.
        Parameters
        ----------
        fill_value : scalar, optional
            The value to use for invalid entries. Default is None.
        Returns
        -------
        result : list
            The Python list representation of the masked array.
        Examples
        --------
        >>> x = np.ma.array([[1,2,3], [4,5,6], [7,8,9]], mask=[0] + [1,0]*4)
        >>> x.tolist()
        [[1, None, 3], [None, 5, None], [7, None, 9]]
        >>> x.tolist(-999)
        [[1, -999, 3], [-999, 5, -999], [7, -999, 9]]
        """
        _mask = self._mask
        # No mask ? Just return .data.tolist ?
        if _mask is nomask:
            return self._data.tolist()
        # Explicit fill_value: fill the array and get the list
        if fill_value is not None:
            return self.filled(fill_value).tolist()
        # Structured array.
        names = self.dtype.names
        if names:
            # Widen every field to object so None can be stored in it.
            result = self._data.astype([(_, object) for _ in names])
            for n in names:
                result[n][_mask[n]] = None
            return result.tolist()
        # Standard arrays.
        # NOTE(review): `_mask is nomask` was already handled above, so this
        # branch looks unreachable; kept as-is.
        if _mask is nomask:
            return [None]
        # Set temps to save time when dealing w/ marrays.
        inishape = self.shape
        result = np.array(self._data.ravel(), dtype=object)
        result[_mask.ravel()] = None
        result.shape = inishape
        return result.tolist()
def tostring(self, fill_value=None, order='C'):
"""
This function is a compatibility alias for tobytes. Despite its name it
returns bytes not strings.
"""
return self.tobytes(fill_value, order='C')
    def tobytes(self, fill_value=None, order='C'):
        """
        Return the array data as a string containing the raw bytes in the array.
        The array is filled with a fill value before the string conversion.
        .. versionadded:: 1.9.0
        Parameters
        ----------
        fill_value : scalar, optional
            Value used to fill in the masked values. Default is None, in which
            case `MaskedArray.fill_value` is used.
        order : {'C','F','A'}, optional
            Order of the data item in the copy. Default is 'C'.
            - 'C' -- C order (row major).
            - 'F' -- Fortran order (column major).
            - 'A' -- Any, current order of array.
            - None -- Same as 'A'.
        See Also
        --------
        ndarray.tobytes
        tolist, tofile
        Notes
        -----
        As for `ndarray.tobytes`, information about the shape, dtype, etc.,
        but also about `fill_value`, will be lost.
        Examples
        --------
        >>> x = np.ma.array(np.array([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]])
        >>> x.tobytes()
        '\\x01\\x00\\x00\\x00?B\\x0f\\x00?B\\x0f\\x00\\x04\\x00\\x00\\x00'
        """
        # Masked entries are serialized as their fill value.
        return self.filled(fill_value).tobytes(order=order)
    def tofile(self, fid, sep="", format="%s"):
        """
        Save a masked array to a file in binary format.
        .. warning::
            This function is not implemented yet.
        The parameters mirror `ndarray.tofile` for signature compatibility
        but are unused.
        Raises
        ------
        NotImplementedError
            When `tofile` is called.
        """
        raise NotImplementedError("MaskedArray.tofile() not implemented yet.")
    def toflex(self):
        """
        Transforms a masked array into a flexible-type array.
        The flexible type array that is returned will have two fields:
        * the ``_data`` field stores the ``_data`` part of the array.
        * the ``_mask`` field stores the ``_mask`` part of the array.
        Parameters
        ----------
        None
        Returns
        -------
        record : ndarray
            A new flexible-type `ndarray` with two fields: the first element
            containing a value, the second element containing the corresponding
            mask boolean. The returned record shape matches self.shape.
        Notes
        -----
        A side-effect of transforming a masked array into a flexible `ndarray` is
        that meta information (``fill_value``, ...) will be lost.
        Examples
        --------
        >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
        >>> print(x)
        [[1 -- 3]
        [-- 5 --]
        [7 -- 9]]
        >>> print(x.toflex())
        [[(1, False) (2, True) (3, False)]
        [(4, True) (5, False) (6, True)]
        [(7, False) (8, True) (9, False)]]
        """
        # Get the basic dtype.
        ddtype = self.dtype
        # Make sure we have a mask
        _mask = self._mask
        if _mask is None:
            _mask = make_mask_none(self.shape, ddtype)
        # And get its dtype
        # NOTE(review): the guard above fixes up the *local* `_mask`, yet
        # `self._mask` is read below — looks inconsistent; confirm whether
        # `_mask` can actually be None here (elsewhere it is `nomask`).
        mdtype = self._mask.dtype
        record = np.ndarray(shape=self.shape,
                            dtype=[('_data', ddtype), ('_mask', mdtype)])
        record['_data'] = self._data
        record['_mask'] = self._mask
        return record
    torecords = toflex
    # Pickling
    def __getstate__(self):
        """Return the internal state of the masked array, for pickling
        purposes.
        The state is a 7-tuple: version tag, shape, dtype, isfortran flag,
        raw data bytes, raw mask bytes, and the fill value (consumed by
        ``__setstate__``).
        """
        # 'CF'[fnc] selects 'F' when the array is Fortran- but not
        # C-contiguous; used as the raveling order for the raw bytes.
        cf = 'CF'[self.flags.fnc]
        state = (1,
                 self.shape,
                 self.dtype,
                 self.flags.fnc,
                 self._data.tobytes(cf),
                 # self._data.tolist(),
                 getmaskarray(self).tobytes(cf),
                 # getmaskarray(self).tolist(),
                 self._fill_value,
                 )
        return state
    def __setstate__(self, state):
        """Restore the internal state of the masked array, for
        pickling purposes. ``state`` is typically the output of the
        ``__getstate__`` output, and is a 7-tuple:
        - version tag (currently 1, ignored here)
        - a tuple giving the shape of the data
        - a typecode for the data
        - the isfortran flag
        - a binary string for the data
        - a binary string for the mask
        - the fill value.
        """
        (_, shp, typ, isf, raw, msk, flv) = state
        # Restore data via ndarray, then the mask with a matching mask dtype.
        super(MaskedArray, self).__setstate__((shp, typ, isf, raw))
        self._mask.__setstate__((shp, make_mask_descr(typ), isf, msk))
        self.fill_value = flv
    def __reduce__(self):
        """Return a 3-tuple for pickling a MaskedArray.
        """
        # The (0,) shape and 'b' typecode give _mareconstruct a minimal
        # placeholder array; __setstate__ then restores the real contents.
        return (_mareconstruct,
                (self.__class__, self._baseclass, (0,), 'b',),
                self.__getstate__())
def __deepcopy__(self, memo=None):
from copy import deepcopy
copied = MaskedArray.__new__(type(self), self, copy=True)
if memo is None:
memo = {}
memo[id(self)] = copied
for (k, v) in self.__dict__.items():
copied.__dict__[k] = deepcopy(v, memo)
return copied
def _mareconstruct(subtype, baseclass, baseshape, basetype,):
    """Internal function that builds a new MaskedArray from the
    information stored in a pickle.
    """
    # Uninitialized placeholder buffers for data and mask; the real
    # contents are filled in afterwards by __setstate__.
    _data = ndarray.__new__(baseclass, baseshape, basetype)
    _mask = ndarray.__new__(ndarray, baseshape, make_mask_descr(basetype))
    return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,)
class mvoid(MaskedArray):
    """
    Fake a 'void' object to use for masked array with structured dtypes.
    """
    def __new__(self, data, mask=nomask, dtype=None, fill_value=None,
                hardmask=False, copy=False, subok=True):
        _data = np.array(data, copy=copy, subok=subok, dtype=dtype)
        _data = _data.view(self)
        _data._hardmask = hardmask
        if mask is not nomask:
            if isinstance(mask, np.void):
                _data._mask = mask
            else:
                try:
                    # Mask is already a 0D array
                    _data._mask = np.void(mask)
                except TypeError:
                    # Transform the mask to a void
                    mdtype = make_mask_descr(dtype)
                    _data._mask = np.array(mask, dtype=mdtype)[()]
        if fill_value is not None:
            _data.fill_value = fill_value
        return _data
    def _get_data(self):
        # Make sure that the _data part is a np.void
        return self.view(ndarray)[()]
    _data = property(fget=_get_data)
    def __getitem__(self, indx):
        """
        Get the index.
        Returns the masked singleton for a masked scalar field, a masked
        array for a multi-dimensional field, and the raw value otherwise.
        """
        m = self._mask
        if isinstance(m[indx], ndarray):
            # Can happen when indx is a multi-dimensional field:
            # A = ma.masked_array(data=[([0,1],)], mask=[([True,
            #     False],)], dtype=[("A", ">i2", (2,))])
            # x = A[0]; y = x["A"]; then y.mask["A"].size==2
            # and we can not say masked/unmasked.
            # The result is no longer mvoid!
            # See also issue #6724.
            return masked_array(
                data=self._data[indx], mask=m[indx],
                fill_value=self._fill_value[indx],
                hard_mask=self._hardmask)
        if m is not nomask and m[indx]:
            return masked
        return self._data[indx]
    def __setitem__(self, indx, value):
        # Store the value; under a hard mask existing masked entries stay
        # masked (|=), under a soft mask the new value's mask wins.
        self._data[indx] = value
        if self._hardmask:
            self._mask[indx] |= getattr(value, "_mask", False)
        else:
            self._mask[indx] = getattr(value, "_mask", False)
    def __str__(self):
        m = self._mask
        if m is nomask:
            return self._data.__str__()
        # Widen every field to object so the print marker can replace
        # masked entries.
        printopt = masked_print_option
        rdtype = _recursive_make_descr(self._data.dtype, "O")
        res = np.array([self._data]).astype(rdtype)
        _recursive_printoption(res, self._mask, printopt)
        return str(res[0])
    __repr__ = __str__
    def __iter__(self):
        "Defines an iterator for mvoid"
        (_data, _mask) = (self._data, self._mask)
        if _mask is nomask:
            for d in _data:
                yield d
        else:
            # Masked fields come out as the masked singleton.
            for (d, m) in zip(_data, _mask):
                if m:
                    yield masked
                else:
                    yield d
    def __len__(self):
        return self._data.__len__()
    def filled(self, fill_value=None):
        """
        Return a copy with masked fields filled with a given value.
        Parameters
        ----------
        fill_value : scalar, optional
            The value to use for invalid entries (None by default).
            If None, the `fill_value` attribute is used instead.
        Returns
        -------
        filled_void
            A `np.void` object
        See Also
        --------
        MaskedArray.filled
        """
        return asarray(self).filled(fill_value)[()]
    def tolist(self):
        """
        Transforms the mvoid object into a tuple.
        Masked fields are replaced by None.
        Returns
        -------
        returned_tuple
            Tuple of fields
        """
        _mask = self._mask
        if _mask is nomask:
            return self._data.tolist()
        result = []
        for (d, m) in zip(self._data, self._mask):
            if m:
                result.append(None)
            else:
                # .item() makes sure we return a standard Python object
                result.append(d.item())
        return tuple(result)
##############################################################################
# Shortcuts #
##############################################################################
def isMaskedArray(x):
    """
    Test whether the input is an instance of MaskedArray.
    Any object is accepted; the result is True only for `MaskedArray`
    instances (plain ndarrays, sequences, scalars, etc. give False).
    Parameters
    ----------
    x : object
        Object to test.
    Returns
    -------
    result : bool
        True if `x` is a MaskedArray.
    See Also
    --------
    isMA : Alias to isMaskedArray.
    isarray : Alias to isMaskedArray.
    """
    return isinstance(x, MaskedArray)
# Public aliases of isMaskedArray.
isarray = isMaskedArray
isMA = isMaskedArray  # backward compatibility
class MaskedConstant(MaskedArray):
    # We define the masked singleton as a float for higher precedence.
    # Note that it can be tricky sometimes w/ type comparison
    # All instances share the same 0-d data/mask class attributes.
    _data = data = np.array(0.)
    _mask = mask = np.array(True)
    _baseclass = ndarray
    def __new__(self):
        # Every "instance" is a view of the shared 0-d data array.
        return self._data.view(self)
    def __array_finalize__(self, obj):
        # Deliberately skip MaskedArray's finalization: the constant
        # carries no per-instance state.
        return
    def __array_wrap__(self, obj):
        # Any ufunc applied to the constant yields the constant again.
        return self
    def __str__(self):
        return str(masked_print_option._display)
    def __repr__(self):
        return 'masked'
    def flatten(self):
        # A 1-d masked array holding the single (masked) entry.
        return masked_array([self._data], dtype=float, mask=[True])
    def __reduce__(self):
        """Override of MaskedArray's __reduce__.
        Pickling reconstructs the constant by simply calling the class.
        """
        return (self.__class__, ())
# The unique masked-constant singleton, plus the canonical alias under
# which the MaskedArray class is exported.
masked = masked_singleton = MaskedConstant()
masked_array = MaskedArray
def array(data, dtype=None, copy=False, order=None,
          mask=nomask, fill_value=None, keep_mask=True,
          hard_mask=False, shrink=True, subok=True, ndmin=0):
    """
    Shortcut to MaskedArray.
    The options are in a different order for convenience and backwards
    compatibility.
    """
    # Pure forwarding wrapper; only the argument order differs from the
    # MaskedArray constructor.
    return MaskedArray(data, mask=mask, dtype=dtype, copy=copy,
                       subok=subok, keep_mask=keep_mask,
                       hard_mask=hard_mask, fill_value=fill_value,
                       ndmin=ndmin, shrink=shrink, order=order)
array.__doc__ = masked_array.__doc__
def is_masked(x):
    """
    Determine whether the input has masked values.
    Accepts any object, but the answer is True only for a MaskedArray
    whose mask contains at least one True entry; anything else (including
    a MaskedArray with `nomask` or an all-False mask) gives False.
    Parameters
    ----------
    x : array_like
        Array to check for masked values.
    Returns
    -------
    result : bool
        True if `x` is a MaskedArray with masked values, False otherwise.
    """
    m = getmask(x)
    if m is nomask:
        return False
    return bool(m.any())
##############################################################################
# Extrema functions #
##############################################################################
class _extrema_operation(object):
    """
    Generic class for maximum/minimum functions.
    .. note::
        This is the base class for `_maximum_operation` and
        `_minimum_operation`.
    Subclasses provide ``ufunc`` (the elementwise extremum),
    ``compare`` (the corresponding comparison) and ``fill_value_func``
    (the filler that keeps masked entries from winning a reduction).
    """
    def __call__(self, a, b=None):
        "Executes the call behavior."
        # One argument: full reduction. Two: elementwise extremum chosen
        # via the comparison function.
        if b is None:
            return self.reduce(a)
        return where(self.compare(a, b), a, b)
    def reduce(self, target, axis=None):
        "Reduce target along the given axis."
        target = narray(target, copy=False, subok=True)
        m = getmask(target)
        if axis is not None:
            kargs = {'axis': axis}
        else:
            # No axis: reduce over the flattened array (and mask).
            kargs = {}
            target = target.ravel()
            if not (m is nomask):
                m = m.ravel()
        if m is nomask:
            t = self.ufunc.reduce(target, **kargs)
        else:
            # Fill masked entries so they cannot win, reduce, then mark
            # slices that were fully masked.
            target = target.filled(
                self.fill_value_func(target)).view(type(target))
            t = self.ufunc.reduce(target, **kargs)
            m = umath.logical_and.reduce(m, **kargs)
            if hasattr(t, '_mask'):
                t._mask = m
            elif m:
                t = masked
        return t
    def outer(self, a, b):
        "Return the function applied to the outer product of a and b."
        ma = getmask(a)
        mb = getmask(b)
        if ma is nomask and mb is nomask:
            m = nomask
        else:
            # The outer mask: masked wherever either operand is masked.
            ma = getmaskarray(a)
            mb = getmaskarray(b)
            m = logical_or.outer(ma, mb)
        result = self.ufunc.outer(filled(a), filled(b))
        if not isinstance(result, MaskedArray):
            result = result.view(MaskedArray)
        result._mask = m
        return result
class _minimum_operation(_extrema_operation):
    "Object to calculate minima"
    def __init__(self):
        """minimum(a, b) or minimum(a)
        In one argument case, returns the scalar minimum.
        """
        # Configure the generic extrema machinery for minima; the fill
        # value function supplies the filler used during reductions.
        self.ufunc = umath.minimum
        self.afunc = amin
        self.compare = less
        self.fill_value_func = minimum_fill_value
class _maximum_operation(_extrema_operation):
    "Object to calculate maxima"
    def __init__(self):
        """maximum(a, b) or maximum(a)
        In one argument case returns the scalar maximum.
        """
        # Configure the generic extrema machinery for maxima; the fill
        # value function supplies the filler used during reductions.
        self.ufunc = umath.maximum
        self.afunc = amax
        self.compare = greater
        self.fill_value_func = maximum_fill_value
def min(obj, axis=None, out=None, fill_value=None):
    # NOTE: intentionally shadows the builtin `min` within this module.
    try:
        return obj.min(axis=axis, fill_value=fill_value, out=out)
    except (AttributeError, TypeError):
        # If obj doesn't have a min method or if the method doesn't accept
        # a fill_value argument
        return asanyarray(obj).min(axis=axis, fill_value=fill_value, out=out)
min.__doc__ = MaskedArray.min.__doc__
def max(obj, axis=None, out=None, fill_value=None):
    # NOTE: intentionally shadows the builtin `max` within this module.
    try:
        return obj.max(axis=axis, fill_value=fill_value, out=out)
    except (AttributeError, TypeError):
        # If obj doesn't have a max method, or if the method doesn't accept
        # a fill_value argument
        return asanyarray(obj).max(axis=axis, fill_value=fill_value, out=out)
max.__doc__ = MaskedArray.max.__doc__
def ptp(obj, axis=None, out=None, fill_value=None):
    """
    a.ptp(axis=None) = a.max(axis) - a.min(axis)
    """
    # Prefer the object's own ptp (which may honour fill_value); fall back
    # to converting to a masked array.
    try:
        return obj.ptp(axis, out=out, fill_value=fill_value)
    except (AttributeError, TypeError):
        # If obj doesn't have a ptp method or if the method doesn't accept
        # a fill_value argument
        return asanyarray(obj).ptp(axis=axis, fill_value=fill_value, out=out)
ptp.__doc__ = MaskedArray.ptp.__doc__
##############################################################################
# Definition of functions from the corresponding methods #
##############################################################################
class _frommethod:
    """
    Define functions from existing MaskedArray methods.
    Parameters
    ----------
    methodname : str
        Name of the method to transform.
    reversed : bool, optional
        When True, the first positional argument and `a` are swapped
        before dispatch (used e.g. for `compress`).
    """
    def __init__(self, methodname, reversed=False):
        self.__name__ = methodname
        self.__doc__ = self.getdoc()
        self.reversed = reversed
    def getdoc(self):
        "Return the doc of the function (from the doc of the method)."
        # Prefer the MaskedArray method's doc, falling back to numpy's
        # free function of the same name.
        meth = getattr(MaskedArray, self.__name__, None) or\
            getattr(np, self.__name__, None)
        signature = self.__name__ + get_object_signature(meth)
        if meth is not None:
            doc = """    %s\n%s""" % (
                signature, getattr(meth, '__doc__', None))
            return doc
    def __call__(self, a, *args, **params):
        if self.reversed:
            args = list(args)
            arr = args[0]
            args[0] = a
            a = arr
        # Get the method from the array (if possible)
        method_name = self.__name__
        method = getattr(a, method_name, None)
        if method is not None:
            return method(*args, **params)
        # Still here ? Then a is not a MaskedArray
        method = getattr(MaskedArray, method_name, None)
        if method is not None:
            return method(MaskedArray(a), *args, **params)
        # Still here ? OK, let's call the corresponding np function
        method = getattr(np, method_name)
        return method(a, *args, **params)
# Module-level functions generated from the matching MaskedArray methods.
# NOTE: `all`, `any`, `sum`, `copy`, `min`/`max` (above) deliberately
# shadow Python builtins within this module.
all = _frommethod('all')
anomalies = anom = _frommethod('anom')
any = _frommethod('any')
compress = _frommethod('compress', reversed=True)
cumprod = _frommethod('cumprod')
cumsum = _frommethod('cumsum')
copy = _frommethod('copy')
diagonal = _frommethod('diagonal')
harden_mask = _frommethod('harden_mask')
ids = _frommethod('ids')
# maximum/minimum are extrema objects (callable, with .reduce/.outer),
# not _frommethod wrappers.
maximum = _maximum_operation()
mean = _frommethod('mean')
minimum = _minimum_operation()
nonzero = _frommethod('nonzero')
prod = _frommethod('prod')
product = _frommethod('prod')
ravel = _frommethod('ravel')
repeat = _frommethod('repeat')
shrink_mask = _frommethod('shrink_mask')
soften_mask = _frommethod('soften_mask')
std = _frommethod('std')
sum = _frommethod('sum')
swapaxes = _frommethod('swapaxes')
#take = _frommethod('take')
trace = _frommethod('trace')
var = _frommethod('var')
def take(a, indices, axis=None, out=None, mode='raise'):
    """
    Take elements from a masked array along an axis.
    `a` is converted to a masked array first; see `MaskedArray.take`
    for the full semantics.
    """
    a = masked_array(a)
    return a.take(indices, axis=axis, out=out, mode=mode)
def power(a, b, third=None):
    """
    Returns element-wise base array raised to power from second array.
    This is the masked array version of `numpy.power`. For details see
    `numpy.power`.
    See Also
    --------
    numpy.power
    Notes
    -----
    The *out* argument to `numpy.power` is not supported, `third` has to be
    None.
    """
    if third is not None:
        raise MaskError("3-argument power not supported.")
    # Get the masks
    ma = getmask(a)
    mb = getmask(b)
    m = mask_or(ma, mb)
    # Get the rawdata
    fa = getdata(a)
    fb = getdata(b)
    # Get the type of the result (so that we preserve subclasses)
    if isinstance(a, MaskedArray):
        basetype = type(a)
    else:
        basetype = MaskedArray
    # Get the result and view it as a (subclass of) MaskedArray
    # Masked positions keep the raw base value (fa) rather than the
    # power, which may be undefined there.
    with np.errstate(divide='ignore', invalid='ignore'):
        result = np.where(m, fa, umath.power(fa, fb)).view(basetype)
    result._update_from(a)
    # Find where we're in trouble w/ NaNs and Infs
    invalid = np.logical_not(np.isfinite(result.view(ndarray)))
    # Add the initial mask
    if m is not nomask:
        if not (result.ndim):
            return masked
        result._mask = np.logical_or(m, invalid)
    # Fix the invalid parts
    if invalid.any():
        if not result.ndim:
            return masked
        elif result._mask is nomask:
            result._mask = invalid
        # Replace non-finite data with the fill value so it never leaks.
        result._data[invalid] = result.fill_value
    return result
def argsort(a, axis=None, kind='quicksort', order=None, fill_value=None):
    "Function version of the eponymous method."
    # Masked entries are replaced by the fill value before sorting.
    filler = default_fill_value(a) if fill_value is None else fill_value
    filled_data = filled(a, filler)
    if axis is None:
        return filled_data.argsort(kind=kind, order=order)
    return filled_data.argsort(axis, kind=kind, order=order)
argsort.__doc__ = MaskedArray.argsort.__doc__
def argmin(a, axis=None, fill_value=None):
    "Function version of the eponymous method."
    # Masked entries are replaced by the fill value before the search.
    filler = default_fill_value(a) if fill_value is None else fill_value
    return filled(a, filler).argmin(axis=axis)
argmin.__doc__ = MaskedArray.argmin.__doc__
def argmax(a, axis=None, fill_value=None):
    """
    Function version of the eponymous method.

    When no `fill_value` is given, masked entries are filled with the
    negated default fill value so they never win the maximum search.
    """
    if fill_value is None:
        fill_value = default_fill_value(a)
        try:
            # Negate so masked entries compare as small as possible.
            fill_value = -fill_value
        except TypeError:
            # Negation undefined for this dtype (e.g. strings): keep the
            # un-negated default.  Narrowed from a bare `except:` that
            # would also have hidden unrelated errors.
            pass
    d = filled(a, fill_value)
    return d.argmax(axis=axis)
argmax.__doc__ = MaskedArray.argmax.__doc__
def sort(a, axis=-1, kind='quicksort', order=None, endwith=True, fill_value=None):
    "Function version of the eponymous method."
    # Work on a copy so the caller's array is never modified; keep subclass.
    a = narray(a, copy=True, subok=True)
    if axis is None:
        # Sorting the flattened array: operate along axis 0 of the 1-D copy.
        a = a.flatten()
        axis = 0
    if fill_value is None:
        if endwith:
            # nan > inf: for floats a NaN filler sorts after everything,
            # pushing masked entries to the end; otherwise use the
            # dtype-appropriate extreme fill value.
            if np.issubdtype(a.dtype, np.floating):
                filler = np.nan
            else:
                filler = minimum_fill_value(a)
        else:
            filler = maximum_fill_value(a)
    else:
        filler = fill_value
    # Argsort the filled data, then fancy-index the masked array itself so
    # data and mask are permuted together.
    sindx = filled(a, filler).argsort(axis=axis, kind=kind, order=order)
    # save meshgrid memory for 1d arrays
    if a.ndim == 1:
        indx = sindx
    else:
        # Build sparse index grids for every axis, replacing the sorted
        # axis with the argsort result.
        indx = np.meshgrid(*[np.arange(x) for x in a.shape], sparse=True,
                           indexing='ij')
        indx[axis] = sindx
    return a[indx]
sort.__doc__ = MaskedArray.sort.__doc__
def compressed(x):
    """
    Return all the non-masked data as a 1-D array.

    Equivalent to calling the ``compressed`` method of a `MaskedArray`;
    see `MaskedArray.compressed` for details.

    See Also
    --------
    MaskedArray.compressed
        Equivalent method.
    """
    if isinstance(x, MaskedArray):
        return x.compressed()
    # Wrap plain arrays (conserving subclasses) before delegating.
    return asanyarray(x).compressed()
def concatenate(arrays, axis=0):
    """
    Concatenate a sequence of arrays along the given axis.

    Parameters
    ----------
    arrays : sequence of array_like
        The arrays must have the same shape, except in the dimension
        corresponding to `axis` (the first, by default).
    axis : int, optional
        The axis along which the arrays will be joined. Default is 0.

    Returns
    -------
    result : MaskedArray
        The concatenated array with any masked entries preserved.

    See Also
    --------
    numpy.concatenate : Equivalent function in the top-level NumPy module.
    """
    joined = np.concatenate([getdata(a) for a in arrays], axis)
    data = joined.view(get_masked_subclass(*arrays))
    # Fast path: no input carries an explicit mask.
    if all(getmask(a) is nomask for a in arrays):
        return data
    # Otherwise the masks have to be concatenated as well.
    joined_mask = np.concatenate([getmaskarray(a) for a in arrays], axis)
    # If we decide to keep a '_shrinkmask' option, we want to check that
    # all of them are True, and then check for dm.any()
    if not joined_mask.dtype.fields and not joined_mask.any():
        # All-False non-structured mask shrinks to nomask.
        data._mask = nomask
    else:
        data._mask = joined_mask.reshape(joined.shape)
    return data
def count(a, axis=None):
    # Delegate to the method, wrapping non-masked input first (no copy).
    arr = a if isinstance(a, MaskedArray) else masked_array(a, copy=False)
    return arr.count(axis)
count.__doc__ = MaskedArray.count.__doc__
def diag(v, k=0):
    """
    Extract a diagonal or construct a diagonal array.

    This function is the equivalent of `numpy.diag` that takes masked
    values into account, see `numpy.diag` for details.

    See Also
    --------
    numpy.diag : Equivalent function for ndarrays.
    """
    result = np.diag(v, k).view(MaskedArray)
    # Apply the same diag/construct operation to the mask, if any.
    mask_v = getmask(v)
    if mask_v is not nomask:
        result._mask = np.diag(mask_v, k)
    return result
def expand_dims(x, axis):
    """
    Expand the shape of an array.

    Insert a new axis before the position given by `axis`.  Behaves like
    `numpy.expand_dims` but preserves masked elements; the same result
    can be achieved with ``x[np.newaxis, :]``-style slicing.

    See Also
    --------
    numpy.expand_dims : Equivalent function in top-level NumPy module.
    """
    expanded = n_expand_dims(x, axis)
    if not isinstance(x, MaskedArray):
        return expanded
    # Rebuild through a view so the mask is reshaped alongside the data.
    new_shape = expanded.shape
    result = x.view()
    result.shape = new_shape
    if result._mask is not nomask:
        result._mask.shape = new_shape
    return result
def left_shift(a, n):
    """
    Shift the bits of an integer to the left.

    This is the masked array version of `numpy.left_shift`, for details
    see that function.

    See Also
    --------
    numpy.left_shift
    """
    mask = getmask(a)
    if mask is nomask:
        return masked_array(umath.left_shift(filled(a), n))
    # Fill masked entries with 0 before shifting, then restore the mask.
    shifted = umath.left_shift(filled(a, 0), n)
    return masked_array(shifted, mask=mask)
def right_shift(a, n):
    """
    Shift the bits of an integer to the right.

    This is the masked array version of `numpy.right_shift`, for details
    see that function.

    See Also
    --------
    numpy.right_shift
    """
    mask = getmask(a)
    if mask is nomask:
        return masked_array(umath.right_shift(filled(a), n))
    # Fill masked entries with 0 before shifting, then restore the mask.
    shifted = umath.right_shift(filled(a, 0), n)
    return masked_array(shifted, mask=mask)
def put(a, indices, values, mode='raise'):
    """
    Set storage-indexed locations to corresponding values.

    This function is equivalent to `MaskedArray.put`, see that method
    for details.

    See Also
    --------
    MaskedArray.put
    """
    # The argument order differs from the method, so '_frommethod'
    # cannot be used; fall back to the raw-array method when the input
    # has no `put` of its own.
    try:
        return a.put(indices, values, mode=mode)
    except AttributeError:
        return narray(a, copy=False).put(indices, values, mode=mode)
def putmask(a, mask, values):  # , mode='raise'):
    """
    Changes elements of an array based on conditional and input values.

    This is the masked array version of `numpy.putmask`, for details see
    `numpy.putmask`.

    See Also
    --------
    numpy.putmask

    Notes
    -----
    Using a masked array as `values` will **not** transform a `ndarray` into
    a `MaskedArray`.
    """
    # We can't use 'frommethod', the order of arguments is different
    if not isinstance(a, MaskedArray):
        a = a.view(MaskedArray)
    (valdata, valmask) = (getdata(values), getmask(values))
    if getmask(a) is nomask:
        # Target has no mask yet: create one only if the values are masked,
        # and copy the values' mask under `mask`.
        if valmask is not nomask:
            a._sharedmask = True
            a._mask = make_mask_none(a.shape, a.dtype)
            np.copyto(a._mask, valmask, where=mask)
    elif a._hardmask:
        # Hard mask: entries may become masked but never unmasked (OR-in).
        if valmask is not nomask:
            m = a._mask.copy()
            np.copyto(m, valmask, where=mask)
            a.mask |= m
    else:
        # Soft mask: the values' mask (all-False if values are unmasked)
        # overwrites the target's mask wherever `mask` is set.
        if valmask is nomask:
            valmask = getmaskarray(values)
        np.copyto(a._mask, valmask, where=mask)
    # Finally copy the data itself under `mask`.
    np.copyto(a._data, valdata, where=mask)
    return
def transpose(a, axes=None):
    """
    Permute the dimensions of an array.

    This function is exactly equivalent to `numpy.transpose`, but always
    yields a masked result.

    See Also
    --------
    numpy.transpose : Equivalent function in top-level NumPy module.
    """
    # 'transpose' does not take keywords, so '_frommethod' cannot be used;
    # fall back to the raw-array method for inputs without one.
    try:
        return a.transpose(axes)
    except AttributeError:
        return narray(a, copy=False).transpose(axes).view(MaskedArray)
def reshape(a, new_shape, order='C'):
    """
    Returns an array containing the same data with a new shape.

    Refer to `MaskedArray.reshape` for full documentation.

    See Also
    --------
    MaskedArray.reshape : equivalent function
    """
    # '_frommethod' does not forward these parameters cleanly, so
    # delegate by hand; wrap plain-array results as MaskedArray.
    try:
        return a.reshape(new_shape, order=order)
    except AttributeError:
        reshaped = narray(a, copy=False).reshape(new_shape, order=order)
        return reshaped.view(MaskedArray)
def resize(x, new_shape):
    """
    Return a new masked array with the specified size and shape.

    This is the masked equivalent of the `numpy.resize` function. The new
    array is filled with repeated copies of `x` (in the order that the
    data are stored in memory). If `x` is masked, the new array will be
    masked, and the new mask will be a repetition of the old one.  A
    MaskedArray is always returned, regardless of the input type.

    See Also
    --------
    numpy.resize : Equivalent function in the top level NumPy module.
    """
    # np.resize works on the raw data only, so resize data and mask
    # separately and reattach the mask afterwards.
    resized_mask = getmask(x)
    if resized_mask is not nomask:
        resized_mask = np.resize(resized_mask, new_shape)
    result = np.resize(x, new_shape).view(get_masked_subclass(x))
    if result.ndim:
        result._mask = resized_mask
    return result
def rank(obj):
    """
    maskedarray version of the numpy function.

    .. note::
        Deprecated since 1.10.0
    """
    # 2015-04-12, 1.10.0
    # Deprecation shim: warn, then return the number of dimensions of the
    # underlying data (same as `ndim` below).
    warnings.warn(
        "`rank` is deprecated; use the `ndim` function instead. ",
        np.VisibleDeprecationWarning)
    return np.ndim(getdata(obj))
rank.__doc__ = np.rank.__doc__
def ndim(obj):
    """
    maskedarray version of the numpy function.
    """
    # Operate on the underlying data so masked arrays behave like ndarrays.
    return np.ndim(getdata(obj))
ndim.__doc__ = np.ndim.__doc__
def shape(obj):
    "maskedarray version of the numpy function."
    # Operate on the underlying data so masked arrays behave like ndarrays.
    return np.shape(getdata(obj))
shape.__doc__ = np.shape.__doc__
def size(obj, axis=None):
    "maskedarray version of the numpy function."
    # Operate on the underlying data so masked arrays behave like ndarrays.
    return np.size(getdata(obj), axis)
size.__doc__ = np.size.__doc__
##############################################################################
# Extra functions #
##############################################################################
def where(condition, x=_NoValue, y=_NoValue):
    """
    Return a masked array with elements from x or y, depending on condition.

    Returns a masked array, shaped like condition, where the elements are
    from `x` when `condition` is True, and from `y` otherwise.  If neither
    `x` nor `y` is given, the function returns a tuple of indices where
    `condition` is True (the result of ``condition.nonzero()``).

    Parameters
    ----------
    condition : array_like, bool
        The condition to meet. For each True element, yield the
        corresponding element from `x`, otherwise from `y`.
    x, y : array_like, optional
        Values from which to choose; must be broadcast-able to the shape
        of `condition`.

    Returns
    -------
    out : MaskedArray or tuple of ndarrays
        The resulting masked array if `x` and `y` were given, otherwise
        the result of ``condition.nonzero()``.

    See Also
    --------
    numpy.where : Equivalent function in the top-level NumPy module.
    """
    n_missing = (x is _NoValue, y is _NoValue).count(True)
    if n_missing == 1:
        raise ValueError("Must provide both 'x' and 'y' or neither.")
    if n_missing == 2:
        # Single-argument form behaves like condition.nonzero().
        return filled(condition, 0).nonzero()

    # Boolean selectors for the two branches; a masked condition entry
    # counts as False here and is masked again below.
    pick_x = filled(condition, 0).astype(MaskType)
    pick_y = np.logical_not(pick_x)

    xd = getdata(x)
    yd = getdata(y)
    # Result dtype: a fully-masked operand contributes no dtype.
    if x is masked:
        out_dtype = yd.dtype
    elif y is masked:
        out_dtype = xd.dtype
    else:
        out_dtype = np.find_common_type([xd.dtype, yd.dtype], [])

    # Fill an empty array branch by branch.
    out = np.empty(pick_x.shape, dtype=out_dtype).view(MaskedArray)
    np.copyto(out._data, xd.astype(out_dtype), where=pick_x)
    np.copyto(out._data, yd.astype(out_dtype), where=pick_y)

    # Build the mask the same way, plus wherever the condition is masked.
    out_mask = np.zeros(pick_x.shape, dtype=MaskType)
    np.copyto(out_mask, getmask(x), where=pick_x)
    np.copyto(out_mask, getmask(y), where=pick_y)
    out_mask |= getmaskarray(condition)
    # Assign through _mask (avoids a copy); shrink to nomask if all-False.
    out._mask = out_mask if out_mask.any() else nomask
    return out
def choose(indices, choices, out=None, mode='raise'):
    """
    Use an index array to construct a new array from a set of choices.

    Where a value in `indices` is i, the new array takes the value that
    ``choices[i]`` contains in the same place.  Masked entries in
    `indices` or in the selected choice are masked in the output.

    Parameters
    ----------
    indices : ndarray of ints
        Must contain integers in ``[0, n-1]``, where n is the number of
        choices.
    choices : sequence of arrays
        Choice arrays; the index array and all choices should be
        broadcastable to the same shape.
    out : array, optional
        If provided, the result is inserted into this array. It should be
        of the appropriate shape and `dtype`.
    mode : {'raise', 'wrap', 'clip'}, optional
        Specifies how out-of-bounds indices behave:
        'raise' raises an error, 'wrap' wraps around, 'clip' clips.

    Returns
    -------
    merged_array : array
    """
    def fmask(x):
        "Returns the filled array, or True if masked."
        return True if x is masked else filled(x)

    def nmask(x):
        "Returns the mask, True if ``masked``, False if ``nomask``."
        return True if x is masked else getmask(x)

    filled_indices = filled(indices, 0)
    choice_masks = [nmask(c) for c in choices]
    choice_data = [fmask(c) for c in choices]
    # Choose among the masks, then OR-in the index array's own mask.
    out_mask = np.choose(filled_indices, choice_masks, mode=mode)
    out_mask = make_mask(mask_or(out_mask, getmask(indices)),
                         copy=0, shrink=True)
    # Choose among the data and attach the combined mask.
    result = np.choose(filled_indices, choice_data,
                       mode=mode, out=out).view(MaskedArray)
    if out is not None:
        if isinstance(out, MaskedArray):
            out.__setmask__(out_mask)
        return out
    result.__setmask__(out_mask)
    return result
def round_(a, decimals=0, out=None):
    """
    Return a copy of a, rounded to 'decimals' places.

    When 'decimals' is negative, it specifies the number of positions to
    the left of the decimal point.  The real and imaginary parts of
    complex numbers are rounded separately.  Nothing is done if the array
    is not of float type and 'decimals' is greater than or equal to 0.

    Parameters
    ----------
    decimals : int
        Number of decimals to round to. May be negative.
    out : array_like
        Existing array to use for output.
        If not given, returns a default copy of a.

    Notes
    -----
    If out is given and does not have a mask attribute, the mask of a
    is lost!
    """
    if out is None:
        return np.round_(a, decimals, out)
    # Round the raw data into `out`, then transfer the mask if `out` can
    # hold one.
    np.round_(getdata(a), decimals, out)
    if hasattr(out, '_mask'):
        out._mask = getmask(a)
    return out
round = round_
# Needed by dot, so move here from extras.py. It will still be exported
# from extras.py for compatibility.
def mask_rowcols(a, axis=None):
    """
    Mask rows and/or columns of a 2D array that contain masked values.

    The masking behavior is selected with `axis`:

    - If `axis` is None, rows *and* columns are masked.
    - If `axis` is 0, only rows are masked.
    - If `axis` is 1 or -1, only columns are masked.

    Parameters
    ----------
    a : array_like, MaskedArray
        The 2D array to mask. If not a MaskedArray instance (or if no
        array elements are masked), the result is a MaskedArray with
        `mask` set to `nomask` (False).
    axis : int, optional
        Axis along which to perform the operation.

    Returns
    -------
    a : MaskedArray
        A modified version of the input array, masked depending on the
        value of the `axis` parameter.

    Raises
    ------
    NotImplementedError
        If input array `a` is not 2D.

    See Also
    --------
    mask_rows : Mask rows of a 2D array that contain masked values.
    mask_cols : Mask cols of a 2D array that contain masked values.
    masked_where : Mask where a condition is met.

    Notes
    -----
    The input array's mask is modified by this function.
    """
    a = array(a, subok=False)
    if a.ndim != 2:
        raise NotImplementedError("mask_rowcols works for 2D arrays only.")
    m = getmask(a)
    if m is nomask or not m.any():
        # Nothing is masked: return the (converted) input unchanged.
        return a
    masked_positions = m.nonzero()
    # Unshare the mask before modifying it in place.
    a._mask = a._mask.copy()
    # `not axis` is True for both None and 0, so rows are masked in either
    # case; columns are masked for axis in (None, 1, -1).
    if not axis:
        a[np.unique(masked_positions[0])] = masked
    if axis in [None, 1, -1]:
        a[:, np.unique(masked_positions[1])] = masked
    return a
# Include masked dot here to avoid import problems in getting it from
# extras.py. Note that it is not included in __all__, but rather exported
# from extras in order to avoid backward compatibility problems.
def dot(a, b, strict=False, out=None):
    """
    Return the dot product of two arrays.

    This function is the equivalent of `numpy.dot` that takes masked
    values into account. Note that `strict` and `out` are in a different
    position than in the method version; it is recommended to pass them
    as keyword arguments.

    .. note::
      Works only with 2-D arrays at the moment.

    Parameters
    ----------
    a, b : masked_array_like
        Inputs arrays.
    strict : bool, optional
        Whether masked data are propagated (True) or set to 0 (False) for
        the computation. Default is False. Propagating the mask means
        that if a masked value appears in a row or column, the whole row
        or column is considered masked.
    out : masked_array, optional
        Output argument. This must have the exact kind that would be
        returned if it was not used: right type, C-contiguous, and the
        dtype that would be returned for ``dot(a, b)``.

        .. versionadded:: 1.10.2

    See Also
    --------
    numpy.dot : Equivalent function for ndarrays.
    """
    # !!!: Works only with 2D arrays; there should be a way to support
    # higher dimensions.
    if strict and (a.ndim == 2) and (b.ndim == 2):
        # Propagate masks over whole rows of `a` and whole columns of `b`.
        a = mask_rowcols(a, 0)
        b = mask_rowcols(b, 1)
    valid_a = ~getmaskarray(a)
    valid_b = ~getmaskarray(b)
    if out is None:
        product = np.dot(filled(a, 0), filled(b, 0))
        # An output cell is masked unless it was built from valid entries
        # only: NOT(dot(valid_a, valid_b)).
        product_mask = ~np.dot(valid_a, valid_b)
        if product.ndim == 0:
            product = np.asarray(product)
        result = product.view(get_masked_subclass(a, b))
        result.__setmask__(product_mask)
        return result
    # In-place variant: write data and mask directly into `out`.
    product = np.dot(filled(a, 0), filled(b, 0), out._data)
    if out.mask.shape != product.shape:
        out._mask = np.empty(product.shape, MaskType)
    np.dot(valid_a, valid_b, out._mask)
    np.logical_not(out._mask, out._mask)
    return out
def inner(a, b):
    """
    Returns the inner product of a and b for arrays of floating point types.

    Like the generic NumPy equivalent the product sum is over the last
    dimension of a and b.

    Notes
    -----
    The first argument is not conjugated.
    """
    a_filled = filled(a, 0)
    b_filled = filled(b, 0)
    # Promote 0-d operands to 1-d before calling np.inner.
    if a_filled.ndim == 0:
        a_filled.shape = (1,)
    if b_filled.ndim == 0:
        b_filled.shape = (1,)
    return np.inner(a_filled, b_filled).view(MaskedArray)
inner.__doc__ = doc_note(np.inner.__doc__,
                         "Masked values are replaced by 0.")
innerproduct = inner
def outer(a, b):
    "maskedarray version of the numpy function."
    a_filled = filled(a, 0).ravel()
    b_filled = filled(b, 0).ravel()
    product = np.outer(a_filled, b_filled)
    if getmask(a) is nomask and getmask(b) is nomask:
        return masked_array(product)
    # A result cell is valid only when both factors are valid:
    # mask = NOT(outer(valid_a, valid_b)), computed via 1 - x arithmetic.
    mask_a = getmaskarray(a)
    mask_b = getmaskarray(b)
    m = make_mask(1 - np.outer(1 - mask_a, 1 - mask_b), copy=0)
    return masked_array(product, mask=m)
outer.__doc__ = doc_note(np.outer.__doc__,
                         "Masked values are replaced by 0.")
outerproduct = outer
def allequal(a, b, fill_value=True):
    """
    Return True if all entries of a and b are equal, using fill_value as
    a truth value where either or both are masked.

    Parameters
    ----------
    a, b : array_like
        Input arrays to compare.
    fill_value : bool, optional
        Whether masked values in a or b are considered equal (True) or
        not (False).

    Returns
    -------
    y : bool
        Returns True if the two arrays are equal within the given
        tolerance, False otherwise. If either array contains NaN, then
        False is returned.

    See Also
    --------
    all, any
    numpy.ma.allclose
    """
    combined_mask = mask_or(getmask(a), getmask(b))
    if combined_mask is nomask:
        # No masked entries anywhere: plain element-wise comparison.
        return umath.equal(getdata(a), getdata(b)).all()
    if not fill_value:
        # Masked entries count as unequal, and at least one exists.
        return False
    # Masked entries count as equal: fill the comparison with True there.
    eq = umath.equal(getdata(a), getdata(b))
    return array(eq, mask=combined_mask, copy=False).filled(True).all(None)
def allclose(a, b, masked_equal=True, rtol=1e-5, atol=1e-8):
    """
    Returns True if two arrays are element-wise equal within a tolerance.

    This function is equivalent to `allclose` except that masked values
    are treated as equal (default) or unequal, depending on the
    `masked_equal` argument.

    Parameters
    ----------
    a, b : array_like
        Input arrays to compare.
    masked_equal : bool, optional
        Whether masked values in `a` and `b` are considered equal (True)
        or not (False). They are considered equal by default.
    rtol : float, optional
        Relative tolerance. The relative difference is equal to
        ``rtol * b``. Default is 1e-5.
    atol : float, optional
        Absolute tolerance. The absolute difference is equal to `atol`.
        Default is 1e-8.

    Returns
    -------
    y : bool
        Returns True if the two arrays are equal within the given
        tolerance, False otherwise. If either array contains NaN, then
        False is returned.

    See Also
    --------
    all, any
    numpy.allclose : the non-masked `allclose`.

    Notes
    -----
    Returns True when, element-wise,
    ``absolute(a - b) <= (atol + rtol * absolute(b))``.
    """
    x = masked_array(a, copy=False)
    y = masked_array(b, copy=False)
    # make sure y is an inexact type to avoid abs(MIN_INT); will cause
    # casting of x later.
    dtype = np.result_type(y, 1.)
    if y.dtype != dtype:
        y = masked_array(y, dtype=dtype, copy=False)
    combined_mask = mask_or(getmask(x), getmask(y))
    x_infs = np.isinf(masked_array(x, copy=False,
                                   mask=combined_mask)).filled(False)
    # Infinities must occur at the same places in both arrays...
    if not np.all(x_infs == filled(np.isinf(y), False)):
        return False
    if not np.any(x_infs):
        # No infinities: plain tolerance comparison everywhere, with
        # masked entries decided by `masked_equal`.
        in_tol = filled(umath.less_equal(umath.absolute(x - y),
                                         atol + rtol * umath.absolute(y)),
                        masked_equal)
        return np.all(in_tol)
    # ...and must be equal there; then compare the finite remainder.
    if not np.all(filled(x[x_infs] == y[x_infs], masked_equal)):
        return False
    x = x[~x_infs]
    y = y[~x_infs]
    in_tol = filled(umath.less_equal(umath.absolute(x - y),
                                     atol + rtol * umath.absolute(y)),
                    masked_equal)
    return np.all(in_tol)
def asarray(a, dtype=None, order=None):
    """
    Convert the input to a masked array of the given data-type.

    No copy is performed if the input is already an `ndarray`. If `a` is
    a subclass of `MaskedArray`, a base class `MaskedArray` is returned.

    Parameters
    ----------
    a : array_like
        Input data, in any form that can be converted to a masked array:
        lists, tuples, nested sequences, ndarrays and masked arrays.
    dtype : dtype, optional
        By default, the data-type is inferred from the input data.
    order : {'C', 'F'}, optional
        Whether to use row-major ('C') or column-major ('FORTRAN') memory
        representation. Default is 'C'.

    Returns
    -------
    out : MaskedArray
        Masked array interpretation of `a`.

    See Also
    --------
    asanyarray : Similar to `asarray`, but conserves subclasses.
    """
    # subok=False flattens any MaskedArray subclass to a base MaskedArray.
    return masked_array(a, dtype=dtype, copy=False, keep_mask=True,
                        subok=False, order=order or 'C')
def asanyarray(a, dtype=None):
    """
    Convert the input to a masked array, conserving subclasses.

    If `a` is a subclass of `MaskedArray`, its class is conserved.
    No copy is performed if the input is already an `ndarray`.

    Parameters
    ----------
    a : array_like
        Input data, in any form that can be converted to an array.
    dtype : dtype, optional
        By default, the data-type is inferred from the input data.

    Returns
    -------
    out : MaskedArray
        MaskedArray interpretation of `a`.

    See Also
    --------
    asarray : Similar to `asanyarray`, but does not conserve subclass.
    """
    # subok=True keeps MaskedArray subclasses intact.
    return masked_array(a, dtype=dtype, copy=False, keep_mask=True,
                        subok=True)
##############################################################################
# Pickling #
##############################################################################
def dump(a, F):
    """
    Pickle a masked array to a file.

    This is a wrapper around ``cPickle.dump``.

    Parameters
    ----------
    a : MaskedArray
        The array to be pickled.
    F : str or file-like object
        The file to pickle `a` to. If a string, the full path to the file.
    """
    if hasattr(F, 'readline'):
        # Already an open file-like object: write to it directly.
        return pickle.dump(a, F)
    # Pickle streams are binary: text mode ('w') breaks on Python 3.
    # Also make sure the handle we opened is closed.
    with open(F, 'wb') as fh:
        return pickle.dump(a, fh)
def dumps(a):
    """
    Return a string corresponding to the pickling of a masked array.

    This is a wrapper around ``cPickle.dumps``.

    Parameters
    ----------
    a : MaskedArray
        The array for which the string representation of the pickle is
        returned.
    """
    # Thin delegation: all the work happens in pickle.
    return pickle.dumps(a)
def load(F):
    """
    Wrapper around ``cPickle.load`` which accepts either a file-like object
    or a filename.

    Parameters
    ----------
    F : str or file
        The file or file name to load.

    See Also
    --------
    dump : Pickle an array

    Notes
    -----
    This is different from `numpy.load`, which does not use cPickle but
    loads the NumPy binary .npy format.
    """
    if hasattr(F, 'readline'):
        # Already an open file-like object: read from it directly.
        return pickle.load(F)
    # Pickle streams are binary: text mode ('r') breaks on Python 3.
    # Also make sure the handle we opened is closed.
    with open(F, 'rb') as fh:
        return pickle.load(fh)
def loads(strg):
    """
    Load a pickle from the current string.

    The result of ``cPickle.loads(strg)`` is returned.

    Parameters
    ----------
    strg : str
        The string to load.

    See Also
    --------
    dumps : Return a string corresponding to the pickling of a masked array.
    """
    # Thin delegation: all the work happens in pickle.
    return pickle.loads(strg)
def fromfile(file, dtype=float, count=-1, sep=''):
    # Placeholder mirroring np.fromfile's signature; reading masked arrays
    # directly from raw files is not supported.
    raise NotImplementedError(
        "fromfile() not yet implemented for a MaskedArray.")
def fromflex(fxarray):
    """
    Build a masked array from a suitable flexible-type array.

    The input array has to have a data-type with ``_data`` and ``_mask``
    fields, as output by `MaskedArray.toflex`. If present, any other
    fields are discarded.

    Parameters
    ----------
    fxarray : ndarray
        The structured input array, containing ``_data`` and ``_mask``
        fields.

    Returns
    -------
    result : MaskedArray
        The constructed masked array.

    See Also
    --------
    MaskedArray.toflex : Build a flexible-type array from a masked array.
    """
    # Field access on the structured array yields views; masked_array
    # recombines them into data + mask.
    return masked_array(fxarray['_data'], mask=fxarray['_mask'])
class _convert2ma:
    """
    Convert functions from numpy to numpy.ma.

    Wraps a numpy function so that its result is viewed as a MaskedArray,
    with optional masked-array-only keywords (``fill_value``,
    ``hardmask``) applied to the result.

    Parameters
    ----------
    funcname : string
        Name of the numpy function to wrap.
    params : dict, optional
        Default values for the masked-array-only keyword arguments.
    """
    __doc__ = None

    def __init__(self, funcname, params=None):
        self._func = getattr(np, funcname)
        self.__doc__ = self.getdoc()
        # Defaults for the masked-array-specific keyword arguments.
        self._extras = params or {}

    def getdoc(self):
        "Return the doc of the function (from the doc of the method)."
        doc = getattr(self._func, '__doc__', None)
        sig = get_object_signature(self._func)
        if doc:
            # Add the signature of the function at the beginning of the doc
            if sig:
                sig = "%s%s\n" % (self._func.__name__, sig)
            doc = sig + doc
        return doc

    def __call__(self, *args, **params):
        # Split off the masked-array-only keywords. Work on a *copy* of
        # the defaults: mutating self._extras would leak one caller's
        # value (e.g. fill_value) into every subsequent call.
        extras = dict(self._extras)
        common_params = set(params).intersection(extras)
        for p in common_params:
            extras[p] = params.pop(p)
        # Call the wrapped numpy function and view the result as masked.
        result = self._func.__call__(*args, **params).view(MaskedArray)
        if "fill_value" in common_params:
            result.fill_value = extras.get("fill_value", None)
        if "hardmask" in common_params:
            # Fixed key: previously read "hard_mask", which never exists
            # in the defaults, so a passed hardmask=True was ignored.
            result._hardmask = bool(extras.get("hardmask", False))
        return result
# Masked versions of the common array constructors. Names built with
# _convert2ma return a MaskedArray view and accept the listed extra
# keywords; the plain assignments reuse the numpy functions directly.
arange = _convert2ma('arange', params=dict(fill_value=None, hardmask=False))
clip = np.clip
diff = np.diff
empty = _convert2ma('empty', params=dict(fill_value=None, hardmask=False))
empty_like = _convert2ma('empty_like')
frombuffer = _convert2ma('frombuffer')
fromfunction = _convert2ma('fromfunction')
identity = _convert2ma(
    'identity', params=dict(fill_value=None, hardmask=False))
indices = np.indices
ones = _convert2ma('ones', params=dict(fill_value=None, hardmask=False))
ones_like = np.ones_like
squeeze = np.squeeze
zeros = _convert2ma('zeros', params=dict(fill_value=None, hardmask=False))
zeros_like = np.zeros_like
def append(a, b, axis=None):
    """Append values to the end of an array.

    .. versionadded:: 1.9.0

    Parameters
    ----------
    a : array_like
        Values are appended to a copy of this array.
    b : array_like
        Values appended to a copy of `a`.  Must have the same shape as
        `a` excluding `axis`; if `axis` is not given, `b` may have any
        shape and is flattened before use.
    axis : int, optional
        The axis along which the values are appended.  When omitted,
        both `a` and `b` are flattened before use.

    Returns
    -------
    append : MaskedArray
        A copy of `a` with `b` appended along `axis`.  This never
        operates in-place: a new array is allocated and filled.  With
        ``axis=None`` the result is flattened.

    See Also
    --------
    numpy.append : Equivalent function in the top-level NumPy module.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = ma.masked_values([1, 2, 3], 2)
    >>> b = ma.masked_values([[4, 5, 6], [7, 8, 9]], 7)
    >>> print(ma.append(a, b))
    [1 -- 3 4 5 6 -- 8 9]
    """
    # Appending is simply concatenation of the two operands.
    return concatenate((a, b), axis)
|
{
"content_hash": "4aad2792c0e4d85feccdab5dd4b8c7d8",
"timestamp": "",
"source": "github",
"line_count": 7863,
"max_line_length": 83,
"avg_line_length": 31.440798677349612,
"alnum_prop": 0.5378025151788495,
"repo_name": "ryfeus/lambda-packs",
"id": "574077185251ddcca62f9a7c59892e413d9bb80c",
"size": "247219",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "Skimage_numpy/source/numpy/ma/core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
}
|
import logging
from django import forms
from django.utils.safestring import mark_safe
from xlivesettings import settings as x_setts
log = logging.getLogger(__name__)
class LocalizedStringValueFieldWidget(forms.MultiWidget):
    """A MultiWidget rendering one text input per configured language.

    Each sub-widget is tagged with the language code it edits
    (``widget.lang``); values are exchanged as a ``{lang_code: text}``
    mapping.
    """

    def __init__(self, langs=None, attrs=None):
        # ``langs=None`` replaces the old mutable default ``langs=[]``;
        # behaviour is unchanged for all callers.
        widgets = []
        for lang in (langs or []):
            widget = forms.TextInput(attrs=attrs)
            # Remember which language this sub-widget edits (used in render)
            widget.lang = lang
            widgets.append(widget)
        super(LocalizedStringValueFieldWidget, self).__init__(widgets, attrs)

    def value_from_datadict(self, data, files, name):
        # If the field was submitted as one pre-joined string, split it on
        # whitespace; otherwise collect one value per sub-widget.
        # NOTE(review): ``rsplit()`` with no separator splits on any
        # whitespace — confirm stored values never contain spaces.
        try:
            return data.get(name, None).rsplit()
        except AttributeError:
            return [widget.value_from_datadict(data, files, name + '_%s' % i) for i, widget in enumerate(self.widgets)]

    def decompress(self, value):
        # Presumably the stored value is a sequence of (lang_code, text)
        # pairs; an empty/missing value maps to an empty dict.
        return dict(value) if value else {}

    def render(self, name, value, attrs=None):
        """Render one labelled text input per language sub-widget."""
        log.debug("render")
        if self.is_localized:
            for widget in self.widgets:
                widget.is_localized = self.is_localized
        # value is a list of values, each corresponding to a widget
        # in self.widgets.
        if not isinstance(value, dict):
            value = self.decompress(value)
        output = []
        final_attrs = self.build_attrs(attrs)
        id_ = final_attrs.get('id', None)
        for i, widget in enumerate(self.widgets):
            lang_code = widget.lang
            try:
                widget_label = unicode(x_setts.LANGUAGES_DICT.get(lang_code, lang_code))
                widget_value = value[lang_code]
            except (KeyError, TypeError, AttributeError):
                # Was a bare ``except:`` — narrowed so real failures
                # (e.g. KeyboardInterrupt) are no longer swallowed.
                widget_label, widget_value, lang_code = None, None, None
            if id_:
                final_attrs = dict(final_attrs, id='%s_%s' % (id_, i))
            output.append('<div class="form-row xsettings-multi-input">')
            output.append('<label for="%s">%s:</label>' % (final_attrs['id'], widget_label))
            output.append(widget.render(name + '_%s' % i, widget_value, final_attrs))
            output.append('</div>')
        return mark_safe(self.format_output(output))
|
{
"content_hash": "f86f3c427080cc0bba401fc79e848d80",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 119,
"avg_line_length": 38.50909090909091,
"alnum_prop": 0.5930122757318225,
"repo_name": "oblalex/django-xlivesettings",
"id": "11dccd024b787e0adbc4c832fa3e86d756b9e680",
"size": "2118",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xlivesettings/widgets.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "248"
},
{
"name": "JavaScript",
"bytes": "117"
},
{
"name": "Python",
"bytes": "108497"
}
],
"symlink_target": ""
}
|
# Fix: the Python-2-only ``print`` statement is a syntax error on Python 3.
# Calling print with parentheses behaves identically on Python 2
# (parenthesised expression) and Python 3 (function call).
print('hello')
|
{
"content_hash": "71fc66bfc9fbe7a7ef53fd984dd55379",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 13,
"avg_line_length": 14,
"alnum_prop": 0.7142857142857143,
"repo_name": "liggettla/python",
"id": "d079c1cf462cf29833d8273805cec55234ecac43",
"size": "46",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ex1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41183"
},
{
"name": "Shell",
"bytes": "493"
}
],
"symlink_target": ""
}
|
"""
Description
Example:
Input: [format]
[]
Output: [format]
[]
"""
def solve(input):
    """Return the solution string for a single test case.

    Template placeholder: always answers the literal string "solution".
    """
    return "solution"
# Driver: read one test case per line, solve it, and write the solutions.
# ``with`` guarantees both files are closed even if solve() raises
# (the previous manual open/close leaked the handles on error).
debug = True

with open("io/tests.txt", "r") as input_file:
    with open("io/solution.txt", "w") as output_file:
        for line in input_file:
            test = line.rstrip("\r\n")
            solution = solve(test)
            if debug:
                # Echo the raw input next to the solution while debugging.
                solution += ": '%s'" % test
            output_file.write(solution + "\n")
|
{
"content_hash": "c5e492ef1dfeb6aa8b8dd166c0177f3b",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 43,
"avg_line_length": 16.482758620689655,
"alnum_prop": 0.604602510460251,
"repo_name": "felixs8696/code-hub",
"id": "c1e3594bee6e75c96ce1b83c2184a19c4bde3fbe",
"size": "478",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "Algorithms/Challenges/Combinatorics/BracketPermutations/Python/BracketPermutations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "46021"
},
{
"name": "Python",
"bytes": "19367"
},
{
"name": "Shell",
"bytes": "15271"
}
],
"symlink_target": ""
}
|
import logging; logger = logging.getLogger("morse." + __name__)
import pymoos.MOOSCommClient
import morse.core.middleware
def init_extra_module(self, component_instance, function, mw_data):
    """ Setup the middleware connection with this data

    Registers ``function`` as an output serialiser for this component so
    MORSE invokes it whenever the sensor produces data.

    (The previous revision also computed the component and parent blender
    object names here but never used them; that dead code was removed.)
    """
    # Add the new method to the component
    component_instance.output_functions.append(function)
    # Generate one publisher and one topic for each component that is a sensor and uses post_message
    logger.info('######## Gyroscope-SENSOR INITIALIZED ########')
def post_gyroscope(self, component_instance):
    """ Publish the gyroscope angles (yaw/roll/pitch) to the MOOS DB.

    NOTE(review): the original docstring claimed this publishes "the data
    of the Odometry-sensor as a ROS-Pose message" — a copy/paste leftover;
    the body posts three MOOS notifications for a gyroscope.
    """
    # Timestamp all three notifications with the same MOOS time
    curTime=pymoos.MOOSCommClient.MOOSTime()
    self.m.Notify('zYaw',component_instance.local_data['yaw'],curTime)
    self.m.Notify('zRoll',component_instance.local_data['roll'],curTime)
    self.m.Notify('zPitch',component_instance.local_data['pitch'],curTime)
|
{
"content_hash": "b7c64665adca7b785df84d64ed4102c5",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 100,
"avg_line_length": 43.925925925925924,
"alnum_prop": 0.7276559865092749,
"repo_name": "Arkapravo/morse-0.6",
"id": "0b600f0501b2219837e13d3fda0740b18e95f1be",
"size": "1186",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/morse/middleware/moos/gyroscope.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "46148"
},
{
"name": "C++",
"bytes": "30878"
},
{
"name": "Perl",
"bytes": "1705"
},
{
"name": "Python",
"bytes": "1117700"
},
{
"name": "Shell",
"bytes": "684"
}
],
"symlink_target": ""
}
|
"""create openvas last update table
Revision ID: 4e051e1c257
Revises: 46942860847
Create Date: 2016-01-04 09:04:30.597267
"""
# revision identifiers, used by Alembic.
revision = '4e051e1c257'        # this migration's id
down_revision = '46942860847'   # the migration this one follows
branch_labels = None            # not part of a named branch
depends_on = None               # no cross-branch dependency
from sqlalchemy.dialects import postgresql
from alembic import op
import sqlalchemy as sa
import datetime
def _get_date():
return datetime.datetime.now()
def upgrade():
    """Apply: create the ``openvas_last_updates`` tracking table."""
    columns = [
        sa.Column('id', sa.Integer, primary_key=True, nullable=False),
        sa.Column('perception_product_uuid', postgresql.UUID, nullable=False),
        # updated_at defaults to the wall-clock time at insert
        sa.Column('updated_at', sa.TIMESTAMP(timezone=True), default=_get_date),
    ]
    op.create_table('openvas_last_updates', *columns)
def downgrade():
    """Revert: drop the table created by this migration's upgrade."""
    table_name = 'openvas_last_updates'
    op.drop_table(table_name)
|
{
"content_hash": "aba4b2755148f38718aed735d38a1492",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 92,
"avg_line_length": 24.666666666666668,
"alnum_prop": 0.6928746928746928,
"repo_name": "asrozar/perception",
"id": "2e2a3c51889585ea775dde5c06b3210f001cb8da",
"size": "814",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "perception/database/migrations/versions/4e051e1c257_create_openvas_last_update_table.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mako",
"bytes": "493"
},
{
"name": "Python",
"bytes": "142478"
}
],
"symlink_target": ""
}
|
"""
sentry.interfaces.exception
~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
__all__ = ('Exception',)
import six
from django.conf import settings
from sentry.interfaces.base import Interface, InterfaceValidationError
from sentry.interfaces.stacktrace import Stacktrace, slim_frame_data
from sentry.utils import json
from sentry.utils.safe import trim
class SingleException(Interface):
    """
    A standard exception with a ``type`` and value argument, and an optional
    ``module`` argument describing the exception class type and
    module namespace. Either ``type`` or ``value`` must be present.
    You can also optionally bind a stacktrace interface to an exception. The
    spec is identical to ``sentry.interfaces.Stacktrace``.

    >>> {
    >>>     "type": "ValueError",
    >>>     "value": "My exception value",
    >>>     "module": "__builtins__",
    >>>     "mechanism": {},
    >>>     "stacktrace": {
    >>>         # see sentry.interfaces.Stacktrace
    >>>     }
    >>> }
    """
    # Relative weight of this interface (ordering/priority among interfaces).
    score = 2000
    @classmethod
    def to_python(cls, data, has_system_frames=None, slim_frames=True):
        # Build a SingleException from raw (untrusted) event data; at least
        # one of 'type'/'value' must be present.
        if not (data.get('type') or data.get('value')):
            raise InterfaceValidationError("No 'type' or 'value' present")
        # Parse the optional processed stacktrace (only if it has frames).
        if data.get('stacktrace') and data['stacktrace'].get('frames'):
            stacktrace = Stacktrace.to_python(
                data['stacktrace'],
                has_system_frames=has_system_frames,
                slim_frames=slim_frames,
            )
        else:
            stacktrace = None
        # Parse the optional unprocessed/raw stacktrace the same way.
        if data.get('raw_stacktrace') and data['raw_stacktrace'].get('frames'):
            raw_stacktrace = Stacktrace.to_python(
                data['raw_stacktrace'],
                has_system_frames=has_system_frames,
                slim_frames=slim_frames,
                raw=True
            )
        else:
            raw_stacktrace = None
        # NOTE: 'type' intentionally shadows the builtin within this method.
        type = data.get('type')
        value = data.get('value')
        # Heuristic: when no explicit type was sent, split strings such as
        # "ValueError: bad input" into type and value.
        # NOTE(review): if 'type' is absent and 'value' is not a string this
        # .split() raises AttributeError (the non-string case is only
        # json-dumped further below) — confirm upstream always sends str here.
        if not type and ':' in value.split(' ', 1)[0]:
            type, value = value.split(':', 1)
            # in case of TypeError: foo (no space)
            value = value.strip()
        # Non-string values (dicts, lists, ...) are serialized to JSON text.
        if value is not None and not isinstance(value, six.string_types):
            value = json.dumps(value)
        value = trim(value, 4096)
        # 'mechanism' must be a dict when present; default its 'type' key.
        mechanism = data.get('mechanism')
        if mechanism is not None:
            if not isinstance(mechanism, dict):
                raise InterfaceValidationError('Bad value for mechanism')
            mechanism = trim(data.get('mechanism'), 4096)
            mechanism.setdefault('type', 'generic')
        # trim() bounds every free-form field before storage.
        kwargs = {
            'type': trim(type, 128),
            'value': value,
            'module': trim(data.get('module'), 128),
            'mechanism': mechanism,
            'stacktrace': stacktrace,
            'thread_id': trim(data.get('thread_id'), 40),
            'raw_stacktrace': raw_stacktrace,
        }
        return cls(**kwargs)
    def to_json(self):
        # Serialize back to plain dicts; stacktraces delegate to their own
        # to_json and absent ones serialize as None.
        if self.stacktrace:
            stacktrace = self.stacktrace.to_json()
        else:
            stacktrace = None
        if self.raw_stacktrace:
            raw_stacktrace = self.raw_stacktrace.to_json()
        else:
            raw_stacktrace = None
        return {
            'type': self.type,
            'value': self.value,
            'mechanism': self.mechanism or None,
            'module': self.module,
            'stacktrace': stacktrace,
            'thread_id': self.thread_id,
            'raw_stacktrace': raw_stacktrace,
        }
    def get_api_context(self, is_public=False):
        # API-facing representation: camelCase keys, value coerced to text,
        # stacktraces rendered via their own get_api_context.
        if self.stacktrace:
            stacktrace = self.stacktrace.get_api_context(is_public=is_public)
        else:
            stacktrace = None
        if self.raw_stacktrace:
            raw_stacktrace = self.raw_stacktrace.get_api_context(is_public=is_public)
        else:
            raw_stacktrace = None
        return {
            'type': self.type,
            'value': six.text_type(self.value) if self.value else None,
            'mechanism': self.mechanism or None,
            'threadId': self.thread_id,
            'module': self.module,
            'stacktrace': stacktrace,
            'rawStacktrace': raw_stacktrace,
        }
    def get_alias(self):
        # Short key under which this interface may be sent in an event.
        return 'exception'
    def get_path(self):
        # Canonical dotted path of this interface.
        return 'sentry.interfaces.Exception'
    def get_hash(self):
        # Grouping hash: prefer the stacktrace hash (plus the exception type
        # when available); fall back to type/value when no stacktrace hash.
        output = None
        if self.stacktrace:
            output = self.stacktrace.get_hash()
            if output and self.type:
                output.append(self.type)
        if not output:
            output = [s for s in [self.type, self.value] if s]
        return output
class Exception(Interface):
    """
    An exception consists of a list of values. In most cases, this list
    contains a single exception, with an optional stacktrace interface.
    Each exception has a mandatory ``value`` argument and optional ``type`` and
    ``module`` arguments describing the exception class type and module
    namespace.
    You can also optionally bind a stacktrace interface to an exception. The
    spec is identical to ``sentry.interfaces.Stacktrace``.

    >>> {
    >>>     "values": [{
    >>>         "type": "ValueError",
    >>>         "value": "My exception value",
    >>>         "module": "__builtins__",
    >>>         "mechanism": {},
    >>>         "stacktrace": {
    >>>             # see sentry.interfaces.Stacktrace
    >>>         }
    >>>     }]
    >>> }

    Values should be sent oldest to newest, this includes both the stacktrace
    and the exception itself.

    .. note:: This interface can be passed as the 'exception' key in addition
              to the full interface path.
    """
    # Relative weight of this interface (ordering/priority among interfaces).
    score = 2000
    # Sequence protocol: an Exception behaves as a list of SingleException.
    def __getitem__(self, key):
        return self.values[key]
    def __iter__(self):
        return iter(self.values)
    def __len__(self):
        return len(self.values)
    @classmethod
    def to_python(cls, data):
        # Accept either {'values': [...]} or a bare single-exception dict.
        if 'values' not in data:
            data = {'values': [data]}
        if not data['values']:
            raise InterfaceValidationError("No 'values' present")
        if not isinstance(data['values'], list):
            raise InterfaceValidationError("Invalid value for 'values'")
        # Decide once, across all exceptions, whether frames mix app and
        # system origins; each SingleException is parsed with that flag.
        has_system_frames = cls.data_has_system_frames(data)
        kwargs = {
            'values': [
                SingleException.to_python(
                    v,
                    has_system_frames=has_system_frames,
                    slim_frames=False,
                )
                for v in data['values']
            ],
        }
        # 'exc_omitted' marks a truncated chain; it must be a 2-item value.
        if data.get('exc_omitted'):
            if len(data['exc_omitted']) != 2:
                raise InterfaceValidationError("Invalid value for 'exc_omitted'")
            kwargs['exc_omitted'] = data['exc_omitted']
        else:
            kwargs['exc_omitted'] = None
        instance = cls(**kwargs)
        # we want to wait to slim things til we've reconciled in_app
        slim_exception_data(instance)
        return instance
    @classmethod
    def data_has_system_frames(cls, data):
        # Count in_app vs system frames across every exception's stacktrace.
        system_frames = 0
        app_frames = 0
        for exc in data['values']:
            if not exc.get('stacktrace'):
                continue
            frames = exc['stacktrace'].get('frames')
            if not frames:
                continue
            for frame in frames:
                # XXX(dcramer): handle PHP sending an empty array for a frame
                if not isinstance(frame, dict):
                    continue
                if frame.get('in_app') is True:
                    app_frames += 1
                else:
                    system_frames += 1
        # if there is a mix of frame styles then we indicate that system frames
        # are present and should be represented as a split
        return bool(app_frames and system_frames)
    def to_json(self):
        # Plain-dict serialization; each SingleException serializes itself.
        return {
            'values': [v.to_json() for v in self.values],
            'exc_omitted': self.exc_omitted,
        }
    def get_alias(self):
        # Short key under which this interface may be sent in an event.
        return 'exception'
    def get_path(self):
        # Canonical dotted path of this interface.
        return 'sentry.interfaces.Exception'
    def compute_hashes(self, platform):
        # Produce up to two grouping hashes: one over all frames and one over
        # app-only frames, deduplicated when they coincide.
        system_hash = self.get_hash(system_frames=True)
        if not system_hash:
            return []
        app_hash = self.get_hash(system_frames=False)
        if system_hash == app_hash or not app_hash:
            return [system_hash]
        return [system_hash, app_hash]
    def get_hash(self, system_frames=True):
        # optimize around the fact that some exceptions might have stacktraces
        # while others may not and we ALWAYS want stacktraces over values
        output = []
        for value in self.values:
            if not value.stacktrace:
                continue
            stack_hash = value.stacktrace.get_hash(
                system_frames=system_frames,
            )
            if stack_hash:
                output.extend(stack_hash)
                output.append(value.type)
        if not output:
            # No usable stacktrace hash: fall back to per-exception hashes.
            for value in self.values:
                output.extend(value.get_hash())
        return output
    def get_api_context(self, is_public=False):
        # API-facing representation; hasSystemFrames is true when any
        # attached stacktrace reported system frames.
        return {
            'values': [
                v.get_api_context(is_public=is_public)
                for v in self.values
            ],
            'hasSystemFrames': any(
                v.stacktrace.has_system_frames
                for v in self.values
                if v.stacktrace
            ),
            'excOmitted': self.exc_omitted,
        }
    def to_string(self, event, is_public=False, **kwargs):
        # Human-readable rendering: "Type: value" per exception followed by
        # a short (max 5 app frames) stacktrace when available.
        if not self.values:
            return ''
        output = []
        for exc in self.values:
            output.append(u'{0}: {1}\n'.format(exc.type, exc.value))
            if exc.stacktrace:
                output.append(exc.stacktrace.get_stacktrace(
                    event, system_frames=False, max_frames=5,
                    header=False) + '\n\n')
        return (''.join(output)).strip()
    def get_stacktrace(self, *args, **kwargs):
        # Delegate to the first exception's stacktrace.
        # NOTE(review): uses values[0] although values are documented as
        # oldest-to-newest — confirm the first entry is the intended one.
        exc = self.values[0]
        if exc.stacktrace:
            return exc.stacktrace.get_stacktrace(*args, **kwargs)
        return ''
def slim_exception_data(instance, frame_allowance=settings.SENTRY_MAX_STACKTRACE_FRAMES):
    """
    Removes various excess metadata from middle frames which go beyond
    ``frame_allowance``, counted across all of the instance's stacktraces.
    """
    # TODO(dcramer): it probably makes sense to prioritize a certain exception
    # rather than distributing allowance among all exceptions
    all_frames = []
    for exc in instance.values:
        if exc.stacktrace:
            all_frames.extend(exc.stacktrace.frames)
    slim_frame_data(all_frames, frame_allowance)
|
{
"content_hash": "a25993826b5cceb0e986b7c7b0e36128",
"timestamp": "",
"source": "github",
"line_count": 350,
"max_line_length": 89,
"avg_line_length": 31.357142857142858,
"alnum_prop": 0.5545330296127563,
"repo_name": "BuildingLink/sentry",
"id": "595e330cd9065d5305dab2635bd78e59e622edd1",
"size": "10975",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/sentry/interfaces/exception.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "259940"
},
{
"name": "HTML",
"bytes": "297498"
},
{
"name": "JavaScript",
"bytes": "1051088"
},
{
"name": "Lua",
"bytes": "45617"
},
{
"name": "Makefile",
"bytes": "6255"
},
{
"name": "Python",
"bytes": "14120672"
},
{
"name": "Ruby",
"bytes": "4084"
},
{
"name": "Shell",
"bytes": "793"
}
],
"symlink_target": ""
}
|
"""Unittests for builder_status_lib."""
from __future__ import print_function
import sys
from chromite.lib import builder_status_lib
from chromite.lib import cidb
from chromite.lib import constants
from chromite.lib import cros_test_lib
from chromite.lib import fake_cidb
from chromite.lib import failure_message_lib
from chromite.lib import failure_message_lib_unittest
from chromite.lib.buildstore import FakeBuildStore, BuildIdentifier
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
stage_failure_helper = failure_message_lib_unittest.StageFailureHelper
def ConstructFailureMessages(build_config):
  """Helper method to construct failure messages.

  Builds one top-level stage failure (id 1) with two nested failures
  (ids 2 and 3) and runs them through the FailureMessageManager.
  """
  parent = stage_failure_helper.GetStageFailure(
      build_config=build_config, failure_id=1)
  children = [
      stage_failure_helper.GetStageFailure(
          build_config=build_config, failure_id=fid, outer_failure_id=1)
      for fid in (2, 3)
  ]
  return (
      failure_message_lib.FailureMessageManager.ConstructStageFailureMessages(
          [parent] + children))
class BuilderStatusLibTests(cros_test_lib.MockTestCase):
  """Tests for builder_status_lib."""

  def testGetSlavesAbortedBySelfDestructedMaster(self):
    """Test GetSlavesAbortedBySelfDestructedMaster with aborted slaves."""
    # Fake CIDB backend shared by the master and slave build records.
    db = fake_cidb.FakeCIDBConnection()
    buildstore = FakeBuildStore(db)
    cidb.CIDBConnectionFactory.SetupMockCidb(db)
    master_build_id = db.InsertBuild(
        'master', 1, 'master', 'bot_hostname',
        buildbucket_id=1234)
    master_build_identifier = BuildIdentifier(cidb_id=master_build_id,
                                              buildbucket_id=1234)
    # With no slaves recorded, nothing can have been aborted.
    self.assertEqual(
        set(),
        builder_status_lib.GetSlavesAbortedBySelfDestructedMaster(
            master_build_identifier, buildstore))
    # Three slaves attached to the master build.
    db.InsertBuild(
        'slave_1', 1, 'slave_1', 'bot_hostname',
        master_build_id=master_build_id, buildbucket_id=12)
    db.InsertBuild(
        'slave_2', 2, 'slave_2', 'bot_hostname',
        master_build_id=master_build_id, buildbucket_id=23)
    db.InsertBuild(
        'slave_3', 3, 'slave_3', 'bot_hostname',
        master_build_id=master_build_id, buildbucket_id=34)
    # Mark only slaves 12 and 23 (buildbucket ids) as killed by the
    # master's self-destruction.
    for slave_build_id in (12, 23):
      db.InsertBuildMessage(
          master_build_id,
          message_type=constants.MESSAGE_TYPE_IGNORED_REASON,
          message_subtype=constants.MESSAGE_SUBTYPE_SELF_DESTRUCTION,
          message_value=str(slave_build_id))
    # Only the two flagged slaves should be reported; slave_3 survives.
    self.assertEqual(
        {'slave_1', 'slave_2'},
        builder_status_lib.GetSlavesAbortedBySelfDestructedMaster(
            BuildIdentifier(cidb_id=master_build_id,
                            buildbucket_id=1234), buildstore))
# pylint: disable=protected-access
class BuilderStatusManagerTest(cros_test_lib.MockTestCase):
  """Tests for BuilderStatusManager."""

  def setUp(self):
    # NOTE(review): self.db appears unused by the tests below — confirm
    # whether the fixture is still needed.
    self.db = fake_cidb.FakeCIDBConnection()

  def testCreateBuildFailureMessageWithMessages(self):
    """Test CreateBuildFailureMessage with stage failure messages."""
    overlays = constants.PRIVATE_OVERLAYS
    dashboard_url = 'http://fake_dashboard_url'
    slave = 'cyan-paladin'
    failure_messages = ConstructFailureMessages(slave)
    build_msg = (
        builder_status_lib.BuilderStatusManager.CreateBuildFailureMessage(
            slave, overlays, dashboard_url, failure_messages))
    # With failure messages the summary describes a builder failure, and
    # private overlays imply an internal message.
    self.assertTrue('the builder failed' in build_msg.message_summary)
    self.assertTrue(build_msg.internal)
    self.assertEqual(build_msg.builder, slave)

  def testCreateBuildFailureMessageWithoutMessages(self):
    """Test CreateBuildFailureMessage without stage failure messages."""
    overlays = constants.PUBLIC_OVERLAYS
    dashboard_url = 'http://fake_dashboard_url'
    slave = 'cyan-paladin'
    build_msg = (
        builder_status_lib.BuilderStatusManager.CreateBuildFailureMessage(
            slave, overlays, dashboard_url, None))
    # No stage failures: generic cbuildbot failure; public overlays imply
    # a non-internal message.
    self.assertTrue('cbuildbot failed' in build_msg.message_summary)
    self.assertFalse(build_msg.internal)
    self.assertEqual(build_msg.builder, slave)

  def testCreateBuildFailureMessageWhenCanceled(self):
    """Test CreateBuildFailureMessage with no stage failure and canceled"""
    overlays = constants.PRIVATE_OVERLAYS
    dashboard_url = 'http://fake_dashboard_url'
    slave = 'cyan-paladin'
    build_msg = (
        builder_status_lib.BuilderStatusManager.CreateBuildFailureMessage(
            slave, overlays, dashboard_url, None,
            aborted_by_self_destruction=True))
    # Self-destruction without failures is reported as an abort, not a
    # cbuildbot failure.
    self.assertTrue('aborted by self-destruction' in build_msg.message_summary)
    self.assertFalse('cbuildbot failed' in build_msg.message_summary)
    self.assertEqual(build_msg.builder, slave)

  def testCreateBuildFailureMessageSupersedesCancellation(self):
    """Test CreateBuildFailureMessage with a stage failure when canceled"""
    overlays = constants.PRIVATE_OVERLAYS
    dashboard_url = 'http://fake_dashboard_url'
    slave = 'cyan-paladin'
    failure_messages = ConstructFailureMessages(slave)
    build_msg = (
        builder_status_lib.BuilderStatusManager.CreateBuildFailureMessage(
            slave, overlays, dashboard_url, failure_messages,
            aborted_by_self_destruction=True))
    # Concrete stage failures take precedence over the cancellation text.
    self.assertFalse('canceled by master' in build_msg.message_summary)
    self.assertFalse('cbuildbot failed' in build_msg.message_summary)
    self.assertEqual(build_msg.builder, slave)
|
{
"content_hash": "2630f3ac26b737e3463e3052d6e280ec",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 79,
"avg_line_length": 38.5448275862069,
"alnum_prop": 0.7160493827160493,
"repo_name": "endlessm/chromium-browser",
"id": "a58840140aaeb7650a85b6891c392115f1a8adb9",
"size": "5779",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/chromite/lib/builder_status_lib_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import MotionParticlesFLE as mp
gen_dot = mp.generate_dot
import numpy as np
import os
from default_param import *
# Per-stimulus storage: generation args, rendered movie, simulation results.
image = {}
experiment = 'FLE'
do_sim = False
do_sim = True  # NOTE(review): immediately overrides the line above; set False to skip simulations
for stimulus_tag, im_arg in zip(stim_labels, stim_args):
    # generating the movie
    image[stimulus_tag] = {}
    image[stimulus_tag]['args'] = im_arg
    image[stimulus_tag]['im'] = gen_dot(N_X=N_X, N_Y=N_Y, N_frame=N_frame, **image[stimulus_tag]['args'])
    mp.anim_save(image[stimulus_tag]['im'], os.path.join(mp.figpath, experiment + '-' + stimulus_tag + '-anim'))
    image[stimulus_tag]['result'] = {}
    if do_sim:
        # run the 'MBP' and 'PBP' parameter sets (D_x, D_V, prior) over the latencies
        for D_x, D_V, v_prior, label in zip([mp.D_x, PBP_D_x], [mp.D_V, PBP_D_V], [mp.v_prior, PBP_prior], ['MBP', 'PBP']):
            figname = os.path.join(mp.figpath, experiment + '-' + stimulus_tag + '-' + label)
            image[stimulus_tag]['result'][label] = {}
            # Inject this parameter set into the stimulus args before simulating
            image[stimulus_tag]['args'].update(D_V=D_V, D_x=D_x, v_prior=v_prior)
            _ = mp.figure_image_variable(
                figname,
                N_X, N_Y, N_frame, gen_dot, order=None,
                do_figure=do_figure, do_video=do_video, N_quant_X=N_quant_X, N_quant_Y=N_quant_Y,
                fixed_args=image[stimulus_tag]['args'], latency=latencies)
            try:
                # Load cached .npy result files written under mp.matpath
                for latency in latencies:
                    matname = mp.make_figname(figname, {'latency': latency}).replace(mp.figpath, mp.matpath) + '.npy'
                    image[stimulus_tag]['result'][label][latency] = np.load(matname)
            except:
                # NOTE(review): bare except; if make_figname itself raises
                # before 'matname' is bound, this print raises NameError —
                # consider narrowing to IOError and pre-binding matname.
                print('no result yet for ', matname)
|
{
"content_hash": "deac491119b8ce646164fc549f95e512",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 123,
"avg_line_length": 47.82857142857143,
"alnum_prop": 0.5669056152927121,
"repo_name": "laurentperrinet/Khoei_2017_PLoSCB",
"id": "9ce9d321064b9af4f8dc36183b809cefb753907b",
"size": "1674",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/experiment_fle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "13826119"
},
{
"name": "Python",
"bytes": "114134"
},
{
"name": "TeX",
"bytes": "449498"
}
],
"symlink_target": ""
}
|
"""
A class for expressing and modifying the cost field
associated with your alqr planner.
It prescribes how much you care about getting to the
goal state, avoiding obstacles, keeping within actuator
constraints, avoiding excessive effort etc...
The state space must be ordered as:
[position_1, position_2, ..., position_n, velocity_1, velocity_2, ..., velocity_n]
"""
################################################# DEPENDENCIES
from __future__ import division
import numpy as np
import numpy.linalg as npl
from scipy.optimize import approx_fprime
################################################# PRIMARY CLASS
class Cost_Field:
"""
Instances must be initialized with:
---
nstates: the dimensionality of the state space
ncontrols: the dimensionality of the effort space
nobstates: the dimensionality of "obstacles"
goal0: the initial goal state
goal_weight: array or scalar in cost function (goal_weight * goal_error^2)
effort_weight: array or scalar in cost function (effort_weight * effort^2)
obstacle_weight: scalar in cost function (obstacle_weight * obstacle_nearness^2)
umin and umax: arrays of minimum and maximum allowable actuator efforts (default: no limits)
strictness: scalar for how strict the effort limits are... don't be too strict or poor convergence
arb costs: functions that add onto the state and effort cost calculations (default: no added cost)
(must take state or effort as argument respectively and return a scalar cost)
"""
    def __init__(self, nstates, ncontrols, nobstates, goal0,
                 goal_weight, effort_weight, obstacle_weight,
                 umin=None, umax=None, strictness=100,
                 arb_state_cost=None, arb_effort_cost=None):
        # Dimensionality
        self.nstates = int(nstates)
        self.ncontrols = int(ncontrols)
        self.nobstates = int(nobstates)
        # Get your goals in order and your priorities straight
        # (set_goal validates goal0 length; set_weights normalizes the
        # scalar-or-array weight arguments)
        self.set_goal(goal0)
        self.set_weights(goal_weight, effort_weight, obstacle_weight)
        self.reset_obstacles()
        # Initialize and then set limits
        # (umin/umax must exist before set_constraints, which only
        # overwrites the bounds actually provided)
        self.umin = -np.inf * np.ones(ncontrols)
        self.umax = np.inf * np.ones(ncontrols)
        self.set_constraints(umin, umax, strictness)
        # Initialize and then store arbitrary cost functions
        # (defaults contribute zero cost)
        self.arb_state_cost = lambda x: 0
        self.arb_effort_cost = lambda u: 0
        self.set_arb_costs(arb_state_cost, arb_effort_cost)
        # Finite difference delta size and gradient functions
        # (eps is sqrt(machine epsilon), the usual forward-difference step)
        self.eps = (np.finfo(float).eps)**0.5
        self.state_cost_gradient = lambda x: approx_fprime(x, self.state_cost, self.eps)
        self.effort_cost_gradient = lambda u: approx_fprime(u, self.effort_cost, self.eps)
    def state_cost(self, x):
        """
        Computes the instantaneous cost for being at state x.

        Total cost = quadratic distance-to-goal term + obstacle proximity
        term + user-supplied arbitrary state cost.
        """
        # Distance from goal
        goal_error = self.goal - x
        # Upwards quadratic, centered at goal
        c_goal = (self.goal_weight * goal_error).dot(goal_error)
        # Find which obstacles we are in the region of influence of
        if len(self.obstacle_ids):
            # Column vector of distances from the obstacle-relevant part of
            # the state (first nobstates components) to each obstacle.
            # NOTE(review): assumes obstacle_positions is 2D (one row per
            # obstacle); with exactly one obstacle add_obstacle stores a 1D
            # array — confirm axis=1 behaves as intended in that case.
            distances = np.array([npl.norm(self.obstacle_positions - x[:self.nobstates], axis=1)]).T
            contributors = (distances <= self.obstacle_rois)
            # Cosine hump: full obstacle_weight at the obstacle center,
            # tapering to zero at the influence radius; only obstacles
            # within their radius contribute.
            c_obs = np.sum((self.obstacle_weight*(np.cos((distances/self.obstacle_rois)*np.pi)+1)/2)[contributors])
        else:
            c_obs = 0
        return c_goal + c_obs + self.arb_state_cost(x)
def effort_cost(self, u):
"""
Computes the instantaneous cost for applying effort u.
"""
# Upwards quadratic, centered at zero effort
c = (self.effort_weight * u).dot(u)
# Quadratically increase cost for leaving effort bounds
for i, eff in enumerate(u):
if eff >= self.umax[i]:
c = c + self.strictness*(eff - self.umax[i])**2
elif eff <= self.umin[i]:
c = c + self.strictness*(eff - self.umin[i])**2
return c + self.arb_effort_cost(u)
def state_cost_hessian(self, x):
"""
Computes the jacobian of the gradient of the cost field
with respect to the state. One might approximate this as
np.outer(gradient, gradient), but not me, because reasons.
"""
Q = np.eye(self.nstates)
grad = self.state_cost_gradient(x)
for i in xrange(self.nstates):
x_perturbed = x.astype(np.float64)
x_perturbed[i] = x_perturbed[i] + self.eps
Q[:self.nstates, i] = (self.state_cost_gradient(x_perturbed) - grad)
return Q / self.eps
def effort_cost_hessian(self, u):
"""
Computes the jacobian of the gradient of the cost field
with respect to the effort. One might approximate this as
np.outer(gradient, gradient), but not me, because reasons.
"""
R = np.eye(self.ncontrols)
grad = self.effort_cost_gradient(u)
for i in xrange(self.ncontrols):
u_perturbed = u.astype(np.float64)
u_perturbed[i] = u_perturbed[i] + self.eps
R[:self.ncontrols, i] = (self.effort_cost_gradient(u_perturbed) - grad)
return R / self.eps
def add_obstacle(self, name, position, influence_radius):
"""
Obstacles must be given a name (string), a central
position (array), and an influence radius (float).
"""
if len(self.obstacle_ids) == 0:
self.obstacle_ids = np.array([name])
self.obstacle_positions = np.array(position, dtype=np.float64)
self.obstacle_rois = np.array([influence_radius], dtype=np.float64)
else:
self.obstacle_ids = np.vstack((self.obstacle_ids, name))
self.obstacle_positions = np.vstack((self.obstacle_positions, position))
self.obstacle_rois = np.vstack((self.obstacle_rois, influence_radius))
def remove_obstacle(self, name):
"""
Remove an obstacle by providing its name (string).
"""
keepers = (self.obstacle_ids != name).flatten()
self.obstacle_ids = self.obstacle_ids[keepers]
self.obstacle_positions = self.obstacle_positions[keepers]
self.obstacle_rois = self.obstacle_rois[keepers]
def reset_obstacles(self):
"""
Clears all obstacles.
"""
self.obstacle_ids = np.array([])
self.obstacle_positions = np.array([])
self.obstacle_rois = np.array([])
def set_goal(self, goal):
"""
Use to modify the overall goal state ("waypoint").
"""
if len(goal) == self.nstates:
self.goal = np.array(goal, dtype=np.float64)
else:
raise ValueError("The goal must be a state vector (with nstates elements).")
def set_weights(self, goal_weight=None, effort_weight=None, obstacle_weight=None):
"""
Use to modify the weights for the various influences on behavior.
The weight for obstacles must be a scalar, but the state and effort weights can be an array or scalar.
Weights that aren't given aren't changed.
"""
if goal_weight is not None:
if type(goal_weight) in [int, float]:
self.goal_weight = float(goal_weight) * np.ones(self.nstates)
elif len(goal_weight) == self.nstates:
self.goal_weight = np.array(goal_weight, dtype=np.float64)
else:
raise ValueError("The goal_weight must be a scalar or array of length nstates.")
if effort_weight is not None:
if type(effort_weight) in [int, float]:
self.effort_weight = float(effort_weight) * np.ones(self.ncontrols)
elif len(effort_weight) == self.ncontrols:
self.effort_weight = np.array(effort_weight, dtype=np.float64)
else:
raise ValueError("The effort_weight must be a scalar or array of length ncontrols.")
if obstacle_weight is not None:
if type(obstacle_weight) in [int, float]:
self.obstacle_weight = float(obstacle_weight)
else:
raise ValueError("The obstacle_weight must be a scalar.")
def set_constraints(self, umin=None, umax=None, strictness=None):
"""
Use to modify effort limits.
Limits that aren't given aren't changed.
"""
if umin is not None:
if len(umin) == self.ncontrols:
self.umin = np.array(umin, dtype=np.float64)
else:
raise ValueError("Actuator constraint umin must have same length as the number of controls.")
if umax is not None:
if len(umax) == self.ncontrols:
self.umax = np.array(umax, dtype=np.float64)
else:
raise ValueError("Actuator constraint umax must have same length as the number of controls.")
if strictness is not None:
self.strictness = strictness
def set_arb_costs(self, arb_state_cost=None, arb_effort_cost=None):
"""
Use to modify arbitrary additions to the cost field.
Arguments not given are not changed.
"""
if arb_state_cost is not None:
if hasattr(arb_state_cost, '__call__'):
self.arb_state_cost = arb_state_cost
else:
raise ValueError("Expected arb_state_cost to be a function.")
if arb_effort_cost is not None:
if hasattr(arb_effort_cost, '__call__'):
self.arb_effort_cost = arb_effort_cost
else:
raise ValueError("Expected arb_effort_cost to be a function.")
|
{
"content_hash": "e8ab80b44da685ac91cc6fd701094d50",
"timestamp": "",
"source": "github",
"line_count": 254,
"max_line_length": 106,
"avg_line_length": 33.75196850393701,
"alnum_prop": 0.6978887204012598,
"repo_name": "jnez71/aLQR",
"id": "264ff39642ee2e82decdf37bb6c05b8fce01c266",
"size": "8573",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alqr/cost_field.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "75333"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import six
from pants.build_graph.address import Addresses
class FromTarget(object):
  """Used in a BUILD file to redirect the value of the sources= attribute to another target.
  """

  class ExpectedAddressError(Exception):
    """Thrown if an object that is not an address is added to an import attribute.
    """

  def __init__(self, parse_context):
    """
    :param ParseContext parse_context: build file context
    """
    self._parse_context = parse_context

  def __call__(self, address):
    """Expects a string representing an address.

    :raises ExpectedAddressError: if `address` is not a string.
    """
    if not isinstance(address, six.string_types):
      # Bug fix: the format string uses the *named* field {type}, so the value
      # must be passed as a keyword argument.  The previous positional call
      # `.format(type(address))` raised KeyError('type') instead of producing
      # the intended error message.
      raise self.ExpectedAddressError("Expected string address argument, got type {type}"
                                      .format(type=type(address)))
    return Addresses(addresses=[address], rel_path=self._parse_context.rel_path)
|
{
"content_hash": "870c6b85f996e2336d497a343b4757e2",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 93,
"avg_line_length": 36.666666666666664,
"alnum_prop": 0.6747474747474748,
"repo_name": "megaserg/pants",
"id": "fa27880e2a726f27104931c496c4d5a5443334d3",
"size": "1137",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "src/python/pants/backend/core/from_target.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "781"
},
{
"name": "CSS",
"bytes": "11572"
},
{
"name": "GAP",
"bytes": "2459"
},
{
"name": "Go",
"bytes": "1437"
},
{
"name": "HTML",
"bytes": "64029"
},
{
"name": "Java",
"bytes": "307373"
},
{
"name": "JavaScript",
"bytes": "28962"
},
{
"name": "Protocol Buffer",
"bytes": "3783"
},
{
"name": "Python",
"bytes": "4127534"
},
{
"name": "Scala",
"bytes": "85457"
},
{
"name": "Shell",
"bytes": "49640"
},
{
"name": "Thrift",
"bytes": "2898"
}
],
"symlink_target": ""
}
|
import pytest
import numpy as np
from pathlib import Path
from qutip.qip.qasm import read_qasm, circuit_to_qasm_str
from qutip.qip.circuit import Measurement, QubitCircuit
from qutip import tensor, rand_ket, basis, rand_dm, identity
from qutip.qip.operations.gates import cnot, ry
@pytest.mark.parametrize(["filename", "error", "error_message"], [
    pytest.param("command_error.qasm", SyntaxError,
                 "QASM: post is not a valid QASM command."),
    pytest.param("bracket_error.qasm", SyntaxError,
                 "QASM: incorrect bracket formatting"),
    pytest.param("qasm_error.qasm", SyntaxError,
                 "QASM: File does not contain QASM 2.0 header")])
def test_qasm_errors(filename, error, error_message):
    """Malformed QASM inputs must raise the given error with its message."""
    qasm_path = Path(__file__).parent / 'qasm_files' / filename
    with pytest.raises(error) as exc_info:
        read_qasm(qasm_path)
    assert error_message in str(exc_info.value)
def check_gate_defn(gate, gate_name, targets, controls=None,
                    classical_controls=None, control_value=None):
    """Assert that `gate` carries exactly the expected attribute values."""
    expected = {"name": gate_name,
                "targets": targets,
                "controls": controls,
                "classical_controls": classical_controls,
                "control_value": control_value}
    for attribute, value in expected.items():
        assert getattr(gate, attribute) == value
def check_measurement_defn(gate, gate_name, targets, classical_store):
    """Assert that the measurement `gate` matches the expected attributes."""
    assert (gate.name, gate.targets, gate.classical_store) == \
        (gate_name, targets, classical_store)
def test_qasm_addcircuit():
    """Reading test_add.qasm must yield the expected circuit structure."""
    qasm_path = Path(__file__).parent / 'qasm_files' / "test_add.qasm"
    with pytest.warns(UserWarning, match="not preserved in QubitCircuit"):
        qc = read_qasm(qasm_path)
    assert (qc.N, qc.num_cbits) == (2, 2)
    # (name, targets, controls, classical_controls, control_value) per gate.
    expected_gates = [
        ("X", [1], None, None, None),
        ("SNOT", [0], None, None, None),
        ("SNOT", [1], None, None, None),
        ("CNOT", [1], [0], None, None),
        ("SNOT", [0], None, None, None),
        ("SNOT", [1], None, None, None),
        ("SNOT", [0], None, [0, 1], 0),
    ]
    for gate, spec in zip(qc.gates, expected_gates):
        check_gate_defn(gate, *spec)
    check_measurement_defn(qc.gates[7], "M", [0], 0)
    check_measurement_defn(qc.gates[8], "M", [1], 1)
def test_custom_gates():
    """Custom gate definitions in QASM must match their QuTiP equivalents."""
    qasm_path = Path(__file__).parent / 'qasm_files' / "test_custom_gates.qasm"
    propagators = read_qasm(qasm_path).propagators()
    # The first two custom gates are defined to be identical.
    assert (propagators[0] - propagators[1]).norm() < 1e-12
    expected = cnot() * tensor(identity(2), ry(np.pi / 2))
    assert (propagators[2] - expected).norm() < 1e-12
def test_qasm_teleportation():
    """Teleportation must reproduce the input qubit's measurement statistics
    on the output qubit, with each classical outcome occurring with p=0.25."""
    qasm_path = Path(__file__).parent / 'qasm_files' / "teleportation.qasm"
    with pytest.warns(UserWarning, match="not preserved in QubitCircuit"):
        teleportation = read_qasm(qasm_path)
    final_measurement = Measurement("start", targets=[2])
    initial_measurement = Measurement("start", targets=[0])
    state = tensor(rand_ket(2), basis(2, 0), basis(2, 0))
    _, initial_probabilities = initial_measurement.measurement_comp_basis(state)
    results = teleportation.run_statistics(state)
    for final_state, probability in zip(results.get_final_states(),
                                        results.get_probabilities()):
        _, final_probabilities = \
            final_measurement.measurement_comp_basis(final_state)
        np.testing.assert_allclose(initial_probabilities,
                                   final_probabilities)
        assert probability == pytest.approx(0.25, abs=1e-7)
def test_qasm_str():
    """A simple circuit must serialise to the expected QASM text."""
    expected = ('// QASM 2.0 file generated by QuTiP\n\nOPENQASM 2.0;'
                '\ninclude "qelib1.inc";\n\nqreg q[2];\ncreg c[1];\n\n'
                'x q[0];\nmeasure q[1] -> c[0]\n')
    circuit = QubitCircuit(2, num_cbits=1)
    circuit.add_gate("X", targets=[0])
    circuit.add_measurement("M", targets=[1], classical_store=0)
    assert circuit_to_qasm_str(circuit) == expected
def test_export_import():
    """Round-tripping a circuit through QASM must preserve its propagators."""
    circuit = QubitCircuit(3)
    for controlled_rotation in ("CRY", "CRX", "CRZ"):
        circuit.add_gate(controlled_rotation, targets=1, controls=0,
                         arg_value=np.pi)
    circuit.add_gate("CNOT", targets=1, controls=0)
    circuit.add_gate("TOFFOLI", targets=2, controls=[0, 1])
    # circuit.add_gate("SQRTNOT", targets=0)
    circuit.add_gate("CS", targets=1, controls=0)
    circuit.add_gate("CT", targets=1, controls=0)
    circuit.add_gate("SWAP", targets=[0, 1])
    circuit.add_gate("QASMU", targets=[0], arg_value=[np.pi, np.pi, np.pi])
    for rotation in ("RX", "RY", "RZ"):
        circuit.add_gate(rotation, targets=[0], arg_value=np.pi)
    for single_qubit in ("SNOT", "X", "Y", "Z", "S", "T"):
        circuit.add_gate(single_qubit, targets=[0])
    # circuit.add_gate("CSIGN", targets=[0], controls=[1])
    round_tripped = read_qasm(circuit_to_qasm_str(circuit), strmode=True)
    for original, recovered in zip(circuit.propagators(),
                                   round_tripped.propagators()):
        assert (original - recovered).norm() < 1e-12
|
{
"content_hash": "9bf0c1df25960355adeed4f1b282e592",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 80,
"avg_line_length": 39.74242424242424,
"alnum_prop": 0.6387723980175372,
"repo_name": "qutip/qutip",
"id": "caef7a3e8d9adcda4258cb26bc5260a6fab5dab0",
"size": "5246",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "qutip/tests/test_qasm.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "13979"
},
{
"name": "Cython",
"bytes": "354994"
},
{
"name": "OpenQASM",
"bytes": "1718"
},
{
"name": "Python",
"bytes": "2810040"
}
],
"symlink_target": ""
}
|
import sys
def count_distinct_consecutive(lines):
    """Return the number of runs of identical consecutive lines.

    For input grouped/sorted by user (as in a Hadoop streaming reducer)
    this is the number of distinct users.
    """
    total = 0
    previous = None  # sentinel: compares unequal to every real line
    for line in lines:
        if line != previous:
            total += 1
            previous = line
    return total


if __name__ == '__main__':
    # print() with a single parenthesised argument behaves identically on
    # Python 2 and 3; the original used a Python-2-only print statement.
    print('%d' % count_distinct_consecutive(sys.stdin))
|
{
"content_hash": "5d4cca8c17d7a4453af0dd4e3d632a82",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 33,
"avg_line_length": 16.666666666666668,
"alnum_prop": 0.595,
"repo_name": "jatinmistry13/PrimitiveRecommenderSystem",
"id": "aeccb7bfc908d276a528136a60d2c7917997bb77",
"size": "224",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Task3-Lift/src/UserListReduce.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33765"
},
{
"name": "Shell",
"bytes": "34833"
}
],
"symlink_target": ""
}
|
import unittest
import os
from test.aiml_tests.client import TestClient
from programy.config.brain import BrainFileConfiguration
class UtiltyTestClient(TestClient):
    """Test client whose brain loads the AIML files next to this test module."""

    def __init__(self):
        TestClient.__init__(self)

    def load_configuration(self, arguments):
        super(UtiltyTestClient, self).load_configuration(arguments)
        aiml_dir = os.path.dirname(__file__)
        self.configuration.brain_configuration._aiml_files = BrainFileConfiguration(files=aiml_dir)
class UtiltyAIMLTests(unittest.TestCase):
    """Exercises the util AIML grammar through the test client's bot."""

    def setUp(self):
        # Fresh client per test; stored on the class as in the sibling suites.
        UtiltyAIMLTests.test_client = UtiltyTestClient()

    def test_util_function(self):
        answer = UtiltyAIMLTests.test_client.bot.ask_question("test", "KEITH IS A PROGRAMMER")
        self.assertIsNotNone(answer)
        self.assertEqual(answer, 'Ok, I will remember KEITH is a PROGRAMMER .')
|
{
"content_hash": "fbd5833331b0152773a0137d5ac3a808",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 116,
"avg_line_length": 34.666666666666664,
"alnum_prop": 0.7307692307692307,
"repo_name": "dkamotsky/program-y",
"id": "83359e744c69b7644027f5a12a8f5a03e128632a",
"size": "832",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/test/aiml_tests/util_tests/test_util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "937"
},
{
"name": "HTML",
"bytes": "1583"
},
{
"name": "Python",
"bytes": "1131157"
},
{
"name": "Shell",
"bytes": "3481"
}
],
"symlink_target": ""
}
|
# HTTP server binding for the agent: listen on every interface, port 7090.
server = {
    'port': 7090,
    'host': '0.0.0.0'
}
# Pecan Application Configurations
# See https://pecan.readthedocs.org/en/latest/configuration.html#application-configuration  # noqa
app = {
    'root': 'bm_instance_agent.api.controllers.root.RootController',
    'modules': ['bm_instance_agent.api'],
    'debug': False
}
|
{
"content_hash": "a089f3ecc6953d5acc919b63d10a2b23",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 97,
"avg_line_length": 27.25,
"alnum_prop": 0.6758409785932722,
"repo_name": "zstackorg/zstack-utility",
"id": "9bc853e948d1fe9b1f1ded3da3379f80fc77ffa1",
"size": "453",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bm-instance-agent/bm_instance_agent/api/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AGS Script",
"bytes": "1147"
},
{
"name": "HTML",
"bytes": "4445"
},
{
"name": "Pascal",
"bytes": "187"
},
{
"name": "Puppet",
"bytes": "10417"
},
{
"name": "Python",
"bytes": "2093719"
},
{
"name": "Shell",
"bytes": "232075"
}
],
"symlink_target": ""
}
|
"""
Contains basic interface (abstract base class) for word embeddings.
"""
import os
from abc import ABCMeta, abstractmethod
class IWordEmbedding(object):
    """
    Abstract base class for word embeddings
    """
    __metaclass__ = ABCMeta

    def __init__(self, path, vector_length):
        self.model = None
        self.path = path
        self.vector_length = vector_length
        self.already_built = False

    @abstractmethod
    def _build(self):
        raise NotImplementedError

    @abstractmethod
    def __getitem__(self, word):
        raise NotImplementedError

    def build(self):
        """ Loads word embedding from its file """
        # Idempotent: a second call is a no-op.
        if self.already_built:
            return
        print("Loading pre-trained word embedding from {0}...".format(self.path))
        self._build()
        self.already_built = True
        print("Pre-trained word embedding from {0} loaded!".format(self.path))

    def get_embedding_model_path(self):
        """ :return: absolute path to folder containing saved word embedding model """
        models_root = os.path.join(os.path.dirname(__file__),
                                   '../../../models/word_embeddings')
        return os.path.join(models_root, self.path)

    @staticmethod
    def data_file_to_sentences(data_file_path):
        """
        Converts a processed data file to generator of lists of words
        :param data_file_path: path to data file
        :return: iterator yielding sentences as lists of words
        """
        with open(data_file_path, 'r') as data_file:
            for raw_line in data_file:
                # Each line is "<label> <comma-separated words>".
                words_part = raw_line.split(' ')[1]
                yield map(lambda token: token.rstrip(), words_part.split(','))

    def __str__(self):
        return type(self).__name__
|
{
"content_hash": "e71d3786f6a41b6f300c3cc324185c64",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 100,
"avg_line_length": 31.71698113207547,
"alnum_prop": 0.6067816775728733,
"repo_name": "mikolajsacha/tweetsclassification",
"id": "7f8f6a7f28d8b06830ecdb7ea64f4bc472c4bc8e",
"size": "1681",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/features/word_embeddings/iword_embedding.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "190976"
},
{
"name": "Makefile",
"bytes": "4259"
},
{
"name": "Python",
"bytes": "97368"
}
],
"symlink_target": ""
}
|
from hgwebcommit.actions import FunctionAction, manager
def action(name, label, params=None):
    """Decorator: register the wrapped function as a named action and
    return it unchanged."""
    def register(func):
        manager.add(FunctionAction(name, label, func, params))
        return func
    return register
|
{
"content_hash": "75397d8454215af937a38de788c012e4",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 55,
"avg_line_length": 29.25,
"alnum_prop": 0.6709401709401709,
"repo_name": "tokibito/flask-hgwebcommit",
"id": "357751de098977d7973476f0d404b65ca3adeb16",
"size": "234",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hgwebcommit/actions/decorators.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "29561"
}
],
"symlink_target": ""
}
|
import sys
from nltk.corpus.reader import util
from nltk.corpus.reader.api import *
from nltk.corpus.reader.util import *
class ChasenCorpusReader(CorpusReader):
    """Corpus reader for ChaSen-format morphological analyzer output.

    Exposes word/sentence/paragraph views, each optionally tagged, all built
    from the same ChasenCorpusView with different grouping flags.
    """

    def __init__(self, root, fileids, encoding="utf8", sent_splitter=None):
        self._sent_splitter = sent_splitter
        CorpusReader.__init__(self, root, fileids, encoding)

    def _views(self, fileids, tagged, group_by_sent, group_by_para):
        # Shared builder: one ChasenCorpusView per file, lazily concatenated.
        # Previously this expression was duplicated across all six accessors.
        return concat(
            [
                ChasenCorpusView(fileid, enc, tagged, group_by_sent,
                                 group_by_para, self._sent_splitter)
                for (fileid, enc) in self.abspaths(fileids, True)
            ]
        )

    def words(self, fileids=None):
        return self._views(fileids, False, False, False)

    def tagged_words(self, fileids=None):
        return self._views(fileids, True, False, False)

    def sents(self, fileids=None):
        return self._views(fileids, False, True, False)

    def tagged_sents(self, fileids=None):
        return self._views(fileids, True, True, False)

    def paras(self, fileids=None):
        return self._views(fileids, False, True, True)

    def tagged_paras(self, fileids=None):
        return self._views(fileids, True, True, True)
class ChasenCorpusView(StreamBackedCorpusView):
    """
    A specialized corpus view for ChasenReader. Similar to ``TaggedCorpusView``,
    but this'll use fixed sets of word and sentence tokenizer.
    """

    def __init__(
        self,
        corpus_file,
        encoding,
        tagged,
        group_by_sent,
        group_by_para,
        sent_splitter=None,
    ):
        # Flags select the produced view: bare words vs. (word, tag) pairs,
        # optionally grouped into sentences and/or paragraphs.
        self._tagged = tagged
        self._group_by_sent = group_by_sent
        self._group_by_para = group_by_para
        # Optional predicate on a (word, tag) pair marking extra sentence ends.
        self._sent_splitter = sent_splitter
        StreamBackedCorpusView.__init__(self, corpus_file, encoding=encoding)

    def read_block(self, stream):
        """Reads one paragraph at a time."""
        block = []
        # A paragraph is a run of non-empty lines terminated by an "EOS" line.
        for para_str in read_regexp_block(stream, r".", r"^EOS\n"):
            para = []
            sent = []
            for line in para_str.splitlines():
                _eos = line.strip() == "EOS"
                # ChaSen lines are "<surface>\t<analysis fields...>"; keep the
                # analysis fields joined back together as the tag.
                _cells = line.split("\t")
                w = (_cells[0], "\t".join(_cells[1:]))
                if not _eos:
                    sent.append(w)
                # Close the current sentence at EOS or when the splitter fires.
                if _eos or (self._sent_splitter and self._sent_splitter(w)):
                    if not self._tagged:
                        sent = [w for (w, t) in sent]
                    if self._group_by_sent:
                        para.append(sent)
                    else:
                        para.extend(sent)
                    sent = []
            # Flush a trailing sentence that had no explicit terminator.
            if len(sent) > 0:
                if not self._tagged:
                    sent = [w for (w, t) in sent]
                if self._group_by_sent:
                    para.append(sent)
                else:
                    para.extend(sent)
            if self._group_by_para:
                block.append(para)
            else:
                block.extend(para)
        return block
def demo():
    """Print a sample of words and tagged sentences from the JEITA corpus."""
    import nltk
    from nltk.corpus.util import LazyCorpusLoader

    jeita = LazyCorpusLoader("jeita", ChasenCorpusReader, r".*chasen", encoding="utf-8")
    print("/".join(jeita.words()[22100:22140]))

    rendered_sents = []
    for sent in jeita.tagged_sents()[2170:2173]:
        rendered_sents.append(
            "\n".join("{}/{}".format(w[0], w[1].split("\t")[2]) for w in sent)
        )
    print("\nEOS\n".join(rendered_sents))
def test():
    """Smoke test: tags read from the JEITA corpus must be plain strings."""
    from nltk.corpus.util import LazyCorpusLoader

    jeita = LazyCorpusLoader("jeita", ChasenCorpusReader, r".*chasen", encoding="utf-8")
    _word, tag = jeita.tagged_words()[0]
    assert isinstance(tag, str)
if __name__ == "__main__":
    # Run the demo and the smoke test when executed as a script.
    demo()
    test()
|
{
"content_hash": "9e88aeefbcad9b2b137b023d6afcebf5",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 88,
"avg_line_length": 28.80921052631579,
"alnum_prop": 0.517698104590089,
"repo_name": "nltk/nltk",
"id": "c5b2c8cc7f9e5ed4c3b7a23e31e5f897849afa97",
"size": "4541",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "nltk/corpus/reader/chasen.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "705"
},
{
"name": "HTML",
"bytes": "24786"
},
{
"name": "Jupyter Notebook",
"bytes": "55608"
},
{
"name": "Makefile",
"bytes": "7983"
},
{
"name": "Python",
"bytes": "4831858"
},
{
"name": "Shell",
"bytes": "10877"
}
],
"symlink_target": ""
}
|
from yambopy.tools.duck import isstring
import yamboparser
class YamboFile(yamboparser.YamboFile):
    """
    Wrapper around the Yambofile class of yamboparser
    """

    # NOTE(review): the two stubs below take `filename` in the `self` slot and
    # carry no @staticmethod decorator — confirm the intended calling
    # convention before implementing them.
    def from_dict(filename):
        """
        Initialize from a dictionary (not yet implemented).
        """
        pass

    def from_file(filename):
        """
        Read a file and find what type it is (not yet implemented).
        """
        #detect the type
        #store the data
        pass

    def write_json(self,filename):
        """
        Write a json file with the data for this file (not yet implemented).
        """
        pass

    @staticmethod
    def has_tag(filename,tags):
        """check if the filename has a tag in its name"""
        # Accept a single tag string or an iterable of tags.
        if isstring(tags):
            tags = (tags,)
        return any([tag in filename for tag in tags])

    @staticmethod
    def is_output(filename):
        """check if the file is output file"""
        return filename.startswith('o.') or filename.startswith('o-')

    @staticmethod
    def is_log(filename):
        """check if the file is log file"""
        return filename.startswith('l.') or filename.startswith('l-')

    @staticmethod
    def is_report(filename):
        """check if the file is report file"""
        return filename.startswith('r-')
class YamboGW(YamboFile):
    """
    Provide functions specific of GW calculations
    """

    def plot_gw(self):
        # Placeholder: not implemented yet.
        pass
class YamboEPS(YamboFile):
    """
    Provide functions specific of BSE calculations
    """

    def plot_eps(self):
        # Placeholder: not implemented yet.
        pass
|
{
"content_hash": "5653e5d8cabe5d888936b095f952c56a",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 69,
"avg_line_length": 24.047619047619047,
"alnum_prop": 0.5801980198019802,
"repo_name": "alexmoratalla/yambopy",
"id": "88da78e1c55a3a5ade5eb4c415d5f6fdb8e710ec",
"size": "1515",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "yambopy/io/yambofile.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "887890"
},
{
"name": "Shell",
"bytes": "1296"
}
],
"symlink_target": ""
}
|
"""Generators
It's head-exploding time!
We recently wrote a function that, given lines
with dates and measurements, splits them up,
makes sure they only move forward, and prints them
out.
Printing is nice, but not impressively useful or
exciting. We want to _transform_ this data
(eventually into a chart), not just output it.
Instead of printing, the function can be made more
generally useful by returning a list. In fact,
that's what we've done here. Take a look and see
what it's doing. Keep in mind that in order to do
this, even though the file is read incrementally,
and the consumer may only need things one at a
time, the entire dataset must be in memory for
this to work.
Let's fix that using one of Python's more powerful
and elegant constructs: the **iterator generator**.
By placing a "yield" keyword in the function, the
function is changed to not merely return a single
value, but to return an _iterable_ that can
produce _all yielded values_ one at a time, when
asked. Recall that |for| loops work with
iterables, as does the |list| builtin.
Exercises
- Replace the code as described in the TODO
sections and see how it works (and notice that
we changed the name of the function to reflect
what it returns).
- Write a |for| loop in the main code (replace the
use of |_testmod| if you want) that
outputs the result of |parsed_measurements(...)|
with some lines of your own.
"""
# Rebinds the module docstring — presumably so doctest/_testmod (see the
# __main__ guard below) checks these examples instead of the prose above;
# confirm against the tutorial runner.
__doc__ = """Some notes on 'parsed_measurements'.
This passes right now. Your job is to convert the
function to a generator and keep it passing.
>>> list(parsed_measurements(['2012-10-10 5.4',
... '2012-10-11 5.3']))
[('2012-10-10', '5.4'), ('2012-10-11', '5.3')]
"""
def parsed_measurements(lines):
    """Parse 'date measurement' lines into a list of (date, measurement) tuples.

    Blank lines and '#' comment lines are skipped.  Dates must be strictly
    increasing; a non-increasing date raises ValueError.
    """
    # TODO:
    # Remove this values list. Just kill it.
    values = []
    last_date = ""  # less than all other strings
    for line in lines:
        line = line.strip()
        if not line or line.startswith('#'):
            continue
        date, measurement = line.split()
        if date <= last_date:
            raise ValueError("Non-increasing: %s -> %s" % (last_date, date))
        # Advance the watermark so the monotonicity check above is effective
        # (previously last_date was never updated, so every date was only
        # compared against the initial "").
        last_date = date
        # TODO:
        # Replace this line with
        #   yield date, measurement
        # And remove the return statement completely.
        # Then step back, run it, and see if you can figure
        # out what is going on.
        values.append((date, measurement))
    return values
if __name__ == '__main__':
    # NOTE(review): _testmod is not defined or imported anywhere in this view —
    # presumably `from doctest import testmod as _testmod` was intended; confirm.
    if _testmod().failed == 0:
        print "Success!"
|
{
"content_hash": "9e5187f2b2e8d0aab7faceb8131ef83b",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 70,
"avg_line_length": 29.71951219512195,
"alnum_prop": 0.6918342224045958,
"repo_name": "shiblon/pytour",
"id": "d3dac9fdc0a9728b0b4946443fc4c1495d2f36a1",
"size": "2450",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tutorials/generators.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "195977"
},
{
"name": "HTML",
"bytes": "2110262"
},
{
"name": "JavaScript",
"bytes": "5106892"
},
{
"name": "Python",
"bytes": "15081380"
},
{
"name": "Shell",
"bytes": "1018"
}
],
"symlink_target": ""
}
|
from rest_framework import serializers
from biz.idc.models import DataCenter, UserDataCenter
class DataCenterSerializer(serializers.ModelSerializer):
    # REST serializer for DataCenter; no explicit `fields` list, so the field
    # set follows the ModelSerializer default.
    class Meta:
        model = DataCenter
class UserDataCenterSerializer(serializers.ModelSerializer):
    # REST serializer for UserDataCenter; field set follows the
    # ModelSerializer default.
    class Meta:
        model = UserDataCenter
class DetailedUserDataCenterSerializer(serializers.ModelSerializer):
    # Nested, read-only representation of the related data center.
    data_center = DataCenterSerializer(read_only=True)

    class Meta:
        model = UserDataCenter
        fields = ['data_center', 'tenant_name', 'tenant_uuid', 'keystone_user']
|
{
"content_hash": "054becb1376bbc1624fd1ea94dab1b55",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 79,
"avg_line_length": 23.458333333333332,
"alnum_prop": 0.7442273534635879,
"repo_name": "zhanghui9700/eonboard",
"id": "eb8ea10a7768eb5af09597e78dec9c5942a29e31",
"size": "601",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "eoncloud_web/biz/idc/serializer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1099594"
},
{
"name": "HTML",
"bytes": "400059"
},
{
"name": "JavaScript",
"bytes": "523547"
},
{
"name": "Python",
"bytes": "649118"
}
],
"symlink_target": ""
}
|
#!/usr/bin/env python
# coding=utf-8
# by BinSys <binsys@163.com>
# Supports unpacking of mtouch, mtouch-64, mtouch.exe and mandroid.exe bundles
# Readme
# 1. Copy the plugin file MKBundleManager.py into IDA Pro's plugins directory
# 2. Open the target file in IDA and wait for auto-analysis to finish ("AU: idle" in the status bar)
# 3. In the IDA menu, click View -> Open subviews -> Bundled Assembly Manager
# 4. The Bundled Assembly Manager window lists the bundled assemblies
# 5. Use the context-menu commands "Export all files" / "Export file" to save the files you want to modify
# 6. After modifying a file, use the context-menu command "Replace file" to put the modified file back
# 7. A repacked bundle named after the original file plus a timestamp is written to the original program's directory
# Note:
# Problems may occur; check IDA's output window for detailed error messages
# .NET assemblies can be modified with .NET Reflector + the Reflexil plugin
# If the recompressed modified file is larger than the original compressed data it cannot be replaced;
# in that case use Reflexil to strip redundant IL from the modified assembly to reduce its size
MKBundleManager_VERSION = "1.2"
# IDA libraries
import idaapi
import idautils
import idc
from idaapi import Form, Choose2, plugin_t
# Python modules
import io
import sys
import os
import shutil
import struct
import binascii
from datetime import datetime, date, time
import urllib2
import httplib
import zlib
import StringIO
import gzip
import traceback
from struct import *
# filetype_t codes (see ida.hpp) for the two supported input formats.
InputFileType_EXE = 11
InputFileType_MachO = 25
# NOTE(review): these two module-level globals look unused — MKBundleTool keeps
# its own InputFileType / Is64Bit instance attributes; confirm before removing.
InputFileType = -1
Is64Bit = False
# idc string-type constants -> names (used for diagnostics).
string_type_map = {
    0 : "ASCSTR_C", # C-string, zero terminated
    1 : "ASCSTR_PASCAL", # Pascal-style ASCII string (length byte)
    2 : "ASCSTR_LEN2", # Pascal-style, length is 2 bytes
    3 : "ASCSTR_UNICODE", # Unicode string
    4 : "ASCSTR_LEN4", # Delphi string, length is 4 bytes
    5 : "ASCSTR_ULEN2", # Pascal-style Unicode, length is 2 bytes
    6 : "ASCSTR_ULEN4", # Pascal-style Unicode, length is 4 bytes
}
# ida.hpp filetype_t values -> names (used for diagnostics).
filetype_t_map = {
    0 : "f_EXE_old", # MS DOS EXE File
    1 : "f_COM_old", # MS DOS COM File
    2 : "f_BIN", # Binary File
    3 : "f_DRV", # MS DOS Driver
    4 : "f_WIN", # New Executable (NE)
    5 : "f_HEX", # Intel Hex Object File
    6 : "f_MEX", # MOS Technology Hex Object File
    7 : "f_LX", # Linear Executable (LX)
    8 : "f_LE", # Linear Executable (LE)
    9 : "f_NLM", # Netware Loadable Module (NLM)
    10 : "f_COFF", # Common Object File Format (COFF)
    11 : "f_PE", # Portable Executable (PE)
    12 : "f_OMF", # Object Module Format
    13 : "f_SREC", # R-records
    14 : "f_ZIP", # ZIP file (this file is never loaded to IDA database)
    15 : "f_OMFLIB", # Library of OMF Modules
    16 : "f_AR", # ar library
    17 : "f_LOADER", # file is loaded using LOADER DLL
    18 : "f_ELF", # Executable and Linkable Format (ELF)
    19 : "f_W32RUN", # Watcom DOS32 Extender (W32RUN)
    20 : "f_AOUT", # Linux a.out (AOUT)
    21 : "f_PRC", # PalmPilot program file
    22 : "f_EXE", # MS DOS EXE File
    23 : "f_COM", # MS DOS COM File
    24 : "f_AIXAR", # AIX ar library
    25 : "f_MACHO", # Max OS X
}
class BundledAssembly():
    """Record describing one assembly embedded in an mkbundle package."""

    def __init__(self):
        self.Index = 0                      # position in the bundle table
        self.FileItemStructOffset = 0       # EA of the descriptor struct
        self.FileNameOffset = 0             # EA of the file-name string
        self.FileName = ""
        self.FileDataOffset = 0             # EA of the (possibly compressed) payload
        self.FileSize = 0                   # uncompressed size
        self.FileSizeOffset = 0             # EA where FileSize is stored
        self.FileCompressedSizeOffset = 0   # EA of the compressed-size field
        self.FileCompressedSize = 0
        self.IsGZip = ""                    # "Y"/"N"/"": payload starts with gzip magic
        self.FileDataCompressed = ""        # leading payload bytes used for sniffing
        self.IsCompressed = True
        # "Y"/"N"/"N/A": payload starts with b"ME" — presumably a managed
        # (.NET) image marker; confirm.
        self.IsME = False
        pass
class MKBundleTool():
def __init__(self):
self.Is64Bit = False
self.InputFileType = -1
print("Input File:{}".format(GetInputFile()))
print("Input File Path:{}".format(GetInputFilePath()))
print("Idb File Path:{}".format(GetIdbPath()))
print("cpu_name:{}".format(idc.GetShortPrm(idc.INF_PROCNAME).lower()))
self.InputFileType = idc.GetShortPrm(idc.INF_FILETYPE)
#ida.hpp filetype_t f_PE=11 f_MACHO=25
print("InputFileType:{}".format(filetype_t_map.get(self.InputFileType, None)))
if self.InputFileType != InputFileType_EXE and self.InputFileType != InputFileType_MachO:
print "Error,Input file type must is PE or MachO!"
return
if (idc.GetShortPrm(idc.INF_LFLAGS) & idc.LFLG_64BIT) == idc.LFLG_64BIT:
self.Is64Bit = True
else:
self.Is64Bit = False
print("Is64Bit:{}".format(self.Is64Bit))
    def GetBundledAssemblyList(self,UseScreenEA = False):
        """Locate and parse the bundled-assembly table.

        If UseScreenEA is False, the table is found by walking back from the
        known mkbundle decompression-error string; otherwise the current
        cursor address is taken as the table start.  Returns a list of
        BundledAssembly records, or None on failure.
        """
        if not UseScreenEA:
            # Anchor: the error format string emitted by mkbundle's stub.
            StringEA = self.FindStringEA()
            if StringEA == -1:
                print "Can't find StringEA!"
                return
            Func = self.FindUnFunction(StringEA)
            if not Func:
                print "Can't find Func!"
                return
            FuncName = idc.GetFunctionName(Func.startEA)
            print "Found Data Function:" + FuncName
            BundledAssemblyListOffsetsVA = self.FindBundledAssemblyListOffsetsVA(Func.startEA)
            if not BundledAssemblyListOffsetsVA:
                print "Can't find BundledAssemblyListOffsetsVA!"
                return
        else:
            BundledAssemblyListOffsetsVA = ScreenEA()
        print("BundledAssemblyListOffsetsVA:0x{:016X}".format(BundledAssemblyListOffsetsVA))
        #StructOffsetList = self.GetStructOffsetList(BundledAssemblyListOffsetsVA)
        BundledAssemblyListOffsetList = self.GetBundledAssemblyListOffsetList(BundledAssemblyListOffsetsVA)
        if len(BundledAssemblyListOffsetList) == 0:
            print "Can't find BundledAssemblyListOffsetList!"
            return
        if len(BundledAssemblyListOffsetList) > 2:
            # Entry stride reveals the layout: the compressed descriptor has an
            # extra size field, giving 16 bytes per struct (4 pointers, 32-bit)
            # or 32 bytes (64-bit) — see MakeBundledAssemblyStruct.
            difflen = BundledAssemblyListOffsetList[1] - BundledAssemblyListOffsetList[0]
        else:
            difflen = 0
            # NOTE(review): with two or fewer entries the function gives up
            # entirely — confirm that small bundles are intentionally
            # unsupported.
            return None
        if difflen == 16 or difflen == 32 or difflen == -16 or difflen == -32:
            IsCompressed = True
        else:
            IsCompressed = False
        print "IsCompressed:{}".format(IsCompressed)
        #return None
        #print BundledAssemblyListOffsetList
        BundledAssemblys = []
        BundledAssemblyItemIndex = 0
        for BundledAssemblyListOffset in BundledAssemblyListOffsetList:
            BundledAssemblyItem = self.MakeBundledAssemblyStruct(BundledAssemblyListOffset, IsCompressed)
            BundledAssemblyItem.Index = BundledAssemblyItemIndex
            BundledAssemblys.append(BundledAssemblyItem)
            BundledAssemblyItemIndex += 1
        #print BundledAssemblys
        return BundledAssemblys
    def FindStringEA(self):
        """Return the EA of mkbundle's decompression-error string, or -1."""
        searchstr = str("mkbundle: Error %d decompressing data for %s\n")
        # Second variant — presumably the spelling ("decompresing") used by
        # older mkbundle builds; keep as-is.
        searchstr2 = str("Error %d decompresing data for %s\n")
        #Do not use default set up, we'll call setup().
        s = idautils.Strings(default_setup = False)
        # we want C & Unicode strings, and *only* existing strings.
        # NOTE(review): `Strings` is referenced unqualified below although the
        # object is created via idautils.Strings above — confirm IDA injects
        # the name into the global namespace, otherwise this is a NameError.
        s.setup(strtypes=Strings.STR_C | Strings.STR_UNICODE, ignore_instructions = True, display_only_existing_strings = True)
        #loop through strings
        for i, v in enumerate(s):
            if not v:
                #print("Failed to retrieve string at index {}".format(i))
                return -1
            else:
                #print("[{}] ea: {:#x} ; length: {}; type: {}; '{}'".format(i, v.ea,
                #v.length, string_type_map.get(v.type, None), str(v)))
                if str(v) == searchstr or str(v) == searchstr2:
                    return v.ea
        return -1
def FindUnFunction(self, StringEA):
for ref in DataRefsTo(StringEA):
f = idaapi.get_func(ref)
if f:
return f
return None
def FindBundledAssemblyListOffsetsVA(self, FuncEA):
for funcitem in FuncItems(FuncEA):
#print hex(funcitem)
for dataref in DataRefsFrom(funcitem):
return dataref
#print " " + hex(dataref)
return None
    def GetBundledAssemblyListOffsetList(self, DataOffset):
        """Read the NULL-terminated pointer table at DataOffset.

        Each slot is pointer-sized (8 bytes when self.Is64Bit, else 4) and
        holds the address of one bundled-assembly descriptor. Returns the
        list of pointer values, stopping at the first zero slot.
        """
        if self.Is64Bit == True:
            addv = 8            # slot width in bytes
            mf = MakeQword      # mark the slot as data in the IDA database
            vf = Qword          # read the slot value
        else:
            mf = MakeDword
            addv = 4
            vf = Dword
        AsmListStructListOffset = DataOffset
        currentoffset = AsmListStructListOffset
        mf(currentoffset)
        currentvalue = vf(currentoffset)
        currentoffset+=addv
        AsmListStructListOffsetList = []
        # NOTE(review): the first slot is appended unconditionally, so an
        # immediately NULL-terminated table yields [0] rather than [] --
        # callers testing len() == 0 will not detect that case.
        AsmListStructListOffsetList.append(currentvalue)
        while currentvalue != 0:
            mf(currentoffset)
            currentvalue = vf(currentoffset)
            if currentvalue != 0:
                AsmListStructListOffsetList.append(currentvalue)
            currentoffset+=addv
        #print len(AsmListStructListOffsetList)
        #for vv in AsmListStructListOffsetList:
            #print hex(vv)
        return AsmListStructListOffsetList
    def MakeBundledAssemblyStruct(self, FileItemStructOffset, IsCompressed = True):
        """Decode one bundled-assembly descriptor at FileItemStructOffset.

        Descriptor layout (pointer-sized fields, width per self.Is64Bit):
        name pointer, data pointer, file size, and -- when IsCompressed --
        the compressed size. Also sniffs the payload's first bytes: the
        gzip header (1f 8b 08) for compressed entries, ASCII "ME" for
        plain entries. Returns a populated BundledAssembly; the Index
        field is left at its default for the caller to fill in.
        """
        if self.Is64Bit == True:
            addv = 8            # field width in bytes
            mf = MakeQword      # mark the field as data in the database
            vf = Qword          # read the field value
        else:
            mf = MakeDword
            addv = 4
            vf = Dword
        offset = FileItemStructOffset
        mf(offset)
        FileNameOffset = vf(offset)
        FileName = idc.GetString(FileNameOffset)
        offset+=addv
        mf(offset)
        FileDataOffset = vf(offset)
        offset+=addv
        mf(offset)
        FileSize = vf(offset)
        FileSizeOffset = offset
        offset+=addv
        if IsCompressed:
            IsME = "N/A"
            mf(offset)
            FileCompressedSize = vf(offset)
            FileCompressedSizeOffset = offset
            offset+=addv
            IsGZip = ""
            FileDataCompressed = idc.GetManyBytes(FileDataOffset,3)
            b1,b2,b3 = struct.unpack('ccc', FileDataCompressed[0:3])
            # 1f 8b is the gzip magic, 08 the deflate method byte.
            if b1 == '\x1f' and b2 == '\x8b' and b3 == '\x08':
                IsGZip = "Y"
            else:
                IsGZip = "N"
        else:
            IsME = ""
            IsGZip = "N/A"
            FileDataCompressed = idc.GetManyBytes(FileDataOffset,2)
            b1,b2 = struct.unpack('cc', FileDataCompressed[0:2])
            # 4d 45 is ASCII "ME".
            if b1 == '\x4d' and b2 == '\x45':
                IsME = "Y"
            else:
                IsME = "N"
        ba = BundledAssembly()
        ba.FileItemStructOffset = FileItemStructOffset
        ba.FileNameOffset = FileNameOffset
        ba.FileName = FileName
        ba.FileDataOffset = FileDataOffset
        ba.FileSize = FileSize
        ba.FileSizeOffset = FileSizeOffset
        if IsCompressed:
            ba.FileCompressedSizeOffset = FileCompressedSizeOffset
            ba.FileCompressedSize = FileCompressedSize
        ba.IsGZip = IsGZip
        ba.IsME = IsME
        if IsCompressed:
            ba.IsCompressed = "Y"
        else:
            ba.IsCompressed = "N"
        #ba.FileDataCompressed = FileDataCompressed
        return ba
        #return {\
        #    "FileItemStructOffset":FileItemStructOffset, \
        #    "FileNameOffset":FileNameOffset,\
        #    "FileName":FileName,\
        #    "FileDataOffset":FileDataOffset,\
        #    "FileSize":FileSize,\
        #    "FileSizeOffset":FileSizeOffset,\
        #    "FileCompressedSizeOffset":FileCompressedSizeOffset,\
        #    "FileCompressedSize":FileCompressedSize,\
        #    "IsGZip":IsGZip,\
        #    "FileDataCompressed":FileDataCompressed\
        #    }
    # Python Cookbook: a friendlier makedir than the built-in os.mkdir
    # from: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/82465
def _mkdir(self, newdir):
"""works the way a good mkdir should :)
- already exists, silently complete
- regular file in the way, raise an exception
- parent directory(ies) does not exist, make them as well
"""
if os.path.isdir(newdir):
pass
elif os.path.isfile(newdir):
raise OSError("a file with the same name as the desired " \
"dir, '%s', already exists." % newdir)
else:
head, tail = os.path.split(newdir)
if head and not os.path.isdir(head):
_mkdir(head)
#print "_mkdir %s" % repr(newdir)
if tail:
os.mkdir(newdir)
def DecompressZLib(self, Data,Path):
#compressedstream = StringIO.StringIO(Data)
data2 = zlib.decompress(Data)
f = open(Path, 'wb')
f.write(data2)
f.close()
def DecompressGzipTo(self, Data,Path):
compressedstream = StringIO.StringIO(Data)
gziper = gzip.GzipFile(fileobj=compressedstream)
data2 = gziper.read() # 读取解压缩后数据
f = open(Path, 'wb')
f.write(data2)
f.close()
def DecompressFileTo(self, FileItem,OutputDir):
if FileItem.IsME == "Y":
extname = ".ME"
else:
extname = ""
newpath = '{}\\{}{}'.format(OutputDir, FileItem.FileName, extname)
if FileItem.IsCompressed == "Y":
FileDataCompressed = idc.GetManyBytes(FileItem.FileDataOffset,FileItem.FileCompressedSize)
if FileItem.IsGZip == "Y":
self.DecompressGzipTo(FileDataCompressed,newpath)
else:
self.DecompressZLib(FileDataCompressed,newpath)
else:
FileData = idc.GetManyBytes(FileItem.FileDataOffset,FileItem.FileSize)
f = open(newpath, 'wb')
f.write(FileData)
f.close()
pass
def CompressGzipToData(self, data):
buf = StringIO.StringIO()
f = gzip.GzipFile(mode='wb',compresslevel=9,fileobj=buf)
try:
f.write(data)
finally:
f.close()
compresseddata = buf.getvalue()
return compresseddata
def CompressZLibToData(self, data):
compressobj = zlib.compressobj(zlib.Z_BEST_COMPRESSION, zlib.DEFLATED, zlib.MAX_WBITS, 8 ,zlib.Z_DEFAULT_STRATEGY)
data2 = compressobj.compress(data)
data2 += compressobj.flush()
return data2
    def ReplaceFile(self, FileItem,BundleFilePath,NewFilePath):
        """Patch NewFilePath's content over FileItem inside a copy of the bundle.

        The replacement (compressed when the entry is compressed) must not be
        larger than the original payload; the patched image is written to a
        timestamped copy next to BundleFilePath, never in place.
        """
        print "Start replace file: {}".format(NewFilePath)
        if not os.path.exists(BundleFilePath):
            print "BundleFilePath error!"
            return
        if not os.path.isfile(NewFilePath):
            print "NewFilePath error!"
            return
        f = open(NewFilePath, 'rb')
        NewFileData = f.read()
        f.close()
        # Compress the replacement the same way the original entry was stored,
        # then refuse to proceed if it would not fit in the existing slot.
        if FileItem.IsCompressed == "Y":
            if FileItem.IsGZip == "Y":
                compresseddata = self.CompressGzipToData(NewFileData)
            else:
                compresseddata = self.CompressZLibToData(NewFileData)
            sizediff = FileItem.FileCompressedSize - len(compresseddata)
            print "FileCompressedSize - compresseddata = 0x{:016X} - 0x{:016X} = 0x{:016X}".format(FileItem.FileCompressedSize,len(compresseddata),sizediff)
            if sizediff < 0:
                print "FileCompressedSize < compresseddata,can't replace!"
                return
        else:
            compresseddata = NewFileData
            sizediff = FileItem.FileSize - len(compresseddata)
            print "FileSize - compresseddata = 0x{:016X} - 0x{:016X} = 0x{:016X}".format(FileItem.FileSize,len(compresseddata),sizediff)
            if sizediff < 0:
                print "FileSize < compresseddata,can't replace!"
                return
        # Translate database addresses into raw file offsets.
        FileSizeOffset = idaapi.get_fileregion_offset(FileItem.FileSizeOffset)
        if FileItem.IsCompressed == "Y":
            FileCompressedSizeOffset = idaapi.get_fileregion_offset(FileItem.FileCompressedSizeOffset)
        FileDataOffset = idaapi.get_fileregion_offset(FileItem.FileDataOffset)
        #ea = idaapi.get_fileregion_ea(offset)
        NewFileDataSize = len(NewFileData)
        if FileItem.IsCompressed == "Y":
            NewCompressedDataSize = len(compresseddata)
            print "FileSizeOffset = 0x{:016X},FileCompressedSizeOffset = 0x{:016X},FileDataOffset = 0x{:016X}".format(FileSizeOffset,FileCompressedSizeOffset,FileDataOffset)
        else:
            print "FileSizeOffset = 0x{:016X},FileDataOffset = 0x{:016X}".format(FileSizeOffset,FileDataOffset)
        input_file_dir = os.path.dirname(BundleFilePath)
        input_file_fullname = os.path.basename(BundleFilePath)
        input_file_name,input_file_extname = os.path.splitext(input_file_fullname)
        # splitext example: os.path.splitext(r"c:\python\hello.py")
        # --> ("c:\\python\\hello", ".py")
        output_file_fullname = '{}_{:%Y%m%d%H%M%S%f}{}'.format(input_file_name, datetime.now(),input_file_extname)
        output_file_fullpath = os.path.join(input_file_dir,output_file_fullname)
        #shutil.copy(BundleFilePath, output_file_fullpath)
        print "new BundleFilePath path:{}".format(output_file_fullpath)
        fp = open(BundleFilePath,"rb")
        data = fp.read() # read the whole bundle file into memory
        fp.close()
        # NOTE(review): struct.pack("q"/"l") uses native size/endianness;
        # on many 64-bit platforms "l" packs 8 bytes, not 4 -- confirm this
        # matches the target binary's field width ("<i"/"<q" would be explicit).
        if self.Is64Bit == True:
            NewFileDataSizeData = struct.pack("q",NewFileDataSize)
            if FileItem.IsCompressed == "Y":
                NewCompressedDataSizeData = struct.pack("q",NewCompressedDataSize)
        else:
            NewFileDataSizeData = struct.pack("l",NewFileDataSize)
            if FileItem.IsCompressed == "Y":
                NewCompressedDataSizeData = struct.pack("l",NewCompressedDataSize)
        # str is immutable; convert to a list so individual bytes can be patched
        data = list(data)
        #try:
        #    data[0] = 'a'
        #    data[1] = 'a'
        #    data[2] = 'a'
        #except Exception,e:
        #    print Exception,":",e
        # range(m, n) yields the integers from m to n-1
        #update FileSize
        for i in range(0, len(NewFileDataSizeData)):
            data[FileSizeOffset + i] = NewFileDataSizeData[i]
        ##update FileCompressedSize
        if FileItem.IsCompressed == "Y":
            for i in range(0, len(NewCompressedDataSizeData)):
                data[FileCompressedSizeOffset + i] = NewCompressedDataSizeData[i]
        ##clear FileData
        if FileItem.IsCompressed == "Y":
            for i in range(0, FileItem.FileCompressedSize):
                data[FileDataOffset + i] = chr(0x0)
            pass
        else:
            for i in range(0, FileItem.FileSize):
                data[FileDataOffset + i] = chr(0x0)
            pass
        ##update FileData
        for i in range(0, len(compresseddata)):
            data[FileDataOffset + i] = compresseddata[i]
        pass
        fp2 = open(output_file_fullpath,"wb")
        # join the byte list back into a str and write it out
        fp2.write(''.join(data))# rewrite the patched image
        fp2.close()
        print "replace ok!"
class ReplaceFileForm(Form):
    """IDA form asking for the bundle file and the replacement file.

    The template's button/label text is Chinese and is part of the UI,
    so it is kept verbatim.
    """
    def __init__(self,bundleFile,impFile):
        Form.__init__(self,
r"""BUTTON YES* 替换
BUTTON CANCEL 取消
请选择文件(文件压缩后数据大小必须小于替换前压缩后数据大小)
{FormChangeCb}
<##选择被打包的文件:{bundleFile}>
<##选择修改后的文件:{impFile}>
""".decode('utf-8').encode(sys.getfilesystemencoding()), { 'bundleFile': Form.FileInput(open=True,value=bundleFile),
        'impFile': Form.FileInput(open=True,value=impFile),
        'FormChangeCb': Form.FormChangeCb(self.OnFormChange),
        })
        self.Compile()
    def OnFormChange(self, fid):
        """Form event callback; fid -1 = initialization, -2 = OK pressed."""
        # Form initialization
        if fid == -1:
            self.SetFocusedField(self.bundleFile)
            self.EnableField(self.bundleFile, True)
            self.EnableField(self.impFile, True)
        # Form OK pressed
        elif fid == -2:
            pass
        elif fid == self.bundleFile.id:
            pass
        return 1
class SaveItemsToDirForm(Form):
    """IDA form asking for an output directory (UI text is Chinese, kept verbatim)."""
    def __init__(self,defaultpath):
        Form.__init__(self,
r"""BUTTON YES* 保存
BUTTON CANCEL 取消
请选择输出目录
{FormChangeCb}
<##输出目录:{impFile}>
""".decode('utf-8').encode(sys.getfilesystemencoding()), {
        'impFile': Form.DirInput(value=defaultpath),
        'FormChangeCb': Form.FormChangeCb(self.OnFormChange),
        })
        self.Compile()
    def OnFormChange(self, fid):
        """Form event callback; fid -1 = initialization, -2 = OK pressed."""
        # Form initialization
        if fid == -1:
            self.SetFocusedField(self.impFile)
            self.EnableField(self.impFile, True)
        # Form OK pressed
        elif fid == -2:
            pass
        return 1
class BundledAssemblyManagerView(Choose2):
    """Choose2 list of the bundled assemblies found in the loaded binary.

    Context-menu commands allow exporting one or all files and replacing a
    file inside a copy of the bundle on disk.
    """
    def __init__(self):
        Choose2.__init__(self,
            "Bundled Assembly Manager",
            [
                ["Index", 6 | Choose2.CHCOL_DEC],
                ["FileItemStructOffset", 18 | Choose2.CHCOL_HEX],
                ["FileNameOffset", 18 | Choose2.CHCOL_HEX],
                ["FileDataOffset", 18 | Choose2.CHCOL_HEX],
                ["FileSize", 18 | Choose2.CHCOL_HEX],
                ["FileSizeOffset", 18 | Choose2.CHCOL_HEX],
                ["FileCompressedSizeOffset", 18 | Choose2.CHCOL_HEX],
                ["FileCompressedSize", 18 | Choose2.CHCOL_HEX],
                ["IsCompressed", 4 | Choose2.CHCOL_PLAIN],
                ["IsGZip", 4 | Choose2.CHCOL_PLAIN],
                ["IsME", 4 | Choose2.CHCOL_PLAIN],
                ["FileName", 18 | Choose2.CHCOL_PLAIN]
            ])
        #self.popup_names = ["Insert", "Delete", "Edit", "Refresh"]
        self.icon = 47
        # MKBundleTool instance; created lazily in refreshitems().
        self.tool = None
        # Parallel lists: rendered rows and the BundledAssembly objects behind them.
        self.items = []
        self.items_data = []
        # Command callbacks
        self.cmd_Items_SaveAs = None
        self.cmd_Item_SaveAs = None
        self.cmd_Item_ReplaceBy = None
    def show(self):
        """Populate and display the chooser, then register the popup commands."""
        try:
            # Initialize/Refresh the view
            if self.refreshitems():
                if self.Show() < 0: return False
            #self.Refresh()
        except:
            traceback.print_exc()
        # Attempt to open the view
        if self.cmd_Items_SaveAs == None:
            self.cmd_Items_SaveAs = self.AddCommand("导出全部文件...".decode('utf-8').encode(sys.getfilesystemencoding()), flags = idaapi.CHOOSER_POPUP_MENU | idaapi.CHOOSER_NO_SELECTION, icon=139)
        if self.cmd_Item_SaveAs == None:
            self.cmd_Item_SaveAs = self.AddCommand("导出文件...".decode('utf-8').encode(sys.getfilesystemencoding()), flags = idaapi.CHOOSER_POPUP_MENU, icon=139)
        if self.cmd_Item_ReplaceBy == None:
            self.cmd_Item_ReplaceBy = self.AddCommand("替换文件...".decode('utf-8').encode(sys.getfilesystemencoding()), flags = idaapi.CHOOSER_POPUP_MENU, icon=139)
        return True
    def refreshitems(self):
        """Rescan the database and rebuild the row model; True on success."""
        #print "refreshitems"
        self.items_data = []
        self.items = []
        try:
            #-1:cancel,0-no,1-ok
            UseScreenEAInt = idc.AskYN(1,"是否自动获取数据位置?(否则使用ScreenEA)".decode('utf-8').encode(sys.getfilesystemencoding()))
            # Cancel is treated the same as "yes" (auto-detect).
            if UseScreenEAInt == -1:
                UseScreenEAInt = 1
            if UseScreenEAInt == 1:
                UseScreenEA = False
            else:
                UseScreenEA = True
            print "UseScreenEA:{}".format(UseScreenEA)
            self.tool = MKBundleTool()
            asms = self.tool.GetBundledAssemblyList(UseScreenEA)
            if not asms:
                return False
            for BundledAssemblyItem in asms:
                #print BundledAssemblyItem
                #print("FileItemStructOffset:{:016X} FileNameOffset:{:016X}
                #FileDataOffset:{:016X} FileSize:{:016X} FileCompressedSize:{:016X}
                #IsGZip:{} FileName:{}"\
                #.format( \
                #BundledAssemblyItem.FileItemStructOffset , \
                #BundledAssemblyItem.FileNameOffset,\
                #BundledAssemblyItem.FileDataOffset,\
                #BundledAssemblyItem.FileSize,\
                #BundledAssemblyItem.FileCompressedSize,\
                #BundledAssemblyItem.IsGZip,\
                #BundledAssemblyItem.FileName))
                # Hex column width follows the target's pointer size.
                if self.tool.Is64Bit:
                    fstr = "0x%016X"
                else:
                    fstr = "0x%08X"
                self.items_data.append(BundledAssemblyItem)
                self.items.append(["%d" % BundledAssemblyItem.Index,
                    fstr % BundledAssemblyItem.FileItemStructOffset,
                    fstr % BundledAssemblyItem.FileNameOffset,
                    fstr % BundledAssemblyItem.FileDataOffset,
                    fstr % BundledAssemblyItem.FileSize,
                    fstr % BundledAssemblyItem.FileSizeOffset,
                    fstr % BundledAssemblyItem.FileCompressedSizeOffset,
                    fstr % BundledAssemblyItem.FileCompressedSize,
                    BundledAssemblyItem.IsCompressed,
                    BundledAssemblyItem.IsGZip,
                    BundledAssemblyItem.IsME,
                    BundledAssemblyItem.FileName])
            return True
        except:
            traceback.print_exc()
        return False
    def OnCommand(self, n, cmd_id):
        """Dispatch a popup-menu command; n is the selected row index."""
        if self.tool == None:
            return 1;
        if cmd_id == self.cmd_Items_SaveAs:
            OutputDir = '{}_{:%Y%m%d%H%M%S%f}'.format(GetInputFilePath(), datetime.now())
            f = SaveItemsToDirForm(OutputDir)
            # Execute the form
            ok = f.Execute()
            if ok == 1:
                try:
                    imp_file = f.impFile.value
                    # NOTE(review): the directory created is the user-edited
                    # imp_file, but extraction still targets OutputDir -- if
                    # the user changes the path in the form, the two diverge.
                    self.tool._mkdir(imp_file)
                    for item in self.items_data:
                        self.tool.DecompressFileTo(item,OutputDir)
                except:
                    traceback.print_exc()
            # Dispose the form
            f.Free()
        elif cmd_id == self.cmd_Item_SaveAs:
            item = self.items_data[n]
            OutputDir = '{}_{:%Y%m%d%H%M%S%f}'.format(GetInputFilePath(), datetime.now())
            f = SaveItemsToDirForm(OutputDir)
            # Execute the form
            ok = f.Execute()
            if ok == 1:
                try:
                    imp_file = f.impFile.value
                    self.tool._mkdir(imp_file)
                    self.tool.DecompressFileTo(item,OutputDir)
                except:
                    traceback.print_exc()
            # Dispose the form
            f.Free()
        elif cmd_id == self.cmd_Item_ReplaceBy:
            item = self.items_data[n]
            f = ReplaceFileForm(GetInputFilePath(),item.FileName)
            # Execute the form
            ok = f.Execute()
            if ok == 1:
                try:
                    self.tool.ReplaceFile(item,f.bundleFile.value,f.impFile.value)
                except:
                    traceback.print_exc()
            f.Free()
            pass
        return 1
    def OnClose(self):
        # Drop command handles so show() re-registers them next time.
        self.cmd_Items_SaveAs = None
        self.cmd_Item_SaveAs = None
        self.cmd_Item_ReplaceBy = None
    def OnSelectLine(self, n):
        # Double-click: jump the disassembly view to the descriptor.
        idaapi.jumpto(self.items_data[n].FileItemStructOffset)
        pass
    def OnGetLine(self, n):
        return self.items[n]
    def OnGetIcon(self, n):
        # Empty list
        if not len(self.items) > 0:
            return -1
        #return -1
        return 137
    def OnGetSize(self):
        return len(self.items)
    def OnRefresh(self, n):
        #print "OnRefresh"
        self.refreshitems()
        return n
    def OnActivate(self):
        #print "OnActivate"
        self.refreshitems()
class MKBundleManager():
    """Owns the plugin's menu entries and its Bundled Assembly Manager view."""
    def __init__(self):
        # Contexts returned by idaapi.add_menu_item, kept for later removal.
        self.addmenu_item_ctxs = list()
        self.bundledAssemblyManagerView = None
    #--------------------------------------------------------------------------
    # Menu Items
    #--------------------------------------------------------------------------
    def add_menu_item_helper(self, menupath, name, hotkey, flags, pyfunc, args):
        """Register a single menu item; return 0 on success, 1 on failure."""
        menu_ctx = idaapi.add_menu_item(menupath, name, hotkey, flags, pyfunc, args)
        if menu_ctx is None:
            return 1
        self.addmenu_item_ctxs.append(menu_ctx)
        return 0
    def add_menu_items(self):
        """Install every plugin menu item; return 1 as soon as one fails, else 0."""
        if self.add_menu_item_helper("View/Open subviews/Problems", "Bundled Assembly Manager", "", 1, self.Show_BundledAssemblyManagerView, None):
            return 1
        return 0
    def del_menu_items(self):
        """Remove all menu items registered via add_menu_item_helper."""
        for menu_ctx in self.addmenu_item_ctxs:
            idaapi.del_menu_item(menu_ctx)
    #--------------------------------------------------------------------------
    # View Callbacks
    #--------------------------------------------------------------------------
    # BundledAssemblyManagerView View
    def Show_BundledAssemblyManagerView(self):
        """Close any previous view, then create and display a fresh one."""
        try:
            previous = self.bundledAssemblyManagerView
            if previous is not None:
                previous.Close()
                self.bundledAssemblyManagerView = None
            fresh_view = BundledAssemblyManagerView()
            self.bundledAssemblyManagerView = fresh_view
            fresh_view.show()
        except:
            traceback.print_exc()
#--------------------------------------------------------------------------
# Plugin
#--------------------------------------------------------------------------
class MKBundleManager_t(plugin_t):
    # IDA plugin descriptor for the MK Bundle Manager.
    flags = idaapi.PLUGIN_UNL
    comment = "MK Bundle Manager."
    help = "MK Bundle Manager.."
    wanted_name = "MK Bundle Manager"
    wanted_hotkey = ""
    def init(self):
        """Plugin load hook: create the singleton manager and its menu items."""
        global MKBundleManagerInstance
        # Check if already initialized
        if not 'MKBundleManagerInstance' in globals():
            MKBundleManagerInstance = MKBundleManager()
            if MKBundleManagerInstance.add_menu_items():
                print "Failed to initialize MK Bundle Manager."
                MKBundleManagerInstance.del_menu_items()
                del MKBundleManagerInstance
                return idaapi.PLUGIN_SKIP
            else:
                print("Initialized MKBundleManager v%s (c) BinSys <binsys@163.com>" % MKBundleManager_VERSION)
        return idaapi.PLUGIN_KEEP
    def run(self, arg):
        """Hotkey/menu hook: wait for auto-analysis, then show the manager view."""
        global MKBundleManagerInstance
        idc.Wait()
        MKBundleManagerInstance.Show_BundledAssemblyManagerView()
    def term(self):
        """Plugin unload hook; nothing to tear down here."""
        pass
def PLUGIN_ENTRY():
    """IDA plugin entry point: construct and hand back the plugin instance."""
    plugin = MKBundleManager_t()
    return plugin
#--------------------------------------------------------------------------
# Script / Testing
#--------------------------------------------------------------------------
def MKBundleManager_main():
    """Development entry point: rebuild the manager singleton and show its view.

    Tears down a previously-registered instance (menu items included) before
    creating a fresh one, so the script can be re-run inside the same session.
    """
    global MKBundleManagerInstance
    if 'MKBundleManagerInstance' in globals():
        MKBundleManagerInstance.del_menu_items()
        del MKBundleManagerInstance
    MKBundleManagerInstance = MKBundleManager()
    MKBundleManagerInstance.add_menu_items()
    MKBundleManagerInstance.Show_BundledAssemblyManagerView()
if __name__ == '__main__':
    # Developer hook: uncomment MKBundleManager_main() to run the script
    # directly instead of through IDA's plugin loader. Inert by default.
    try:
        #print("Initialized MKBundleManager %s (c) BinSys <binsys@163.com>" % MKBundleManager_VERSION)
        #MKBundleManager_main() #for Developer only
        pass
    except:
        traceback.print_exc()
        pass
|
{
"content_hash": "0bc3e2701260f7e8c73e99172defa906",
"timestamp": "",
"source": "github",
"line_count": 1003,
"max_line_length": 182,
"avg_line_length": 27.60319042871386,
"alnum_prop": 0.6403236292711117,
"repo_name": "azraelrabbit/MKBundleManager",
"id": "7e6182e6158141294696eff66cf5bf287faa6a09",
"size": "28528",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "MKBundleManager/MKBundleManager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "28528"
}
],
"symlink_target": ""
}
|
from shared_schema_tenants.settings import get_setting
from shared_schema_tenants.helpers.tenant_json_field import TenantJSONFieldHelper
class TenantExtraDataHelper(TenantJSONFieldHelper):
    """JSON-field helper bound to the tenant's ``extra_data`` field.

    Field names and defaults come from the package settings.
    """

    def __init__(self, instance=None):
        field_config = {
            'instance_field_name': 'extra_data',
            'instance': instance,
            'tenant_fields': get_setting('DEFAULT_TENANT_EXTRA_DATA_FIELDS'),
            'tenant_default_fields_values': get_setting('DEFAULT_TENANT_EXTRA_DATA'),
        }
        super(TenantExtraDataHelper, self).__init__(**field_config)
|
{
"content_hash": "4ef7a03bb3790f3c65107ecd0dc45546",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 82,
"avg_line_length": 42.333333333333336,
"alnum_prop": 0.7322834645669292,
"repo_name": "hugobessa/django-shared-schema-tenants",
"id": "3da3330e311ac31177bb1a6104f2141a7e30a528",
"size": "508",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "shared_schema_tenants/helpers/tenant_extra_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1653"
},
{
"name": "Python",
"bytes": "234851"
}
],
"symlink_target": ""
}
|
from PyQt4 import QtCore, QtGui
class SqChatWidget(QtGui.QWidget):
    """Chat panel: read-only scrollback, message editor, Send/Settings buttons."""

    def __init__(self, parent=None):
        super(SqChatWidget, self).__init__(parent)
        # Action buttons, sized to their contents.
        self.sendBtn = QtGui.QPushButton('Send', self)
        self.sendBtn.resize(self.sendBtn.sizeHint())
        self.settingsBtn = QtGui.QPushButton('Settings', self)
        self.settingsBtn.resize(self.settingsBtn.sizeHint())
        # Button row: Settings on the left, Send pushed to the right.
        button_row = QtGui.QHBoxLayout()
        button_row.addWidget(self.settingsBtn)
        button_row.addStretch(1)
        button_row.addWidget(self.sendBtn)
        # Scrollback above the editable message box.
        self.log = QtGui.QTextEdit(self)
        self.log.setReadOnly(True)
        self.message = QtGui.QTextEdit(self)
        # Input stays locked until explicitly enabled.
        self.disableMessageInput()
        column = QtGui.QVBoxLayout()
        column.addWidget(self.log)
        column.addWidget(self.message)
        column.addLayout(button_row)
        self.setLayout(column)

    def clearLog(self):
        """Wipe the scrollback."""
        self.log.clear()

    def clearMessage(self):
        """Wipe the message editor."""
        self.message.clear()

    def getMessage(self):
        """Return the editor contents as a plain ``str``."""
        return str(self.message.toPlainText())

    def addLogMessage(self, user, message):
        """Append a ``user > message`` line (plus a blank spacer) to the log."""
        self.log.append("<b>{0} ></b> {1}".format(user, message))
        self.log.append("")

    def logError(self, message):
        """Log *message* styled as an error (red)."""
        self.addLogNotification(message, "red")

    def logNote(self, message):
        """Log *message* styled as a note (blue)."""
        self.addLogNotification(message, "blue")

    def logCommand(self, message):
        """Log *message* styled as a command (green)."""
        self.addLogNotification(message, "green")

    def addLogNotification(self, notice, color="black"):
        """Append a colored notification line (plus a blank spacer) to the log."""
        styled = "<span style='color:{0}'>{1}</span>".format(color, notice)
        self.log.append(styled)
        self.log.append("")

    def disableMessageInput(self):
        """Lock the message editor."""
        self.message.setReadOnly(True)

    def enableMessageInput(self):
        """Unlock the message editor."""
        self.message.setReadOnly(False)
|
{
"content_hash": "5fe18caf8f7e68903b34cac66db7d2b7",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 72,
"avg_line_length": 28.214285714285715,
"alnum_prop": 0.6131645569620253,
"repo_name": "destinmoulton/squabble",
"id": "f5cada3dbb26aa00433e5b96bcfd8f9c76a011f1",
"size": "1975",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pythonclient/squabblegui/sqchatwidget.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groff",
"bytes": "28"
},
{
"name": "JavaScript",
"bytes": "5124"
},
{
"name": "Python",
"bytes": "1872727"
},
{
"name": "Shell",
"bytes": "3785"
}
],
"symlink_target": ""
}
|
import struct
from yabgp.message.attribute import Attribute
from yabgp.message.attribute import AttributeID
from yabgp.message.attribute import AttributeFlag
from yabgp.common import exception as excep
from yabgp.common import constants as bgp_cons
class Community(Attribute):
    """
    COMMUNITIES path attribute is an optional
    transitive attribute of variable length. The attribute consists of a
    set of four octet values, each of which specify a community. All
    routes with this attribute belong to the communities listed in the
    attribute.
    The COMMUNITIES attribute has Type Code 8.
    http://www.iana.org/assignments/bgp-well-known-communities/bgp-well-known-communities.xml
    """
    ID = AttributeID.COMMUNITY
    FLAG = AttributeFlag.OPTIONAL + AttributeFlag.TRANSITIVE

    @classmethod
    def parse(cls, value):
        """
        Parse the raw COMMUNITY attribute value.

        :param value: raw attribute bytes; consecutive 16-bit pairs
            (ASN, local value) form one 32-bit community each.
        :return: list of community strings -- well-known names where
            recognised, otherwise ``"asn:value"``.
        :raises UpdateMessageError: if the bytes cannot be unpacked.
        """
        community = []
        if not value:
            return community
        try:
            # Floor division keeps the count an int on both Python 2 and 3
            # (plain `/` would yield a float under Python 3).
            length = len(value) // 2
            value_list = list(struct.unpack('!%dH' % length, value))
        except Exception:
            raise excep.UpdateMessageError(
                sub_error=bgp_cons.ERR_MSG_UPDATE_ATTR_LEN,
                data=value)
        while value_list:
            # Recombine the two 16-bit halves into the 32-bit community
            # (clearer and cheaper than the old 16 * 16 * 16 * 16 multiply).
            value_type = (value_list[0] << 16) + value_list[1]
            if value_type in bgp_cons.WELL_KNOW_COMMUNITY_INT_2_STR:
                community.append(bgp_cons.WELL_KNOW_COMMUNITY_INT_2_STR[value_type])
            else:
                community.append("%s:%s" % (value_list[0], value_list[1]))
            value_list = value_list[2:]
        return community

    @classmethod
    def construct(cls, value):
        """
        Construct the wire form of a COMMUNITY path attribute.

        :param value: iterable of community strings (well-known names or
            ``"asn:value"``).
        :return: flag byte + type byte + length byte + packed communities.
        """
        community_hex = b''
        for community in value:
            if community.upper() in bgp_cons.WELL_KNOW_COMMUNITY_STR_2_INT:
                comm_int = bgp_cons.WELL_KNOW_COMMUNITY_STR_2_INT[community.upper()]
            else:
                parts = community.split(':')
                comm_int = (int(parts[0]) << 16) + int(parts[1])
            community_hex += struct.pack('!I', comm_int)
        # NOTE(review): the one-byte length field overflows past 63
        # communities (len > 255); extended length is not handled here.
        return struct.pack('!B', cls.FLAG) + struct.pack('!B', cls.ID) \
            + struct.pack('!B', len(community_hex)) + community_hex
|
{
"content_hash": "f98099cc5e6ad12b295577d1804c8e05",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 97,
"avg_line_length": 38.50769230769231,
"alnum_prop": 0.5813024370755094,
"repo_name": "abn/yabgp",
"id": "14ca60ac0ce6af5d8eac00866f122a00c8993aed",
"size": "3135",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yabgp/message/attribute/community.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "249675"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.utils.translation import gettext_lazy as _
from .models import User, CocRecord
from .forms import AdminUserChangeForm, UserCreationForm
@admin.register(User)
class UserAdmin(UserAdmin):
    """Admin configuration for the custom email-based User model.

    NOTE(review): this class deliberately shadows the imported
    django.contrib.auth.admin.UserAdmin it inherits from.
    """
    # Change-form layout: credentials, profile, permissions, timestamps.
    fieldsets = (
        (
            None,
            {'fields': ('email', 'password')}
        ),
        (
            _('Personal info'),
            {
                'fields': (
                    'speaker_name', 'bio', 'photo',
                    'twitter_id', 'github_id', 'facebook_profile_url',
                ),
            },
        ),
        (
            _('Permissions'),
            {
                'fields': (
                    'verified', 'is_active', 'is_staff', 'is_superuser',
                    'groups', 'user_permissions',
                ),
            },
        ),
        (
            _('Important dates'),
            {'fields': ('last_login', 'date_joined')},
        ),
    )
    # Add-form layout: minimal fields needed to create an account.
    add_fieldsets = (
        (
            None, {
                'classes': ('wide',),
                'fields': (
                    'email', 'password1', 'password2',
                    'speaker_name', 'bio', 'verified',
                ),
            },
        ),
    )
    form = AdminUserChangeForm
    add_form = UserCreationForm
    list_display = ('email', 'is_staff', 'as_hash')
    list_filter = (
        'verified', 'is_active', 'is_staff', 'is_superuser',
        'groups',
    )
    search_fields = ('email',)
    ordering = ('email',)
    filter_horizontal = ('groups', 'user_permissions',)
@admin.register(CocRecord)
class CocRecordAdmin(admin.ModelAdmin):
    """Admin list view for code-of-conduct agreement records."""
    list_display = ('user', 'coc_version', 'agreed_at', )
    list_filter = ('coc_version', )
    # raw_id widget avoids rendering a select over every user.
    raw_id_fields = ('user', )
|
{
"content_hash": "1ac445169ed999eaf799a79a40da83c6",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 72,
"avg_line_length": 26.26086956521739,
"alnum_prop": 0.46467991169977924,
"repo_name": "pycontw/pycontw2016",
"id": "e663a2b0cd271900b74288f59813be52fb0fb908",
"size": "1812",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/users/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "236762"
},
{
"name": "HTML",
"bytes": "605550"
},
{
"name": "JavaScript",
"bytes": "24923"
},
{
"name": "Python",
"bytes": "479686"
},
{
"name": "Shell",
"bytes": "389"
}
],
"symlink_target": ""
}
|
import os
import json
import numpy as np
from pprint import pprint
from sys import argv

#args : SUBJECT_DIR, ${overlapFlag}, JSONTABLE filename, number_ROIS
subject_dir = argv[1]
overlapName = argv[2]
jsonFile = argv[3]
nb_ROIS = argv[4]  # kept for CLI compatibility; not used below

DIR_Surfaces = os.path.join(subject_dir, 'OutputSurfaces' + overlapName, 'labelSurfaces')

# Load the JSON ROI table.
with open(jsonFile) as data_file:
    data = json.load(data_file)

# Reset every entry's matrix row before reassigning.
for seed in data:
    seed['MatrixRow'] = -1

# For each ROI whose label surface exists, write its path into the seed list
# and record its row index in the connectivity matrix back into the table.
seedPath = os.path.join(subject_dir, 'seeds.txt')
seedID = 0
with open(seedPath, 'w') as seedList:
    for j in data:
        filename = os.path.join(DIR_Surfaces, str(j["AAL_ID"]) + ".asc")
        if os.path.isfile(filename):
            j['MatrixRow'] = seedID
            seedID = seedID + 1
            seedList.write(filename + "\n")

# Persist the updated MatrixRow assignments.
with open(jsonFile, 'w') as txtfile:
    json.dump(data, txtfile, indent = 2)
|
{
"content_hash": "e9800a5ae5aaf42f1959cc15209d6886",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 89,
"avg_line_length": 25.53061224489796,
"alnum_prop": 0.6514788169464428,
"repo_name": "NIRALUser/CIVILITY",
"id": "c6a4bd147f31748a3e3b565f174b1b95b44391d7",
"size": "1251",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/civility-tractography/scripts/writeSeedList.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12590"
},
{
"name": "HTML",
"bytes": "34045"
},
{
"name": "JavaScript",
"bytes": "118585"
},
{
"name": "Python",
"bytes": "20599"
},
{
"name": "Shell",
"bytes": "8424"
}
],
"symlink_target": ""
}
|
import importlib
import os
from pathlib import Path
import pytest
def test_testing_dbs(tmp_path_factory: pytest.TempPathFactory):
    """Run the SQL tutorial's test from a fresh temp cwd, cleaning test.db.

    BUG FIX: the original never restored the working directory when the
    tutorial test raised, leaking the chdir into subsequent tests; the body
    is now wrapped in try/finally.
    """
    tmp_path = tmp_path_factory.mktemp("data")
    cwd = os.getcwd()
    os.chdir(tmp_path)
    try:
        test_db = Path("./test.db")
        if test_db.is_file():  # pragma: nocover
            test_db.unlink()
        # Import while creating the client to create the DB after starting the test session
        from docs_src.sql_databases.sql_app.tests import test_sql_app
        # Ensure import side effects are re-executed
        importlib.reload(test_sql_app)
        test_sql_app.test_create_user()
        if test_db.is_file():  # pragma: nocover
            test_db.unlink()
    finally:
        # Always restore the original working directory.
        os.chdir(cwd)
|
{
"content_hash": "781c8fb132e4e5a40cce650199848da1",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 87,
"avg_line_length": 30,
"alnum_prop": 0.6826086956521739,
"repo_name": "tiangolo/fastapi",
"id": "6f667dea03ce191393602b07ed5049d53619f66e",
"size": "690",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_tutorial/test_sql_databases/test_testing_databases.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "25"
},
{
"name": "HTML",
"bytes": "187"
},
{
"name": "Python",
"bytes": "1928986"
},
{
"name": "Shell",
"bytes": "1383"
}
],
"symlink_target": ""
}
|
"""Switch between depending on pyglib.app or an OSS replacement."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=g-import-not-at-top
# pylint: disable=wildcard-import
from . import control_imports
if control_imports.USE_OSS and control_imports.OSS_APP:
from tensorflow.python.platform.default._app import *
else:
from tensorflow.python.platform.google._app import *
# Import 'flags' into this module
from tensorflow.python.platform import flags # pylint: disable=unused-import
|
{
"content_hash": "3d2bc40bd7763e9d7448732592ec7130",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 77,
"avg_line_length": 37.733333333333334,
"alnum_prop": 0.773851590106007,
"repo_name": "RyanYoung25/tensorflow",
"id": "4bc45464b5d4176a0c219c89531e3bc2e59d3bba",
"size": "1244",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tensorflow/python/platform/app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "151630"
},
{
"name": "C++",
"bytes": "6922849"
},
{
"name": "CMake",
"bytes": "29325"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "HTML",
"bytes": "657597"
},
{
"name": "Java",
"bytes": "50361"
},
{
"name": "JavaScript",
"bytes": "16175"
},
{
"name": "Jupyter Notebook",
"bytes": "777942"
},
{
"name": "Objective-C",
"bytes": "1288"
},
{
"name": "Protocol Buffer",
"bytes": "89536"
},
{
"name": "Python",
"bytes": "3835693"
},
{
"name": "Shell",
"bytes": "66697"
},
{
"name": "TypeScript",
"bytes": "329009"
}
],
"symlink_target": ""
}
|
'''
This modules provides a method to parse an ISO 8601:2004 date string to a
python datetime.date instance.
It supports all basic, extended and expanded formats as described in the ISO
standard. The only limitations it has, are given by the Python datetime.date
implementation, which does not support dates before 0001-01-01.
'''
import re
from datetime import date, timedelta
from isodate.isostrf import strftime, DATE_EXT_COMPLETE
from isodate.isoerror import ISO8601Error
DATE_REGEX_CACHE = {}
# A dictionary to cache pre-compiled regular expressions.
# A set of regular expressions is identified, by number of year digits allowed
# and whether a plus/minus sign is required or not. (This option is changeable
# only for 4 digit years).
def build_date_regexps(yeardigits=4, expanded=False):
    '''
    Compile set of regular expressions to parse ISO dates. The expressions will
    be created only if they are not already in REGEX_CACHE.
    It is necessary to fix the number of year digits, else it is not possible
    to automatically distinguish between various ISO date formats.
    ISO 8601 allows more than 4 digit years, on prior agreement, but then a +/-
    sign is required (expanded format). To support +/- sign for 4 digit years,
    the expanded parameter needs to be set to True.
    '''
    if yeardigits != 4:
        expanded = True
    if (yeardigits, expanded) not in DATE_REGEX_CACHE:
        cache_entry = []
        # ISO 8601 expanded DATE formats allow an arbitrary number of year
        # digits with a leading +/- sign.
        if expanded:
            sign = 1
        else:
            sign = 0
        # The repetition count {%d} on the sign group makes the +/- sign
        # mandatory when sign == 1 and forbidden when sign == 0 (the group
        # then matches empty and groupdict() returns None for it).
        # Patterns are appended from most specific (complete date) down to
        # least specific (century); parse_date relies on this ordering.
        # 1. complete dates:
        # YYYY-MM-DD or +- YYYYYY-MM-DD... extended date format
        cache_entry.append(re.compile(r"(?P<sign>[+-]){%d}(?P<year>[0-9]{%d})"
                                      r"-(?P<month>[0-9]{2})-(?P<day>[0-9]{2})"
                                      % (sign, yeardigits)))
        # YYYYMMDD or +- YYYYYYMMDD... basic date format
        cache_entry.append(re.compile(r"(?P<sign>[+-]){%d}(?P<year>[0-9]{%d})"
                                      r"(?P<month>[0-9]{2})(?P<day>[0-9]{2})"
                                      % (sign, yeardigits)))
        # 2. complete week dates:
        # YYYY-Www-D or +-YYYYYY-Www-D ... extended week date
        cache_entry.append(re.compile(r"(?P<sign>[+-]){%d}(?P<year>[0-9]{%d})"
                                      r"-W(?P<week>[0-9]{2})-(?P<day>[0-9]{1})"
                                      % (sign, yeardigits)))
        # YYYYWwwD or +-YYYYYYWwwD ... basic week date
        cache_entry.append(re.compile(r"(?P<sign>[+-]){%d}(?P<year>[0-9]{%d})W"
                                      r"(?P<week>[0-9]{2})(?P<day>[0-9]{1})"
                                      % (sign, yeardigits)))
        # 3. ordinal dates:
        # YYYY-DDD or +-YYYYYY-DDD ... extended format
        cache_entry.append(re.compile(r"(?P<sign>[+-]){%d}(?P<year>[0-9]{%d})"
                                      r"-(?P<day>[0-9]{3})"
                                      % (sign, yeardigits)))
        # YYYYDDD or +-YYYYYYDDD ... basic format
        cache_entry.append(re.compile(r"(?P<sign>[+-]){%d}(?P<year>[0-9]{%d})"
                                      r"(?P<day>[0-9]{3})"
                                      % (sign, yeardigits)))
        # 4. week dates:
        # YYYY-Www or +-YYYYYY-Www ... extended reduced accuracy week date
        cache_entry.append(re.compile(r"(?P<sign>[+-]){%d}(?P<year>[0-9]{%d})"
                                      r"-W(?P<week>[0-9]{2})"
                                      % (sign, yeardigits)))
        # YYYYWww or +-YYYYYYWww ... basic reduced accuracy week date
        cache_entry.append(re.compile(r"(?P<sign>[+-]){%d}(?P<year>[0-9]{%d})W"
                                      r"(?P<week>[0-9]{2})"
                                      % (sign, yeardigits)))
        # 5. month dates:
        # YYY-MM or +-YYYYYY-MM ... reduced accuracy specific month
        cache_entry.append(re.compile(r"(?P<sign>[+-]){%d}(?P<year>[0-9]{%d})"
                                      r"-(?P<month>[0-9]{2})"
                                      % (sign, yeardigits)))
        # 6. year dates:
        # YYYY or +-YYYYYY ... reduced accuracy specific year
        cache_entry.append(re.compile(r"(?P<sign>[+-]){%d}(?P<year>[0-9]{%d})"
                                      % (sign, yeardigits)))
        # 7. century dates:
        # YY or +-YYYY ... reduced accuracy specific century
        cache_entry.append(re.compile(r"(?P<sign>[+-]){%d}"
                                      r"(?P<century>[0-9]{%d})"
                                      % (sign, yeardigits - 2)))
        DATE_REGEX_CACHE[(yeardigits, expanded)] = cache_entry
    return DATE_REGEX_CACHE[(yeardigits, expanded)]
def parse_date(datestring, yeardigits=4, expanded=False):
    '''
    Parse an ISO 8601 date string into a datetime.date object.
    As the datetime.date implementation is limited to dates starting from
    0001-01-01, negative dates (BC) and year 0 can not be parsed by this
    method.
    For incomplete dates, this method chooses the first day for it. For
    instance if only a century is given, this method returns the 1st of
    January in year 1 of this century.
    supported formats: (expanded formats are shown with 6 digits for year)
    YYYYMMDD    +-YYYYYYMMDD      basic complete date
    YYYY-MM-DD  +-YYYYYY-MM-DD    extended complete date
    YYYYWwwD    +-YYYYYYWwwD      basic complete week date
    YYYY-Www-D  +-YYYYYY-Www-D    extended complete week date
    YYYYDDD     +-YYYYYYDDD       basic ordinal date
    YYYY-DDD    +-YYYYYY-DDD      extended ordinal date
    YYYYWww     +-YYYYYYWww       basic incomplete week date
    YYYY-Www    +-YYYYYY-Www      extended incomplete week date
    YYY-MM      +-YYYYYY-MM       incomplete month date
    YYYY        +-YYYYYY          incomplete year date
    YY          +-YYYY            incomplete century date
    @param datestring: the ISO date string to parse
    @param yeardigits: how many digits are used to represent a year
    @param expanded: if True then +/- signs are allowed. This parameter
                     is forced to True, if yeardigits != 4
    @return: a datetime.date instance represented by datestring
    @raise ISO8601Error: if this function can not parse the datestring
    @raise ValueError: if datestring can not be represented by datetime.date
    '''
    if yeardigits != 4:
        expanded = True
    isodates = build_date_regexps(yeardigits, expanded)
    # Patterns are ordered from most to least specific; first match wins.
    for pattern in isodates:
        match = pattern.match(datestring)
        if match:
            # groupdict() contains only the groups defined by the matched
            # pattern, so key presence distinguishes the date formats below.
            groups = match.groupdict()
            # sign, century, year, month, week, day,
            # FIXME: negative dates not possible with python standard types
            # An absent sign group is None, which compares unequal to '-',
            # so the default sign is +1.
            sign = (groups['sign'] == '-' and -1) or 1
            if 'century' in groups:
                # Century-only date: first day of year 1 of that century.
                return date(sign * (int(groups['century']) * 100 + 1), 1, 1)
            if not 'month' in groups: # weekdate or ordinal date
                ret = date(sign * int(groups['year']), 1, 1)
                if 'week' in groups:
                    isotuple = ret.isocalendar()
                    if 'day' in groups:
                        days = int(groups['day'] or 1)
                    else:
                        days = 1
                    # if first week in year, do weeks-1
                    return ret + timedelta(weeks=int(groups['week']) -
                                           (((isotuple[1] == 1) and 1) or 0),
                                           days = -isotuple[2] + days)
                elif 'day' in groups: # ordinal date
                    return ret + timedelta(days=int(groups['day'])-1)
                else: # year date
                    return ret
            # year-, month-, or complete date
            if 'day' not in groups or groups['day'] is None:
                day = 1
            else:
                day = int(groups['day'])
            return date(sign * int(groups['year']),
                        int(groups['month']) or 1, day)
    raise ISO8601Error('Unrecognised ISO 8601 date format: %r' % datestring)
def date_isoformat(tdate, format=DATE_EXT_COMPLETE, yeardigits=4):
    '''
    Format a datetime.date instance as an ISO 8601 date string.
    Thin convenience wrapper around isodate.isostrf.strftime; by default
    the Date-Extended-Complete format (YYYY-MM-DD) is used.
    '''
    return strftime(tdate, format, yeardigits)
|
{
"content_hash": "2246d71062a2d5b22891f1526f1e1eb0",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 79,
"avg_line_length": 49.24,
"alnum_prop": 0.5326679818962516,
"repo_name": "unor/schemaorg",
"id": "8bafa207fcab1076cf40c02fef08e694eea57be3",
"size": "10165",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "lib/isodate/isodates.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "5585717"
},
{
"name": "Python",
"bytes": "2417294"
},
{
"name": "Shell",
"bytes": "81"
},
{
"name": "Smarty",
"bytes": "26365"
},
{
"name": "Tcl",
"bytes": "98801"
}
],
"symlink_target": ""
}
|
import sys
import logging
import copy
from urllib2 import URLError
from httplib import HTTPException
from Bio import Entrez
# Output file: one line per 1000 comma-separated DOIs.
RESULTS_FILE = 'dois-1000-per-line.txt'
LOG_FORMAT = '%(asctime)-15s %(message)s'
logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)
log = logging.getLogger(__name__)
def fail(msg, original_exception):
    """Log *msg* at CRITICAL level, then re-raise the given exception."""
    log.critical(msg)
    raise original_exception
def warn_skip(msg, pmid):
    """Log a warning and record *pmid* in the skipped-PMIDs file."""
    log.warn(msg)
    append_file('skipped_pmids.txt', ',' + pmid)
def warn_problem_ncbi_record(msg, pmid):
    """Log a warning and record *pmid* as one whose DOI could not be read."""
    log.warn(msg)
    append_file('cant_get_doi_pmids.txt', ',' + pmid)
def to_file(filename, s):
    """Replace the contents of *filename* with str(s) (binary mode)."""
    with open(filename, 'wb') as handle:
        handle.write(str(s))
def append_file(filename, s):
    """Append str(s) to the end of *filename* (binary mode)."""
    with open(filename, 'ab') as handle:
        handle.write(str(s))
def main(argv=None):
    """Harvest DOIs of Wellcome-funded publications from NCBI PubMed.

    Searches PubMed for "Wellcome[GRNT]", fetches each PMID's record and
    writes its DOI via OAGPrep. Passing ``--resume`` as the first argument
    continues from the number of results already in RESULTS_FILE.
    """
    if not argv:
        argv = sys.argv
    resume_at_result_number = 0
    resuming = False
    if len(argv) > 1:
        if argv[1] == '--resume':
            from count_results import count_results
            resume_at_result_number = count_results(RESULTS_FILE)
            resuming = True
    Entrez.email = 'emanuil@cottagelabs.com'
    query = "Wellcome[GRNT]"
    log.info('Sending this query to NCBI: {0}'.format(query))
    log.info('Starting from result number: {}'.format(resume_at_result_number))
    try:
        handle = Entrez.esearch(db="pubmed", term=query, retmax=100000,
                                retstart=resume_at_result_number)
    except URLError as e:
        fail('''NCBI query failed due to an URL Error. Are you connected
                to the internet? (It could be that the EUtils API is down or
                Biopython is generating the wrong URL.)''', e)
    record = Entrez.read(handle, validate=False)
    log.info('NCBI holds {} records related to this query.'.format(record['Count']))
    results = OAGPrep(RESULTS_FILE, resuming=resuming)
    # Each PMID needs its own efetch call; any per-record failure is logged
    # and skipped so one bad record does not abort the whole harvest.
    for pmid in record['IdList']:
        try:
            individual_handle = Entrez.efetch(db='pubmed', retmode='xml',
                                              id=pmid)
            individual_record = Entrez.read(individual_handle,
                                            validate=False)
        except ValueError as e:
            warn_skip(
                '''ValueError, Biopython probably couldn\'t parse
                something or the returned XML was invalid. Skipping PMID {}.
                Original error {}'''.format(pmid, e),
                pmid)
            continue
        except (URLError, HTTPException) as e:
            warn_skip(
                '''Networking error. Skipping PMID {}.
                Original error {}'''.format(pmid, e),
                pmid)
            continue
        except Exception as e:
            warn_skip('''Some other error while fetching individual record for PMID {}. Skipping it.
            Original error: "{}"'''.format(pmid, e),
                      pmid)
            continue
        # Sanity-check the NCBI response shape before digging into it.
        if len(individual_record) > 1:
            warn_problem_ncbi_record('PMID {}: NCBI response contains multiple items in the individual record list'.format(pmid), pmid)
        if 'PubmedData' not in individual_record[0]:
            warn_problem_ncbi_record('PMID {}: NCBI response did not contain PubmedData key'.format(pmid), pmid)
        if 'ArticleIdList' not in individual_record[0]['PubmedData']:
            warn_problem_ncbi_record('PMID {}: NCBI response did not contain the ArticleIdList key in the PubmedData dict'.format(pmid), pmid)
        for identifier in individual_record[0]['PubmedData']['ArticleIdList']:
            try:
                if identifier.attributes['IdType'] == 'doi':
                    results.add(identifier)
                    log.info('Did another one! {}'.format(pmid))
            except AttributeError:
                warn_problem_ncbi_record('PMID {}: Can\'t add PMID, no associated DOI.'.format(pmid), pmid)
            except Exception as e:
                warn_skip('''Some other error while recording result from PMID {}.
                Original error: "{}"'''.format(pmid, e),
                          pmid)
class OAGPrep:
    """Accumulates DOIs into a results file, 1000 comma-separated per line.

    Fix: the original kept ``current_row``, ``rows`` and ``count`` as
    class-level mutable attributes, which would have been shared across
    all instances; they are now per-instance state set in ``__init__``.
    """
    def __init__(self, results_filename, resuming=False):
        # Per-instance accumulator state.
        self.current_row = []
        self.rows = [self.current_row]
        self.count = 0
        self.results_filename = results_filename
        self.do_not_overwrite_files = resuming
        if self.do_not_overwrite_files:
            # Resuming: keep existing results, continue on a fresh line.
            append_file(self.results_filename, "\n")
        else:
            # Fresh run: truncate/create the results file.
            to_file(self.results_filename, '')
    def add(self, identifier):
        """Append one identifier to the results file."""
        self.count = self.count + 1
        # add commas in front of all items, but skip the comma before
        # the first item of every line
        if self.count % 1000 == 1:
            append_file(self.results_filename, identifier)
        else:
            append_file(self.results_filename, ',' + identifier)
        # add a newline after each set of 1000 items
        if self.count % 1000 == 0:
            append_file(self.results_filename, "\n")
    def __str__(self):
        # unroll the rows (lists) into a single string for outputting
        return "\n".join([','.join(row) for row in self.rows])
# Script entry point.
if __name__ == '__main__':
    main()
|
{
"content_hash": "6b8e984b99a30d4d56bb1ef95c2baeb4",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 142,
"avg_line_length": 32.93827160493827,
"alnum_prop": 0.5970764617691154,
"repo_name": "emanuil-tolev/wellcome-outputs-from-ncbi",
"id": "8b00781102d83a379694f06e717261ef3749f46d",
"size": "5336",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "get_wellcome_ncbi_objects.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6919"
}
],
"symlink_target": ""
}
|
# AWeber OAuth 1.0 endpoints and the versioned REST API root.
ACCESS_TOKEN_URL = 'https://auth.aweber.com/1.0/oauth/access_token'
API_BASE = 'https://api.aweber.com/1.0'
AUTHORIZE_URL = 'https://auth.aweber.com/1.0/oauth/authorize'
REQUEST_TOKEN_URL = 'https://auth.aweber.com/1.0/oauth/request_token'
class APIException(Exception):
    """Base exception raised for AWeber API errors."""
class AWeberBase(object):
    """Provides functionality shared accross all AWeber objects"""
    # Maps a resource type to the names of the sub-collections that type
    # exposes. NOTE(review): 'service-root' maps to a plain string rather
    # than a list like every other entry -- confirm this is intentional.
    collections_map = {
        'account': ['lists', 'integrations'],
        'broadcast_campaign': ['links', 'messages', 'stats'],
        'component': [],
        'custom_field': [],
        'followup_campaign': ['links', 'messages', 'stats'],
        'integration': [],
        'link': ['clicks'],
        'list': [
            'campaigns',
            'custom_fields',
            'subscribers',
            'web_forms',
            'web_form_split_tests',
        ],
        'message': ['opens', 'tracked_events'],
        'service-root': 'accounts',
        'subscriber': [],
        'tracked_events': [],
        'web_form': [],
        'web_form_split_test': ['components'],
    }
    @property
    def user(self):
        # The OAuth user held by the underlying adapter.
        return self.adapter.user
    def load_from_url(self, url):
        """Gets an AWeberCollection or AWeberEntry from a given URL."""
        response = self.adapter.request('GET', url)
        return self._read_response(url, response)
    def _method_for(self, type):
        # Guard helper: raise unless this object's resource type matches
        # the given one, so type-specific methods appear not to exist on
        # other entry types.
        if not self.type == type:
            raise AttributeError('Method does not exist')
    def _read_response(self, url, response):
        # A response carrying 'entries' is a collection; one carrying
        # 'resource_type_link' is a single entry. Imports are local to
        # avoid a circular import with the collection/entry modules.
        if 'entries' in response:
            from aweber_api.collection import AWeberCollection
            return AWeberCollection(url, response, self.adapter)
        if 'resource_type_link' in response:
            from aweber_api.entry import AWeberEntry
            return AWeberEntry(url, response, self.adapter)
        raise TypeError('Unknown value returned')
    def _parseNamedOperation(self, data):
        # Build an AWeberEntry per item, rebuilding the relative URL by
        # stripping the API base from each item's self_link.
        from aweber_api.entry import AWeberEntry
        entries = []
        for item in data:
            entries.append(
                AWeberEntry(
                    item['self_link'].replace(API_BASE, ''),
                    item,
                    self.adapter,
                )
            )
        return entries
    def _partition_url(self):
        # Split self.url into path components; returns None when the URL
        # is missing (AttributeError) or too short to have a parent entry.
        try:
            url_parts = self.url.split('/')
            #If top of tree - no parent entry
            if len(url_parts) <= 3:
                return None
        except AttributeError:
            return None
        return url_parts
    def _construct_parent_url(self, url_parts, child_position):
        """Remove collection id and slash from end of url."""
        url = '/'.join(url_parts[:-child_position])
        return url
|
{
"content_hash": "59c292ee12987203c8f3d71de915ea6f",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 71,
"avg_line_length": 31.295454545454547,
"alnum_prop": 0.5566448801742919,
"repo_name": "aweber/AWeber-API-Python-Library",
"id": "799f1ba46e826e2c791fc9b2710a19923c7c12fb",
"size": "2754",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aweber_api/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "58256"
}
],
"symlink_target": ""
}
|
"""Schemas for BigQuery tables / queries."""
class SchemaField(object):
    """Describe a single field within a table schema.

    Args:
        name (str): the name of the field.
        field_type (str): the type of the field. See
            https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#schema.fields.type
        mode (str): the mode of the field. See
            https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#schema.fields.mode
        description (Optional[str]): description for the field.
        fields (Tuple[:class:`~google.cloud.bigquery.schema.SchemaField`]):
            subfields (requires ``field_type`` of 'RECORD').
    """
    def __init__(self, name, field_type, mode='NULLABLE',
                 description=None, fields=()):
        self._name = name
        self._field_type = field_type
        self._mode = mode
        self._description = description
        # Normalize to a tuple so instances remain immutable and hashable.
        self._fields = tuple(fields)
    @classmethod
    def from_api_repr(cls, api_repr):
        """Return a ``SchemaField`` object deserialized from a dictionary.

        Args:
            api_repr (Mapping[str, str]): The serialized representation
                of the SchemaField, such as what is output by
                :meth:`to_api_repr`.

        Returns:
            google.cloud.biquery.schema.SchemaField:
                The ``SchemaField`` object.
        """
        # 'mode', 'description' and 'fields' are optional in the wire
        # format; recurse into subfields (empty for scalar types).
        subfields = [cls.from_api_repr(item)
                     for item in api_repr.get('fields', ())]
        return cls(
            name=api_repr['name'],
            field_type=api_repr['type'].upper(),
            mode=api_repr.get('mode', 'NULLABLE').upper(),
            description=api_repr.get('description'),
            fields=subfields,
        )
    @property
    def name(self):
        """str: The name of the field."""
        return self._name
    @property
    def field_type(self):
        """str: The type of the field.

        See:
        https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#schema.fields.type
        """
        return self._field_type
    @property
    def mode(self):
        """str: The mode of the field.

        See:
        https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#schema.fields.mode
        """
        return self._mode
    @property
    def is_nullable(self):
        """bool: whether 'mode' is 'nullable'."""
        return self._mode == 'NULLABLE'
    @property
    def description(self):
        """Optional[str]: description for the field."""
        return self._description
    @property
    def fields(self):
        """tuple: Subfields contained in this field.

        Must be empty unset if ``field_type`` is not 'RECORD'.
        """
        return self._fields
    def to_api_repr(self):
        """Return a dictionary representing this schema field.

        Returns:
            dict: A dictionary representing the SchemaField in a serialized
                form.
        """
        representation = {
            'name': self._name,
            'type': self._field_type.upper(),
            'mode': self._mode.upper(),
            'description': self._description,
        }
        # Only RECORD fields carry subfields in the serialized form.
        if representation['type'] == 'RECORD':
            representation['fields'] = [
                sub.to_api_repr() for sub in self._fields]
        return representation
    def _key(self):
        """A tuple key that uniquely describes this field.

        Used to compute this instance's hashcode and evaluate equality.
        Type and mode are compared case-insensitively (upper-cased).

        Returns:
            tuple: The contents of this
                :class:`~google.cloud.bigquery.schema.SchemaField`.
        """
        return (
            self._name,
            self._field_type.upper(),
            self._mode.upper(),
            self._description,
            self._fields,
        )
    def __eq__(self, other):
        if isinstance(other, SchemaField):
            return self._key() == other._key()
        return NotImplemented
    def __ne__(self, other):
        return not self == other
    def __hash__(self):
        return hash(self._key())
    def __repr__(self):
        return 'SchemaField{}'.format(self._key())
def _parse_schema_resource(info):
    """Parse a resource fragment into a schema field.

    Args:
        info: (Mapping[str->dict]): should contain a "fields" key to be parsed

    Returns:
        A list of parsed :class:`SchemaField` instances, or the empty
        tuple when *info* has no "fields" key.
    """
    if 'fields' not in info:
        return ()
    return [
        SchemaField(
            r_field['name'],
            r_field['type'],
            r_field.get('mode', 'NULLABLE'),
            r_field.get('description'),
            _parse_schema_resource(r_field),  # recurse into subfields
        )
        for r_field in info['fields']
    ]
def _build_schema_resource(fields):
"""Generate a resource fragment for a schema.
Args:
fields [Sequence[:class:`~google.cloud.bigquery.schema.SchemaField`]):
schema to be dumped
Returns: (Sequence[dict])
mappings describing the schema of the supplied fields.
"""
return [field.to_api_repr() for field in fields]
|
{
"content_hash": "c1fce4d3aea533e556b5cdaf74e23b77",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 94,
"avg_line_length": 30.405405405405407,
"alnum_prop": 0.5767111111111111,
"repo_name": "jonparrott/google-cloud-python",
"id": "759d7c3cbe6530f1ebf383fd595108c05ba117d3",
"size": "6200",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bigquery/google/cloud/bigquery/schema.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Protocol Buffer",
"bytes": "62009"
},
{
"name": "Python",
"bytes": "3459300"
},
{
"name": "Shell",
"bytes": "7548"
}
],
"symlink_target": ""
}
|
from django.contrib.auth import get_user_model
from django.test import override_settings
from django.urls import reverse
from django.utils import encoding
from example.tests import TestBase
class ModelViewSetTests(TestBase):
    """
    Test usage with ModelViewSets, also tests pluralization, camelization,
    and underscore.
    [<RegexURLPattern user-list ^identities/$>,
    <RegexURLPattern user-detail ^identities/(?P<pk>[^/]+)/$>]
    """
    list_url = reverse("user-list")
    def setUp(self):
        super().setUp()
        # Detail endpoint for the fixture user created by TestBase.
        self.detail_url = reverse("user-detail", kwargs={"pk": self.miles.pk})
    def test_key_in_list_result(self):
        """
        Ensure the result has a 'user' key since that is the name of the model
        """
        with override_settings(JSON_API_FORMAT_FIELD_NAMES="dasherize"):
            response = self.client.get(self.list_url)
        self.assertEqual(response.status_code, 200)
        user = get_user_model().objects.all()[0]
        # Pin the full JSON:API payload (type, dasherized attributes,
        # pagination links and meta) for page 1 of 2.
        expected = {
            "data": [
                {
                    "type": "users",
                    "id": encoding.force_str(user.pk),
                    "attributes": {
                        "first-name": user.first_name,
                        "last-name": user.last_name,
                        "email": user.email,
                    },
                }
            ],
            "links": {
                "first": "http://testserver/identities?page%5Bnumber%5D=1",
                "last": "http://testserver/identities?page%5Bnumber%5D=2",
                "next": "http://testserver/identities?page%5Bnumber%5D=2",
                "prev": None,
            },
            "meta": {"pagination": {"page": 1, "pages": 2, "count": 2}},
        }
        assert expected == response.json()
    def test_page_two_in_list_result(self):
        """
        Ensure that the second page is reachable and is the correct data.
        """
        with override_settings(JSON_API_FORMAT_FIELD_NAMES="dasherize"):
            response = self.client.get(self.list_url, {"page[number]": 2})
        self.assertEqual(response.status_code, 200)
        user = get_user_model().objects.all()[1]
        expected = {
            "data": [
                {
                    "type": "users",
                    "id": encoding.force_str(user.pk),
                    "attributes": {
                        "first-name": user.first_name,
                        "last-name": user.last_name,
                        "email": user.email,
                    },
                }
            ],
            "links": {
                "first": "http://testserver/identities?page%5Bnumber%5D=1",
                "last": "http://testserver/identities?page%5Bnumber%5D=2",
                "next": None,
                "prev": "http://testserver/identities?page%5Bnumber%5D=1",
            },
            "meta": {"pagination": {"page": 2, "pages": 2, "count": 2}},
        }
        assert expected == response.json()
    def test_page_range_in_list_result(self):
        """
        Ensure that the range of a page can be changed from the client,
        tests pluralization as two objects means it converts ``user`` to
        ``users``.
        """
        with override_settings(JSON_API_FORMAT_FIELD_NAMES="dasherize"):
            response = self.client.get(self.list_url, {"page[size]": 2})
        self.assertEqual(response.status_code, 200)
        users = get_user_model().objects.all()
        expected = {
            "data": [
                {
                    "type": "users",
                    "id": encoding.force_str(users[0].pk),
                    "attributes": {
                        "first-name": users[0].first_name,
                        "last-name": users[0].last_name,
                        "email": users[0].email,
                    },
                },
                {
                    "type": "users",
                    "id": encoding.force_str(users[1].pk),
                    "attributes": {
                        "first-name": users[1].first_name,
                        "last-name": users[1].last_name,
                        "email": users[1].email,
                    },
                },
            ],
            "links": {
                "first": "http://testserver/identities?page%5Bnumber%5D=1&page%5Bsize%5D=2",
                "last": "http://testserver/identities?page%5Bnumber%5D=1&page%5Bsize%5D=2",
                "next": None,
                "prev": None,
            },
            "meta": {"pagination": {"page": 1, "pages": 1, "count": 2}},
        }
        assert expected == response.json()
    def test_key_in_detail_result(self):
        """
        Ensure the result has a 'user' key.
        """
        with override_settings(JSON_API_FORMAT_FIELD_NAMES="dasherize"):
            response = self.client.get(self.detail_url)
        self.assertEqual(response.status_code, 200)
        expected = {
            "data": {
                "type": "users",
                "id": encoding.force_str(self.miles.pk),
                "attributes": {
                    "first-name": self.miles.first_name,
                    "last-name": self.miles.last_name,
                    "email": self.miles.email,
                },
            }
        }
        assert expected == response.json()
    def test_patch_requires_id(self):
        """
        Verify that 'id' is required to be passed in an update request.
        """
        data = {
            "data": {"type": "users", "attributes": {"first-name": "DifferentName"}}
        }
        response = self.client.patch(self.detail_url, data=data)
        self.assertEqual(response.status_code, 400)
    def test_patch_requires_correct_id(self):
        """
        Verify that 'id' is the same then in url
        """
        # Mismatched id vs URL pk must yield 409 Conflict.
        data = {
            "data": {
                "type": "users",
                "id": self.miles.pk + 1,
                "attributes": {"first-name": "DifferentName"},
            }
        }
        response = self.client.patch(self.detail_url, data=data)
        self.assertEqual(response.status_code, 409)
    def test_key_in_post(self):
        """
        Ensure a key is in the post.
        """
        self.client.login(username="miles", password="pw")
        data = {
            "data": {
                "type": "users",
                "id": encoding.force_str(self.miles.pk),
                "attributes": {
                    "first-name": self.miles.first_name,
                    "last-name": self.miles.last_name,
                    "email": "miles@trumpet.org",
                },
            }
        }
        with override_settings(JSON_API_FORMAT_FIELD_NAMES="dasherize"):
            response = self.client.put(self.detail_url, data=data)
        assert data == response.json()
        # is it updated?
        self.assertEqual(
            get_user_model().objects.get(pk=self.miles.pk).email, "miles@trumpet.org"
        )
    def test_404_error_pointer(self):
        self.client.login(username="miles", password="pw")
        not_found_url = reverse("user-detail", kwargs={"pk": 12345})
        errors = {
            "errors": [{"detail": "Not found.", "status": "404", "code": "not_found"}]
        }
        response = self.client.get(not_found_url)
        assert 404 == response.status_code
        assert errors == response.json()
|
{
"content_hash": "b9def88b3cdeaa6436391715381ffc9c",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 92,
"avg_line_length": 34.345622119815665,
"alnum_prop": 0.48450288474439823,
"repo_name": "leo-naeka/django-rest-framework-json-api",
"id": "21a641f8b8474df25aa53c757f87694af93c0c5f",
"size": "7453",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "example/tests/test_model_viewsets.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "382042"
}
],
"symlink_target": ""
}
|
"""
babushka
~~~~~~~~
Russian Doll Caching for Jinja2 & NDB on GAE.
:license: MIT License, see LICENSE for more details.
:documentation: See README.md for documentation.
"""
from google.appengine.api import memcache
from jinja2 import nodes
from jinja2.ext import Extension
import hashlib
# --------------------------------------------------------------------
# Babushka Jinja Extension
# --------------------------------------------------------------------
class BabushkaExtension(Extension):
    """Jinja2 tag implementing Russian-doll fragment caching on GAE
    memcache. Template usage: {% cache key[, timeout] %}...{% endcache %}
    (or the ``babushka``/``endbabushka`` aliases).
    """
    tags = set(['cache', 'babushka'])
    def parse(self, parser):
        # NOTE(review): parser.stream.next() is Python 2 iterator syntax;
        # this module targets the legacy GAE Python 2 runtime.
        lineno = parser.stream.next().lineno
        # First tag argument: the cache key expression.
        args = [parser.parse_expression()]
        # Optional second argument: timeout in seconds (None -> no expiry).
        if parser.stream.skip_if('comma'):
            args.append(parser.parse_expression())
        else:
            args.append(nodes.Const(None))
        body = parser.parse_statements(['name:endcache', 'name:endbabushka'], drop_needle=True)
        # Checksum of the parsed body: editing the template fragment changes
        # the checksum, which invalidates previously cached output.
        checksum = hashlib.md5(str(body)).hexdigest()
        args.append(nodes.Const(checksum))
        args.append(nodes.Const(parser.filename))
        return nodes.CallBlock(
            self.call_method('_cache', args), [], [], body
        ).set_lineno(lineno)
    def _cache(self, cache_key, timeout, checksum, filename, caller):
        # A falsy key disables caching for this fragment.
        if not cache_key:
            return caller()
        # Namespace the key by body checksum and template filename.
        cache_key = '%s/%s/%s' % (cache_key, checksum, filename)
        value = memcache.get(cache_key)
        if not value:
            value = caller()
            # memcache.add only stores when the key is absent.
            memcache.add(cache_key, value, time=timeout or 0)
        return value
# Lowercase aliases so the extension can be registered as either name.
cache = babushka = BabushkaExtension
|
{
"content_hash": "fed8340469a57cb79f647fbe901ddac7",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 95,
"avg_line_length": 27.603448275862068,
"alnum_prop": 0.5665209244222361,
"repo_name": "ozburo/babushka",
"id": "c3e48ef360d9297f9c8a13aa7352ba2fd6addf4c",
"size": "1625",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "babushka/extension.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "244"
},
{
"name": "Python",
"bytes": "17015"
}
],
"symlink_target": ""
}
|
from typing import Dict
import datetime
import json
import uuid
from flask import Flask, jsonify, request
from ambassador.utils import parse_yaml
class FakeScoutApp (Flask):
    """Flask app that remembers how many /scout reports each application
    name has submitted."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # application name -> number of reports received so far
        self.counts: Dict[str, int] = {}
def merge_dicts(x, y):
    """Return a shallow copy of *x* updated with *y* (y wins on key ties)."""
    merged = x.copy()
    merged.update(y)
    return merged
@app.route('/scout', methods=['POST'])
def report():
    """Handle a fake Scout report: dump the payload, bump the per-app
    counter, and reply with a canned response carrying a warning notice."""
    payload = request.json
    print("\n---- %s" % datetime.datetime.now().isoformat())
    print(json.dumps(payload, sort_keys=True, indent=4))
    application = str(payload.get('application', '')).lower()
    # Lazily initialize this application's counter.
    if application not in app.counts:
        app.counts[application] = 0
    app.counts[application] += 1
    result = {
        "latest_version": "0.52.1",
        "application": application,
        "cached": False,
        "count": app.counts[application],
        "timestamp": datetime.datetime.now().timestamp(),
        "notices": [{ "level": "warning", "message": "Scout response is faked!" }]
    }
    return jsonify(result), 200
def main():
    """Run the fake Scout server on all interfaces, port 9999."""
    print("fake_scout listening on port 9999")
    app.run(host='0.0.0.0', port=9999, debug=True)
if __name__ == '__main__':
    main()
|
{
"content_hash": "ff10dec6b85480977772693cda5b5bde",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 82,
"avg_line_length": 21.79310344827586,
"alnum_prop": 0.6052215189873418,
"repo_name": "datawire/ambassador",
"id": "e3ca2ec1f4c3c7a4c331001ccc3238b8e084fb3a",
"size": "1264",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/devloop-helpers/fake_scout.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "20990"
},
{
"name": "Go",
"bytes": "564752"
},
{
"name": "HTML",
"bytes": "25150"
},
{
"name": "JavaScript",
"bytes": "32368"
},
{
"name": "Makefile",
"bytes": "113905"
},
{
"name": "Python",
"bytes": "1158187"
},
{
"name": "Shell",
"bytes": "188832"
}
],
"symlink_target": ""
}
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import fileinput
import os
from resource_management import *
from resource_management.core.logger import Logger
def ranger(name=None):
  """Dispatch to the setup routine for the named Ranger component.

  Args:
    name: 'ranger_admin' or 'ranger_usersync'; any other value is a no-op.
  """
  if name == 'ranger_admin':
    setup_ranger_admin()
  elif name == 'ranger_usersync':
    setup_usersync()
def setup_ranger_admin():
  """Install and configure the Ranger Admin component.

  Verifies DB connectivity, stages the JDBC driver, writes
  install.properties, runs the vendor setup.sh, then applies the
  generated configuration files and log directory ownership.
  """
  import params
  check_db_connnection()
  # Download the JDBC driver for the configured DB flavor.
  File(params.downloaded_custom_connector,
       content = DownloadSource(params.driver_curl_source),
       mode = 0644
  )
  Execute(('cp', '--remove-destination', params.downloaded_custom_connector, params.driver_curl_target),
          path=["/bin", "/usr/bin/"],
          sudo=True)
  File(params.driver_curl_target, mode=0644)
  # Seed install.properties from the cluster's admin-properties config.
  ModifyPropertiesFile(format("{ranger_home}/install.properties"),
                       properties = params.config['configurations']['admin-properties']
  )
  custom_config = dict()
  custom_config['unix_user'] = params.unix_user
  custom_config['unix_group'] = params.unix_group
  ModifyPropertiesFile(format("{ranger_home}/install.properties"),
                       properties=custom_config
  )
  ##if db flavor == oracle - set oracle home env variable
  if params.db_flavor.lower() == 'oracle' and params.oracle_home:
    env_dict = {'JAVA_HOME': params.java_home, 'ORACLE_HOME':params.oracle_home, 'LD_LIBRARY_PATH':params.oracle_home}
  else:
    env_dict = {'JAVA_HOME': params.java_home}
  # Run the vendor-supplied installer with elevated privileges.
  setup_sh = format("cd {ranger_home} && ") + as_sudo([format('{ranger_home}/setup.sh')])
  Execute(setup_sh,
          environment=env_dict,
          logoutput=True,
  )
  ModifyPropertiesFile(format("{ranger_conf}/xa_system.properties"),
                       properties = params.config['configurations']['ranger-site'],
  )
  ModifyPropertiesFile(format("{ranger_conf}/ranger_webserver.properties"),
                       properties = params.config['configurations']['ranger-site'],
                       mode=0744
  )
  Directory(params.admin_log_dir,
            owner = params.unix_user,
            group = params.unix_group
  )
def setup_usersync():
  """Install and configure the Ranger Usersync component.

  Writes install.properties, runs the vendor setup.sh, and fixes
  ownership/permissions of the usersync scripts and log directory.
  """
  import params
  PropertiesFile(format("{usersync_home}/install.properties"),
                 properties = params.config['configurations']['usersync-properties'],
  )
  custom_config = dict()
  custom_config['unix_user'] = params.unix_user
  custom_config['unix_group'] = params.unix_group
  ModifyPropertiesFile(format("{usersync_home}/install.properties"),
                       properties=custom_config
  )
  # Run the vendor-supplied installer with elevated privileges.
  cmd = format("cd {usersync_home} && ") + as_sudo([format('{usersync_home}/setup.sh')])
  Execute(cmd, environment={'JAVA_HOME': params.java_home}, logoutput=True)
  File([params.usersync_start, params.usersync_stop],
       owner = params.unix_user
  )
  File(params.usersync_services_file,
       mode = 0755,
  )
  Directory(params.usersync_log_dir,
            owner = params.unix_user,
            group = params.unix_group
  )
def check_db_connnection():
  """Verify connectivity to the Ranger database before installation.

  Builds a flavor-specific connectivity command (mysql/oracle; postgres
  and mssql have no standalone check and are skipped) and executes it,
  raising Fail when the check cannot be run or does not succeed.

  Note: the function name keeps its historical misspelling because other
  code in this module calls it by this name.
  """
  import params
  Logger.info('Checking DB connection')
  env_dict = {}
  db_flavor = params.db_flavor.lower()
  if db_flavor == 'mysql':
    cmd = format('{sql_command_invoker} -u {db_root_user} --password={db_root_password!p} -h {db_host} -s -e "select version();"')
  elif db_flavor == 'oracle':
    cmd = format("{sql_command_invoker} '{db_root_user}/\"{db_root_password}\"@{db_host}' AS SYSDBA")
    env_dict = {'ORACLE_HOME':params.oracle_home, 'LD_LIBRARY_PATH':params.oracle_home}
  elif db_flavor in ('postgres', 'mssql'):
    # No standalone connectivity check implemented for these flavors.
    cmd = 'true'
  else:
    # Previously an unknown flavor crashed with UnboundLocalError on
    # `cmd`; fail explicitly instead.
    raise Fail('Unsupported DB flavor for connection check: %s' % params.db_flavor)
  try:
    Execute(cmd,
            environment=env_dict,
            logoutput=True)
  except Fail as ex:
    Logger.error(str(ex))
    raise Fail('Ranger Database connection check failed')
|
{
"content_hash": "51c41aab0f1d12ed47585b95fae5ff0a",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 131,
"avg_line_length": 31.75,
"alnum_prop": 0.6956924502084298,
"repo_name": "alexryndin/ambari",
"id": "2379dd3015baa17499b9d705f5512b4787672f05",
"size": "4340",
"binary": false,
"copies": "2",
"ref": "refs/heads/branch-adh-1.5",
"path": "ambari-server/src/main/resources/stacks/BigInsights/4.2/services/RANGER/package/scripts/setup_ranger.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "44884"
},
{
"name": "C",
"bytes": "331204"
},
{
"name": "C#",
"bytes": "215907"
},
{
"name": "C++",
"bytes": "257"
},
{
"name": "CSS",
"bytes": "786184"
},
{
"name": "CoffeeScript",
"bytes": "8465"
},
{
"name": "FreeMarker",
"bytes": "2654"
},
{
"name": "Groovy",
"bytes": "89958"
},
{
"name": "HTML",
"bytes": "2514774"
},
{
"name": "Java",
"bytes": "29565801"
},
{
"name": "JavaScript",
"bytes": "19033151"
},
{
"name": "Makefile",
"bytes": "11111"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLpgSQL",
"bytes": "316489"
},
{
"name": "PowerShell",
"bytes": "2090340"
},
{
"name": "Python",
"bytes": "17215686"
},
{
"name": "R",
"bytes": "3943"
},
{
"name": "Roff",
"bytes": "13935"
},
{
"name": "Ruby",
"bytes": "33764"
},
{
"name": "SQLPL",
"bytes": "4277"
},
{
"name": "Shell",
"bytes": "886011"
},
{
"name": "Vim script",
"bytes": "5813"
},
{
"name": "sed",
"bytes": "2303"
}
],
"symlink_target": ""
}
|
"""Correctness tests for tf.keras using DistributionStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from absl.testing import parameterized
import numpy as np
import six
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute import tpu_strategy
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.framework import random_seed
from tensorflow.python.keras.distribute import distributed_training_utils
from tensorflow.python.keras.mixed_precision.experimental import policy
from tensorflow.python.keras.preprocessing import sequence
from tensorflow.python.util import nest
# Seed shared by numpy and TF so the distributed and non-distributed runs
# see identical data and identical weight initialization.
_RANDOM_SEED = 1337
_EVAL_STEPS = 20
_GLOBAL_BATCH_SIZE = 64

# Note: Please make sure the tests in this file are also covered in
# keras_backward_compat_test for features that are supported with both APIs.

# Strategies every correctness test is parameterized over by default.
all_strategies = [
    strategy_combinations.default_strategy,
    strategy_combinations.one_device_strategy,
    strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
    strategy_combinations.mirrored_strategy_with_two_gpus,
    strategy_combinations.tpu_strategy,  # steps_per_run=2
    strategy_combinations.tpu_strategy_one_step,
]
def eager_mode_test_configuration():
  """Returns eager-mode combinations over input kind and validation style."""
  input_options = {
      'use_numpy': [True, False],
      'use_validation_data': [True, False],
  }
  return combinations.combine(mode='eager', **input_options)
def graph_mode_test_configuration():
  """Returns graph-mode combinations over input kind and validation style."""
  input_options = {
      'use_numpy': [True, False],
      'use_validation_data': [True, False],
  }
  return combinations.combine(mode='graph', **input_options)
def all_strategy_and_input_config_combinations():
  """Crosses every strategy with every eager/graph input configuration."""
  strategy_configs = combinations.combine(
      distribution=all_strategies,
      experimental_run_tf_function=[True, False])
  input_configs = (
      eager_mode_test_configuration() + graph_mode_test_configuration())
  return combinations.times(strategy_configs, input_configs)
def strategy_minus_tpu_and_input_config_combinations_eager():
  """Crosses the non-TPU strategies with eager-mode input configurations."""
  non_tpu_configs = combinations.combine(
      distribution=strategy_combinations.strategies_minus_tpu)
  return combinations.times(non_tpu_configs, eager_mode_test_configuration())
def strategies_for_embedding_models():
  """Returns distribution strategies to test for embedding models.

  Since embedding models take longer to train, we disregard DefaultStrategy
  in order to prevent testing timeouts.
  """
  chosen = []
  for candidate in all_strategies:
    if (candidate.required_tpu or candidate.required_gpus or
        candidate is strategy_combinations.one_device_strategy):
      chosen.append(candidate)
  return chosen
def test_combinations_for_embedding_model():
  """Builds the graph + eager combinations used by embedding-model tests."""
  # TODO(sourabhbajaj): Enable tests for eager mode
  eager_mode_strategies = [
      s for s in strategies_for_embedding_models() if not s.required_tpu
  ]

  graph_combos = combinations.times(
      combinations.combine(
          distribution=strategies_for_embedding_models(),
          experimental_run_tf_function=[True, False]),
      graph_mode_test_configuration())
  eager_combos = combinations.times(
      combinations.combine(
          distribution=eager_mode_strategies,
          experimental_run_tf_function=[False]),
      eager_mode_test_configuration())
  return graph_combos + eager_combos
def test_combinations_with_tpu_strategies():
  """Returns graph-mode combinations restricted to the TPU strategies."""
  tpu_strategies = [
      strategy_combinations.tpu_strategy,
      strategy_combinations.tpu_strategy_one_step,
  ]
  tpu_configs = combinations.combine(distribution=tpu_strategies)
  return combinations.times(tpu_configs, graph_mode_test_configuration())
class MaybeDistributionScope(object):
  """Context manager that enters a distribution scope only if one is given.

  When constructed with a falsy distribution (e.g. None), entering and
  exiting this context is a no-op, which lets test code use a single
  `with` statement for both the distributed and non-distributed runs.
  """

  def __init__(self, distribution):
    self._distribution = distribution
    self._scope = None

  def __enter__(self):
    if not self._distribution:
      return
    self._scope = self._distribution.scope()
    self._scope.__enter__()

  def __exit__(self, exc_type, value, traceback):
    if not self._distribution:
      return
    self._scope.__exit__(exc_type, value, traceback)
    self._scope = None
def batch_wrapper(dataset, batch_size, repeat=None):
  """Optionally repeats `dataset` `repeat` times, then batches it."""
  maybe_repeated = dataset.repeat(repeat) if repeat else dataset
  return maybe_repeated.batch(batch_size)
def get_batch_size(global_batch_size, distribution):
  """Returns the batch size to pass to Keras for the given strategy.

  Strategies that do not support a global batch size get the per-replica
  share; otherwise the global batch size is used unchanged.
  """
  # TODO(b/118776054): Use global batch size for Keras/DS support.
  if distribution and not distributed_training_utils.global_batch_size_supported(
      distribution):
    return global_batch_size // distribution.num_replicas_in_sync
  return global_batch_size
def get_data_size(data):
  """Gets the size of data in list, tuple, dict, or a numpy array.

  For an ndarray this is its leading dimension; for a list/tuple it is the
  length of the first element (inputs are structured as parallel arrays);
  for a dict it is the length of an arbitrary (first-iterated) value.
  Assumes dicts are non-empty.
  """
  assert isinstance(data, (np.ndarray, list, dict, tuple))

  if isinstance(data, np.ndarray):
    return len(data)

  if isinstance(data, (list, tuple)):
    return len(data[0])

  # Replaces six.next(six.itervalues(data)): identical behavior on Python 3
  # without the third-party six dependency.
  return len(next(iter(data.values())))
def get_shapes(data):
  """Returns the nested structure of `.shape`s, or None if any leaf lacks one."""
  leaves = nest.flatten(data)
  if not all(hasattr(leaf, 'shape') for leaf in leaves):
    return None
  return nest.map_structure(lambda leaf: leaf.shape, data)
def get_correctness_test_inputs(use_numpy, use_validation_data,
                                with_distribution, x_train, y_train, x_eval,
                                y_eval, x_predict, training_epochs):
  """Generates the inputs for correctness check when enable Keras with DS.

  Args:
    use_numpy: feed raw numpy arrays (with explicit batch_size) when True,
      otherwise wrap the data in tf.data Datasets.
    use_validation_data: fold evaluation into fit() via validation_data
      (eval_inputs becomes None) instead of a separate evaluate() call.
    with_distribution: the distribution strategy under test, or None.
    x_train, y_train, x_eval, y_eval, x_predict: raw data arrays/structures.
    training_epochs: number of epochs to train for.

  Returns:
    Tuple of (training_inputs, eval_inputs, predict_inputs) kwarg dicts for
    model.fit / model.evaluate / model.predict; eval_inputs is None when
    validation is done inside fit().
  """
  global_batch_size = _GLOBAL_BATCH_SIZE
  # Per-replica or global batch size depending on what the strategy supports.
  batch_size = get_batch_size(global_batch_size, with_distribution)
  if use_numpy:
    training_inputs = {
        'batch_size': batch_size,
        'x': x_train,
        'y': y_train,
        'epochs': training_epochs,
        'shuffle': False,
    }

    if use_validation_data:
      eval_inputs = None
      training_inputs['validation_data'] = (x_eval, y_eval)
    else:
      eval_inputs = {
          'batch_size': batch_size,
          'x': x_eval,
          'y': y_eval,
      }
    predict_inputs = {'x': x_predict}
  else:
    training_data_size = get_data_size(x_train)
    # For dataset inputs, we do not pass batch_size to
    # keras.fit/evaluate/predict. The batch size is part of the dataset.
    train_dataset = dataset_ops.Dataset.from_tensor_slices((x_train, y_train))
    x = batch_wrapper(train_dataset, batch_size, repeat=training_epochs)

    steps_per_epoch = int(np.ceil(1.0 * training_data_size / global_batch_size))
    training_inputs = {
        'batch_size': None,
        'x': x,
        'y': None,
        'epochs': training_epochs,
        'shuffle': False,
        'steps_per_epoch': steps_per_epoch
    }
    if use_validation_data:
      eval_inputs = None  # Remove the eval_inputs
      eval_dataset = dataset_ops.Dataset.from_tensor_slices((x_eval, y_eval))
      x = batch_wrapper(eval_dataset, batch_size)
      training_inputs['validation_data'] = x
      training_inputs['validation_steps'] = 5
    else:
      eval_dataset = dataset_ops.Dataset.from_tensor_slices((x_eval, y_eval))
      x = batch_wrapper(eval_dataset, batch_size)
      eval_steps = int(np.ceil(1.0 * get_data_size(x_eval) / global_batch_size))
      eval_inputs = {
          'batch_size': None,
          'x': x,
          'y': None,
          'steps': eval_steps,
      }

    # Predict uses the whole predict set as one batch (steps=1).
    predict_batch_size = get_batch_size(
        get_data_size(x_predict), with_distribution)
    predict_dataset = dataset_ops.Dataset.from_tensor_slices(x_predict)
    predict_dataset = batch_wrapper(predict_dataset, predict_batch_size)
    predict_inputs = {
        'steps': 1,
        'x': predict_dataset,
    }

  return training_inputs, eval_inputs, predict_inputs
def fit_eval_and_predict(initial_weights,
                         input_fn,
                         model_fn,
                         experimental_run_tf_function=None,
                         distribution=None,
                         is_stateful_model=False):
  """Generates results for fit/predict/evaluate for given model.

  Builds a fresh model via `model_fn`, seeds it with `initial_weights` so
  that distributed and non-distributed runs start identically, then runs
  two fit/evaluate rounds (with predictions in between) and records every
  intermediate result.

  Args:
    initial_weights: weights to load into the freshly-built model.
    input_fn: zero-arg callable returning (training, eval, predict) kwarg
      dicts, e.g. a partial of get_correctness_test_inputs.
    model_fn: model builder accepting experimental_run_tf_function,
      initial_weights, distribution and input_shapes.
    experimental_run_tf_function: forwarded to `model_fn`.
    distribution: strategy under test, or None for the baseline run.
    is_stateful_model: when True, predict() is invoked three times since a
      stateful model may return different results per call.

  Returns:
    Dict keyed by 'training_history_1/2', 'eval_result_1/2', 'weights_1/2'
    and 'predict_result_<i>' — the keys compare_results() iterates over.
  """
  training_inputs, eval_inputs, predict_inputs = input_fn()

  model = model_fn(
      experimental_run_tf_function=experimental_run_tf_function,
      initial_weights=initial_weights,
      distribution=distribution,
      input_shapes=get_shapes(training_inputs['x']))

  result = {}
  result['training_history_1'] = model.fit(**training_inputs).history

  if eval_inputs is not None:
    result['eval_result_1'] = model.evaluate(**eval_inputs)

  result['weights_1'] = model.get_weights()

  if predict_inputs is not None:
    # Check correctness of the result of predict() invoked
    # multiple times -- as for stateful models, result of
    # predict may differ for each batch.
    predict_length = 1
    if is_stateful_model:
      predict_length = 3
    for i in range(predict_length):
      result_key = 'predict_result_{}'.format(i)
      result[result_key] = model.predict(**predict_inputs)

  # Train and eval again to mimic user's flow.

  result['training_history_2'] = model.fit(**training_inputs).history

  if eval_inputs is not None:
    result['eval_result_2'] = model.evaluate(**eval_inputs)

  result['weights_2'] = model.get_weights()

  return result
def compare_results(results_with_ds,
                    results_without_ds,
                    distribution,
                    testcase,
                    partial_last_batch=None):
  """Compares results of model compiled with/without distribution strategy.

  Args:
    results_with_ds: result dict from fit_eval_and_predict under `distribution`.
    results_without_ds: result dict from the baseline (no-strategy) run.
    distribution: the strategy used for the distributed run; determines the
      comparison tolerance and which keys are skipped.
    testcase: the tf.test.TestCase providing assertAllClose.
    partial_last_batch: None, 'eval' or 'train_and_eval'; relaxes tolerances
      and masks loss-related entries when uneven batches are in play.

  NOTE: mutates both result dicts in place when partial_last_batch is set
  (strips the loss from eval results, zeroes val_loss in histories).
  """
  if policy.global_policy().compute_dtype in ('float16', 'bfloat16'):
    # Reduced-precision runs cannot be compared tightly.
    default_tolerance = 1e-2
    relaxed_tolerance = 1e-2
  elif partial_last_batch == 'train_and_eval':
    # We relax the tolerence a lot in the partial last batch case as
    # 1. the examples in uneven batches may have different weights when
    #    applying the gradients in the distributed case.
    # 2. TF Keras and TF Keras DS have different ways to handle the case when
    #    training with epochs > 1 with numpy inputs. In TF Keras, every epoch
    #    may have a partial batch. While in TF Keras DS, as we convert
    #    numpy inputs into dataset, it will do a repeat() first and calculate
    #    steps_per_epoch, so it will at most have one partial batch. This
    #    makes the 1-CPU result even different.
    default_tolerance = 1e-3
    relaxed_tolerance = 1e-3
  else:
    default_tolerance = 1e-5
    relaxed_tolerance = 1e-4

  def _get_compare_result_tolerance(key):
    """Returns tolerance to compare results."""
    # TODO(b/119257215): For MirroredStrategy, weights are not exactly the same,
    # so use larger tolerance for now. Predict should be related to weights.
    if (isinstance(distribution,
                   (mirrored_strategy.MirroredStrategy,
                    distribute_lib._DefaultDistributionStrategy)) and  # pylint: disable=protected-access
        key.startswith(('weights_1', 'weights_2', 'predict_result'))):
      return relaxed_tolerance

    return default_tolerance

  for key in sorted(results_with_ds.keys()):
    if (key.startswith('training_history') and
        isinstance(distribution,
                   (tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV1)) and
        distribution.extended.steps_per_run > 1):
      # TODO(b/119894254): Enable this test for all cases once the
      # underlying bug is fixed.
      continue

    tolerance = _get_compare_result_tolerance(key)

    # We don't compare the loss as loss is currently not computed as metric
    # in Keras, the loss value is inaccurate for last partial batch due to
    # more weights for the last batch samples.
    if partial_last_batch is not None:
      if key.startswith('eval_result'):
        results_with_ds[key] = results_with_ds[key][1:]
        results_without_ds[key] = results_without_ds[key][1:]
      if key.startswith('training_history'):
        results_with_ds[key]['val_loss'] = 0
        results_without_ds[key]['val_loss'] = 0

    testcase.assertAllClose(
        results_with_ds[key],
        results_without_ds[key],
        atol=tolerance,
        rtol=tolerance,
        msg='Fail to assert {}.'.format(key))
def should_skip_tpu_with_eager(distribution):
  """True when running eagerly against a TPU strategy (combo not supported)."""
  is_tpu_strategy = isinstance(
      distribution, (tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV1))
  return context.executing_eagerly() and is_tpu_strategy
class LearningRateBatchScheduler(keras.callbacks.Callback):
  """Scheduler that dynamically sets the learning rate of model."""

  def __init__(self, update_freq=None):
    # When set, the LR is only updated on every `update_freq`-th batch; used
    # to make CPU runs match TPUStrategy's steps_per_run cadence.
    self._update_freq = update_freq

  def on_batch_begin(self, batch, logs=None):
    """Sets a deterministic, batch-index-dependent learning rate."""
    if self._update_freq and batch % self._update_freq != 0:
      return

    # To avoid divergence, limit the value range.
    lr = 0.001 * (batch % 10)
    keras.backend.set_value(self.model.optimizer.lr, lr)
class TestDistributionStrategyCorrectnessBase(test.TestCase,
                                              parameterized.TestCase):
  """Model agnostic testing infra to test correctness of Keras models.

  Subclasses supply the model (get_model) and optionally the data
  (get_data*); the base class runs the same fit/evaluate/predict sequence
  with and without a distribution strategy and asserts the results match.
  """

  def set_up_test_config(self,
                         use_numpy=False,
                         use_validation_data=False,
                         with_batch_norm=False):
    """Records the input configuration and seeds all RNGs deterministically."""
    self.use_numpy = use_numpy
    self.use_validation_data = use_validation_data
    self.with_batch_norm = with_batch_norm

    keras.backend.set_image_data_format('channels_last')
    # Same seeds for both runs so data and init weights are identical.
    np.random.seed(_RANDOM_SEED)
    random_seed.set_random_seed(_RANDOM_SEED)

  def get_data(self):
    """Returns a trivial identity dataset: (x_train, y_train, x_predict)."""
    num_samples = 10000
    # Binary inputs with y == x, shaped (num_samples, 1).
    x_train = np.random.randint(0, 2, num_samples)
    x_train = np.reshape(x_train, (num_samples, 1))
    y_train = x_train
    return (x_train.astype('float32'), y_train.astype('float32'), None)

  def get_data_with_partial_last_batch(self):
    # Subclasses testing uneven train+eval batches must override.
    raise NotImplementedError

  def get_data_with_partial_last_batch_eval(self):
    # Subclasses testing an uneven eval batch must override.
    raise NotImplementedError

  def get_input_for_correctness_test(self, **kwargs):
    """Generates inputs that are dictionaries.

    We only provide a default implementation of this method here. If you need
    more customized way of providing input to your model, overwrite this method.

    Arguments:
      **kwargs: key word arguments about how to create the input dictionaries

    Returns:
      Three dictionaries representing the input for fit(), evalutate() and
      predict()
    """

    return get_correctness_test_inputs(**kwargs)

  def get_model(self,
                distribution=None,
                experimental_run_tf_function=None,
                input_shapes=None):
    # Subclasses must build and compile the model under test.
    raise NotImplementedError

  def run_correctness_test(self,
                           distribution,
                           use_numpy,
                           use_validation_data,
                           experimental_run_tf_function=None,
                           with_batch_norm=False,
                           is_stateful_model=False,
                           partial_last_batch=None,
                           training_epochs=2):
    """Trains the model with and without `distribution` and compares results.

    With batch norm and >1 replica the comparison is expected to FAIL
    (batch norm statistics are not aggregated globally), and the test
    asserts that failure instead.
    """
    with self.cached_session():
      self.set_up_test_config(use_numpy, use_validation_data, with_batch_norm)

      if partial_last_batch == 'eval':
        x_train, y_train, x_eval, y_eval, x_predict = (
            self.get_data_with_partial_last_batch_eval())
      elif partial_last_batch == 'train_and_eval':
        x_train, y_train, x_eval, y_eval, x_predict = (
            self.get_data_with_partial_last_batch())
      else:
        x_train, y_train, x_predict = self.get_data()
        x_eval = x_train
        y_eval = y_train

      # The model is built once and the initial weights are saved.
      # This is used to initialize the model for both the distribution and
      # non-distribution run.
      model = self.get_model(
          experimental_run_tf_function=experimental_run_tf_function,
          input_shapes=get_shapes(x_train))
      initial_weights = model.get_weights()

      ds_input_fn = functools.partial(
          self.get_input_for_correctness_test,
          use_numpy=use_numpy,
          use_validation_data=use_validation_data,
          with_distribution=distribution,
          x_train=x_train,
          y_train=y_train,
          x_eval=x_eval,
          y_eval=y_eval,
          x_predict=x_predict,
          training_epochs=training_epochs)

      nods_input_fn = functools.partial(
          self.get_input_for_correctness_test,
          use_numpy=use_numpy,
          use_validation_data=use_validation_data,
          with_distribution=None,
          x_train=x_train,
          y_train=y_train,
          x_eval=x_eval,
          y_eval=y_eval,
          x_predict=x_predict,
          training_epochs=training_epochs)

      results_with_ds = fit_eval_and_predict(
          initial_weights,
          input_fn=ds_input_fn,
          model_fn=self.get_model,
          experimental_run_tf_function=experimental_run_tf_function,
          distribution=distribution,
          is_stateful_model=is_stateful_model)
      results_without_ds = fit_eval_and_predict(
          initial_weights,
          input_fn=nods_input_fn,
          model_fn=self.get_model,
          experimental_run_tf_function=experimental_run_tf_function,
          distribution=None,
          is_stateful_model=is_stateful_model)

      # First, special case, for multi-replica distributed training, batch
      # norm is not aggregated globally. So it is expected to have different
      # weights.
      if (self.with_batch_norm and distribution.num_replicas_in_sync > 1):
        with self.assertRaises(AssertionError):
          compare_results(
              results_with_ds,
              results_without_ds,
              distribution,
              testcase=self,
              partial_last_batch=partial_last_batch)
      else:
        compare_results(
            results_with_ds,
            results_without_ds,
            distribution,
            testcase=self,
            partial_last_batch=partial_last_batch)

  def get_input_for_dynamic_lr_test(self, **kwargs):
    """Generates inputs that are dictionaries.

    We only provide a default implementation of this method here. If you need
    more customized way of providing input to your model, overwrite this method.

    Arguments:
      **kwargs: key word arguments about how to create the input dictionaries

    Returns:
      Three dictionaries representing the input for fit(), evalutate() and
      predict()
    """

    training_input = kwargs
    return training_input, None, None

  def run_dynamic_lr_test(self,
                          distribution,
                          experimental_run_tf_function=None):
    """Verifies LearningRateBatchScheduler behaves the same with/without DS."""
    with self.cached_session():
      self.set_up_test_config()

      x_train, y_train, _ = self.get_data()
      model = self.get_model(
          experimental_run_tf_function=experimental_run_tf_function,
          input_shapes=get_shapes(x_train))
      initial_weights = model.get_weights()
      update_freq = None

      if (isinstance(distribution, tpu_strategy.TPUStrategyV1) and
          distribution.extended.steps_per_run > 1):
        # For TPUStrategy with steps_per_run > 1, the callback is not invoked
        # every step. So, to compare the CPU/TPU, we let the CPU to behave the
        # same as TPU.
        update_freq = distribution.extended.steps_per_run

      training_epochs = 2
      global_batch_size = 64

      ds_batch_size = get_batch_size(global_batch_size, distribution)
      nods_batch_size = get_batch_size(global_batch_size, None)

      ds_input_fn = functools.partial(
          self.get_input_for_dynamic_lr_test,
          x=x_train,
          y=y_train,
          batch_size=ds_batch_size,
          shuffle=False,
          epochs=training_epochs,
          callbacks=[LearningRateBatchScheduler(update_freq)],
          validation_data=(x_train, y_train))

      nods_input_fn = functools.partial(
          self.get_input_for_dynamic_lr_test,
          x=x_train,
          y=y_train,
          batch_size=nods_batch_size,
          shuffle=False,
          epochs=training_epochs,
          callbacks=[LearningRateBatchScheduler(update_freq)],
          validation_data=(x_train, y_train))

      results_with_ds = fit_eval_and_predict(
          initial_weights,
          input_fn=ds_input_fn,
          model_fn=self.get_model,
          experimental_run_tf_function=experimental_run_tf_function,
          distribution=distribution)
      results_without_ds = fit_eval_and_predict(
          initial_weights,
          input_fn=nods_input_fn,
          model_fn=self.get_model,
          experimental_run_tf_function=experimental_run_tf_function,
          distribution=None)
      compare_results(
          results_with_ds, results_without_ds, distribution, testcase=self)
class TestDistributionStrategyEmbeddingModelCorrectnessBase(
    TestDistributionStrategyCorrectnessBase):
  """Base class to test correctness of Keras models with embedding layers."""

  def get_data(self,
               count=(_GLOBAL_BATCH_SIZE * _EVAL_STEPS),
               min_words=5,
               max_words=10,
               max_word_id=19,
               num_classes=2):
    """Generates a synthetic word-id classification dataset.

    Each class gets its own random categorical distribution over the
    vocabulary, so the classes are statistically distinguishable.

    Args:
      count: number of examples to generate.
      min_words: minimum (inclusive) sequence length before padding.
      max_words: maximum (exclusive) sequence length; also the padded length.
      max_word_id: vocabulary size; word ids are drawn from [0, max_word_id).
      num_classes: number of target classes.

    Returns:
      Tuple (x_train, y_train, x_predict) of float32 padded id sequences of
      shape (count, max_words), int32 labels of shape (count, 1), and the
      first _GLOBAL_BATCH_SIZE rows of x_train for predict().
    """
    # One categorical distribution over word ids per class.
    distribution = []
    for _ in range(num_classes):
      dist = np.abs(np.random.randn(max_word_id))
      dist /= np.sum(dist)
      distribution.append(dist)

    features = []
    labels = []
    for _ in range(count):
      label = np.random.randint(0, num_classes, size=1)[0]
      num_words = np.random.randint(min_words, max_words, size=1)[0]
      word_ids = np.random.choice(
          max_word_id, size=num_words, replace=True, p=distribution[label])
      labels.append(label)
      features.append(word_ids)

    # Left-pad all sequences to the same length for the embedding layer.
    features = sequence.pad_sequences(
        features, maxlen=max_words)
    x_train = np.asarray(features, dtype=np.float32)
    y_train = np.asarray(labels, dtype=np.int32).reshape((count, 1))
    x_predict = x_train[:_GLOBAL_BATCH_SIZE]
    return x_train, y_train, x_predict
if __name__ == '__main__':
  # Delegate to the TF test runner when this module is executed directly.
  test.main()
|
{
"content_hash": "c99d80fd03f6d54939bd3943ba4094cf",
"timestamp": "",
"source": "github",
"line_count": 622,
"max_line_length": 105,
"avg_line_length": 35.28456591639871,
"alnum_prop": 0.647104387843441,
"repo_name": "adit-chandra/tensorflow",
"id": "097a550dd20ec79fa06665084a49abd974cd0082",
"size": "22636",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/distribute/keras_correctness_test_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5003"
},
{
"name": "Batchfile",
"bytes": "45988"
},
{
"name": "C",
"bytes": "773694"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "76734263"
},
{
"name": "CMake",
"bytes": "6545"
},
{
"name": "Dockerfile",
"bytes": "81136"
},
{
"name": "Go",
"bytes": "1679107"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "952944"
},
{
"name": "Jupyter Notebook",
"bytes": "567243"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1299322"
},
{
"name": "Makefile",
"bytes": "61397"
},
{
"name": "Objective-C",
"bytes": "104706"
},
{
"name": "Objective-C++",
"bytes": "297753"
},
{
"name": "PHP",
"bytes": "24055"
},
{
"name": "Pascal",
"bytes": "3752"
},
{
"name": "Pawn",
"bytes": "17546"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "38764318"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "7459"
},
{
"name": "Shell",
"bytes": "643787"
},
{
"name": "Smarty",
"bytes": "34727"
},
{
"name": "Swift",
"bytes": "62814"
}
],
"symlink_target": ""
}
|
import mock
import pytest
import time
from collections import OrderedDict
from django.utils import timezone
from addons.osfstorage import settings as osfstorage_settings
from api_tests.utils import create_test_file
from framework.auth import Auth
from osf.management.commands.update_institution_project_counts import update_institution_project_counts
from osf.management.commands.project_to_draft_registration_contributor_sync import retrieve_draft_registrations_to_sync, project_to_draft_registration_contributor_sync
from osf.models import RegistrationSchema
from osf.metrics import InstitutionProjectCounts, UserInstitutionProjectCounts
from osf_tests.factories import (
AuthUserFactory,
InstitutionFactory,
PreprintFactory,
ProjectFactory,
RegistrationFactory,
RegionFactory,
UserFactory,
DraftRegistrationFactory,
)
from osf.utils.permissions import ADMIN, WRITE, READ
from tests.base import DbTestCase
from osf.management.commands.data_storage_usage import (
process_usages,
)
# Using powers of two so that any combination of file sizes will give a unique total
# If a summary value is incorrect, subtract out the values that are correct and convert
# to binary. Each of the 1s will correspond something that wasn't handled properly.
def next_file_size():
    """Yields 1, 2, 4, 8, ... so any combination of sizes sums uniquely.

    If a summary total is wrong, the difference written in binary shows
    exactly which files were mishandled.
    """
    current = 1
    while True:
        yield current
        current = current * 2
class TestDataStorageUsage(DbTestCase):
    """Exercises the data_storage_usage management command end to end.

    Builds nodes/preprints/registrations across several storage regions and
    checks that process_usages() attributes every file version's size to the
    correct summary bucket.
    """

    def setUp(self):
        super(TestDataStorageUsage, self).setUp()
        # Default region used by self.project() when none is supplied.
        self.region_us = RegionFactory(_id='US', name='United States')

    @staticmethod
    def add_file_version(file_to_version, user, size, version=1):
        """Attaches an extra osfstorage version of `size` bytes to a file."""
        file_to_version.create_version(user, {
            'object': '06d80e' + str(version),
            'service': 'cloud',
            osfstorage_settings.WATERBUTLER_RESOURCE: 'osf',
        }, {
            'size': size,
            'contentType': 'img/png'
        }).save()

    def project(self, creator, is_public=True, is_deleted=False, region=None, parent=None):
        """Creates a project whose osfstorage addon is pinned to `region`.

        NOTE(review): the `parent` argument is accepted but never used, so
        callers passing parent= do NOT get a child component — confirm
        whether ProjectFactory(parent=parent) was intended.
        """
        if region is None:
            region = self.region_us
        project = ProjectFactory(creator=creator, is_public=is_public, is_deleted=is_deleted)
        addon = project.get_addon('osfstorage')
        addon.region = region
        addon.save()
        return project

    def registration(self, project, creator, withdrawn=False):
        """Registers `project`; optionally approves a withdrawal of it."""
        schema = RegistrationSchema.objects.first()
        draft_reg = DraftRegistrationFactory(branched_from=project)
        registration = project.register_node(schema, Auth(user=creator), draft_reg)
        registration.is_public = True
        registration.save()

        if withdrawn:
            registration.retract_registration(creator)
            withdrawal = registration.retraction
            token = list(withdrawal.approval_state.values())[0]['approval_token']
            # Patch search indexing so approval does not hit Elasticsearch.
            with mock.patch('osf.models.AbstractNode.update_search'):
                withdrawal.approve_retraction(creator, token)
            withdrawal.save()
        return registration

    # NOTE(review): pytest fixtures are ignored inside unittest.TestCase
    # subclasses (DbTestCase) — these two appear to be dead code.
    @pytest.fixture()
    def component(self, parent, user):
        return ProjectFactory(creator=user, parent=parent)

    @pytest.fixture()
    def project_deleted(self, user):
        return ProjectFactory(creator=user, is_deleted=True)

    @mock.patch('website.settings.ENABLE_ARCHIVER', False)
    def test_data_storage_usage_command(self):
        """Creates one object per summary bucket and checks every total."""
        import logging
        logger = logging.getLogger(__name__)

        # Running totals; every file created below adds its size to the
        # buckets it should be counted in.
        expected_summary_data = OrderedDict([
            ('date', None),
            ('total', 0),
            ('deleted', 0),
            ('registrations', 0),
            ('nd_public_nodes', 0),
            ('nd_private_nodes', 0),
            ('nd_preprints', 0),
            ('nd_supp_nodes', 0),
            ('canada_montreal', 0),
            ('australia_sydney', 0),
            ('germany_frankfurt', 0),
            ('united_states', 0),
        ])
        user = UserFactory()
        user_addon = user.get_addon('osfstorage')
        # NOTE(review): assigns a Region instance to the *_id field — works
        # only if the descriptor accepts model instances; confirm.
        user_addon.default_region_id = self.region_us
        region_ca = RegionFactory(_id='CA-1', name=u'Canada - Montréal')
        region_de = RegionFactory(_id='DE-1', name='Germany - Frankfurt')
        region_au = RegionFactory(_id='AU-1', name='Australia - Sydney')

        # Power-of-two sizes: any incorrect total identifies the culprit.
        project_public_us = self.project(creator=user, is_public=True)
        small_size = next_file_size()
        file_size = next(small_size)
        project_public_us_test_file = create_test_file(
            target=project_public_us,
            user=user,
            size=file_size
        )
        logger.debug(u'Public project, US: {}'.format(file_size))
        expected_summary_data['total'] += file_size
        expected_summary_data['nd_public_nodes'] += file_size
        expected_summary_data['united_states'] += file_size

        # A second version on the same file must also be counted.
        file_size = next(small_size)
        self.add_file_version(
            project_public_us_test_file,
            user=user,
            size=file_size,
        )
        logger.debug(u'Public project file version, US: {}'.format(file_size))
        expected_summary_data['total'] += file_size
        expected_summary_data['nd_public_nodes'] += file_size
        expected_summary_data['united_states'] += file_size

        project_private_au = self.project(creator=user, is_public=False, region=region_au)
        file_size = next(small_size)
        create_test_file(
            target=project_private_au,
            user=user,
            size=file_size
        )
        logger.debug(u'Private project, AU: {}'.format(file_size))
        expected_summary_data['total'] += file_size
        expected_summary_data['nd_private_nodes'] += file_size
        expected_summary_data['australia_sydney'] += file_size

        # Deleted file: counted in total/deleted/region, not node buckets.
        component_private_small_deleted_de = self.project(
            creator=user,
            is_public=False,
            region=region_de,
            parent=project_public_us
        )
        file_size = next(small_size)
        deleted_file = create_test_file(
            target=component_private_small_deleted_de,
            user=user,
            size=file_size,
        )

        logger.debug('Before deletion: {}'.format(deleted_file.target.title))

        deleted_file.delete(user=user, save=True)

        logger.debug(u'Deleted project, DE: {}'.format(file_size))
        expected_summary_data['total'] += file_size
        expected_summary_data['deleted'] += file_size
        expected_summary_data['germany_frankfurt'] += file_size
        logger.debug('After deletion: {}'.format(deleted_file.target.title))

        file_size = next(small_size)
        PreprintFactory(creator=user, file_size=file_size)  # preprint_us
        logger.debug(u'Preprint, US: {}'.format(file_size))
        expected_summary_data['total'] += file_size
        expected_summary_data['nd_preprints'] += file_size
        expected_summary_data['united_states'] += file_size

        # Preprint region follows the creator's default region at creation.
        user_addon.default_region_id = region_ca
        user_addon.save()
        file_size = next(small_size)
        preprint_with_supplement_ca = PreprintFactory(creator=user, file_size=file_size)
        logger.debug(u'Preprint, CA: {}'.format(file_size))
        expected_summary_data['total'] += file_size
        expected_summary_data['nd_preprints'] += file_size
        expected_summary_data['canada_montreal'] += file_size

        user_addon.default_region_id = self.region_us
        user_addon.save()
        supplementary_node_public_au = self.project(creator=user, is_public=True, region=region_au)
        preprint_with_supplement_ca.node = supplementary_node_public_au
        preprint_with_supplement_ca.save()
        file_size = next(small_size)
        create_test_file(
            target=supplementary_node_public_au,
            user=user,
            size=file_size
        )
        logger.debug(u'Public supplemental project of Canadian preprint, US: {}'.format(file_size))
        expected_summary_data['total'] += file_size
        expected_summary_data['nd_supp_nodes'] += file_size
        expected_summary_data['nd_public_nodes'] += file_size
        expected_summary_data['australia_sydney'] += file_size

        # Withdrawn preprints still count toward preprint storage.
        file_size = next(small_size)
        withdrawn_preprint_us = PreprintFactory(creator=user, file_size=file_size)
        withdrawn_preprint_us.date_withdrawn = timezone.now()
        withdrawn_preprint_us.save()
        logger.debug(u'Withdrawn preprint, US: {}'.format(file_size))
        expected_summary_data['total'] += file_size
        expected_summary_data['nd_preprints'] += file_size
        expected_summary_data['united_states'] += file_size

        project_to_register_us = self.project(creator=user, is_public=True, region=self.region_us)

        registration = self.registration(project=project_to_register_us, creator=user)
        file_size = next(small_size)
        create_test_file(
            target=registration,
            user=user,
            size=file_size
        )
        assert registration.get_addon('osfstorage').region == self.region_us
        logger.debug(u'Registration, US: {}'.format(file_size))
        expected_summary_data['total'] += file_size
        expected_summary_data['united_states'] += file_size
        expected_summary_data['registrations'] += file_size

        # Withdrawn registrations also count toward registration storage.
        withdrawal = self.registration(project=project_to_register_us, creator=user, withdrawn=True)
        file_size = next(small_size)
        create_test_file(
            target=withdrawal,
            user=user,
            size=file_size
        )
        logger.debug(u'Withdrawn registration, US: {}'.format(file_size))
        expected_summary_data['total'] += file_size
        expected_summary_data['united_states'] += file_size
        expected_summary_data['registrations'] += file_size

        # page_size=2 forces pagination to be exercised.
        actual_summary_data = process_usages(dry_run=True, page_size=2)

        actual_keys = actual_summary_data.keys()
        for key in actual_summary_data:
            logger.info('Actual field: {}'.format(key))
        expected_keys = expected_summary_data.keys()
        for key in expected_summary_data:
            logger.info('Expected field: {}'.format(key))
        assert actual_keys == expected_keys
        assert len(actual_keys) != 0
        for key in actual_keys:
            if key != 'date':
                # Tuple comparison keeps the failing key name in the output.
                assert (key, expected_summary_data[key]) == (key, actual_summary_data[key])
@pytest.mark.es
@pytest.mark.django_db
class TestInstitutionMetricsUpdate:
    """Tests update_institution_project_counts() against Elasticsearch.

    Per-fixture comments give the expected private/public project counts
    that the assertions at the bottom rely on.
    """

    @pytest.fixture()
    def institution(self):
        # Private: 14, Public: 4
        return InstitutionFactory()

    @pytest.fixture()
    def user1(self, institution):
        # Private: 4, Public: 4 (+1 from user2 fixture)
        user = AuthUserFactory()
        institution.osfuser_set.add(user)
        institution.save()

        # Create 5 affiliated private projects, then delete the last one,
        # leaving 4 counted.
        for i in range(5):
            project = ProjectFactory(creator=user, is_public=False)
            project.affiliated_institutions.add(institution)
            project.save()
        project.delete()

        for i in range(3):
            project = ProjectFactory(creator=user, is_public=True)
            project.affiliated_institutions.add(institution)
            project.save()

        # Unaffiliated projects: must NOT appear in institution counts.
        ProjectFactory(creator=user, is_public=True)
        ProjectFactory(creator=user, is_public=False)

        return user

    @pytest.fixture()
    def user2(self, institution, user1):
        # Private: 10, Public: 1
        user = AuthUserFactory()
        institution.osfuser_set.add(user)
        institution.save()

        for i in range(10):
            project = ProjectFactory(creator=user, is_public=False)
            project.affiliated_institutions.add(institution)
            project.save()

        # The shared public project also counts toward user1's totals.
        for i in range(1):
            project = ProjectFactory(creator=user, is_public=True)
            project.add_contributor(user1)
            project.affiliated_institutions.add(institution)
            project.save()

        return user

    @pytest.fixture()
    def user3(self, institution):
        # Private: 0, Public: 0
        user = AuthUserFactory()
        institution.osfuser_set.add(user)
        institution.save()

        return user

    @pytest.fixture()
    def user4(self):
        # Projects should not be included in results
        user = AuthUserFactory()

        for i in range(3):
            project = ProjectFactory(creator=user, is_public=False)
            project.save()

        for i in range(6):
            project = ProjectFactory(creator=user, is_public=True)
            project.save()

        return user

    def test_update_institution_counts(self, app, institution, user1, user2, user3, user4):
        update_institution_project_counts()
        # Give Elasticsearch time to index the freshly written metrics.
        time.sleep(2)

        user_search = UserInstitutionProjectCounts.get_current_user_metrics(institution)
        user_results = user_search.execute()

        # Ordering by private count yields user3 (0), user1 (4), user2 (10).
        sorted_results = sorted(user_results, key=lambda x: x['private_project_count'])

        user3_record = sorted_results[0]
        user1_record = sorted_results[1]
        user2_record = sorted_results[2]

        assert user1_record['user_id'] == user1._id
        assert user1_record['public_project_count'] == 4
        assert user1_record['private_project_count'] == 4

        assert user2_record['user_id'] == user2._id
        assert user2_record['public_project_count'] == 1
        assert user2_record['private_project_count'] == 10

        assert user3_record['user_id'] == user3._id
        assert user3_record['public_project_count'] == 0
        assert user3_record['private_project_count'] == 0

        institution_results = InstitutionProjectCounts.get_latest_institution_project_document(institution)

        assert institution_results['public_project_count'] == 4
        assert institution_results['private_project_count'] == 14
@pytest.mark.django_db
class TestProjectDraftRegContributorSync:
    """Tests the sync that copies project contributors onto draft
    registrations which have not yet produced a registration."""

    @pytest.fixture()
    def initiator(self):
        # Creator of the project and of every draft registration below.
        return AuthUserFactory()

    @pytest.fixture()
    def draft_reg_contributor(self):
        return AuthUserFactory()

    @pytest.fixture()
    def project_admin_contributor(self):
        return AuthUserFactory()

    @pytest.fixture()
    def project_read_contributor(self):
        return AuthUserFactory()

    @pytest.fixture()
    def project(self, initiator):
        project = ProjectFactory(creator=initiator)
        return project

    @pytest.fixture()
    def active_draft_registration(self, project, initiator):
        # Draft with no registration yet -- eligible for the sync.
        return DraftRegistrationFactory(branched_from=project, initiator=initiator)

    @pytest.fixture()
    def inactive_draft_registration(self, project, initiator):
        # Draft that already produced a registration -- must be skipped.
        draft_reg = DraftRegistrationFactory(branched_from=project, initiator=initiator)
        RegistrationFactory(draft_registration=draft_reg, creator=initiator)
        return draft_reg

    @pytest.fixture()
    def active_draft_registration_multiple_contributor(self, project, initiator, draft_reg_contributor):
        # Draft that already has a second contributor -- the retrieval test
        # below asserts it is excluded from the sync.
        draft_reg = DraftRegistrationFactory(branched_from=project, initiator=initiator)
        draft_reg.add_contributor(draft_reg_contributor, WRITE)
        return draft_reg

    @pytest.fixture()
    def no_project_draft_registration(self, initiator):
        # Draft not branched from the shared project fixture.
        return DraftRegistrationFactory()

    def test_draft_reg_to_sync_retrieval(
            self, app, active_draft_registration, inactive_draft_registration, active_draft_registration_multiple_contributor, no_project_draft_registration):
        # Tests if the function used to retrieve draft registrations to copy project contributors
        # to is limited to those without registrations
        active_unsynced_draft_regs = retrieve_draft_registrations_to_sync()

        assert active_draft_registration in active_unsynced_draft_regs
        assert inactive_draft_registration not in active_unsynced_draft_regs
        assert no_project_draft_registration not in active_unsynced_draft_regs
        assert active_draft_registration_multiple_contributor not in active_unsynced_draft_regs

    def test_project_draft_reg_contributor_sync(
            self, app, initiator, project_admin_contributor,
            project_read_contributor, project, active_draft_registration):
        # Contributors added to the project here because the draft registration should be created with a single contributor (the initiator)
        project.add_contributor(project_admin_contributor, ADMIN)
        project.add_contributor(project_read_contributor, READ)
        # The removal of the initiator from the project ensures that contributors are copied from the
        # project but without overwriting the draft registration contributor permission
        project.remove_contributor(initiator, auth=project_admin_contributor.auth, log=False)

        # Pre-sync sanity checks: project and draft contributor sets differ.
        assert project_admin_contributor in project.contributors.all()
        assert project_read_contributor in project.contributors.all()
        assert initiator not in project.contributors.all()

        assert active_draft_registration.contributors.count() == 1
        assert initiator in active_draft_registration.contributors.all()
        assert project_admin_contributor not in active_draft_registration.contributors.all()
        assert project_read_contributor not in active_draft_registration.contributors.all()

        project_to_draft_registration_contributor_sync()

        # After the sync the draft holds the union of both contributor sets:
        # project contributors were copied in and the initiator was kept.
        assert initiator in active_draft_registration.contributors.all()
        assert project_admin_contributor in active_draft_registration.contributors.all()
        assert project_read_contributor in active_draft_registration.contributors.all()
        assert active_draft_registration.contributors.count() == 3

        # Permissions are copied from the project; the initiator keeps ADMIN.
        assert active_draft_registration.has_permission(initiator, ADMIN)
        assert active_draft_registration.has_permission(project_admin_contributor, ADMIN)
        assert active_draft_registration.has_permission(project_read_contributor, READ)
|
{
"content_hash": "3b8211b6dbee9f847a702db15fc2463b",
"timestamp": "",
"source": "github",
"line_count": 454,
"max_line_length": 167,
"avg_line_length": 39.36784140969163,
"alnum_prop": 0.6543389470150507,
"repo_name": "brianjgeiger/osf.io",
"id": "5589d03ad26f268bc3f8d398dbbf82b7031bf304",
"size": "17898",
"binary": false,
"copies": "6",
"ref": "refs/heads/develop",
"path": "osf_tests/test_management_commands.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "93287"
},
{
"name": "Dockerfile",
"bytes": "5876"
},
{
"name": "HTML",
"bytes": "364479"
},
{
"name": "JavaScript",
"bytes": "1789498"
},
{
"name": "Jupyter Notebook",
"bytes": "41326"
},
{
"name": "Mako",
"bytes": "685055"
},
{
"name": "Python",
"bytes": "11891113"
},
{
"name": "Shell",
"bytes": "2841"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
}
|
from test.base_test_case import BaseTestCase
import os
class TestParseConfigs(BaseTestCase):
    """Sanity checks that the packager configuration points at real folders."""

    def test_folders_exists(self):
        # Every configured location must exist and be a directory.
        folders = (
            self.packager_config.root,
            self.packager_config.get_test_folder(),
            self.packager_config.get_code_folder(),
        )
        for folder in folders:
            self.assertTrue(os.path.exists(folder))
            self.assertTrue(os.path.isdir(folder))
|
{
"content_hash": "d467acbeeb9c6d29bd0e4c157024858e",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 79,
"avg_line_length": 41.642857142857146,
"alnum_prop": 0.7272727272727273,
"repo_name": "nma/coding-pytest-extension",
"id": "e880e73bf0aff61788a9d79903b7ed0ce07ab628",
"size": "583",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_parse_configs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23684"
},
{
"name": "Shell",
"bytes": "40"
}
],
"symlink_target": ""
}
|
from requests import Session, Request
from ..models import Person
class PeopleSvc(object):
    """
    Class that wraps the SWAPI /people endpoint.
    """
    def __init__(self):
        # Every raw Response is retained here for debugging/inspection.
        self.history = []
        self.base_url = 'https://swapi.co/api/people'
        self.session = Session()

    def get_person(self, person_id):
        """
        Fetch a Star Wars person from the /people endpoint.

        :param person_id: int Person ID
        :return: Person model wrapping the API response
        """
        # Build the request via the constructor instead of mutating an
        # empty Request object attribute by attribute.
        req = Request(
            method='GET',
            url='{base_url}/{person_id}'.format(base_url=self.base_url,
                                                person_id=person_id),
        )
        prepped = self.session.prepare_request(req)
        response = self.session.send(prepped)
        self.history.append(response)
        return Person(response)
|
{
"content_hash": "bb10567d6c09a76e7dfb3679cae75aad",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 94,
"avg_line_length": 29.862068965517242,
"alnum_prop": 0.6120092378752887,
"repo_name": "j4ramos/swapi_example",
"id": "26c75ca393f7d924b3fdbcf1a311d62307c0f5ce",
"size": "867",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/services/people_svc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2840"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import functools
import mock
from django.template.loader import render_to_string
from exam import fixture
from sentry.interfaces.base import InterfaceValidationError
from sentry.interfaces.stacktrace import (
Frame, Stacktrace, get_context, slim_frame_data
)
from sentry.models import Event
from sentry.testutils import TestCase
class GetContextTest(TestCase):
    """Coverage for the ``get_context`` helper."""

    def test_works_with_empty_filename(self):
        # Line 0 with a plain string yields one (lineno, line) tuple.
        assert get_context(0, 'hello world') == [(0, 'hello world')]
class StacktraceTest(TestCase):
    """Tests for the ``Stacktrace``/``Frame`` interfaces: input validation,
    filename/address normalization, grouping-hash computation, culprit
    strings and plain-text rendering."""

    @fixture
    def interface(self):
        # Shared two-frame stacktrace used by the legacy-data test below.
        return Stacktrace.to_python(dict(frames=[
            {
                'filename': 'foo/bar.py'
            },
            {
                'filename': 'foo/baz.py',
                'lineno': 1,
                'in_app': True,
            }
        ]))

    def test_legacy_interface(self):
        # Simple test to ensure legacy data works correctly with the ``Frame``
        # objects
        event = self.event
        interface = Stacktrace.to_python(event.data['sentry.interfaces.Stacktrace'])
        assert len(interface.frames) == 1
        assert interface == event.interfaces['sentry.interfaces.Stacktrace']

    def test_requires_filename(self):
        # A frame with no identifying attributes at all is rejected ...
        with self.assertRaises(InterfaceValidationError):
            Stacktrace.to_python(dict(frames=[{}]))

        # ... while a filename alone (with or without lineno) is accepted.
        Stacktrace.to_python(dict(frames=[{
            'filename': 'foo.py',
        }]))
        Stacktrace.to_python(dict(frames=[{
            'lineno': 1,
            'filename': 'foo.py',
        }]))

    def test_requires_frames(self):
        # Missing, empty, or non-list ``frames`` values are all invalid.
        with self.assertRaises(InterfaceValidationError):
            Stacktrace.to_python({})

        with self.assertRaises(InterfaceValidationError):
            Stacktrace.to_python(dict(frames=[]))

        with self.assertRaises(InterfaceValidationError):
            Stacktrace.to_python(dict(frames=1))

    def test_allows_abs_path_without_filename(self):
        interface = Stacktrace.to_python(dict(frames=[{
            'lineno': 1,
            'abs_path': 'foo/bar/baz.py',
        }]))

        # abs_path doubles as the filename when no filename was sent.
        frame = interface.frames[0]
        assert frame.filename == 'foo/bar/baz.py'
        assert frame.abs_path == frame.filename

    def test_coerces_url_filenames(self):
        interface = Stacktrace.to_python(dict(frames=[{
            'lineno': 1,
            'filename': 'http://foo.com/foo.js',
        }]))

        # URL filenames are reduced to their path; the URL moves to abs_path.
        frame = interface.frames[0]
        assert frame.filename == '/foo.js'
        assert frame.abs_path == 'http://foo.com/foo.js'

    def test_does_not_overwrite_filename(self):
        # An explicit filename survives even when abs_path is a URL.
        interface = Stacktrace.to_python(dict(frames=[{
            'lineno': 1,
            'filename': 'foo.js',
            'abs_path': 'http://foo.com/foo.js',
        }]))

        frame = interface.frames[0]
        assert frame.filename == 'foo.js'
        assert frame.abs_path == 'http://foo.com/foo.js'

    def test_ignores_results_with_empty_path(self):
        # A URL with no path component is kept verbatim as the filename.
        interface = Stacktrace.to_python(dict(frames=[{
            'lineno': 1,
            'filename': 'http://foo.com',
        }]))

        frame = interface.frames[0]
        assert frame.filename == 'http://foo.com'
        assert frame.abs_path == frame.filename

    def test_serialize_returns_frames(self):
        interface = Stacktrace.to_python(dict(frames=[{
            'lineno': 1,
            'filename': 'foo.py',
        }]))
        result = interface.to_json()
        assert 'frames' in result

    def test_hash_without_system_frames(self):
        interface = Stacktrace.to_python(dict(frames=[{
            'lineno': 1,
            'filename': 'foo.py',
            'in_app': True,
        }, {
            'lineno': 1,
            'filename': 'bar.py',
            'in_app': None,
        }]))
        # Non-app frames only contribute when system_frames is requested.
        result = interface.get_hash(system_frames=False)
        assert result == ['foo.py', 1]

        result = interface.get_hash(system_frames=True)
        assert result == ['foo.py', 1, 'bar.py', 1]

    def test_compute_hashes(self):
        interface = Stacktrace.to_python(dict(frames=[{
            'lineno': 1,
            'filename': 'foo.py',
            'in_app': True,
        }, {
            'lineno': 1,
            'filename': 'bar.py',
            'in_app': None,
        }]))
        # Both variants are produced: with and without system frames.
        result = interface.compute_hashes('python')
        assert result == [['foo.py', 1, 'bar.py', 1], ['foo.py', 1]]

    def test_get_hash_with_minimal_app_frames(self):
        # A single in-app frame among many system frames yields no
        # app-frames-only hash.
        frames = [{
            'lineno': 1,
            'filename': 'foo.py',
            'in_app': True,
        }] + [{
            'lineno': 1,
            'filename': 'bar.py',
            'in_app': False,
        } for _ in range(11)]
        interface = Stacktrace.to_python(dict(frames=frames))
        result = interface.get_hash(system_frames=False)
        assert not result

    def test_get_hash_with_only_required_vars(self):
        interface = Frame.to_python({
            'lineno': 1,
            'filename': 'foo.py',
        })
        result = interface.get_hash()
        self.assertEquals(result, ['foo.py', 1])

    def test_get_hash_sanitizes_block_functions(self):
        # This is Ruby specific
        interface = Frame.to_python({
            'filename': 'foo.py',
            'function': 'block in _conditional_callback_around_233',
        })

        result = interface.get_hash()
        self.assertEquals(result, ['foo.py', 'block'])

    def test_get_hash_sanitizes_versioned_filenames(self):
        # This is Ruby specific
        interface = Frame.to_python({
            'filename': '/data/foo/releases/20140114151955/app/views/foo.html.erb',
            'context_line': '<% if @hotels.size > 0 %>',
        })

        result = interface.get_hash()
        self.assertEquals(result, [
            '/data/foo/releases/<version>/app/views/foo.html.erb',
            '<% if @hotels.size > 0 %>',
        ])

        interface = Frame.to_python({
            'filename': '20140114151955/app/views/foo.html.erb',
            'context_line': '<% if @hotels.size > 0 %>',
        })

        result = interface.get_hash()
        self.assertEquals(result, [
            '<version>/app/views/foo.html.erb',
            '<% if @hotels.size > 0 %>',
        ])

    def test_get_hash_ignores_java8_lambda_module(self):
        # Generated lambda class names are unstable across runs, so the
        # module collapses to a placeholder.
        interface = Frame.to_python({
            'module': 'foo.bar.Baz$$Lambda$40/1673859467',
            'function': 'call',
        })
        result = interface.get_hash()
        self.assertEquals(result, [
            '<module>',
            'call',
        ])

    def test_get_hash_ignores_java8_lambda_function(self):
        interface = Frame.to_python({
            'module': 'foo.bar.Baz',
            'function': 'lambda$work$1',
        })
        result = interface.get_hash()
        self.assertEquals(result, [
            'foo.bar.Baz',
            '<function>',
        ])

    def test_get_hash_ignores_ENHANCED_spring_classes(self):
        # CGLIB-generated suffixes carry a per-run hash that would break
        # grouping, so each one is replaced by a stable <auto> placeholder.
        interface = Frame.to_python({
            'module': 'invalid.gruml.talkytalkyhub.common.config.'
                      'JipJipConfig$$EnhancerBySpringCGLIB$$1ebdddb0',
            'function': 'jipJipManagementApplication'
        })
        result = interface.get_hash()
        self.assertEquals(result, [
            'invalid.gruml.talkytalkyhub.common.config.JipJipConfig'
            '$$EnhancerBySpringCGLIB$$<auto>',
            'jipJipManagementApplication',
        ])

    def test_get_hash_ignores_extra_ENHANCED_spring_classes(self):
        # Multiple stacked CGLIB suffixes are each replaced individually.
        interface = Frame.to_python({
            'module': 'invalid.gruml.talkytalkyhub.common.config.'
                      'JipJipConfig$$EnhancerBySpringCGLIB$$1ebdddb0'
                      '$$EnhancerBySpringCGLIB$$8219cd38'
                      '$$FastClassBySpringCGLIB$$6c0b35d1',
            'function': 'jipJipManagementApplication'
        })
        result = interface.get_hash()
        self.assertEquals(result, [
            'invalid.gruml.talkytalkyhub.common.config.JipJipConfig'
            '$$EnhancerBySpringCGLIB$$<auto>$$EnhancerBySpringCGLIB$$<auto>'
            '$$FastClassBySpringCGLIB$$<auto>',
            'jipJipManagementApplication',
        ])

    def test_get_hash_ignores_sun_java_generated_methods(self):
        # The numeric suffix of reflection accessors is stripped.
        interface = Frame.to_python({
            'module': 'sun.reflect.GeneratedMethodAccessor12345',
            'function': 'invoke',
        })
        result = interface.get_hash()
        self.assertEquals(result, [
            'sun.reflect.GeneratedMethodAccessor',
            'invoke',
        ])

    def test_get_hash_sanitizes_erb_templates(self):
        # This is Ruby specific
        interface = Frame.to_python({
            'filename': 'foo.html.erb',
            'function': '_foo_html_erb__3327151541118998292_70361296749460',
        })
        result = interface.get_hash()
        self.assertEquals(result, [
            'foo.html.erb', '_foo_html_erb__<anon>_<anon>',
        ])

    def test_get_hash_ignores_filename_if_blob(self):
        # blob: URLs are one-off object URLs and provide no grouping signal.
        interface = Frame.to_python({
            'filename': 'blob:http://example.com/7f7aaadf-a006-4217-9ed5-5fbf8585c6c0',
        })
        result = interface.get_hash()
        self.assertEquals(result, [])

    def test_get_hash_ignores_filename_if_http(self):
        # URL filenames are unstable; the context line is hashed instead.
        interface = Frame.to_python({
            'context_line': 'hello world',
            'filename': 'http://foo.com/foo.py',
            'function': 'test',
        })
        result = interface.get_hash()
        self.assertEquals(result, ['hello world'])

    def test_get_hash_ignores_filename_if_https(self):
        interface = Frame.to_python({
            'context_line': 'hello world',
            'filename': 'https://foo.com/foo.py',
            'function': 'test',
        })
        result = interface.get_hash()
        self.assertEquals(result, ['hello world'])

    def test_get_hash_ignores_filename_if_abs_path_is_http(self):
        # Even a non-URL filename is ignored when abs_path is a URL.
        interface = Frame.to_python({
            'context_line': 'hello world',
            'abs_path': 'https://foo.com/foo.py',
            'function': 'test',
            'filename': 'foo.py',
        })
        result = interface.get_hash()
        self.assertEquals(result, ['hello world'])

    def test_get_hash_uses_module_over_filename(self):
        interface = Frame.to_python({
            'lineno': 1,
            'filename': 'foo.py',
            'module': 'foo'
        })
        result = interface.get_hash()
        self.assertEquals(result, ['foo', 1])

    def test_get_hash_uses_function_over_lineno(self):
        interface = Frame.to_python({
            'lineno': 1,
            'filename': 'foo.py',
            'function': 'bar'
        })
        result = interface.get_hash()
        self.assertEquals(result, ['foo.py', 'bar'])

    def test_get_hash_uses_context_line_over_function(self):
        interface = Frame.to_python({
            'context_line': 'foo bar',
            'lineno': 1,
            'filename': 'foo.py',
            'function': 'bar'
        })
        result = interface.get_hash()
        self.assertEquals(result, ['foo.py', 'foo bar'])

    def test_get_hash_discards_seemingly_useless_stack(self):
        # A single anonymous frame coming from a URL yields an empty hash.
        interface = Stacktrace.to_python({
            'frames': [{
                'context_line': '<HTML>',
                'lineno': 1,
                'abs_path': 'http://example.com/foo',
                'filename': 'foo',
                'function': '?',
            }],
        })
        result = interface.get_hash()
        assert result == []

    def test_get_hash_does_not_discard_non_urls(self):
        # The same shape with a non-URL abs_path still hashes.
        interface = Stacktrace.to_python({
            'frames': [{
                'context_line': '<HTML>',
                'lineno': 1,
                'abs_path': 'foo',
                'filename': 'foo',
                'function': '?',
            }],
        })
        result = interface.get_hash()
        assert result != []

    def test_get_hash_excludes_single_frame_urls(self):
        """
        Browser JS will often throw errors (from inlined code in an HTML page)
        which contain only a single frame, no function name, and have the HTML
        document as the filename.

        In this case the hash is often not usable as the context cannot be
        trusted and the URL is dynamic.
        """
        interface = Stacktrace.to_python({
            'frames': [{
                'context_line': 'hello world',
                'abs_path': 'http://foo.com/bar/',
                'lineno': 107,
                'filename': '/bar/',
                'module': '<unknown module>',
            }],
        })
        result = interface.get_hash()
        assert result == []

    def test_cocoa_culprit(self):
        stacktrace = Stacktrace.to_python(dict(frames=[
            {
                'filename': 'foo/baz.c',
                'package': '/foo/bar/baz.dylib',
                'lineno': 1,
                'in_app': True,
                'function': '-[CRLCrashAsyncSafeThread crash]',
            }
        ]))
        assert stacktrace.get_culprit_string(platform='cocoa') == '-[CRLCrashAsyncSafeThread crash]'

    def test_emoji_culprit(self):
        # Non-ASCII module/function names must survive culprit rendering.
        stacktrace = Stacktrace.to_python(dict(frames=[
            {
                'filename': 'foo/baz.c',
                'package': '/foo/bar/baz.dylib',
                'module': u'\U0001f62d',
                'lineno': 1,
                'in_app': True,
                'function': u'\U0001f60d',
            }
        ]))
        assert stacktrace.get_culprit_string(platform='javascript') == u'\U0001f60d(\U0001f62d)'

    def test_exclude_libswiftCore_from_in_app(self):
        # Frames from libswiftCore are forced out of in_app even when the
        # incoming payload claims in_app=True.
        stacktrace = Stacktrace.to_python(dict(frames=[
            {
                'filename': 'foo/baz.c',
                'package': '/foo/bar/libswiftCore.dylib',
                'lineno': 1,
                'in_app': True,
                'function': 'fooBar',
            }
        ]))
        assert stacktrace.frames[0].in_app is False

    def test_cocoa_strict_stacktrace(self):
        stacktrace = Stacktrace.to_python(dict(frames=[
            {
                'filename': 'foo/baz.c',
                'package': '/foo/bar/libswiftCore.dylib',
                'lineno': 1,
                'in_app': False,
                'function': 'fooBar',
            },
            {
                'package': '/foo/bar/MyApp',
                'in_app': True,
                'function': 'fooBar2',
            },
            {
                'filename': 'Mycontroller.swift',
                'package': '/foo/bar/MyApp',
                'in_app': True,
                'function': '-[CRLCrashAsyncSafeThread crash]',
            }
        ]))
        assert stacktrace.get_culprit_string(platform='cocoa') == '-[CRLCrashAsyncSafeThread crash]'

    def test_get_hash_does_not_group_different_js_errors(self):
        # A snipped context line from a URL frame gives no usable hash.
        interface = Stacktrace.to_python({
            'frames': [{
                'context_line': '{snip}',
                'lineno': 20,
                'filename': 'https://foo.com/index.js',
                'function': '?',
            }],
        })
        result = interface.get_hash()
        assert result == []

    def test_get_hash_uses_symbol_instead_of_function(self):
        interface = Frame.to_python({
            'module': 'libfoo',
            'function': 'int main()',
            'symbol': '_main',
        })
        result = interface.get_hash()
        self.assertEquals(result, [
            'libfoo',
            '_main',
        ])

    def test_get_hash_skips_symbol_if_unknown(self):
        # An unknown symbol ('?') falls back to the function name.
        interface = Frame.to_python({
            'module': 'libfoo',
            'function': 'main',
            'symbol': '?',
        })
        result = interface.get_hash()
        self.assertEquals(result, [
            'libfoo',
            'main',
        ])

    @mock.patch('sentry.interfaces.stacktrace.Stacktrace.get_stacktrace')
    def test_to_string_returns_stacktrace(self, get_stacktrace):
        # to_string() must delegate to get_stacktrace with these defaults.
        event = mock.Mock(spec=Event())
        interface = Stacktrace(frames=[])
        result = interface.to_string(event)
        get_stacktrace.assert_called_once_with(event, system_frames=False, max_frames=10)
        self.assertEquals(result, get_stacktrace.return_value)

    @mock.patch('sentry.interfaces.stacktrace.is_newest_frame_first', mock.Mock(return_value=False))
    @mock.patch('sentry.interfaces.stacktrace.Stacktrace.get_stacktrace')
    def test_get_traceback_response(self, get_stacktrace):
        # Traceback text is the event message plus the rendered stacktrace.
        event = mock.Mock(spec=Event())
        event.message = 'foo'
        get_stacktrace.return_value = 'bar'
        interface = Stacktrace.to_python(dict(frames=[{'lineno': 1, 'filename': 'foo.py'}]))
        result = interface.get_traceback(event)
        get_stacktrace.assert_called_once_with(event, newest_first=None)
        self.assertEquals(result, 'foo\n\nbar')

    @mock.patch('sentry.interfaces.stacktrace.is_newest_frame_first', mock.Mock(return_value=False))
    def test_get_stacktrace_with_only_filename(self):
        event = mock.Mock(spec=Event())
        interface = Stacktrace.to_python(dict(frames=[{'filename': 'foo'}, {'filename': 'bar'}]))
        result = interface.get_stacktrace(event)
        self.assertEquals(result, 'Stacktrace (most recent call last):\n\n File "foo"\n File "bar"')

    @mock.patch('sentry.interfaces.stacktrace.is_newest_frame_first', mock.Mock(return_value=False))
    def test_get_stacktrace_with_module(self):
        event = mock.Mock(spec=Event())
        interface = Stacktrace.to_python(dict(frames=[{'module': 'foo'}, {'module': 'bar'}]))
        result = interface.get_stacktrace(event)
        self.assertEquals(result, 'Stacktrace (most recent call last):\n\n Module "foo"\n Module "bar"')

    @mock.patch('sentry.interfaces.stacktrace.is_newest_frame_first', mock.Mock(return_value=False))
    def test_get_stacktrace_with_filename_and_function(self):
        event = mock.Mock(spec=Event())
        interface = Stacktrace.to_python(dict(frames=[{'filename': 'foo', 'function': 'biz'}, {'filename': 'bar', 'function': 'baz'}]))
        result = interface.get_stacktrace(event)
        self.assertEquals(result, 'Stacktrace (most recent call last):\n\n File "foo", in biz\n File "bar", in baz')

    @mock.patch('sentry.interfaces.stacktrace.is_newest_frame_first', mock.Mock(return_value=False))
    def test_get_stacktrace_with_filename_function_lineno_and_context(self):
        event = mock.Mock(spec=Event())
        interface = Stacktrace.to_python(dict(frames=[
            {'filename': 'foo', 'function': 'biz', 'lineno': 3, 'context_line': ' def foo(r):'},
            {'filename': 'bar', 'function': 'baz', 'lineno': 5, 'context_line': ' return None'},
        ]))
        result = interface.get_stacktrace(event)
        self.assertEquals(result, 'Stacktrace (most recent call last):\n\n File "foo", line 3, in biz\n def foo(r):\n File "bar", line 5, in baz\n return None')

    def test_bad_input(self):
        # Non-string values for string fields, and '?' functions, are
        # rejected at validation time.
        with self.assertRaises(InterfaceValidationError):
            Frame.to_python({
                'filename': 1,
            })

        with self.assertRaises(InterfaceValidationError):
            Frame.to_python({
                'filename': 'foo',
                'abs_path': 1,
            })

        with self.assertRaises(InterfaceValidationError):
            Frame.to_python({
                'function': 1,
            })

        with self.assertRaises(InterfaceValidationError):
            Frame.to_python({
                'module': 1,
            })

        with self.assertRaises(InterfaceValidationError):
            Frame.to_python({
                'function': '?',
            })

    def test_context_with_nan(self):
        # Non-finite float vars are replaced with placeholder strings.
        self.assertEquals(
            Frame.to_python({
                'filename': 'x',
                'vars': {'x': float('inf')},
            }).vars,
            {'x': '<inf>'},
        )
        self.assertEquals(
            Frame.to_python({
                'filename': 'x',
                'vars': {'x': float('-inf')},
            }).vars,
            {'x': '<-inf>'},
        )
        self.assertEquals(
            Frame.to_python({
                'filename': 'x',
                'vars': {'x': float('nan')},
            }).vars,
            {'x': '<nan>'},
        )

    def test_address_normalization(self):
        # Numeric addresses (int or decimal string) are normalized to hex
        # strings: 123456 -> 0x1e240, '123450' -> 0x1e23a.
        interface = Frame.to_python({
            'lineno': 1,
            'filename': 'blah.c',
            'function': 'main',
            'instruction_addr': 123456,
            'symbol_addr': '123450',
            'image_addr': '0x0',
        })
        assert interface.instruction_addr == '0x1e240'
        assert interface.symbol_addr == '0x1e23a'
        assert interface.image_addr == '0x0'
class SlimFrameDataTest(TestCase):
    """Tests for slim_frame_data(), which strips heavy per-frame data
    (vars and context lines) from the middle of long stacktraces."""

    def test_under_max(self):
        # Nothing is trimmed when the frame count is within the limit.
        stacktrace = Stacktrace.to_python({'frames': [{'filename': 'foo'}]})
        slim_frame_data(stacktrace, 4)
        assert len(stacktrace.frames) == 1
        assert not stacktrace.frames_omitted

    def test_over_max(self):
        frames = [
            {
                'filename': 'frame %d' % index,
                'vars': {'foo': 'bar'},
                'context_line': 'b',
                'pre_context': ['a'],
                'post_context': ['c'],
            }
            for index in range(5)
        ]
        stacktrace = Stacktrace.to_python({'frames': frames})
        slim_frame_data(stacktrace, 4)

        # All frames are retained ...
        assert len(stacktrace.frames) == 5

        # ... the two head frames keep their heavy data ...
        for frame, index in zip(stacktrace.frames[:2], range(2)):
            assert frame.filename == 'frame %d' % index
            assert frame.vars is not None
            assert frame.pre_context is not None
            assert frame.post_context is not None

        # ... as do the tail frames ...
        for frame, index in zip(stacktrace.frames[3:], range(3, 5)):
            assert frame.filename == 'frame %d' % index
            assert frame.vars is not None
            assert frame.pre_context is not None
            assert frame.post_context is not None

        # ... while the middle frame is stripped.
        middle = stacktrace.frames[2]
        assert middle.filename == 'frame 2'
        assert not middle.vars
        assert not middle.pre_context
        assert not middle.post_context
def test_java_frame_rendering():
    """Renders representative java frames through the shared template and
    checks the produced ``at ...`` lines."""
    def rendered(frame):
        return render_to_string('sentry/partial/frames/java.txt', frame).strip()

    # This is the ideal case.
    assert rendered({
        'module': 'com.getsentry.example.Example',
        'function': 'test',
        'filename': 'Example.java',
        'lineno': 1,
    }) == 'at com.getsentry.example.Example.test(Example.java:1)'

    # Legacy support for frames without filename.
    assert rendered({
        'module': 'com.getsentry.example.Example',
        'function': 'test',
        'lineno': 1,
    }) == 'at com.getsentry.example.Example.test'

    # (This shouldn't happen, but...)
    assert rendered({
        'module': 'com.getsentry.example.Example',
        'function': 'test',
        'filename': 'foo/bar/Example.java',
        'lineno': 1,
    }) == 'at com.getsentry.example.Example.test(Example.java:1)'

    # Native methods don't have line numbers.
    assert rendered({
        'function': 'test',
        'filename': 'Example.java',
        'lineno': -2,
    }) == 'at test(Example.java)'

    assert rendered({
        'function': 'test',
        'filename': 'Example.java',
        'lineno': 1,
    }) == 'at test(Example.java:1)'
|
{
"content_hash": "d0a281048cbc39dd000a6fa6be340096",
"timestamp": "",
"source": "github",
"line_count": 665,
"max_line_length": 168,
"avg_line_length": 35.2796992481203,
"alnum_prop": 0.5407271642299987,
"repo_name": "BuildingLink/sentry",
"id": "30f09923937c45b1a16f3f1b4a1d74bde53c2c3f",
"size": "23486",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/sentry/interfaces/test_stacktrace.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "259940"
},
{
"name": "HTML",
"bytes": "297498"
},
{
"name": "JavaScript",
"bytes": "1051088"
},
{
"name": "Lua",
"bytes": "45617"
},
{
"name": "Makefile",
"bytes": "6255"
},
{
"name": "Python",
"bytes": "14120672"
},
{
"name": "Ruby",
"bytes": "4084"
},
{
"name": "Shell",
"bytes": "793"
}
],
"symlink_target": ""
}
|
# (row_block_size, e_block_size, f_block_size) triples for which an explicit
# template instantiation of PartitionedMatrixView is generated; the final
# all-dynamic entry is the fallback used when no static sizes match.
SPECIALIZATIONS = [(2, 2, 2),
                   (2, 2, 3),
                   (2, 2, 4),
                   (2, 2, "Eigen::Dynamic"),
                   (2, 3, 3),
                   (2, 3, 4),
                   (2, 3, 9),
                   (2, 3, "Eigen::Dynamic"),
                   (2, 4, 3),
                   (2, 4, 4),
                   (2, 4, 8),
                   (2, 4, 9),
                   (2, 4, "Eigen::Dynamic"),
                   (2, "Eigen::Dynamic", "Eigen::Dynamic"),
                   (4, 4, 2),
                   (4, 4, 3),
                   (4, 4, 4),
                   (4, 4, "Eigen::Dynamic"),
                   ("Eigen::Dynamic", "Eigen::Dynamic", "Eigen::Dynamic")]
HEADER = """// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2013 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
//
// Template specialization of PartitionedMatrixView.
//
// ========================================
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
// This file is generated using generate_partitioned_matrix_view_specializations.py.
// Editing it manually is not recommended.
"""
DYNAMIC_FILE = """
#include "ceres/partitioned_matrix_view_impl.h"
#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
template class PartitionedMatrixView<%s, %s, %s>;
} // namespace internal
} // namespace ceres
"""
SPECIALIZATION_FILE = """
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"
#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
template class PartitionedMatrixView<%s, %s, %s>;
} // namespace internal
} // namespace ceres
#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION
"""
FACTORY_FILE_HEADER = """
#include "ceres/linear_solver.h"
#include "ceres/partitioned_matrix_view.h"
#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
PartitionedMatrixViewBase*
PartitionedMatrixViewBase::Create(const LinearSolver::Options& options,
const BlockSparseMatrix& matrix) {
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
"""
FACTORY_CONDITIONAL = """ if ((options.row_block_size == %s) &&
(options.e_block_size == %s) &&
(options.f_block_size == %s)) {
return new PartitionedMatrixView<%s, %s, %s>(
matrix, options.elimination_groups[0]);
}
"""
FACTORY_FOOTER = """
#endif
VLOG(1) << "Template specializations not found for <"
<< options.row_block_size << ","
<< options.e_block_size << ","
<< options.f_block_size << ">";
return new PartitionedMatrixView<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic>(
matrix, options.elimination_groups[0]);
};
} // namespace internal
} // namespace ceres
"""
def SuffixForSize(size):
    """Return the filename suffix for a block size ("d" for dynamic sizes)."""
    return "d" if size == "Eigen::Dynamic" else str(size)
def SpecializationFilename(prefix, row_block_size, e_block_size, f_block_size):
    """Build the output filename stem for one (row, e, f) specialization.

    E.g. ("generated/foo", 2, 3, "Eigen::Dynamic") -> "generated/foo_2_3_d".
    """
    # A list comprehension is used instead of map() so that the list
    # concatenation also works on Python 3, where map() returns an
    # iterator that cannot be added to a list.
    return "_".join([prefix] + [SuffixForSize(size) for size in
                                (row_block_size, e_block_size, f_block_size)])
def Specialize():
    """
    Generate specialization code and the conditionals to instantiate it.

    Writes one .cc file per entry in SPECIALIZATIONS plus the factory
    dispatch code in partitioned_matrix_view.cc.
    """
    # "with" guarantees the output files are flushed and closed even if a
    # write fails partway through (the original leaked handles on error).
    with open("partitioned_matrix_view.cc", "w") as f:
        f.write(HEADER)
        f.write(FACTORY_FILE_HEADER)

        for row_block_size, e_block_size, f_block_size in SPECIALIZATIONS:
            output = SpecializationFilename("generated/partitioned_matrix_view",
                                            row_block_size,
                                            e_block_size,
                                            f_block_size) + ".cc"

            # The fully dynamic specialization uses the unguarded template;
            # all others are wrapped in CERES_RESTRICT_SCHUR_SPECIALIZATION.
            template = SPECIALIZATION_FILE
            if (row_block_size == "Eigen::Dynamic" and
                e_block_size == "Eigen::Dynamic" and
                f_block_size == "Eigen::Dynamic"):
                template = DYNAMIC_FILE

            with open(output, "w") as fptr:
                fptr.write(HEADER)
                fptr.write(template % (row_block_size, e_block_size, f_block_size))

            f.write(FACTORY_CONDITIONAL % (row_block_size,
                                           e_block_size,
                                           f_block_size,
                                           row_block_size,
                                           e_block_size,
                                           f_block_size))
        f.write(FACTORY_FOOTER)
# Run the generator when invoked as a script (no arguments expected).
if __name__ == "__main__":
    Specialize()
|
{
"content_hash": "a8fb805f979bbfdc9ba7aab43d81f0fc",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 84,
"avg_line_length": 34.1731843575419,
"alnum_prop": 0.5993133889161354,
"repo_name": "ikoruk/ceres-solver",
"id": "a352d29203b535ad8010d6d73e8ec53f7285d7b3",
"size": "8457",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "internal/ceres/generate_partitioned_matrix_view_specializations.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "8570"
},
{
"name": "C++",
"bytes": "4211467"
},
{
"name": "CMake",
"bytes": "128493"
},
{
"name": "Makefile",
"bytes": "13400"
},
{
"name": "Python",
"bytes": "19673"
},
{
"name": "Ruby",
"bytes": "3254"
},
{
"name": "Shell",
"bytes": "4839"
}
],
"symlink_target": ""
}
|
from unittest import TestCase
import plotly.graph_objs as go
import pytest
class TestGraphObjConstructor(TestCase):
    """Exercise the accepted (and rejected) ways of constructing a graph
    object: keyword args, dicts, magic-underscore kwargs, copy-construction,
    and the precedence of explicit kwargs over positional properties."""

    def test_kwarg(self):
        marker = go.scatter.Marker(color="green")
        expected = {"color": "green"}
        self.assertEqual(marker.to_plotly_json(), expected)

    def test_valid_arg_dict(self):
        marker = go.scatter.Marker(dict(color="green"))
        expected = {"color": "green"}
        self.assertEqual(marker.to_plotly_json(), expected)

    def test_valid_underscore_kwarg(self):
        # line_color is the "magic underscore" shorthand for line.color.
        marker = go.scatter.Marker(line_color="green")
        expected = {"line": {"color": "green"}}
        self.assertEqual(marker.to_plotly_json(), expected)

    def test_valid_arg_obj(self):
        # Copy-construction from another Marker instance.
        source = go.scatter.Marker(color="green")
        marker = go.scatter.Marker(source)
        self.assertEqual(marker.to_plotly_json(), {"color": "green"})

    def test_kwarg_takes_precedence(self):
        marker = go.scatter.Marker(
            dict(color="green", size=12), color="blue", opacity=0.6
        )
        expected = {"color": "blue", "size": 12, "opacity": 0.6}
        self.assertEqual(marker.to_plotly_json(), expected)

    def test_invalid_kwarg(self):
        with pytest.raises(ValueError):
            go.scatter.Marker(bogus=[1, 2, 3])

    def test_invalid_arg(self):
        with pytest.raises(ValueError):
            go.scatter.Marker([1, 2, 3])

    def test_valid_arg_with_invalid_key_name(self):
        with pytest.raises(ValueError):
            go.scatter.Marker({"bogus": 12})

    def test_valid_arg_with_invalid_key_value(self):
        with pytest.raises(ValueError):
            go.scatter.Marker({"color": "bogus"})
|
{
"content_hash": "018df39d767a5e27eaa43114df8f7dbd",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 86,
"avg_line_length": 33.422222222222224,
"alnum_prop": 0.613031914893617,
"repo_name": "plotly/python-api",
"id": "09fa78857b8bc3cfed5872cbdf2b42c059c3cd1b",
"size": "1504",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/tests/test_core/test_graph_objs/test_constructor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
"""
CNI implementation
Demultiplexes on the CNI_COMMAND and runs the necessary operation
"""
import argparse
import inspect
import json
import logging
import os
import sys
# set parent directory in sys.path
cfile = os.path.abspath(inspect.getfile(inspect.currentframe())) # nopep8
sys.path.append(os.path.dirname(os.path.dirname(cfile))) # nopep8
from common.cni import Cni as Cni
from common.veth import CniVEthPair as CniVEthPair
from common.macvlan import CniMacVlan as CniMacVlan
from contrail.vrouter import VRouter as VRouter
from contrail.vrouter import Error as Error
# Error codes
# Reported (via contrail.vrouter.Error) when CNI_COMMAND is not a verb this
# plugin supports.
CONTRAIL_CNI_UNSUPPORTED_CMD = 501
# logger for the file
# Initialized in ContrailCni.__init__ once logging has been configured.
logger = None
class ContrailCni():
    """Contrail CNI plugin driver.

    Reads the CNI configuration from STDIN (or a file), configures logging,
    then demultiplexes on the CNI command (add/del/version plus the Contrail
    debug commands get/poll) and delegates to the VRouter/Cni helpers to
    wire a virtual interface into the container.
    """

    # Additional CNI commands supported by Contrail. Used in debugging and
    # development cycles
    CONTRAIL_CNI_CMD_GET = 'get'
    CONTRAIL_CNI_CMD_POLL = 'poll'
    # Container orchestrator modes
    CONTRAIL_CNI_MODE_K8S = "k8s"
    CONTRAIL_CNI_MODE_MESOS = "mesos"
    # Type of virtual interface to be created for container
    CONTRAIL_VIF_TYPE_VETH = "veth"
    CONTRAIL_VIF_TYPE_MACVLAN = "macvlan"
    # In case of macvlan, the container interfaces will run as sub-interface
    # to interface on host network-namespace. Name of the interface inside
    # host network-namespace is defined below
    CONTRAIL_PARENT_INTERFACE = "eth0"
    # Logging parameters
    CONTRAIL_LOG_FILE = '/var/log/contrail/cni/opencontrail.log'
    CONTRAIL_LOG_LEVEL = 'WARNING'

    def __init__(self):
        """Parse CLI args and STDIN config, set up logging and helpers."""
        # Defaults; overridden by the 'contrail' section of the CNI config.
        self.log_file = ContrailCni.CONTRAIL_LOG_FILE
        self.log_level = ContrailCni.CONTRAIL_LOG_LEVEL
        self.mode = ContrailCni.CONTRAIL_CNI_MODE_K8S
        self.vif_type = ContrailCni.CONTRAIL_VIF_TYPE_VETH
        self.parent_interface = ContrailCni.CONTRAIL_PARENT_INTERFACE
        self.conf_file = None
        self.stdin_string = None
        self.args_uuid = None
        # Read CLI arguments
        self._get_params_from_cli()
        # Get contrail specific parameters
        self._get_params()
        # Get logging parameters and configure logging
        self._configure_logging()
        global logger
        logger = logging.getLogger('contrail-cni')
        self.vrouter = VRouter(self.stdin_string)
        self.cni = Cni(self.stdin_string)
        self.cni.update(self.args_uuid, None)
        return

    # Read parameters passed as cli-arguments
    def _get_params_from_cli(self):
        """Parse CLI args; may override CNI_COMMAND, config file and UUID."""
        parser = argparse.ArgumentParser(description='CNI Arguments')
        parser.add_argument('-c', '--command',
                            help='CNI command add/del/version/get/poll')
        parser.add_argument('-v', '--version', action='version', version='0.1')
        parser.add_argument('-f', '--file', help='Contrail CNI config file')
        parser.add_argument('-u', '--uuid', help='Container UUID')
        args = parser.parse_args()
        # Override CNI_COMMAND environment
        if args.command is not None:
            os.environ['CNI_COMMAND'] = args.command
        # Set UUID from argument. If valid-uuid is found, it will overwritten
        # later. Useful in case of UT where valid uuid for pod cannot be found
        self.args_uuid = args.uuid
        self.conf_file = args.file
        return

    @staticmethod
    def parse_mode(mode):
        """Normalize an orchestrator mode string; unknown values map to k8s."""
        if mode.lower() == ContrailCni.CONTRAIL_CNI_MODE_K8S:
            return ContrailCni.CONTRAIL_CNI_MODE_K8S
        if mode.lower() == ContrailCni.CONTRAIL_CNI_MODE_MESOS:
            return ContrailCni.CONTRAIL_CNI_MODE_MESOS
        return ContrailCni.CONTRAIL_CNI_MODE_K8S

    @staticmethod
    def parse_vif_type(vif_type):
        """Normalize a vif-type string; unknown values map to veth."""
        if vif_type.lower() == ContrailCni.CONTRAIL_VIF_TYPE_VETH:
            return ContrailCni.CONTRAIL_VIF_TYPE_VETH
        if vif_type.lower() == ContrailCni.CONTRAIL_VIF_TYPE_MACVLAN:
            return ContrailCni.CONTRAIL_VIF_TYPE_MACVLAN
        return ContrailCni.CONTRAIL_VIF_TYPE_VETH

    def _get_params(self):
        """Read the CNI config JSON and pick up the 'contrail' overrides."""
        # Read config file from STDIN or optionally from a file
        if self.conf_file:
            with open(self.conf_file, 'r') as f:
                self.stdin_string = f.read()
        else:
            self.stdin_string = sys.stdin.read()
        self.stdin_json = json.loads(self.stdin_string)
        contrail_json = self.stdin_json.get('contrail')
        if contrail_json is None:
            return
        # PEP 8: compare against None with "is not", not "!=".
        if contrail_json.get('log-file') is not None:
            self.log_file = contrail_json['log-file']
        if contrail_json.get('log-level') is not None:
            self.log_level = contrail_json['log-level']
        if contrail_json.get('mode') is not None:
            self.mode = self.parse_mode(contrail_json['mode'])
        if contrail_json.get('vif-type') is not None:
            self.vif_type = self.parse_vif_type(contrail_json['vif-type'])
        if contrail_json.get('parent-interface') is not None:
            self.parent_interface = contrail_json['parent-interface']
        return

    def _configure_logging(self):
        """Configure the root logger from the resolved file/level settings."""
        time_format = '%(asctime)s:%(name)s:%(levelname)s:%(message)s '
        date_format = '%m/%d/%Y %I:%M:%S %p '
        logging.basicConfig(filename=self.log_file,
                            level=self.log_level.upper(),
                            format=time_format, datefmt=date_format)
        return

    def build_response(self, vr_resp):
        """Emit the CNI response built from a VRouter reply dict."""
        self.cni.build_response(vr_resp['ip-address'], vr_resp['plen'],
                                vr_resp['gateway'], vr_resp['dns-server'])
        return

    def get_cmd(self):
        """Debug command: fetch current interface config from VRouter."""
        resp = self.vrouter.get_cmd(self.cni.container_uuid,
                                    self.cni.container_vn)
        return self.build_response(resp)

    def poll_cmd(self):
        """Debug command: poll VRouter until interface config is available."""
        resp = self.vrouter.poll_cmd(self.cni.container_uuid,
                                     self.cni.container_vn)
        return self.build_response(resp)

    def _make_interface(self, mac, vlan_tag):
        """Create the interface object for the configured vif-type.

        Returns (interface-object, host-side interface name).
        """
        intf = None
        if self.vif_type == ContrailCni.CONTRAIL_VIF_TYPE_MACVLAN:
            # macvlan runs as a sub-interface of an interface in the host
            # network-namespace.
            host_ifname = self.parent_interface
            intf = CniMacVlan(self.cni, mac, host_ifname, vlan_tag)
        else:
            intf = CniVEthPair(self.cni, mac)
            host_ifname = intf.host_ifname
        return intf, host_ifname

    def add_cmd(self):
        '''
        ADD handler for a container
        - Pre-fetch interface configuration from VRouter.
          - Gets MAC address for the interface
          - In case of sub-interface, gets VLAN-Tag for the interface
        - Create interface based on the "mode"
        - Invoke Add handler from VRouter module
        - Update interface with configuration got from VRouter
          - Configures IP address
          - Configures routes
          - Bring-up the interface
        - stdout CNI response
        '''
        # Pre-fetch initial configuration for the interface from vrouter
        # This will give MAC address for the interface and in case of
        # VMI sub-interface, we will also get the vlan-tag
        cfg = self.vrouter.poll_cfg_cmd(self.cni.container_uuid,
                                        self.cni.container_vn)
        # Create the interface object
        intf, host_ifname = self._make_interface(cfg.get('mac-address'),
                                                 cfg.get('vlan-id'))
        # Create the interface both in host-os and inside container
        intf.create_interface()
        # Inform vrouter about interface-add. The interface inside container
        # must be created by this time
        resp = self.vrouter.add_cmd(self.cni.container_uuid,
                                    self.cni.container_id,
                                    self.cni.container_name,
                                    None, host_ifname,
                                    self.cni.container_ifname,
                                    self.cni.container_vn)
        # Configure the interface based on config received above
        intf.configure_interface(resp['ip-address'], resp['plen'],
                                 resp['gateway'])
        # Build CNI response and print on stdout
        return self.build_response(resp)

    def delete_cmd(self):
        '''
        DEL handler for a container
        - Delete veth pair
        - Invoke Delete handler from VRouter module
        - stdout VRouter response
        '''
        # Create the interface object (MAC is irrelevant for deletion)
        intf, host_ifname = self._make_interface('00:00:00:00:00:00', None)
        # Delete the interface
        intf.delete_interface()
        # Inform VRouter about interface delete
        self.vrouter.delete_cmd(self.cni.container_uuid,
                                self.cni.container_vn)
        self.cni.delete_response()
        return

    def Version(self):
        '''
        Return Version
        '''
        self.cni.version_response()
        return

    def Run(self):
        '''
        main method for CNI plugin: dispatch on the CNI command
        '''
        # Hoist the repeated lower-cased command; handlers print their own
        # responses, so their return values are intentionally ignored.
        command = self.cni.command.lower()
        if command == Cni.CNI_CMD_VERSION:
            self.Version()
        elif command == Cni.CNI_CMD_ADD:
            self.add_cmd()
        elif (command == Cni.CNI_CMD_DELETE or
              command == Cni.CNI_CMD_DEL):
            self.delete_cmd()
        elif command == ContrailCni.CONTRAIL_CNI_CMD_GET:
            self.get_cmd()
        elif command == ContrailCni.CONTRAIL_CNI_CMD_POLL:
            self.poll_cmd()
        else:
            raise Error(CONTRAIL_CNI_UNSUPPORTED_CMD,
                        'Invalid command ' + self.cni.command)
        return

    def log(self):
        """Log the resolved plugin parameters plus Cni/VRouter state."""
        logger.debug('mode = ' + self.mode + ' vif-type = ' + self.vif_type +
                     ' parent-interface = ' + self.parent_interface)
        self.cni.log()
        self.vrouter.log()
        return
|
{
"content_hash": "1e681f6224a0fb6a810354986a57610f",
"timestamp": "",
"source": "github",
"line_count": 269,
"max_line_length": 79,
"avg_line_length": 37.174721189591075,
"alnum_prop": 0.5998,
"repo_name": "rombie/contrail-controller",
"id": "ebb44668caa592222695c9b6242d7d6546aa2959",
"size": "10114",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/container/cni/cni/contrail/contrail_cni.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "722850"
},
{
"name": "C++",
"bytes": "22461123"
},
{
"name": "GDB",
"bytes": "39260"
},
{
"name": "Go",
"bytes": "59593"
},
{
"name": "Java",
"bytes": "91653"
},
{
"name": "Lua",
"bytes": "13345"
},
{
"name": "PowerShell",
"bytes": "2391"
},
{
"name": "Python",
"bytes": "7791777"
},
{
"name": "Roff",
"bytes": "41295"
},
{
"name": "Ruby",
"bytes": "13596"
},
{
"name": "Shell",
"bytes": "52086"
}
],
"symlink_target": ""
}
|
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the duty-scheduling fields from Profile and remove DutyTime."""

    dependencies = [
        ('userprofile', '0009_merge_20180623_1742'),
    ]

    operations = [
        # Remove the per-profile duty flags before deleting the model that
        # backed them.
        migrations.RemoveField(
            model_name='profile',
            name='auto_duty',
        ),
        migrations.RemoveField(
            model_name='profile',
            name='duty',
        ),
        migrations.DeleteModel(
            name='DutyTime',
        ),
    ]
|
{
"content_hash": "0be2de87865292b8fafa6b4e74efc6e7",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 52,
"avg_line_length": 21,
"alnum_prop": 0.5238095238095238,
"repo_name": "hackerspace-ntnu/website",
"id": "b3267ed400cf7ddb58c35e8fef6689afd2f6b1a5",
"size": "512",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "userprofile/migrations/0010_auto_20190129_1624.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16771"
},
{
"name": "HTML",
"bytes": "235369"
},
{
"name": "JavaScript",
"bytes": "43249"
},
{
"name": "Python",
"bytes": "323186"
}
],
"symlink_target": ""
}
|
""" smashlib.patches
"""
from smashlib.patches.base import Patch
from smashlib.patches.cd import PatchCDMagic
from smashlib.patches.pushd import PatchPushdMagic
# Public API of the patches package: re-export the patch classes by their
# class names (kept in sync automatically via __name__).
__all__ = [
    Patch.__name__,
    PatchCDMagic.__name__,
    PatchPushdMagic.__name__,
]
|
{
"content_hash": "fc0688f69f890763f2edc3825719d790",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 50,
"avg_line_length": 25.2,
"alnum_prop": 0.7142857142857143,
"repo_name": "mattvonrocketstein/smash",
"id": "765becf28baeaf614af252a3ffd483d0b0b83ddb",
"size": "252",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smashlib/patches/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "162188"
},
{
"name": "HTML",
"bytes": "32106"
},
{
"name": "JavaScript",
"bytes": "1615935"
},
{
"name": "Makefile",
"bytes": "550"
},
{
"name": "Python",
"bytes": "4934398"
},
{
"name": "Shell",
"bytes": "2990"
}
],
"symlink_target": ""
}
|
import errno
import os
import resource
import time
import signal
def daemonize():
    """Detach the current process and continue running it as a daemon.

    Classic double-fork: returns False in the original (parent) process and
    True in the daemonized grandchild, which runs in its own session with
    cwd '/', umask 0, and stdin/stdout/stderr bound to /dev/null.

    Raises OSError if either fork fails.  (The original wrapped each fork in
    a catch-and-reraise `except OSError as e: raise e`, which added nothing
    and truncated tracebacks on Python 2; the exception now propagates
    naturally.)
    """
    pid = os.fork()
    if pid == 0:
        # First child: become a session leader so we have no controlling tty.
        os.setsid()
        pid = os.fork()
        if pid == 0:
            # Grandchild (the daemon): don't keep any directory busy and
            # don't inherit a restrictive file-creation mask.
            os.chdir('/')
            os.umask(0)
        else:
            # First child exits so the daemon is re-parented to init.
            os._exit(0)
    else:
        # Original process: tell the caller it is NOT the daemon.
        return False
    # Close every inherited descriptor, then rebind fds 0/1/2 to /dev/null
    # so stray reads/writes cannot fail or leak to the old terminal.
    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if maxfd == resource.RLIM_INFINITY:
        maxfd = 1024
    for fd in range(0, maxfd):
        try:
            os.close(fd)
        except OSError:
            pass
    os.open(os.devnull, os.O_RDWR)  # becomes fd 0 (stdin)
    os.dup2(0, 1)                   # stdout -> /dev/null
    os.dup2(0, 2)                   # stderr -> /dev/null
    return True
class TimeoutExpired(Exception):
    """Raised by wait() when the process has not exited within the timeout."""
    pass
def is_running(pid):
    """Return True if a process with the given pid currently exists."""
    if pid < 0:
        return False
    try:
        # Signal 0 performs permission/existence checks only; nothing is
        # actually delivered to the target process.
        os.kill(pid, 0)
    except OSError as err:
        # ESRCH: no such process.
        if err.errno == errno.ESRCH:
            return False
        # EPERM: the process exists but we may not signal it.
        if err.errno == errno.EPERM:
            return True
        # Per "man 2 kill" the only possible errors are EINVAL, EPERM and
        # ESRCH -- anything else is unexpected, so propagate it.
        raise
    return True
def kill(pid, timeout=2, ignore_errors=False):
    """Terminate *pid*: SIGTERM first, escalating to SIGKILL after *timeout*.

    A pid of None or 0 is silently ignored.  OSErrors raised while
    signalling/reaping are re-raised unless *ignore_errors* is true.
    """
    if pid is None or pid == 0:
        return
    try:
        os.kill(pid, signal.SIGTERM)
        wait(pid, timeout)
    except OSError:
        if ignore_errors:
            return
        raise
    except TimeoutExpired:
        # Still alive after the grace period: force-kill and reap.
        os.kill(pid, signal.SIGKILL)
        wait(pid)
def wait(pid, timeout=None):
    """Wait for process *pid* to terminate and return its exit status.

    Returns the exit(2) status code if the process exited normally, the
    signal number if it was killed by a signal, or None when the status
    cannot be determined (pid is not our child, or never existed).

    If *timeout* (seconds) is given and the process is still alive when it
    expires, TimeoutExpired is raised.

    Bug fix: the original used the Python 2-only syntax
    ``except OSError, err`` which is a SyntaxError on Python 3; it is now
    ``except OSError as err`` (valid on Python 2.6+ as well).
    """
    def check_timeout(_delay):
        # Poll with exponential backoff, capped at 40ms per sleep.
        if timeout is not None:
            if time.time() >= stop_at:
                raise TimeoutExpired
        time.sleep(_delay)
        return min(_delay * 2, 0.04)

    # With a timeout we must poll (WNOHANG); otherwise block in waitpid.
    if timeout is not None:
        def waitcall():
            return os.waitpid(pid, os.WNOHANG)
        stop_at = time.time() + timeout
    else:
        def waitcall():
            return os.waitpid(pid, 0)

    delay = 0.0001
    while 1:
        try:
            retpid, status = waitcall()
        except OSError as err:
            if err.errno == errno.EINTR:
                delay = check_timeout(delay)
                continue
            elif err.errno == errno.ECHILD:
                # This has two meanings:
                # - pid is not a child of os.getpid() in which case
                #   we keep polling until it's gone
                # - pid never existed in the first place
                # In both cases we'll eventually return None as we
                # can't determine its exit status code.
                while 1:
                    if is_running(pid):
                        delay = check_timeout(delay)
                    else:
                        return
            else:
                raise
        else:
            if retpid == 0:
                # WNOHANG was used, pid is still running
                delay = check_timeout(delay)
                continue
            # process exited due to a signal; return the integer of
            # that signal
            if os.WIFSIGNALED(status):
                return os.WTERMSIG(status)
            # process exited using exit(2) system call; return the
            # integer exit(2) system call has been called with
            elif os.WIFEXITED(status):
                return os.WEXITSTATUS(status)
            else:
                # should never happen
                raise RuntimeError("unknown process exit status")
|
{
"content_hash": "583e5f7b32fcbb0396ebdba919e39ae7",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 69,
"avg_line_length": 25.575539568345324,
"alnum_prop": 0.5085794655414908,
"repo_name": "letconex/MMT",
"id": "8bdd68e31dac9185e597c7df55e59e19f744d1cb",
"size": "3555",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cli/libs/daemon.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AutoHotkey",
"bytes": "1609"
},
{
"name": "C",
"bytes": "2310"
},
{
"name": "C++",
"bytes": "3479743"
},
{
"name": "CMake",
"bytes": "42508"
},
{
"name": "Java",
"bytes": "810385"
},
{
"name": "Perl",
"bytes": "93820"
},
{
"name": "Protocol Buffer",
"bytes": "947"
},
{
"name": "Python",
"bytes": "232214"
},
{
"name": "Roff",
"bytes": "25856"
},
{
"name": "Shell",
"bytes": "15583"
}
],
"symlink_target": ""
}
|
import os
import sys
import pbr.version
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')))
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# Sphinx extension modules enabled for this documentation build.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.doctest',
              'sphinx.ext.intersphinx',
              'sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'OpenStack Command Line Client'
copyright = u'2012-2013 OpenStack Foundation'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
version_info = pbr.version.VersionInfo('python-openstackclient')
#
# The short X.Y version.
version = version_info.version_string()
# The full version, including alpha/beta/rc tags.
release = version_info.release_string()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'OpenStackCommandLineClientdoc'
# -- Options for LaTeX output -------------------------------------------------
# Options passed through to the LaTeX builder (all defaults kept).
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual])
# .
# One LaTeX (PDF) manual, built from the root 'index' document.
latex_documents = [
    ('index', 'OpenStackCommandLineClient.tex',
     u'OpenStack Command Line Client Documentation',
     u'OpenStack', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# One man page, openstack(1), generated from man/openstack.
man_pages = [
    (
        'man/openstack',
        'openstack',
        u'OpenStack Command Line Client',
        [u'OpenStack contributors'],
        1,
    ),
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# One Texinfo document, rooted at 'index'.
texinfo_documents = [
    ('index', 'OpenStackCommandLineClient',
     u'OpenStack Command Line Client Documentation',
     u'OpenStack', 'OpenStackCommandLineClient',
     'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# intersphinx: resolve cross-references against the Python standard library
# docs (None means "fetch the inventory from the target URL").
intersphinx_mapping = {'http://docs.python.org/': None}
|
{
"content_hash": "20673bda3abbd6fc370a4cf4e7b5f0d9",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 88,
"avg_line_length": 31.83266932270916,
"alnum_prop": 0.700250312891114,
"repo_name": "citrix-openstack-build/python-openstackclient",
"id": "47025b6daa59f500ded9a536e3f526bd7f30d3ff",
"size": "8432",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "doc/source/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "675669"
},
{
"name": "Shell",
"bytes": "1128"
}
],
"symlink_target": ""
}
|
"""
Assessments
This module currently contains 2 types of Assessments
* Flexible Impact Assessments (including Mobile access)
* Rapid Assessment Tool (from ECB: http://www.ecbproject.org/page/48)
@ToDo: Migrate this to a Template in the Survey module
@ToDo Validation similar to sitrep_school_report_onvalidation()
http://bazaar.launchpad.net/~flavour/sahana-eden/trunk/annotate/head:/models/sitrep.py#L99
It also contains some Baseline Data:
* Populations
http://eden.sahanafoundation.org/wiki/BluePrintBaselineData
"""
# web2py routing: controller and function names of the current request.
module = request.controller
resourcename = request.function

# Fail fast when the Assessments module is disabled in deployment settings.
if not settings.has_module(module):
    raise HTTP(404, body="Module disabled: %s" % module)
# -----------------------------------------------------------------------------
# Define the Model
# @ToDo: Move to modules/s3db/assess.py
# - here it isn't visible to s3db.load_all_models() or Sync
# -----------------------------------------------------------------------------
# Severity scale used by assessment summaries (0 = least severe).
assess_severity_opts = {
    0: T("Low"),
    1: T("Medium"),
    2: T("High"),
    3: T("Very High"),
}

# Traffic-light colour used to render each severity level above.
assess_colour_opts = {
    0:"green",
    1:"yellow",
    2:"orange",
    3:"red"
}
def s3_assess_severity_represent(value):
    """Represent a severity value (0-3) as a coloured circle image.

    Returns NONE (the web2py empty representation) for missing values.
    """
    # Bug fix: the original tested "if value:", so severity 0 ("Low") -
    # a valid key with a defined green colour in assess_colour_opts -
    # rendered as NONE.  Only a missing (None) value should render empty.
    if value is not None:
        return IMG(_src="/%s/static/img/%s_circle_16px.png" %
                        (appname, assess_colour_opts[value]),
                   _alt=value,
                   _align="middle"
                   )
    else:
        return NONE
# Truncate long record names to 48 chars for dropdown representation.
repr_select = lambda l: len(l.name) > 48 and "%s..." % l.name[:44] or l.name

# Local shortcuts into the s3 framework used throughout this controller.
S3Represent = s3base.S3Represent
add_component = s3db.add_component
configure = s3db.configure
crud_strings = s3.crud_strings
define_table = db.define_table
location_id = s3db.gis_location_id
person_id = s3db.pr_person_id
organisation_id = s3db.org_organisation_id
organisation_represent = s3db.org_organisation_represent
sector_id = s3db.org_sector_id
human_resource_id = s3db.hrm_human_resource_id
ireport_id = s3db.irs_ireport_id

# Impact as component of assessments
add_component("impact_impact", assess_assess="assess_id")
def assess_tables():
    """ Load the Assess Tables when needed

        Defines assess_assess, assess_baseline_type, assess_baseline and
        assess_summary, wires them up as components, and returns the
        reusable assess_id field (passed back to global scope by the caller).
    """

    module = "assess"

    # =========================================================================
    # Flexible Impact Assessments
    # =========================================================================
    # Assessment
    #
    resourcename = "assess"
    tablename = "assess_assess"
    table = define_table(tablename,
                         Field("datetime", "datetime",
                               label = T("Date & Time"),
                               default = request.utcnow),
                         location_id(widget = S3LocationAutocompleteWidget(),
                                     requires = IS_LOCATION()),
                         organisation_id(widget = S3OrganisationAutocompleteWidget(default_from_profile=True)),
                         person_id("assessor_person_id",
                                   label = T("Assessor"),
                                   default = s3_logged_in_person()),
                         s3_comments(),
                         ireport_id(),   # Assessment can be linked to an Incident Report
                         *s3_meta_fields())

    # Reusable FK field pointing at this table, for use by component tables.
    assess_id = S3ReusableField("assess_id", table,
                                requires = IS_NULL_OR(
                                               IS_ONE_OF(db, "assess_assess.id", "%(id)s")
                                               ),
                                represent = lambda id: id,
                                label = T("Assessment"),
                                ondelete = "RESTRICT")

    # CRUD strings
    ADD_ASSESSMENT = T("Add Assessment")
    crud_strings[tablename] = Storage(
        title_create = ADD_ASSESSMENT,
        title_display = T("Assessment Details"),
        title_list = T("Assessments"),
        title_update = T("Edit Assessment"),
        title_search = T("Search Assessments"),
        subtitle_create = T("Add New Assessment"),
        label_list_button = T("List Assessments"),
        label_create_button = ADD_ASSESSMENT,
        label_delete_button = T("Delete Assessment"),
        msg_record_created = T("Assessment added"),
        msg_record_modified = T("Assessment updated"),
        msg_record_deleted = T("Assessment deleted"),
        msg_list_empty = T("No Assessments currently registered"),
        name_nice = T("Assessment"),
        name_nice_plural = T("Assessments"))

    # assess_assess as component of org_organisation
    add_component(table, org_organisation="organisation_id")

    # Hide Add Assessment functionality. Users should only add assessments
    # through the Basic Assessment.
    configure(tablename,
              insertable=False)

    # =========================================================================
    # Baseline Type
    #
    tablename = "assess_baseline_type"
    table = define_table(tablename,
                         Field("name", length=128, notnull=True, unique=True),
                         *s3_meta_fields())

    # CRUD strings
    ADD_BASELINE_TYPE = T("Add Baseline Type")
    crud_strings[tablename] = Storage(
        title_create = ADD_BASELINE_TYPE,
        title_display = T("Baseline Type Details"),
        title_list = T("Baseline Types"),
        title_update = T("Edit Baseline Type"),
        title_search = T("Search Baseline Type"),
        subtitle_create = T("Add New Baseline Type"),
        label_list_button = T("List Baseline Types"),
        label_create_button = ADD_BASELINE_TYPE,
        label_delete_button = T("Delete Baseline Type"),
        msg_record_created = T("Baseline Type added"),
        msg_record_modified = T("Baseline Type updated"),
        msg_record_deleted = T("Baseline Type deleted"),
        msg_list_empty = T("No Baseline Types currently registered"),
        name_nice = T("Baseline Type"),
        name_nice_plural = T("Baseline Types"))

    # Admins get an inline "add" link next to the Baseline Type dropdown.
    def baseline_type_comment():
        # ToDo: Is this membership check required?
        if auth.has_membership(auth.id_group("'Administrator'")):
            return S3AddResourceLink(c="assess",
                                     f="baseline_type",
                                     label=ADD_BASELINE_TYPE)
        else:
            return None

    represent = S3Represent(tablename)
    baseline_type_id = S3ReusableField("baseline_type_id", table,
                                       sortby="name",
                                       requires = IS_NULL_OR(IS_ONE_OF(db,
                                                      "assess_baseline_type.id",
                                                      represent,
                                                      sort=True)),
                                       represent = represent,
                                       label = T("Baseline Type"),
                                       comment = baseline_type_comment(),
                                       ondelete = "RESTRICT"
                                       )

    # =========================================================================
    # Baseline
    #
    tablename = "assess_baseline"
    table = define_table(tablename,
                         # Hide FK fields in forms
                         assess_id(readable = False, writable = False),
                         baseline_type_id(),
                         Field("value", "double"),
                         s3_comments(),
                         *s3_meta_fields())

    # CRUD strings
    ADD_BASELINE = T("Add Baseline")
    crud_strings[tablename] = Storage(
        title_create = ADD_BASELINE,
        title_display = T("Baselines Details"),
        title_list = T("Baselines"),
        title_update = T("Edit Baseline"),
        title_search = T("Search Baselines"),
        subtitle_create = T("Add New Baseline"),
        label_list_button = T("List Baselines"),
        label_create_button = ADD_BASELINE,
        label_delete_button = T("Delete Baseline"),
        msg_record_created = T("Baseline added"),
        msg_record_modified = T("Baseline updated"),
        msg_record_deleted = T("Baseline deleted"),
        msg_list_empty = T("No Baselines currently registered"),
        name_nice = T("Baseline"),
        name_nice_plural = T("Baselines"))

    # Baseline as component of assessments
    add_component(table, assess_assess="assess_id")

    # =========================================================================
    # Summary
    #
    tablename = "assess_summary"
    table = define_table(tablename,
                         assess_id(readable = False, writable = False),
                         sector_id(),
                         #Field("value", "double"),
                         Field("value", "integer",
                               default = 0,
                               label = T("Severity"),
                               requires = IS_EMPTY_OR(IS_IN_SET(assess_severity_opts)),
                               widget = SQLFORM.widgets.radio.widget,
                               represent = s3_assess_severity_represent),
                         s3_comments(),
                         *s3_meta_fields())

    # CRUD strings
    ADD_ASSESS_SUMMARY = T("Add Assessment Summary")
    crud_strings[tablename] = Storage(
        title_create = ADD_ASSESS_SUMMARY,
        title_display = T("Assessment Summary Details"),
        title_list = T("Assessment Summaries"),
        title_update = T("Edit Assessment Summary"),
        title_search = T("Search Assessment Summaries"),
        subtitle_create = T("Add New Assessment Summary"),
        label_list_button = T("List Assessment Summaries"),
        label_create_button = ADD_ASSESS_SUMMARY,
        label_delete_button = T("Delete Assessment Summary"),
        msg_record_created = T("Assessment Summary added"),
        msg_record_modified = T("Assessment Summary updated"),
        msg_record_deleted = T("Assessment Summary deleted"),
        msg_list_empty = T("No Assessment Summaries currently registered"),
        name_nice = T("Assessment"),
        name_nice_plural = T("Assessments"))

    # Summary as component of assessments
    add_component(table, assess_assess="assess_id")

    # Pass variables back to global scope (response.s3.*)
    return dict(
        assess_id = assess_id
    )
# =========================================================================
# Rapid Assessment Tool
# =========================================================================
def rat_tables():
""" Load the RAT Tables when needed """
module = "assess"
# Load the models we depend on
if settings.has_module("cr"):
shelter_id = s3db.shelter_id
if settings.has_module("hrm"):
human_resource_id = s3db.hrm_human_resource_id
else:
human_resource_id = s3db.pr_person_id
# Section CRUD strings
ADD_SECTION = T("Add Section")
rat_section_crud_strings = Storage(
title_create = ADD_SECTION,
title_display = T("Section Details"),
title_list = T("Sections"),
title_update = "",
title_search = T("Search Sections"),
subtitle_create = "",
label_list_button = T("List Sections"),
label_create_button = ADD_SECTION,
label_delete_button = T("Delete Section"),
msg_record_created = T("Section updated"),
msg_record_modified = T("Section updated"),
msg_record_deleted = T("Section deleted"),
msg_list_empty = T("No Sections currently registered"),
name_nice = T("Search"),
name_nice_plural = T("Searches"))
# -------------------------------------------------------------------------
# Common options
rat_walking_time_opts = {
1: T("0-15 minutes"),
2: T("15-30 minutes"),
3: T("30-60 minutes"),
4: T("over one hour"),
999: NOT_APPLICABLE
}
# -------------------------------------------------------------------------
# Helper functions
def rat_represent_multiple(set, opt):
"""
Represent an IS_IN_SET with multiple=True as
comma-separated list of options
@param set: the options set as dict
@param opt: the selected option(s)
"""
if isinstance(opt, (list, tuple)):
opts = opt
vals = [str(set.get(o, o)) for o in opts]
#elif isinstance(opt, basestring):
# opts = opt.split("|")
# vals = [str(set.get(int(o), o)) for o in opts if o]
elif isinstance(opt, int):
opts = [opt]
vals = str(set.get(opt, opt))
else:
return T("None")
if len(opts) > 1:
vals = ", ".join(vals)
else:
vals = len(vals) and vals[0] or ""
return vals
def rat_tooltip(tooltip, multiple=False):
"""
Prepare tooltip
"""
if multiple:
comment = DIV("(%s)" % T("Select all that apply"),
DIV(_class="tooltipbody",
_title="|%s" % T(tooltip)))
else:
comment = DIV(DIV(_class="tooltipbody",
_title="|%s" % T(tooltip)))
return comment
def rat_label_and_tooltip(label, tooltip, multiple=False):
"""
Prepare tooltip that incorporates a field's label
"""
label = T(label)
if multiple:
comment = DIV("(%s)" % T("Select all that apply"),
DIV(_class="tooltip",
_title="%s|%s" % (T(label), T(tooltip))))
else:
comment = DIV(DIV(_class="tooltip",
_title="%s|%s" % (T(label), T(tooltip))))
return {"label": label, "comment": comment}
rat_interview_location_opts = {
1:T("Village"),
2:T("Urban area"),
3:T("Collective center"),
4:T("Informal camp"),
5:T("Formal camp"),
6:T("School"),
7:T("Mosque"),
8:T("Church"),
99:T("Other")
}
rat_interviewee_opts = {
1:T("Male"),
2:T("Female"),
3:T("Village Leader"),
4:T("Informal Leader"),
5:T("Community Member"),
6:T("Religious Leader"),
7:T("Police"),
8:T("Healthcare Worker"),
9:T("School Teacher"),
10:T("Womens Focus Groups"),
11:T("Child (< 18 yrs)"),
99:T("Other")
}
rat_accessibility_opts = {
1:T("2x4 Car"),
2:T("4x4 Car"),
3:T("Truck"),
4:T("Motorcycle"),
5:T("Boat"),
6:T("Walking Only"),
7:T("No access at all"),
99:T("Other")
}
# Main Resource -----------------------------------------------------------
# contains Section 1: Identification Information
#
tablename = "assess_rat"
table = define_table(tablename,
Field("date", "date",
requires = [IS_DATE(format = settings.get_L10n_date_format()),
IS_NOT_EMPTY()],
default = datetime.datetime.today()),
location_id(widget = S3LocationAutocompleteWidget(),
requires = IS_LOCATION()),
human_resource_id("staff_id", label=T("Staff")),
human_resource_id("staff2_id", label=T("Staff2")),
Field("interview_location", "list:integer",
label = T("Interview taking place at"),
requires = IS_NULL_OR(IS_IN_SET(rat_interview_location_opts,
multiple=True,
zero=None)),
#widget = SQLFORM.widgets.checkboxes.widget,
represent = lambda opt, set=rat_interview_location_opts: \
rat_represent_multiple(set, opt),
comment = "(%s)" % T("Select all that apply")),
Field("interviewee", "list:integer",
label = T("Person interviewed"),
requires = IS_NULL_OR(IS_IN_SET(rat_interviewee_opts,
multiple=True,
zero=None)),
#widget = SQLFORM.widgets.checkboxes.widget,
represent = lambda opt, set=rat_interviewee_opts: \
rat_represent_multiple(set, opt),
comment = "(%s)" % T("Select all that apply")),
Field("accessibility", "integer",
label = T("Accessibility of Affected Location"),
requires = IS_NULL_OR(IS_IN_SET(rat_accessibility_opts,
zero=None)),
represent = lambda opt: rat_accessibility_opts.get(opt, opt)),
s3_comments(),
#document_id(), # Better to have multiple Documents on a Tab
s3db.shelter_id(),
*s3_meta_fields())
# CRUD strings
ADD_ASSESSMENT = T("Add Rapid Assessment")
crud_strings[tablename] = Storage(
title_create = ADD_ASSESSMENT,
title_display = T("Rapid Assessment Details"),
title_list = T("Rapid Assessments"),
title_update = T("Edit Rapid Assessment"),
title_search = T("Search Rapid Assessments"),
subtitle_create = T("Add New Rapid Assessment"),
label_list_button = T("List Rapid Assessments"),
label_create_button = ADD_ASSESSMENT,
label_delete_button = T("Delete Rapid Assessment"),
msg_record_created = T("Rapid Assessment added"),
msg_record_modified = T("Rapid Assessment updated"),
msg_record_deleted = T("Rapid Assessment deleted"),
msg_list_empty = T("No Rapid Assessments currently registered"),
name_nice = T("Rapid Assessment"),
name_nice_plural = T("Rapid Assessments"))
# -------------------------------------------------------------------------
def rat_assessment_onaccept(form):
id = form.vars.get("id", None)
if id:
for x in xrange(2, 10):
section = "assess_section%s" % x
set = db(db[section].assessment_id == id)
record = set.select(db[section].id, limitby=(0, 1)).first()
if not record:
db[section].insert(assessment_id=id)
# -------------------------------------------------------------------------
def rat_represent(id):
""" Represent assessment as string """
table = db.assess_rat
row = db(table.id == id).select(table.date,
table.staff_id,
table.staff2_id,
table.location_id,
limitby = (0, 1)).first()
if row:
date = row.date and str(row.date) or ""
location = row.location_id and s3db.gis_LocationRepresent()(row.location_id) or ""
table = db.org_staff
org = ["", ""]
i = 0
for staff_id in [row.staff_id, row.staff2_id]:
if staff_id:
staff = db(table.id == staff_id).select(table.organisation_id,
limitby=(0, 1)).first()
if staff:
i += 1
org[i] = organisation_represent(staff.organisation_id)
assessment_represent = XML("<div>%s %s, %s %s</div>" % (location, org[0], org[1], date))
else:
assessment_represent = NONE
return assessment_represent
# -------------------------------------------------------------------------
# re-usable field
assessment_id = S3ReusableField("assessment_id", table,
requires = IS_NULL_OR(
IS_ONE_OF(db, "assess_rat.id",
rat_represent,
orderby="assess_rat.id")
),
#represent = rat_represent,
readable = False, writable = False,
#label = T("Rapid Assessment"),
#comment = A(ADD_ASSESSMENT,
# _class="s3_add_resource_link",
# _href=URL(c="assess", f="rat",
# args="create",
# vars=dict(format="popup")),
# _target="top",
# _title=ADD_ASSESSMENT),
ondelete = "RESTRICT")
# Assessment as component of cr_shelter.
# RAT has components itself, so best not to constrain within the parent resource tabs
# - therefore disable the listadd & jump out of the tabs for Create/Update
add_component(table, cr_shelter="shelter_id")
configure(tablename,
listadd=False, # We override this in the RAT controller for when not a component
onaccept=rat_assessment_onaccept)
# Section 2: Demographic --------------------------------------------------
tablename = "assess_section2"
table = define_table(tablename,
assessment_id(),
Field("population_total", "integer",
label = T("Total population of site visited"),
comment = T("people")),
Field("households_total", "integer",
label = T("Total # of households of site visited"),
comment = T("households")),
Field("population_affected", "integer",
label = T("Estimated # of people who are affected by the emergency"),
comment = T("people")),
Field("households_affected", "integer",
label = T("Estimated # of households who are affected by the emergency"),
comment = T("households")),
Field("male_05", "double",
label = T("Number/Percentage of affected population that is Male & Aged 0-5")),
Field("male_612", "double",
label = T("Number/Percentage of affected population that is Male & Aged 6-12")),
Field("male_1317", "double",
label = T("Number/Percentage of affected population that is Male & Aged 13-17")),
Field("male_1825", "double",
label = T("Number/Percentage of affected population that is Male & Aged 18-25")),
Field("male_2660", "double",
label = T("Number/Percentage of affected population that is Male & Aged 26-60")),
Field("male_61", "double",
label = T("Number/Percentage of affected population that is Male & Aged 61+")),
Field("female_05", "double",
label = T("Number/Percentage of affected population that is Female & Aged 0-5")),
Field("female_612", "double",
label = T("Number/Percentage of affected population that is Female & Aged 6-12")),
Field("female_1317", "double",
label = T("Number/Percentage of affected population that is Female & Aged 13-17")),
Field("female_1825", "double",
label = T("Number/Percentage of affected population that is Female & Aged 18-25")),
Field("female_2660", "double",
label = T("Number/Percentage of affected population that is Female & Aged 26-60")),
Field("female_61", "double",
label = T("Number/Percentage of affected population that is Female & Aged 61+")),
Field("dead_women", "integer",
label = T("How many Women (18 yrs+) are Dead due to the crisis"),
comment = T("people")), # @ToDo: Should this say "Number of people"?
Field("dead_men", "integer",
label = T("How many Men (18 yrs+) are Dead due to the crisis"),
comment = T("people")),
Field("dead_girl", "integer",
label = T("How many Girls (0-17 yrs) are Dead due to the crisis"),
comment = T("people")),
Field("dead_boy", "integer",
label = T("How many Boys (0-17 yrs) are Dead due to the crisis"),
comment = T("people")),
Field("injured_women", "integer",
label = T("How many Women (18 yrs+) are Injured due to the crisis"),
comment = T("people")),
Field("injured_men", "integer",
label = T("How many Men (18 yrs+) are Injured due to the crisis"),
comment = T("people")),
Field("injured_girl", "integer",
label = T("How many Girls (0-17 yrs) are Injured due to the crisis"),
comment = T("people")),
Field("injured_boy", "integer",
label = T("How many Boys (0-17 yrs) are Injured due to the crisis"),
comment = T("people")),
Field("missing_women", "integer",
label = T("How many Women (18 yrs+) are Missing due to the crisis"),
comment = T("people")),
Field("missing_men", "integer",
label = T("How many Men (18 yrs+) are Missing due to the crisis"),
comment = T("people")),
Field("missing_girl", "integer",
label = T("How many Girls (0-17 yrs) are Missing due to the crisis"),
comment = T("people")),
Field("missing_boy", "integer",
label = T("How many Boys (0-17 yrs) are Missing due to the crisis"),
comment = T("people")),
Field("household_head_elderly", "integer",
label = T("Elderly person headed households (>60 yrs)"),
comment = T("households")),
Field("household_head_female", "integer",
label = T("Female headed households"),
comment = T("households")),
Field("household_head_child", "integer",
label = T("Child headed households (<18 yrs)"),
comment = T("households")),
Field("disabled_physical", "integer",
label = T("Persons with disability (physical)"),
comment = T("people")),
Field("disabled_mental", "integer",
label = T("Persons with disability (mental)"),
comment = T("people")),
Field("pregnant", "integer",
label = T("Pregnant women"),
comment = T("people")),
Field("lactating", "integer",
label = T("Lactating women"),
comment = T("people")),
Field("minorities", "integer",
label = T("Migrants or ethnic minorities"),
comment = T("people")),
s3_comments(),
*s3_meta_fields())
# CRUD strings
crud_strings[tablename] = rat_section_crud_strings
add_component(table,
assess_rat=dict(joinby="assessment_id",
multiple=False))
configure(tablename, deletable=False)
# Section 3: Shelter & Essential NFIs -------------------------------------
rat_houses_salvmat_types = {
1: T("Wooden plank"),
2: T("Zinc roof"),
3: T("Bricks"),
4: T("Wooden poles"),
5: T("Door frame"),
6: T("Window frame"),
7: T("Roof tile"),
999: NOT_APPLICABLE
}
rat_water_container_types = {
1: T("Jerry can"),
2: T("Bucket"),
3: T("Water gallon"),
99: T("Other (specify)")
}
tablename = "assess_section3"
table = define_table(tablename,
assessment_id(),
Field("houses_total", "integer",
label = T("Total number of houses in the area"),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 99999999)),
),
Field("houses_destroyed", "integer",
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 99999999)),
**rat_label_and_tooltip(
"Number of houses destroyed/uninhabitable",
"How many houses are uninhabitable (uninhabitable = foundation and structure destroyed)?")),
Field("houses_damaged", "integer",
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 99999999)),
**rat_label_and_tooltip(
"Number of houses damaged, but usable",
"How many houses suffered damage but remain usable (usable = windows broken, cracks in walls, roof slightly damaged)?")),
Field("houses_salvmat", "list:integer",
requires = IS_NULL_OR(IS_IN_SET(rat_houses_salvmat_types,
multiple=True,
zero=None)),
represent = lambda opt, set=rat_houses_salvmat_types: \
rat_represent_multiple(set, opt),
**rat_label_and_tooltip(
"Salvage material usable from destroyed houses",
"What type of salvage material can be used from destroyed houses?",
multiple=True)),
Field("water_containers_available", "boolean",
**rat_label_and_tooltip(
"Water storage containers available for HH",
"Do households have household water storage containers?")),
Field("water_containers_sufficient", "boolean",
**rat_label_and_tooltip(
"Water storage containers sufficient per HH",
"Do households each have at least 2 containers (10-20 litres each) to hold water?")),
Field("water_containers_types", "list:integer",
requires = IS_EMPTY_OR(IS_IN_SET(rat_water_container_types,
zero=None,
multiple=True)),
represent = lambda opt, set=rat_water_container_types: \
rat_represent_multiple(set, opt),
**rat_label_and_tooltip(
"Types of water storage containers available",
"What types of household water storage containers are available?",
multiple=True)),
Field("water_containers_types_other",
label = T("Other types of water storage containers")),
Field("cooking_equipment_available", "boolean",
**rat_label_and_tooltip(
"Appropriate cooking equipment/materials in HH",
"Do households have appropriate equipment and materials to cook their food (stove, pots, dished plates, and a mug/drinking vessel, etc)?")),
Field("sanitation_items_available", "boolean",
**rat_label_and_tooltip(
"Reliable access to sanitation/hygiene items",
"Do people have reliable access to sufficient sanitation/hygiene items (bathing soap, laundry soap, shampoo, toothpaste and toothbrush)?")),
Field("sanitation_items_available_women", "boolean",
**rat_label_and_tooltip(
"Easy access to sanitation items for women/girls",
"Do women and girls have easy access to sanitary materials?")),
Field("bedding_materials_available", "boolean",
**rat_label_and_tooltip(
"Bedding materials available",
"Do households have bedding materials available (tarps, plastic mats, blankets)?")),
Field("clothing_sets_available", "boolean",
**rat_label_and_tooltip(
"Appropriate clothing available",
"Do people have at least 2 full sets of clothing (shirts, pants/sarong, underwear)?")),
Field("nfi_assistance_available", "boolean",
**rat_label_and_tooltip(
"Shelter/NFI assistance received/expected",
"Have households received any shelter/NFI assistance or is assistance expected in the coming days?")),
Field("kits_hygiene_received", "boolean",
label = T("Hygiene kits received")),
Field("kits_hygiene_source",
label = T("Hygiene kits, source")),
Field("kits_household_received", "boolean",
label = T("Household kits received")),
Field("kits_household_source",
label = T("Household kits, source")),
Field("kits_dwelling_received", "boolean",
label = T("Family tarpaulins received")), # @ToDo: Better label, perhaps? A tarp isn't a dwelling.
Field("kits_dwelling_source",
label = T("Family tarpaulins, source")),
s3_comments(),
*s3_meta_fields())
# CRUD strings
crud_strings[tablename] = rat_section_crud_strings
add_component(table,
assess_rat=dict(joinby="assessment_id",
multiple=False))
configure(tablename, deletable=False)
# Section 4 - Water and Sanitation ----------------------------------------
rat_water_source_types = {
1: T("PDAM"),
2: T("Dug Well"),
3: T("Spring"),
4: T("River"),
5: T("Other Faucet/Piped Water"),
99: T("Other (describe)"),
999: NOT_APPLICABLE
}
rat_water_coll_person_opts = {
1: T("Child"),
2: T("Adult male"),
3: T("Adult female"),
4: T("Older person (>60 yrs)"),
999: NOT_APPLICABLE
}
rat_defec_place_types = {
1: T("open defecation"),
2: T("pit"),
3: T("latrines"),
4: T("river"),
99: T("other")
}
rat_defec_place_animals_opts = {
1: T("enclosed area"),
2: T("within human habitat"),
999: NOT_APPLICABLE
}
rat_latrine_types = {
1: T("flush latrine with septic tank"),
2: T("pit latrine"),
999: NOT_APPLICABLE
}
tablename = "assess_section4"
table = define_table(tablename,
assessment_id(),
Field("water_source_pre_disaster_type", "integer",
label = T("Type of water source before the disaster"),
requires = IS_EMPTY_OR(IS_IN_SET(rat_water_source_types,
zero=None)),
represent = lambda opt: rat_water_source_types.get(opt,
UNKNOWN_OPT)),
Field("water_source_pre_disaster_description",
label = T("Description of water source before the disaster")),
Field("dwater_source_type", "integer",
requires = IS_EMPTY_OR(IS_IN_SET(rat_water_source_types,
zero=None)),
represent = lambda opt: rat_water_source_types.get(opt,
UNKNOWN_OPT),
**rat_label_and_tooltip(
"Current type of source for drinking water",
"What is your major source of drinking water?")),
Field("dwater_source_description",
label = T("Description of drinking water source")),
Field("dwater_reserve",
**rat_label_and_tooltip(
"How long will this water resource last?",
"Specify the minimum sustainability in weeks or days.")),
Field("swater_source_type", "integer",
requires = IS_EMPTY_OR(IS_IN_SET(rat_water_source_types,
zero=None)),
represent = lambda opt: rat_water_source_types.get(opt,
UNKNOWN_OPT),
**rat_label_and_tooltip(
"Current type of source for sanitary water",
"What is your major source of clean water for daily use (ex: washing, cooking, bathing)?")),
Field("swater_source_description",
label = T("Description of sanitary water source")),
Field("swater_reserve",
**rat_label_and_tooltip(
"How long will this water resource last?",
"Specify the minimum sustainability in weeks or days.")),
Field("water_coll_time", "integer",
requires = IS_EMPTY_OR(IS_IN_SET(rat_walking_time_opts,
zero=None)),
represent = lambda opt: rat_walking_time_opts.get(opt,
UNKNOWN_OPT),
**rat_label_and_tooltip(
"Time needed to collect water",
"How long does it take you to reach the available water resources? Specify the time required to go there and back, including queuing time, by foot.")),
Field("water_coll_safe", "boolean",
label = T("Is it safe to collect water?"),
default = True),
Field("water_coll_safety_problems",
label = T("If no, specify why")),
Field("water_coll_person", "integer",
label = T("Who usually collects water for the family?"),
requires = IS_EMPTY_OR(IS_IN_SET(rat_water_coll_person_opts,
zero=None)),
represent = lambda opt: rat_water_coll_person_opts.get(opt,
UNKNOWN_OPT)),
Field("defec_place_type",
requires = IS_EMPTY_OR(IS_IN_SET(rat_defec_place_types,
zero=None,
multiple=True)),
represent = lambda opt: rat_defec_place_types.get(opt,
UNKNOWN_OPT),
**rat_label_and_tooltip(
"Type of place for defecation",
"Where do the majority of people defecate?",
multiple=True)),
Field("defec_place_description",
label = T("Description of defecation area")),
Field("defec_place_distance", "integer",
label = T("Distance between defecation area and water source"),
comment = T("meters")),
Field("defec_place_animals", "integer",
label = T("Defecation area for animals"),
requires = IS_EMPTY_OR(IS_IN_SET(rat_defec_place_animals_opts,
zero = None)),
represent = lambda opt: rat_defec_place_animals_opts.get(opt,
UNKNOWN_OPT)),
Field("close_industry", "boolean",
**rat_label_and_tooltip(
"Industry close to village/camp",
"Is there any industrial or agro-chemical production close to the affected area/village?")),
Field("waste_disposal",
**rat_label_and_tooltip(
"Place for solid waste disposal",
"Where is solid waste disposed in the village/camp?")),
Field("latrines_number", "integer",
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999)),
**rat_label_and_tooltip(
"Number of latrines",
"How many latrines are available in the village/IDP centre/Camp?")),
Field("latrines_type", "integer",
requires = IS_EMPTY_OR(IS_IN_SET(rat_latrine_types,
zero=None)),
represent = lambda opt: rat_latrine_types.get(opt,
UNKNOWN_OPT),
**rat_label_and_tooltip(
"Type of latrines",
"What type of latrines are available in the village/IDP centre/Camp?")),
Field("latrines_separation", "boolean",
**rat_label_and_tooltip(
"Separate latrines for women and men",
"Are there separate latrines for women and men available?")),
Field("latrines_distance", "integer",
**rat_label_and_tooltip(
"Distance between shelter and latrines",
"Distance between latrines and temporary shelter in meters")),
s3_comments(),
*s3_meta_fields())
# CRUD strings
crud_strings[tablename] = rat_section_crud_strings
add_component(table,
assess_rat=dict(joinby="assessment_id",
multiple=False))
configure(tablename, deletable=False)
# Section 5 - Health ------------------------------------------------------
rat_health_services_types = {
1: T("Community Health Center"),
2: T("Hospital")
}
rat_health_problems_opts = {
1: T("Respiratory Infections"),
2: T("Diarrhea"),
3: T("Dehydration"),
99: T("Other (specify)")
}
rat_infant_nutrition_alternative_opts = {
1: T("Porridge"),
2: T("Banana"),
3: T("Instant Porridge"),
4: T("Air tajin"),
99: T("Other (specify)")
}
tablename = "assess_section5"
table = define_table(tablename,
assessment_id(),
Field("health_services_pre_disaster", "boolean",
**rat_label_and_tooltip(
"Health services functioning prior to disaster",
"Were there health services functioning for the community prior to the disaster?")),
Field("medical_supplies_pre_disaster", "boolean",
**rat_label_and_tooltip(
"Basic medical supplies available prior to disaster",
"Were basic medical supplies available for health services prior to the disaster?")),
Field("health_services_post_disaster", "boolean",
**rat_label_and_tooltip(
"Health services functioning since disaster",
"Are there health services functioning for the community since the disaster?")),
Field("medical_supplies_post_disaster", "boolean",
**rat_label_and_tooltip(
"Basic medical supplies available since disaster",
"Are basic medical supplies available for health services since the disaster?")),
Field("medical_supplies_reserve", "integer",
label = T("How many days will the supplies last?")),
Field("health_services_available_types", "list:integer",
requires = IS_EMPTY_OR(IS_IN_SET(rat_health_services_types,
zero=None, multiple=True)),
represent = lambda opt: \
rat_represent_multiple(rat_health_services_types, opt),
**rat_label_and_tooltip(
"Types of health services available",
"What types of health services are still functioning in the affected area?",
multiple=True)),
Field("staff_number_doctors", "integer",
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999)),
**rat_label_and_tooltip(
"Number of doctors actively working",
"How many doctors in the health centers are still actively working?")),
Field("staff_number_nurses", "integer",
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999)),
**rat_label_and_tooltip(
"Number of nurses actively working",
"How many nurses in the health centers are still actively working?")),
Field("staff_number_midwives", "integer",
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999)),
**rat_label_and_tooltip(
"Number of midwives actively working",
"How many midwives in the health centers are still actively working?")),
Field("health_service_walking_time", "integer",
requires = IS_EMPTY_OR(IS_IN_SET(rat_walking_time_opts,
zero=None)),
represent = lambda opt: rat_walking_time_opts.get(opt,
UNKNOWN_OPT),
**rat_label_and_tooltip(
"Walking time to the health service",
"How long does it take you to walk to the health service?")),
Field("health_problems_adults", "list:integer",
requires = IS_EMPTY_OR(IS_IN_SET(rat_health_problems_opts,
zero=None,
multiple=True)),
represent = lambda opt, set=rat_health_problems_opts: \
rat_represent_multiple(set, opt),
**rat_label_and_tooltip(
"Current type of health problems, adults",
"What types of health problems do people currently have?",
multiple=True)),
Field("health_problems_adults_other",
label = T("Other current health problems, adults")),
Field("health_problems_children", "list:integer",
requires = IS_EMPTY_OR(IS_IN_SET(rat_health_problems_opts,
zero=None,
multiple=True)),
represent = lambda opt, set=rat_health_problems_opts: \
rat_represent_multiple(set, opt),
**rat_label_and_tooltip(
"Current type of health problems, children",
"What types of health problems do children currently have?",
multiple=True)),
Field("health_problems_children_other",
label = T("Other current health problems, children")),
Field("chronical_illness_cases", "boolean", # @ToDo: "chronic illness"?
**rat_label_and_tooltip(
"People with chronical illnesses",
"Are there people with chronical illnesses in your community?")),
Field("chronical_illness_children", "boolean",
**rat_label_and_tooltip(
"Children with chronical illnesses",
"Are there children with chronical illnesses in your community?")),
Field("chronical_illness_elderly", "boolean",
**rat_label_and_tooltip(
"Older people with chronical illnesses",
"Are there older people with chronical illnesses in your community?")),
Field("chronical_care_sufficient", "boolean",
**rat_label_and_tooltip(
"Sufficient care/assistance for chronically ill",
"Are the chronically ill receiving sufficient care and assistance?")),
Field("malnutrition_present_pre_disaster", "boolean",
**rat_label_and_tooltip(
"Malnutrition present prior to disaster",
"Were there cases of malnutrition in this area prior to the disaster?")),
Field("mmd_present_pre_disaster", "boolean",
**rat_label_and_tooltip(
"Micronutrient malnutrition prior to disaster",
"Were there reports or evidence of outbreaks of any micronutrient malnutrition disorders before the emergency?")),
Field("breast_milk_substitutes_pre_disaster", "boolean",
**rat_label_and_tooltip(
"Breast milk substitutes used prior to disaster",
"Were breast milk substitutes used prior to the disaster?")),
Field("breast_milk_substitutes_post_disaster", "boolean",
**rat_label_and_tooltip(
"Breast milk substitutes in use since disaster",
"Are breast milk substitutes being used here since the disaster?")),
Field("infant_nutrition_alternative", "list:integer",
requires = IS_EMPTY_OR(IS_IN_SET(rat_infant_nutrition_alternative_opts,
zero=None,
multiple=True)),
represent = lambda opt, set=rat_infant_nutrition_alternative_opts: \
rat_represent_multiple(set, opt),
**rat_label_and_tooltip(
"Alternative infant nutrition in use",
"Babies who are not being breastfed, what are they being fed on?",
multiple=True)),
Field("infant_nutrition_alternative_other",
label = T("Other alternative infant nutrition in use")),
Field("u5_diarrhea", "boolean",
**rat_label_and_tooltip(
"Diarrhea among children under 5",
"Are there cases of diarrhea among children under the age of 5?")),
Field("u5_diarrhea_rate_48h", "integer",
**rat_label_and_tooltip(
"Approx. number of cases/48h",
"Approximately how many children under 5 with diarrhea in the past 48 hours?")),
s3_comments(),
*s3_meta_fields())
# CRUD strings
crud_strings[tablename] = rat_section_crud_strings
add_component(table,
assess_rat=dict(joinby="assessment_id",
multiple=False))
configure(tablename, deletable=False)
# Section 6 - Nutrition/Food Security -------------------------------------
rat_main_dish_types = {
1: T("Rice"),
2: T("Noodles"),
3: T("Biscuits"),
4: T("Corn"),
5: T("Wheat"),
6: T("Cassava"),
7: T("Cooking Oil")
}
rat_side_dish_types = {
1: T("Salted Fish"),
2: T("Canned Fish"),
3: T("Chicken"),
4: T("Eggs"),
99: T("Other (specify)")
}
rat_food_stock_reserve_opts = {
1: T("1-3 days"),
2: T("4-7 days"),
3: T("8-14 days")
}
rat_food_source_types = {
1: "Local market",
2: "Field cultivation",
3: "Food stall",
4: "Animal husbandry",
5: "Raising poultry",
99: "Other (specify)"
}
# Rapid Assessment, Section 6: Food & Nutrition
# "list:integer" fields store option keys from the rat_* option dicts defined
# above; rat_represent_multiple() renders the selected keys for display, and
# rat_label_and_tooltip() supplies the label/comment kwargs via ** expansion.
tablename = "assess_section6"
table = define_table(tablename,
assessment_id(),
Field("food_stocks_main_dishes", "list:integer",
requires = IS_EMPTY_OR(IS_IN_SET(rat_main_dish_types,
zero=None,
multiple=True)),
represent = lambda opt, set=rat_main_dish_types: \
rat_represent_multiple(set, opt),
**rat_label_and_tooltip(
"Existing food stocks, main dishes",
"What food stocks exist? (main dishes)",
multiple=True)),
# @ToDo: Should there be a field "food_stocks_other_main_dishes"?
Field("food_stocks_side_dishes", "list:integer",
requires = IS_EMPTY_OR(IS_IN_SET(rat_side_dish_types,
zero=None,
multiple=True)),
represent = lambda opt, set=rat_side_dish_types: \
rat_represent_multiple(set, opt),
**rat_label_and_tooltip(
"Existing food stocks, side dishes",
"What food stocks exist? (side dishes)",
multiple=True)),
Field("food_stocks_other_side_dishes",
label = T("Other side dishes in stock")),
# Single-select: duration the stocks will last (rat_food_stock_reserve_opts)
Field("food_stocks_reserve", "integer",
label = T("How long will the food last?"),
requires = IS_EMPTY_OR(IS_IN_SET(rat_food_stock_reserve_opts,
zero=None)),
represent = lambda opt: rat_food_stock_reserve_opts.get(opt,
UNKNOWN_OPT)),
Field("food_sources", "list:integer",
requires = IS_EMPTY_OR(IS_IN_SET(rat_food_source_types,
zero=None,
multiple=True)),
represent = lambda opt, set=rat_food_source_types: \
rat_represent_multiple(set, opt),
**rat_label_and_tooltip(
"Usual food sources in the area",
"What are the people's normal ways to obtain food in this area?",
multiple=True)),
Field("food_sources_other",
label = T("Other ways to obtain food")),
Field("food_sources_disruption", "boolean",
**rat_label_and_tooltip(
"Normal food sources disrupted",
"Have normal food sources been disrupted?")),
Field("food_sources_disruption_details",
label = T("If yes, which and how")),
Field("food_assistance_available", "boolean",
**rat_label_and_tooltip(
"Food assistance available/expected",
"Have the people received or are you expecting any medical or food assistance in the coming days?")),
Field("food_assistance_details", "text",
label = T("If yes, specify what and by whom")),
s3_comments(),
*s3_meta_fields())
# CRUD strings
crud_strings[tablename] = rat_section_crud_strings
# One section record per assessment (1:1 component of assess_rat)
add_component(table,
assess_rat=dict(joinby="assessment_id",
multiple=False))
# Sections are never deleted individually, only with the parent assessment
configure(tablename, deletable=False)
# Section 7 - Livelihood --------------------------------------------------
# Option dicts for the Livelihood section; key 99 is the conventional
# "Other (specify)" escape value paired with a free-text *_other field.
rat_income_source_opts = {
1: T("Agriculture"),
2: T("Fishing"),
3: T("Poultry"),
4: T("Casual Labor"),
5: T("Small Trade"),
6: T("Other")
}
rat_expense_types = {
1: T("Education"),
2: T("Health"),
3: T("Food"),
4: T("Hygiene"),
5: T("Shelter"),
6: T("Clothing"),
7: T("Funeral"),
8: T("Alcohol"),
99: T("Other (specify)")
}
rat_cash_source_opts = {
1: T("Family/friends"),
2: T("Government"),
3: T("Bank/micro finance"),
4: T("Humanitarian NGO"),
99: T("Other (specify)")
}
# Rank options 1..6 (xrange end is exclusive; Python-2 idiom)
rat_ranking_opts = xrange(1, 7)
tablename = "assess_section7"
table = define_table(tablename,
assessment_id(),
Field("income_sources_pre_disaster", "list:integer",
requires = IS_EMPTY_OR(IS_IN_SET(rat_income_source_opts,
zero=None,
multiple=True)),
represent = lambda opt, set=rat_income_source_opts: \
rat_represent_multiple(set, opt),
**rat_label_and_tooltip(
"Main income sources before disaster",
"What were your main sources of income before the disaster?",
multiple=True)),
Field("income_sources_post_disaster", "list:integer",
requires = IS_EMPTY_OR(IS_IN_SET(rat_income_source_opts,
zero=None,
multiple=True)),
represent = lambda opt, set=rat_income_source_opts: \
rat_represent_multiple(set, opt),
**rat_label_and_tooltip(
"Current main income sources",
"What are your main sources of income now?",
multiple=True)),
Field("main_expenses", "list:integer",
requires = IS_EMPTY_OR(IS_IN_SET(rat_expense_types,
zero=None,
multiple=True)),
represent = lambda opt, set=rat_expense_types: \
rat_represent_multiple(set, opt),
**rat_label_and_tooltip(
"Current major expenses",
"What do you spend most of your income on now?",
multiple=True)),
Field("main_expenses_other",
label = T("Other major expenses")),
Field("business_damaged", "boolean",
**rat_label_and_tooltip(
"Business damaged",
"Has your business been damaged in the course of the disaster?")),
Field("business_cash_available", "boolean",
**rat_label_and_tooltip(
"Cash available to restart business",
"Do you have access to cash to restart your business?")),
Field("business_cash_source", "list:integer",
requires = IS_EMPTY_OR(IS_IN_SET(rat_cash_source_opts,
zero=None,
multiple=True)),
represent = lambda opt, set=rat_cash_source_opts: \
rat_represent_multiple(set, opt),
**rat_label_and_tooltip(
"Main cash source",
"What are your main sources of cash to restart your business?")),
# rank_* fields: community priority ranking, 1 (highest) to 6
Field("rank_reconstruction_assistance", "integer",
requires = IS_EMPTY_OR(IS_IN_SET(rat_ranking_opts, zero=None)),
**rat_label_and_tooltip(
"Immediate reconstruction assistance, Rank",
"Assistance for immediate repair/reconstruction of houses")),
Field("rank_farmland_fishing_assistance", "integer",
label = T("Farmland/fishing material assistance, Rank"),
requires = IS_EMPTY_OR(IS_IN_SET(rat_ranking_opts, zero=None))),
Field("rank_poultry_restocking", "integer",
label = T("Poultry restocking, Rank"),
requires = IS_EMPTY_OR(IS_IN_SET(rat_ranking_opts, zero=None))),
Field("rank_health_care_assistance", "integer",
label = T("Health care assistance, Rank"),
requires = IS_EMPTY_OR(IS_IN_SET(rat_ranking_opts, zero=None))),
Field("rank_transportation_assistance", "integer",
label = T("Transportation assistance, Rank"),
requires = IS_EMPTY_OR(IS_IN_SET(rat_ranking_opts, zero=None))),
Field("other_assistance_needed",
label = T("Other assistance needed")),
Field("rank_other_assistance", "integer",
label = T("Other assistance, Rank"),
requires = IS_EMPTY_OR(IS_IN_SET(rat_ranking_opts, zero=None))),
s3_comments(),
*s3_meta_fields())
# CRUD strings
crud_strings[tablename] = rat_section_crud_strings
# One section record per assessment (1:1 component of assess_rat)
add_component(table,
assess_rat=dict(joinby="assessment_id",
multiple=False))
configure(tablename, deletable=False)
# Section 8 - Education ---------------------------------------------------
# Option dicts; 999 is the conventional "Not Applicable" key
rat_schools_salvmat_types = {
1: T("Wooden plank"),
2: T("Zinc roof"),
3: T("Bricks"),
4: T("Wooden poles"),
5: T("Door frame"),
6: T("Window frame"),
7: T("Roof tile"),
999: NOT_APPLICABLE
}
rat_alternative_study_places = {
1: T("Community Centre"),
2: T("Church"),
3: T("Mosque"),
4: T("Open area"),
5: T("Government building"),
6: T("Other (specify)"),
999: NOT_APPLICABLE
}
rat_school_attendance_barriers_opts = {
1: T("School used for other purpose"),
2: T("School destroyed"),
3: T("Lack of school uniform"),
4: T("Lack of transport to school"),
5: T("Children not enrolled in new school"),
6: T("School heavily damaged"),
7: T("Desire to remain with family"),
8: T("Lack of supplies at school"),
9: T("Displaced"),
10: T("Other (specify)"),
999: NOT_APPLICABLE
}
tablename = "assess_section8"
table = define_table(tablename,
assessment_id(),
Field("schools_total", "integer",
label = T("Total number of schools in affected area"),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999))),
Field("schools_public", "integer",
label = T("Number of public schools"),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999))),
Field("schools_private", "integer",
label = T("Number of private schools"),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999))),
Field("schools_religious", "integer",
label = T("Number of religious schools"),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999))),
Field("schools_destroyed", "integer",
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999)),
**rat_label_and_tooltip(
"Number of schools destroyed/uninhabitable",
"uninhabitable = foundation and structure destroyed")),
Field("schools_damaged", "integer",
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999)),
**rat_label_and_tooltip(
"Number of schools damaged but usable",
"windows broken, cracks in walls, roof slightly damaged")),
Field("schools_salvmat", "list:integer",
requires = IS_EMPTY_OR(IS_IN_SET(rat_schools_salvmat_types,
zero=None,
multiple=True)),
represent = lambda opt, set=rat_schools_salvmat_types: \
rat_represent_multiple(set, opt),
**rat_label_and_tooltip(
"Salvage material usable from destroyed schools",
"What type of salvage material can be used from destroyed schools?",
multiple=True)),
Field("alternative_study_places_available", "boolean",
**rat_label_and_tooltip(
"Alternative places for studying available",
"Are there alternative places for studying?")),
Field("alternative_study_places_number", "integer",
label = T("Number of alternative places for studying"),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999))),
Field("alternative_study_places", "list:integer",
requires = IS_EMPTY_OR(IS_IN_SET(rat_alternative_study_places,
zero=None,
multiple=True)),
represent = lambda opt, set=rat_alternative_study_places: \
rat_represent_multiple(set, opt),
**rat_label_and_tooltip(
"Alternative places for studying",
"Where are the alternative places for studying?",
multiple=True)),
Field("alternative_study_places_other",
label = T("Other alternative places for study")),
Field("schools_open_pre_disaster", "integer",
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999)),
**rat_label_and_tooltip(
"Number of schools open before disaster",
"How many primary/secondary schools were opening prior to the disaster?")),
Field("schools_open_post_disaster", "integer",
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999)),
**rat_label_and_tooltip(
"Number of schools open now",
"How many of the primary/secondary schools are now open and running a regular schedule of class?")),
Field("teachers_active_pre_disaster", "integer",
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999)),
**rat_label_and_tooltip(
"Number of teachers before disaster",
"How many teachers worked in the schools prior to the disaster?")),
Field("teachers_affected_by_disaster", "integer",
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999)),
**rat_label_and_tooltip(
"Number of teachers affected by disaster",
"How many teachers have been affected by the disaster (affected = unable to work)?")),
# Headcounts by age band (0612 = ages 6-12, 1318 = ages 13-18) and gender
Field("children_0612_female", "integer",
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999)),
**rat_label_and_tooltip(
"Girls 6-12 yrs in affected area",
"How many primary school age girls (6-12) are in the affected area?")),
Field("children_0612_male", "integer",
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999)),
**rat_label_and_tooltip(
"Boys 6-12 yrs in affected area",
"How many primary school age boys (6-12) are in the affected area?")),
Field("children_0612_not_in_school_female", "integer",
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999)),
**rat_label_and_tooltip(
"Girls 6-12 yrs not attending school",
"How many of the primary school age girls (6-12) in the area are not attending school?")),
Field("children_0612_not_in_school_male", "integer",
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999)),
**rat_label_and_tooltip(
"Boys 6-12 yrs not attending school",
"How many of the primary school age boys (6-12) in the area are not attending school?")),
Field("children_1318_female", "integer",
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999)),
**rat_label_and_tooltip(
"Girls 13-18 yrs in affected area",
"How many secondary school age girls (13-18) are in the affected area?")),
Field("children_1318_male", "integer",
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999)),
**rat_label_and_tooltip(
"Boys 13-18 yrs in affected area",
"How many secondary school age boys (13-18) are in the affected area?")),
Field("children_1318_not_in_school_female", "integer",
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999)),
**rat_label_and_tooltip(
"Girls 13-18 yrs not attending school",
"How many of the secondary school age girls (13-18) in the area are not attending school?")),
Field("children_1318_not_in_school_male", "integer",
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999)),
**rat_label_and_tooltip(
"Boys 13-18 yrs not attending school",
"How many of the secondary school age boys (13-18) in the area are not attending school?")),
Field("school_attendance_barriers", "list:integer",
requires = IS_EMPTY_OR(IS_IN_SET(rat_school_attendance_barriers_opts,
zero=None,
multiple=True)),
represent = lambda opt, set=rat_school_attendance_barriers_opts: \
rat_represent_multiple(set, opt),
**rat_label_and_tooltip(
"Factors affecting school attendance",
"What are the factors affecting school attendance?",
multiple=True)),
Field("school_attendance_barriers_other",
label = T("Other factors affecting school attendance")),
Field("school_assistance_available", "boolean",
**rat_label_and_tooltip(
"School assistance received/expected",
"Have schools received or are expecting to receive any assistance?")),
Field("school_assistance_tents_available", "boolean",
label = T("School tents received")),
# NOTE(review): field name has a typo ("assistence") - kept as-is because
# renaming would break the existing DB schema; confirm before fixing
Field("school_assistence_tents_source",
label = T("School tents, source")),
Field("school_assistance_materials_available", "boolean",
label = T("Education materials received")),
Field("school_assistance_materials_source",
label = T("Education materials, source")),
Field("school_assistance_other_available", "boolean",
label = T("Other school assistance received")),
Field("school_assistance_other",
label = T("Other school assistance, details")),
Field("school_assistance_other_source",
label = T("Other school assistance, source")),
s3_comments(),
*s3_meta_fields())
# @ToDo: onvalidation!
# CRUD strings
crud_strings[tablename] = rat_section_crud_strings
# One section record per assessment (1:1 component of assess_rat)
add_component(table,
assess_rat=dict(joinby="assessment_id",
multiple=False))
configure(tablename, deletable=False)
# Section 9 - Protection --------------------------------------------------
# Qualitative quantity scale (None/Few/Some/Many)
rat_fuzzy_quantity_opts = {
1: T("None"),
2: T("Few"),
3: T("Some"),
4: T("Many")
}
# Banded quantity scale
rat_quantity_opts = {
1: "1-10",
2: "11-50",
3: "51-100",
4: "100+"
}
rat_child_activity_opts = {
1: T("Playing"),
2: T("Domestic chores"),
3: T("School/studying"),
4: T("Doing nothing (no structured activity)"),
5: T("Working or other to provide money/food"),
99: T("Other (specify)")
}
# Post-disaster variant adds option 6 on top of the pre-disaster options
rat_child_activity_post_disaster_opts = rat_child_activity_opts.copy()
rat_child_activity_post_disaster_opts.update({
6: T("Disaster clean-up/repairs")
})
tablename = "assess_section9"
table = define_table(tablename,
assessment_id(),
Field("vulnerable_groups_safe_env", "boolean",
label = T("Safe environment for vulnerable groups"),
comment = rat_tooltip("Are the areas that children, older people, and people with disabilities live in, play in and walk through on a daily basis physically safe?")),
Field("safety_children_women_affected", "boolean",
label = T("Safety of children and women affected by disaster?"),
comment = rat_tooltip("Has the safety and security of women and children in your community changed since the emergency?")),
Field("sec_incidents", "boolean",
label = T("Known incidents of violence since disaster"),
comment = rat_tooltip("Do you know of any incidents of violence?")),
Field("sec_incidents_gbv", "boolean",
label = T("Known incidents of violence against women/girls"),
comment = rat_tooltip("Without mentioning any names or indicating anyone, do you know of any incidents of violence against women or girls occuring since the disaster?")),
Field("sec_current_needs",
label = T("Needs to reduce vulnerability to violence"),
comment = rat_tooltip("What should be done to reduce women and children's vulnerability to violence?")),
# The children_* counts below use the fuzzy None/Few/Some/Many scale
Field("children_separated", "integer",
label = T("Children separated from their parents/caregivers"),
requires = IS_EMPTY_OR(IS_IN_SET(rat_fuzzy_quantity_opts,
zero=None)),
represent = lambda opt: rat_fuzzy_quantity_opts.get(opt,
UNKNOWN_OPT),
comment = rat_tooltip("Do you know of children separated from their parents or caregivers?")),
Field("children_separated_origin",
label = T("Origin of the separated children"),
comment = rat_tooltip("Where are the separated children originally from?")),
Field("children_missing", "integer",
label = T("Parents/Caregivers missing children"),
requires = IS_EMPTY_OR(IS_IN_SET(rat_fuzzy_quantity_opts,
zero=None)),
represent = lambda opt: rat_fuzzy_quantity_opts.get(opt,
UNKNOWN_OPT),
comment = rat_tooltip("Do you know of parents/caregivers missing children?")),
Field("children_orphaned", "integer",
label = T("Children orphaned by the disaster"),
requires = IS_EMPTY_OR(IS_IN_SET(rat_fuzzy_quantity_opts,
zero=None)),
represent = lambda opt: rat_fuzzy_quantity_opts.get(opt,
UNKNOWN_OPT),
comment = rat_tooltip("Do you know of children that have been orphaned by the disaster?")),
Field("children_unattended", "integer",
label = T("Children living on their own (without adults)"),
requires = IS_EMPTY_OR(IS_IN_SET(rat_fuzzy_quantity_opts,
zero=None)),
represent = lambda opt: rat_fuzzy_quantity_opts.get(opt,
UNKNOWN_OPT),
comment = rat_tooltip("Do you know of children living on their own (without adults)?")),
Field("children_disappeared", "integer",
label = T("Children who have disappeared since the disaster"),
requires = IS_EMPTY_OR(IS_IN_SET(rat_fuzzy_quantity_opts,
zero=None)),
represent = lambda opt: rat_fuzzy_quantity_opts.get(opt,
UNKNOWN_OPT),
comment = rat_tooltip("Do you know of children that have disappeared without explanation in the period since the disaster?")),
Field("children_evacuated", "integer",
label = T("Children that have been sent to safe places"),
requires = IS_EMPTY_OR(IS_IN_SET(rat_fuzzy_quantity_opts,
zero=None)),
represent = lambda opt: rat_fuzzy_quantity_opts.get(opt,
UNKNOWN_OPT),
comment = rat_tooltip("Do you know of children that have been sent to safe places?")),
Field("children_evacuated_to",
label = T("Places the children have been sent to"),
comment = rat_tooltip("Where have the children been sent?")),
Field("children_with_older_caregivers", "integer",
label = T("Older people as primary caregivers of children"),
requires = IS_EMPTY_OR(IS_IN_SET(rat_fuzzy_quantity_opts,
zero=None)),
represent = lambda opt: rat_fuzzy_quantity_opts.get(opt,
UNKNOWN_OPT),
comment = rat_tooltip("Do you know of older people who are primary caregivers of children?")),
Field("children_in_disabled_homes", "boolean",
label = T("Children in homes for disabled children"),
comment = rat_tooltip("Are there children living in homes for disabled children in this area?")),
Field("children_in_orphanages", "boolean",
label = T("Children in orphanages"),
comment = rat_tooltip("Are there children living in orphanages in this area?")),
Field("children_in_boarding_schools", "boolean",
label = T("Children in boarding schools"),
comment = rat_tooltip("Are there children living in boarding schools in this area?")),
Field("children_in_juvenile_detention", "boolean",
label = T("Children in juvenile detention"),
comment = rat_tooltip("Are there children living in juvenile detention in this area?")),
Field("children_in_adult_prisons", "boolean",
label = T("Children in adult prisons"),
comment = rat_tooltip("Are there children living in adult prisons in this area?")),
Field("people_in_adult_prisons", "boolean",
label = T("Adults in prisons"),
comment = rat_tooltip("Are there adults living in prisons in this area?")),
Field("people_in_care_homes", "boolean",
label = T("Older people in care homes"),
comment = rat_tooltip("Are there older people living in care homes in this area?")),
Field("people_in_institutions_est_total", "integer",
label = T("Estimated total number of people in institutions"),
requires = IS_EMPTY_OR(IS_IN_SET(rat_quantity_opts,
zero=None)),
represent = lambda opt: rat_quantity_opts.get(opt,
UNKNOWN_OPT),
comment = rat_tooltip("What is the estimated total number of people in all of these institutions?")),
Field("staff_in_institutions_present", "boolean",
label = T("Staff present and caring for residents"),
comment = rat_tooltip("Are there staff present and caring for the residents in these institutions?")),
Field("adequate_food_water_in_institutions", "boolean",
label = T("Adequate food and water available"),
comment = rat_tooltip("Is adequate food and water available for these institutions?")),
# Child-activity matrix: u12/o12 = under/over 12yrs, f/m = female/male,
# pre/post disaster.
# NOTE(review): here multiple=True is passed to rat_tooltip() (elsewhere it
# goes to rat_label_and_tooltip()) - presumably rat_tooltip accepts it;
# verify against its definition earlier in the file
Field("child_activities_u12f_pre_disaster", "list:integer",
label = T("Activities of girls <12yrs before disaster"),
requires = IS_EMPTY_OR(IS_IN_SET(rat_child_activity_opts,
zero=None,
multiple=True)),
represent = lambda opt, set=rat_child_activity_opts: \
rat_represent_multiple(set, opt),
comment = rat_tooltip("How did girls <12yrs spend most of their time prior to the disaster?",
multiple=True)),
Field("child_activities_u12f_pre_disaster_other",
label = T("Other activities of girls<12yrs before disaster")),
Field("child_activities_u12m_pre_disaster", "list:integer",
label = T("Activities of boys <12yrs before disaster"),
requires = IS_EMPTY_OR(IS_IN_SET(rat_child_activity_opts,
zero=None,
multiple=True)),
represent = lambda opt, set=rat_child_activity_opts: \
rat_represent_multiple(set, opt),
comment = rat_tooltip("How did boys <12yrs spend most of their time prior to the disaster?",
multiple=True)),
Field("child_activities_u12m_pre_disaster_other",
label = T("Other activities of boys <12yrs before disaster")),
Field("child_activities_o12f_pre_disaster", "list:integer",
label = T("Activities of girls 13-17yrs before disaster"),
requires = IS_EMPTY_OR(IS_IN_SET(rat_child_activity_opts,
zero=None,
multiple=True)),
represent = lambda opt, set=rat_child_activity_opts: \
rat_represent_multiple(set, opt),
# NOTE(review): tooltip text reads "boys girls" - a string typo that can
# only be fixed together with its translations
comment = rat_tooltip("How did boys girls 13-17yrs spend most of their time prior to the disaster?",
multiple=True)),
Field("child_activities_o12f_pre_disaster_other",
label = T("Other activities of girls 13-17yrs before disaster")),
Field("child_activities_o12m_pre_disaster", "list:integer",
label = T("Activities of boys 13-17yrs before disaster"),
requires = IS_EMPTY_OR(IS_IN_SET(rat_child_activity_opts,
zero=None,
multiple=True)),
represent = lambda opt, set=rat_child_activity_opts: \
rat_represent_multiple(set, opt),
comment = rat_tooltip("How did boys 13-17yrs spend most of their time prior to the disaster?",
multiple=True)),
Field("child_activities_o12m_pre_disaster_other",
label = T("Other activities of boys 13-17yrs before disaster")),
# NOTE(review): the post-disaster fields reuse rat_child_activity_opts
# rather than rat_child_activity_post_disaster_opts (which adds option 6,
# "Disaster clean-up/repairs") - looks unintended; confirm before changing
Field("child_activities_u12f_post_disaster", "list:integer",
label = T("Activities of girls <12yrs now"),
requires = IS_EMPTY_OR(IS_IN_SET(rat_child_activity_opts,
zero=None,
multiple=True)),
represent = lambda opt, set=rat_child_activity_opts: \
rat_represent_multiple(set, opt),
comment = rat_tooltip("How do girls <12yrs spend most of their time now?",
multiple=True)),
Field("child_activities_u12f_post_disaster_other",
label = T("Other activities of girls<12yrs")),
Field("child_activities_u12m_post_disaster", "list:integer",
label = T("Activities of boys <12yrs now"),
requires = IS_EMPTY_OR(IS_IN_SET(rat_child_activity_opts,
zero=None,
multiple=True)),
represent = lambda opt, set=rat_child_activity_opts: \
rat_represent_multiple(set, opt),
comment = rat_tooltip("How do boys <12yrs spend most of their time now?",
multiple=True)),
Field("child_activities_u12m_post_disaster_other",
label = T("Other activities of boys <12yrs")),
Field("child_activities_o12f_post_disaster", "list:integer",
label = T("Activities of girls 13-17yrs now"),
requires = IS_EMPTY_OR(IS_IN_SET(rat_child_activity_opts,
zero=None,
multiple=True)),
represent = lambda opt, set=rat_child_activity_opts: \
rat_represent_multiple(set, opt),
comment = rat_tooltip("How do girls 13-17yrs spend most of their time now?",
multiple=True)),
Field("child_activities_o12f_post_disaster_other",
label = T("Other activities of girls 13-17yrs")),
Field("child_activities_o12m_post_disaster", "list:integer",
label = T("Activities of boys 13-17yrs now"),
requires = IS_EMPTY_OR(IS_IN_SET(rat_child_activity_opts,
zero=None,
multiple=True)),
represent = lambda opt, set=rat_child_activity_opts: \
rat_represent_multiple(set, opt),
comment = rat_tooltip("How do boys 13-17yrs spend most of their time now?",
multiple=True)),
Field("child_activities_o12m_post_disaster_other",
label = T("Other activities of boys 13-17yrs")),
Field("coping_activities_elderly", "boolean",
label = T("Older people participating in coping activities"),
comment = rat_tooltip("Do older people in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)")),
Field("coping_activities_women", "boolean",
label = T("Women participating in coping activities"),
comment = rat_tooltip("Do women in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)")),
Field("coping_activities_disabled", "boolean",
label = T("Disabled participating in coping activities"),
comment = rat_tooltip("Do people with disabilities in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)")),
Field("coping_activities_minorities", "boolean",
label = T("Minorities participating in coping activities"),
comment = rat_tooltip("Do minority members in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)")),
Field("coping_activities_adolescent", "boolean",
label = T("Adolescent participating in coping activities"),
comment = rat_tooltip("Do adolescent and youth in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)")),
Field("current_general_needs", "text",
label = T("Current greatest needs of vulnerable groups"),
comment = rat_tooltip("In general, what are the greatest needs of older people, people with disabilities, children, youth and women in your community?")),
s3_comments(),
*s3_meta_fields())
# CRUD strings
crud_strings[tablename] = rat_section_crud_strings
# One section record per assessment (1:1 component of assess_rat)
add_component(table,
assess_rat=dict(joinby="assessment_id",
multiple=False))
configure(tablename, deletable=False)
# -----------------------------------------------------------------------------
def assess_rat_summary(r, **attr):
    """
    Custom "summary" method for aggregate Rapid Assessment reports.

    @param r: the S3Request
    @param attr: controller attributes (unused)
    @return: empty dict for HTML, None for XLS
    @raise HTTP: 501 for unsupported resources or formats
    """
    # Only defined for the "rat" resource
    if r.name != "rat":
        raise HTTP(501, body=BADMETHOD)
    representation = r.representation
    if representation == "html":
        return dict()
    if representation == "xls":
        return None
    # Other formats?
    raise HTTP(501, body=BADFORMAT)

# Register the method as assess/rat/summary
s3db.set_method("assess", "rat",
                method="summary",
                action=assess_rat_summary)
# Pass variables back to global scope (response.s3.*)
# =========================================================================
# UN Common Operational Datasets
# =========================================================================
# Population Statistics
# Time-stamped per-location demographic figures (one row per location/date)
tablename = "assess_population"
table = define_table(tablename,
location_id(widget = S3LocationAutocompleteWidget(),
requires = IS_LOCATION()),
Field("population", "integer"),
Field("households", "integer"),
Field("median_age", "double"),
Field("average_family_size", "double"),
Field("effective_date", "datetime"),
s3_comments(),
*(s3_timestamp() + s3_uid() + s3_deletion_status()))
# CRUD strings
crud_strings[tablename] = Storage(
title_create = T("Add Population Statistic"),
title_display = T("Population Statistic Details"),
title_list = T("Population Statistics"),
title_update = T("Edit Population Statistic"),
title_search = T("Search Population Statistics"),
subtitle_create = T("Add New Population Statistic"),
label_list_button = T("List Population Statistics"),
label_create_button = T("Add Population Statistic"),
label_delete_button = T("Delete Population Statistic"),
msg_record_created = T("Population Statistic added"),
msg_record_modified = T("Population Statistic updated"),
msg_record_deleted = T("Population Statistic deleted"),
msg_list_empty = T("No Population Statistics currently registered"),
name_nice = T("Population Statistic"),
name_nice_plural = T("Population Statistics"))
# Impact as component of incident reports
#add_component("impact_impact", irs_ireport="ireport_id")
# =========================================================================
def impact_tables():
    """
    Load the Impact tables as-needed.

    Defines:
        - impact_type: lookup of impact categories, linked to org sectors
        - assess_impact: an impact record, attachable to either an
          incident report (irs_ireport) or an assessment (assess)

    Uses module-level framework globals (db, s3db, s3, settings, auth, T,
    define_table, crud_strings, assess_severity_opts, ...).
    """
    sector_id = s3db.org_sector_id
    ireport_id = s3db.irs_ireport_id
    # Load the models we depend on
    # NOTE(review): assess_id is only bound when the "assess" module is
    # enabled, but it is used unconditionally below - confirm that impact
    # is always deployed together with assess
    if settings.has_module("assess"):
        assess_tables()
        assess_id = s3.assess_id
    module = "impact"
    # -------------------------------------------------------------------------
    # Impact Type
    resourcename = "type"
    tablename = "%s_%s" % (module, resourcename)
    table = db.define_table(tablename,
                            Field("name", length=128, notnull=True, unique=True),
                            sector_id(),
                            *s3_meta_fields())
    # CRUD strings
    ADD_IMPACT_TYPE = T("Add Impact Type")
    s3.crud_strings[tablename] = Storage(
        title_create = ADD_IMPACT_TYPE,
        title_display = T("Impact Type Details"),
        title_list = T("Impact Types"),
        title_update = T("Edit Impact Type"),
        title_search = T("Search Impact Type"),
        subtitle_create = T("Add New Impact Type"),
        label_list_button = T("List Impact Types"),
        label_create_button = ADD_IMPACT_TYPE,
        label_delete_button = T("Delete Impact Type"),
        msg_record_created = T("Impact Type added"),
        msg_record_modified = T("Impact Type updated"),
        msg_record_deleted = T("Impact Type deleted"),
        msg_list_empty = T("No Impact Types currently registered"),
        name_nice = T("Impact"),
        name_nice_plural = T("Impacts"))
    def impact_type_comment():
        """ Add-resource link for the impact_type_id field (admins only) """
        # Was: auth.id_group("'Administrator'") - the literal quotes inside
        # the role name could never match the "Administrator" role
        if auth.has_membership(auth.id_group("Administrator")):
            # NOTE(review): c="assess" although this is the "impact" module -
            # verify which controller actually serves the "type" resource
            return S3AddResourceLink(c="assess",
                                     f="type",
                                     vars=dict(child="impact_type_id"))
        else:
            return None
    represent = S3Represent(tablename)
    # Reusable FK field for referencing impact types from other tables
    impact_type_id = S3ReusableField("impact_type_id", table,
                                     sortby="name",
                                     requires = IS_NULL_OR(
                                                    IS_ONE_OF(db,
                                                              "impact_type.id",
                                                              represent,
                                                              sort=True)),
                                     represent = represent,
                                     label = T("Impact Type"),
                                     comment = impact_type_comment(),
                                     ondelete = "RESTRICT")
    # =====================================================================
    # Impact
    # Load model
    ireport_id = s3db.irs_ireport_id
    tablename = "assess_impact"
    table = define_table(tablename,
                         # Hidden parent links: exactly one is set, depending
                         # on whether this impact belongs to an incident
                         # report or an assessment
                         ireport_id(readable=False, writable=False),
                         assess_id(readable=False, writable=False),
                         impact_type_id(),
                         Field("value", "double"),
                         Field("severity", "integer",
                               default = 0),
                         s3_comments(),
                         *s3_meta_fields())
    table.severity.requires = IS_EMPTY_OR(IS_IN_SET(assess_severity_opts))
    table.severity.widget = SQLFORM.widgets.radio.widget
    table.severity.represent = s3_assess_severity_represent
    # CRUD strings
    ADD_IMPACT = T("Add Impact")
    crud_strings[tablename] = Storage(
        title_create = ADD_IMPACT,
        title_display = T("Impact Details"),
        title_list = T("Impacts"),
        title_update = T("Edit Impact"),
        title_search = T("Search Impacts"),
        subtitle_create = T("Add New Impact"),
        label_list_button = T("List Impacts"),
        label_create_button = ADD_IMPACT,
        label_delete_button = T("Delete Impact"),
        msg_record_created = T("Impact added"),
        msg_record_modified = T("Impact updated"),
        msg_record_deleted = T("Impact deleted"),
        msg_list_empty = T("No Impacts currently registered"))
# =============================================================================
def index():
    """ Module landing page: expose the module's nice name to the view """
    name_nice = settings.modules[module].name_nice
    response.title = name_nice
    return dict(module_name=name_nice)
# -----------------------------------------------------------------------------
def create():
    """ Convenience endpoint: forward straight to assess/create """
    target = URL(f="assess", args="create")
    redirect(target)
# =============================================================================
# UN Common Operational Datasets
# =============================================================================
def population():
    """ RESTful CRUD controller for population statistics """
    return s3_rest_controller()
# =============================================================================
# Rapid Assessments
# =============================================================================
def rat():
    """
    Rapid Assessments, RESTful controller.

    Loads the assessment models, configures form subheadings for each
    section table (the subheadings dicts map a translated heading to the
    name of the first field under that heading), pre-populates staff and
    shelter defaults on create, and delegates to s3_rest_controller.

    @return: the output dict/stream from s3_rest_controller
    """
    # Load Models
    assess_tables()
    # module/resourcename are supplied by the web2py request environment
    tablename = "%s_%s" % (module, resourcename)
    table = db[tablename]
    # Villages only
    #table.location_id.requires = IS_NULL_OR(IS_ONE_OF(db(db.gis_location.level == "L5"),
    #                                                  "gis_location.id",
    #                                                  repr_select, sort=True))
    # Subheadings in forms:
    configure("assess_section2",
              subheadings = {
                T("Population and number of households"): "population_total",
                T("Fatalities"): "dead_women",
                T("Casualties"): "injured_women",
                T("Missing Persons"): "missing_women",
                T("General information on demographics"): "household_head_elderly",
                T("Comments"): "comments"})
    configure("assess_section3",
              subheadings = {
                T("Access to Shelter"): "houses_total",
                T("Water storage containers in households"): "water_containers_available",
                T("Other non-food items"): "cooking_equipment_available",
                T("Shelter/NFI Assistance"): "nfi_assistance_available",
                T("Comments"): "comments"})
    configure("assess_section4",
              subheadings = {
                T("Water supply"): "water_source_pre_disaster_type",
                T("Water collection"): "water_coll_time",
                T("Places for defecation"): "defec_place_type",
                T("Environment"): "close_industry",
                T("Latrines"): "latrines_number",
                T("Comments"): "comments"})
    configure("assess_section5",
              subheadings = {
                T("Health services status"): "health_services_pre_disaster",
                T("Current health problems"): "health_problems_adults",
                T("Nutrition problems"): "malnutrition_present_pre_disaster",
                T("Comments"): "comments"})
    configure("assess_section6",
              subheadings = {
                T("Existing food stocks"): "food_stocks_main_dishes",
                # Fixed: key and value were inverted
                # (was T("food_sources"): "Food sources")
                T("Food sources"): "food_sources",
                T("Food assistance"): "food_assistance_available",
                T("Comments"): "comments"})
    configure("assess_section7",
              subheadings = {
                "%s / %s" % (T("Sources of income"),
                             T("Major expenses")): "income_sources_pre_disaster",
                # Fixed: value must be a field name, not another label
                # (was T("Business Damaged"): "Access to cash")
                "%s / %s" % (T("Business Damaged"),
                             T("Access to cash")): "business_damaged",
                T("Current community priorities"): "rank_reconstruction_assistance",
                T("Comments"): "comments"})
    configure("assess_section8",
              subheadings = {
                T("Access to education services"): "schools_total",
                T("Alternative places for studying"): "alternative_study_places_available",
                T("School activities"): "schools_open_pre_disaster",
                T("School attendance"): "children_0612_female",
                T("School assistance"): "school_assistance_available",
                T("Comments"): "comments"})
    configure("assess_section9",
              subheadings = {
                T("Physical Safety"): "vulnerable_groups_safe_env",
                T("Separated children, caregiving arrangements"): "children_separated",
                T("Persons in institutions"): "children_in_disabled_homes",
                T("Activities of children"): "child_activities_u12f_pre_disaster",
                T("Coping Activities"): "coping_activities_elderly",
                T("Current general needs"): "current_general_needs",
                T("Comments"): "comments"})
    # @ToDo Generalize this and make it available as a function that other
    # component prep methods can call to set the default for a join field.
    def prep(r):
        """ Pre-process: default staff_id and (on create) shelter_id """
        if r.interactive:
            # Pre-populate staff ID
            staff_id = auth.s3_logged_in_human_resource()
            if staff_id:
                r.table.staff_id.default = staff_id
            if r.method == "create":
                # If this assessment is being created as a component of a shelter,
                # it will have the shelter id in its vars.
                shelter_id = r.get_vars.get("rat.shelter_id", None)
                if shelter_id:
                    try:
                        shelter_id = int(shelter_id)
                    except ValueError:
                        # Non-numeric id in the URL: ignore it
                        pass
                    else:
                        r.table.shelter_id.default = shelter_id
        return True
    response.s3.prep = prep
    # Post-processor
    def postp(r, output):
        """ Post-process: action buttons, redirect new records to tabs """
        s3_action_buttons(r, deletable=False)
        # Redirect to update view to open tabs
        if r.representation == "html" and r.method == "create":
            r.next = r.url(method="",
                           id=s3mgr.get_session("assess", "rat"))
        return output
    response.s3.postp = postp
    # Over-ride the listadd since we're not a component here
    configure(tablename, create_next="", listadd=True)
    tabs = [(T("Identification"), None),
            (T("Demographic"), "section2"),
            (T("Shelter & Essential NFIs"), "section3"),
            (T("WatSan"), "section4"),
            (T("Health"), "section5"),
            (T("Nutrition"), "section6"),
            (T("Livelihood"), "section7"),
            (T("Education"), "section8"),
            (T("Protection"), "section9") ]
    rheader = lambda r: rat_rheader(r,
                                    tabs)
    output = s3_rest_controller(rheader=rheader,
                                s3ocr_config={"tabs": tabs})
    response.s3.stylesheets.append( "S3/rat.css" )
    return output
# -----------------------------------------------------------------------------
def rat_rheader(r, tabs=None):
    """
    Resource header for Rapid Assessment (rat) records

    Builds a summary table (location, date, organisations of the one or
    two assessing staff members) plus navigation tabs for HTML requests.

    @param r: the S3Request
    @param tabs: list of (title, component_name) tuples for the rheader tabs
    @returns: the rheader DIV, or None for non-HTML/non-rat requests
    """
    if tabs is None:
        # avoid a mutable default argument
        tabs = []
    if r.representation != "html" or r.name != "rat":
        return None
    report = r.record
    if not report:
        return None

    htable = db.hrm_human_resource
    organisation_represent = htable.organisation_id.represent
    rheader_tabs = s3_rheader_tabs(r, tabs, paging=True)

    location = report.location_id
    if location:
        location = r.table.location_id.represent(location)

    def _staff_organisation_id(staff_id):
        # Organisation the given staff member belongs to, or None
        if not staff_id:
            return None
        query = (htable.id == staff_id)
        row = db(query).select(htable.organisation_id,
                               limitby=(0, 1)).first()
        return row.organisation_id if row else None

    organisation_id = _staff_organisation_id(report.staff_id)
    if organisation_id:
        organisation = organisation_represent(organisation_id)
    else:
        organisation = None

    organisation2_id = _staff_organisation_id(report.staff2_id)
    if organisation2_id and organisation2_id != organisation_id:
        # Fix: previously represented organisation_id here, which always
        # duplicated the first organisation instead of showing the second
        organisation2 = organisation_represent(organisation2_id)
    else:
        # second staff member absent or from the same organisation
        organisation2 = None

    if organisation2:
        orgs = "%s, %s" % (organisation, organisation2)
    else:
        orgs = organisation

    rheader = DIV(TABLE(
                    TR(
                        TH("%s: " % T("Location")), location,
                        TH("%s: " % T("Date")), report.date
                      ),
                    TR(
                        TH("%s: " % T("Organizations")), orgs,
                      )
                    ),
                  rheader_tabs)
    return rheader
# =============================================================================
# Flexible Impact Assessments
# =============================================================================
def assess_rheader(r, tabs=[]):
    """
    Resource header for Flexible Impact Assessments

    Shows date/time, location and assessor for the current record,
    followed by the component tabs. HTML requests only.
    """
    if r.representation != "html":
        return None
    rheader_tabs = s3_rheader_tabs(r, tabs)
    assess = r.record
    if not assess:
        return None
    table = db.assess_assess
    # Human-readable representations of the key fields
    datetime_repr = table.datetime.represent(assess.datetime)
    location_repr = table.location_id.represent(assess.location_id)
    assessor_repr = table.assessor_person_id.represent(assess.assessor_person_id)
    summary_row = TR(TH("%s: " % T("Date & Time")), datetime_repr,
                     TH("%s: " % T("Location")), location_repr,
                     TH("%s: " % T("Assessor")), assessor_repr,
                     )
    return DIV(TABLE(summary_row), rheader_tabs)
# -----------------------------------------------------------------------------
def assess():
    """ RESTful CRUD controller for Flexible Impact Assessments """
    # Load Models
    assess_tables()
    impact_tables()
    tablename = "%s_%s" % (module, resourcename)
    table = db[tablename]
    # Pre-processor
    def prep(r):
        # On mobile clients, record creation goes through the dedicated
        # mobile form instead of the standard CRUD form
        if session.s3.mobile and r.method == "create" and r.interactive:
            # redirect to mobile-specific form:
            redirect(URL(f="assess_short_mobile"))
        return True
    response.s3.prep = prep
    #table.incident_id.comment = DIV(_class="tooltip",
    #                                _title="%s|%s" % (T("Incident"),
    #                                                  T("Optional link to an Incident which this Assessment was triggered by.")))
    # Component tabs shown in the rheader
    tabs = [
            (T("Edit Details"), None),
            (T("Baselines"), "baseline"),
            (T("Impacts"), "impact"),
            (T("Summary"), "summary"),
            #(T("Requested"), "ritem"),
            ]
    rheader = lambda r: assess_rheader(r, tabs)
    return s3_rest_controller(rheader=rheader)
# -----------------------------------------------------------------------------
def impact_type():
    """ RESTful CRUD controller for Impact Types """
    # Load Models
    impact_tables()
    # The resource lives in the "impact" module, not in this controller's
    # own module, so address it explicitly
    module = "impact"
    resourcename = "type"
    return s3_rest_controller(module, resourcename)
# -----------------------------------------------------------------------------
def baseline_type():
    """ RESTful CRUD controller for Baseline Types """
    # Load Models
    assess_tables()
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def baseline():
    """ RESTful CRUD controller for Baselines """
    # Load Models
    assess_tables()
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def summary():
    """ RESTful CRUD controller for Assessment Summaries """
    # Load Models
    assess_tables()
    return s3_rest_controller()
# =============================================================================
def basic_assess():
    """
    Custom page to hide the complexity of the Assessments/Impacts/Summary
    model: PC Browser version

    If called with an ?ireport_id= the location is copied from the calling
    Incident; otherwise a location selector is shown on the form.

    @returns: dict(title, location, form) for the view
    """
    if not auth.is_logged_in():
        session.error = T("Need to be logged-in to be able to submit assessments")
        redirect(URL(c="default", f="user", args=["login"]))
    # Load Models
    assess_tables()
    impact_tables()
    # The seven standard impact types, shared by both form variants
    # (previously this tuple was duplicated in each branch)
    impact_fields = [("impact", i) for i in range(1, 8)]
    # See if we've been created from an Incident
    ireport_id = request.vars.get("ireport_id")
    if ireport_id:
        # Location is the same as the calling Incident
        table = db.irs_ireport
        row = db(table.id == ireport_id).select(table.location_id,
                                                limitby=(0, 1)).first()
        if row:
            irs_location_id = row.location_id
            location = table.location_id.represent(irs_location_id)
        else:
            irs_location_id = None
            location = None
        custom_assess_fields = tuple(impact_fields +
                                     [("assess", "comments")])
        form, form_accepted, assess_id = custom_assess(custom_assess_fields,
                                                       location_id=irs_location_id)
    else:
        location = None
        # No incident context: let the user pick the location on the form
        custom_assess_fields = tuple([("assess", "location_id", "selector")] +
                                     impact_fields +
                                     [("assess", "comments")])
        form, form_accepted, assess_id = custom_assess(custom_assess_fields)

    if form_accepted:
        session.confirmation = T("Basic Assessment Reported")
        redirect(URL(f="assess", args=[assess_id, "impact"]))

    return dict(title = T("Basic Assessment"),
                location = location,
                form = form)
# -----------------------------------------------------------------------------
def mobile_basic_assess():
    """ Custom page to hide the complexity of the Assessments/Impacts/Summary model: Mobile device version """
    if not auth.is_logged_in():
        redirect(URL(c="default", f="index"))
    # Load Models
    assess_tables()
    impact_tables()
    # Fields on the short mobile form: HTML5-geolocated location,
    # the seven standard impact types, and free-text comments
    custom_assess_fields = (
                            ("assess", "location_id", "auto"),
                            ("impact", 1),
                            ("impact", 2),
                            ("impact", 3),
                            ("impact", 4),
                            ("impact", 5),
                            ("impact", 6),
                            ("impact", 7),
                            ("assess", "comments"),
                            )
    form, form_accepted, assess_id = custom_assess(custom_assess_fields)
    if form_accepted:
        # Replace the form with a simple confirmation page offering to
        # report another assessment
        form = FORM(H1(settings.get_system_name_short()),
                    H2(T("Short Assessment")),
                    P(T("Assessment Reported")),
                    A(T("Report Another Assessment..."),
                      _href = URL(r=request)
                      ),
                    _class = "mobile",
                    )
    return dict(form = form)
# -----------------------------------------------------------------------------
def color_code_severity_widget(widget, name):
    """
    Colour-code Severity options in place.

    The first four options of the widget get green/yellow/orange/red
    backgrounds (in that order) and their inner input element is renamed
    to the given name. The (mutated) widget is returned.
    """
    palette = ["green", "yellow", "orange", "red"]
    for option, color in zip(widget, palette):
        cell = option[0]
        cell["_style"] = "background-color:%s;" % color
        cell[0]["_name"] = name
    return widget
# -----------------------------------------------------------------------------
def custom_assess(custom_assess_fields, location_id=None):
    """
    Build a custom page to hide the complexity of the
    Assessments/Impacts/Summary model

    @param custom_assess_fields: sequence of tuples describing form rows:
           ("assess", <fieldname>[, "auto"|"selector"]) |
           ("impact", <impact_type_id>) |
           ("baseline", <baseline_type_id>) |
           ("summary", <subsector_id>) |
           ("title", <heading text>)
    @param location_id: pre-determined location; overrides any location
           submitted on the form
    @returns: (form, form_accepted, assess_id)

    @ToDo: Improved validation
           - the existing .double JS isn't 100% reliable & this currently
             crashes the back-end upon submission if bad data slips through
    """
    # Load Models
    assess_tables()
    impact_tables()

    form_rows = []
    comment = ""
    for field in custom_assess_fields:
        # All widget names follow this pattern; the save code below
        # must build exactly the same names
        name = "custom_%s_%s" % (field[0], field[1])
        if field[0] == "assess":
            if field[1] == "comments":
                label = "%s:" % db.assess_assess[ field[1] ].label
                widget = TEXTAREA(_name = name,
                                  _class = "double",
                                  _type = "text")
            elif field[1] == "location_id":
                if field[2] == "auto":
                    # HTML5 Geolocate: free-text name plus hidden lat/lon
                    label = "%s:" % T("Location")
                    widget = DIV(INPUT(_name = name,
                                       _type = "text"),
                                 INPUT(_name = "gis_location_lat",
                                       _id = "gis_location_lat",
                                       _type = "text"),
                                 INPUT(_name = "gis_location_lon",
                                       _id = "gis_location_lon",
                                       _type = "text"))
                else:
                    # Location Selector
                    label = "%s:" % T("Location")
                    widget = db.assess_assess.location_id.widget(field=db.assess_assess.location_id,
                                                                 value="")
        elif field[0] == "baseline":
            label = S3Represent(lookup="assess_baseline_type")(field[1])
            label = "%s:" % T(label)
            widget = INPUT(_name = name,
                           _class = "double",
                           _type = "text")
        elif field[0] == "impact":
            label = S3Represent(lookup="assess_impact_type")(field[1])
            label = "%s:" % T(label)
            value_widget = INPUT(_name = name,
                                 _class = "double",
                                 _type = "text")
            severity_widget = db.assess_summary.value.widget(db.impact_impact.severity,
                                                             0,
                                                             _name = "%s_severity" % name
                                                             )
            severity_widget = color_code_severity_widget(severity_widget,
                                                         "%s_severity" % name)
            widget = DIV(value_widget,
                         DIV("%s:" % T("Severity")),
                         severity_widget,
                         XML(" "))
        elif field[0] == "summary":
            label = "%s:" % T(org_subsector_represent(field[1]))
            widget = db.assess_summary.value.widget(db.assess_summary.value,
                                                    0, _name = name)
            # Fix: color_code_severity_widget() requires the widget name;
            # it was previously called with a single argument (TypeError)
            widget = color_code_severity_widget(widget, name)

        # Add the field components to the form_rows
        if field[0] == "title":
            form_rows.append(TR(H3( field[1] )))
        else:
            form_rows = form_rows + list(s3_formstyle("%s__row" % name,
                                                      label,
                                                      widget,
                                                      comment))

    form = FORM(TABLE(*form_rows),
                INPUT(_value = T("Save"), _type = "submit"))

    assess_id = None
    form_accepted = form.accepts(request.vars, session)
    if form_accepted:
        record_dict = {"organisation_id" : session.s3.organisation_id}
        for field in custom_assess_fields:
            # location_id is handled separately below
            # (Fix: this previously compared against "location", which never
            # matched the actual "location_id" field name)
            if field[0] != "assess" or field[1] == "location_id":
                continue
            # Fix: the lookup name previously used "custom__assess_%s"
            # (double underscore), which never matched the widget names
            # built above, so assess field values were silently dropped
            name = "custom_%s_%s" % (field[0], field[1])
            if name in request.vars:
                record_dict[field[1]] = request.vars[name]

        # Add Location (must happen first)
        if "custom_assess_location_id" in request.vars:
            # Auto (HTML5 Geolocate): create a new gis_location record
            location_dict = {}
            if "gis_location_lat" in request.vars:
                location_dict["lat"] = request.vars["gis_location_lat"]
            if "gis_location_lon" in request.vars:
                location_dict["lon"] = request.vars["gis_location_lon"]
            location_dict["name"] = request.vars["custom_assess_location_id"]
            record_dict["location_id"] = s3db.gis_location.insert(**location_dict)
        if "location_id" in request.vars:
            # Location Selector
            record_dict["location_id"] = request.vars["location_id"]
        if location_id:
            # Location_id was passed to function: takes precedence
            record_dict["location_id"] = location_id

        # Add Assessment
        assess_id = db.assess_assess.insert(**record_dict)

        # Map each field category to its FK column & component table
        fk_dict = dict(baseline = "baseline_type_id",
                       impact = "impact_type_id",
                       summary = "subsector_id"
                       )
        component_dict = dict(baseline = "assess_baseline",
                              impact = "impact_impact",
                              summary = "assess_summary"
                              )
        # Add Assessment Components
        sector_summary = {}
        for field in custom_assess_fields:
            if field[0] == "assess":
                continue
            record_dict = {}
            name = "custom_%s_%s" % (field[0], field[1])
            if name in request.vars:
                record_dict["assess_id"] = assess_id
                record_dict[fk_dict[field[0]]] = field[1]
                record_dict["value"] = request.vars[name]
                if field[0] == "impact":
                    severity = int(request.vars[name + "_severity"])
                    record_dict["severity"] = severity
                    if not record_dict["value"] and not record_dict["severity"]:
                        # Do not record impact if there is no data for it.
                        # Should we still average severity though? Now not doing this
                        continue
                    # Record the Severity per sector
                    table = db.impact_type
                    row = db(table.id == field[1]).select(table.sector_id,
                                                          limitby=(0, 1)
                                                          ).first()
                    sector_id = row.sector_id
                    if sector_id in sector_summary.keys():
                        sector_summary[sector_id].append(severity)
                    elif sector_id:
                        sector_summary[sector_id] = [severity]
                db[component_dict[field[0]]].insert(**record_dict)

        # Add Cluster summaries
        # @ToDo: make sure that this doesn't happen if there are sectors in the assess
        for sector_id in sector_summary.keys():
            severity_values = sector_summary[sector_id]
            db.assess_summary.insert(assess_id = assess_id,
                                     sector_id = sector_id,
                                     # Average severity
                                     value = sum(severity_values) / len(severity_values)
                                     )

    return form, form_accepted, assess_id
# =============================================================================
def type():
    """ RESTful CRUD controller for Impact Types """
    # NOTE(review): the function name shadows the builtin type() within this
    # controller file; it cannot be renamed as it maps to the URL
    return s3_rest_controller("impact", "type")
# =============================================================================
def impact():
    """ RESTful CRUD controller for Impacts (impact module) """
    return s3_rest_controller("impact", "impact")
# END =========================================================================
|
{
"content_hash": "f74193ca442c8c7d67c34c6a2bc669b6",
"timestamp": "",
"source": "github",
"line_count": 2561,
"max_line_length": 246,
"avg_line_length": 52.06286606794221,
"alnum_prop": 0.4424861062152655,
"repo_name": "sammyshj/gci",
"id": "c91b18c3f6b69b149a432134756b971c620a02ce",
"size": "133358",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "controllers/assess2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1375094"
},
{
"name": "JavaScript",
"bytes": "16625771"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Perl",
"bytes": "500"
},
{
"name": "Python",
"bytes": "25684403"
},
{
"name": "Racket",
"bytes": "166"
},
{
"name": "Shell",
"bytes": "727"
},
{
"name": "XSLT",
"bytes": "2003150"
}
],
"symlink_target": ""
}
|
"""
tipfy.auth.facebook
~~~~~~~~~~~~~~~~~~~
Implementation of Facebook authentication scheme.
Ported from `tornado.auth`_.
:copyright: 2009 Facebook.
:copyright: 2011 tipfy.org.
:license: Apache License Version 2.0, see LICENSE.txt for more details.
"""
from __future__ import absolute_import
import functools
import hashlib
import logging
import time
import urlparse
import urllib
from google.appengine.api import urlfetch
from tipfy import REQUIRED_VALUE
from tipfy.utils import json_decode, json_encode
#: Default configuration values for this module. Keys are:
#:
#: - ``api_key``: Key provided when you register an application with
#:   Facebook.
#: - ``app_secret``: Secret provided when you register an application
#:   with Facebook.
#:
#: Both default to REQUIRED_VALUE, so tipfy raises a configuration error
#: unless the app supplies real values.
default_config = {
    'api_key': REQUIRED_VALUE,
    'app_secret': REQUIRED_VALUE,
}
class FacebookMixin(object):
"""A :class:`tipfy.RequestHandler` mixin that implements Facebook Connect
authentication.
To authenticate with Facebook, register your application with
Facebook at http://www.facebook.com/developers/apps.php. Then
copy your API Key and Application Secret to config.py::
config['tipfy.auth.twitter'] = {
'api_key': 'XXXXXXXXXXXXXXX',
'app_secret': 'XXXXXXXXXXXXXXX',
}
When your application is set up, you can use the FacebookMixin like this
to authenticate the user with Facebook::
from tipfy import RequestHandler
from tipfy.auth.facebook import FacebookMixin
class FacebookHandler(RequestHandler, FacebookMixin):
def get(self):
if self.request.args.get('session', None):
return self.get_authenticated_user(self._on_auth)
return self.authenticate_redirect()
def _on_auth(self, user):
if not user:
self.abort(403)
# Set the user in the session.
The user object returned by get_authenticated_user() includes the
attributes 'facebook_uid' and 'name' in addition to session attributes
like 'session_key'. You should save the session key with the user; it is
required to make requests on behalf of the user later with
facebook_request().
"""
@property
def _facebook_api_key(self):
return self.app.config[__name__]['api_key']
@property
def _facebook_secret(self):
return self.app.config[__name__]['app_secret']
def authenticate_redirect(self, callback_uri=None, cancel_uri=None,
extended_permissions=None):
"""Authenticates/installs this app for the current user."""
callback_uri = callback_uri or self.request.path
args = {
'api_key': self._facebook_api_key,
'v': '1.0',
'fbconnect': 'true',
'display': 'page',
'next': urlparse.urljoin(self.request.url, callback_uri),
'return_session': 'true',
}
if cancel_uri:
args['cancel_url'] = urlparse.urljoin(self.request.url, cancel_uri)
if extended_permissions:
if isinstance(extended_permissions, basestring):
extended_permissions = [extended_permissions]
args['req_perms'] = ','.join(extended_permissions)
return self.redirect('http://www.facebook.com/login.php?' +
urllib.urlencode(args))
def authorize_redirect(self, extended_permissions, callback_uri=None,
cancel_uri=None):
"""Redirects to an authorization request for the given FB resource.
The available resource names are listed at
http://wiki.developers.facebook.com/index.php/Extended_permission.
The most common resource types include:
publish_stream
read_stream
email
sms
extended_permissions can be a single permission name or a list of
names. To get the session secret and session key, call
get_authenticated_user() just as you would with
authenticate_redirect().
"""
return self.authenticate_redirect(callback_uri, cancel_uri,
extended_permissions)
def get_authenticated_user(self, callback):
"""Fetches the authenticated Facebook user.
The authenticated user includes the special Facebook attributes
'session_key' and 'facebook_uid' in addition to the standard
user attributes like 'name'.
"""
session = json_decode(self.request.args.get('session'))
return self.facebook_request(
method='facebook.users.getInfo',
callback=functools.partial(
self._on_get_user_info, callback, session),
session_key=session['session_key'],
uids=session['uid'],
fields='uid,first_name,last_name,name,locale,pic_square,' \
'profile_url,username')
def facebook_request(self, method, callback=None, **kwargs):
"""Makes a Facebook API REST request.
We automatically include the Facebook API key and signature, but
it is the callers responsibility to include 'session_key' and any
other required arguments to the method.
The available Facebook methods are documented here:
http://wiki.developers.facebook.com/index.php/API
Here is an example for the stream.get() method::
from tipfy import RequestHandler
from tipfy.auth.facebook import FacebookMixin
from tipfyext.jinja2 import Jinja2Mixin
class MainHandler(RequestHandler, Jinja2Mixin, FacebookMixin):
def get(self):
self.facebook_request(
method='stream.get',
callback=self._on_stream,
session_key=self.current_user['session_key'])
def _on_stream(self, stream):
if stream is None:
# Not authorized to read the stream yet?
return self.redirect(self.authorize_redirect('read_stream'))
return self.render_response('stream.html', stream=stream)
"""
if not method.startswith('facebook.'):
method = 'facebook.' + method
kwargs.update({
'api_key': self._facebook_api_key,
'v': '1.0',
'method': method,
'call_id': str(long(time.time() * 1e6)),
'format': 'json',
})
kwargs['sig'] = self._signature(kwargs)
url = 'http://api.facebook.com/restserver.php?' + \
urllib.urlencode(kwargs)
try:
response = urlfetch.fetch(url, deadline=10)
except urlfetch.DownloadError, e:
logging.exception(e)
response = None
if not callback:
# Don't preprocess the response, just return a bare one.
return response
return self._parse_response(callback, response)
def _on_get_user_info(self, callback, session, users):
if users is None:
return callback(None)
user = users[0]
return callback({
'name': user['name'],
'first_name': user['first_name'],
'last_name': user['last_name'],
'uid': user['uid'],
'locale': user['locale'],
'pic_square': user['pic_square'],
'profile_url': user['profile_url'],
'username': user.get('username'),
'session_key': session['session_key'],
'session_expires': session.get('expires'),
})
def _parse_response(self, callback, response):
if not response:
logging.warning('Missing Facebook response.')
return callback(None)
elif response.status_code < 200 or response.status_code >= 300:
logging.warning('HTTP error from Facebook (%d): %s',
response.status_code, response.content)
return callback(None)
try:
json = json_decode(response.content)
except:
logging.warning('Invalid JSON from Facebook: %r', response.content)
return callback(None)
if isinstance(json, dict) and json.get('error_code'):
logging.warning('Facebook error: %d: %r', json['error_code'],
json.get('error_msg'))
return callback(None)
return callback(json)
def _signature(self, kwargs):
parts = ['%s=%s' % (n, kwargs[n]) for n in sorted(kwargs.keys())]
body = ''.join(parts) + self._facebook_secret
if isinstance(body, unicode):
body = body.encode('utf-8')
return hashlib.md5(body).hexdigest()
|
{
"content_hash": "c2e18cab65eb8d45a231d1b0c2f45eed",
"timestamp": "",
"source": "github",
"line_count": 249,
"max_line_length": 83,
"avg_line_length": 36.04016064257028,
"alnum_prop": 0.5853577000222866,
"repo_name": "moraes/tipfy",
"id": "5363cf9bff5736f166fbf33bb640790859e42553",
"size": "8998",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "tipfy/auth/facebook.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "2954"
},
{
"name": "Python",
"bytes": "818426"
},
{
"name": "Shell",
"bytes": "4509"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from celery import Celery
import os

# Allow deployments to point at a different configuration module via the
# environment; fall back to the packaged default config
os.environ.setdefault('CELERY_CONFIG_MODULE', 'thehonestgenepipeline.celeryconfig')

# Application object imported by the workers; the pipeline task modules
# are registered via `include`
celery = Celery('thehonestgenepipeline',
                include=['thehonestgenepipeline.imputation','thehonestgenepipeline.ancestry','thehonestgenepipeline.riskprediction'])

celery.config_from_envvar('CELERY_CONFIG_MODULE')

if __name__ == '__main__':
    celery.start()
|
{
"content_hash": "8b09e862774f4e0c3e1d9987bc08f03f",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 121,
"avg_line_length": 26.5625,
"alnum_prop": 0.7670588235294118,
"repo_name": "TheHonestGene/thehonestgene-pipeline",
"id": "a04fc2cf7baab39344a479e4e06c2b2ac1720862",
"size": "425",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thehonestgenepipeline/celery.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14157"
}
],
"symlink_target": ""
}
|
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from cassandra.protocol import ProtocolHandler, ResultMessage, UUIDType, read_int, EventMessage
from cassandra.query import tuple_factory
from cassandra.cluster import Cluster
from tests.integration import use_singledc, PROTOCOL_VERSION, drop_keyspace_shutdown_cluster
from tests.integration.datatype_utils import update_datatypes, PRIMITIVE_DATATYPES
from tests.integration.standard.utils import create_table_with_all_types, get_all_primitive_params
from six import binary_type
import uuid
def setup_module():
    # Spin up the single-DC test cluster and refresh the datatype registry
    # before any test in this module runs
    use_singledc()
    update_datatypes()
class CustomProtocolHandlerTest(unittest.TestCase):
    """Integration tests for pluggable client protocol handlers (PYTHON-313)."""

    @classmethod
    def setUpClass(cls):
        # One shared session and a dedicated keyspace for all tests in this class
        cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION)
        cls.session = cls.cluster.connect()
        cls.session.execute("CREATE KEYSPACE custserdes WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}")
        cls.session.set_keyspace("custserdes")

    @classmethod
    def tearDownClass(cls):
        drop_keyspace_shutdown_cluster("custserdes", cls.session, cls.cluster)

    def test_custom_raw_uuid_row_results(self):
        """
        Test to validate that custom protocol handlers work with raw row results

        Connect and validate that the normal protocol handler is used.
        Re-Connect and validate that the custom protocol handler is used.
        Re-Connect and validate that the normal protocol handler is used.

        @since 2.7
        @jira_ticket PYTHON-313
        @expected_result custom protocol handler is invoked appropriately.

        @test_category data_types:serialization
        """
        # Ensure that we get normal uuid back first
        session = Cluster(protocol_version=PROTOCOL_VERSION).connect(keyspace="custserdes")
        session.row_factory = tuple_factory
        result = session.execute("SELECT schema_version FROM system.local")
        uuid_type = result[0][0]
        self.assertEqual(type(uuid_type), uuid.UUID)

        # use our custom protocol handler: values come back as raw bytes
        session.client_protocol_handler = CustomTestRawRowType
        session.row_factory = tuple_factory
        result_set = session.execute("SELECT schema_version FROM system.local")
        raw_value = result_set[0][0]
        self.assertTrue(isinstance(raw_value, binary_type))
        # a serialized UUID is exactly 16 bytes
        self.assertEqual(len(raw_value), 16)

        # Ensure that we get normal uuid back when we re-connect
        session.client_protocol_handler = ProtocolHandler
        result_set = session.execute("SELECT schema_version FROM system.local")
        uuid_type = result_set[0][0]
        self.assertEqual(type(uuid_type), uuid.UUID)
        session.shutdown()

    def test_custom_raw_row_results_all_types(self):
        """
        Test to validate that custom protocol handlers work with varying types of
        results

        Connect, create a table with all sorts of data. Query the data, make the sure the custom results handler is
        used correctly.

        @since 2.7
        @jira_ticket PYTHON-313
        @expected_result custom protocol handler is invoked with various result types

        @test_category data_types:serialization
        """
        # Connect using a custom protocol handler that tracks the various types the result message is used with.
        session = Cluster(protocol_version=PROTOCOL_VERSION).connect(keyspace="custserdes")
        session.client_protocol_handler = CustomProtocolHandlerResultMessageTracked
        session.row_factory = tuple_factory

        colnames = create_table_with_all_types("alltypes", session, 1)
        columns_string = ", ".join(colnames)

        # verify data round-trips intact through the tracked handler
        params = get_all_primitive_params(0)
        results = session.execute("SELECT {0} FROM alltypes WHERE primkey=0".format(columns_string))[0]
        for expected, actual in zip(params, results):
            self.assertEqual(actual, expected)
        # Ensure we have covered the various primitive types
        self.assertEqual(len(CustomResultMessageTracked.checked_rev_row_set), len(PRIMITIVE_DATATYPES)-1)
        session.shutdown()
class CustomResultMessageRaw(ResultMessage):
    """
    This is a custom Result Message that is used to return raw results, rather than
    results which contain objects.
    """
    # Register UUIDType under opcode 0xc on a copy of the mapping, so the
    # base class's type_codes dict is not mutated
    my_type_codes = ResultMessage.type_codes.copy()
    my_type_codes[0xc] = UUIDType
    type_codes = my_type_codes

    @classmethod
    def recv_results_rows(cls, f, protocol_version, user_type_map, result_metadata):
        # Read metadata and raw row bytes from the stream, skipping the
        # per-column deserialization done by the base implementation
        paging_state, column_metadata = cls.recv_results_metadata(f, user_type_map)
        rowcount = read_int(f)
        rows = [cls.recv_row(f, len(column_metadata)) for _ in range(rowcount)]
        # column_metadata entries: (ksname, cfname, colname, type)
        coltypes = [c[3] for c in column_metadata]
        return (paging_state, (coltypes, rows))
class CustomTestRawRowType(ProtocolHandler):
    """
    This is a custom protocol handler that substitutes
    CustomResultMessageRaw for the default result message implementation.
    """
    # Swap in our result message on a copy of the opcode map so the base
    # ProtocolHandler mapping stays untouched
    my_opcodes = ProtocolHandler.message_types_by_opcode.copy()
    my_opcodes[CustomResultMessageRaw.opcode] = CustomResultMessageRaw
    message_types_by_opcode = my_opcodes
class CustomResultMessageTracked(ResultMessage):
    """
    This is a custom Result Message that is used to track what primitive types
    have been processed when it receives results
    """
    # Register UUIDType under opcode 0xc on a copy of the mapping, so the
    # base class's type_codes dict is not mutated
    my_type_codes = ResultMessage.type_codes.copy()
    my_type_codes[0xc] = UUIDType
    type_codes = my_type_codes
    # Class-level set accumulating every column type seen across all
    # responses handled by this message class
    checked_rev_row_set = set()

    @classmethod
    def recv_results_rows(cls, f, protocol_version, user_type_map, result_metadata):
        paging_state, column_metadata = cls.recv_results_metadata(f, user_type_map)
        rowcount = read_int(f)
        rows = [cls.recv_row(f, len(column_metadata)) for _ in range(rowcount)]
        # column_metadata entries: (ksname, cfname, colname, type)
        colnames = [c[2] for c in column_metadata]
        coltypes = [c[3] for c in column_metadata]
        # record which types this handler has deserialized
        cls.checked_rev_row_set.update(coltypes)
        parsed_rows = [
            tuple(ctype.from_binary(val, protocol_version)
                  for ctype, val in zip(coltypes, row))
            for row in rows]
        return (paging_state, (colnames, parsed_rows))
class CustomProtocolHandlerResultMessageTracked(ProtocolHandler):
    """
    This is a custom protocol handler that substitutes
    CustomResultMessageTracked for the default result message implementation.
    """
    # Swap in our result message on a copy of the opcode map so the base
    # ProtocolHandler mapping stays untouched
    my_opcodes = ProtocolHandler.message_types_by_opcode.copy()
    my_opcodes[CustomResultMessageTracked.opcode] = CustomResultMessageTracked
    message_types_by_opcode = my_opcodes
|
{
"content_hash": "a7d768bbd71e0f5f050a86f0b07fc8e0",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 133,
"avg_line_length": 40.11377245508982,
"alnum_prop": 0.6948798328108673,
"repo_name": "thobbs/python-driver",
"id": "c6818f7f4bbfa41cb3054cd8dd470bc13f9eb370",
"size": "7279",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/integration/standard/test_custom_protocol_handler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "28924"
},
{
"name": "PowerShell",
"bytes": "4614"
},
{
"name": "Python",
"bytes": "2078148"
}
],
"symlink_target": ""
}
|
from fabric.api import local
def css():
    """Bundle vendor + CDN stylesheets into dist/css/markdown-plus.css."""
    local('cp -r vendor/markdown-core/dist/css dist/')
    local('cp -r vendor/markdown-core/dist/fonts dist/')
    cdn_styles = [
        'https://cdn.jsdelivr.net/jquery.ui/1.11.4/jquery-ui.min.css',
        'https://cdn.jsdelivr.net/jquery.layout/1.4.3/layout-default.css',
        'https://cdn.jsdelivr.net/remodal/1.0.5/remodal.css',
        'https://cdn.jsdelivr.net/remodal/1.0.5/remodal-default-theme.css',
    ]
    # the first download creates the bundle ('>'), the rest append ('>>')
    redirection = '>'
    for url in cdn_styles:
        local('curl %s %s dist/css/markdown-plus.css' % (url, redirection))
        redirection = '>>'
    local('cat dist/css/markdown-core.css >> dist/css/markdown-plus.css')
    local('rm dist/css/markdown-core.css')
    local('cat markdown-plus.css >> dist/css/markdown-plus.css')
def js():
    """Bundle CDN scripts and local sources into dist/markdown-plus.min.js."""
    bundle = 'dist/markdown-plus.js'
    cdn = 'https://cdn.jsdelivr.net'
    # the first download creates the bundle; everything else is appended
    local('curl %s/underscorejs/1.8.3/underscore-min.js > %s' % (cdn, bundle))

    def append(source_command):
        # separate the previous chunk with a newline, then append the next one
        local('echo "\n" >> %s' % bundle)
        local('%s >> %s' % (source_command, bundle))

    append('cat vendor/markdown-core/dist/markdown-core.min.js')
    cdn_scripts = [
        'jquery.ui/1.11.4/jquery-ui.min.js',
        'jquery.layout/1.4.3/jquery.layout.min.js',
        'js-cookie/2.0.4/js.cookie.js',
        'remodal/1.0.5/remodal.min.js',
        'ace/1.2.2/noconflict/ace.js',
        'ace/1.2.2/noconflict/keybinding-vim.js',
        'ace/1.2.2/noconflict/keybinding-emacs.js',
        'ace/1.2.2/noconflict/mode-markdown.js',
        'ace/1.2.2/noconflict/ext-searchbox.js',
    ]
    for path in cdn_scripts:
        append('curl %s/%s' % (cdn, path))
    for theme in ['tomorrow_night_eighties', 'tomorrow_night_blue', 'tomorrow', 'kuroir']:
        append('curl %s/ace/1.2.2/noconflict/theme-%s.js' % (cdn, theme))
    append('cat markdown-plus.js')
    # minify and drop the intermediate bundle
    local('uglifyjs %s -cmo dist/markdown-plus.min.js' % bundle)
    local('rm %s' % bundle)
def dist():
    # Build all distributable assets: fetch markdown-core via bower, bundle
    # CSS and JS, then remove the temporary vendor directory.
    local('bower install markdown-core')
    css()
    js()
    local('rm -rf vendor')
def mdp():
    # Copy the built assets into the Markdown Plus app source tree
    # (path is machine-specific; assumes the swift project is checked out).
    local('cp -rf dist ~/src/swift/markdown-plus/Markdown\ Plus/markdown-plus/')
    local('cp -f index.html ~/src/swift/markdown-plus/Markdown\ Plus/markdown-plus/')
    local('cp -f icon.png ~/src/swift/markdown-plus/Markdown\ Plus/markdown-plus/')
|
{
"content_hash": "bc08cbb721f68a1e6cfc79c2dd10dceb",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 119,
"avg_line_length": 56.64912280701754,
"alnum_prop": 0.6763703933106224,
"repo_name": "bnuzet/markpurcha",
"id": "a474fbee5ffcb61ac65bc1388588a71b0adb7d62",
"size": "3229",
"binary": false,
"copies": "1",
"ref": "refs/heads/gh-pages",
"path": "fabfile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "174740"
},
{
"name": "HTML",
"bytes": "6288"
},
{
"name": "JavaScript",
"bytes": "12402"
}
],
"symlink_target": ""
}
|
"""Zgres config file"""
import os
import sys
import logging
import configparser
class StdOutFilter(logging.Filter):
    """Logging filter that passes only records below WARNING.

    Attached to the stdout handler so that warnings and errors are left
    to the stderr handler exclusively.
    """

    def filter(self, record):
        # Truthy for DEBUG/INFO, falsy for WARNING and above.
        return record.levelno < logging.WARNING
def add_config_file(parser, config_file):
    """Attach the standard ``-c``/``--config`` option to *parser*.

    The default consults both ``/etc/zgres/<config_file>`` and the
    matching ``.d`` directory, in that order.
    """
    default_locations = [
        '/etc/zgres/{}'.format(config_file),
        '/etc/zgres/{}.d'.format(config_file),
    ]
    parser.add_argument(
        '-c', '--config',
        dest='config',
        nargs='*',
        default=default_locations,
        help='Use this config file or directory. If a directory, all files '
             'ending with .ini are parsed. Order is important with latter '
             'files over-riding earlier ones.')
def add_logging_args(parser):
    """Attach mutually exclusive verbosity flags to *parser*.

    Adds ``--debug``, ``--verbose`` and ``--quiet`` as store_true flags;
    at most one of them may be supplied.
    """
    verbosity = parser.add_mutually_exclusive_group()
    flags = [
        ('--debug', 'Print extra debug info on stdout'),
        ('--verbose', 'Print extra info on stdout'),
        ('--quiet', 'Print only errors'),
    ]
    for flag, description in flags:
        verbosity.add_argument(flag, action='store_true', help=description)
def setup_logging(config):
    """Configure the root logger from the parsed verbosity flags.

    Records below WARNING go to stdout (via StdOutFilter); WARNING and
    above go to stderr.  The level defaults to WARN when no flag is set.
    """
    if config.quiet:
        level = logging.ERROR
    elif config.verbose:
        level = logging.INFO
    elif config.debug:
        level = logging.DEBUG
    else:
        level = logging.WARN
    root_logger = logging.getLogger()
    root_logger.setLevel(level)
    # Everything below WARNING is printed on stdout.
    stdout_handler = logging.StreamHandler(sys.stdout)
    stdout_handler.addFilter(StdOutFilter())
    root_logger.addHandler(stdout_handler)
    # WARNING and above go to stderr.
    stderr_handler = logging.StreamHandler(sys.stderr)
    stderr_handler.setLevel(logging.WARNING)
    root_logger.addHandler(stderr_handler)
def read_config_file(args):
    """Parse the config files/directories listed in ``args.config``.

    Each entry may be a file or a directory.  For directories, every
    non-hidden ``*.ini`` file is parsed in sorted order, with later
    files overriding earlier ones.  Returns a ConfigParser configured
    with extended interpolation.
    """
    config = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation())
    for file_or_dir in args.config:
        if os.path.isfile(file_or_dir):
            # Fix: use a context manager so the file handle is closed
            # (previously open() results were leaked).
            with open(file_or_dir, 'r') as config_fp:
                config.read_file(config_fp)
        else:
            for cfg in sorted(os.listdir(file_or_dir)):
                if cfg.startswith('.') or not cfg.endswith('.ini'):
                    continue
                with open(os.path.join(file_or_dir, cfg), 'r') as config_fp:
                    config.read_file(config_fp)
    return config
def parse_args(parser, argv, config_file=None):
    """Parse *argv*, configure logging, and optionally load config files.

    When *config_file* is given, the ``-c/--config`` option is added and
    the resulting files are parsed; otherwise None is returned in place
    of a ConfigParser.
    """
    if config_file is not None:
        add_config_file(parser, config_file)
    add_logging_args(parser)
    args = parser.parse_args(args=argv[1:])
    setup_logging(args)
    return None if config_file is None else read_config_file(args)
|
{
"content_hash": "524cf5152f98d8bc361c9850614f76f2",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 172,
"avg_line_length": 34.333333333333336,
"alnum_prop": 0.6383495145631068,
"repo_name": "jinty/zgres",
"id": "789a75529c1e59ebb80017122a97681240ce1fbc",
"size": "2472",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zgres/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "183377"
},
{
"name": "Shell",
"bytes": "636"
}
],
"symlink_target": ""
}
|
"""
===============================
Compute and visualize ERDS maps
===============================
This example calculates and displays ERDS maps of event-related EEG data. ERDS
(sometimes also written as ERD/ERS) is short for event-related
desynchronization (ERD) and event-related synchronization (ERS) [1]_.
Conceptually, ERD corresponds to a decrease in power in a specific frequency
band relative to a baseline. Similarly, ERS corresponds to an increase in
power. An ERDS map is a time/frequency representation of ERD/ERS over a range
of frequencies [2]_. ERDS maps are also known as ERSP (event-related spectral
perturbation) [3]_.
We use a public EEG BCI data set containing two different motor imagery tasks
available at PhysioNet. The two tasks are imagined hand and feet movement. Our
goal is to generate ERDS maps for each of the two tasks.
First, we load the data and create epochs of 5s length. The data sets contain
multiple channels, but we will only consider the three channels C3, Cz, and C4.
We compute maps containing frequencies ranging from 2 to 35Hz. We map ERD to
red color and ERS to blue color, which is the convention in many ERDS
publications. Finally, we perform cluster-based permutation tests to estimate
significant ERDS values (corrected for multiple comparisons within channels).
References
----------
.. [1] G. Pfurtscheller, F. H. Lopes da Silva. Event-related EEG/MEG
synchronization and desynchronization: basic principles. Clinical
Neurophysiology 110(11), 1842-1857, 1999.
.. [2] B. Graimann, J. E. Huggins, S. P. Levine, G. Pfurtscheller.
Visualization of significant ERD/ERS patterns in multichannel EEG and
ECoG data. Clinical Neurophysiology 113(1), 43-47, 2002.
.. [3] S. Makeig. Auditory event-related dynamics of the EEG spectrum and
effects of exposure to tones. Electroencephalography and Clinical
Neurophysiology 86(4), 283-293, 1993.
"""
# Authors: Clemens Brunner <clemens.brunner@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
import mne
from mne.datasets import eegbci
from mne.io import concatenate_raws, read_raw_edf
from mne.time_frequency import tfr_multitaper
from mne.stats import permutation_cluster_1samp_test as pcluster_test
def center_cmap(cmap, vmin, vmax):
    """Center given colormap (ranging from vmin to vmax) at value 0.

    The positions of the original colormap are remapped so that the
    color at the midpoint lands on data value 0; assumes vmin < 0 < vmax.
    Note that eventually this could also be achieved by re-normalizing a
    given colormap by subclassing matplotlib.colors.Normalize as described
    here: https://matplotlib.org/users/colormapnorms.html#custom-normalization-two-linear-ranges
    """  # noqa: E501
    vzero = abs(vmin) / (vmax - vmin)
    index_old = np.linspace(0, 1, cmap.N)
    index_new = np.hstack([np.linspace(0, vzero, cmap.N // 2, endpoint=False),
                           np.linspace(vzero, 1, cmap.N // 2)])
    channels = ("red", "green", "blue", "alpha")
    cdict = {channel: [] for channel in channels}
    for old_pos, new_pos in zip(index_old, index_new):
        rgba = cmap(old_pos)
        for channel, level in zip(channels, rgba):
            cdict[channel].append((new_pos, level, level))
    return LinearSegmentedColormap("erds", cdict)
# load and preprocess data ####################################################
subject = 1  # use data from subject 1
runs = [6, 10, 14]  # use only hand and feet motor imagery runs
fnames = eegbci.load_data(subject, runs)
raws = [read_raw_edf(f, preload=True, stim_channel='auto') for f in fnames]
raw = concatenate_raws(raws)
raw.rename_channels(lambda x: x.strip('.'))  # remove dots from channel names
events = mne.find_events(raw, shortest_event=0, stim_channel='STI 014')
# Restrict the analysis to the three central motor channels.
picks = mne.pick_channels(raw.info["ch_names"], ["C3", "Cz", "C4"])

# epoch data ##################################################################
tmin, tmax = -1, 4  # define epochs around events (in s)
event_ids = dict(hands=2, feet=3)  # map event IDs to tasks
# Epochs are cut 0.5 s wider on each side so the time-frequency transform
# can be cropped back to [tmin, tmax] without edge artifacts.
epochs = mne.Epochs(raw, events, event_ids, tmin - 0.5, tmax + 0.5,
                    picks=picks, baseline=None, preload=True)

# compute ERDS maps ###########################################################
freqs = np.arange(2, 36, 1)  # frequencies from 2-35Hz
n_cycles = freqs  # use constant t/f resolution
vmin, vmax = -1, 1.5  # set min and max ERDS values in plot
baseline = [-1, 0]  # baseline interval (in s)
cmap = center_cmap(plt.cm.RdBu, vmin, vmax)  # zero maps to white
kwargs = dict(n_permutations=100, step_down_p=0.05, seed=1,
              buffer_size=None)  # for cluster test

# One figure per task (hands / feet), one panel per channel plus a colorbar.
for event in event_ids:
    tfr = tfr_multitaper(epochs[event], freqs=freqs, n_cycles=n_cycles,
                         use_fft=True, return_itc=False, average=False,
                         decim=2)
    tfr.crop(tmin, tmax)
    tfr.apply_baseline(baseline, mode="percent")
    fig, axes = plt.subplots(1, 4, figsize=(12, 4),
                             gridspec_kw={"width_ratios": [10, 10, 10, 1]})
    for ch, ax in enumerate(axes[:-1]):  # for each channel
        # positive clusters
        _, c1, p1, _ = pcluster_test(tfr.data[:, ch, ...], tail=1, **kwargs)
        # negative clusters
        _, c2, p2, _ = pcluster_test(tfr.data[:, ch, ...], tail=-1, **kwargs)
        # note that we keep clusters with p <= 0.05 from the combined clusters
        # of two independent tests; in this example, we do not correct for
        # these two comparisons
        c = np.stack(c1 + c2, axis=2)  # combined clusters
        p = np.concatenate((p1, p2))  # combined p-values
        mask = c[..., p <= 0.05].any(axis=-1)
        # plot TFR (ERDS map with masking of non-significant regions)
        tfr.average().plot([ch], vmin=vmin, vmax=vmax, cmap=(cmap, False),
                           axes=ax, colorbar=False, show=False, mask=mask,
                           mask_style="mask")
        ax.set_title(epochs.ch_names[ch], fontsize=10)
        ax.axvline(0, linewidth=1, color="black", linestyle=":")  # event
        if not ax.is_first_col():
            # Hide redundant y labels on all but the left-most panel.
            ax.set_ylabel("")
            ax.set_yticklabels("")
    fig.colorbar(axes[0].images[-1], cax=axes[-1])
    fig.suptitle("ERDS ({})".format(event))
    fig.show()
|
{
"content_hash": "842be34dfe2570c668180137d14adeec",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 90,
"avg_line_length": 44.94964028776978,
"alnum_prop": 0.6381241997439181,
"repo_name": "teonlamont/mne-python",
"id": "aff930fd9a9b444d188974d28c5d7840dfedb09c",
"size": "6248",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/time_frequency/plot_time_frequency_erds.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "3117"
},
{
"name": "PowerShell",
"bytes": "2988"
},
{
"name": "Python",
"bytes": "4354605"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
}
|
"""
Accuracy on MNIST dataset: 94.39 %
"""
import os, sys
from scythe.core import *
from scythe.MNIST import *
import matplotlib.pyplot as plt
def main():
    """Build, train and evaluate a small gcForest on MNIST.

    The graph is a 2D multi-grained scanner feeding two cascade layers;
    the MNIST folder is taken from the first command-line argument.
    """
    if len(sys.argv) < 2:
        print("Please provide the path to the MNIST dataset as first argument")
        return
    mnist_folder = sys.argv[1]

    # Shared per-forest hyper-parameters for every layer.
    fconfig = ForestConfiguration()
    fconfig.n_classes = 10
    fconfig.max_n_trees = 50
    fconfig.max_n_features = 20
    fconfig.max_depth = 12
    fconfig.bagging_fraction = 0.1
    n_forests_per_layer = 2
    lconfig = LayerConfiguration(fconfig, n_forests_per_layer, COMPLETE_RANDOM_FOREST)

    print("Create gcForest")
    graph = DeepForest(task="classification", n_classes=10)
    # The scanner starts out as both the front and the rear layer.
    kc, kr = 22, 22
    scanner_id = graph.add(MultiGrainedScanner2D(lconfig, (kc, kr)))
    # Each added cascade becomes a child of the current rear layer and
    # then takes over as the new rear layer itself.
    cascade_id = graph.add(CascadeLayer(lconfig))
    cascade2_id = graph.add(CascadeLayer(lconfig))
    # Additionally feed the scanner's output straight into the second cascade.
    graph.connect(scanner_id, cascade2_id)

    print("Load MNIST datasets")
    X_train, y_train = loadMNISTTrainingSet(location=mnist_folder)
    X_test, labels = loadMNISTTestSet(location=mnist_folder)
    # Keep the example fast by training on a small subset.
    X_train, y_train = X_train[:500], y_train[:500]

    print("Fit gcForest")
    graph.fit(X_train, y_train)

    print("Classify with gcForest")
    probas = graph.classify(X_test)
    predictions = probas.argmax(axis=1)
    n_correct = np.sum(predictions == labels)
    print("Correct predictions : %i / %i" % (n_correct, len(labels)))
if __name__ == "__main__":
    # Run the example only when executed directly as a script.
    main()
|
{
"content_hash": "fcbe3fa996de91d6a764bbb71edf94e4",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 86,
"avg_line_length": 28.606060606060606,
"alnum_prop": 0.6573093220338984,
"repo_name": "AntoinePassemiers/Scythe",
"id": "088ae8e88e16cb3dbd5e17998a6cbce9a816cbc4",
"size": "1997",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/examples/example.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "149424"
},
{
"name": "Makefile",
"bytes": "3244"
},
{
"name": "Python",
"bytes": "63826"
},
{
"name": "R",
"bytes": "3025"
}
],
"symlink_target": ""
}
|
"""
The MIT License (MIT)
Copyright (c) 2014 Moritz Wundke
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import web
import db
import config
# Globals exposed to every template (web.py template engine).
t_globals = dict(
    datestr=web.datestr,
)
render = web.template.render('templates/', cache=config.cache,
                             globals=t_globals)
# Make the renderer available inside templates so they can include
# other templates.
render._keywords['globals']['render'] = render
def listing(**k):
    """Fetch matching entries via db.listing and render the listing template."""
    entries = db.listing(**k)
    return render.listing(entries)
|
{
"content_hash": "6b836f6ef6369b9d6bf9a07eed0f205e",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 78,
"avg_line_length": 37.108108108108105,
"alnum_prop": 0.7749453750910416,
"repo_name": "moritz-wundke/SimpleTelemetryProvider",
"id": "9379dbd3611bf9f0e6cc08c71bfa97eb59cbc5c0",
"size": "1373",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SimpleTelemetryServer/ubt/view.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "14945"
},
{
"name": "Python",
"bytes": "13046"
},
{
"name": "Shell",
"bytes": "54"
}
],
"symlink_target": ""
}
|
import glob
from redbaron import RedBaron
# Canonical help strings for the known trainer CLI flags.
help_dict = {
    '--model-dir': 'Location to write checkpoints and summaries to. Must be a GCS URI when using Cloud TPU.',
    '--max-steps': 'The total number of steps to train the model.',
    '--use-tpu': 'Whether to use TPU.',
    '--tpu': 'The name or GRPC URL of the TPU node. Leave it as `None` when training on AI Platform.',
    '--train-batch-size': 'The training batch size. The training batch is divided evenly across the TPU cores.',
    '--save-checkpoints-steps': 'The number of training steps before saving each checkpoint.',
    '--sequence-length': 'The sequence length for an LSTM model.',
    '--gr-weight': 'The weight used in the gradient reversal layer.',
    '--lambda': 'The trade-off between label_prediction_loss and domain_classification_loss.'
}

filenames = glob.glob('../**/trainer*.py')

# Rewrite each trainer file in place, adding a `help=` argument to every
# parser.add_argument(...) call that lacks one.
for filename in filenames:
    with open(filename, 'r') as f:
        red = RedBaron(f.read())
    # the `if __name__ == '__main__':` block
    ifelseblock = red[-1]
    nodes = ifelseblock.value[0]
    for node in nodes:
        # looking for those parser.add_argument calls
        if node.type != 'atomtrailers':
            continue
        # reference on the node structure: https://redbaron.readthedocs.io/en/latest/nodes_reference.html#atomtrailersnode
        if node.value[0].name.value != 'parser' or node.value[1].name.value != 'add_argument':
            continue
        # args passed into the add_argument call
        args = node.value[2].value
        # get the arg name (first positional, quoted flag like '--foo')
        assert args[0].target is None
        arg_name = args[0].value.value.replace("'", '')
        assert arg_name.startswith('--')
        # check if a `help` argument has already been passed in, and if not, add it.
        for arg in args[1:]:
            if arg.target.value == 'help':
                break
        else:
            # create a CallArgumentNode for the `help` keyword argument by
            # copying the last existing argument node and retargeting it
            arg = args[-1].copy()
            arg.target.value = 'help'
            node.value[2].value.append(arg)
            if arg_name in help_dict:
                help_string = "'" + help_dict.get(arg_name, '') + "'"
            else:
                print('>>> {} does not have a help string.'.format(arg_name))
                help_string = "''"
            arg.value.value = help_string
    # Write the modified tree back over the original file.
    with open(filename, 'w') as out:
        out.write(red.dumps())
|
{
"content_hash": "a07e1775cd20f18e6a79597c3e3099de",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 122,
"avg_line_length": 37.03076923076923,
"alnum_prop": 0.6028250934773577,
"repo_name": "GoogleCloudPlatform/cloudml-samples",
"id": "d98aee786dad4188a0125750eed34313579ca99a",
"size": "2407",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tpu/templates/tools/parser_add.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "7846"
},
{
"name": "Jupyter Notebook",
"bytes": "1081052"
},
{
"name": "OpenEdge ABL",
"bytes": "1846"
},
{
"name": "Python",
"bytes": "1174159"
},
{
"name": "Shell",
"bytes": "50370"
}
],
"symlink_target": ""
}
|
DOCUMENTATION = '''
---
module: compatibility
short_description: Verifies whether a given kraken config file contains any
known incompatibilities. This module assumes the config has already passed
jsonschema validation.
'''
EXAMPLES = '''
- name: Load configuration file
include_vars:
file: "{{ config_filename }}"
name: config
- name: Check if configuration file is incompatible
compatibility:
config: config
register: compatibility
- name: Fail if configuration file is incompatible
fail:
msg: >-
One or more incompatibilities were found in {{ config_filename }}.
They were {{ compatibility.explanations }}
when:
- compatibility.incompatible
'''
from ansible.module_utils.basic import *
from ansible import errors
try:
import yaml
from semver import parse_version_info
except ImportError as e:
raise errors.AnsibleModuleError(e)
# All checks decorated by @register_check will be run by check_compatibility().
REGISTERED_CHECKS = []


def register_check(check):
    """Decorator: record *check* for execution by check_compatibility().

    Returns the function unchanged so the decorated name keeps its
    original identity.
    """
    REGISTERED_CHECKS.append(check)
    return check
def get_version(version):
    """Return the semver VersionInfo for *version*.

    A single leading 'v' (e.g. 'v1.7.2') is stripped first, since that
    common prefix is not strictly semver.
    """
    if version[0] == 'v':
        version = version[1:]
    return parse_version_info(version)
def get_versioned_fabric(fabric_config, version):
    """Resolve a fabricConfig for a given Kubernetes *version*.

    For kind 'versionedFabric' the entry matching 'v<major>.<minor>' is
    returned, falling back to the declared default; any other kind is
    returned unchanged.
    """
    if fabric_config['kind'] != 'versionedFabric':
        return fabric_config
    versions = fabric_config['kubeVersion']['versions']
    version_key = 'v{}.{}'.format(version.major, version.minor)
    if version_key in versions:
        return versions[version_key]
    return fabric_config['kubeVersion']['default']
@register_check
def check_k8s_calico_mismatch(config):
    '''Due to an incompatibility created by kraken-lib commit 02448b6, kraken
    nodepools running kubernetes 1.7 must use calico version v2.6.1. See
    https://goo.gl/uJR4c9 for more information.

    Returns (incompatible, explanations): a flag plus one human-readable
    message per offending (cluster, nodepool) pair.
    '''
    incompatible, explanations = False, []
    required_k8s_version = get_version('v1.7.0')
    required_calico_node_version = get_version('v2.6.1')
    template = ('Kubernetes v1.7 clusters using Calico require the calicoNode '
                'container to be at v2.6.1. The (cluster, nodepool) '
                '({cluster}, {nodepool}) does not meet this requirement. '
                'Please update the fabricConfig for the cluster named '
                '{cluster} so that the calicoNode container is v2.6.1.'
                )
    clusters = config['deployment']['clusters']
    for cluster in clusters:
        # The incompatibility only affects AWS clusters using canal.
        if cluster['providerConfig']['provider'] != 'aws':
            continue
        if cluster['fabricConfig']['type'] != 'canal':
            continue
        nodepools = cluster['nodePools']
        for nodepool in nodepools:
            # Nodepools without their own kubeConfig are skipped entirely.
            if 'kubeConfig' not in nodepool:
                continue
            k8s_version = get_version(nodepool['kubeConfig']['version'])
            # Only major.minor == 1.7 is affected; patch level is ignored.
            if ((k8s_version.major, k8s_version.minor) !=
                    (required_k8s_version.major, required_k8s_version.minor)):
                continue
            fabric_config = get_versioned_fabric(cluster['fabricConfig'],
                                                 k8s_version)
            containers = fabric_config['options']['containers']
            calico_node_version = get_version(containers['calicoNode']['version'])
            if calico_node_version < required_calico_node_version:
                incompatible = True
                explaination = template.format(cluster=cluster['name'],
                                               nodepool=nodepool['name'])
                explanations.append(explaination)
    return incompatible, explanations
def check_compatibility(config):
    """Run every check registered via @register_check against *config*.

    Returns {'incompatible': bool, 'explanations': [str, ...]}; the
    explanations from all failing checks are accumulated.
    """
    result = {'incompatible': False, 'explanations': []}
    for check in REGISTERED_CHECKS:
        incompatible, explanations = check(config)
        if not incompatible:
            continue
        result['incompatible'] = True
        result['explanations'].extend(explanations)
    return result
def load_documents(config=None, config_filename=None, **kwargs):
    '''Accepts a config as either a python object or a file containing
    YAML or JSON, and returns a python object.

    Extra keyword arguments (e.g. other module params) are ignored.
    '''
    if config_filename:
        with open(config_filename, 'r') as config_file:
            # Fix: yaml.load without an explicit Loader is deprecated and
            # can instantiate arbitrary Python objects from the file;
            # safe_load restricts input to plain YAML/JSON structures.
            config = yaml.safe_load(config_file)
    return config
def main():
    """Ansible module entry point.

    Loads the kraken config (inline dict or file), runs all registered
    compatibility checks, and exits with the aggregated result.
    """
    module = AnsibleModule(
        argument_spec={
            'config': {'required': False, 'type': 'dict'},
            'config_filename': {'required': False, 'type': 'str'},
        },
        mutually_exclusive=[
            ['config', 'config_filename'],
        ],
        required_one_of=[
            ['config', 'config_filename'],
        ],
        supports_check_mode=True
    )
    config = load_documents(**module.params)
    result = check_compatibility(config)
    if result['incompatible']:
        # Fix: grammar in the user-facing message ("incompatibles").
        msg = 'There are incompatibilities in the kraken config.'
    else:
        msg = "The kraken config appears to be compatible."
    # The check never mutates anything, hence changed=False.
    module.exit_json(changed=False, msg=msg, **result)


if __name__ == '__main__':
    main()
|
{
"content_hash": "27cb127f64b79d7a3309055def9ecd76",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 82,
"avg_line_length": 33.63855421686747,
"alnum_prop": 0.6223137535816619,
"repo_name": "samsung-cnct/kraken-lib",
"id": "b2d941ced65e8bfbcaedcd63cdf7aa166a35287f",
"size": "6208",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "ansible/library/compatibility.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "186704"
},
{
"name": "Makefile",
"bytes": "3533"
},
{
"name": "Python",
"bytes": "29148"
},
{
"name": "Shell",
"bytes": "50151"
}
],
"symlink_target": ""
}
|
import datetime
from django.core.urlresolvers import reverse
from django.db import models
from django.conf import settings
from django.contrib.auth.models import User
from django.utils import timezone
from uuslug import slugify
import thread
class PostQuerySet(models.QuerySet):
    """Chainable queryset helpers for Post."""

    def published(self):
        """Active posts whose publication date is not in the future."""
        return self.active().filter(pub_date__lte=timezone.now())

    def active(self):
        """Posts flagged as publicly visible."""
        return self.filter(is_active=True)
class Notice(models.Model):
    """A short site announcement (headline plus body), newest first."""
    headline = models.CharField(max_length=100)
    content = models.TextField()
    # Set once at creation time; not editable afterwards.
    pub_date = models.DateTimeField(editable=False, auto_now_add=True)

    def __unicode__(self):
        # Python 2 string representation (this codebase predates __str__).
        return self.headline

    class Meta:
        ordering = ('-pub_date',)
        get_latest_by = 'pub_date'
class Category(models.Model):
    """A tag-like grouping that posts can belong to."""
    name = models.CharField(max_length=128)
    # NOTE(review): "create_data" looks like a typo for "create_date", but
    # renaming the field would require a schema migration.
    create_data = models.DateTimeField(editable=False, auto_now_add=True)

    def get_absolute_url(self):
        """URL of the archive page listing this category's posts."""
        kwargs = {
            'category': self.name,
        }
        return reverse('blog:archive-category', kwargs=kwargs)

    def __unicode__(self):
        # Python 2 display name.
        return self.name
class Post(models.Model):
    """A blog entry; its slug is regenerated from the headline on save."""
    headline = models.CharField(max_length=200)
    # Derived from the headline in save(); unique per publication date.
    slug = models.SlugField(editable=False, unique_for_date='pub_date')
    is_active = models.BooleanField(default=False)
    category = models.ManyToManyField(Category, related_name='category_post')
    content = models.TextField()
    # Page-view counter; presumably incremented by the view layer -- it is
    # not updated anywhere in this model.
    hitcount = models.IntegerField(default=0)
    pub_date = models.DateTimeField(auto_now_add=True)
    create_date = models.DateTimeField(editable=False, auto_now_add=True)
    update_date = models.DateTimeField(editable=False, auto_now=True)
    author = models.ForeignKey(User, related_name='user_post')

    objects = PostQuerySet.as_manager()

    class Meta:
        ordering = ('-pub_date',)
        get_latest_by = 'pub_date'

    def save(self, *args, **kwargs):
        """Refresh the slug from the headline, then save normally."""
        self.slug = slugify(self.headline)
        super(Post, self).save(*args, **kwargs)

    def __unicode__(self):
        return self.headline

    def get_absolute_url(self):
        """Canonical /year/month/day/slug URL for this post."""
        kwargs = {
            'year': self.pub_date.year,
            'month': self.pub_date.strftime('%b').lower(),
            'day': self.pub_date.strftime('%d').lower(),
            'slug': self.slug,
        }
        return reverse('blog:post', kwargs=kwargs)
class Comment(models.Model):
    """A user comment on a Post; saving one emails recent participants."""
    user = models.ForeignKey(User, related_name='user_comment')
    post = models.ForeignKey(Post, related_name='post_comment')
    context = models.TextField()
    create_date = models.DateTimeField(auto_now_add=True)

    class Meta:
        ordering = ('create_date',)
        get_latest_by = 'create_date'

    def generate_message(self):
        """Build the HTML notification body linking back to the post."""
        message = """\
<html>
<head></head>
<body>
<p>%s<br>
%s<br>
<a href="%s">%s</a>
</p>
</body>
</html>
""" % (u'您好!',
       u'你最近评价的文章有了新的评价',
       u'http://blog.zouyapeng.website' + self.post.get_absolute_url(),
       self.post.headline)
        return message

    def save(self, *args, **kwargs):
        """Save the comment, then notify the thread's participants.

        Recipients: users who commented on the same post within the last
        30 days plus the post's author, excluding the commenter.
        """
        super(Comment, self).save(*args, **kwargs)
        now = datetime.datetime.now()
        prev_day = now - datetime.timedelta(days=30)
        comments = self.post.post_comment.filter(create_date__range=(prev_day, now))
        users = []
        for comment in comments:
            if comment.user not in users:
                users.append(comment.user)
        if self.post.author not in users:
            users.append(self.post.author)
        # The just-saved comment falls inside the 30-day window, so
        # self.user is expected to be in the list here; remove() would
        # raise ValueError otherwise -- NOTE(review): confirm this holds
        # for backdated saves.
        users.remove(self.user)
        for user in users:
            if user.user_profile.enable_email is True:
                # Send asynchronously ('thread' is the Python 2 module).
                thread.start_new_thread(user.user_profile.email_user, ('[Zouyapeng 博客]你有一条新消息', self.generate_message()))

    def get_absolute_url(self):
        """URL of the post this comment belongs to."""
        kwargs = {
            'year': self.post.pub_date.year,
            'month': self.post.pub_date.strftime('%b').lower(),
            'day': self.post.pub_date.strftime('%d').lower(),
            'slug': self.post.slug,
        }
        return reverse('blog:post', kwargs=kwargs)

    def __unicode__(self):
        return "%s:%s" % (self.post.slug, self.context)
|
{
"content_hash": "c59176556289f83ce4f18c95a9fdaa9d",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 121,
"avg_line_length": 33.34074074074074,
"alnum_prop": 0.5794267940457676,
"repo_name": "zouyapeng/Homepage",
"id": "831e5c4c76003a979079a655e88ee27e1e99e018",
"size": "4576",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "165360"
},
{
"name": "HTML",
"bytes": "39455"
},
{
"name": "JavaScript",
"bytes": "146218"
},
{
"name": "Python",
"bytes": "26397"
}
],
"symlink_target": ""
}
|
import time
import sqlite3 as lite
from tmp102 import tmp102_read_temperature
# Location of the SQLite database file, relative to this script.
location = '../db.sqlite3'

CURRENT_DAY_TABLE = 'current_day'
#LAST_WEEK_TABLE = 'last_week'
#HISTORY_TABLE_NAME = 'history'
# The tables are assumed to have been created manually, as follows:
'''
To create tables enter the SQLite shell:
$ sqlite3 db.sqlite3
In the SQLite shell enter these commands to create a table called last_week:
BEGIN;
CREATE TABLE current_day (timestamp DATETIME, temperature NUMERIC);
CREATE TABLE last_week (timestamp DATETIME, temperature NUMERIC);
CREATE TABLE history (timestamp DATETIME, temperature NUMERIC);
COMMIT;
'''
def add_new_entry(temperature, db_path=None, table=None):
    """Insert one (timestamp, temperature) row into the readings table.

    The timestamp is generated by SQLite as local time.  *db_path* and
    *table* default to the module-level ``location`` and
    ``CURRENT_DAY_TABLE``, so existing callers are unaffected; they are
    parameters mainly to make the function testable.
    """
    if db_path is None:
        db_path = location
    if table is None:
        table = CURRENT_DAY_TABLE
    connection = lite.connect(db_path)
    try:
        # The table name comes from a module constant, not user input, so
        # formatting it into the SQL is safe; the reading itself is bound
        # as a parameter.
        connection.execute(
            "INSERT INTO {} values(datetime('now', 'localtime'), (?))".format(table),
            (temperature,))
        connection.commit()
    finally:
        # Fix: previously the connection leaked if execute/commit raised.
        connection.close()
def mesure_temperature():
    # NOTE(review): "mesure" is a typo for "measure"; kept because the
    # name is called below and may be referenced elsewhere.
    # Poll the TMP102 sensor forever, persisting each reading into the
    # current_day table and echoing it to stdout.
    while True:
        temperature = tmp102_read_temperature()
        add_new_entry(temperature)
        print(temperature)
        time.sleep(900)  # 900 s = one sample every 15 minutes


# Start sampling immediately when the script runs (blocks forever).
mesure_temperature()
|
{
"content_hash": "c9cda87c7fef7ac0ce8f08fdcb18a445",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 87,
"avg_line_length": 28.764705882352942,
"alnum_prop": 0.6802999318336742,
"repo_name": "AngelTsanev/pacemPI",
"id": "cd53ef1dd53105a9815d253508a0f9577ecbb00e",
"size": "1467",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/mesure_temperature.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18654"
}
],
"symlink_target": ""
}
|
import re
class CommandError(Exception):
    """Raised by commands to signal a failure to the caller."""
    pass
class BaseCommand():
    """
    Base command, this will accept and handle some generic features of all
    commands, like error handling and argument retrieving / checking.

    Arguments are expected as a docopt-style mapping: option keys look
    like '--name', positional value keys look like '<name>'.
    """

    # Key patterns, hoisted to a class attribute so the dict is not
    # rebuilt on every args_context() loop iteration (the original
    # recreated it per argument).
    _ARG_EXPRESSIONS = {
        'options': r'--(.*)',
        'values': r'<(.*)>',
    }

    def __init__(self, args):
        """Store the parsed argument mapping."""
        self._args = args

    def arg(self, key):
        """Retrieve a single raw argument value (None when missing)."""
        return self._args.get(key)

    def args(self, *keys):
        """Retrieve values for *keys*, or the whole mapping if none given."""
        if keys:
            return [self.arg(k) for k in keys]
        return self._args

    def value(self, key):
        """Retrieve a positional value, i.e. the argument under '<key>'."""
        return self.arg('<{0}>'.format(key))

    def option(self, key, value=None):
        """Retrieve the option under '--key'.

        When *value* is given (and truthy), return whether the option
        equals it instead of the raw value.
        """
        key = '--' + key
        # NOTE(review): a falsy *value* (0, '') is treated like None and
        # skips the comparison -- confirm this is intended.
        if value:
            return self.arg(key) == value
        return self.arg(key)

    def args_context(self):
        """
        Convert all options and values into a flat context usable by the
        template parser ('--dry-run' -> 'dry_run', '<name>' -> 'name').
        """
        # NOTE(review): the 'options'/'values' sub-dicts are initialized
        # but never written to (matched keys are stored flat at the top
        # level); they are kept for backward compatibility with any
        # templates that reference them.
        context = dict(options={}, values={})
        for key, val in self.args().items():
            for expression in self._ARG_EXPRESSIONS.values():
                matches = re.search(expression, key)
                if matches:
                    context[matches.group(1).replace('-', '_')] = val
        return context
|
{
"content_hash": "b51b81fa667abefdfaec95c0dd45522c",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 84,
"avg_line_length": 22.87142857142857,
"alnum_prop": 0.4928169893816365,
"repo_name": "snelis/snelis",
"id": "ab5273d654a3923ccb0b1bcda1dd62d46392a4ae",
"size": "1601",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "snelis/management/commands/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "54458"
},
{
"name": "Python",
"bytes": "27457"
},
{
"name": "Ruby",
"bytes": "4736"
},
{
"name": "Shell",
"bytes": "6773"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.