repo_name | path | copies | size | content | license
|---|---|---|---|---|---|
IEMLdev/ieml-api | ieml/test/test_path.py | 2 | 8291 | import unittest
import tqdm
from ieml.dictionary.script import Script
from ieml.ieml_database import IEMLDatabase, GitInterface
from ieml.usl import PolyMorpheme, Lexeme, Word
from ieml.usl.decoration.parser.parser import PathParser
from ieml.usl.decoration.path import PolymorphemePath, GroupIndex, FlexionPath, LexemeIndex, LexemePath, RolePath, \
usl_from_path_values, path
from ieml.usl.parser import IEMLParser
from ieml.usl.syntagmatic_function import SyntagmaticRole
from ieml.usl.usl import usl
parser = PathParser()
class TestPath(unittest.TestCase):
def check(self, path, _type, usl, expected_type):
self.assertEqual(str(parser.parse(path)), path)
res = parser.parse(path).deference(usl)
self.assertIsInstance(res, expected_type)
def test_path(self):
from ieml.usl.usl import usl
pm = [usl("A: E: S: B: T:"), usl("A: E: m1(S: B: T:)"), usl("A: m1(E:) m1(S: B: T:)"),
usl("m1(A:) m1(E:) m1(S: B: T:)")]
# pm_path = PolymorphemePath(GroupIndex.CONSTANT, usl('S:'))
PolymorphemePath(GroupIndex.CONSTANT, usl('S:')).deference(pm[0])
PolymorphemePath(GroupIndex.GROUP_0, usl('S:')).deference(pm[1])
PolymorphemePath(GroupIndex.GROUP_1, usl('S:')).deference(pm[2])
PolymorphemePath(GroupIndex.GROUP_2, usl('S:')).deference(pm[3])
self.check(">constant>S:", PolymorphemePath, usl('S: A:'), Script)
self.check(">constant", PolymorphemePath, usl('S: A:'), PolyMorpheme)
self.check(">group_0 1>S:", PolymorphemePath, usl('A: m1(S:)'), Script)
self.check(">group_0 1", PolymorphemePath, usl('m1(S: A:)'), PolyMorpheme)
self.check(">group_2 1>B:", PolymorphemePath, usl('A: m1(U:) m1(B:) m1(S:)'), Script)
self.check(">group_1 1>S:", PolymorphemePath, usl('A: m1(U:) m1(S:)'), Script)
self.check(">group_2 1", PolymorphemePath, usl('A: m1(U:) m1(B:) m1(S:)'), PolyMorpheme)
self.check(">group_1 1", PolymorphemePath, usl('A: m1(U:) m1(S:)'), PolyMorpheme)
self.check(">", PolymorphemePath, usl('S: A:'), PolyMorpheme)
LexemePath(LexemeIndex.CONTENT, child=PolymorphemePath(GroupIndex.CONSTANT, usl('S:'))).deference(
usl("()(S: B:)"))
LexemePath(LexemeIndex.FLEXION, child=FlexionPath(usl('S:'))).deference(usl("(S: B:)(S:)"))
self.check('>content>constant>S:', LexemePath, usl('()(S:)'), Script)
self.check('>flexion>S:', LexemePath, usl('(S:)(B:)'), Script)
self.check('>flexion', LexemePath, usl('(S:)(B:)'), PolyMorpheme)
self.check('>flexion', LexemePath, usl('(S:)(B:)'), PolyMorpheme)
self.check(">", LexemePath, usl('(S:)(B:)'), Lexeme)
w = usl("[! E:A:. ()(m.-B:.A:.-') > E:A:. E:A:. (E:B:.-d.u.-')(p.E:A:T:.- m1(S:))]")
path = RolePath(SyntagmaticRole([usl('E:A:.'), usl('E:A:.')]),
child=LexemePath(LexemeIndex.CONTENT,
child=PolymorphemePath(GroupIndex.CONSTANT, usl('p.E:A:T:.-'))))
path.deference(w)
self.check(">role>E:A:. E:A:.>content>group_0 1>S:", RolePath, w, Script)
self.check(">role>E:A:. E:A:.>content>constant>p.E:A:T:.-", RolePath, w, Script)
self.check(">role>E:A:. E:A:.>flexion>E:B:.-d.u.-'", RolePath, w, Script)
self.check(">role>E:A:.>content>constant>m.-B:.A:.-'", RolePath, w, Script)
u = usl(
"[! E:B:. ()(k.a.-k.a.-' l.o.-k.o.-') > E:.f.- ()(m1(p.E:A:S:.- p.E:A:B:.- p.E:A:T:.- t.i.-l.i.-' c.-'B:.-'k.o.-t.o.-',))]")
self.check(">role>E:.f.->content>group_0 1>p.E:A:S:.-", RolePath, u, Script)
self.check(">role>E:A:.", RolePath, w, Lexeme)
self.check(">role>E:A:.>content", RolePath, w, PolyMorpheme)
self.check(">", RolePath, w, Word)
def test_paths_values_to_usl(self):
pm = [(">constant>S:", "S:"), (">constant>B:", "B:"), (">group_0 2>T:", "T:"), (">group_0 2>A:", "A:")]
res = usl_from_path_values(pm)
self.assertIsInstance(res, PolyMorpheme)
self.assertEqual(str(res), "S: B: m2(A: T:)")
pm = [(">content>constant>S:", "S:"), (">content>constant>B:", "B:"), (">content>group_0 1>T:", "T:")]
res = usl_from_path_values(pm)
self.assertIsInstance(res, Lexeme)
self.assertEqual(str(res), "()(S: B: m1(T:))")
pm = [(">role>! E:A:.>content>constant>S:", "S:"),
(">role>E:A:. E:A:.>content>constant>B:", "B:"),
(">role>E:A:. E:A:.>content>group_0>T:", "T:")]
res = usl_from_path_values(pm)
self.assertIsInstance(res, Word)
self.assertEqual(str(res), "[! E:A:. ()(S:) > E:A:. E:A:. ()(B: m1(T:))]")
def test_expand_compose_into_paths(self):
# parser = IEMLParser().parse
gitdb = GitInterface(origin='https://github.com/plevyieml/ieml-language.git')
gitdb.pull()
db = IEMLDatabase(folder=gitdb.folder)
usls = db.list(type=Word, parse=True) + db.list(type=PolyMorpheme, parse=True) + db.list(type=Lexeme, parse=True)
for u in tqdm.tqdm(usls):
p_u = list(u.iter_structure_path_by_script_ss())
res = usl_from_path_values(p_u)
self.assertEqual(str(u), str(res), "expand_compose_into_paths failed on: " + str(u))
def test_expand_compose_into_paths_empty_exclamation(self):
u = usl('[E:A:. (E:.-n.S:.-\')(b.a.- b.o.-n.o.-s.u.-\' f.a.-b.a.-f.o.-\') > E:A:. E:A:. ()(n.-S:.U:.-\'B:.-\'B:.-\',B:.-\',B:.-\',_ n.-S:.U:.-\'B:.-\'B:.-\',T:.-\',S:.-\',_) > ! E:A:. E:U:. ()]')
p_u = list(u.iter_structure_path_by_script_ss())
res = usl_from_path_values(p_u)
self.assertEqual(str(u), str(res))
def test_expand_compose_into_paths_pm(self):
u = usl("E:T:S:. n.-T:.A:.-'")
p_u = list(u.iter_structure_path_by_script_ss())
res = usl_from_path_values(p_u)
self.assertEqual(str(u), str(res))
def test_expand_compose_into_paths_pm2(self):
u = usl("s.-S:.U:.-' n.-T:.A:.-' d.-S:.U:.-' m1(E:.-U:.b.-l.-' E:.-U:.f.-l.-') m1(E:.-B:.k.-l.-')")
p_u = list(u.iter_structure_path_by_script_ss())
res = usl_from_path_values(p_u)
self.assertEqual(str(u), str(res))
def test_has_prefix(self):
u = usl("[! E:A:. ()(b.-S:.A:.-'S:.-'S:.-', m1(S: B: T:) m2(y. o. e. u. a. i.)) > E:A:. E:A:. (m1(E:U:T:. E:A:T:. E:S:T:. E:B:T:. E:T:T:.))(k.a.-k.a.-')]")
p0 = path(">role>! E:A:.>content>group_0 1>S:")
p0_prefix = path(">role>! E:A:.>content>group_0 1")
self.assertTrue(p0.has_prefix(p0_prefix))
def test_usl_from_path(self):
structure = {">role>! E:A:.>flexion>E:": "E:",
">role>! E:A:.>content>constant>b.-S:.A:.-'S:.-'S:.-',": "b.-S:.A:.-'S:.-'S:.-',",
">role>E:A:. E:A:.>flexion>E:": "E:",
">role>E:A:. E:A:.>flexion>E:U:T:.": "E:U:T:.",
">role>E:A:. E:A:.>flexion>E:A:T:.": "E:A:T:.",
">role>E:A:. E:A:.>flexion>E:S:T:.": "E:S:T:.",
">role>E:A:. E:A:.>flexion>E:B:T:.": "E:B:T:.",
">role>E:A:. E:A:.>flexion>E:T:T:.": "E:T:T:.",
">role>E:A:. E:A:.>content>constant>k.a.-k.a.-'": "k.a.-k.a.-'"}
usl_parser = IEMLParser().parse
path_parser = PathParser().parse
structure = [(path_parser(p), usl_parser(u)) for p, u in structure.items()]
u = usl_from_path_values(structure)
self.assertEqual(u, usl("[! E:A:. ()(b.-S:.A:.-'S:.-'S:.-',) > E:A:. E:A:. (m1(E:U:T:. E:A:T:. E:S:T:. E:B:T:. E:T:T:.))(k.a.-k.a.-')]"))
def test_usl_from_path_pm(self):
structure = [
(">constant>b.-S:.A:.-'S:.-'S:.-',", "b.-S:.A:.-'S:.-'S:.-',"),
(">constant>k.a.-k.a.-'", "k.a.-k.a.-'"),
(">constant", "U:"),
(">constant", "E:")
]
usl_parser = IEMLParser().parse
path_parser = PathParser().parse
structure = [(path_parser(p), usl_parser(u)) for p, u in structure]
u = usl_from_path_values(structure)
self.assertEqual(str(u), "U: k.a.-k.a.-' b.-S:.A:.-'S:.-'S:.-',")
def test_usl_from_path_flexion_paradigm(self):
structure = [
(">flexion", "E:.wo.U:.-t.o.-'"),
(">flexion", "E:.wo.A:.-t.o.-'"),
(">content>constant", "U:"),
]
usl_parser = IEMLParser().parse
path_parser = PathParser().parse
structure = [(path_parser(p), usl_parser(u)) for p, u in structure]
u = usl_from_path_values(structure)
self.assertEqual(str(u), "(m1(E:.wo.U:.-t.o.-' E:.wo.A:.-t.o.-'))(U:)")
def test_usl_from_path_pm2(self):
structure = [
(">constant>b.-S:.A:.-'S:.-'S:.-',", "b.-S:.A:.-'S:.-'S:.-',"),
(">constant", "k.a.-k.a.-' A:"),
(">constant", "U:"),
(">constant", "E:")
]
usl_parser = IEMLParser().parse
path_parser = PathParser().parse
structure = [(path_parser(p), usl_parser(u)) for p, u in structure]
u = usl_from_path_values(structure)
self.assertEqual(str(u), "U: A: k.a.-k.a.-' b.-S:.A:.-'S:.-'S:.-',")
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
alexandrucoman/vbox-nova-driver | nova/db/sqlalchemy/migrate_repo/versions/229_add_extra_resources_in_compute_nodes.py | 49 | 1699 | # Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy import Text
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# Add a new column extra_resources to save extra resource info for compute nodes
compute_nodes = Table('compute_nodes', meta, autoload=True)
shadow_compute_nodes = Table('shadow_compute_nodes', meta, autoload=True)
extra_resources = Column('extra_resources', Text, nullable=True)
shadow_extra_resources = Column('extra_resources', Text, nullable=True)
compute_nodes.create_column(extra_resources)
shadow_compute_nodes.create_column(shadow_extra_resources)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# Remove the new column
compute_nodes = Table('compute_nodes', meta, autoload=True)
shadow_compute_nodes = Table('shadow_compute_nodes', meta, autoload=True)
compute_nodes.drop_column('extra_resources')
shadow_compute_nodes.drop_column('extra_resources')
| apache-2.0 |
toshywoshy/ansible | lib/ansible/modules/storage/netapp/netapp_e_asup.py | 52 | 11220 | #!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: netapp_e_asup
short_description: NetApp E-Series manage auto-support settings
description:
- Allow the auto-support settings to be configured for an individual E-Series storage-system
version_added: '2.7'
author: Michael Price (@lmprice)
extends_documentation_fragment:
- netapp.eseries
options:
state:
description:
- Enable/disable the E-Series auto-support configuration.
- When this option is enabled, configuration, logs, and other support-related information will be relayed
to NetApp to help better support your system. No personally identifiable information, passwords, etc, will
be collected.
default: enabled
choices:
- enabled
- disabled
aliases:
- asup
- auto_support
- autosupport
active:
description:
- Enable active/proactive monitoring for ASUP. When a problem is detected by our monitoring systems, it's
possible that the bundle did not contain all of the required information at the time of the event.
Enabling this option allows NetApp support personnel to manually request transmission or re-transmission
of support data in order to resolve the problem.
- Only applicable if I(state=enabled).
default: yes
type: bool
start:
description:
- A start hour may be specified in a range from 0 to 23 hours.
- ASUP bundles will be sent daily between the provided start and end time (UTC).
- I(start) must be less than I(end).
aliases:
- start_time
default: 0
end:
description:
- An end hour may be specified in a range from 1 to 24 hours.
- ASUP bundles will be sent daily between the provided start and end time (UTC).
- I(start) must be less than I(end).
aliases:
- end_time
default: 24
days:
description:
- A list of days of the week that ASUP bundles will be sent. A larger, weekly bundle will be sent on one
of the provided days.
choices:
- monday
- tuesday
- wednesday
- thursday
- friday
- saturday
- sunday
required: no
aliases:
- days_of_week
- schedule_days
verbose:
description:
- Provide the full ASUP configuration in the return.
default: no
required: no
type: bool
log_path:
description:
- A local path to a file to be used for debug logging
required: no
notes:
- Check mode is supported.
- Enabling ASUP will allow our support teams to monitor the logs of the storage-system in order to proactively
respond to issues with the system. It is recommended that all ASUP-related options be enabled, but they may be
disabled if desired.
- This API is currently only supported with the Embedded Web Services API v2.0 and higher.
"""
EXAMPLES = """
- name: Enable ASUP and allow pro-active retrieval of bundles
netapp_e_asup:
state: enabled
active: yes
api_url: "10.1.1.1:8443"
api_username: "admin"
api_password: "myPass"
- name: Set the ASUP schedule to only send bundles between 17:00 and 20:00 (UTC)
netapp_e_asup:
start: 17
end: 20
api_url: "10.1.1.1:8443"
api_username: "admin"
api_password: "myPass"
"""
RETURN = """
msg:
description: Success message
returned: on success
type: str
sample: The ASUP settings have been updated.
asup:
description:
- True if ASUP is enabled.
returned: on success
sample: True
type: bool
active:
description:
- True if the active option has been enabled.
returned: on success
sample: True
type: bool
cfg:
description:
- Provide the full ASUP configuration.
returned: on success when I(verbose=true).
type: complex
contains:
asupEnabled:
description:
- True if ASUP has been enabled.
type: bool
onDemandEnabled:
description:
- True if ASUP active monitoring has been enabled.
type: bool
daysOfWeek:
description:
- The days of the week that ASUP bundles will be sent.
type: list
"""
import json
import logging
from pprint import pformat
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils._text import to_native
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
class Asup(object):
DAYS_OPTIONS = ['sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday']
def __init__(self):
argument_spec = eseries_host_argument_spec()
argument_spec.update(dict(
state=dict(type='str', required=False, default='enabled', aliases=['asup', 'auto_support', 'autosupport'],
choices=['enabled', 'disabled']),
active=dict(type='bool', required=False, default=True, ),
days=dict(type='list', required=False, aliases=['schedule_days', 'days_of_week'],
choices=self.DAYS_OPTIONS),
start=dict(type='int', required=False, default=0, aliases=['start_time']),
end=dict(type='int', required=False, default=24, aliases=['end_time']),
verbose=dict(type='bool', required=False, default=False),
log_path=dict(type='str', required=False),
))
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, )
args = self.module.params
self.asup = args['state'] == 'enabled'
self.active = args['active']
self.days = args['days']
self.start = args['start']
self.end = args['end']
self.verbose = args['verbose']
self.ssid = args['ssid']
self.url = args['api_url']
self.creds = dict(url_password=args['api_password'],
validate_certs=args['validate_certs'],
url_username=args['api_username'], )
self.check_mode = self.module.check_mode
log_path = args['log_path']
# logging setup
self._logger = logging.getLogger(self.__class__.__name__)
if log_path:
logging.basicConfig(
level=logging.DEBUG, filename=log_path, filemode='w',
format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
if not self.url.endswith('/'):
self.url += '/'
if self.start >= self.end:
self.module.fail_json(msg="The value provided for the start time is invalid."
" It must be less than the end time.")
if self.start < 0 or self.start > 23:
self.module.fail_json(msg="The value provided for the start time is invalid. It must be between 0 and 23.")
else:
self.start = self.start * 60
if self.end < 1 or self.end > 24:
self.module.fail_json(msg="The value provided for the end time is invalid. It must be between 1 and 24.")
else:
self.end = min(self.end * 60, 1439)
if not self.days:
self.days = self.DAYS_OPTIONS
def get_configuration(self):
try:
(rc, result) = request(self.url + 'device-asup', headers=HEADERS, **self.creds)
if not (result['asupCapable'] and result['onDemandCapable']):
self.module.fail_json(msg="ASUP is not supported on this device. Array Id [%s]." % (self.ssid))
return result
except Exception as err:
self.module.fail_json(msg="Failed to retrieve ASUP configuration! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
def update_configuration(self):
config = self.get_configuration()
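# Compare the desired settings with the current configuration and only
# POST an update when something actually changed (honoring check mode).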
update = False
body = dict()
if self.asup:
body = dict(asupEnabled=True)
if not config['asupEnabled']:
update = True
if (config['onDemandEnabled'] and config['remoteDiagsEnabled']) != self.active:
update = True
body.update(dict(onDemandEnabled=self.active,
remoteDiagsEnabled=self.active))
self.days.sort()
config['schedule']['daysOfWeek'].sort()
body['schedule'] = dict(daysOfWeek=self.days,
dailyMinTime=self.start,
dailyMaxTime=self.end,
weeklyMinTime=self.start,
weeklyMaxTime=self.end)
if self.days != config['schedule']['daysOfWeek']:
update = True
if self.start != config['schedule']['dailyMinTime'] or self.start != config['schedule']['weeklyMinTime']:
update = True
elif self.end != config['schedule']['dailyMaxTime'] or self.end != config['schedule']['weeklyMaxTime']:
update = True
elif config['asupEnabled']:
body = dict(asupEnabled=False)
update = True
self._logger.info(pformat(body))
if update and not self.check_mode:
try:
(rc, result) = request(self.url + 'device-asup', method='POST',
data=json.dumps(body), headers=HEADERS, **self.creds)
# This is going to catch cases like a connection failure
except Exception as err:
self.module.fail_json(msg="We failed to set the storage-system name! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
return update
def update(self):
update = self.update_configuration()
cfg = self.get_configuration()
if self.verbose:
self.module.exit_json(msg="The ASUP settings have been updated.", changed=update,
asup=cfg['asupEnabled'], active=cfg['onDemandEnabled'], cfg=cfg)
else:
self.module.exit_json(msg="The ASUP settings have been updated.", changed=update,
asup=cfg['asupEnabled'], active=cfg['onDemandEnabled'])
def __call__(self, *args, **kwargs):
self.update()
def main():
settings = Asup()
settings()
if __name__ == '__main__':
main()
| gpl-3.0 |
nikolas/configuration | util/vpc-tools/tag-old-ebs.py | 62 | 7679 | """
For a given aws account, go through all un-attached volumes and tag them.
"""
import boto
import boto.utils
import argparse
import logging
import subprocess
import time
import os
from os.path import join, exists, isdir, islink, realpath, basename, dirname
import yaml
# needs to be pip installed
import netaddr
LOG_FORMAT = "%(asctime)s %(levelname)s - %(filename)s:%(lineno)s - %(message)s"
TIMEOUT = 300
log_level = logging.INFO
def tags_for_hostname(hostname, mapping):
logging.debug("Hostname is {}".format(hostname))
if not hostname.startswith('ip-'):
return {}
octets = hostname[len('ip-'):].split('-')  # strip the 'ip-' prefix (lstrip would strip a character set, not a prefix)
tags = {}
# Update with env and deployment info
tags.update(mapping['CIDR_SECOND_OCTET'][octets[1]])
ip_addr = netaddr.IPAddress(".".join(octets))
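# Build candidate CIDRs of the form "<first octet>.<second octet>.<rest key>"
# and merge in the mapped tags whenever the host's IP falls inside one.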
for key, value in mapping['CIDR_REST'].items():
cidr = ".".join([
mapping['CIDR_FIRST_OCTET'],
octets[1],
key])
cidrset = netaddr.IPSet([cidr])
if ip_addr in cidrset:
tags.update(value)
return tags
def potential_devices(root_device):
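# Return the device nodes that share the attachment's base name, e.g.
# partitions such as xvdf1 alongside /dev/xvdf (names here are illustrative).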
device_dir = dirname(root_device)
relevant_devices = lambda x: x.startswith(basename(root_device))
all_devices = os.listdir(device_dir)
all_devices = filter(relevant_devices, all_devices)
logging.info("Potential devices on {}: {}".format(root_device, all_devices))
if len(all_devices) > 1:
all_devices.remove(basename(root_device))
return map(lambda x: join(device_dir, x), all_devices)
def get_tags_for_disk(mountpoint):
tag_data = {}
# Look at some files on it to determine:
# - hostname
# - environment
# - deployment
# - cluster
# - instance-id
# - date created
hostname_file = join(mountpoint, "etc", "hostname")
edx_dir = join(mountpoint, 'edx', 'app')
if exists(hostname_file):
# This means this was a root volume.
with open(hostname_file, 'r') as f:
hostname = f.readline().strip()
tag_data['hostname'] = hostname
if exists(edx_dir) and isdir(edx_dir):
# This is an ansible related ami, we'll try to map
# the hostname to a knows deployment and cluster.
cluster_tags = tags_for_hostname(hostname, mappings)
tag_data.update(cluster_tags)
else:
# Not an ansible created root volume.
tag_data['cluster'] = 'unknown'
else:
# Not a root volume
tag_data['cluster'] = "unknown"
instance_file = join(mountpoint, "var", "lib", "cloud", "instance")
if exists(instance_file) and islink(instance_file):
resolved_path = realpath(instance_file)
old_instance_id = basename(resolved_path)
tag_data['instance-id'] = old_instance_id
return tag_data
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Tag unattached ebs volumes.")
parser.add_argument("--profile", '-p',
help="AWS Profile to use with boto.")
parser.add_argument("--noop", "-n", action="store_true",
help="Don't actually tag anything.")
parser.add_argument("--verbose", "-v", action="store_true",
help="More verbose output.")
parser.add_argument("--device", "-d", default="/dev/xvdf",
help="The /dev/??? where the volume should be mounted.")
parser.add_argument("--mountpoint", "-m", default="/mnt",
help="Location to mount the new device.")
parser.add_argument("--config", "-c", required=True,
help="Configuration to map hostnames to tags.")
# The config should specify what tags to associate with the second
# and third octets of the hostname, which should be the ip address.
# example:
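# (hypothetical structure matching what tags_for_hostname consumes; the
#  octets and tag values are illustrative, not real deployments)
#
# CIDR_FIRST_OCTET: "10"
# CIDR_SECOND_OCTET:
#   "0": { environment: stage, deployment: edx }
# CIDR_REST:
#   "8.0/22": { cluster: commoncluster }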
args = parser.parse_args()
mappings = yaml.safe_load(open(args.config,'r'))
# Setup Logging
if args.verbose:
log_level = logging.DEBUG
logging.basicConfig(format=LOG_FORMAT, level=log_level)
# setup boto
ec2 = boto.connect_ec2(profile_name=args.profile)
# get mounting args
id_info = boto.utils.get_instance_identity()['document']
instance_id = id_info['instanceId']
az = id_info['availabilityZone']
root_device = args.device
mountpoint = args.mountpoint
# Find all unattached volumes
filters = { "status": "available", "availability-zone": az }
potential_volumes = ec2.get_all_volumes(filters=filters)
logging.debug("Found {} unattached volumes in {}".format(len(potential_volumes), az))
for vol in potential_volumes:
if "cluster" in vol.tags:
continue
# Attach volume to the instance running this process
logging.debug("Trying to attach {} to {} at {}".format(
vol.id, instance_id, root_device))
try:
ec2.attach_volume(vol.id, instance_id, root_device)
# Wait for the volume to finish attaching.
waiting_msg = "Waiting for {} to be available at {}"
timeout = TIMEOUT
while not exists(root_device):
time.sleep(2)
logging.debug(waiting_msg.format(vol.id, root_device))
timeout -= 2
if timeout <= 0:
logging.critical("Timed out while attaching {}.".format(vol.id))
exit(1)
# Because a volume might have multiple mount points
devices_on_volume = potential_devices(root_device)
if len(devices_on_volume) != 1:
vol.add_tag("devices_on_volume", str(devices_on_volume))
# Don't tag in this case because the different devices
# may have conflicting tags.
logging.info("Skipping {} because it has multiple mountpoints.".format(vol.id))
logging.info("{} has mountpoints {}".format(vol.id, str(devices_on_volume)))
else:
device = devices_on_volume[0]
try:
# Mount the volume
subprocess.check_call(["sudo", "mount", device, mountpoint])
# Learn all tags we can know from content on disk.
tag_data = get_tags_for_disk(mountpoint)
tag_data['created'] = vol.create_time
# If they are found tag the instance with them
if args.noop:
logging.info("Would have tagged {} with: \n{}".format(vol.id, str(tag_data)))
else:
logging.info("Tagging {} with: \n{}".format(vol.id, str(tag_data)))
vol.add_tags(tag_data)
finally:
# Un-mount the volume
subprocess.check_call(['sudo', 'umount', mountpoint])
finally:
# Need this to be a function so we always re-check the API for status.
is_attached = lambda vol_id: ec2.get_all_volumes([vol_id])[0].status != "available"
timeout = TIMEOUT
while exists(root_device) or is_attached(vol.id):
if is_attached(vol.id):
try:
# detach the volume
ec2.detach_volume(vol.id)
except boto.exception.EC2ResponseError as e:
logging.warning("Failed to detach volume. Will try again in a bit.")
time.sleep(2)
timeout -= 2
if timeout <= 0:
logging.critical("Timed out while detaching {}.".format(vol.id))
exit(1)
logging.debug("Waiting for {} to be detached.".format(vol.id))
| agpl-3.0 |
hsfzxjy/wisecitymbc | site_packages/rest_framework/tests/test_relations_nested.py | 2 | 14951 | from __future__ import unicode_literals
from django.db import models
from django.test import TestCase
from rest_framework import serializers
from .models import OneToOneTarget
class OneToOneSource(models.Model):
name = models.CharField(max_length=100)
target = models.OneToOneField(OneToOneTarget, related_name='source',
null=True, blank=True)
class OneToManyTarget(models.Model):
name = models.CharField(max_length=100)
class OneToManySource(models.Model):
name = models.CharField(max_length=100)
target = models.ForeignKey(OneToManyTarget, related_name='sources')
class ReverseNestedOneToOneTests(TestCase):
def setUp(self):
class OneToOneSourceSerializer(serializers.ModelSerializer):
class Meta:
model = OneToOneSource
fields = ('id', 'name')
class OneToOneTargetSerializer(serializers.ModelSerializer):
source = OneToOneSourceSerializer()
class Meta:
model = OneToOneTarget
fields = ('id', 'name', 'source')
self.Serializer = OneToOneTargetSerializer
for idx in range(1, 4):
target = OneToOneTarget(name='target-%d' % idx)
target.save()
source = OneToOneSource(name='source-%d' % idx, target=target)
source.save()
def test_one_to_one_retrieve(self):
queryset = OneToOneTarget.objects.all()
serializer = self.Serializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'target-1', 'source': {'id': 1, 'name': 'source-1'}},
{'id': 2, 'name': 'target-2', 'source': {'id': 2, 'name': 'source-2'}},
{'id': 3, 'name': 'target-3', 'source': {'id': 3, 'name': 'source-3'}}
]
self.assertEqual(serializer.data, expected)
def test_one_to_one_create(self):
data = {'id': 4, 'name': 'target-4', 'source': {'id': 4, 'name': 'source-4'}}
serializer = self.Serializer(data=data)
self.assertTrue(serializer.is_valid())
obj = serializer.save()
self.assertEqual(serializer.data, data)
self.assertEqual(obj.name, 'target-4')
# Ensure (target 4, target_source 4, source 4) are added, and
# everything else is as expected.
queryset = OneToOneTarget.objects.all()
serializer = self.Serializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'target-1', 'source': {'id': 1, 'name': 'source-1'}},
{'id': 2, 'name': 'target-2', 'source': {'id': 2, 'name': 'source-2'}},
{'id': 3, 'name': 'target-3', 'source': {'id': 3, 'name': 'source-3'}},
{'id': 4, 'name': 'target-4', 'source': {'id': 4, 'name': 'source-4'}}
]
self.assertEqual(serializer.data, expected)
def test_one_to_one_create_with_invalid_data(self):
data = {'id': 4, 'name': 'target-4', 'source': {'id': 4}}
serializer = self.Serializer(data=data)
self.assertFalse(serializer.is_valid())
self.assertEqual(serializer.errors, {'source': [{'name': ['This field is required.']}]})
def test_one_to_one_update(self):
data = {'id': 3, 'name': 'target-3-updated', 'source': {'id': 3, 'name': 'source-3-updated'}}
instance = OneToOneTarget.objects.get(pk=3)
serializer = self.Serializer(instance, data=data)
self.assertTrue(serializer.is_valid())
obj = serializer.save()
self.assertEqual(serializer.data, data)
self.assertEqual(obj.name, 'target-3-updated')
# Ensure (target 3, target_source 3, source 3) are updated,
# and everything else is as expected.
queryset = OneToOneTarget.objects.all()
serializer = self.Serializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'target-1', 'source': {'id': 1, 'name': 'source-1'}},
{'id': 2, 'name': 'target-2', 'source': {'id': 2, 'name': 'source-2'}},
{'id': 3, 'name': 'target-3-updated', 'source': {'id': 3, 'name': 'source-3-updated'}}
]
self.assertEqual(serializer.data, expected)
class ForwardNestedOneToOneTests(TestCase):
def setUp(self):
class OneToOneTargetSerializer(serializers.ModelSerializer):
class Meta:
model = OneToOneTarget
fields = ('id', 'name')
class OneToOneSourceSerializer(serializers.ModelSerializer):
target = OneToOneTargetSerializer()
class Meta:
model = OneToOneSource
fields = ('id', 'name', 'target')
self.Serializer = OneToOneSourceSerializer
for idx in range(1, 4):
target = OneToOneTarget(name='target-%d' % idx)
target.save()
source = OneToOneSource(name='source-%d' % idx, target=target)
source.save()
def test_one_to_one_retrieve(self):
queryset = OneToOneSource.objects.all()
serializer = self.Serializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'source-1', 'target': {'id': 1, 'name': 'target-1'}},
{'id': 2, 'name': 'source-2', 'target': {'id': 2, 'name': 'target-2'}},
{'id': 3, 'name': 'source-3', 'target': {'id': 3, 'name': 'target-3'}}
]
self.assertEqual(serializer.data, expected)
def test_one_to_one_create(self):
data = {'id': 4, 'name': 'source-4', 'target': {'id': 4, 'name': 'target-4'}}
serializer = self.Serializer(data=data)
self.assertTrue(serializer.is_valid())
obj = serializer.save()
self.assertEqual(serializer.data, data)
self.assertEqual(obj.name, 'source-4')
# Ensure (target 4, target_source 4, source 4) are added, and
# everything else is as expected.
queryset = OneToOneSource.objects.all()
serializer = self.Serializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'source-1', 'target': {'id': 1, 'name': 'target-1'}},
{'id': 2, 'name': 'source-2', 'target': {'id': 2, 'name': 'target-2'}},
{'id': 3, 'name': 'source-3', 'target': {'id': 3, 'name': 'target-3'}},
{'id': 4, 'name': 'source-4', 'target': {'id': 4, 'name': 'target-4'}}
]
self.assertEqual(serializer.data, expected)
def test_one_to_one_create_with_invalid_data(self):
data = {'id': 4, 'name': 'source-4', 'target': {'id': 4}}
serializer = self.Serializer(data=data)
self.assertFalse(serializer.is_valid())
self.assertEqual(serializer.errors, {'target': [{'name': ['This field is required.']}]})
def test_one_to_one_update(self):
data = {'id': 3, 'name': 'source-3-updated', 'target': {'id': 3, 'name': 'target-3-updated'}}
instance = OneToOneSource.objects.get(pk=3)
serializer = self.Serializer(instance, data=data)
self.assertTrue(serializer.is_valid())
obj = serializer.save()
self.assertEqual(serializer.data, data)
self.assertEqual(obj.name, 'source-3-updated')
# Ensure (target 3, target_source 3, source 3) are updated,
# and everything else is as expected.
queryset = OneToOneSource.objects.all()
serializer = self.Serializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'source-1', 'target': {'id': 1, 'name': 'target-1'}},
{'id': 2, 'name': 'source-2', 'target': {'id': 2, 'name': 'target-2'}},
{'id': 3, 'name': 'source-3-updated', 'target': {'id': 3, 'name': 'target-3-updated'}}
]
self.assertEqual(serializer.data, expected)
def test_one_to_one_update_to_null(self):
data = {'id': 3, 'name': 'source-3-updated', 'target': None}
instance = OneToOneSource.objects.get(pk=3)
serializer = self.Serializer(instance, data=data)
self.assertTrue(serializer.is_valid())
obj = serializer.save()
self.assertEqual(serializer.data, data)
self.assertEqual(obj.name, 'source-3-updated')
self.assertEqual(obj.target, None)
queryset = OneToOneSource.objects.all()
serializer = self.Serializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'source-1', 'target': {'id': 1, 'name': 'target-1'}},
{'id': 2, 'name': 'source-2', 'target': {'id': 2, 'name': 'target-2'}},
{'id': 3, 'name': 'source-3-updated', 'target': None}
]
self.assertEqual(serializer.data, expected)
# TODO: Nullable 1-1 tests
# def test_one_to_one_delete(self):
# data = {'id': 3, 'name': 'target-3', 'target_source': None}
# instance = OneToOneTarget.objects.get(pk=3)
# serializer = self.Serializer(instance, data=data)
# self.assertTrue(serializer.is_valid())
# serializer.save()
# # Ensure (target_source 3, source 3) are deleted,
# # and everything else is as expected.
# queryset = OneToOneTarget.objects.all()
# serializer = self.Serializer(queryset)
# expected = [
# {'id': 1, 'name': 'target-1', 'source': {'id': 1, 'name': 'source-1'}},
# {'id': 2, 'name': 'target-2', 'source': {'id': 2, 'name': 'source-2'}},
# {'id': 3, 'name': 'target-3', 'source': None}
# ]
# self.assertEqual(serializer.data, expected)
class ReverseNestedOneToManyTests(TestCase):
def setUp(self):
class OneToManySourceSerializer(serializers.ModelSerializer):
class Meta:
model = OneToManySource
fields = ('id', 'name')
class OneToManyTargetSerializer(serializers.ModelSerializer):
sources = OneToManySourceSerializer(many=True, allow_add_remove=True)
class Meta:
model = OneToManyTarget
fields = ('id', 'name', 'sources')
self.Serializer = OneToManyTargetSerializer
target = OneToManyTarget(name='target-1')
target.save()
for idx in range(1, 4):
source = OneToManySource(name='source-%d' % idx, target=target)
source.save()
def test_one_to_many_retrieve(self):
queryset = OneToManyTarget.objects.all()
serializer = self.Serializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'target-1', 'sources': [{'id': 1, 'name': 'source-1'},
{'id': 2, 'name': 'source-2'},
{'id': 3, 'name': 'source-3'}]},
]
self.assertEqual(serializer.data, expected)
def test_one_to_many_create(self):
data = {'id': 1, 'name': 'target-1', 'sources': [{'id': 1, 'name': 'source-1'},
{'id': 2, 'name': 'source-2'},
{'id': 3, 'name': 'source-3'},
{'id': 4, 'name': 'source-4'}]}
instance = OneToManyTarget.objects.get(pk=1)
serializer = self.Serializer(instance, data=data)
self.assertTrue(serializer.is_valid())
obj = serializer.save()
self.assertEqual(serializer.data, data)
self.assertEqual(obj.name, 'target-1')
# Ensure source 4 is added, and everything else is as
# expected.
queryset = OneToManyTarget.objects.all()
serializer = self.Serializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'target-1', 'sources': [{'id': 1, 'name': 'source-1'},
{'id': 2, 'name': 'source-2'},
{'id': 3, 'name': 'source-3'},
{'id': 4, 'name': 'source-4'}]}
]
self.assertEqual(serializer.data, expected)
def test_one_to_many_create_with_invalid_data(self):
data = {'id': 1, 'name': 'target-1', 'sources': [{'id': 1, 'name': 'source-1'},
{'id': 2, 'name': 'source-2'},
{'id': 3, 'name': 'source-3'},
{'id': 4}]}
serializer = self.Serializer(data=data)
self.assertFalse(serializer.is_valid())
self.assertEqual(serializer.errors, {'sources': [{}, {}, {}, {'name': ['This field is required.']}]})
def test_one_to_many_update(self):
data = {'id': 1, 'name': 'target-1-updated', 'sources': [{'id': 1, 'name': 'source-1-updated'},
{'id': 2, 'name': 'source-2'},
{'id': 3, 'name': 'source-3'}]}
instance = OneToManyTarget.objects.get(pk=1)
serializer = self.Serializer(instance, data=data)
self.assertTrue(serializer.is_valid())
obj = serializer.save()
self.assertEqual(serializer.data, data)
self.assertEqual(obj.name, 'target-1-updated')
# Ensure (target 1, source 1) are updated,
# and everything else is as expected.
queryset = OneToManyTarget.objects.all()
serializer = self.Serializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'target-1-updated', 'sources': [{'id': 1, 'name': 'source-1-updated'},
{'id': 2, 'name': 'source-2'},
{'id': 3, 'name': 'source-3'}]}
]
self.assertEqual(serializer.data, expected)
def test_one_to_many_delete(self):
data = {'id': 1, 'name': 'target-1', 'sources': [{'id': 1, 'name': 'source-1'},
{'id': 3, 'name': 'source-3'}]}
instance = OneToManyTarget.objects.get(pk=1)
serializer = self.Serializer(instance, data=data)
self.assertTrue(serializer.is_valid())
serializer.save()
# Ensure source 2 is deleted, and everything else is as
# expected.
queryset = OneToManyTarget.objects.all()
serializer = self.Serializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'target-1', 'sources': [{'id': 1, 'name': 'source-1'},
{'id': 3, 'name': 'source-3'}]}
]
self.assertEqual(serializer.data, expected)
| gpl-2.0 |
akaihola/django | tests/modeltests/serializers/tests.py | 6 | 17664 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import json
from datetime import datetime
from xml.dom import minidom
from StringIO import StringIO
from django.conf import settings
from django.core import serializers
from django.db import transaction, connection
from django.test import TestCase, TransactionTestCase, Approximate
from django.utils import unittest
from .models import (Category, Author, Article, AuthorProfile, Actor, Movie,
Score, Player, Team)
class SerializerRegistrationTests(unittest.TestCase):
def setUp(self):
self.old_SERIALIZATION_MODULES = getattr(settings, 'SERIALIZATION_MODULES', None)
self.old_serializers = serializers._serializers
serializers._serializers = {}
settings.SERIALIZATION_MODULES = {
"json2" : "django.core.serializers.json",
}
def tearDown(self):
serializers._serializers = self.old_serializers
if self.old_SERIALIZATION_MODULES:
settings.SERIALIZATION_MODULES = self.old_SERIALIZATION_MODULES
else:
delattr(settings, 'SERIALIZATION_MODULES')
def test_register(self):
"Registering a new serializer populates the full registry. Refs #14823"
serializers.register_serializer('json3', 'django.core.serializers.json')
public_formats = serializers.get_public_serializer_formats()
self.assertIn('json3', public_formats)
self.assertIn('json2', public_formats)
self.assertIn('xml', public_formats)
def test_unregister(self):
"Unregistering a serializer doesn't cause the registry to be repopulated. Refs #14823"
serializers.unregister_serializer('xml')
serializers.register_serializer('json3', 'django.core.serializers.json')
public_formats = serializers.get_public_serializer_formats()
self.assertNotIn('xml', public_formats)
self.assertIn('json3', public_formats)
def test_builtin_serializers(self):
"Requesting a list of serializer formats popuates the registry"
all_formats = set(serializers.get_serializer_formats())
public_formats = set(serializers.get_public_serializer_formats())
self.assertIn('xml', all_formats)
self.assertIn('xml', public_formats)
self.assertIn('json2', all_formats)
self.assertIn('json2', public_formats)
self.assertIn('python', all_formats)
self.assertNotIn('python', public_formats)
class SerializersTestBase(object):
@staticmethod
def _comparison_value(value):
return value
def setUp(self):
sports = Category.objects.create(name="Sports")
music = Category.objects.create(name="Music")
op_ed = Category.objects.create(name="Op-Ed")
self.joe = Author.objects.create(name="Joe")
self.jane = Author.objects.create(name="Jane")
self.a1 = Article(
author=self.jane,
headline="Poker has no place on ESPN",
pub_date=datetime(2006, 6, 16, 11, 00)
)
self.a1.save()
self.a1.categories = [sports, op_ed]
self.a2 = Article(
author=self.joe,
headline="Time to reform copyright",
pub_date=datetime(2006, 6, 16, 13, 00, 11, 345)
)
self.a2.save()
self.a2.categories = [music, op_ed]
def test_serialize(self):
"""Tests that basic serialization works."""
serial_str = serializers.serialize(self.serializer_name,
Article.objects.all())
self.assertTrue(self._validate_output(serial_str))
def test_serializer_roundtrip(self):
"""Tests that serialized content can be deserialized."""
serial_str = serializers.serialize(self.serializer_name,
Article.objects.all())
models = list(serializers.deserialize(self.serializer_name, serial_str))
self.assertEqual(len(models), 2)
def test_altering_serialized_output(self):
"""
Tests the ability to create new objects by
modifying serialized content.
"""
old_headline = "Poker has no place on ESPN"
new_headline = "Poker has no place on television"
serial_str = serializers.serialize(self.serializer_name,
Article.objects.all())
serial_str = serial_str.replace(old_headline, new_headline)
models = list(serializers.deserialize(self.serializer_name, serial_str))
# Prior to saving, old headline is in place
self.assertTrue(Article.objects.filter(headline=old_headline))
self.assertFalse(Article.objects.filter(headline=new_headline))
for model in models:
model.save()
# After saving, new headline is in place
self.assertTrue(Article.objects.filter(headline=new_headline))
self.assertFalse(Article.objects.filter(headline=old_headline))
def test_one_to_one_as_pk(self):
"""
Tests that if you use your own primary key field
(such as a OneToOneField), it doesn't appear in the
serialized field list - it replaces the pk identifier.
"""
profile = AuthorProfile(author=self.joe,
date_of_birth=datetime(1970,1,1))
profile.save()
serial_str = serializers.serialize(self.serializer_name,
AuthorProfile.objects.all())
self.assertFalse(self._get_field_values(serial_str, 'author'))
for obj in serializers.deserialize(self.serializer_name, serial_str):
self.assertEqual(obj.object.pk, self._comparison_value(self.joe.pk))
def test_serialize_field_subset(self):
"""Tests that output can be restricted to a subset of fields"""
valid_fields = ('headline','pub_date')
invalid_fields = ("author", "categories")
serial_str = serializers.serialize(self.serializer_name,
Article.objects.all(),
fields=valid_fields)
for field_name in invalid_fields:
self.assertFalse(self._get_field_values(serial_str, field_name))
for field_name in valid_fields:
self.assertTrue(self._get_field_values(serial_str, field_name))
def test_serialize_unicode(self):
"""Tests that unicode makes the roundtrip intact"""
actor_name = u"Za\u017c\u00f3\u0142\u0107"
movie_title = u'G\u0119\u015bl\u0105 ja\u017a\u0144'
ac = Actor(name=actor_name)
mv = Movie(title=movie_title, actor=ac)
ac.save()
mv.save()
serial_str = serializers.serialize(self.serializer_name, [mv])
self.assertEqual(self._get_field_values(serial_str, "title")[0], movie_title)
self.assertEqual(self._get_field_values(serial_str, "actor")[0], actor_name)
obj_list = list(serializers.deserialize(self.serializer_name, serial_str))
mv_obj = obj_list[0].object
self.assertEqual(mv_obj.title, movie_title)
def test_serialize_superfluous_queries(self):
"""Ensure no superfluous queries are made when serializing ForeignKeys
#17602
"""
ac = Actor(name='Actor name')
ac.save()
mv = Movie(title='Movie title', actor_id=ac.pk)
mv.save()
with self.assertNumQueries(0):
serial_str = serializers.serialize(self.serializer_name, [mv])
def test_serialize_with_null_pk(self):
"""
Tests that serialized data with no primary key results
in a model instance with no id
"""
category = Category(name="Reference")
serial_str = serializers.serialize(self.serializer_name, [category])
pk_value = self._get_pk_values(serial_str)[0]
self.assertFalse(pk_value)
cat_obj = list(serializers.deserialize(self.serializer_name,
serial_str))[0].object
self.assertEqual(cat_obj.id, None)
def test_float_serialization(self):
"""Tests that float values serialize and deserialize intact"""
sc = Score(score=3.4)
sc.save()
serial_str = serializers.serialize(self.serializer_name, [sc])
deserial_objs = list(serializers.deserialize(self.serializer_name,
serial_str))
self.assertEqual(deserial_objs[0].object.score, Approximate(3.4, places=1))
def test_custom_field_serialization(self):
"""Tests that custom fields serialize and deserialize intact"""
team_str = "Spartak Moskva"
player = Player()
player.name = "Soslan Djanaev"
player.rank = 1
player.team = Team(team_str)
player.save()
serial_str = serializers.serialize(self.serializer_name,
Player.objects.all())
team = self._get_field_values(serial_str, "team")
self.assertTrue(team)
self.assertEqual(team[0], team_str)
deserial_objs = list(serializers.deserialize(self.serializer_name, serial_str))
self.assertEqual(deserial_objs[0].object.team.to_string(),
player.team.to_string())
def test_pre_1000ad_date(self):
"""Tests that year values before 1000AD are properly formatted"""
# Regression for #12524 -- dates before 1000AD get prefixed
# 0's on the year
a = Article.objects.create(
author = self.jane,
headline = "Nobody remembers the early years",
pub_date = datetime(1, 2, 3, 4, 5, 6))
serial_str = serializers.serialize(self.serializer_name, [a])
date_values = self._get_field_values(serial_str, "pub_date")
self.assertEqual(date_values[0].replace('T', ' '), "0001-02-03 04:05:06")
def test_pkless_serialized_strings(self):
"""
Tests that serialized strings without PKs
can be turned into models
"""
deserial_objs = list(serializers.deserialize(self.serializer_name,
self.pkless_str))
for obj in deserial_objs:
self.assertFalse(obj.object.id)
obj.save()
self.assertEqual(Category.objects.all().count(), 4)
class SerializersTransactionTestBase(object):
def test_forward_refs(self):
"""
Tests that objects ids can be referenced before they are
defined in the serialization data.
"""
# The deserialization process needs to be contained
# within a transaction in order to test forward reference
# handling.
transaction.enter_transaction_management()
transaction.managed(True)
objs = serializers.deserialize(self.serializer_name, self.fwd_ref_str)
with connection.constraint_checks_disabled():
for obj in objs:
obj.save()
transaction.commit()
transaction.leave_transaction_management()
for model_cls in (Category, Author, Article):
self.assertEqual(model_cls.objects.all().count(), 1)
art_obj = Article.objects.all()[0]
self.assertEqual(art_obj.categories.all().count(), 1)
self.assertEqual(art_obj.author.name, "Agnes")
class XmlSerializerTestCase(SerializersTestBase, TestCase):
serializer_name = "xml"
pkless_str = """<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
<object model="serializers.category">
<field type="CharField" name="name">Reference</field>
</object>
</django-objects>"""
@staticmethod
def _comparison_value(value):
# The XML serializer handles everything as strings, so comparisons
# need to be performed on the stringified value
return unicode(value)
@staticmethod
def _validate_output(serial_str):
try:
minidom.parseString(serial_str)
except Exception:
return False
else:
return True
@staticmethod
def _get_pk_values(serial_str):
ret_list = []
dom = minidom.parseString(serial_str)
fields = dom.getElementsByTagName("object")
for field in fields:
ret_list.append(field.getAttribute("pk"))
return ret_list
@staticmethod
def _get_field_values(serial_str, field_name):
ret_list = []
dom = minidom.parseString(serial_str)
fields = dom.getElementsByTagName("field")
for field in fields:
if field.getAttribute("name") == field_name:
temp = []
for child in field.childNodes:
temp.append(child.nodeValue)
ret_list.append("".join(temp))
return ret_list
class XmlSerializerTransactionTestCase(SerializersTransactionTestBase, TransactionTestCase):
serializer_name = "xml"
fwd_ref_str = """<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
<object pk="1" model="serializers.article">
<field to="serializers.author" name="author" rel="ManyToOneRel">1</field>
<field type="CharField" name="headline">Forward references pose no problem</field>
<field type="DateTimeField" name="pub_date">2006-06-16T15:00:00</field>
<field to="serializers.category" name="categories" rel="ManyToManyRel">
<object pk="1"></object>
</field>
</object>
<object pk="1" model="serializers.author">
<field type="CharField" name="name">Agnes</field>
</object>
<object pk="1" model="serializers.category">
<field type="CharField" name="name">Reference</field></object>
</django-objects>"""
class JsonSerializerTestCase(SerializersTestBase, TestCase):
serializer_name = "json"
pkless_str = """[{"pk": null, "model": "serializers.category", "fields": {"name": "Reference"}}]"""
@staticmethod
def _validate_output(serial_str):
try:
json.loads(serial_str)
except Exception:
return False
else:
return True
@staticmethod
def _get_pk_values(serial_str):
ret_list = []
serial_list = json.loads(serial_str)
for obj_dict in serial_list:
ret_list.append(obj_dict["pk"])
return ret_list
@staticmethod
def _get_field_values(serial_str, field_name):
ret_list = []
serial_list = json.loads(serial_str)
for obj_dict in serial_list:
if field_name in obj_dict["fields"]:
ret_list.append(obj_dict["fields"][field_name])
return ret_list
class JsonSerializerTransactionTestCase(SerializersTransactionTestBase, TransactionTestCase):
serializer_name = "json"
fwd_ref_str = """[
{
"pk": 1,
"model": "serializers.article",
"fields": {
"headline": "Forward references pose no problem",
"pub_date": "2006-06-16T15:00:00",
"categories": [1],
"author": 1
}
},
{
"pk": 1,
"model": "serializers.category",
"fields": {
"name": "Reference"
}
},
{
"pk": 1,
"model": "serializers.author",
"fields": {
"name": "Agnes"
}
}]"""
try:
import yaml
except ImportError:
pass
else:
class YamlSerializerTestCase(SerializersTestBase, TestCase):
serializer_name = "yaml"
fwd_ref_str = """- fields:
headline: Forward references pose no problem
pub_date: 2006-06-16 15:00:00
categories: [1]
author: 1
pk: 1
model: serializers.article
- fields:
name: Reference
pk: 1
model: serializers.category
- fields:
name: Agnes
pk: 1
model: serializers.author"""
pkless_str = """- fields:
name: Reference
pk: null
model: serializers.category"""
@staticmethod
def _validate_output(serial_str):
try:
yaml.safe_load(StringIO(serial_str))
except Exception:
return False
else:
return True
@staticmethod
def _get_pk_values(serial_str):
ret_list = []
stream = StringIO(serial_str)
for obj_dict in yaml.safe_load(stream):
ret_list.append(obj_dict["pk"])
return ret_list
@staticmethod
def _get_field_values(serial_str, field_name):
ret_list = []
stream = StringIO(serial_str)
for obj_dict in yaml.safe_load(stream):
if "fields" in obj_dict and field_name in obj_dict["fields"]:
field_value = obj_dict["fields"][field_name]
# yaml.safe_load will return non-string objects for some
# of the fields we are interested in, this ensures that
# everything comes back as a string
if isinstance(field_value, basestring):
ret_list.append(field_value)
else:
ret_list.append(str(field_value))
return ret_list
class YamlSerializerTransactionTestCase(SerializersTransactionTestBase, TransactionTestCase):
serializer_name = "yaml"
fwd_ref_str = """- fields:
headline: Forward references pose no problem
pub_date: 2006-06-16 15:00:00
categories: [1]
author: 1
pk: 1
model: serializers.article
- fields:
name: Reference
pk: 1
model: serializers.category
- fields:
name: Agnes
pk: 1
model: serializers.author"""
| bsd-3-clause |
goliveirab/odoo | addons/analytic_contract_hr_expense/__init__.py | 432 | 1091 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import analytic_contract_hr_expense
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
fredericmohr/mitro | mitro-mail/build/venv/lib/python2.7/site-packages/psycopg2/__init__.py | 61 | 5838 | """A Python driver for PostgreSQL
psycopg is a PostgreSQL_ database adapter for the Python_ programming
language. This is version 2, a complete rewrite of the original code to
provide new-style classes for connection and cursor objects and other sweet
candies. Like the original, psycopg 2 was written with the aim of being very
small and fast, and stable as a rock.
Homepage: http://initd.org/projects/psycopg2
.. _PostgreSQL: http://www.postgresql.org/
.. _Python: http://www.python.org/
:Groups:
* `Connections creation`: connect
* `Value objects constructors`: Binary, Date, DateFromTicks, Time,
TimeFromTicks, Timestamp, TimestampFromTicks
"""
# psycopg/__init__.py - initialization of the psycopg module
#
# Copyright (C) 2003-2010 Federico Di Gregorio <fog@debian.org>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
# Import modules needed by _psycopg to allow tools like py2exe to do
# their work without bothering about the module dependencies.
# Note: the first internal import should be _psycopg, otherwise the real cause
# of a failed loading of the C module may get hidden, see
# http://archives.postgresql.org/psycopg/2011-02/msg00044.php
# Import the DBAPI-2.0 stuff into top-level module.
from psycopg2._psycopg import BINARY, NUMBER, STRING, DATETIME, ROWID
from psycopg2._psycopg import Binary, Date, Time, Timestamp
from psycopg2._psycopg import DateFromTicks, TimeFromTicks, TimestampFromTicks
from psycopg2._psycopg import Error, Warning, DataError, DatabaseError, ProgrammingError
from psycopg2._psycopg import IntegrityError, InterfaceError, InternalError
from psycopg2._psycopg import NotSupportedError, OperationalError
from psycopg2._psycopg import _connect, apilevel, threadsafety, paramstyle
from psycopg2._psycopg import __version__
from psycopg2 import tz
# Register default adapters.
import psycopg2.extensions as _ext
_ext.register_adapter(tuple, _ext.SQL_IN)
_ext.register_adapter(type(None), _ext.NoneAdapter)
# Register the Decimal adapter here instead of in the C layer.
# This way a new class is registered for each sub-interpreter.
# See ticket #52
try:
from decimal import Decimal
except ImportError:
pass
else:
from psycopg2._psycopg import Decimal as Adapter
_ext.register_adapter(Decimal, Adapter)
del Decimal, Adapter
import re
def _param_escape(s,
re_escape=re.compile(r"([\\'])"),
re_space=re.compile(r'\s')):
"""
Apply the escaping rule required by PQconnectdb
"""
if not s: return "''"
s = re_escape.sub(r'\\\1', s)
if re_space.search(s):
s = "'" + s + "'"
return s
del re
def connect(dsn=None,
database=None, user=None, password=None, host=None, port=None,
connection_factory=None, cursor_factory=None, async=False, **kwargs):
"""
Create a new database connection.
The connection parameters can be specified either as a string:
conn = psycopg2.connect("dbname=test user=postgres password=secret")
or using a set of keyword arguments:
conn = psycopg2.connect(database="test", user="postgres", password="secret")
The basic connection parameters are:
- *dbname*: the database name (only in dsn string)
- *database*: the database name (only as keyword argument)
- *user*: user name used to authenticate
- *password*: password used to authenticate
- *host*: database host address (defaults to UNIX socket if not provided)
- *port*: connection port number (defaults to 5432 if not provided)
Using the *connection_factory* parameter a different class or connections
factory can be specified. It should be a callable object taking a dsn
argument.
Using the *cursor_factory* parameter, a new default cursor factory will be
used by cursor().
Using *async*=True an asynchronous connection will be created.
Any other keyword parameter will be passed to the underlying client
library: the list of supported parameters depends on the library version.
"""
items = []
if database is not None:
items.append(('dbname', database))
if user is not None:
items.append(('user', user))
if password is not None:
items.append(('password', password))
if host is not None:
items.append(('host', host))
if port is not None:
items.append(('port', port))
items.extend([(k, v) for (k, v) in kwargs.iteritems() if v is not None])
if dsn is not None and items:
raise TypeError(
"'%s' is an invalid keyword argument when the dsn is specified"
% items[0][0])
if dsn is None:
if not items:
raise TypeError('missing dsn and no parameters')
else:
dsn = " ".join(["%s=%s" % (k, _param_escape(str(v)))
for (k, v) in items])
conn = _connect(dsn, connection_factory=connection_factory, async=async)
if cursor_factory is not None:
conn.cursor_factory = cursor_factory
return conn
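# Illustrative note: with keyword arguments, connect() first assembles a
# libpq dsn string; e.g. connect(database="test", password="two words")
# builds the dsn "dbname=test password='two words'" before calling _connect().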
| gpl-3.0 |
haginara/jsunpack-n | exampleImport.py | 23 | 4525 | #!/usr/bin/python
'''
Jsunpackn - A generic JavaScript Unpacker Network Edition
Copyright (C) 2010 Blake Hartstein
http://jsunpack.jeek.org/
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
'''
from hashlib import sha1
import datetime
import socket
import jsunpackn
# Error Reporting to /tmp
socket.setdefaulttimeout(10)
class cmdline_filler:
options = {
'timeout':30,
'redoevaltime':1,
'maxruntime':0,
'urlfetch':'',
'configfile':'options.config',
'saveallfiles':True, # for pcaps?
'saveallexes':False,
'quiet':True,
'verbose':True,
'veryverbose':True,
'graphfile':'',
'debug':False,
'active':True,
'interface':'',
'nojs':False,
'log_ips':'./maliciousips.txt',
'pre':'./pre.js',
'post':'./post.js',
'htmlparse':'./htmlparse.config',
'fasteval':False,
'proxy': '',
'currentproxy': '',
}
def __init__(self, inhash):
self.tmpdir = '/tmp' # these temporary files are necessary for decoding, but you can use any path and they will be deleted afterwards
self.logdir = self.outdir = '' # an empty storage filepath means no directory of output files will be created
        self.decoded = ''  # no decoding logfile; otherwise set to self.outdir + '/decoded.log'
for item in self.options:
setattr(self, item, self.options[item])
        # Feel free to hard-code the contents of "rules", "rules.ascii", and "htmlparse.config" in this file instead; the only problem is keeping them updated
fin = open('rules', 'r')
if fin:
self.rules = fin.read()
fin.close()
fin = open('rules.ascii', 'r')
if fin:
self.rulesAscii = fin.read()
fin.close()
if self.options['htmlparse']:
fin = open(self.options['htmlparse'], 'r')
self.htmlparseconfig = fin.read()
fin.close()
def main(userdata):
    '''userdata contains the javascript, html, or pdf data to decode.
    If you'd like to do other things with the results, modify this function.'''
HASH = sha1(str(datetime.datetime.now()) + userdata).hexdigest()
options = cmdline_filler(HASH)
root_of_tree = '' # This can be empty but its sometimes useful to specify a filename here
url_or_name = '/' # This can also be empty but if you have the URL, you'd want to set that here
prevRooturl = {} # This can also be empty but if you want to decode something with more context its useful to keep state
js = jsunpackn.jsunpack(root_of_tree, [url_or_name, userdata, root_of_tree], options, prevRooturl)
for url in js.rooturl: # set all the state variables for printing
js.rooturl[url].seen = {}
results = ''
for url in [js.start]: #recursive
print 'The key %s has the following output in recursive mode' % (url)
results = js.rooturl[url].tostring('', True)[0] + '\n'
print results
print 'Note that none of the files are actually created since self.outdir is empty.'
print 'Instead, you could go through each url and look at the decodings that it creates'
for url in js.rooturl:
print 'Looking at key %s, has %d files and %d messages, that follow:' % (url, len(js.rooturl[url].files), len(js.rooturl[url].msg))
for type, hash, data in js.rooturl[url].files:
print 'file type=%s, hash=%s, data=%d bytes' % (type, hash, len(data))
for printable, impact, msg in js.rooturl[url].msg:
print 'output message printable=%d, impact=%d, msg=%s' % (printable, impact, msg)
if __name__ == "__main__":
main('eval("var a=123;");')
| gpl-2.0 |
mattcongy/itshop | docker-images/taigav2/taiga-back/taiga/timeline/permissions.py | 2 | 1431 | # -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2016 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from taiga.base.api.permissions import TaigaResourcePermission, AllowAny, IsSuperUser
from taiga.permissions.permissions import HasProjectPerm, IsProjectAdmin
class UserTimelinePermission(TaigaResourcePermission):
enought_perms = IsSuperUser()
global_perms = None
retrieve_perms = AllowAny()
class ProjectTimelinePermission(TaigaResourcePermission):
enought_perms = IsProjectAdmin() | IsSuperUser()
global_perms = None
retrieve_perms = HasProjectPerm('view_project')
| mit |
cogeorg/black_rhino | examples/Georg2012/networkx/algorithms/tests/test_euler.py | 32 | 2578 | #!/usr/bin/env python
# run with nose: nosetests -v test_euler.py
from nose.tools import *
import networkx as nx
from networkx import is_eulerian,eulerian_circuit
class TestEuler:
def test_is_eulerian(self):
assert_true(is_eulerian(nx.complete_graph(5)))
assert_true(is_eulerian(nx.complete_graph(7)))
assert_true(is_eulerian(nx.hypercube_graph(4)))
assert_true(is_eulerian(nx.hypercube_graph(6)))
assert_false(is_eulerian(nx.complete_graph(4)))
assert_false(is_eulerian(nx.complete_graph(6)))
assert_false(is_eulerian(nx.hypercube_graph(3)))
assert_false(is_eulerian(nx.hypercube_graph(5)))
assert_false(is_eulerian(nx.petersen_graph()))
assert_false(is_eulerian(nx.path_graph(4)))
def test_is_eulerian2(self):
# not connected
G = nx.Graph()
G.add_nodes_from([1,2,3])
assert_false(is_eulerian(G))
# not strongly connected
G = nx.DiGraph()
G.add_nodes_from([1,2,3])
assert_false(is_eulerian(G))
G = nx.MultiDiGraph()
G.add_edge(1,2)
G.add_edge(2,3)
G.add_edge(2,3)
G.add_edge(3,1)
assert_false(is_eulerian(G))
def test_eulerian_circuit_cycle(self):
G=nx.cycle_graph(4)
edges=list(eulerian_circuit(G,source=0))
nodes=[u for u,v in edges]
assert_equal(nodes,[0,1,2,3])
assert_equal(edges,[(0,1),(1,2),(2,3),(3,0)])
edges=list(eulerian_circuit(G,source=1))
nodes=[u for u,v in edges]
assert_equal(nodes,[1,0,3,2])
assert_equal(edges,[(1,0),(0,3),(3,2),(2,1)])
def test_eulerian_circuit_digraph(self):
G=nx.DiGraph()
G.add_cycle([0,1,2,3])
edges=list(eulerian_circuit(G,source=0))
nodes=[u for u,v in edges]
assert_equal(nodes,[0,1,2,3])
assert_equal(edges,[(0,1),(1,2),(2,3),(3,0)])
edges=list(eulerian_circuit(G,source=1))
nodes=[u for u,v in edges]
assert_equal(nodes,[1,2,3,0])
assert_equal(edges,[(1,2),(2,3),(3,0),(0,1)])
def test_eulerian_circuit_multigraph(self):
G=nx.MultiGraph()
G.add_cycle([0,1,2,3])
G.add_edge(1,2)
G.add_edge(1,2)
edges=list(eulerian_circuit(G,source=0))
nodes=[u for u,v in edges]
assert_equal(nodes,[0,1,2,1,2,3])
assert_equal(edges,[(0,1),(1,2),(2,1),(1,2),(2,3),(3,0)])
@raises(nx.NetworkXError)
def test_not_eulerian(self):
f=list(eulerian_circuit(nx.complete_graph(4)))
| gpl-3.0 |
huchoi/edx-platform | common/lib/xmodule/xmodule/graders.py | 45 | 17335 | import abc
import inspect
import logging
import random
import sys
from collections import namedtuple
log = logging.getLogger("edx.courseware")
# This is a tuple for holding scores, either from problems or sections.
# Section either indicates the name of the problem or the name of the section
Score = namedtuple("Score", "earned possible graded section")
def aggregate_scores(scores, section_name="summary"):
"""
scores: A list of Score objects
returns: A tuple (all_total, graded_total).
all_total: A Score representing the total score summed over all input scores
graded_total: A Score representing the score summed over all graded input scores
"""
total_correct_graded = sum(score.earned for score in scores if score.graded)
total_possible_graded = sum(score.possible for score in scores if score.graded)
total_correct = sum(score.earned for score in scores)
total_possible = sum(score.possible for score in scores)
#regardless of whether or not it is graded
all_total = Score(total_correct,
total_possible,
False,
section_name)
#selecting only graded things
graded_total = Score(total_correct_graded,
total_possible_graded,
True,
section_name)
return all_total, graded_total
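# Illustrative example (assumed values, not from the original file): for
#   scores = [Score(5, 10, True, "HW 1"), Score(3, 4, False, "Practice")]
# aggregate_scores(scores, "Week 1") returns
#   (Score(8, 14, False, "Week 1"), Score(5, 10, True, "Week 1"))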
def invalid_args(func, argdict):
"""
Given a function and a dictionary of arguments, returns a set of arguments
from argdict that aren't accepted by func
"""
args, _, keywords, _ = inspect.getargspec(func)
if keywords:
return set() # All accepted
return set(argdict) - set(args)
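# Illustrative example: given def f(a, b): pass,
#   invalid_args(f, {'a': 1, 'c': 2}) -> set(['c'])   (only 'c' is unaccepted)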
def grader_from_conf(conf):
"""
This creates a CourseGrader from a configuration (such as in course_settings.py).
The conf can simply be an instance of CourseGrader, in which case no work is done.
    More commonly, the conf is a list of dictionaries. A WeightedSubsectionsGrader
    with AssignmentFormatGraders or SingleSectionGraders as subsections will be
    generated. Every dictionary should contain the parameters for making either an
    AssignmentFormatGrader or a SingleSectionGrader, in addition to a 'weight' key.
"""
if isinstance(conf, CourseGrader):
return conf
subgraders = []
for subgraderconf in conf:
subgraderconf = subgraderconf.copy()
weight = subgraderconf.pop("weight", 0)
# NOTE: 'name' used to exist in SingleSectionGrader. We are deprecating SingleSectionGrader
# and converting everything into an AssignmentFormatGrader by adding 'min_count' and
# 'drop_count'. AssignmentFormatGrader does not expect 'name', so if it appears
    # in bad_args, go ahead and remove it (this causes no errors). Eventually, SingleSectionGrader
# should be completely removed.
name = 'name'
try:
if 'min_count' in subgraderconf:
#This is an AssignmentFormatGrader
subgrader_class = AssignmentFormatGrader
elif name in subgraderconf:
#This is an SingleSectionGrader
subgrader_class = SingleSectionGrader
else:
raise ValueError("Configuration has no appropriate grader class.")
bad_args = invalid_args(subgrader_class.__init__, subgraderconf)
# See note above concerning 'name'.
if bad_args.issuperset({name}):
bad_args = bad_args - {name}
del subgraderconf[name]
if len(bad_args) > 0:
log.warning("Invalid arguments for a subgrader: %s", bad_args)
for key in bad_args:
del subgraderconf[key]
subgrader = subgrader_class(**subgraderconf)
subgraders.append((subgrader, subgrader.category, weight))
except (TypeError, ValueError) as error:
# Add info and re-raise
msg = ("Unable to parse grader configuration:\n " +
str(subgraderconf) +
"\n Error was:\n " + str(error))
raise ValueError(msg), None, sys.exc_info()[2]
return WeightedSubsectionsGrader(subgraders)
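# Illustrative sketch of a configuration accepted above (field values are
# assumptions, not taken from a real course):
#   grader_from_conf([
#       {'type': 'Homework', 'min_count': 12, 'drop_count': 2, 'weight': 0.15},
#       {'type': 'Final', 'name': 'Final Exam', 'weight': 0.40},
#   ])
# The first dict yields an AssignmentFormatGrader, the second a
# SingleSectionGrader, combined in a WeightedSubsectionsGrader.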
class CourseGrader(object):
"""
A course grader takes the totaled scores for each graded section (that a student has
started) in the course. From these scores, the grader calculates an overall percentage
grade. The grader should also generate information about how that score was calculated,
to be displayed in graphs or charts.
A grader has one required method, grade(), which is passed a grade_sheet. The grade_sheet
    contains scores for all graded sections that the student has started. If a student has
a score of 0 for that section, it may be missing from the grade_sheet. The grade_sheet
is keyed by section format. Each value is a list of Score namedtuples for each section
that has the matching section format.
The grader outputs a dictionary with the following keys:
- percent: Contains a float value, which is the final percentage score for the student.
- section_breakdown: This is a list of dictionaries which provide details on sections
that were graded. These are used for display in a graph or chart. The format for a
section_breakdown dictionary is explained below.
- grade_breakdown: This is a list of dictionaries which provide details on the contributions
of the final percentage grade. This is a higher level breakdown, for when the grade is constructed
of a few very large sections (such as Homeworks, Labs, a Midterm, and a Final). The format for
a grade_breakdown is explained below. This section is optional.
A dictionary in the section_breakdown list has the following keys:
percent: A float percentage for the section.
label: A short string identifying the section. Preferably fixed-length. E.g. "HW 3".
detail: A string explanation of the score. E.g. "Homework 1 - Ohms Law - 83% (5/6)"
category: A string identifying the category. Items with the same category are grouped together
in the display (for example, by color).
prominent: A boolean value indicating that this section should be displayed as more prominent
than other items.
A dictionary in the grade_breakdown list has the following keys:
percent: A float percentage in the breakdown. All percents should add up to the final percentage.
detail: A string explanation of this breakdown. E.g. "Homework - 10% of a possible 15%"
category: A string identifying the category. Items with the same category are grouped together
in the display (for example, by color).
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def grade(self, grade_sheet, generate_random_scores=False):
'''Given a grade sheet, return a dict containing grading information'''
raise NotImplementedError
class WeightedSubsectionsGrader(CourseGrader):
"""
This grader takes a list of tuples containing (grader, category_name, weight) and computes
a final grade by totalling the contribution of each sub grader and multiplying it by the
given weight. For example, the sections may be
[ (homeworkGrader, "Homework", 0.15), (labGrader, "Labs", 0.15), (midtermGrader, "Midterm", 0.30),
(finalGrader, "Final", 0.40) ]
All items in section_breakdown for each subgrader will be combined. A grade_breakdown will be
composed using the score from each grader.
    Note that the sum of the weights is not taken into consideration. If the weights add up to
a value > 1, the student may end up with a percent > 100%. This allows for sections that
are extra credit.
"""
def __init__(self, sections):
self.sections = sections
def grade(self, grade_sheet, generate_random_scores=False):
total_percent = 0.0
section_breakdown = []
grade_breakdown = []
for subgrader, category, weight in self.sections:
subgrade_result = subgrader.grade(grade_sheet, generate_random_scores)
weighted_percent = subgrade_result['percent'] * weight
section_detail = u"{0} = {1:.2%} of a possible {2:.2%}".format(category, weighted_percent, weight)
total_percent += weighted_percent
section_breakdown += subgrade_result['section_breakdown']
grade_breakdown.append({'percent': weighted_percent, 'detail': section_detail, 'category': category})
return {'percent': total_percent,
'section_breakdown': section_breakdown,
'grade_breakdown': grade_breakdown}
class SingleSectionGrader(CourseGrader):
"""
This grades a single section with the format 'type' and the name 'name'.
    If the name is not appropriate for the short_label or category, they each may
be specified individually.
"""
def __init__(self, type, name, short_label=None, category=None):
self.type = type
self.name = name
self.short_label = short_label or name
self.category = category or name
def grade(self, grade_sheet, generate_random_scores=False):
found_score = None
if self.type in grade_sheet:
for score in grade_sheet[self.type]:
if score.section == self.name:
found_score = score
break
if found_score or generate_random_scores:
if generate_random_scores: # for debugging!
earned = random.randint(2, 15)
possible = random.randint(earned, 15)
else: # We found the score
earned = found_score.earned
possible = found_score.possible
percent = earned / float(possible)
detail = u"{name} - {percent:.0%} ({earned:.3n}/{possible:.3n})".format(
name=self.name,
percent=percent,
earned=float(earned),
possible=float(possible)
)
else:
percent = 0.0
detail = u"{name} - 0% (?/?)".format(name=self.name)
breakdown = [{'percent': percent, 'label': self.short_label,
'detail': detail, 'category': self.category, 'prominent': True}]
return {'percent': percent,
'section_breakdown': breakdown,
#No grade_breakdown here
}
class AssignmentFormatGrader(CourseGrader):
"""
Grades all sections matching the format 'type' with an equal weight. A specified
number of lowest scores can be dropped from the calculation. The minimum number of
sections in this format must be specified (even if those sections haven't been
written yet).
min_count defines how many assignments are expected throughout the course. Placeholder
scores (of 0) will be inserted if the number of matching sections in the course is < min_count.
    If the number of matching sections in the course is > min_count, min_count will be ignored.
show_only_average is to suppress the display of each assignment in this grader and instead
only show the total score of this grader in the breakdown.
hide_average is to suppress the display of the total score in this grader and instead
only show each assignment in this grader in the breakdown.
If there is only a single assignment in this grader, then it acts like a SingleSectionGrader
and returns only one entry for the grader. Since the assignment and the total are the same,
the total is returned but is not labeled as an average.
category should be presentable to the user, but may not appear. When the grade breakdown is
displayed, scores from the same category will be similar (for example, by color).
section_type is a string that is the type of a singular section. For example, for Labs it
would be "Lab". This defaults to be the same as category.
short_label is similar to section_type, but shorter. For example, for Homework it would be
"HW".
starting_index is the first number that will appear. For example, starting_index=3 and
min_count = 2 would produce the labels "Assignment 3", "Assignment 4"
"""
def __init__(self, type, min_count, drop_count, category=None, section_type=None, short_label=None,
show_only_average=False, hide_average=False, starting_index=1):
self.type = type
self.min_count = min_count
self.drop_count = drop_count
self.category = category or self.type
self.section_type = section_type or self.type
self.short_label = short_label or self.type
self.show_only_average = show_only_average
self.starting_index = starting_index
self.hide_average = hide_average
def grade(self, grade_sheet, generate_random_scores=False):
def total_with_drops(breakdown, drop_count):
'''calculates total score for a section while dropping lowest scores'''
#create an array of tuples with (index, mark), sorted by mark['percent'] descending
sorted_breakdown = sorted(enumerate(breakdown), key=lambda x: -x[1]['percent'])
# A list of the indices of the dropped scores
dropped_indices = []
if drop_count > 0:
dropped_indices = [x[0] for x in sorted_breakdown[-drop_count:]]
aggregate_score = 0
for index, mark in enumerate(breakdown):
if index not in dropped_indices:
aggregate_score += mark['percent']
if (len(breakdown) - drop_count > 0):
aggregate_score /= len(breakdown) - drop_count
return aggregate_score, dropped_indices
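        # Illustrative numbers: for percents [0.9, 0.5, 1.0] and drop_count=1,
        # the 0.5 entry is dropped and the helper above returns
        # ((0.9 + 1.0) / 2, [1]) == (0.95, [1]).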
#Figure the homework scores
scores = grade_sheet.get(self.type, [])
breakdown = []
for i in range(max(self.min_count, len(scores))):
if i < len(scores) or generate_random_scores:
if generate_random_scores: # for debugging!
earned = random.randint(2, 15)
possible = random.randint(earned, 15)
section_name = "Generated"
else:
earned = scores[i].earned
possible = scores[i].possible
section_name = scores[i].section
percentage = earned / float(possible)
summary_format = u"{section_type} {index} - {name} - {percent:.0%} ({earned:.3n}/{possible:.3n})"
summary = summary_format.format(
index=i + self.starting_index,
section_type=self.section_type,
name=section_name,
percent=percentage,
earned=float(earned),
possible=float(possible)
)
else:
percentage = 0
summary = u"{section_type} {index} Unreleased - 0% (?/?)".format(
index=i + self.starting_index,
section_type=self.section_type
)
short_label = u"{short_label} {index:02d}".format(
index=i + self.starting_index,
short_label=self.short_label
)
breakdown.append({'percent': percentage, 'label': short_label,
'detail': summary, 'category': self.category})
total_percent, dropped_indices = total_with_drops(breakdown, self.drop_count)
for dropped_index in dropped_indices:
breakdown[dropped_index]['mark'] = {'detail': u"The lowest {drop_count} {section_type} scores are dropped."
.format(drop_count=self.drop_count, section_type=self.section_type)}
if len(breakdown) == 1:
# if there is only one entry in a section, suppress the existing individual entry and the average,
# and just display a single entry for the section. That way it acts automatically like a
# SingleSectionGrader.
total_detail = u"{section_type} = {percent:.0%}".format(percent=total_percent,
section_type=self.section_type)
total_label = u"{short_label}".format(short_label=self.short_label)
breakdown = [{'percent': total_percent, 'label': total_label,
'detail': total_detail, 'category': self.category, 'prominent': True}, ]
else:
total_detail = u"{section_type} Average = {percent:.0%}".format(
percent=total_percent,
section_type=self.section_type
)
total_label = u"{short_label} Avg".format(short_label=self.short_label)
if self.show_only_average:
breakdown = []
if not self.hide_average:
breakdown.append({'percent': total_percent, 'label': total_label,
'detail': total_detail, 'category': self.category, 'prominent': True})
return {'percent': total_percent,
'section_breakdown': breakdown,
#No grade_breakdown here
}
| agpl-3.0 |
BartoszCichecki/onlinepython | onlinepython/pypy-2.4.0-win32/lib_pypy/_functools.py | 1 | 1573 | """ Supplies the internal functions for functools.py in the standard library """
# reduce() has moved to _functools in Python 2.6+.
reduce = reduce
class partial(object):
"""
partial(func, *args, **keywords) - new function with partial application
of the given arguments and keywords.
"""
def __init__(self, func, *args, **keywords):
if not callable(func):
raise TypeError("the first argument must be callable")
self._func = func
self._args = args
self._keywords = keywords or None
def __delattr__(self, key):
if key == '__dict__':
raise TypeError("a partial object's dictionary may not be deleted")
object.__delattr__(self, key)
@property
def func(self):
return self._func
@property
def args(self):
return self._args
@property
def keywords(self):
return self._keywords
def __call__(self, *fargs, **fkeywords):
if self.keywords is not None:
fkeywords = dict(self.keywords, **fkeywords)
return self.func(*(self.args + fargs), **fkeywords)
def __reduce__(self):
d = dict((k, v) for k, v in self.__dict__.iteritems() if k not in
('_func', '_args', '_keywords'))
if len(d) == 0:
d = None
return (type(self), (self.func,),
(self.func, self.args, self.keywords, d))
def __setstate__(self, state):
self._func, self._args, self._keywords, d = state
if d is not None:
self.__dict__.update(d)
| gpl-2.0 |
proudlygeek/proudlygeek-blog | markdown/extensions/def_list.py | 126 | 3192 | #!/usr/bin/env Python
"""
Definition List Extension for Python-Markdown
=============================================
Added parsing of Definition Lists to Python-Markdown.
A simple example:
    Apple
    :   Pomaceous fruit of plants of the genus Malus in
        the family Rosaceae.
    :   An American computer company.
    Orange
    :   The fruit of an evergreen tree of the genus Citrus.
Copyright 2008 - [Waylan Limberg](http://achinghead.com)
"""
import markdown, re
from markdown import etree
class DefListProcessor(markdown.blockprocessors.BlockProcessor):
""" Process Definition Lists. """
RE = re.compile(r'(^|\n)[ ]{0,3}:[ ]{1,3}(.*?)(\n|$)')
def test(self, parent, block):
return bool(self.RE.search(block))
def run(self, parent, blocks):
block = blocks.pop(0)
m = self.RE.search(block)
terms = [l.strip() for l in block[:m.start()].split('\n') if l.strip()]
d, theRest = self.detab(block[m.end():])
if d:
d = '%s\n%s' % (m.group(2), d)
else:
d = m.group(2)
#import ipdb; ipdb.set_trace()
sibling = self.lastChild(parent)
if not terms and sibling.tag == 'p':
# The previous paragraph contains the terms
state = 'looselist'
terms = sibling.text.split('\n')
parent.remove(sibling)
            # Acquire new sibling
sibling = self.lastChild(parent)
else:
state = 'list'
if sibling and sibling.tag == 'dl':
# This is another item on an existing list
dl = sibling
if len(dl) and dl[-1].tag == 'dd' and len(dl[-1]):
state = 'looselist'
else:
# This is a new list
dl = etree.SubElement(parent, 'dl')
# Add terms
for term in terms:
dt = etree.SubElement(dl, 'dt')
dt.text = term
# Add definition
self.parser.state.set(state)
dd = etree.SubElement(dl, 'dd')
self.parser.parseBlocks(dd, [d])
self.parser.state.reset()
if theRest:
blocks.insert(0, theRest)
class DefListIndentProcessor(markdown.blockprocessors.ListIndentProcessor):
""" Process indented children of definition list items. """
ITEM_TYPES = ['dd']
LIST_TYPES = ['dl']
    def create_item(self, parent, block):
""" Create a new dd and parse the block with it as the parent. """
dd = markdown.etree.SubElement(parent, 'dd')
self.parser.parseBlocks(dd, [block])
class DefListExtension(markdown.Extension):
""" Add definition lists to Markdown. """
def extendMarkdown(self, md, md_globals):
""" Add an instance of DefListProcessor to BlockParser. """
md.parser.blockprocessors.add('defindent',
DefListIndentProcessor(md.parser),
'>indent')
md.parser.blockprocessors.add('deflist',
DefListProcessor(md.parser),
'>ulist')
def makeExtension(configs={}):
return DefListExtension(configs=configs)
| mit |
mluo613/osf.io | scripts/migration/migrate_deleted_wikis.py | 18 | 1596 | # -*- coding: utf-8 -*-
import logging
from copy import deepcopy
import sys
from modularodm import Q
from framework.transactions.context import TokuTransaction
from website.app import init_app
from website.models import NodeLog
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
def get_targets():
return NodeLog.find(Q('action', 'eq', NodeLog.WIKI_DELETED))
def migrate(targets, dry_run=True):
# iterate over targets
count = 0
for log in targets:
node = log.node
logger.debug('Checking node {}'.format(node._id))
versions = node.wiki_pages_versions
original_versions = deepcopy(versions)
current = node.wiki_pages_current
updated_versions = {}
for wiki in versions:
if wiki in current:
updated_versions[wiki] = versions[wiki]
if original_versions != updated_versions:
logger.info('Updating wiki_pages_versions from {} to {}'.format(original_versions, updated_versions))
node.wiki_pages_versions = updated_versions
node.save()
count += 1
logger.info('Migrated {} nodes'.format(count))
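# Illustrative note (assumed data): if wiki_pages_versions is
# {'home': [v1, v2], 'scratch': [v1]} and wiki_pages_current only lists
# 'home', migrate() rebuilds the versions dict as {'home': [v1, v2]},
# dropping the entry for the deleted 'scratch' wiki.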
def main():
dry_run = False
if '--dry' in sys.argv:
dry_run = True
if not dry_run:
script_utils.add_file_logger(logger, __file__)
init_app(set_backends=True, routes=False)
with TokuTransaction():
migrate(targets=get_targets(), dry_run=dry_run)
if dry_run:
raise RuntimeError('Dry run, transaction rolled back.')
if __name__ == '__main__':
main()
| apache-2.0 |
kou/zulip | zerver/webhooks/beeminder/tests.py | 3 | 1441 | from typing import Any
from unittest.mock import patch
from zerver.lib.test_classes import WebhookTestCase
class BeeminderHookTests(WebhookTestCase):
STREAM_NAME = 'beeminder'
URL_TEMPLATE = "/api/v1/external/beeminder?api_key={api_key}&stream={stream}"
FIXTURE_DIR_NAME = "beeminder"
@patch('zerver.webhooks.beeminder.view.time.time')
def test_beeminder_derail(self, time: Any) -> None:
time.return_value = 1517739100 # 5.6 hours from fixture value
expected_topic = "beekeeper"
expected_message = """
You are going to derail from goal **gainweight** in **5.6 hours**. You need **+2 in 7 days (60)** to avoid derailing.
* Pledge: **0$** :relieved:
""".strip()
self.check_webhook(
"derail",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
@patch('zerver.webhooks.beeminder.view.time.time')
def test_beeminder_derail_worried(self, time: Any) -> None:
time.return_value = 1517739100 # 5.6 hours from fixture value
expected_topic = "beekeeper"
expected_message = """
You are going to derail from goal **gainweight** in **5.6 hours**. You need **+2 in 7 days (60)** to avoid derailing.
* Pledge: **5$** :worried:
""".strip()
self.check_webhook(
"derail_worried", expected_topic, expected_message, content_type="application/json"
)
| apache-2.0 |
joomel1/phantomjs | src/qt/qtwebkit/Tools/QueueStatusServer/handlers/statusbubble_unittest.py | 128 | 2482 | # Copyright (C) 2010 Google, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Research in Motion Ltd. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from handlers.statusbubble import StatusBubble
from model.queues import Queue
class MockAttachment(object):
def __init__(self):
self.id = 1
def status_for_queue(self, queue):
return None
def position_in_queue(self, queue):
return 1
class StatusBubbleTest(unittest.TestCase):
def test_build_bubble(self):
bubble = StatusBubble()
queue = Queue("mac-ews")
attachment = MockAttachment()
bubble_dict = bubble._build_bubble(queue, attachment, 1)
# FIXME: assertDictEqual (in Python 2.7) would be better to use here.
self.assertEqual(bubble_dict["name"], "mac")
self.assertEqual(bubble_dict["attachment_id"], 1)
self.assertEqual(bubble_dict["queue_position"], 1)
self.assertEqual(bubble_dict["state"], "none")
self.assertEqual(bubble_dict["status"], None)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
dholbach/snapcraft | integration_tests/test_tour.py | 9 | 1727 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import integration_tests
class TourTestCase(integration_tests.TestCase):
def test_tour_no_arg(self):
output = self.run_snapcraft('tour')
self.assertTrue(os.path.isdir('snapcraft-tour'),
'Tour directory was created')
self.assertIn('Snapcraft tour initialized in ./snapcraft-tour/',
output)
def test_tour_with_relative_dir(self):
dest_dir = 'foo'
output = self.run_snapcraft(['tour', dest_dir])
self.assertTrue(os.path.isdir(dest_dir),
'Tour directory was created')
self.assertIn('Snapcraft tour initialized in {}'.format(dest_dir),
output)
def test_tour_with_absolute_dir(self):
dest_dir = os.path.abspath('foo')
output = self.run_snapcraft(['tour', dest_dir])
self.assertTrue(os.path.isdir(dest_dir),
'Tour directory was created')
self.assertIn('Snapcraft tour initialized in {}'.format(dest_dir),
output)
| gpl-3.0 |
yan12125/youtube-dl | youtube_dl/extractor/xminus.py | 87 | 2937 | # coding: utf-8
from __future__ import unicode_literals
import re
import time
from .common import InfoExtractor
from ..compat import (
compat_ord,
)
from ..utils import (
int_or_none,
parse_duration,
)
class XMinusIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?x-minus\.org/track/(?P<id>[0-9]+)'
_TEST = {
'url': 'http://x-minus.org/track/4542/%D0%BF%D0%B5%D1%81%D0%B5%D0%BD%D0%BA%D0%B0-%D1%88%D0%BE%D1%84%D0%B5%D1%80%D0%B0.html',
'md5': '401a15f2d2dcf6d592cb95528d72a2a8',
'info_dict': {
'id': '4542',
'ext': 'mp3',
'title': 'Леонид Агутин-Песенка шофёра',
'duration': 156,
'tbr': 320,
'filesize_approx': 5900000,
'view_count': int,
'description': 'md5:03238c5b663810bc79cf42ef3c03e371',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
artist = self._html_search_regex(
r'<a[^>]+href="/artist/\d+">([^<]+)</a>', webpage, 'artist')
title = artist + '-' + self._html_search_regex(
r'<span[^>]+class="minustrack-full-title(?:\s+[^"]+)?"[^>]*>([^<]+)', webpage, 'title')
duration = parse_duration(self._html_search_regex(
r'<span[^>]+class="player-duration(?:\s+[^"]+)?"[^>]*>([^<]+)',
webpage, 'duration', fatal=False))
mobj = re.search(
r'<div[^>]+class="dw-info(?:\s+[^"]+)?"[^>]*>(?P<tbr>\d+)\s*кбит/c\s+(?P<filesize>[0-9.]+)\s*мб</div>',
webpage)
tbr = filesize_approx = None
if mobj:
filesize_approx = float(mobj.group('filesize')) * 1000000
tbr = float(mobj.group('tbr'))
view_count = int_or_none(self._html_search_regex(
r'<span><[^>]+class="icon-chart-bar".*?>(\d+)</span>',
webpage, 'view count', fatal=False))
description = self._html_search_regex(
r'(?s)<pre[^>]+id="lyrics-original"[^>]*>(.*?)</pre>',
webpage, 'song lyrics', fatal=False)
if description:
description = re.sub(' *\r *', '\n', description)
k = self._search_regex(
r'<div[^>]+id="player-bottom"[^>]+data-k="([^"]+)">', webpage,
'encoded data')
h = time.time() / 3600
a = sum(map(int, [compat_ord(c) for c in k])) + int(video_id) + h
video_url = 'http://x-minus.me/dl/minus?id=%s&tkn2=%df%d' % (video_id, a, h)
return {
'id': video_id,
'title': title,
'url': video_url,
# The extension is unknown until actual downloading
'ext': 'mp3',
'duration': duration,
'filesize_approx': filesize_approx,
'tbr': tbr,
'view_count': view_count,
'description': description,
}
| unlicense |
mwalli/spark-cloudant | sql-cloudant/examples/python/CloudantApp.py | 2 | 2111 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyspark.sql import SparkSession
spark = SparkSession\
.builder\
.appName("Cloudant Spark SQL Example in Python using temp tables")\
.config("cloudant.host","ACCOUNT.cloudant.com")\
.config("cloudant.username", "USERNAME")\
.config("cloudant.password","PASSWORD")\
.getOrCreate()
# ***1. Loading temp table from Cloudant db
spark.sql(" CREATE TEMPORARY TABLE airportTable USING org.apache.bahir.cloudant OPTIONS ( database 'n_airportcodemapping')")
airportData = spark.sql("SELECT _id, airportName FROM airportTable WHERE _id >= 'CAA' AND _id <= 'GAA' ORDER BY _id")
airportData.printSchema()
print ('Total # of rows in airportData: ' + str(airportData.count()))
for code in airportData.collect():
print (code._id)
# ***2. Loading temp table from Cloudant search index
print ('About to test org.apache.bahir.cloudant for flight with index')
spark.sql(" CREATE TEMPORARY TABLE flightTable1 USING org.apache.bahir.cloudant OPTIONS ( database 'n_flight', index '_design/view/_search/n_flights')")
flightData = spark.sql("SELECT flightSegmentId, scheduledDepartureTime FROM flightTable1 WHERE flightSegmentId >'AA9' AND flightSegmentId<'AA95'")
flightData.printSchema()
for code in flightData.collect():
print ('Flight {0} on {1}'.format(code.flightSegmentId, code.scheduledDepartureTime))
| apache-2.0 |
wanderine/nipype | nipype/interfaces/fsl/tests/test_auto_TractSkeleton.py | 9 | 1520 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.fsl.dti import TractSkeleton
def test_TractSkeleton_inputs():
input_map = dict(alt_data_file=dict(argstr='-a %s',
),
alt_skeleton=dict(argstr='-s %s',
),
args=dict(argstr='%s',
),
data_file=dict(),
distance_map=dict(),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file=dict(argstr='-i %s',
mandatory=True,
),
output_type=dict(),
project_data=dict(argstr='-p %.3f %s %s %s %s',
requires=['threshold', 'distance_map', 'data_file'],
),
projected_data=dict(),
search_mask_file=dict(xor=['use_cingulum_mask'],
),
skeleton_file=dict(argstr='-o %s',
),
terminal_output=dict(nohash=True,
),
threshold=dict(),
use_cingulum_mask=dict(usedefault=True,
xor=['search_mask_file'],
),
)
inputs = TractSkeleton.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_TractSkeleton_outputs():
output_map = dict(projected_data=dict(),
skeleton_file=dict(),
)
outputs = TractSkeleton.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| bsd-3-clause |
DavidLP/home-assistant | homeassistant/components/gtt/sensor.py | 7 | 3076 | """Sensor to get GTT's timetable for a stop."""
import logging
from datetime import timedelta, datetime
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import DEVICE_CLASS_TIMESTAMP
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_STOP = 'stop'
CONF_BUS_NAME = 'bus_name'
ICON = 'mdi:train'
SCAN_INTERVAL = timedelta(minutes=2)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_STOP): cv.string,
vol.Optional(CONF_BUS_NAME): cv.string
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Gtt platform."""
stop = config[CONF_STOP]
bus_name = config.get(CONF_BUS_NAME)
add_entities([GttSensor(stop, bus_name)], True)
class GttSensor(Entity):
"""Representation of a Gtt Sensor."""
def __init__(self, stop, bus_name):
"""Initialize the Gtt sensor."""
self.data = GttData(stop, bus_name)
self._state = None
self._name = 'Stop {}'.format(stop)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Return the icon of the sensor."""
return ICON
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def device_class(self):
"""Return the device class."""
return DEVICE_CLASS_TIMESTAMP
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
attr = {
'bus_name': self.data.state_bus['bus_name']
}
return attr
def update(self):
"""Update device state."""
self.data.get_data()
next_time = get_datetime(self.data.state_bus)
self._state = next_time.isoformat()
class GttData:
"""Inteface to PyGTT."""
def __init__(self, stop, bus_name):
"""Initialize the GttData class."""
from pygtt import PyGTT
self._pygtt = PyGTT()
self._stop = stop
self._bus_name = bus_name
self.bus_list = {}
self.state_bus = {}
def get_data(self):
"""Get the data from the api."""
self.bus_list = self._pygtt.get_by_stop(self._stop)
self.bus_list.sort(key=get_datetime)
if self._bus_name is not None:
self.state_bus = self.get_bus_by_name()
return
self.state_bus = self.bus_list[0]
def get_bus_by_name(self):
"""Get the bus by name."""
for bus in self.bus_list:
if bus['bus_name'] == self._bus_name:
return bus
def get_datetime(bus):
"""Get the datetime from a bus."""
bustime = datetime.strptime(bus['time'][0]['run'], "%H:%M")
now = datetime.now()
bustime = bustime.replace(year=now.year, month=now.month, day=now.day)
if bustime < now:
bustime = bustime + timedelta(days=1)
return bustime
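# Illustrative note (assumed clock time): if "now" is 23:00 and a bus runs
# at "00:15", the parsed datetime falls before now, so get_datetime() rolls
# it forward one day to 00:15 tomorrow.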
| apache-2.0 |
tizzybec/mapbox-studio | scripts/generate-s3-listing.py | 9 | 1031 | #!/usr/bin/env python
import fileinput
import os
base_url = 'https://mapbox.s3.amazonaws.com/mapbox-studio'
html_start = '''
<html>
<head><title>Mapbox Studio downloads</title></head>
<body bgcolor="white">
<h1>Mapbox Studio downloads</h1><hr><pre>
'''
html_end = '''
</pre><hr></body>
</html>
'''
line_template = '%(date)s %(time)s %(size)s <a href="%(name)s">%(name)s</a> build: <a href="https://github.com/mapbox/mapbox-studio/commit/%(gitSHA)s">%(gitSHA)s</a>'
print html_start
downloads = {}
for line in fileinput.input():
stripped = line.strip()
if 'mapbox-studio-' in stripped:
parts = stripped.split(' ')
date = parts[0]
time = parts[1]
size = '%s MB' % (int(parts[3])/1000000)
name = parts[4]
gitSHA = '-'.join(os.path.splitext(name)[0].split('-')[4:])
downloads[date+name] = line_template % (locals())
sorted_by_date = sorted(downloads, key=lambda key: downloads[key], reverse=True)
for dl in sorted_by_date: print downloads[dl]
print html_end | bsd-3-clause |
openstack/ironic | ironic/tests/unit/drivers/modules/intel_ipmi/test_management.py | 1 | 4164 | # Copyright 2015, Cisco Systems.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from ironic.common import exception
from ironic.conductor import task_manager
from ironic.drivers.modules import ipmitool
from ironic.tests.unit.drivers.modules.intel_ipmi import base
class IntelIPMIManagementTestCase(base.IntelIPMITestCase):
def test_configure_intel_speedselect_empty(self):
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(
exception.InvalidParameterValue,
task.driver.management.configure_intel_speedselect,
task)
@mock.patch.object(ipmitool, "send_raw", spec_set=True,
autospec=True)
def test_configure_intel_speedselect(self, send_raw_mock):
send_raw_mock.return_value = [None, None]
config = {"intel_speedselect_config": "0x02", "socket_count": 1}
with task_manager.acquire(self.context, self.node.uuid) as task:
ret = task.driver.management.configure_intel_speedselect(task,
**config)
self.assertIsNone(ret)
send_raw_mock.assert_called_once_with(task,
'0x2c 0x41 0x04 0x00 0x00 0x02')
@mock.patch.object(ipmitool, "send_raw", spec_set=True,
autospec=True)
def test_configure_intel_speedselect_more_socket(self, send_raw_mock):
send_raw_mock.return_value = [None, None]
config = {"intel_speedselect_config": "0x02", "socket_count": 4}
with task_manager.acquire(self.context, self.node.uuid) as task:
ret = task.driver.management.configure_intel_speedselect(task,
**config)
self.assertIsNone(ret)
self.assertEqual(send_raw_mock.call_count, 4)
calls = [
mock.call(task, '0x2c 0x41 0x04 0x00 0x00 0x02'),
mock.call(task, '0x2c 0x41 0x04 0x00 0x01 0x02'),
mock.call(task, '0x2c 0x41 0x04 0x00 0x02 0x02'),
mock.call(task, '0x2c 0x41 0x04 0x00 0x03 0x02')
]
send_raw_mock.assert_has_calls(calls)
@mock.patch.object(ipmitool, "send_raw", spec_set=True,
autospec=True)
def test_configure_intel_speedselect_error(self, send_raw_mock):
send_raw_mock.side_effect = exception.IPMIFailure('err')
config = {"intel_speedselect_config": "0x02", "socket_count": 1}
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaisesRegex(
exception.IPMIFailure,
"Failed to set Intel SST-PP configuration",
task.driver.management.configure_intel_speedselect,
task, **config)
def test_configure_intel_speedselect_invalid_input(self):
config = {"intel_speedselect_config": "0", "socket_count": 1}
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(
exception.InvalidParameterValue,
task.driver.management.configure_intel_speedselect,
task, **config)
for value in (-1, None):
config = {"intel_speedselect_config": "0x00",
"socket_count": value}
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(
exception.InvalidParameterValue,
task.driver.management.configure_intel_speedselect,
task, **config)
| apache-2.0 |
factorlibre/purchase-workflow | purchase_request_to_rfq/models/purchase_order.py | 5 | 4365 | # -*- coding: utf-8 -*-
# © 2015 Eficent Business and IT Consulting Services S.L.
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from openerp import _, api, exceptions, fields, models
class PurchaseOrder(models.Model):
_inherit = "purchase.order"
@api.model
def _purchase_request_confirm_message_content(self, po, request,
request_dict):
if not request_dict:
request_dict = {}
title = _('Order confirmation %s for your Request %s') % (
po.name, request.name)
message = '<h3>%s</h3><ul>' % title
message += _('The following requested items from Purchase Request %s '
'have now been confirmed in Purchase Order %s:') % (
request.name, po.name)
for line in request_dict.values():
message += _(
'<li><b>%s</b>: Ordered quantity %s %s, Planned date %s</li>'
) % (line['name'],
line['product_qty'],
line['product_uom'],
line['date_planned'],
)
message += '</ul>'
return message
@api.multi
def _purchase_request_confirm_message(self):
request_obj = self.env['purchase.request']
for po in self:
requests_dict = {}
for line in po.order_line:
for request_line in line.purchase_request_lines:
request_id = request_line.request_id.id
if request_id not in requests_dict:
requests_dict[request_id] = {}
date_planned = "%s" % line.date_planned
data = {
'name': request_line.name,
'product_qty': line.product_qty,
'product_uom': line.product_uom.name,
'date_planned': date_planned,
}
requests_dict[request_id][request_line.id] = data
for request_id in requests_dict:
request = request_obj.browse(request_id)
message = self._purchase_request_confirm_message_content(
po, request, requests_dict[request_id])
request.message_post(body=message, subtype='mail.mt_comment')
return True
@api.multi
def _purchase_request_line_check(self):
for po in self:
for line in po.order_line:
for request_line in line.purchase_request_lines:
if request_line.purchase_state == 'done':
raise exceptions.Warning(
_('Purchase Request %s has already '
'been completed') % request_line.request_id.name)
return True
@api.multi
def wkf_confirm_order(self):
self._purchase_request_line_check()
res = super(PurchaseOrder, self).wkf_confirm_order()
self._purchase_request_confirm_message()
return res
class PurchaseOrderLine(models.Model):
_inherit = "purchase.order.line"
@api.multi
def _compute_has_purchase_request_lines(self):
for rec in self:
rec.has_purchase_request_lines = bool(rec.purchase_request_lines)
purchase_request_lines = fields.Many2many(
'purchase.request.line',
'purchase_request_purchase_order_line_rel',
'purchase_order_line_id',
'purchase_request_line_id',
'Purchase Request Lines', readonly=True, copy=False)
has_purchase_request_lines = fields.Boolean(
compute="_compute_has_purchase_request_lines",
string="Has Purchase Request Lines")
@api.multi
def action_openRequestLineTreeView(self):
"""
:return dict: dictionary value for created view
"""
        request_line_ids = []
        for line in self:
            # accumulate ids across all lines instead of overwriting each pass
            request_line_ids += [request_line.id for request_line
                                 in line.purchase_request_lines]
domain = [('id', 'in', request_line_ids)]
return {'name': _('Purchase Request Lines'),
'type': 'ir.actions.act_window',
'res_model': 'purchase.request.line',
'view_type': 'form',
'view_mode': 'tree,form',
'domain': domain}
| agpl-3.0 |
gotomypc/bigcouch | couchjs/scons/scons-local-2.0.1/SCons/Tool/f95.py | 61 | 2043 | """engine.SCons.Tool.f95
Tool-specific initialization for the generic Posix f95 Fortran compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/f95.py 5134 2010/08/16 23:02:40 bdeegan"
import SCons.Defaults
import SCons.Tool
import SCons.Util
import fortran
from SCons.Tool.FortranCommon import add_all_to_env, add_f95_to_env
compilers = ['f95']
def generate(env):
add_all_to_env(env)
add_f95_to_env(env)
fcomp = env.Detect(compilers) or 'f95'
env['F95'] = fcomp
env['SHF95'] = fcomp
env['FORTRAN'] = fcomp
env['SHFORTRAN'] = fcomp
def exists(env):
return env.Detect(compilers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
rspavel/spack | var/spack/repos/builtin/packages/redundans/package.py | 3 | 1801 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import glob
class Redundans(Package):
"""Redundans pipeline assists an assembly of heterozygous genomes."""
homepage = "https://github.com/Gabaldonlab/redundans"
url = "https://github.com/Gabaldonlab/redundans/archive/v0.13c.tar.gz"
git = "https://github.com/Gabaldonlab/redundans.git"
version('0.14a', commit='a20215a862aed161cbfc79df9133206156a1e9f0')
version('0.13c', sha256='26d48f27a32678d94c1d00cb3b8991d74891d6cad64a94569901ff9607a7a736')
depends_on('python', type=('build', 'run'))
depends_on('py-pyscaf', type=('build', 'run'))
depends_on('py-fastaindex', type=('build', 'run'))
depends_on('py-numpy', type=('build', 'run'))
depends_on('perl', type=('build', 'run'))
depends_on('sspace-standard')
depends_on('bwa')
depends_on('last')
depends_on('gapcloser')
depends_on('parallel')
depends_on('snap-berkeley@1.0beta.18:', type=('build', 'run'))
def install(self, spec, prefix):
sspace_location = join_path(spec['sspace-standard'].prefix,
'SSPACE_Standard_v3.0.pl')
filter_file(r'sspacebin = os.path.join(.*)$',
'sspacebin = \'' + sspace_location + '\'',
'redundans.py')
binfiles = ['redundans.py', 'bin/filterReads.py']
binfiles.extend(glob.glob('bin/fast?2*.py'))
# new internal dep with 0.14a
if spec.satisfies('@0.14a:'):
binfiles.append('bin/denovo.py')
mkdirp(prefix.bin)
for f in binfiles:
install(f, prefix.bin)
| lgpl-2.1 |
inspyration/odoo | openerp/tools/convert.py | 3 | 41750 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import cStringIO
import csv
import logging
import os.path
import pickle
import re
import sys
# for eval context:
import time
import openerp
import openerp.release
import openerp.workflow
from yaml_import import convert_yaml_import
import assertion_report
_logger = logging.getLogger(__name__)
try:
import pytz
except:
_logger.warning('could not find pytz library, please install it')
class pytzclass(object):
all_timezones=[]
pytz=pytzclass()
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from lxml import etree, builder
import misc
from config import config
from translate import _
# List of etree._Element subclasses that we choose to ignore when parsing XML.
from misc import SKIPPED_ELEMENT_TYPES
from misc import unquote
from openerp import SUPERUSER_ID
# Import of XML records requires the unsafe eval as well,
# almost everywhere, which is ok because it supposedly comes
# from trusted data, but at least we make it obvious now.
unsafe_eval = eval
from safe_eval import safe_eval as eval
class ParseError(Exception):
def __init__(self, msg, text, filename, lineno):
self.msg = msg
self.text = text
self.filename = filename
self.lineno = lineno
def __str__(self):
return '"%s" while parsing %s:%s, near\n%s' \
% (self.msg, self.filename, self.lineno, self.text)
def _ref(self, cr):
return lambda x: self.id_get(cr, x)
def _obj(pool, cr, uid, model_str, context=None):
model = pool[model_str]
return lambda x: model.browse(cr, uid, x, context=context)
def _get_idref(self, cr, uid, model_str, context, idref):
idref2 = dict(idref,
time=time,
DateTime=datetime,
datetime=datetime,
timedelta=timedelta,
relativedelta=relativedelta,
version=openerp.release.major_version,
ref=_ref(self, cr),
pytz=pytz)
if len(model_str):
idref2['obj'] = _obj(self.pool, cr, uid, model_str, context=context)
return idref2
def _fix_multiple_roots(node):
"""
Surround the children of the ``node`` element of an XML field with a
single root "data" element, to prevent having a document with multiple
roots once parsed separately.
XML nodes should have one root only, but we'd like to support
direct multiple roots in our partial documents (like inherited view architectures).
As a convention we'll surround multiple root with a container "data" element, to be
ignored later when parsing.
"""
real_nodes = [x for x in node if not isinstance(x, SKIPPED_ELEMENT_TYPES)]
if len(real_nodes) > 1:
data_node = etree.Element("data")
for child in node:
data_node.append(child)
node.append(data_node)
def _eval_xml(self, node, pool, cr, uid, idref, context=None):
if context is None:
context = {}
if node.tag in ('field','value'):
t = node.get('type','char')
f_model = node.get('model', '').encode('utf-8')
if node.get('search'):
f_search = node.get("search",'').encode('utf-8')
f_use = node.get("use",'id').encode('utf-8')
f_name = node.get("name",'').encode('utf-8')
idref2 = {}
if f_search:
idref2 = _get_idref(self, cr, uid, f_model, context, idref)
q = unsafe_eval(f_search, idref2)
ids = pool[f_model].search(cr, uid, q)
if f_use != 'id':
ids = map(lambda x: x[f_use], pool[f_model].read(cr, uid, ids, [f_use]))
_cols = pool[f_model]._columns
if (f_name in _cols) and _cols[f_name]._type=='many2many':
return ids
f_val = False
if len(ids):
f_val = ids[0]
if isinstance(f_val, tuple):
f_val = f_val[0]
return f_val
a_eval = node.get('eval','')
if a_eval:
idref2 = _get_idref(self, cr, uid, f_model, context, idref)
try:
return unsafe_eval(a_eval, idref2)
except Exception:
logging.getLogger('openerp.tools.convert.init').error(
'Could not eval(%s) for %s in %s', a_eval, node.get('name'), context)
raise
def _process(s, idref):
matches = re.finditer('[^%]%\((.*?)\)[ds]', s)
done = []
for m in matches:
found = m.group()[1:]
if found in done:
continue
done.append(found)
id = m.groups()[0]
if not id in idref:
idref[id] = self.id_get(cr, id)
s = s.replace(found, str(idref[id]))
        s = s.replace('%%', '%') # quite weird, but kept for (somewhat) backward compatibility's sake
return s
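    # Hedged example of _process (the xml id and resolved value are invented for
    # illustration): a call like
    #     _process("parent_id: %(base.menu_admin)d", idref)
    # resolves base.menu_admin through idref / id_get and could yield
    # "parent_id: 42"; a literal '%%' collapses to a single '%'.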
if t == 'xml':
_fix_multiple_roots(node)
return '<?xml version="1.0"?>\n'\
+_process("".join([etree.tostring(n, encoding='utf-8')
for n in node]), idref)
if t == 'html':
return _process("".join([etree.tostring(n, encoding='utf-8')
for n in node]), idref)
data = node.text
if node.get('file'):
with openerp.tools.file_open(node.get('file'), 'rb') as f:
data = f.read()
if t == 'file':
from ..modules import module
path = data.strip()
if not module.get_module_resource(self.module, path):
raise IOError("No such file or directory: '%s' in %s" % (
path, self.module))
return '%s,%s' % (self.module, path)
if t == 'char':
return data
if t == 'base64':
return data.encode('base64')
if t == 'int':
d = data.strip()
if d == 'None':
return None
return int(d)
if t == 'float':
return float(data.strip())
if t in ('list','tuple'):
res=[]
for n in node.iterchildren(tag='value'):
res.append(_eval_xml(self,n,pool,cr,uid,idref))
if t=='tuple':
return tuple(res)
return res
elif node.tag == "function":
args = []
a_eval = node.get('eval','')
# FIXME: should probably be exclusive
if a_eval:
idref['ref'] = lambda x: self.id_get(cr, x)
args = unsafe_eval(a_eval, idref)
for n in node:
return_val = _eval_xml(self,n, pool, cr, uid, idref, context)
if return_val is not None:
args.append(return_val)
model = pool[node.get('model', '')]
method = node.get('name')
res = getattr(model, method)(cr, uid, *args)
return res
elif node.tag == "test":
return node.text
escape_re = re.compile(r'(?<!\\)/')
def escape(x):
return x.replace('\\/', '/')
class xml_import(object):
@staticmethod
def nodeattr2bool(node, attr, default=False):
if not node.get(attr):
return default
val = node.get(attr).strip()
if not val:
return default
return val.lower() not in ('0', 'false', 'off')
def isnoupdate(self, data_node=None):
return self.noupdate or (len(data_node) and self.nodeattr2bool(data_node, 'noupdate', False))
def get_context(self, data_node, node, eval_dict):
data_node_context = (len(data_node) and data_node.get('context','').encode('utf8'))
node_context = node.get("context",'').encode('utf8')
context = {}
for ctx in (data_node_context, node_context):
if ctx:
try:
ctx_res = unsafe_eval(ctx, eval_dict)
if isinstance(context, dict):
context.update(ctx_res)
else:
context = ctx_res
except NameError:
# Some contexts contain references that are only valid at runtime at
# client-side, so in that case we keep the original context string
# as it is. We also log it, just in case.
context = ctx
_logger.debug('Context value (%s) for element with id "%s" or its data node does not parse '\
'at server-side, keeping original string, in case it\'s meant for client side only',
ctx, node.get('id','n/a'), exc_info=True)
return context
def get_uid(self, cr, uid, data_node, node):
node_uid = node.get('uid','') or (len(data_node) and data_node.get('uid',''))
if node_uid:
return self.id_get(cr, node_uid)
return uid
def _test_xml_id(self, xml_id):
id = xml_id
if '.' in xml_id:
module, id = xml_id.split('.', 1)
            assert '.' not in id, """The ID reference "%s" must contain
            at most one dot. Dots are used to refer to IDs from other modules, in the
            form: module.record_id""" % (xml_id,)
if module != self.module:
modcnt = self.pool['ir.module.module'].search_count(self.cr, self.uid, ['&', ('name', '=', module), ('state', 'in', ['installed'])])
assert modcnt == 1, """The ID "%s" refers to an uninstalled module""" % (xml_id,)
if len(id) > 64:
            _logger.error('id: %s is too long (max: 64)', id)
def _tag_delete(self, cr, rec, data_node=None, mode=None):
d_model = rec.get("model")
d_search = rec.get("search",'').encode('utf-8')
d_id = rec.get("id")
ids = []
if d_search:
idref = _get_idref(self, cr, self.uid, d_model, context={}, idref={})
try:
ids = self.pool[d_model].search(cr, self.uid, unsafe_eval(d_search, idref))
except ValueError:
_logger.warning('Skipping deletion for failed search `%r`', d_search, exc_info=True)
pass
if d_id:
try:
ids.append(self.id_get(cr, d_id))
except ValueError:
# d_id cannot be found. doesn't matter in this case
_logger.warning('Skipping deletion for missing XML ID `%r`', d_id, exc_info=True)
pass
if ids:
self.pool[d_model].unlink(cr, self.uid, ids)
def _remove_ir_values(self, cr, name, value, model):
ir_values_obj = self.pool['ir.values']
ir_value_ids = ir_values_obj.search(cr, self.uid, [('name','=',name),('value','=',value),('model','=',model)])
if ir_value_ids:
ir_values_obj.unlink(cr, self.uid, ir_value_ids)
return True
def _tag_report(self, cr, rec, data_node=None, mode=None):
res = {}
for dest,f in (('name','string'),('model','model'),('report_name','name')):
res[dest] = rec.get(f,'').encode('utf8')
            assert res[dest], "Attribute %s of report is empty!" % (f,)
for field,dest in (('rml','report_rml'),('file','report_rml'),('xml','report_xml'),('xsl','report_xsl'),
('attachment','attachment'),('attachment_use','attachment_use'), ('usage','usage'),
('report_type', 'report_type'), ('parser', 'parser')):
if rec.get(field):
res[dest] = rec.get(field).encode('utf8')
if rec.get('auto'):
res['auto'] = eval(rec.get('auto','False'))
if rec.get('sxw'):
sxw_content = misc.file_open(rec.get('sxw')).read()
res['report_sxw_content'] = sxw_content
if rec.get('header'):
res['header'] = eval(rec.get('header','False'))
res['multi'] = rec.get('multi') and eval(rec.get('multi','False'))
xml_id = rec.get('id','').encode('utf8')
self._test_xml_id(xml_id)
if rec.get('groups'):
g_names = rec.get('groups','').split(',')
groups_value = []
for group in g_names:
if group.startswith('-'):
group_id = self.id_get(cr, group[1:])
groups_value.append((3, group_id))
else:
group_id = self.id_get(cr, group)
groups_value.append((4, group_id))
res['groups_id'] = groups_value
id = self.pool['ir.model.data']._update(cr, self.uid, "ir.actions.report.xml", self.module, res, xml_id, noupdate=self.isnoupdate(data_node), mode=self.mode)
self.idref[xml_id] = int(id)
if not rec.get('menu') or eval(rec.get('menu','False')):
keyword = str(rec.get('keyword', 'client_print_multi'))
value = 'ir.actions.report.xml,'+str(id)
replace = rec.get('replace', True)
self.pool['ir.model.data'].ir_set(cr, self.uid, 'action', keyword, res['name'], [res['model']], value, replace=replace, isobject=True, xml_id=xml_id)
elif self.mode=='update' and eval(rec.get('menu','False'))==False:
# Special check for report having attribute menu=False on update
value = 'ir.actions.report.xml,'+str(id)
self._remove_ir_values(cr, res['name'], value, res['model'])
return id
def _tag_function(self, cr, rec, data_node=None, mode=None):
if self.isnoupdate(data_node) and self.mode != 'init':
return
context = self.get_context(data_node, rec, {'ref': _ref(self, cr)})
uid = self.get_uid(cr, self.uid, data_node, rec)
_eval_xml(self,rec, self.pool, cr, uid, self.idref, context=context)
return
def _tag_url(self, cr, rec, data_node=None, mode=None):
url = rec.get("url",'').encode('utf8')
target = rec.get("target",'').encode('utf8')
name = rec.get("name",'').encode('utf8')
xml_id = rec.get('id','').encode('utf8')
self._test_xml_id(xml_id)
res = {'name': name, 'url': url, 'target':target}
id = self.pool['ir.model.data']._update(cr, self.uid, "ir.actions.act_url", self.module, res, xml_id, noupdate=self.isnoupdate(data_node), mode=self.mode)
self.idref[xml_id] = int(id)
def _tag_act_window(self, cr, rec, data_node=None, mode=None):
name = rec.get('name','').encode('utf-8')
xml_id = rec.get('id','').encode('utf8')
self._test_xml_id(xml_id)
type = rec.get('type','').encode('utf-8') or 'ir.actions.act_window'
view_id = False
if rec.get('view_id'):
view_id = self.id_get(cr, rec.get('view_id','').encode('utf-8'))
domain = rec.get('domain','').encode('utf-8') or '[]'
res_model = rec.get('res_model','').encode('utf-8')
src_model = rec.get('src_model','').encode('utf-8')
view_type = rec.get('view_type','').encode('utf-8') or 'form'
view_mode = rec.get('view_mode','').encode('utf-8') or 'tree,form'
usage = rec.get('usage','').encode('utf-8')
limit = rec.get('limit','').encode('utf-8')
auto_refresh = rec.get('auto_refresh','').encode('utf-8')
uid = self.uid
# Act_window's 'domain' and 'context' contain mostly literals
# but they can also refer to the variables provided below
# in eval_context, so we need to eval() them before storing.
# Among the context variables, 'active_id' refers to
# the currently selected items in a list view, and only
# takes meaning at runtime on the client side. For this
# reason it must remain a bare variable in domain and context,
# even after eval() at server-side. We use the special 'unquote'
# class to achieve this effect: a string which has itself, unquoted,
# as representation.
active_id = unquote("active_id")
active_ids = unquote("active_ids")
active_model = unquote("active_model")
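        # For illustration (domain invented, not from the original file): since
        # repr(unquote("active_id")) is the bare token active_id, a domain like
        #     "[('id', '=', active_id)]"
        # survives the server-side eval() below unchanged and is stored with
        # active_id left for the client to bind at runtime.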
def ref(str_id):
return self.id_get(cr, str_id)
# Include all locals() in eval_context, for backwards compatibility
eval_context = {
'name': name,
'xml_id': xml_id,
'type': type,
'view_id': view_id,
'domain': domain,
'res_model': res_model,
'src_model': src_model,
'view_type': view_type,
'view_mode': view_mode,
'usage': usage,
'limit': limit,
'auto_refresh': auto_refresh,
'uid' : uid,
'active_id': active_id,
'active_ids': active_ids,
'active_model': active_model,
'ref' : ref,
}
context = self.get_context(data_node, rec, eval_context)
try:
domain = unsafe_eval(domain, eval_context)
except NameError:
# Some domains contain references that are only valid at runtime at
# client-side, so in that case we keep the original domain string
# as it is. We also log it, just in case.
_logger.debug('Domain value (%s) for element with id "%s" does not parse '\
'at server-side, keeping original string, in case it\'s meant for client side only',
domain, xml_id or 'n/a', exc_info=True)
res = {
'name': name,
'type': type,
'view_id': view_id,
'domain': domain,
'context': context,
'res_model': res_model,
'src_model': src_model,
'view_type': view_type,
'view_mode': view_mode,
'usage': usage,
'limit': limit,
'auto_refresh': auto_refresh,
}
if rec.get('groups'):
g_names = rec.get('groups','').split(',')
groups_value = []
for group in g_names:
if group.startswith('-'):
group_id = self.id_get(cr, group[1:])
groups_value.append((3, group_id))
else:
group_id = self.id_get(cr, group)
groups_value.append((4, group_id))
res['groups_id'] = groups_value
if rec.get('target'):
res['target'] = rec.get('target','')
if rec.get('multi'):
res['multi'] = eval(rec.get('multi', 'False'))
id = self.pool['ir.model.data']._update(cr, self.uid, 'ir.actions.act_window', self.module, res, xml_id, noupdate=self.isnoupdate(data_node), mode=self.mode)
self.idref[xml_id] = int(id)
if src_model:
#keyword = 'client_action_relate'
keyword = rec.get('key2','').encode('utf-8') or 'client_action_relate'
value = 'ir.actions.act_window,'+str(id)
replace = rec.get('replace','') or True
self.pool['ir.model.data'].ir_set(cr, self.uid, 'action', keyword, xml_id, [src_model], value, replace=replace, isobject=True, xml_id=xml_id)
# TODO add remove ir.model.data
def _tag_ir_set(self, cr, rec, data_node=None, mode=None):
if self.mode != 'init':
return
res = {}
for field in rec.findall('./field'):
f_name = field.get("name",'').encode('utf-8')
f_val = _eval_xml(self,field,self.pool, cr, self.uid, self.idref)
res[f_name] = f_val
self.pool['ir.model.data'].ir_set(cr, self.uid, res['key'], res['key2'], res['name'], res['models'], res['value'], replace=res.get('replace',True), isobject=res.get('isobject', False), meta=res.get('meta',None))
def _tag_workflow(self, cr, rec, data_node=None, mode=None):
if self.isnoupdate(data_node) and self.mode != 'init':
return
model = rec.get('model').encode('ascii')
w_ref = rec.get('ref')
if w_ref:
id = self.id_get(cr, w_ref)
else:
number_children = len(rec)
assert number_children > 0,\
                "You must define a child node if you don't give a ref"
assert number_children == 1,\
'Only one child node is accepted (%d given)' % number_children
id = _eval_xml(self, rec[0], self.pool, cr, self.uid, self.idref)
uid = self.get_uid(cr, self.uid, data_node, rec)
openerp.workflow.trg_validate(
uid, model, id, rec.get('action').encode('ascii'), cr)
#
# Support two types of notation:
# name="Inventory Control/Sending Goods"
# or
# action="action_id"
# parent="parent_id"
#
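    # A sketch of both forms as they might appear in a data file (ids invented
    # for illustration):
    #     <menuitem name="Inventory Control/Sending Goods" id="menu_send_goods"/>
    #     <menuitem name="Sending Goods" id="menu_send_goods"
    #               action="action_send_goods" parent="menu_inventory"/>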
def _tag_menuitem(self, cr, rec, data_node=None, mode=None):
rec_id = rec.get("id",'').encode('ascii')
self._test_xml_id(rec_id)
m_l = map(escape, escape_re.split(rec.get("name",'').encode('utf8')))
values = {'parent_id': False}
if rec.get('parent', False) is False and len(m_l) > 1:
# No parent attribute specified and the menu name has several menu components,
# try to determine the ID of the parent according to menu path
pid = False
res = None
values['name'] = m_l[-1]
m_l = m_l[:-1] # last part is our name, not a parent
for idx, menu_elem in enumerate(m_l):
if pid:
cr.execute('select id from ir_ui_menu where parent_id=%s and name=%s', (pid, menu_elem))
else:
cr.execute('select id from ir_ui_menu where parent_id is null and name=%s', (menu_elem,))
res = cr.fetchone()
if res:
pid = res[0]
else:
                    # the menuitem doesn't exist, but we are on a branch (not a leaf)
                    _logger.warning('No ID found for submenu %s of menu %s!', menu_elem, str(m_l))
pid = self.pool['ir.ui.menu'].create(cr, self.uid, {'parent_id' : pid, 'name' : menu_elem})
values['parent_id'] = pid
else:
# The parent attribute was specified, if non-empty determine its ID, otherwise
# explicitly make a top-level menu
if rec.get('parent'):
menu_parent_id = self.id_get(cr, rec.get('parent',''))
else:
# we get here with <menuitem parent="">, explicit clear of parent, or
# if no parent attribute at all but menu name is not a menu path
menu_parent_id = False
values = {'parent_id': menu_parent_id}
if rec.get('name'):
values['name'] = rec.get('name')
try:
res = [ self.id_get(cr, rec.get('id','')) ]
            except Exception:
res = None
if rec.get('action'):
a_action = rec.get('action','').encode('utf8')
# determine the type of action
action_type, action_id = self.model_id_get(cr, a_action)
action_type = action_type.split('.')[-1] # keep only type part
if not values.get('name') and action_type in ('act_window', 'wizard', 'url', 'client', 'server'):
a_table = 'ir_act_%s' % action_type.replace('act_', '')
cr.execute('select name from "%s" where id=%%s' % a_table, (int(action_id),))
resw = cr.fetchone()
if resw:
values['name'] = resw[0]
if not values.get('name'):
# ensure menu has a name
values['name'] = rec_id or '?'
if rec.get('sequence'):
values['sequence'] = int(rec.get('sequence'))
if rec.get('groups'):
g_names = rec.get('groups','').split(',')
groups_value = []
for group in g_names:
if group.startswith('-'):
group_id = self.id_get(cr, group[1:])
groups_value.append((3, group_id))
else:
group_id = self.id_get(cr, group)
groups_value.append((4, group_id))
values['groups_id'] = groups_value
pid = self.pool['ir.model.data']._update(cr, self.uid, 'ir.ui.menu', self.module, values, rec_id, noupdate=self.isnoupdate(data_node), mode=self.mode, res_id=res and res[0] or False)
if rec_id and pid:
self.idref[rec_id] = int(pid)
if rec.get('action') and pid:
action = "ir.actions.%s,%d" % (action_type, action_id)
self.pool['ir.model.data'].ir_set(cr, self.uid, 'action', 'tree_but_open', 'Menuitem', [('ir.ui.menu', int(pid))], action, True, True, xml_id=rec_id)
return 'ir.ui.menu', pid
def _assert_equals(self, f1, f2, prec=4):
return not round(f1 - f2, prec)
def _tag_assert(self, cr, rec, data_node=None, mode=None):
if self.isnoupdate(data_node) and self.mode != 'init':
return
rec_model = rec.get("model",'').encode('ascii')
model = self.pool[rec_model]
rec_id = rec.get("id",'').encode('ascii')
self._test_xml_id(rec_id)
rec_src = rec.get("search",'').encode('utf8')
rec_src_count = rec.get("count")
rec_string = rec.get("string",'').encode('utf8') or 'unknown'
ids = None
eval_dict = {'ref': _ref(self, cr)}
context = self.get_context(data_node, rec, eval_dict)
uid = self.get_uid(cr, self.uid, data_node, rec)
if rec_id:
ids = [self.id_get(cr, rec_id)]
elif rec_src:
q = unsafe_eval(rec_src, eval_dict)
ids = self.pool[rec_model].search(cr, uid, q, context=context)
if rec_src_count:
count = int(rec_src_count)
if len(ids) != count:
self.assertion_report.record_failure()
msg = 'assertion "%s" failed!\n' \
' Incorrect search count:\n' \
' expected count: %d\n' \
' obtained count: %d\n' \
% (rec_string, count, len(ids))
_logger.error(msg)
return
assert ids is not None,\
'You must give either an id or a search criteria'
ref = _ref(self, cr)
for id in ids:
brrec = model.browse(cr, uid, id, context)
class d(dict):
def __getitem__(self2, key):
if key in brrec:
return brrec[key]
return dict.__getitem__(self2, key)
globals_dict = d()
globals_dict['floatEqual'] = self._assert_equals
globals_dict['ref'] = ref
globals_dict['_ref'] = ref
for test in rec.findall('./test'):
f_expr = test.get("expr",'').encode('utf-8')
expected_value = _eval_xml(self, test, self.pool, cr, uid, self.idref, context=context) or True
expression_value = unsafe_eval(f_expr, globals_dict)
if expression_value != expected_value: # assertion failed
self.assertion_report.record_failure()
msg = 'assertion "%s" failed!\n' \
' xmltag: %s\n' \
' expected value: %r\n' \
' obtained value: %r\n' \
% (rec_string, etree.tostring(test), expected_value, expression_value)
_logger.error(msg)
return
else: # all tests were successful for this assertion tag (no break)
self.assertion_report.record_success()
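    # Hedged example of the <assert> element this handler consumes (model, id
    # and expression invented for illustration):
    #     <assert model="res.partner" id="base.main_partner" string="partner is active">
    #         <test expr="active"/>
    #     </assert>
    # Each <test expr="..."> is evaluated against the browsed record; a mismatch
    # is recorded in the assertion report and logged rather than raised.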
def _tag_record(self, cr, rec, data_node=None, mode=None):
rec_model = rec.get("model").encode('ascii')
model = self.pool[rec_model]
rec_id = rec.get("id",'').encode('ascii')
rec_context = rec.get("context", {})
if rec_context:
rec_context = unsafe_eval(rec_context)
if self.xml_filename and rec_id:
rec_context['install_mode_data'] = dict(
xml_file=self.xml_filename,
xml_id=rec_id,
model=rec_model,
)
self._test_xml_id(rec_id)
        # in update mode, the record won't be updated if the data node explicitly
        # opts out using @noupdate="1". A second check will be performed in
        # ir.model.data#_update() using the record's ir.model.data `noupdate` field.
if self.isnoupdate(data_node) and self.mode != 'init':
# check if the xml record has no id, skip
if not rec_id:
return None
if '.' in rec_id:
module,rec_id2 = rec_id.split('.')
else:
module = self.module
rec_id2 = rec_id
id = self.pool['ir.model.data']._update_dummy(cr, self.uid, rec_model, module, rec_id2)
if id:
# if the resource already exists, don't update it but store
# its database id (can be useful)
self.idref[rec_id] = int(id)
return None
elif not self.nodeattr2bool(rec, 'forcecreate', True):
# if it doesn't exist and we shouldn't create it, skip it
return None
# else create it normally
res = {}
for field in rec.findall('./field'):
#TODO: most of this code is duplicated above (in _eval_xml)...
f_name = field.get("name").encode('utf-8')
f_ref = field.get("ref",'').encode('utf-8')
f_search = field.get("search",'').encode('utf-8')
f_model = field.get("model",'').encode('utf-8')
if not f_model and f_name in model._fields:
f_model = model._fields[f_name].comodel_name
f_use = field.get("use",'').encode('utf-8') or 'id'
f_val = False
if f_search:
q = unsafe_eval(f_search, self.idref)
                assert f_model, 'Define an attribute model="..." in your XML file!'
f_obj = self.pool[f_model]
# browse the objects searched
s = f_obj.browse(cr, self.uid, f_obj.search(cr, self.uid, q))
# column definitions of the "local" object
_fields = self.pool[rec_model]._fields
# if the current field is many2many
if (f_name in _fields) and _fields[f_name].type == 'many2many':
f_val = [(6, 0, map(lambda x: x[f_use], s))]
elif len(s):
# otherwise (we are probably in a many2one field),
# take the first element of the search
f_val = s[0][f_use]
elif f_ref:
if f_name in model._fields and model._fields[f_name].type == 'reference':
val = self.model_id_get(cr, f_ref)
f_val = val[0] + ',' + str(val[1])
else:
f_val = self.id_get(cr, f_ref)
else:
f_val = _eval_xml(self,field, self.pool, cr, self.uid, self.idref)
if f_name in model._fields:
if model._fields[f_name].type == 'integer':
f_val = int(f_val)
res[f_name] = f_val
id = self.pool['ir.model.data']._update(cr, self.uid, rec_model, self.module, res, rec_id or False, not self.isnoupdate(data_node), noupdate=self.isnoupdate(data_node), mode=self.mode, context=rec_context )
if rec_id:
self.idref[rec_id] = int(id)
if config.get('import_partial'):
cr.commit()
return rec_model, id
def _tag_template(self, cr, el, data_node=None, mode=None):
# This helper transforms a <template> element into a <record> and forwards it
tpl_id = el.get('id', el.get('t-name', '')).encode('ascii')
full_tpl_id = tpl_id
if '.' not in full_tpl_id:
full_tpl_id = '%s.%s' % (self.module, tpl_id)
# set the full template name for qweb <module>.<id>
if not el.get('inherit_id'):
el.set('t-name', full_tpl_id)
el.tag = 't'
else:
el.tag = 'data'
el.attrib.pop('id', None)
record_attrs = {
'id': tpl_id,
'model': 'ir.ui.view',
}
for att in ['forcecreate', 'context']:
if att in el.keys():
record_attrs[att] = el.attrib.pop(att)
Field = builder.E.field
name = el.get('name', tpl_id)
record = etree.Element('record', attrib=record_attrs)
record.append(Field(name, name='name'))
record.append(Field(full_tpl_id, name='key'))
record.append(Field("qweb", name='type'))
record.append(Field(el.get('priority', "16"), name='priority'))
if 'inherit_id' in el.attrib:
record.append(Field(name='inherit_id', ref=el.get('inherit_id')))
if el.get('active') in ("True", "False"):
view_id = self.id_get(cr, tpl_id, raise_if_not_found=False)
if mode != "update" or not view_id:
record.append(Field(name='active', eval=el.get('active')))
if el.get('customize_show') in ("True", "False"):
record.append(Field(name='customize_show', eval=el.get('customize_show')))
groups = el.attrib.pop('groups', None)
if groups:
grp_lst = map(lambda x: "ref('%s')" % x, groups.split(','))
record.append(Field(name="groups_id", eval="[(6, 0, ["+', '.join(grp_lst)+"])]"))
if el.attrib.pop('page', None) == 'True':
record.append(Field(name="page", eval="True"))
if el.get('primary') == 'True':
# Pseudo clone mode, we'll set the t-name to the full canonical xmlid
el.append(
builder.E.xpath(
builder.E.attribute(full_tpl_id, name='t-name'),
expr=".",
position="attributes",
)
)
record.append(Field('primary', name='mode'))
# inject complete <template> element (after changing node name) into
# the ``arch`` field
record.append(Field(el, name="arch", type="xml"))
return self._tag_record(cr, record, data_node)
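    # Hedged sketch (ids invented): a <template id="foo" name="Foo">...</template>
    # is rewritten into roughly
    #     <record id="foo" model="ir.ui.view">
    #         <field name="name">Foo</field>
    #         <field name="key">module.foo</field>
    #         <field name="type">qweb</field>
    #         <field name="priority">16</field>
    #         <field name="arch" type="xml"><t t-name="module.foo">...</t></field>
    #     </record>
    # before being handed to _tag_record above.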
def id_get(self, cr, id_str, raise_if_not_found=True):
if id_str in self.idref:
return self.idref[id_str]
res = self.model_id_get(cr, id_str, raise_if_not_found)
if res and len(res)>1: res = res[1]
return res
def model_id_get(self, cr, id_str, raise_if_not_found=True):
model_data_obj = self.pool['ir.model.data']
mod = self.module
if '.' not in id_str:
id_str = '%s.%s' % (mod, id_str)
return model_data_obj.xmlid_to_res_model_res_id(
cr, self.uid, id_str,
raise_if_not_found=raise_if_not_found)
def parse(self, de, mode=None):
if de.tag != 'openerp':
            raise Exception("Mismatched XML format: root tag must be `openerp`.")
for n in de.findall('./data'):
for rec in n:
if rec.tag in self._tags:
try:
self._tags[rec.tag](self.cr, rec, n, mode=mode)
except Exception, e:
self.cr.rollback()
exc_info = sys.exc_info()
raise ParseError, (misc.ustr(e), etree.tostring(rec).rstrip(), rec.getroottree().docinfo.URL, rec.sourceline), exc_info[2]
return True
def __init__(self, cr, module, idref, mode, report=None, noupdate=False, xml_filename=None):
self.mode = mode
self.module = module
self.cr = cr
self.idref = idref
self.pool = openerp.registry(cr.dbname)
self.uid = 1
if report is None:
report = assertion_report.assertion_report()
self.assertion_report = report
self.noupdate = noupdate
self.xml_filename = xml_filename
self._tags = {
'record': self._tag_record,
'delete': self._tag_delete,
'function': self._tag_function,
'menuitem': self._tag_menuitem,
'template': self._tag_template,
'workflow': self._tag_workflow,
'report': self._tag_report,
'ir_set': self._tag_ir_set,
'act_window': self._tag_act_window,
'url': self._tag_url,
'assert': self._tag_assert,
}
def convert_file(cr, module, filename, idref, mode='update', noupdate=False, kind=None, report=None, pathname=None):
if pathname is None:
pathname = os.path.join(module, filename)
fp = misc.file_open(pathname)
ext = os.path.splitext(filename)[1].lower()
try:
if ext == '.csv':
convert_csv_import(cr, module, pathname, fp.read(), idref, mode, noupdate)
elif ext == '.sql':
convert_sql_import(cr, fp)
elif ext == '.yml':
convert_yaml_import(cr, module, fp, kind, idref, mode, noupdate, report)
elif ext == '.xml':
convert_xml_import(cr, module, fp, idref, mode, noupdate, report)
elif ext == '.js':
pass # .js files are valid but ignored here.
else:
_logger.warning("Can't load unknown file type %s.", filename)
finally:
fp.close()
def convert_sql_import(cr, fp):
queries = fp.read().split(';')
for query in queries:
new_query = ' '.join(query.split())
if new_query:
cr.execute(new_query)
def convert_csv_import(cr, module, fname, csvcontent, idref=None, mode='init',
noupdate=False):
    '''Import a CSV file:
quote: "
delimiter: ,
encoding: utf-8'''
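    # Hedged sketch of the expected layout (field names invented for
    # illustration) -- the first row names the fields, each further row is one
    # record, and the model is derived from the file name (e.g.
    # res.partner.csv or res.partner-demo.csv -> res.partner):
    #     "id","name","parent_id:id"
    #     "partner_foo","Foo","base.main_partner"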
if not idref:
idref={}
model = ('.'.join(fname.split('.')[:-1]).split('-'))[0]
#remove folder path from model
head, model = os.path.split(model)
input = cStringIO.StringIO(csvcontent) #FIXME
reader = csv.reader(input, quotechar='"', delimiter=',')
fields = reader.next()
fname_partial = ""
if config.get('import_partial'):
fname_partial = module + '/'+ fname
if not os.path.isfile(config.get('import_partial')):
pickle.dump({}, file(config.get('import_partial'),'w+'))
else:
data = pickle.load(file(config.get('import_partial')))
if fname_partial in data:
if not data[fname_partial]:
return
else:
for i in range(data[fname_partial]):
reader.next()
if not (mode == 'init' or 'id' in fields):
        _logger.error("Import specification does not contain 'id' and mode is not 'init'; cannot continue.")
return
uid = 1
datas = []
for line in reader:
if not (line and any(line)):
continue
try:
datas.append(map(misc.ustr, line))
        except Exception:
_logger.error("Cannot import the line: %s", line)
registry = openerp.registry(cr.dbname)
result, rows, warning_msg, dummy = registry[model].import_data(cr, uid, fields, datas,mode, module, noupdate, filename=fname_partial)
if result < 0:
# Report failed import and abort module install
raise Exception(_('Module loading %s failed: file %s could not be processed:\n %s') % (module, fname, warning_msg))
if config.get('import_partial'):
data = pickle.load(file(config.get('import_partial')))
data[fname_partial] = 0
pickle.dump(data, file(config.get('import_partial'),'wb'))
cr.commit()
#
# xml import/export
#
def convert_xml_import(cr, module, xmlfile, idref=None, mode='init', noupdate=False, report=None):
doc = etree.parse(xmlfile)
relaxng = etree.RelaxNG(
etree.parse(os.path.join(config['root_path'],'import_xml.rng' )))
try:
relaxng.assert_(doc)
except Exception:
_logger.error('The XML file does not fit the required schema !')
_logger.error(misc.ustr(relaxng.error_log.last_error))
raise
if idref is None:
idref={}
if isinstance(xmlfile, file):
xml_filename = xmlfile.name
else:
xml_filename = xmlfile
obj = xml_import(cr, module, idref, mode, report=report, noupdate=noupdate, xml_filename=xml_filename)
obj.parse(doc.getroot(), mode=mode)
return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
justinglibert/flapai | flapai.py | 1 | 19355 | from itertools import cycle
import random
import sys
import time
import datetime
import logging
import os
import pygame
from pygame.locals import *
import matplotlib.pyplot as plt
import json
from genome import Genome
from network import Network
from population import Population
from config import Config
import pickle
import numpy as np
import subprocess
from colorama import *
today = "save/" + str(datetime.date.today()) + "_" + time.strftime("%X")
if not os.path.exists(today):
os.makedirs(today)
savestat = True
fpsspeed=3
FPS = 4000
bestFitness = 0
fitnessovergeneration = []
fittestovergeneration = []
#detectionOffset = 36
detectionOffset = 40
SCREENWIDTH = 288
SCREENHEIGHT = 512
# amount by which base can maximum shift to left
PIPEGAPSIZE = 100 # gap between upper and lower part of pipe
BASEY = SCREENHEIGHT * 0.79
# image and hitmask dicts
IMAGES, HITMASKS = {}, {}
DIEIFTOUCHTOP = True
# list of all possible players (tuple of 3 positions of flap)
PLAYERS_LIST = (
# red bird
(
'assets/sprites/redbird-upflap.png',
'assets/sprites/redbird-midflap.png',
'assets/sprites/redbird-downflap.png',
),
# blue bird
(
# amount by which base can maximum shift to left
'assets/sprites/bluebird-upflap.png',
'assets/sprites/bluebird-midflap.png',
'assets/sprites/bluebird-downflap.png',
),
# yellow bird
(
'assets/sprites/yellowbird-upflap.png',
'assets/sprites/yellowbird-midflap.png',
'assets/sprites/yellowbird-downflap.png',
),
)
# list of backgrounds
BACKGROUNDS_LIST = (
'assets/sprites/background-day.png',
'assets/sprites/background-night.png',
)
# list of pipes
PIPES_LIST = (
'assets/sprites/pipe-green.png',
'assets/sprites/pipe-red.png',
)
asciiart="""
______ _ ___ _____
| ___| | / _ \|_ _|
| |_ | | __ _ _ __ / /_\ \ | |
| _| | |/ _` | '_ \| _ | | |
| | | | (_| | |_) | | | |_| |_
\_| |_|\__,_| .__/\_| |_/\___/
| |
|_| """
def printc(text,color):
if color == "red":
print(Fore.RED)
if color == "blue":
print(Fore.BLUE)
if color == "green":
print(Fore.GREEN)
print(text)
print(Style.RESET_ALL)
def main():
    """Parse command-line options."""
if len(sys.argv) != 1:
#Evaluate a single genome
if str(sys.argv[1])=="-evaluate":
initPygame("FlapAI: Evaluating")
print str(sys.argv[2])
net = load(str(sys.argv[2]))
genome = Genome(net)
global savestat
savestat = False
fitness = []
for i in xrange(100):
fit = playGame(genome)
fitness.append(fit.fitness)
                print "fitness: %s" % fit.fitness
            average = sum(fitness) / float(len(fitness))
            printc("Average fitness: %s" % average, "red")
pygame.quit()
sys.exit()
#Show the stat of an experiment
if str(sys.argv[1])=="-stats":
showStat(str(sys.argv[2]))
#No argument, starting FlapAI
initPygame("FlapAI: Learning")
"""Init the population"""
    global bestFitness, fitnessovergeneration, fittestovergeneration
    bestFitness = 0
population = Population()
population.generateRandomPopulation()
generation = 1
maxgeneration = Config.maxGeneration
lastgenerationaveragefitness = 0
#Main Loop
while generation <= maxgeneration :
birdnmbr = 1
for i in xrange(population.size()):
genome = playGame(population.getGenome(i))
population.setGenomeFitness(i,genome.fitness)
informationforscreen = {
'generation' : generation,
'birdnumber' : birdnmbr,
'lastfitness' : genome.fitness,
'lastgenerationaveragefitness' : lastgenerationaveragefitness,
'bestfitness' : bestFitness
}
updateScreen(informationforscreen)
            if genome.fitness > bestFitness:
                bestFitness = genome.fitness
                genome.network.save(today + "/bestfitness.json")
birdnmbr += 1
        fitnessovergeneration.append(population.averageFitness())
lastgenerationaveragefitness = population.averageFitness()
        fittestovergeneration.append(population.findFittest().fitness)
#Evolve the population
population.evolvePopulation()
generation += 1
def initPygame(screencaption):
global SCREEN, FPSCLOCK
pygame.init()
init()
FPSCLOCK = pygame.time.Clock()
SCREEN = pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT))
pygame.display.set_caption(screencaption)
# numbers sprites for score display
IMAGES['numbers'] = (
pygame.image.load('assets/sprites/0.png').convert_alpha(),
pygame.image.load('assets/sprites/1.png').convert_alpha(),
pygame.image.load('assets/sprites/2.png').convert_alpha(),
pygame.image.load('assets/sprites/3.png').convert_alpha(),
pygame.image.load('assets/sprites/4.png').convert_alpha(),
pygame.image.load('assets/sprites/5.png').convert_alpha(),
pygame.image.load('assets/sprites/6.png').convert_alpha(),
pygame.image.load('assets/sprites/7.png').convert_alpha(),
pygame.image.load('assets/sprites/8.png').convert_alpha(),
pygame.image.load('assets/sprites/9.png').convert_alpha()
)
# base (ground) sprite
IMAGES['base'] = pygame.image.load('assets/sprites/base.png').convert_alpha()
def playGame(genome):
    """Game setup info."""
# select random background sprites
randBg = random.randint(0, len(BACKGROUNDS_LIST) - 1)
IMAGES['background'] = pygame.image.load(BACKGROUNDS_LIST[randBg]).convert()
# select random player sprites
randPlayer = random.randint(0, len(PLAYERS_LIST) - 1)
IMAGES['player'] = (
pygame.image.load(PLAYERS_LIST[randPlayer][0]).convert_alpha(),
pygame.image.load(PLAYERS_LIST[randPlayer][1]).convert_alpha(),
pygame.image.load(PLAYERS_LIST[randPlayer][2]).convert_alpha(),
)
# select random pipe sprites
pipeindex = random.randint(0, len(PIPES_LIST) - 1)
IMAGES['pipe'] = (
pygame.transform.rotate(
pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(), 180),
pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(),
)
# hismask for pipes
HITMASKS['pipe'] = (
getHitmask(IMAGES['pipe'][0]),
getHitmask(IMAGES['pipe'][1]),
)
# hitmask for player
HITMASKS['player'] = (
getHitmask(IMAGES['player'][0]),
getHitmask(IMAGES['player'][1]),
getHitmask(IMAGES['player'][2]),
    )
    """Info to start the game without the intro message."""
playery = int((SCREENHEIGHT - IMAGES['player'][0].get_height()) / 2)
playerShmVals = {'val': 0, 'dir': 1}
basex = 0
playerIndexGen = cycle([0, 1, 2, 1])
movementInfo = {
'playery': playery + playerShmVals['val'],
'basex': basex,
'playerIndexGen': playerIndexGen,
}
#Update the network with current genes
genome.network.fromgenes(genome.genes)
crashInfo = mainGame(movementInfo,genome)
#fitness = showGameOverScreen(crashInfo)
genome = crashInfo['genome']
if Config.fitnessIsScore:
genome.fitness = crashInfo['score']
if genome.fitness < 0:
genome.fitness = 0
return genome
def mainGame(movementInfo,genome):
    global fpsspeed, FPS
    score = playerIndex = loopIter = 0
playerIndexGen = movementInfo['playerIndexGen']
playerx, playery = int(SCREENWIDTH * 0.2), movementInfo['playery']
basex = movementInfo['basex']
baseShift = IMAGES['base'].get_width() - IMAGES['background'].get_width()
# get 2 new pipes to add to upperPipes lowerPipes list
newPipe1 = getRandomPipe()
newPipe2 = getRandomPipe()
# list of upper pipes
upperPipes = [
{'x': SCREENWIDTH + 200, 'y': newPipe1[0]['y']},
{'x': SCREENWIDTH + 200 + (SCREENWIDTH / 2), 'y': newPipe2[0]['y']},
]
# list of lowerpipe
lowerPipes = [
{'x': SCREENWIDTH + 200, 'y': newPipe1[1]['y']},
{'x': SCREENWIDTH + 200 + (SCREENWIDTH / 2), 'y': newPipe2[1]['y']},
]
pipeVelX = -4
    # player velocity, max velocity, downward acceleration, acceleration on flap
playerVelY = -9 # player's velocity along Y, default same as playerFlapped
playerMaxVelY = 10 # max vel along Y, max descend speed
playerMinVelY = -8 # min vel along Y, max ascend speed
    playerAccY = 1 # players downward acceleration
playerFlapAcc = -9 # players speed on flapping
playerFlapped = False # True when player flaps
framesurvived = 0
while True:
for event in pygame.event.get():
if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
#Store Stat
if savestat==True:
reportStat()
pygame.quit()
if savestat==True:
showStat(today)
else:
sys.exit()
            if event.type == KEYDOWN and event.key == K_UP:
                if fpsspeed < 4:
                    fpsspeed += 1
            if event.type == KEYDOWN and event.key == K_DOWN:
                if fpsspeed != -2:
                    fpsspeed -= 1
#Evaluate the NN
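        # Descriptive note (added): the network gets two inputs, both rescaled
        # to a bottom-up [0, 1] axis -- the bird's y and the next lower pipe's
        # y. The horizontal distance is computed below as well, but only feeds
        # the debug overlay; a single output above 0.5 is read as "flap".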
if playerx < lowerPipes[0]['x'] + detectionOffset:
nextPipe = lowerPipes[0]
else:
nextPipe = lowerPipes[1]
nextPipeY = float(SCREENHEIGHT - nextPipe['y']) / SCREENHEIGHT
playerYcorrectAxis = float(SCREENHEIGHT - playery) / SCREENHEIGHT
distanceBetweenPlayerAndNextPipe = float(nextPipe['x'] - playerx)/ SCREENWIDTH
NNinput = np.array([[playerYcorrectAxis],[nextPipeY]])
NNoutput = genome.network.feedforward(NNinput)
if NNoutput > 0.5:
if playery > -2 * IMAGES['player'][0].get_height():
playerVelY = playerFlapAcc
playerFlapped = True
info = {'playery': playerYcorrectAxis, 'pipey': nextPipeY, 'distance': distanceBetweenPlayerAndNextPipe}
# check for crash here
crashTest = checkCrash({'x': playerx, 'y': playery, 'index': playerIndex},
upperPipes, lowerPipes)
if crashTest[0] or playery < 5:
genome.fitness = framesurvived
return {
'score': score,
'genome': genome
}
# check for score
playerMidPos = playerx + IMAGES['player'][0].get_width() / 2
for pipe in upperPipes:
pipeMidPos = pipe['x'] + IMAGES['pipe'][0].get_width() / 2
if pipeMidPos <= playerMidPos < pipeMidPos + 4:
score += 1
# playerIndex basex change
if (loopIter + 1) % 3 == 0:
playerIndex = playerIndexGen.next()
loopIter = (loopIter + 1) % 30
basex = -((-basex + 100) % baseShift)
# player's movement
if playerVelY < playerMaxVelY and not playerFlapped:
playerVelY += playerAccY
if playerFlapped:
playerFlapped = False
playerHeight = IMAGES['player'][playerIndex].get_height()
if playery > 5:
playery += min(playerVelY, BASEY - playery - playerHeight)
# move pipes to left
for uPipe, lPipe in zip(upperPipes, lowerPipes):
uPipe['x'] += pipeVelX
lPipe['x'] += pipeVelX
# add new pipe when first pipe is about to touch left of screen
if 0 < upperPipes[0]['x'] < 5:
newPipe = getRandomPipe()
upperPipes.append(newPipe[0])
lowerPipes.append(newPipe[1])
# remove first pipe if its out of the screen
if upperPipes[0]['x'] < -IMAGES['pipe'][0].get_width():
upperPipes.pop(0)
lowerPipes.pop(0)
# draw sprites
SCREEN.blit(IMAGES['background'], (0,0))
for uPipe, lPipe in zip(upperPipes, lowerPipes):
SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y']))
SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y']))
SCREEN.blit(IMAGES['base'], (basex, BASEY))
# print score so player overlaps the score
showScore(score)
if Config.debug:
displayInfo(info)
framesurvived += 1
SCREEN.blit(IMAGES['player'][playerIndex], (playerx, playery))
        if fpsspeed==4:
            #No FPS clock ticking; may be unstable
            continue
if fpsspeed==3:
FPS=4000
if fpsspeed==2:
FPS=400
if fpsspeed==1:
FPS=40
if fpsspeed==0:
FPS=30
if fpsspeed==-1:
FPS=15
if fpsspeed==-2:
FPS=3
pygame.display.update()
FPSCLOCK.tick(FPS)
def playerShm(playerShm):
"""oscillates the value of playerShm['val'] between 8 and -8"""
if abs(playerShm['val']) == 8:
playerShm['dir'] *= -1
if playerShm['dir'] == 1:
playerShm['val'] += 1
else:
playerShm['val'] -= 1
def getRandomPipe():
"""returns a randomly generated pipe"""
# y of gap between upper and lower pipe
gapY = random.randrange(0, int(BASEY * 0.6 - PIPEGAPSIZE))
gapY += int(BASEY * 0.2)
pipeHeight = IMAGES['pipe'][0].get_height()
pipeX = SCREENWIDTH + 10
return [
{'x': pipeX, 'y': gapY - pipeHeight}, # upper pipe
{'x': pipeX, 'y': gapY + PIPEGAPSIZE}, # lower pipe
]
def displayInfo(info):
    # Display useful info: the inputs fed to the ANN
myfont = pygame.font.Font(None, 30)
# render text
playery = str(info['playery'])
tubey = str(info['pipey'])
distance = str(info['distance'])
labelplayery = myfont.render(playery,1,(255,255,0))
labeltubey = myfont.render(tubey,1,(0,255,255))
labeldistance = myfont.render(distance,1,(255,255,255))
SCREEN.blit(labelplayery, (SCREENWIDTH / 2 - 100, SCREENHEIGHT * 0.7))
SCREEN.blit(labeltubey, (SCREENWIDTH / 2 - 100, SCREENHEIGHT * 0.8))
SCREEN.blit(labeldistance, (SCREENWIDTH / 2 - 100, SCREENHEIGHT * 0.9))
def showScore(score):
"""displays score in center of screen"""
scoreDigits = [int(x) for x in list(str(score))]
totalWidth = 0 # total width of all numbers to be printed
for digit in scoreDigits:
totalWidth += IMAGES['numbers'][digit].get_width()
Xoffset = (SCREENWIDTH - totalWidth) / 2
for digit in scoreDigits:
SCREEN.blit(IMAGES['numbers'][digit], (Xoffset, SCREENHEIGHT * 0.1))
Xoffset += IMAGES['numbers'][digit].get_width()
def checkCrash(player, upperPipes, lowerPipes):
    """Returns True if the player collides with the base or pipes."""
pi = player['index']
player['w'] = IMAGES['player'][0].get_width()
player['h'] = IMAGES['player'][0].get_height()
# if player crashes into ground
if player['y'] + player['h'] >= BASEY - 1:
return [True, True]
else:
playerRect = pygame.Rect(player['x'], player['y'],
player['w'], player['h'])
pipeW = IMAGES['pipe'][0].get_width()
pipeH = IMAGES['pipe'][0].get_height()
for uPipe, lPipe in zip(upperPipes, lowerPipes):
# upper and lower pipe rects
uPipeRect = pygame.Rect(uPipe['x'], uPipe['y'], pipeW, pipeH)
lPipeRect = pygame.Rect(lPipe['x'], lPipe['y'], pipeW, pipeH)
# player and upper/lower pipe hitmasks
pHitMask = HITMASKS['player'][pi]
uHitmask = HITMASKS['pipe'][0]
lHitmask = HITMASKS['pipe'][1]
# if bird collided with upipe or lpipe
uCollide = pixelCollision(playerRect, uPipeRect, pHitMask, uHitmask)
lCollide = pixelCollision(playerRect, lPipeRect, pHitMask, lHitmask)
if uCollide or lCollide:
return [True, False]
return [False, False]
def pixelCollision(rect1, rect2, hitmask1, hitmask2):
"""Checks if two objects collide and not just their rects"""
rect = rect1.clip(rect2)
if rect.width == 0 or rect.height == 0:
return False
x1, y1 = rect.x - rect1.x, rect.y - rect1.y
x2, y2 = rect.x - rect2.x, rect.y - rect2.y
for x in xrange(rect.width):
for y in xrange(rect.height):
if hitmask1[x1+x][y1+y] and hitmask2[x2+x][y2+y]:
return True
return False
def getHitmask(image):
"""returns a hitmask using an image's alpha."""
mask = []
for x in range(image.get_width()):
mask.append([])
for y in range(image.get_height()):
mask[x].append(bool(image.get_at((x,y))[3]))
return mask
def load(filename):
"""Load a neural network from the file ``filename``. Returns an
instance of Network.
"""
f = open(filename, "r")
data = json.load(f)
f.close()
net = Network(data["sizes"])
net.weights = [np.array(w) for w in data["weights"]]
net.biases = [np.array(b) for b in data["biases"]]
return net
def reportStat():
with open(today + '/fitnessovergeneration.dat', 'wb') as handle:
pickle.dump(fitnessovergeneration, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(today + '/fittestovergeneration.dat', 'wb') as handle:
pickle.dump(fittestovergeneration, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(today + '/bestfitness.dat', 'wb') as handle:
pickle.dump(bestFitness, handle, protocol=pickle.HIGHEST_PROTOCOL)
def updateScreen(info):
#Clear the screen
subprocess.call(["printf", "\033c"])
#Print asciiart
printc(asciiart,"green")
if info["generation"] > 1:
print("----Last generation----")
printc("Average fitness: %s" % str(info["lastgenerationaveragefitness"]), "blue")
print("-----------------------")
if info["birdnumber"] > 1:
printc("Last Fitness: %s" % str(info["lastfitness"]), "green")
printc("Best Fitness: %s" % str(info["bestfitness"]),"red")
print("----Status----")
printc("Generation number : %s/%s" % (str(info["generation"]),str(Config.maxGeneration)),"blue")
printc("Bird number: %s/%s" % (str(info["birdnumber"]), str(Config.numberOfIndividuals)),"blue")
def showStat(folder):
fitnessovergeneration = pickle.load(open(folder + '/fitnessovergeneration.dat', 'rb'))
fittestovergeneration = pickle.load(open(folder + '/fittestovergeneration.dat', 'rb'))
bestfitness = pickle.load(open(folder + '/bestfitness.dat', 'rb'))
#Clear the screen
subprocess.call(["printf", "\033c"])
printc("Statistics of %s" % folder,"blue")
printc("-" * 20,"green")
    print "Number of generations: %s" % len(fittestovergeneration)
    printc("Best Fitness: %s" % bestfitness, "red")
plt.figure(1)
plt.plot(fittestovergeneration)
plt.plot(fitnessovergeneration)
plt.show()
sys.exit()
if __name__ == '__main__':
main()
| mit |
dexgecko/podsixnet_python3.4.1 | PodSixNet-78/PodSixNet/Channel.py | 1 | 1480 | import sys, traceback
from async import asynchat
from rencode import loads, dumps
class Channel(asynchat.async_chat):
    endchars = b'\0---\0'  # message terminator (bytes, for Python 3 sockets)
def __init__(self, conn=None, addr=(), server=None, map=None):
asynchat.async_chat.__init__(self, conn, map)
self.addr = addr
self._server = server
        self._ibuffer = b""
self.set_terminator(self.endchars)
self.sendqueue = []
def collect_incoming_data(self, data):
self._ibuffer += data
def found_terminator(self):
data = loads(self._ibuffer)
self._ibuffer = ""
        if isinstance(data, dict) and 'action' in data:
[getattr(self, n)(data) for n in ('Network', 'Network_' + data['action']) if hasattr(self, n)]
else:
print("OOB data (no such Network_action):", data)
def Pump(self):
[asynchat.async_chat.push(self, d) for d in self.sendqueue]
self.sendqueue = []
    def Send(self, data):
        """Returns the number of bytes sent after encoding."""
outgoing = dumps(data) + self.endchars
self.sendqueue.append(outgoing)
return len(outgoing)
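    # Hedged usage sketch (handler and payload invented): a subclass defines
    # methods like Network_move(self, data) and queues replies with
    # self.Send({'action': 'move', 'x': 1}); the rencoded bytes are flushed on
    # the next Pump() call, and found_terminator dispatches incoming dicts to
    # Network() and Network_<action>().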
def handle_connect(self):
if hasattr(self, "Connected"):
self.Connected()
else:
print("Unhandled Connected()")
def handle_error(self):
try:
self.close()
except:
pass
if hasattr(self, "Error"):
self.Error(sys.exc_info()[1])
else:
asynchat.async_chat.handle_error(self)
def handle_expt(self):
pass
def handle_close(self):
if hasattr(self, "Close"):
self.Close()
asynchat.async_chat.handle_close(self)
| lgpl-3.0 |
amitsela/beam | sdks/python/apache_beam/examples/cookbook/filters.py | 8 | 4069 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""An example workflow that demonstrates filters and other features.
- Reading and writing data from BigQuery.
- Manipulating BigQuery rows (as Python dicts) in memory.
- Global aggregates.
- Filtering PCollections using both user-specified parameters
as well as global aggregates computed during pipeline execution.
"""
from __future__ import absolute_import
import argparse
import logging
import apache_beam as beam
from apache_beam.pvalue import AsSingleton
def filter_cold_days(input_data, month_filter):
"""Workflow computing rows in a specific month with low temperatures.
Args:
input_data: a PCollection of dictionaries representing table rows. Each
dictionary must have the keys ['year', 'month', 'day', and 'mean_temp'].
month_filter: an int representing the month for which colder-than-average
days should be returned.
Returns:
A PCollection of dictionaries with the same keys described above. Each
row represents a day in the specified month where temperatures were
colder than the global mean temperature in the entire dataset.
"""
# Project to only the desired fields from a complete input row.
# E.g., SELECT f1, f2, f3, ... FROM InputTable.
projection_fields = ['year', 'month', 'day', 'mean_temp']
fields_of_interest = (
input_data
| 'Projected' >> beam.Map(
lambda row: {f: row[f] for f in projection_fields}))
# Compute the global mean temperature.
global_mean = AsSingleton(
fields_of_interest
| 'ExtractMean' >> beam.Map(lambda row: row['mean_temp'])
| 'GlobalMean' >> beam.combiners.Mean.Globally())
# Filter to the rows representing days in the month of interest
# in which the mean daily temperature is below the global mean.
return (
fields_of_interest
| 'DesiredMonth' >> beam.Filter(lambda row: row['month'] == month_filter)
| 'BelowMean' >> beam.Filter(
lambda row, mean: row['mean_temp'] < mean, global_mean))
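# Note (added): AsSingleton above turns the one-element global-mean PCollection
# into a side input, so at execution time the lambda in 'BelowMean' receives
# the materialized scalar mean as its second argument.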
def run(argv=None):
"""Constructs and runs the example filtering pipeline."""
parser = argparse.ArgumentParser()
parser.add_argument('--input',
help='BigQuery table to read from.',
default='clouddataflow-readonly:samples.weather_stations')
parser.add_argument('--output',
required=True,
help='BigQuery table to write to.')
parser.add_argument('--month_filter',
default=7,
help='Numeric value of month to filter on.')
known_args, pipeline_args = parser.parse_known_args(argv)
p = beam.Pipeline(argv=pipeline_args)
input_data = p | beam.Read(beam.io.BigQuerySource(known_args.input))
# pylint: disable=expression-not-assigned
(filter_cold_days(input_data, known_args.month_filter)
| 'SaveToBQ' >> beam.io.Write(beam.io.BigQuerySink(
known_args.output,
schema='year:INTEGER,month:INTEGER,day:INTEGER,mean_temp:FLOAT',
create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE)))
# Actually run the pipeline (all operations above are deferred).
p.run()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
run()
| apache-2.0 |
idem2lyon/persomov | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/screencastomatic.py | 149 | 1713 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
ExtractorError,
js_to_json,
)
class ScreencastOMaticIE(InfoExtractor):
_VALID_URL = r'https?://screencast-o-matic\.com/watch/(?P<id>[0-9a-zA-Z]+)'
_TEST = {
'url': 'http://screencast-o-matic.com/watch/c2lD3BeOPl',
'md5': '483583cb80d92588f15ccbedd90f0c18',
'info_dict': {
'id': 'c2lD3BeOPl',
'ext': 'mp4',
'title': 'Welcome to 3-4 Philosophy @ DECV!',
'thumbnail': 're:^https?://.*\.jpg$',
'description': 'as the title says! also: some general info re 1) VCE philosophy and 2) distance learning.',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
setup_js = self._search_regex(
r"(?s)jwplayer\('mp4Player'\).setup\((\{.*?\})\);",
webpage, 'setup code')
data = self._parse_json(setup_js, video_id, transform_source=js_to_json)
try:
video_data = next(
m for m in data['modes'] if m.get('type') == 'html5')
except StopIteration:
raise ExtractorError('Could not find any video entries!')
video_url = compat_urlparse.urljoin(url, video_data['config']['file'])
thumbnail = data.get('image')
return {
'id': video_id,
'title': self._og_search_title(webpage),
'description': self._og_search_description(webpage),
'url': video_url,
'ext': 'mp4',
'thumbnail': thumbnail,
}
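        # Hedged sketch (structure assumed from the regex above): the scraped
        # setup blob looks roughly like
        #     jwplayer('mp4Player').setup({"modes": [{"type": "html5",
        #         "config": {"file": "..."}}], "image": "..."});
        # and only the html5 mode's file URL is used.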
| gpl-3.0 |
nomadcube/scikit-learn | sklearn/feature_selection/tests/test_chi2.py | 221 | 2398 | """
Tests for chi2, currently the only feature selection function designed
specifically to work with sparse matrices.
"""
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
import scipy.stats
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.feature_selection.univariate_selection import _chisquare
from nose.tools import assert_raises
from numpy.testing import assert_equal, assert_array_almost_equal
# Feature 0 is highly informative for class 1;
# feature 1 is the same everywhere;
# feature 2 is a bit informative for class 2.
X = [[2, 1, 2],
[9, 1, 1],
[6, 1, 2],
[0, 1, 2]]
y = [0, 1, 2, 2]
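# Worked illustration (hand computation, added; the tests below do not assert
# these numbers): for feature 0 the per-class observed sums are [2, 9, 6] and,
# with class probabilities [1/4, 1/4, 1/2] and a feature total of 17, the
# expected counts are [4.25, 4.25, 8.5], giving chi2 = sum((O-E)**2/E) ~= 7.24,
# the largest of the three features -- hence feature 0 is selected first.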
def mkchi2(k):
"""Make k-best chi2 selector"""
return SelectKBest(chi2, k=k)
def test_chi2():
# Test Chi2 feature extraction
    chi2 = mkchi2(k=1).fit(X, y)
assert_equal(chi2.get_support(indices=True), [0])
assert_equal(chi2.transform(X), np.array(X)[:, [0]])
chi2 = mkchi2(k=2).fit(X, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xsp = csr_matrix(X, dtype=np.float)
chi2 = mkchi2(k=2).fit(Xsp, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xtrans = chi2.transform(Xsp)
assert_equal(Xtrans.shape, [Xsp.shape[0], 2])
# == doesn't work on scipy.sparse matrices
Xtrans = Xtrans.toarray()
Xtrans2 = mkchi2(k=2).fit_transform(Xsp, y).toarray()
assert_equal(Xtrans, Xtrans2)
def test_chi2_coo():
# Check that chi2 works with a COO matrix
# (as returned by CountVectorizer, DictVectorizer)
Xcoo = coo_matrix(X)
mkchi2(k=2).fit_transform(Xcoo, y)
# if we got here without an exception, we're safe
def test_chi2_negative():
# Check for proper error on negative numbers in the input X.
X, y = [[0, 1], [-1e-20, 1]], [0, 1]
for X in (X, np.array(X), csr_matrix(X)):
assert_raises(ValueError, chi2, X, y)
def test_chisquare():
# Test replacement for scipy.stats.chisquare against the original.
obs = np.array([[2., 2.],
[1., 1.]])
exp = np.array([[1.5, 1.5],
[1.5, 1.5]])
# call SciPy first because our version overwrites obs
chi_scp, p_scp = scipy.stats.chisquare(obs, exp)
chi_our, p_our = _chisquare(obs, exp)
assert_array_almost_equal(chi_scp, chi_our)
assert_array_almost_equal(p_scp, p_our)
| bsd-3-clause |
andsor/numpy | numpy/ma/core.py | 23 | 236882 | """
numpy.ma : a package to handle missing or invalid values.
This package was initially written for numarray by Paul F. Dubois
at Lawrence Livermore National Laboratory.
In 2006, the package was completely rewritten by Pierre Gerard-Marchant
(University of Georgia) to make the MaskedArray class a subclass of ndarray,
and to improve support of structured arrays.
Copyright 1999, 2000, 2001 Regents of the University of California.
Released for unlimited redistribution.
* Adapted for numpy_core 2005 by Travis Oliphant and (mainly) Paul Dubois.
* Subclassing of the base `ndarray` 2006 by Pierre Gerard-Marchant
(pgmdevlist_AT_gmail_DOT_com)
* Improvements suggested by Reggie Dugard (reggie_AT_merfinllc_DOT_com)
.. moduleauthor:: Pierre Gerard-Marchant
"""
# pylint: disable-msg=E1002
from __future__ import division, absolute_import, print_function
import sys
import warnings
from functools import reduce
import numpy as np
import numpy.core.umath as umath
import numpy.core.numerictypes as ntypes
from numpy import ndarray, amax, amin, iscomplexobj, bool_
from numpy import array as narray
from numpy.lib.function_base import angle
from numpy.compat import getargspec, formatargspec, long, basestring
from numpy import expand_dims as n_expand_dims
if sys.version_info[0] >= 3:
import pickle
else:
import cPickle as pickle
__author__ = "Pierre GF Gerard-Marchant"
__docformat__ = "restructuredtext en"
__all__ = ['MAError', 'MaskError', 'MaskType', 'MaskedArray',
'bool_',
'abs', 'absolute', 'add', 'all', 'allclose', 'allequal', 'alltrue',
'amax', 'amin', 'angle', 'anom', 'anomalies', 'any', 'append', 'arange',
'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2',
'arctanh', 'argmax', 'argmin', 'argsort', 'around',
'array', 'asarray', 'asanyarray',
'bitwise_and', 'bitwise_or', 'bitwise_xor',
'ceil', 'choose', 'clip', 'common_fill_value', 'compress',
'compressed', 'concatenate', 'conjugate', 'copy', 'cos', 'cosh',
'count', 'cumprod', 'cumsum',
'default_fill_value', 'diag', 'diagonal', 'diff', 'divide', 'dump',
'dumps',
'empty', 'empty_like', 'equal', 'exp', 'expand_dims',
'fabs', 'flatten_mask', 'fmod', 'filled', 'floor', 'floor_divide',
'fix_invalid', 'flatten_structured_array', 'frombuffer', 'fromflex',
'fromfunction',
'getdata', 'getmask', 'getmaskarray', 'greater', 'greater_equal',
'harden_mask', 'hypot',
'identity', 'ids', 'indices', 'inner', 'innerproduct',
'isMA', 'isMaskedArray', 'is_mask', 'is_masked', 'isarray',
'left_shift', 'less', 'less_equal', 'load', 'loads', 'log', 'log2',
'log10', 'logical_and', 'logical_not', 'logical_or', 'logical_xor',
'make_mask', 'make_mask_descr', 'make_mask_none', 'mask_or',
'masked', 'masked_array', 'masked_equal', 'masked_greater',
'masked_greater_equal', 'masked_inside', 'masked_invalid',
'masked_less', 'masked_less_equal', 'masked_not_equal',
'masked_object', 'masked_outside', 'masked_print_option',
'masked_singleton', 'masked_values', 'masked_where', 'max', 'maximum',
'maximum_fill_value', 'mean', 'min', 'minimum', 'minimum_fill_value',
'mod', 'multiply', 'mvoid',
'negative', 'nomask', 'nonzero', 'not_equal',
'ones', 'outer', 'outerproduct',
'power', 'prod', 'product', 'ptp', 'put', 'putmask',
'rank', 'ravel', 'remainder', 'repeat', 'reshape', 'resize',
'right_shift', 'round_', 'round',
'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'sometrue',
'sort', 'soften_mask', 'sqrt', 'squeeze', 'std', 'subtract', 'sum',
'swapaxes',
'take', 'tan', 'tanh', 'trace', 'transpose', 'true_divide',
'var', 'where',
'zeros']
MaskType = np.bool_
nomask = MaskType(0)
def doc_note(initialdoc, note):
"""
Adds a Notes section to an existing docstring.
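A rough sketch of typical use (editor's illustration; exact whitespace
follows the template below):

>>> "Notes" in doc_note("Original summary.", "Extra note.")
True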
"""
if initialdoc is None:
return
if note is None:
return initialdoc
newdoc = """
%s
Notes
-----
%s
"""
return newdoc % (initialdoc, note)
def get_object_signature(obj):
"""
Get the signature from obj
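A minimal sketch (editor's illustration):

>>> def f(a, b=1):
...     pass
>>> get_object_signature(f)
'(a, b=1)'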
"""
try:
sig = formatargspec(*getargspec(obj))
except TypeError as errmsg:
sig = ''
# msg = "Unable to retrieve the signature of %s '%s'\n"\
# "(Initial error message: %s)"
# warnings.warn(msg % (type(obj),
# getattr(obj, '__name__', '???'),
# errmsg))
return sig
#####--------------------------------------------------------------------------
#---- --- Exceptions ---
#####--------------------------------------------------------------------------
class MAError(Exception):
"""Class for masked array related errors."""
pass
class MaskError(MAError):
"Class for mask related errors."
pass
#####--------------------------------------------------------------------------
#---- --- Filling options ---
#####--------------------------------------------------------------------------
# b: boolean - c: complex - f: floats - i: integer - O: object - S: string
default_filler = {'b': True,
'c' : 1.e20 + 0.0j,
'f' : 1.e20,
'i' : 999999,
'O' : '?',
'S' : 'N/A',
'u' : 999999,
'V' : '???',
'U' : 'N/A'
}
# Add datetime64 and timedelta64 types
for v in ["Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps",
"fs", "as"]:
default_filler["M8[" + v + "]"] = np.datetime64("NaT", v)
default_filler["m8[" + v + "]"] = np.timedelta64("NaT", v)
max_filler = ntypes._minvals
max_filler.update([(k, -np.inf) for k in [np.float32, np.float64]])
min_filler = ntypes._maxvals
min_filler.update([(k, +np.inf) for k in [np.float32, np.float64]])
if 'float128' in ntypes.typeDict:
max_filler.update([(np.float128, -np.inf)])
min_filler.update([(np.float128, +np.inf)])
def default_fill_value(obj):
"""
Return the default fill value for the argument object.
The default filling value depends on the datatype of the input
array or the type of the input scalar:
======== ========
datatype default
======== ========
bool True
int 999999
float 1.e20
complex 1.e20+0j
object '?'
string 'N/A'
======== ========
Parameters
----------
obj : ndarray, dtype or scalar
The array data-type or scalar for which the default fill value
is returned.
Returns
-------
fill_value : scalar
The default fill value.
Examples
--------
>>> np.ma.default_fill_value(1)
999999
>>> np.ma.default_fill_value(np.array([1.1, 2., np.pi]))
1e+20
>>> np.ma.default_fill_value(np.dtype(complex))
(1e+20+0j)
"""
if hasattr(obj, 'dtype'):
defval = _check_fill_value(None, obj.dtype)
elif isinstance(obj, np.dtype):
if obj.subdtype:
defval = default_filler.get(obj.subdtype[0].kind, '?')
elif obj.kind in 'Mm':
defval = default_filler.get(obj.str[1:], '?')
else:
defval = default_filler.get(obj.kind, '?')
elif isinstance(obj, float):
defval = default_filler['f']
elif isinstance(obj, int) or isinstance(obj, long):
defval = default_filler['i']
elif isinstance(obj, str):
defval = default_filler['S']
elif isinstance(obj, unicode):
defval = default_filler['U']
elif isinstance(obj, complex):
defval = default_filler['c']
else:
defval = default_filler['O']
return defval
def _recursive_extremum_fill_value(ndtype, extremum):
names = ndtype.names
if names:
deflist = []
for name in names:
fval = _recursive_extremum_fill_value(ndtype[name], extremum)
deflist.append(fval)
return tuple(deflist)
return extremum[ndtype]
def minimum_fill_value(obj):
"""
Return the maximum value that can be represented by the dtype of an object.
This function is useful for calculating a fill value suitable for
taking the minimum of an array with a given dtype.
Parameters
----------
obj : ndarray or dtype
An object that can be queried for its numeric type.
Returns
-------
val : scalar
The maximum representable value.
Raises
------
TypeError
If `obj` isn't a suitable numeric type.
See Also
--------
maximum_fill_value : The inverse function.
set_fill_value : Set the filling value of a masked array.
MaskedArray.fill_value : Return current fill value.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.int8()
>>> ma.minimum_fill_value(a)
127
>>> a = np.int32()
>>> ma.minimum_fill_value(a)
2147483647
An array of numeric data can also be passed.
>>> a = np.array([1, 2, 3], dtype=np.int8)
>>> ma.minimum_fill_value(a)
127
>>> a = np.array([1, 2, 3], dtype=np.float32)
>>> ma.minimum_fill_value(a)
inf
"""
errmsg = "Unsuitable type for calculating minimum."
if hasattr(obj, 'dtype'):
return _recursive_extremum_fill_value(obj.dtype, min_filler)
elif isinstance(obj, float):
return min_filler[ntypes.typeDict['float_']]
elif isinstance(obj, int):
return min_filler[ntypes.typeDict['int_']]
elif isinstance(obj, long):
return min_filler[ntypes.typeDict['uint']]
elif isinstance(obj, np.dtype):
return min_filler[obj]
else:
raise TypeError(errmsg)
def maximum_fill_value(obj):
"""
Return the minimum value that can be represented by the dtype of an object.
This function is useful for calculating a fill value suitable for
taking the maximum of an array with a given dtype.
Parameters
----------
obj : {ndarray, dtype}
An object that can be queried for its numeric type.
Returns
-------
val : scalar
The minimum representable value.
Raises
------
TypeError
If `obj` isn't a suitable numeric type.
See Also
--------
minimum_fill_value : The inverse function.
set_fill_value : Set the filling value of a masked array.
MaskedArray.fill_value : Return current fill value.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.int8()
>>> ma.maximum_fill_value(a)
-128
>>> a = np.int32()
>>> ma.maximum_fill_value(a)
-2147483648
An array of numeric data can also be passed.
>>> a = np.array([1, 2, 3], dtype=np.int8)
>>> ma.maximum_fill_value(a)
-128
>>> a = np.array([1, 2, 3], dtype=np.float32)
>>> ma.maximum_fill_value(a)
-inf
"""
errmsg = "Unsuitable type for calculating maximum."
if hasattr(obj, 'dtype'):
return _recursive_extremum_fill_value(obj.dtype, max_filler)
elif isinstance(obj, float):
return max_filler[ntypes.typeDict['float_']]
elif isinstance(obj, int):
return max_filler[ntypes.typeDict['int_']]
elif isinstance(obj, long):
return max_filler[ntypes.typeDict['uint']]
elif isinstance(obj, np.dtype):
return max_filler[obj]
else:
raise TypeError(errmsg)
def _recursive_set_default_fill_value(dtypedescr):
deflist = []
for currentdescr in dtypedescr:
currenttype = currentdescr[1]
if isinstance(currenttype, list):
deflist.append(tuple(_recursive_set_default_fill_value(currenttype)))
else:
deflist.append(default_fill_value(np.dtype(currenttype)))
return tuple(deflist)
def _recursive_set_fill_value(fillvalue, dtypedescr):
fillvalue = np.resize(fillvalue, len(dtypedescr))
output_value = []
for (fval, descr) in zip(fillvalue, dtypedescr):
cdtype = descr[1]
if isinstance(cdtype, list):
output_value.append(tuple(_recursive_set_fill_value(fval, cdtype)))
else:
output_value.append(np.array(fval, dtype=cdtype).item())
return tuple(output_value)
def _check_fill_value(fill_value, ndtype):
"""
Private function validating the given `fill_value` for the given dtype.
If fill_value is None, it is set to the default corresponding to the dtype
if this latter is standard (no fields). If the datatype is flexible (named
fields), fill_value is set to a tuple whose elements are the default fill
values corresponding to each field.
If fill_value is not None, its value is forced to the given dtype.
"""
ndtype = np.dtype(ndtype)
fields = ndtype.fields
if fill_value is None:
if fields:
descr = ndtype.descr
fill_value = np.array(_recursive_set_default_fill_value(descr),
dtype=ndtype,)
else:
fill_value = default_fill_value(ndtype)
elif fields:
fdtype = [(_[0], _[1]) for _ in ndtype.descr]
if isinstance(fill_value, (ndarray, np.void)):
try:
fill_value = np.array(fill_value, copy=False, dtype=fdtype)
except ValueError:
err_msg = "Unable to transform %s to dtype %s"
raise ValueError(err_msg % (fill_value, fdtype))
else:
descr = ndtype.descr
fill_value = np.asarray(fill_value, dtype=object)
fill_value = np.array(_recursive_set_fill_value(fill_value, descr),
dtype=ndtype)
else:
if isinstance(fill_value, basestring) and (ndtype.char not in 'OSVU'):
err_msg = "Cannot set fill value of string with array of dtype %s"
raise TypeError(err_msg % ndtype)
else:
# In case we want to convert 1e20 to int...
try:
fill_value = np.array(fill_value, copy=False, dtype=ndtype)
except OverflowError:
# Raise TypeError instead of OverflowError. OverflowError
# is seldom used, and the real problem here is that the
# passed fill_value is not compatible with the ndtype.
err_msg = "Fill value %s overflows dtype %s"
raise TypeError(err_msg % (fill_value, ndtype))
return np.array(fill_value)
def set_fill_value(a, fill_value):
"""
Set the filling value of a, if a is a masked array.
This function changes the fill value of the masked array `a` in place.
If `a` is not a masked array, the function returns silently, without
doing anything.
Parameters
----------
a : array_like
Input array.
fill_value : dtype
Filling value. A consistency test is performed to make sure
the value is compatible with the dtype of `a`.
Returns
-------
None
Nothing returned by this function.
See Also
--------
default_fill_value : Return the default fill value for a dtype.
MaskedArray.fill_value : Return current fill value.
MaskedArray.set_fill_value : Equivalent method.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(5)
>>> a
array([0, 1, 2, 3, 4])
>>> a = ma.masked_where(a < 3, a)
>>> a
masked_array(data = [-- -- -- 3 4],
mask = [ True True True False False],
fill_value=999999)
>>> ma.set_fill_value(a, -999)
>>> a
masked_array(data = [-- -- -- 3 4],
mask = [ True True True False False],
fill_value=-999)
Nothing happens if `a` is not a masked array.
>>> a = range(5)
>>> a
[0, 1, 2, 3, 4]
>>> ma.set_fill_value(a, 100)
>>> a
[0, 1, 2, 3, 4]
>>> a = np.arange(5)
>>> a
array([0, 1, 2, 3, 4])
>>> ma.set_fill_value(a, 100)
>>> a
array([0, 1, 2, 3, 4])
"""
if isinstance(a, MaskedArray):
a.set_fill_value(fill_value)
return
def get_fill_value(a):
"""
Return the filling value of `a`, if any. Otherwise, return the
default fill value for that type.
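A minimal sketch (editor's illustration):

>>> get_fill_value(np.ma.array([1, 2], fill_value=-1))
-1
>>> get_fill_value([1, 2]) == default_fill_value([1, 2])
True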
"""
if isinstance(a, MaskedArray):
result = a.fill_value
else:
result = default_fill_value(a)
return result
def common_fill_value(a, b):
"""
Return the common filling value of two masked arrays, if any.
If ``a.fill_value == b.fill_value``, return the fill value,
otherwise return None.
Parameters
----------
a, b : MaskedArray
The masked arrays for which to compare fill values.
Returns
-------
fill_value : scalar or None
The common fill value, or None.
Examples
--------
>>> x = np.ma.array([0, 1.], fill_value=3)
>>> y = np.ma.array([0, 1.], fill_value=3)
>>> np.ma.common_fill_value(x, y)
3.0
"""
t1 = get_fill_value(a)
t2 = get_fill_value(b)
if t1 == t2:
return t1
return None
#####--------------------------------------------------------------------------
def filled(a, fill_value=None):
"""
Return input as an array with masked data replaced by a fill value.
If `a` is not a `MaskedArray`, `a` itself is returned.
If `a` is a `MaskedArray` and `fill_value` is None, `fill_value` is set to
``a.fill_value``.
Parameters
----------
a : MaskedArray or array_like
An input object.
fill_value : scalar, optional
Filling value. Default is None.
Returns
-------
a : ndarray
The filled array.
See Also
--------
compressed
Examples
--------
>>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0],
... [1, 0, 0],
... [0, 0, 0]])
>>> x.filled()
array([[999999, 1, 2],
[999999, 4, 5],
[ 6, 7, 8]])
"""
if hasattr(a, 'filled'):
return a.filled(fill_value)
elif isinstance(a, ndarray):
# Should we also check for contiguity (a.flags['CONTIGUOUS'])?
return a
elif isinstance(a, dict):
return np.array(a, 'O')
else:
return np.array(a)
#####--------------------------------------------------------------------------
def get_masked_subclass(*arrays):
"""
Return the youngest subclass of MaskedArray from a list of (masked) arrays.
In case of siblings, the first listed takes over.
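A small sketch (editor's illustration; ``MySub`` is a hypothetical subclass):

>>> class MySub(MaskedArray):
...     pass
>>> get_masked_subclass(masked_array([1]), MySub([1])) is MySub
True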
"""
if len(arrays) == 1:
arr = arrays[0]
if isinstance(arr, MaskedArray):
rcls = type(arr)
else:
rcls = MaskedArray
else:
arrcls = [type(a) for a in arrays]
rcls = arrcls[0]
if not issubclass(rcls, MaskedArray):
rcls = MaskedArray
for cls in arrcls[1:]:
if issubclass(cls, rcls):
rcls = cls
# Don't return MaskedConstant as result: revert to MaskedArray
if rcls.__name__ == 'MaskedConstant':
return MaskedArray
return rcls
#####--------------------------------------------------------------------------
def getdata(a, subok=True):
"""
Return the data of a masked array as an ndarray.
Return the data of `a` (if any) as an ndarray if `a` is a ``MaskedArray``,
else return `a` as a ndarray or subclass (depending on `subok`) if not.
Parameters
----------
a : array_like
Input ``MaskedArray``, alternatively a ndarray or a subclass thereof.
subok : bool
Whether to force the output to be a `pure` ndarray (False) or to
return a subclass of ndarray if appropriate (True, default).
See Also
--------
getmask : Return the mask of a masked array, or nomask.
getmaskarray : Return the mask of a masked array, or full array of False.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.masked_equal([[1,2],[3,4]], 2)
>>> a
masked_array(data =
[[1 --]
[3 4]],
mask =
[[False True]
[False False]],
fill_value=999999)
>>> ma.getdata(a)
array([[1, 2],
[3, 4]])
Equivalently use the ``MaskedArray`` `data` attribute.
>>> a.data
array([[1, 2],
[3, 4]])
"""
try:
data = a._data
except AttributeError:
data = np.array(a, copy=False, subok=subok)
if not subok:
return data.view(ndarray)
return data
get_data = getdata
def fix_invalid(a, mask=nomask, copy=True, fill_value=None):
"""
Return input with invalid data masked and replaced by a fill value.
Invalid data means values of `nan`, `inf`, etc.
Parameters
----------
a : array_like
Input array, a (subclass of) ndarray.
copy : bool, optional
Whether to use a copy of `a` (True) or to fix `a` in place (False).
Default is True.
fill_value : scalar, optional
Value used for fixing invalid data. Default is None, in which case
the ``a.fill_value`` is used.
Returns
-------
b : MaskedArray
The input array with invalid entries fixed.
Notes
-----
A copy is performed by default.
Examples
--------
>>> x = np.ma.array([1., -1, np.nan, np.inf], mask=[1] + [0]*3)
>>> x
masked_array(data = [-- -1.0 nan inf],
mask = [ True False False False],
fill_value = 1e+20)
>>> np.ma.fix_invalid(x)
masked_array(data = [-- -1.0 -- --],
mask = [ True False True True],
fill_value = 1e+20)
>>> fixed = np.ma.fix_invalid(x)
>>> fixed.data
array([ 1.00000000e+00, -1.00000000e+00, 1.00000000e+20,
1.00000000e+20])
>>> x.data
array([ 1., -1., NaN, Inf])
"""
a = masked_array(a, copy=copy, mask=mask, subok=True)
#invalid = (numpy.isnan(a._data) | numpy.isinf(a._data))
invalid = np.logical_not(np.isfinite(a._data))
if not invalid.any():
return a
a._mask |= invalid
if fill_value is None:
fill_value = a.fill_value
a._data[invalid] = fill_value
return a
#####--------------------------------------------------------------------------
#---- --- Ufuncs ---
#####--------------------------------------------------------------------------
ufunc_domain = {}
ufunc_fills = {}
class _DomainCheckInterval:
"""
Define a valid interval, so that:
``domain_check_interval(a,b)(x) == True`` where
``x < a`` or ``x > b``.
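A minimal sketch (editor's illustration):

>>> _DomainCheckInterval(-1, 1)(np.array([-2., 0., 2.])).tolist()
[True, False, True]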
"""
def __init__(self, a, b):
"domain_check_interval(a,b)(x) = true where x < a or y > b"
if (a > b):
(a, b) = (b, a)
self.a = a
self.b = b
def __call__ (self, x):
"Execute the call behavior."
return umath.logical_or(umath.greater (x, self.b),
umath.less(x, self.a))
class _DomainTan:
"""Define a valid interval for the `tan` function, so that:
``domain_tan(eps) = True`` where ``abs(cos(x)) < eps``
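A minimal sketch (editor's illustration, with a deliberately loose `eps`):

>>> _DomainTan(1e-10)(np.array([np.pi / 2, 0.0])).tolist()
[True, False]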
"""
def __init__(self, eps):
"domain_tan(eps) = true where abs(cos(x)) < eps)"
self.eps = eps
def __call__ (self, x):
"Executes the call behavior."
return umath.less(umath.absolute(umath.cos(x)), self.eps)
class _DomainSafeDivide:
"""Define a domain for safe division."""
def __init__ (self, tolerance=None):
self.tolerance = tolerance
def __call__ (self, a, b):
# Delay the selection of the tolerance to here in order to reduce numpy
# import times. The calculation of these parameters is a substantial
# component of numpy's import time.
if self.tolerance is None:
self.tolerance = np.finfo(float).tiny
# don't call ma ufuncs from __array_wrap__ which would fail for scalars
a, b = np.asarray(a), np.asarray(b)
return umath.absolute(a) * self.tolerance >= umath.absolute(b)
class _DomainGreater:
"""DomainGreater(v)(x) is True where x <= v."""
def __init__(self, critical_value):
"DomainGreater(v)(x) = true where x <= v"
self.critical_value = critical_value
def __call__ (self, x):
"Executes the call behavior."
return umath.less_equal(x, self.critical_value)
class _DomainGreaterEqual:
"""DomainGreaterEqual(v)(x) is True where x < v."""
def __init__(self, critical_value):
"DomainGreaterEqual(v)(x) = true where x < v"
self.critical_value = critical_value
def __call__ (self, x):
"Executes the call behavior."
return umath.less(x, self.critical_value)
#..............................................................................
class _MaskedUnaryOperation:
"""
Defines masked version of unary operations, where invalid values are
pre-masked.
Parameters
----------
mufunc : callable
The function for which to define a masked version. Made available
as ``_MaskedUnaryOperation.f``.
fill : scalar, optional
Filling value, default is 0.
domain : class instance
Domain for the function. Should be one of the ``_Domain*``
classes. Default is None.
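Examples
--------
A minimal sketch (editor's illustration): the domained ``log`` defined
below masks out-of-domain inputs instead of warning.

>>> log(np.ma.array([-1., 1.])).mask.tolist()
[True, False]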
"""
def __init__ (self, mufunc, fill=0, domain=None):
""" _MaskedUnaryOperation(aufunc, fill=0, domain=None)
aufunc(fill) must be defined
self(x) returns aufunc(x)
with masked values where domain(x) is true or getmask(x) is true.
"""
self.f = mufunc
self.fill = fill
self.domain = domain
self.__doc__ = getattr(mufunc, "__doc__", str(mufunc))
self.__name__ = getattr(mufunc, "__name__", str(mufunc))
ufunc_domain[mufunc] = domain
ufunc_fills[mufunc] = fill
#
def __call__ (self, a, *args, **kwargs):
"Execute the call behavior."
d = getdata(a)
# Case 1.1. : Domained function
if self.domain is not None:
with np.errstate(divide='ignore', invalid='ignore'):
result = self.f(d, *args, **kwargs)
# Make a mask
m = ~umath.isfinite(result)
m |= self.domain(d)
m |= getmask(a)
# Case 1.2. : Function without a domain
else:
# Get the result and the mask
result = self.f(d, *args, **kwargs)
m = getmask(a)
# Case 2.1. : The result is a scalar
if not result.ndim:
if m:
return masked
return result
# Case 2.2. The result is an array
# We need to fill the invalid data back w/ the input
# Now, that's plain silly: in C, we would just skip the element and keep
# the original, but we do have to do it that way in Python
if m is not nomask:
# In case result has a lower dtype than the inputs (as in equal)
try:
np.copyto(result, d, where=m)
except TypeError:
pass
# Transform the result to a (subclass of) MaskedArray
if isinstance(a, MaskedArray):
subtype = type(a)
else:
subtype = MaskedArray
result = result.view(subtype)
result._mask = m
result._update_from(a)
return result
#
def __str__ (self):
return "Masked version of %s. [Invalid values are masked]" % str(self.f)
class _MaskedBinaryOperation:
"""
Define masked version of binary operations, where invalid
values are pre-masked.
Parameters
----------
mbfunc : function
The function for which to define a masked version. Made available
as ``_MaskedBinaryOperation.f``.
domain : class instance
Default domain for the function. Should be one of the ``_Domain*``
classes. Default is None.
fillx : scalar, optional
Filling value for the first argument, default is 0.
filly : scalar, optional
Filling value for the second argument, default is 0.
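Examples
--------
A minimal sketch (editor's illustration): masks propagate to the result.

>>> add(np.ma.array([1, 2], mask=[False, True]), [10, 20]).mask.tolist()
[False, True]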
"""
def __init__ (self, mbfunc, fillx=0, filly=0):
"""abfunc(fillx, filly) must be defined.
abfunc(x, filly) = x for all x to enable reduce.
"""
self.f = mbfunc
self.fillx = fillx
self.filly = filly
self.__doc__ = getattr(mbfunc, "__doc__", str(mbfunc))
self.__name__ = getattr(mbfunc, "__name__", str(mbfunc))
ufunc_domain[mbfunc] = None
ufunc_fills[mbfunc] = (fillx, filly)
def __call__ (self, a, b, *args, **kwargs):
"Execute the call behavior."
# Get the data, as ndarray
(da, db) = (getdata(a, subok=False), getdata(b, subok=False))
# Get the mask
(ma, mb) = (getmask(a), getmask(b))
if ma is nomask:
if mb is nomask:
m = nomask
else:
m = umath.logical_or(getmaskarray(a), mb)
elif mb is nomask:
m = umath.logical_or(ma, getmaskarray(b))
else:
m = umath.logical_or(ma, mb)
# Get the result
with np.errstate(divide='ignore', invalid='ignore'):
result = self.f(da, db, *args, **kwargs)
# check it worked
if result is NotImplemented:
return NotImplemented
# Case 1. : scalar
if not result.ndim:
if m:
return masked
return result
# Case 2. : array
# Revert result to da where masked
if m is not nomask:
np.copyto(result, da, casting='unsafe', where=m)
# Transforms to a (subclass of) MaskedArray
result = result.view(get_masked_subclass(a, b))
result._mask = m
# Update the optional info from the inputs
if isinstance(b, MaskedArray):
if isinstance(a, MaskedArray):
result._update_from(a)
else:
result._update_from(b)
elif isinstance(a, MaskedArray):
result._update_from(a)
return result
def reduce(self, target, axis=0, dtype=None):
"""Reduce `target` along the given `axis`."""
if isinstance(target, MaskedArray):
tclass = type(target)
else:
tclass = MaskedArray
m = getmask(target)
t = filled(target, self.filly)
if t.shape == ():
t = t.reshape(1)
if m is not nomask:
m = make_mask(m, copy=1)
m.shape = (1,)
if m is nomask:
return self.f.reduce(t, axis).view(tclass)
t = t.view(tclass)
t._mask = m
tr = self.f.reduce(getdata(t), axis, dtype=dtype or t.dtype)
mr = umath.logical_and.reduce(m, axis)
tr = tr.view(tclass)
if mr.ndim > 0:
tr._mask = mr
return tr
elif mr:
return masked
return tr
def outer (self, a, b):
"""Return the function applied to the outer product of a and b.
"""
ma = getmask(a)
mb = getmask(b)
if ma is nomask and mb is nomask:
m = nomask
else:
ma = getmaskarray(a)
mb = getmaskarray(b)
m = umath.logical_or.outer(ma, mb)
if (not m.ndim) and m:
return masked
(da, db) = (getdata(a), getdata(b))
d = self.f.outer(da, db)
# check it worked
if d is NotImplemented:
return NotImplemented
if m is not nomask:
np.copyto(d, da, where=m)
if d.shape:
d = d.view(get_masked_subclass(a, b))
d._mask = m
return d
def accumulate (self, target, axis=0):
"""Accumulate `target` along `axis` after filling with y fill
value.
"""
if isinstance(target, MaskedArray):
tclass = type(target)
else:
tclass = MaskedArray
t = filled(target, self.filly)
return self.f.accumulate(t, axis).view(tclass)
def __str__ (self):
return "Masked version of " + str(self.f)
class _DomainedBinaryOperation:
"""
Define binary operations that have a domain, like divide.
They have no reduce, outer or accumulate.
Parameters
----------
dbfunc : function
The function for which to define a masked version. Made available
as ``_DomainedBinaryOperation.f``.
domain : class instance
Default domain for the function. Should be one of the ``_Domain*``
classes.
fillx : scalar, optional
Filling value for the first argument, default is 0.
filly : scalar, optional
Filling value for the second argument, default is 0.
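Examples
--------
A minimal sketch (editor's illustration): division by zero is masked
rather than raising or warning.

>>> divide(np.ma.array([1., 2.]), [1., 0.]).mask.tolist()
[False, True]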
"""
def __init__ (self, dbfunc, domain, fillx=0, filly=0):
"""abfunc(fillx, filly) must be defined.
abfunc(x, filly) = x for all x to enable reduce.
"""
self.f = dbfunc
self.domain = domain
self.fillx = fillx
self.filly = filly
self.__doc__ = getattr(dbfunc, "__doc__", str(dbfunc))
self.__name__ = getattr(dbfunc, "__name__", str(dbfunc))
ufunc_domain[dbfunc] = domain
ufunc_fills[dbfunc] = (fillx, filly)
def __call__(self, a, b, *args, **kwargs):
"Execute the call behavior."
# Get the data and the mask
(da, db) = (getdata(a, subok=False), getdata(b, subok=False))
(ma, mb) = (getmask(a), getmask(b))
# Get the result
with np.errstate(divide='ignore', invalid='ignore'):
result = self.f(da, db, *args, **kwargs)
# check it worked
if result is NotImplemented:
return NotImplemented
# Get the mask as a combination of ma, mb and invalid
m = ~umath.isfinite(result)
m |= ma
m |= mb
# Apply the domain
domain = ufunc_domain.get(self.f, None)
if domain is not None:
m |= filled(domain(da, db), True)
# Take care of the scalar case first
if (not m.ndim):
if m:
return masked
else:
return result
# When the mask is True, put back da
np.copyto(result, da, casting='unsafe', where=m)
result = result.view(get_masked_subclass(a, b))
result._mask = m
if isinstance(b, MaskedArray):
if isinstance(a, MaskedArray):
result._update_from(a)
else:
result._update_from(b)
elif isinstance(a, MaskedArray):
result._update_from(a)
return result
def __str__ (self):
return "Masked version of " + str(self.f)
#..............................................................................
# Unary ufuncs
exp = _MaskedUnaryOperation(umath.exp)
conjugate = _MaskedUnaryOperation(umath.conjugate)
sin = _MaskedUnaryOperation(umath.sin)
cos = _MaskedUnaryOperation(umath.cos)
tan = _MaskedUnaryOperation(umath.tan)
arctan = _MaskedUnaryOperation(umath.arctan)
arcsinh = _MaskedUnaryOperation(umath.arcsinh)
sinh = _MaskedUnaryOperation(umath.sinh)
cosh = _MaskedUnaryOperation(umath.cosh)
tanh = _MaskedUnaryOperation(umath.tanh)
abs = absolute = _MaskedUnaryOperation(umath.absolute)
angle = _MaskedUnaryOperation(angle) # from numpy.lib.function_base
fabs = _MaskedUnaryOperation(umath.fabs)
negative = _MaskedUnaryOperation(umath.negative)
floor = _MaskedUnaryOperation(umath.floor)
ceil = _MaskedUnaryOperation(umath.ceil)
around = _MaskedUnaryOperation(np.round_)
logical_not = _MaskedUnaryOperation(umath.logical_not)
# Domained unary ufuncs .......................................................
sqrt = _MaskedUnaryOperation(umath.sqrt, 0.0,
_DomainGreaterEqual(0.0))
log = _MaskedUnaryOperation(umath.log, 1.0,
_DomainGreater(0.0))
log2 = _MaskedUnaryOperation(umath.log2, 1.0,
_DomainGreater(0.0))
log10 = _MaskedUnaryOperation(umath.log10, 1.0,
_DomainGreater(0.0))
tan = _MaskedUnaryOperation(umath.tan, 0.0,
_DomainTan(1e-35))
arcsin = _MaskedUnaryOperation(umath.arcsin, 0.0,
_DomainCheckInterval(-1.0, 1.0))
arccos = _MaskedUnaryOperation(umath.arccos, 0.0,
_DomainCheckInterval(-1.0, 1.0))
arccosh = _MaskedUnaryOperation(umath.arccosh, 1.0,
_DomainGreaterEqual(1.0))
arctanh = _MaskedUnaryOperation(umath.arctanh, 0.0,
_DomainCheckInterval(-1.0 + 1e-15, 1.0 - 1e-15))
# Binary ufuncs ...............................................................
add = _MaskedBinaryOperation(umath.add)
subtract = _MaskedBinaryOperation(umath.subtract)
multiply = _MaskedBinaryOperation(umath.multiply, 1, 1)
arctan2 = _MaskedBinaryOperation(umath.arctan2, 0.0, 1.0)
equal = _MaskedBinaryOperation(umath.equal)
equal.reduce = None
not_equal = _MaskedBinaryOperation(umath.not_equal)
not_equal.reduce = None
less_equal = _MaskedBinaryOperation(umath.less_equal)
less_equal.reduce = None
greater_equal = _MaskedBinaryOperation(umath.greater_equal)
greater_equal.reduce = None
less = _MaskedBinaryOperation(umath.less)
less.reduce = None
greater = _MaskedBinaryOperation(umath.greater)
greater.reduce = None
logical_and = _MaskedBinaryOperation(umath.logical_and)
alltrue = _MaskedBinaryOperation(umath.logical_and, 1, 1).reduce
logical_or = _MaskedBinaryOperation(umath.logical_or)
sometrue = logical_or.reduce
logical_xor = _MaskedBinaryOperation(umath.logical_xor)
bitwise_and = _MaskedBinaryOperation(umath.bitwise_and)
bitwise_or = _MaskedBinaryOperation(umath.bitwise_or)
bitwise_xor = _MaskedBinaryOperation(umath.bitwise_xor)
hypot = _MaskedBinaryOperation(umath.hypot)
# Domained binary ufuncs ......................................................
divide = _DomainedBinaryOperation(umath.divide, _DomainSafeDivide(), 0, 1)
true_divide = _DomainedBinaryOperation(umath.true_divide,
_DomainSafeDivide(), 0, 1)
floor_divide = _DomainedBinaryOperation(umath.floor_divide,
_DomainSafeDivide(), 0, 1)
remainder = _DomainedBinaryOperation(umath.remainder,
_DomainSafeDivide(), 0, 1)
fmod = _DomainedBinaryOperation(umath.fmod, _DomainSafeDivide(), 0, 1)
mod = _DomainedBinaryOperation(umath.mod, _DomainSafeDivide(), 0, 1)
#####--------------------------------------------------------------------------
#---- --- Mask creation functions ---
#####--------------------------------------------------------------------------
def _recursive_make_descr(datatype, newtype=bool_):
"Private function allowing recursion in make_descr."
# Do we have some named fields?
if datatype.names:
descr = []
for name in datatype.names:
field = datatype.fields[name]
if len(field) == 3:
# Prepend the title to the name
name = (field[-1], name)
descr.append((name, _recursive_make_descr(field[0], newtype)))
return descr
# Is this some kind of composite a la (np.float,2)
elif datatype.subdtype:
mdescr = list(datatype.subdtype)
mdescr[0] = newtype
return tuple(mdescr)
else:
return newtype
def make_mask_descr(ndtype):
"""
Construct a dtype description list from a given dtype.
Returns a new dtype object, with the type of all fields in `ndtype` to a
boolean type. Field names are not altered.
Parameters
----------
ndtype : dtype
The dtype to convert.
Returns
-------
result : dtype
A dtype that looks like `ndtype`, the type of all fields is boolean.
Examples
--------
>>> import numpy.ma as ma
>>> dtype = np.dtype({'names':['foo', 'bar'],
... 'formats':[np.float32, np.int]})
>>> dtype
dtype([('foo', '<f4'), ('bar', '<i4')])
>>> ma.make_mask_descr(dtype)
dtype([('foo', '|b1'), ('bar', '|b1')])
>>> ma.make_mask_descr(np.float32)
<type 'numpy.bool_'>
"""
# Make sure we do have a dtype
if not isinstance(ndtype, np.dtype):
ndtype = np.dtype(ndtype)
return np.dtype(_recursive_make_descr(ndtype, np.bool))
def getmask(a):
"""
Return the mask of a masked array, or nomask.
Return the mask of `a` as an ndarray if `a` is a `MaskedArray` and the
mask is not `nomask`, else return `nomask`. To guarantee a full array
of booleans of the same shape as `a`, use `getmaskarray`.
Parameters
----------
a : array_like
Input `MaskedArray` for which the mask is required.
See Also
--------
getdata : Return the data of a masked array as an ndarray.
getmaskarray : Return the mask of a masked array, or full array of False.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.masked_equal([[1,2],[3,4]], 2)
>>> a
masked_array(data =
[[1 --]
[3 4]],
mask =
[[False True]
[False False]],
fill_value=999999)
>>> ma.getmask(a)
array([[False, True],
[False, False]], dtype=bool)
Equivalently use the `MaskedArray` `mask` attribute.
>>> a.mask
array([[False, True],
[False, False]], dtype=bool)
Result when mask == ``nomask``
>>> b = ma.masked_array([[1,2],[3,4]])
>>> b
masked_array(data =
[[1 2]
[3 4]],
mask =
False,
fill_value=999999)
>>> ma.nomask
False
>>> ma.getmask(b) == ma.nomask
True
>>> b.mask == ma.nomask
True
"""
return getattr(a, '_mask', nomask)
get_mask = getmask
def getmaskarray(arr):
"""
Return the mask of a masked array, or full boolean array of False.
Return the mask of `arr` as an ndarray if `arr` is a `MaskedArray` and
the mask is not `nomask`, else return a full boolean array of False of
the same shape as `arr`.
Parameters
----------
arr : array_like
Input `MaskedArray` for which the mask is required.
See Also
--------
getmask : Return the mask of a masked array, or nomask.
getdata : Return the data of a masked array as an ndarray.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.masked_equal([[1,2],[3,4]], 2)
>>> a
masked_array(data =
[[1 --]
[3 4]],
mask =
[[False True]
[False False]],
fill_value=999999)
>>> ma.getmaskarray(a)
array([[False, True],
[False, False]], dtype=bool)
Result when mask == ``nomask``
>>> b = ma.masked_array([[1,2],[3,4]])
>>> b
masked_array(data =
[[1 2]
[3 4]],
mask =
False,
fill_value=999999)
>>> ma.getmaskarray(b)
array([[False, False],
[False, False]], dtype=bool)
"""
mask = getmask(arr)
if mask is nomask:
mask = make_mask_none(np.shape(arr), getdata(arr).dtype)
return mask
def is_mask(m):
"""
Return True if m is a valid, standard mask.
This function does not check the contents of the input, only that the
type is MaskType. In particular, this function returns False if the
mask has a flexible dtype.
Parameters
----------
m : array_like
Array to test.
Returns
-------
result : bool
True if `m.dtype.type` is MaskType, False otherwise.
See Also
--------
isMaskedArray : Test whether input is an instance of MaskedArray.
Examples
--------
>>> import numpy.ma as ma
>>> m = ma.masked_equal([0, 1, 0, 2, 3], 0)
>>> m
masked_array(data = [-- 1 -- 2 3],
mask = [ True False True False False],
fill_value=999999)
>>> ma.is_mask(m)
False
>>> ma.is_mask(m.mask)
True
Input must be an ndarray (or have similar attributes)
for it to be considered a valid mask.
>>> m = [False, True, False]
>>> ma.is_mask(m)
False
>>> m = np.array([False, True, False])
>>> m
array([False, True, False], dtype=bool)
>>> ma.is_mask(m)
True
Arrays with complex dtypes don't return True.
>>> dtype = np.dtype({'names':['monty', 'pithon'],
... 'formats':[np.bool, np.bool]})
>>> dtype
dtype([('monty', '|b1'), ('pithon', '|b1')])
>>> m = np.array([(True, False), (False, True), (True, False)],
... dtype=dtype)
>>> m
array([(True, False), (False, True), (True, False)],
dtype=[('monty', '|b1'), ('pithon', '|b1')])
>>> ma.is_mask(m)
False
"""
try:
return m.dtype.type is MaskType
except AttributeError:
return False
def make_mask(m, copy=False, shrink=True, dtype=MaskType):
"""
Create a boolean mask from an array.
Return `m` as a boolean mask, creating a copy if necessary or requested.
The function can accept any sequence that is convertible to integers,
or ``nomask``. The contents need not be 0s and 1s: values of 0 are
interpreted as False, everything else as True.
Parameters
----------
m : array_like
Potential mask.
copy : bool, optional
Whether to return a copy of `m` (True) or `m` itself (False).
shrink : bool, optional
Whether to shrink `m` to ``nomask`` if all its values are False.
dtype : dtype, optional
Data-type of the output mask. By default, the output mask has
a dtype of MaskType (bool). If the dtype is flexible, each field
has a boolean dtype.
Returns
-------
result : ndarray
A boolean mask derived from `m`.
Examples
--------
>>> import numpy.ma as ma
>>> m = [True, False, True, True]
>>> ma.make_mask(m)
array([ True, False, True, True], dtype=bool)
>>> m = [1, 0, 1, 1]
>>> ma.make_mask(m)
array([ True, False, True, True], dtype=bool)
>>> m = [1, 0, 2, -3]
>>> ma.make_mask(m)
array([ True, False, True, True], dtype=bool)
Effect of the `shrink` parameter.
>>> m = np.zeros(4)
>>> m
array([ 0., 0., 0., 0.])
>>> ma.make_mask(m)
False
>>> ma.make_mask(m, shrink=False)
array([False, False, False, False], dtype=bool)
Using a flexible `dtype`.
>>> m = [1, 0, 1, 1]
>>> n = [0, 1, 0, 0]
>>> arr = []
>>> for man, mouse in zip(m, n):
... arr.append((man, mouse))
>>> arr
[(1, 0), (0, 1), (1, 0), (1, 0)]
>>> dtype = np.dtype({'names':['man', 'mouse'],
... 'formats':[np.int, np.int]})
>>> arr = np.array(arr, dtype=dtype)
>>> arr
array([(1, 0), (0, 1), (1, 0), (1, 0)],
dtype=[('man', '<i4'), ('mouse', '<i4')])
>>> ma.make_mask(arr, dtype=dtype)
array([(True, False), (False, True), (True, False), (True, False)],
dtype=[('man', '|b1'), ('mouse', '|b1')])
"""
if m is nomask:
return nomask
elif isinstance(m, ndarray):
# We won't return after this point to make sure we can shrink the mask
# Fill the mask in case there are missing data
m = filled(m, True)
# Make sure the input dtype is valid
dtype = make_mask_descr(dtype)
if m.dtype == dtype:
if copy:
result = m.copy()
else:
result = m
else:
result = np.array(m, dtype=dtype, copy=copy)
else:
result = np.array(filled(m, True), dtype=MaskType)
# Off with the masks! Shrink to nomask if possible.
if shrink and (not result.dtype.names) and (not result.any()):
return nomask
else:
return result
def make_mask_none(newshape, dtype=None):
"""
Return a boolean mask of the given shape, filled with False.
This function returns a boolean ndarray with all entries False, that can
be used in common mask manipulations. If a structured dtype is specified, the
type of each field is converted to a boolean type.
Parameters
----------
newshape : tuple
A tuple indicating the shape of the mask.
dtype : {None, dtype}, optional
If None, use a MaskType instance. Otherwise, use a new datatype with
the same fields as `dtype`, converted to boolean types.
Returns
-------
result : ndarray
An ndarray of appropriate shape and dtype, filled with False.
See Also
--------
make_mask : Create a boolean mask from an array.
make_mask_descr : Construct a dtype description list from a given dtype.
Examples
--------
>>> import numpy.ma as ma
>>> ma.make_mask_none((3,))
array([False, False, False], dtype=bool)
Defining a more complex dtype.
>>> dtype = np.dtype({'names':['foo', 'bar'],
... 'formats':[np.float32, np.int]})
>>> dtype
dtype([('foo', '<f4'), ('bar', '<i4')])
>>> ma.make_mask_none((3,), dtype=dtype)
array([(False, False), (False, False), (False, False)],
dtype=[('foo', '|b1'), ('bar', '|b1')])
"""
if dtype is None:
result = np.zeros(newshape, dtype=MaskType)
else:
result = np.zeros(newshape, dtype=make_mask_descr(dtype))
return result
def mask_or (m1, m2, copy=False, shrink=True):
"""
Combine two masks with the ``logical_or`` operator.
The result may be a view on `m1` or `m2` if the other is `nomask`
(i.e. False).
Parameters
----------
m1, m2 : array_like
Input masks.
copy : bool, optional
If copy is False and one of the inputs is `nomask`, return a view
of the other input mask. Defaults to False.
shrink : bool, optional
Whether to shrink the output to `nomask` if all its values are
False. Defaults to True.
Returns
-------
mask : output mask
The result masks values that are masked in either `m1` or `m2`.
Raises
------
ValueError
If `m1` and `m2` have different flexible dtypes.
Examples
--------
>>> m1 = np.ma.make_mask([0, 1, 1, 0])
>>> m2 = np.ma.make_mask([1, 0, 0, 0])
>>> np.ma.mask_or(m1, m2)
array([ True, True, True, False], dtype=bool)
"""
def _recursive_mask_or(m1, m2, newmask):
names = m1.dtype.names
for name in names:
current1 = m1[name]
if current1.dtype.names:
_recursive_mask_or(current1, m2[name], newmask[name])
else:
umath.logical_or(current1, m2[name], newmask[name])
return
#
if (m1 is nomask) or (m1 is False):
dtype = getattr(m2, 'dtype', MaskType)
return make_mask(m2, copy=copy, shrink=shrink, dtype=dtype)
if (m2 is nomask) or (m2 is False):
dtype = getattr(m1, 'dtype', MaskType)
return make_mask(m1, copy=copy, shrink=shrink, dtype=dtype)
if m1 is m2 and is_mask(m1):
return m1
(dtype1, dtype2) = (getattr(m1, 'dtype', None), getattr(m2, 'dtype', None))
if (dtype1 != dtype2):
raise ValueError("Incompatible dtypes '%s'<>'%s'" % (dtype1, dtype2))
if dtype1.names:
newmask = np.empty_like(m1)
_recursive_mask_or(m1, m2, newmask)
return newmask
return make_mask(umath.logical_or(m1, m2), copy=copy, shrink=shrink)
def flatten_mask(mask):
"""
Returns a completely flattened version of the mask, where nested fields
are collapsed.
Parameters
----------
mask : array_like
Input array, which will be interpreted as booleans.
Returns
-------
flattened_mask : ndarray of bools
The flattened input.
Examples
--------
>>> mask = np.array([0, 0, 1], dtype=np.bool)
>>> flatten_mask(mask)
array([False, False, True], dtype=bool)
>>> mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)])
>>> flatten_mask(mask)
array([False, False, False, True], dtype=bool)
>>> mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])]
>>> mask = np.array([(0, (0, 0)), (0, (0, 1))], dtype=mdtype)
>>> flatten_mask(mask)
array([False, False, False, False, False, True], dtype=bool)
"""
#
def _flatmask(mask):
"Flatten the mask and returns a (maybe nested) sequence of booleans."
mnames = mask.dtype.names
if mnames:
return [flatten_mask(mask[name]) for name in mnames]
else:
return mask
#
def _flatsequence(sequence):
"Generates a flattened version of the sequence."
try:
for element in sequence:
if hasattr(element, '__iter__'):
for f in _flatsequence(element):
yield f
else:
yield element
except TypeError:
yield sequence
#
mask = np.asarray(mask)
flattened = _flatsequence(_flatmask(mask))
return np.array([_ for _ in flattened], dtype=bool)
def _check_mask_axis(mask, axis):
"Check whether there are masked values along the given axis"
if mask is not nomask:
return mask.all(axis=axis)
return nomask
#####--------------------------------------------------------------------------
#--- --- Masking functions ---
#####--------------------------------------------------------------------------
def masked_where(condition, a, copy=True):
"""
Mask an array where a condition is met.
Return `a` as an array masked where `condition` is True.
Any masked values of `a` or `condition` are also masked in the output.
Parameters
----------
condition : array_like
Masking condition. When `condition` tests floating point values for
equality, consider using ``masked_values`` instead.
a : array_like
Array to mask.
copy : bool
If True (default) make a copy of `a` in the result. If False modify
`a` in place and return a view.
Returns
-------
result : MaskedArray
The result of masking `a` where `condition` is True.
See Also
--------
masked_values : Mask using floating point equality.
masked_equal : Mask where equal to a given value.
masked_not_equal : Mask where `not` equal to a given value.
masked_less_equal : Mask where less than or equal to a given value.
masked_greater_equal : Mask where greater than or equal to a given value.
masked_less : Mask where less than a given value.
masked_greater : Mask where greater than a given value.
masked_inside : Mask inside a given interval.
masked_outside : Mask outside a given interval.
masked_invalid : Mask invalid values (NaNs or infs).
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_where(a <= 2, a)
masked_array(data = [-- -- -- 3],
mask = [ True True True False],
fill_value=999999)
Mask array `b` conditional on `a`.
>>> b = ['a', 'b', 'c', 'd']
>>> ma.masked_where(a == 2, b)
masked_array(data = [a b -- d],
mask = [False False True False],
fill_value=N/A)
Effect of the `copy` argument.
>>> c = ma.masked_where(a <= 2, a)
>>> c
masked_array(data = [-- -- -- 3],
mask = [ True True True False],
fill_value=999999)
>>> c[0] = 99
>>> c
masked_array(data = [99 -- -- 3],
mask = [False True True False],
fill_value=999999)
>>> a
array([0, 1, 2, 3])
>>> c = ma.masked_where(a <= 2, a, copy=False)
>>> c[0] = 99
>>> c
masked_array(data = [99 -- -- 3],
mask = [False True True False],
fill_value=999999)
>>> a
array([99, 1, 2, 3])
When `condition` or `a` contain masked values.
>>> a = np.arange(4)
>>> a = ma.masked_where(a == 2, a)
>>> a
masked_array(data = [0 1 -- 3],
mask = [False False True False],
fill_value=999999)
>>> b = np.arange(4)
>>> b = ma.masked_where(b == 0, b)
>>> b
masked_array(data = [-- 1 2 3],
mask = [ True False False False],
fill_value=999999)
>>> ma.masked_where(a == 3, b)
masked_array(data = [-- 1 -- --],
mask = [ True False True True],
fill_value=999999)
"""
# Make sure that condition is a valid standard-type mask.
cond = make_mask(condition)
a = np.array(a, copy=copy, subok=True)
(cshape, ashape) = (cond.shape, a.shape)
if cshape and cshape != ashape:
raise IndexError("Inconsistent shape between the condition and the input"
" (got %s and %s)" % (cshape, ashape))
if hasattr(a, '_mask'):
cond = mask_or(cond, a._mask)
cls = type(a)
else:
cls = MaskedArray
result = a.view(cls)
result._mask = cond
return result
def masked_greater(x, value, copy=True):
"""
Mask an array where greater than a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x > value).
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_greater(a, 2)
masked_array(data = [0 1 2 --],
mask = [False False False True],
fill_value=999999)
"""
return masked_where(greater(x, value), x, copy=copy)
def masked_greater_equal(x, value, copy=True):
"""
Mask an array where greater than or equal to a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x >= value).
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_greater_equal(a, 2)
masked_array(data = [0 1 -- --],
mask = [False False True True],
fill_value=999999)
"""
return masked_where(greater_equal(x, value), x, copy=copy)
def masked_less(x, value, copy=True):
"""
Mask an array where less than a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x < value).
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_less(a, 2)
masked_array(data = [-- -- 2 3],
mask = [ True True False False],
fill_value=999999)
"""
return masked_where(less(x, value), x, copy=copy)
def masked_less_equal(x, value, copy=True):
"""
Mask an array where less than or equal to a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x <= value).
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_less_equal(a, 2)
masked_array(data = [-- -- -- 3],
mask = [ True True True False],
fill_value=999999)
"""
return masked_where(less_equal(x, value), x, copy=copy)
def masked_not_equal(x, value, copy=True):
"""
Mask an array where `not` equal to a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x != value).
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_not_equal(a, 2)
masked_array(data = [-- -- 2 --],
mask = [ True True False True],
fill_value=999999)
"""
return masked_where(not_equal(x, value), x, copy=copy)
def masked_equal(x, value, copy=True):
"""
Mask an array where equal to a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x == value). For floating point arrays,
consider using ``masked_values(x, value)``.
See Also
--------
masked_where : Mask where a condition is met.
masked_values : Mask using floating point equality.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_equal(a, 2)
masked_array(data = [0 1 -- 3],
mask = [False False True False],
fill_value=999999)
"""
# An alternative implementation relies on filling first: probably not needed.
# d = filled(x, 0)
# c = umath.equal(d, value)
# m = mask_or(c, getmask(x))
# return array(d, mask=m, copy=copy)
output = masked_where(equal(x, value), x, copy=copy)
output.fill_value = value
return output
def masked_inside(x, v1, v2, copy=True):
"""
Mask an array inside a given interval.
Shortcut to ``masked_where``, where `condition` is True for `x` inside
the interval [v1,v2] (v1 <= x <= v2). The boundaries `v1` and `v2`
can be given in either order.
See Also
--------
masked_where : Mask where a condition is met.
Notes
-----
The array `x` is prefilled with its filling value.
Examples
--------
>>> import numpy.ma as ma
>>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1]
>>> ma.masked_inside(x, -0.3, 0.3)
masked_array(data = [0.31 1.2 -- -- -0.4 -1.1],
mask = [False False True True False False],
fill_value=1e+20)
The order of `v1` and `v2` doesn't matter.
>>> ma.masked_inside(x, 0.3, -0.3)
masked_array(data = [0.31 1.2 -- -- -0.4 -1.1],
mask = [False False True True False False],
fill_value=1e+20)
"""
if v2 < v1:
(v1, v2) = (v2, v1)
xf = filled(x)
condition = (xf >= v1) & (xf <= v2)
return masked_where(condition, x, copy=copy)
def masked_outside(x, v1, v2, copy=True):
"""
Mask an array outside a given interval.
Shortcut to ``masked_where``, where `condition` is True for `x` outside
the interval [v1,v2] (x < v1)|(x > v2).
The boundaries `v1` and `v2` can be given in either order.
See Also
--------
masked_where : Mask where a condition is met.
Notes
-----
The array `x` is prefilled with its filling value.
Examples
--------
>>> import numpy.ma as ma
>>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1]
>>> ma.masked_outside(x, -0.3, 0.3)
masked_array(data = [-- -- 0.01 0.2 -- --],
mask = [ True True False False True True],
fill_value=1e+20)
The order of `v1` and `v2` doesn't matter.
>>> ma.masked_outside(x, 0.3, -0.3)
masked_array(data = [-- -- 0.01 0.2 -- --],
mask = [ True True False False True True],
fill_value=1e+20)
"""
if v2 < v1:
(v1, v2) = (v2, v1)
xf = filled(x)
condition = (xf < v1) | (xf > v2)
return masked_where(condition, x, copy=copy)
def masked_object(x, value, copy=True, shrink=True):
"""
Mask the array `x` where the data are exactly equal to value.
This function is similar to `masked_values`, but only suitable
for object arrays: for floating point, use `masked_values` instead.
Parameters
----------
x : array_like
Array to mask
value : object
Comparison value
copy : {True, False}, optional
Whether to return a copy of `x`.
shrink : {True, False}, optional
Whether to collapse a mask full of False to nomask
Returns
-------
result : MaskedArray
The result of masking `x` where equal to `value`.
See Also
--------
masked_where : Mask where a condition is met.
masked_equal : Mask where equal to a given value (integers).
masked_values : Mask using floating point equality.
Examples
--------
>>> import numpy.ma as ma
>>> food = np.array(['green_eggs', 'ham'], dtype=object)
>>> # don't eat spoiled food
>>> eat = ma.masked_object(food, 'green_eggs')
>>> print(eat)
[-- ham]
>>> # plain ol` ham is boring
>>> fresh_food = np.array(['cheese', 'ham', 'pineapple'], dtype=object)
>>> eat = ma.masked_object(fresh_food, 'green_eggs')
>>> print(eat)
[cheese ham pineapple]
Note that `mask` is set to ``nomask`` if possible.
>>> eat
masked_array(data = [cheese ham pineapple],
mask = False,
fill_value=?)
"""
if isMaskedArray(x):
condition = umath.equal(x._data, value)
mask = x._mask
else:
condition = umath.equal(np.asarray(x), value)
mask = nomask
mask = mask_or(mask, make_mask(condition, shrink=shrink))
return masked_array(x, mask=mask, copy=copy, fill_value=value)
def masked_values(x, value, rtol=1e-5, atol=1e-8, copy=True, shrink=True):
"""
Mask using floating point equality.
Return a MaskedArray, masked where the data in array `x` are approximately
equal to `value`, i.e. where the following condition is True
(abs(x - value) <= atol+rtol*abs(value))
The fill_value is set to `value` and the mask is set to ``nomask`` if
possible. For integers, consider using ``masked_equal``.
Parameters
----------
x : array_like
Array to mask.
value : float
Masking value.
rtol : float, optional
Tolerance parameter.
atol : float, optional
Tolerance parameter (1e-8).
copy : bool, optional
Whether to return a copy of `x`.
shrink : bool, optional
Whether to collapse a mask full of False to ``nomask``.
Returns
-------
result : MaskedArray
The result of masking `x` where approximately equal to `value`.
See Also
--------
masked_where : Mask where a condition is met.
masked_equal : Mask where equal to a given value (integers).
Examples
--------
>>> import numpy.ma as ma
>>> x = np.array([1, 1.1, 2, 1.1, 3])
>>> ma.masked_values(x, 1.1)
masked_array(data = [1.0 -- 2.0 -- 3.0],
mask = [False True False True False],
fill_value=1.1)
Note that `mask` is set to ``nomask`` if possible.
>>> ma.masked_values(x, 1.5)
masked_array(data = [ 1. 1.1 2. 1.1 3. ],
mask = False,
fill_value=1.5)
For integers, the fill value will be different in general to the
result of ``masked_equal``.
>>> x = np.arange(5)
>>> x
array([0, 1, 2, 3, 4])
>>> ma.masked_values(x, 2)
masked_array(data = [0 1 -- 3 4],
mask = [False False True False False],
fill_value=2)
>>> ma.masked_equal(x, 2)
masked_array(data = [0 1 -- 3 4],
mask = [False False True False False],
fill_value=999999)
"""
mabs = umath.absolute
xnew = filled(x, value)
if issubclass(xnew.dtype.type, np.floating):
condition = umath.less_equal(mabs(xnew - value), atol + rtol * mabs(value))
mask = getattr(x, '_mask', nomask)
else:
condition = umath.equal(xnew, value)
mask = nomask
mask = mask_or(mask, make_mask(condition, shrink=shrink))
return masked_array(xnew, mask=mask, copy=copy, fill_value=value)
def masked_invalid(a, copy=True):
"""
Mask an array where invalid values occur (NaNs or infs).
This function is a shortcut to ``masked_where``, with
`condition` = ~(np.isfinite(a)). Any pre-existing mask is conserved.
Only applies to arrays with a dtype where NaNs or infs make sense
(i.e. floating point types), but accepts any array_like object.
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(5, dtype=np.float)
>>> a[2] = np.NaN
>>> a[3] = np.PINF
>>> a
array([ 0., 1., NaN, Inf, 4.])
>>> ma.masked_invalid(a)
masked_array(data = [0.0 1.0 -- -- 4.0],
mask = [False False True True False],
fill_value=1e+20)
"""
a = np.array(a, copy=copy, subok=True)
mask = getattr(a, '_mask', None)
if mask is not None:
condition = ~(np.isfinite(getdata(a)))
if mask is not nomask:
condition |= mask
cls = type(a)
else:
condition = ~(np.isfinite(a))
cls = MaskedArray
result = a.view(cls)
result._mask = condition
return result
#####--------------------------------------------------------------------------
#---- --- Printing options ---
#####--------------------------------------------------------------------------
class _MaskedPrintOption:
"""
Handle the string used to represent missing data in a masked array.
"""
def __init__ (self, display):
"Create the masked_print_option object."
self._display = display
self._enabled = True
def display(self):
"Display the string to print for masked values."
return self._display
def set_display (self, s):
"Set the string to print for masked values."
self._display = s
def enabled(self):
"Is the use of the display value enabled?"
return self._enabled
def enable(self, shrink=1):
"Set the enabling shrink to `shrink`."
self._enabled = shrink
def __str__ (self):
return str(self._display)
__repr__ = __str__
# If you single-index into a masked location, you get this object.
masked_print_option = _MaskedPrintOption('--')
def _recursive_printoption(result, mask, printopt):
"""
Puts printoptions in result where mask is True.
Private function allowing for recursion
"""
names = result.dtype.names
for name in names:
(curdata, curmask) = (result[name], mask[name])
if curdata.dtype.names:
_recursive_printoption(curdata, curmask, printopt)
else:
np.copyto(curdata, printopt, where=curmask)
return
_print_templates = dict(long_std="""\
masked_%(name)s(data =
%(data)s,
%(nlen)s mask =
%(mask)s,
%(nlen)s fill_value = %(fill)s)
""",
short_std="""\
masked_%(name)s(data = %(data)s,
%(nlen)s mask = %(mask)s,
%(nlen)s fill_value = %(fill)s)
""",
long_flx="""\
masked_%(name)s(data =
%(data)s,
%(nlen)s mask =
%(mask)s,
%(nlen)s fill_value = %(fill)s,
%(nlen)s dtype = %(dtype)s)
""",
short_flx="""\
masked_%(name)s(data = %(data)s,
%(nlen)s mask = %(mask)s,
%(nlen)s fill_value = %(fill)s,
%(nlen)s dtype = %(dtype)s)
""")
#####--------------------------------------------------------------------------
#---- --- MaskedArray class ---
#####--------------------------------------------------------------------------
def _recursive_filled(a, mask, fill_value):
"""
Recursively fill `a` with `fill_value`.
Private function
"""
names = a.dtype.names
for name in names:
current = a[name]
if current.dtype.names:
_recursive_filled(current, mask[name], fill_value[name])
else:
np.copyto(current, fill_value[name], where=mask[name])
def flatten_structured_array(a):
"""
Flatten a structured array.
The data type of the output is chosen such that it can represent all of the
(nested) fields.
Parameters
----------
a : structured array
Returns
-------
output : masked array or ndarray
A flattened masked array if the input is a masked array, otherwise a
standard ndarray.
Examples
--------
>>> ndtype = [('a', int), ('b', float)]
>>> a = np.array([(1, 1), (2, 2)], dtype=ndtype)
>>> flatten_structured_array(a)
array([[1., 1.],
[2., 2.]])
"""
#
def flatten_sequence(iterable):
"""Flattens a compound of nested iterables."""
for elm in iter(iterable):
if hasattr(elm, '__iter__'):
for f in flatten_sequence(elm):
yield f
else:
yield elm
#
a = np.asanyarray(a)
inishape = a.shape
a = a.ravel()
if isinstance(a, MaskedArray):
out = np.array([tuple(flatten_sequence(d.item())) for d in a._data])
out = out.view(MaskedArray)
out._mask = np.array([tuple(flatten_sequence(d.item()))
for d in getmaskarray(a)])
else:
out = np.array([tuple(flatten_sequence(d.item())) for d in a])
if len(inishape) > 1:
newshape = list(out.shape)
newshape[0] = inishape
out.shape = tuple(flatten_sequence(newshape))
return out
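# Hedged sketch with a masked structured input (illustrative only): the mask
# is flattened alongside the data, one column per (nested) field:
#
#   >>> dt = [('a', int), ('b', float)]
#   >>> m = np.ma.array([(1, 1.0), (2, 2.0)], dtype=dt,
#   ...                 mask=[(False, True), (True, False)])
#   >>> flatten_structured_array(m).mask
#   array([[False,  True],
#          [ True, False]])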
class _arraymethod(object):
"""
Define a wrapper for basic array methods.
Upon call, returns a masked array, where the new ``_data`` array is
the output of the corresponding method called on the original
``_data``.
If `onmask` is True, the new mask is the output of the method called
on the initial mask. Otherwise, the new mask is just a reference
to the initial mask.
Attributes
----------
_onmask : bool
Holds the `onmask` parameter.
obj : object
The object calling `_arraymethod`.
Parameters
----------
funcname : str
Name of the function to apply on data.
onmask : bool
Whether the mask must be processed also (True) or left
alone (False). Default is True. Made available as the `_onmask`
attribute.
"""
def __init__(self, funcname, onmask=True):
self.__name__ = funcname
self._onmask = onmask
self.obj = None
self.__doc__ = self.getdoc()
#
def getdoc(self):
"Return the doc of the function (from the doc of the method)."
methdoc = getattr(ndarray, self.__name__, None) or \
getattr(np, self.__name__, None)
if methdoc is not None:
return methdoc.__doc__
#
def __get__(self, obj, objtype=None):
self.obj = obj
return self
#
def __call__(self, *args, **params):
methodname = self.__name__
instance = self.obj
# Fallback : if the instance has not been initialized, use the first arg
if instance is None:
args = list(args)
instance = args.pop(0)
data = instance._data
mask = instance._mask
cls = type(instance)
result = getattr(data, methodname)(*args, **params).view(cls)
result._update_from(instance)
if result.ndim:
if not self._onmask:
result.__setmask__(mask)
elif mask is not nomask:
result.__setmask__(getattr(mask, methodname)(*args, **params))
else:
if mask.ndim and (not mask.dtype.names and mask.all()):
return masked
return result
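# Sketch of the wrapper in action (illustrative only): ``flatten`` further
# below is built as ``_arraymethod('flatten')``, so with the default
# ``onmask=True`` the mask is flattened together with the data:
#
#   >>> x = np.ma.array([[1, 2], [3, 4]], mask=[[0, 1], [1, 0]])
#   >>> x.flatten().mask
#   array([False,  True,  True, False])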
class MaskedIterator(object):
"""
Flat iterator object to iterate over masked arrays.
A `MaskedIterator` iterator is returned by ``x.flat`` for any masked array
`x`. It allows iterating over the array as if it were a 1-D array,
either in a for-loop or by calling its `next` method.
Iteration is done in C-contiguous style, with the last index varying the
fastest. The iterator can also be indexed using basic slicing or
advanced indexing.
See Also
--------
MaskedArray.flat : Return a flat iterator over an array.
MaskedArray.flatten : Returns a flattened copy of an array.
Notes
-----
`MaskedIterator` is not exported by the `ma` module. Instead of
instantiating a `MaskedIterator` directly, use `MaskedArray.flat`.
Examples
--------
>>> x = np.ma.array(np.arange(6).reshape(2, 3))
>>> fl = x.flat
>>> type(fl)
<class 'numpy.ma.core.MaskedIterator'>
>>> for item in fl:
... print item
...
0
1
2
3
4
5
Extracting more than a single element by indexing the `MaskedIterator`
returns a masked array:
>>> fl[2:4]
masked_array(data = [2 3],
mask = False,
fill_value = 999999)
"""
def __init__(self, ma):
self.ma = ma
self.dataiter = ma._data.flat
#
if ma._mask is nomask:
self.maskiter = None
else:
self.maskiter = ma._mask.flat
def __iter__(self):
return self
def __getitem__(self, indx):
result = self.dataiter.__getitem__(indx).view(type(self.ma))
if self.maskiter is not None:
_mask = self.maskiter.__getitem__(indx)
if isinstance(_mask, ndarray):
# set shape to match that of data; this is needed for matrices
_mask.shape = result.shape
result._mask = _mask
elif isinstance(_mask, np.void):
return mvoid(result, mask=_mask, hardmask=self.ma._hardmask)
elif _mask: # Just a scalar, masked
return masked
return result
### This won't work if ravel makes a copy
def __setitem__(self, index, value):
self.dataiter[index] = getdata(value)
if self.maskiter is not None:
self.maskiter[index] = getmaskarray(value)
def __next__(self):
"""
Return the next value, or raise StopIteration.
Examples
--------
>>> x = np.ma.array([3, 2], mask=[0, 1])
>>> fl = x.flat
>>> fl.next()
3
>>> fl.next()
masked_array(data = --,
mask = True,
fill_value = 1e+20)
>>> fl.next()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/ralf/python/numpy/numpy/ma/core.py", line 2243, in next
d = self.dataiter.next()
StopIteration
"""
d = next(self.dataiter)
if self.maskiter is not None:
m = next(self.maskiter)
if isinstance(m, np.void):
return mvoid(d, mask=m, hardmask=self.ma._hardmask)
elif m: # Just a scalar, masked
return masked
return d
next = __next__
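# Iteration and in-place assignment through ``flat`` (illustrative only):
#
#   >>> x = np.ma.array([1, 2, 3], mask=[0, 1, 0])
#   >>> [item is masked for item in x.flat]
#   [False, True, False]
#   >>> x.flat[1] = 99          # __setitem__ updates data and mask together
#   >>> x.mask
#   array([False, False, False])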
class MaskedArray(ndarray):
"""
An array class with possibly masked values.
Mask entries set to True exclude the corresponding elements from any
computation.
Construction::
x = MaskedArray(data, mask=nomask, dtype=None,
copy=False, subok=True, ndmin=0, fill_value=None,
keep_mask=True, hard_mask=None, shrink=True)
Parameters
----------
data : array_like
Input data.
mask : sequence, optional
Mask. Must be convertible to an array of booleans with the same
shape as `data`. True indicates a masked (i.e. invalid) datum.
dtype : dtype, optional
Data type of the output.
If `dtype` is None, the type of the data argument (``data.dtype``)
is used. If `dtype` is not None and different from ``data.dtype``,
a copy is performed.
copy : bool, optional
Whether to copy the input data (True), or to use a reference instead.
Default is False.
subok : bool, optional
Whether to return a subclass of `MaskedArray` if possible (True) or a
plain `MaskedArray`. Default is True.
ndmin : int, optional
Minimum number of dimensions. Default is 0.
fill_value : scalar, optional
Value used to fill in the masked values when necessary.
If None, a default based on the data-type is used.
keep_mask : bool, optional
Whether to combine `mask` with the mask of the input data, if any
(True), or to use only `mask` for the output (False). Default is True.
hard_mask : bool, optional
Whether to use a hard mask or not. With a hard mask, masked values
cannot be unmasked. Default is False.
shrink : bool, optional
Whether to force compression of an empty mask. Default is True.
"""
__array_priority__ = 15
_defaultmask = nomask
_defaulthardmask = False
_baseclass = ndarray
def __new__(cls, data=None, mask=nomask, dtype=None, copy=False,
subok=True, ndmin=0, fill_value=None,
keep_mask=True, hard_mask=None, shrink=True,
**options):
"""
Create a new masked array from scratch.
Notes
-----
A masked array can also be created by taking a .view(MaskedArray).
"""
# Process data............
_data = np.array(data, dtype=dtype, copy=copy, subok=True, ndmin=ndmin)
_baseclass = getattr(data, '_baseclass', type(_data))
# Check that we're not erasing the mask..........
if isinstance(data, MaskedArray) and (data.shape != _data.shape):
copy = True
# Careful, cls might not always be MaskedArray...
if not isinstance(data, cls) or not subok:
_data = ndarray.view(_data, cls)
else:
_data = ndarray.view(_data, type(data))
# Backwards compatibility w/ numpy.core.ma .......
if hasattr(data, '_mask') and not isinstance(data, ndarray):
_data._mask = data._mask
_sharedmask = True
# Process mask ...............................
# Number of named fields (or zero if none)
names_ = _data.dtype.names or ()
# Type of the mask
if names_:
mdtype = make_mask_descr(_data.dtype)
else:
mdtype = MaskType
# Case 1. : no mask in input ............
if mask is nomask:
# Erase the current mask ?
if not keep_mask:
# With a reduced version
if shrink:
_data._mask = nomask
# With full version
else:
_data._mask = np.zeros(_data.shape, dtype=mdtype)
# Check whether we missed something
elif isinstance(data, (tuple, list)):
try:
# If data is a sequence of masked array
mask = np.array([getmaskarray(m) for m in data],
dtype=mdtype)
except ValueError:
# If data is nested
mask = nomask
# Force shrinking of the mask if needed (and possible)
if (mdtype == MaskType) and mask.any():
_data._mask = mask
_data._sharedmask = False
else:
if copy:
_data._mask = _data._mask.copy()
_data._sharedmask = False
# Reset the shape of the original mask
if getmask(data) is not nomask:
data._mask.shape = data.shape
else:
_data._sharedmask = True
# Case 2. : With a mask in input ........
else:
# Read the mask with the current mdtype
try:
mask = np.array(mask, copy=copy, dtype=mdtype)
# Or assume it's a sequence of bool/int
except TypeError:
mask = np.array([tuple([m] * len(mdtype)) for m in mask],
dtype=mdtype)
# Make sure the mask and the data have the same shape
if mask.shape != _data.shape:
(nd, nm) = (_data.size, mask.size)
if nm == 1:
mask = np.resize(mask, _data.shape)
elif nm == nd:
mask = np.reshape(mask, _data.shape)
else:
msg = "Mask and data not compatible: data size is %i, " + \
"mask size is %i."
raise MaskError(msg % (nd, nm))
copy = True
# Set the mask to the new value
if _data._mask is nomask:
_data._mask = mask
_data._sharedmask = not copy
else:
if not keep_mask:
_data._mask = mask
_data._sharedmask = not copy
else:
if names_:
def _recursive_or(a, b):
"do a|=b on each field of a, recursively"
for name in a.dtype.names:
(af, bf) = (a[name], b[name])
if af.dtype.names:
_recursive_or(af, bf)
else:
af |= bf
return
_recursive_or(_data._mask, mask)
else:
_data._mask = np.logical_or(mask, _data._mask)
_data._sharedmask = False
# Update fill_value.......
if fill_value is None:
fill_value = getattr(data, '_fill_value', None)
# But don't run the check unless we have something to check....
if fill_value is not None:
_data._fill_value = _check_fill_value(fill_value, _data.dtype)
# Process extra options ..
if hard_mask is None:
_data._hardmask = getattr(data, '_hardmask', False)
else:
_data._hardmask = hard_mask
_data._baseclass = _baseclass
return _data
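# Construction sketch (illustrative only): with ``keep_mask=True`` (the
# default) an explicit mask is OR-ed with the input's mask; with
# ``keep_mask=False`` it replaces it:
#
#   >>> base = np.ma.array([1, 2, 3], mask=[1, 0, 0])
#   >>> np.ma.array(base, mask=[0, 0, 1]).mask
#   array([ True, False,  True])
#   >>> np.ma.array(base, mask=[0, 0, 1], keep_mask=False).mask
#   array([False, False,  True])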
#
def _update_from(self, obj):
"""Copies some attributes of obj to self.
"""
if obj is not None and isinstance(obj, ndarray):
_baseclass = type(obj)
else:
_baseclass = ndarray
# We need to copy the _basedict to avoid backward propagation
_optinfo = {}
_optinfo.update(getattr(obj, '_optinfo', {}))
_optinfo.update(getattr(obj, '_basedict', {}))
if not isinstance(obj, MaskedArray):
_optinfo.update(getattr(obj, '__dict__', {}))
_dict = dict(_fill_value=getattr(obj, '_fill_value', None),
_hardmask=getattr(obj, '_hardmask', False),
_sharedmask=getattr(obj, '_sharedmask', False),
_isfield=getattr(obj, '_isfield', False),
_baseclass=getattr(obj, '_baseclass', _baseclass),
_optinfo=_optinfo,
_basedict=_optinfo)
self.__dict__.update(_dict)
self.__dict__.update(_optinfo)
return
def __array_finalize__(self, obj):
"""Finalizes the masked array.
"""
# Get main attributes .........
self._update_from(obj)
# We have to decide how to initialize self.mask, based on
# obj.mask. This is very difficult. There might be some
# correspondence between the elements in the array we are being
# created from (= obj) and us. Or... there might not. This method can
# be called in all kinds of places for all kinds of reasons -- could
# be empty_like, could be slicing, could be a ufunc, could be a view,
# ... The numpy subclassing interface simply doesn't give us any way
# to know, which means that at best this method will be based on
# guesswork and heuristics. To make things worse, there isn't even any
# clear consensus about what the desired behavior is. For instance,
# most users think that np.empty_like(marr) -- which goes via this
# method -- should return a masked array with an empty mask (see
# gh-3404 and linked discussions), but others disagree, and they have
# existing code which depends on empty_like returning an array that
# matches the input mask.
#
# Historically our algorithm was: if the template object mask had the
# same *number of elements* as us, then we used *its mask object
# itself* as our mask, so that writes to us would also write to the
# original array. This is horribly broken in multiple ways.
#
# Now what we do instead is, if the template object mask has the same
# number of elements as us, and we do not have the same base pointer
# as the template object (b/c views like arr[...] should keep the same
# mask), then we make a copy of the template object mask and use
# that. This is also horribly broken but somewhat less so. Maybe.
if isinstance(obj, ndarray):
# XX: This looks like a bug -- shouldn't it check self.dtype
# instead?
if obj.dtype.names:
_mask = getattr(obj, '_mask',
make_mask_none(obj.shape, obj.dtype))
else:
_mask = getattr(obj, '_mask', nomask)
# If self and obj point to exactly the same data, then probably
# self is a simple view of obj (e.g., self = obj[...]), so they
# should share the same mask. (This isn't 100% reliable, e.g. self
# could be the first row of obj, or have strange strides, but as a
# heuristic it's not bad.) In all other cases, we make a copy of
# the mask, so that future modifications to 'self' do not end up
# side-effecting 'obj' as well.
if (obj.__array_interface__["data"][0]
!= self.__array_interface__["data"][0]):
_mask = _mask.copy()
else:
_mask = nomask
self._mask = _mask
# Finalize the mask ...........
if self._mask is not nomask:
try:
self._mask.shape = self.shape
except ValueError:
self._mask = nomask
except (TypeError, AttributeError):
# When _mask.shape is not writable (because it's a void)
pass
# Finalize the fill_value for structured arrays
if self.dtype.names:
if self._fill_value is None:
self._fill_value = _check_fill_value(None, self.dtype)
return
def __array_wrap__(self, obj, context=None):
"""
Special hook for ufuncs.
Wraps the numpy array and sets the mask according to context.
"""
result = obj.view(type(self))
result._update_from(self)
#..........
if context is not None:
result._mask = result._mask.copy()
(func, args, _) = context
m = reduce(mask_or, [getmaskarray(arg) for arg in args])
# Get the domain mask................
domain = ufunc_domain.get(func, None)
if domain is not None:
# Take the domain, and make sure it's a ndarray
if len(args) > 2:
d = filled(reduce(domain, args), True)
else:
d = filled(domain(*args), True)
# Fill the result where the domain is wrong
try:
# Binary domain: take the last value
fill_value = ufunc_fills[func][-1]
except TypeError:
# Unary domain: just use this one
fill_value = ufunc_fills[func]
except KeyError:
# Domain not recognized, use fill_value instead
fill_value = self.fill_value
result = result.copy()
np.copyto(result, fill_value, where=d)
# Update the mask
if m is nomask:
if d is not nomask:
m = d
else:
# Don't modify inplace, we risk back-propagation
m = (m | d)
# Make sure the mask has the proper size
if result.shape == () and m:
return masked
else:
result._mask = m
result._sharedmask = False
#....
return result
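# Domain-masking sketch (illustrative only): ufuncs registered with a domain
# mask out-of-domain inputs instead of producing NaN/Inf:
#
#   >>> x = np.ma.array([-1.0, 1.0, 2.0])
#   >>> np.log(x).mask          # log's domain excludes values <= 0
#   array([ True, False, False])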
def view(self, dtype=None, type=None, fill_value=None):
"""
Return a view of the MaskedArray data
Parameters
----------
dtype : data-type or ndarray sub-class, optional
Data-type descriptor of the returned view, e.g., float32 or int16.
The default, None, results in the view having the same data-type
as `a`. As with ``ndarray.view``, dtype can also be specified as
an ndarray sub-class, which then specifies the type of the
returned object (this is equivalent to setting the ``type``
parameter).
type : Python type, optional
Type of the returned view, e.g., ndarray or matrix. Again, the
default None results in type preservation.
Notes
-----
``a.view()`` is used two different ways:
``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view
of the array's memory with a different data-type. This can cause a
reinterpretation of the bytes of memory.
``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just
returns an instance of `ndarray_subclass` that looks at the same array
(same shape, dtype, etc.) This does not cause a reinterpretation of the
memory.
If `fill_value` is not specified, but `dtype` is specified (and is not
an ndarray sub-class), the `fill_value` of the MaskedArray will be
reset. If neither `fill_value` nor `dtype` are specified (or if
`dtype` is an ndarray sub-class), then the fill value is preserved.
Finally, if `fill_value` is specified, but `dtype` is not, the fill
value is set to the specified value.
For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of
bytes per entry than the previous dtype (for example, converting a
regular array to a structured array), then the behavior of the view
cannot be predicted just from the superficial appearance of ``a`` (shown
by ``print(a)``). It also depends on exactly how ``a`` is stored in
memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus
defined as a slice or transpose, etc., the view may give different
results.
"""
if dtype is None:
if type is None:
output = ndarray.view(self)
else:
output = ndarray.view(self, type)
elif type is None:
try:
if issubclass(dtype, ndarray):
output = ndarray.view(self, dtype)
dtype = None
else:
output = ndarray.view(self, dtype)
except TypeError:
output = ndarray.view(self, dtype)
else:
output = ndarray.view(self, dtype, type)
# Should we update the mask ?
if (getattr(output, '_mask', nomask) is not nomask):
if dtype is None:
dtype = output.dtype
mdtype = make_mask_descr(dtype)
output._mask = self._mask.view(mdtype, ndarray)
# Try to reset the shape of the mask (if we don't have a void)
try:
output._mask.shape = output.shape
except (AttributeError, TypeError):
pass
# Make sure to reset the _fill_value if needed
if getattr(output, '_fill_value', None) is not None:
if fill_value is None:
if dtype is None:
pass # leave _fill_value as is
else:
output._fill_value = None
else:
output.fill_value = fill_value
return output
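# View sketch (illustrative only) of the fill_value rules described above:
#
#   >>> x = np.ma.array([1.0, 2.0], mask=[0, 1], fill_value=9.0)
#   >>> x.view().fill_value                  # no dtype: fill_value preserved
#   9.0
#   >>> x.view(dtype=np.float64).fill_value  # dtype given: reset to default
#   1e+20
#   >>> x.view(dtype=np.float64, fill_value=-1.0).fill_value
#   -1.0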
def astype(self, newtype):
"""
Returns a copy of the MaskedArray cast to given newtype.
Returns
-------
output : MaskedArray
A copy of self cast to input newtype.
The returned record shape matches self.shape.
Examples
--------
>>> x = np.ma.array([[1,2,3.1],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> print x
[[1.0 -- 3.1]
[-- 5.0 --]
[7.0 -- 9.0]]
>>> print x.astype(np.int32)
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
"""
newtype = np.dtype(newtype)
output = self._data.astype(newtype).view(type(self))
output._update_from(self)
names = output.dtype.names
if names is None:
output._mask = self._mask.astype(bool)
else:
if self._mask is nomask:
output._mask = nomask
else:
output._mask = self._mask.astype([(n, bool) for n in names])
# Don't check _fill_value if it's None, that'll speed things up
if self._fill_value is not None:
output._fill_value = _check_fill_value(self._fill_value, newtype)
return output
def __getitem__(self, indx):
"""x.__getitem__(y) <==> x[y]
Return the item described by i, as a masked array.
"""
# This test is useful, but we should keep things light...
# if getmask(indx) is not nomask:
# msg = "Masked arrays must be filled before they can be used as indices!"
# raise IndexError(msg)
_data = ndarray.view(self, ndarray)
dout = ndarray.__getitem__(_data, indx)
# We could directly use ndarray.__getitem__ on self...
# But then we would have to modify __array_finalize__ to prevent the
# mask of being reshaped if it hasn't been set up properly yet...
# So it's easier to stick to the current version
_mask = self._mask
if not getattr(dout, 'ndim', False):
# A record ................
if isinstance(dout, np.void):
mask = _mask[indx]
# We should always re-cast to mvoid, otherwise users can
# change masks on rows that already have masked values, but not
# on rows that have no masked values, which is inconsistent.
dout = mvoid(dout, mask=mask, hardmask=self._hardmask)
# Just a scalar............
elif _mask is not nomask and _mask[indx]:
return masked
else:
# Force dout to MA ........
dout = dout.view(type(self))
# Inherit attributes from self
dout._update_from(self)
# Check the fill_value ....
if isinstance(indx, basestring):
if self._fill_value is not None:
dout._fill_value = self._fill_value[indx]
dout._isfield = True
# Update the mask if needed
if _mask is not nomask:
dout._mask = _mask[indx]
dout._sharedmask = True
# Note: Don't try to check for m.any(), that'll take too long...
return dout
def __setitem__(self, indx, value):
"""x.__setitem__(i, y) <==> x[i]=y
Set item described by index. If value is masked, masks those
locations.
"""
if self is masked:
raise MaskError('Cannot alter the masked element.')
# This test is useful, but we should keep things light...
# if getmask(indx) is not nomask:
# msg = "Masked arrays must be filled before they can be used as indices!"
# raise IndexError(msg)
_data = ndarray.view(self, ndarray.__getattribute__(self, '_baseclass'))
_mask = ndarray.__getattribute__(self, '_mask')
if isinstance(indx, basestring):
ndarray.__setitem__(_data, indx, value)
if _mask is nomask:
self._mask = _mask = make_mask_none(self.shape, self.dtype)
_mask[indx] = getmask(value)
return
#........................................
_dtype = ndarray.__getattribute__(_data, 'dtype')
nbfields = len(_dtype.names or ())
#........................................
if value is masked:
# The mask wasn't set: create a full version...
if _mask is nomask:
_mask = self._mask = make_mask_none(self.shape, _dtype)
# Now, set the mask to its value.
if nbfields:
_mask[indx] = tuple([True] * nbfields)
else:
_mask[indx] = True
if not self._isfield:
self._sharedmask = False
return
#........................................
# Get the _data part of the new value
dval = value
# Get the _mask part of the new value
mval = getattr(value, '_mask', nomask)
if nbfields and mval is nomask:
mval = tuple([False] * nbfields)
if _mask is nomask:
# Set the data, then the mask
ndarray.__setitem__(_data, indx, dval)
if mval is not nomask:
_mask = self._mask = make_mask_none(self.shape, _dtype)
ndarray.__setitem__(_mask, indx, mval)
elif not self._hardmask:
# Unshare the mask if necessary to avoid propagation
if not self._isfield:
self.unshare_mask()
_mask = ndarray.__getattribute__(self, '_mask')
# Set the data, then the mask
ndarray.__setitem__(_data, indx, dval)
ndarray.__setitem__(_mask, indx, mval)
elif hasattr(indx, 'dtype') and (indx.dtype == MaskType):
indx = indx * umath.logical_not(_mask)
ndarray.__setitem__(_data, indx, dval)
else:
if nbfields:
err_msg = "Flexible 'hard' masks are not yet supported..."
raise NotImplementedError(err_msg)
mindx = mask_or(_mask[indx], mval, copy=True)
dindx = self._data[indx]
if dindx.size > 1:
np.copyto(dindx, dval, where=~mindx)
elif mindx is nomask:
dindx = dval
ndarray.__setitem__(_data, indx, dindx)
_mask[indx] = mindx
return
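# Assignment sketch (illustrative only): assigning ``masked`` masks a
# location, while assigning a regular value unmasks it (soft mask):
#
#   >>> x = np.ma.array([1, 2, 3], mask=[0, 0, 1])
#   >>> x[0] = np.ma.masked
#   >>> x[2] = 30
#   >>> x.mask
#   array([ True, False, False])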
def __getslice__(self, i, j):
"""x.__getslice__(i, j) <==> x[i:j]
Return the slice described by (i, j). The use of negative
indices is not supported.
"""
return self.__getitem__(slice(i, j))
def __setslice__(self, i, j, value):
"""x.__setslice__(i, j, value) <==> x[i:j]=value
Set the slice (i,j) of a to value. If value is masked, mask
those locations.
"""
self.__setitem__(slice(i, j), value)
def __setmask__(self, mask, copy=False):
"""Set the mask.
"""
idtype = ndarray.__getattribute__(self, 'dtype')
current_mask = ndarray.__getattribute__(self, '_mask')
if mask is masked:
mask = True
# Make sure the mask is set
if (current_mask is nomask):
# Just don't do anything if there's nothing to do...
if mask is nomask:
return
current_mask = self._mask = make_mask_none(self.shape, idtype)
# No named fields.........
if idtype.names is None:
# Hardmask: don't unmask the data
if self._hardmask:
current_mask |= mask
# Softmask: set everything to False
# If it's obviously a compatible scalar, use a quick update
# method...
elif isinstance(mask, (int, float, np.bool_, np.number)):
current_mask[...] = mask
# ...otherwise fall back to the slower, general purpose way.
else:
current_mask.flat = mask
# Named fields ............
else:
mdtype = current_mask.dtype
mask = np.array(mask, copy=False)
# Mask is a singleton
if not mask.ndim:
# It's a boolean : make a record
if mask.dtype.kind == 'b':
mask = np.array(tuple([mask.item()]*len(mdtype)),
dtype=mdtype)
# It's a record: make sure the dtype is correct
else:
mask = mask.astype(mdtype)
# Mask is a sequence
else:
# Make sure the new mask is a ndarray with the proper dtype
try:
mask = np.array(mask, copy=copy, dtype=mdtype)
# Or assume it's a sequence of bool/int
except TypeError:
mask = np.array([tuple([m] * len(mdtype)) for m in mask],
dtype=mdtype)
# Hardmask: don't unmask the data
if self._hardmask:
for n in idtype.names:
current_mask[n] |= mask[n]
# Softmask: set everything to False
# If it's obviously a compatible scalar, use a quick update
# method...
elif isinstance(mask, (int, float, np.bool_, np.number)):
current_mask[...] = mask
# ...otherwise fall back to the slower, general purpose way.
else:
current_mask.flat = mask
# Reshape if needed
if current_mask.shape:
current_mask.shape = self.shape
return
_set_mask = __setmask__
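# Mask-property sketch (illustrative only): assigning to ``mask`` goes
# through __setmask__; a compatible scalar broadcasts over the whole array:
#
#   >>> x = np.ma.array([1, 2, 3])
#   >>> x.mask = [0, 1, 0]
#   >>> x.mask = False               # soft mask: unmask everything
#   >>> x.mask
#   array([False, False, False])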
#....
def _get_mask(self):
"""Return the current mask.
"""
# We could try to force a reshape, but that wouldn't work in some cases.
# return self._mask.reshape(self.shape)
return self._mask
mask = property(fget=_get_mask, fset=__setmask__, doc="Mask")
def _get_recordmask(self):
"""
Return the mask of the records.
A record is masked when all the fields are masked.
"""
_mask = ndarray.__getattribute__(self, '_mask').view(ndarray)
if _mask.dtype.names is None:
return _mask
return np.all(flatten_structured_array(_mask), axis=-1)
def _set_recordmask(self):
"""Return the mask of the records.
A record is masked when all the fields are masked.
"""
raise NotImplementedError("Coming soon: setting the mask per records!")
recordmask = property(fget=_get_recordmask)
#............................................
def harden_mask(self):
"""
Force the mask to hard.
Whether the mask of a masked array is hard or soft is determined by
its `hardmask` property. `harden_mask` sets `hardmask` to True.
See Also
--------
hardmask
"""
self._hardmask = True
return self
def soften_mask(self):
"""
Force the mask to soft.
Whether the mask of a masked array is hard or soft is determined by
its `hardmask` property. `soften_mask` sets `hardmask` to False.
See Also
--------
hardmask
"""
self._hardmask = False
return self
hardmask = property(fget=lambda self: self._hardmask,
doc="Hardness of the mask")
def unshare_mask(self):
"""
Copy the mask and set the sharedmask flag to False.
Whether the mask is shared between masked arrays can be seen from
the `sharedmask` property. `unshare_mask` ensures the mask is not shared.
A copy of the mask is only made if it was shared.
See Also
--------
sharedmask
"""
if self._sharedmask:
self._mask = self._mask.copy()
self._sharedmask = False
return self
sharedmask = property(fget=lambda self: self._sharedmask,
doc="Share status of the mask (read-only).")
def shrink_mask(self):
"""
Reduce a mask to nomask when possible.
Parameters
----------
None
Returns
-------
None
Examples
--------
>>> x = np.ma.array([[1,2 ], [3, 4]], mask=[0]*4)
>>> x.mask
array([[False, False],
[False, False]], dtype=bool)
>>> x.shrink_mask()
>>> x.mask
False
"""
m = self._mask
if m.ndim and not m.any():
self._mask = nomask
return self
#............................................
baseclass = property(fget=lambda self:self._baseclass,
doc="Class of the underlying data (read-only).")
def _get_data(self):
"""Return the current data, as a view of the original
underlying data.
"""
return ndarray.view(self, self._baseclass)
_data = property(fget=_get_data)
data = property(fget=_get_data)
def _get_flat(self):
"Return a flat iterator."
return MaskedIterator(self)
#
def _set_flat(self, value):
"Set a flattened version of self to value."
y = self.ravel()
y[:] = value
#
flat = property(fget=_get_flat, fset=_set_flat,
doc="Flat version of the array.")
def get_fill_value(self):
"""
Return the filling value of the masked array.
Returns
-------
fill_value : scalar
The filling value.
Examples
--------
>>> for dt in [np.int32, np.int64, np.float64, np.complex128]:
... np.ma.array([0, 1], dtype=dt).get_fill_value()
...
999999
999999
1e+20
(1e+20+0j)
>>> x = np.ma.array([0, 1.], fill_value=-np.inf)
>>> x.get_fill_value()
-inf
"""
if self._fill_value is None:
self._fill_value = _check_fill_value(None, self.dtype)
return self._fill_value[()]
def set_fill_value(self, value=None):
"""
Set the filling value of the masked array.
Parameters
----------
value : scalar, optional
The new filling value. Default is None, in which case a default
based on the data type is used.
See Also
--------
ma.set_fill_value : Equivalent function.
Examples
--------
>>> x = np.ma.array([0, 1.], fill_value=-np.inf)
>>> x.fill_value
-inf
>>> x.set_fill_value(np.pi)
>>> x.fill_value
3.1415926535897931
Reset to default:
>>> x.set_fill_value()
>>> x.fill_value
1e+20
"""
target = _check_fill_value(value, self.dtype)
_fill_value = self._fill_value
if _fill_value is None:
# Create the attribute if it was undefined
self._fill_value = target
else:
# Don't overwrite the attribute, just fill it (for propagation)
_fill_value[()] = target
fill_value = property(fget=get_fill_value, fset=set_fill_value,
doc="Filling value.")
def filled(self, fill_value=None):
"""
Return a copy of self, with masked values filled with a given value.
Parameters
----------
fill_value : scalar, optional
The value to use for invalid entries (None by default).
If None, the `fill_value` attribute of the array is used instead.
Returns
-------
filled_array : ndarray
A copy of ``self`` with invalid entries replaced by *fill_value*
(be it the function argument or the attribute of ``self``).
Notes
-----
The result is **not** a MaskedArray!
Examples
--------
>>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999)
>>> x.filled()
array([1, 2, -999, 4, -999])
>>> type(x.filled())
<type 'numpy.ndarray'>
Subclassing is preserved. This means that if the data part of the masked
array is a matrix, `filled` returns a matrix:
>>> x = np.ma.array(np.matrix([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]])
>>> x.filled()
matrix([[ 1, 999999],
[999999, 4]])
"""
m = self._mask
if m is nomask:
return self._data
#
if fill_value is None:
fill_value = self.fill_value
else:
fill_value = _check_fill_value(fill_value, self.dtype)
#
if self is masked_singleton:
return np.asanyarray(fill_value)
#
if m.dtype.names:
result = self._data.copy('K')
_recursive_filled(result, self._mask, fill_value)
elif not m.any():
return self._data
else:
result = self._data.copy('K')
try:
np.copyto(result, fill_value, where=m)
except (TypeError, AttributeError):
fill_value = narray(fill_value, dtype=object)
d = result.astype(object)
result = np.choose(m, (d, fill_value))
except IndexError:
#ok, if scalar
if self._data.shape:
raise
elif m:
result = np.array(fill_value, dtype=self.dtype)
else:
result = self._data
return result
def compressed(self):
"""
Return all the non-masked data as a 1-D array.
Returns
-------
data : ndarray
A new `ndarray` holding the non-masked data is returned.
Notes
-----
The result is **not** a MaskedArray!
Examples
--------
>>> x = np.ma.array(np.arange(5), mask=[0]*2 + [1]*3)
>>> x.compressed()
array([0, 1])
>>> type(x.compressed())
<type 'numpy.ndarray'>
"""
data = ndarray.ravel(self._data)
if self._mask is not nomask:
data = data.compress(np.logical_not(ndarray.ravel(self._mask)))
return data
def compress(self, condition, axis=None, out=None):
"""
Return `a` where condition is ``True``.
If condition is a `MaskedArray`, missing values are considered
as ``False``.
Parameters
----------
condition : var
Boolean 1-d array selecting which entries to return. If len(condition)
is less than the size of `a` along the axis, then the output is truncated
to the length of the condition array.
axis : {None, int}, optional
Axis along which the operation must be performed.
out : {None, ndarray}, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type will be cast if
necessary.
Returns
-------
result : MaskedArray
A :class:`MaskedArray` object.
Notes
-----
Please note the difference with :meth:`compressed`!
The output of :meth:`compress` has a mask, the output of
:meth:`compressed` does not.
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> print x
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
>>> x.compress([1, 0, 1])
masked_array(data = [1 3],
mask = [False False],
fill_value=999999)
>>> x.compress([1, 0, 1], axis=1)
masked_array(data =
[[1 3]
[-- --]
[7 9]],
mask =
[[False False]
[ True True]
[False False]],
fill_value=999999)
"""
# Get the basic components
(_data, _mask) = (self._data, self._mask)
# Force the condition to a regular ndarray (forget the missing values...)
condition = np.array(condition, copy=False, subok=False)
#
_new = _data.compress(condition, axis=axis, out=out).view(type(self))
_new._update_from(self)
if _mask is not nomask:
_new._mask = _mask.compress(condition, axis=axis)
return _new
#............................................
def __str__(self):
"""String representation.
"""
if masked_print_option.enabled():
f = masked_print_option
if self is masked:
return str(f)
m = self._mask
if m is nomask:
res = self._data
else:
if m.shape == ():
if m.dtype.names:
m = m.view((bool, len(m.dtype)))
if m.any():
return str(tuple((f if _m else _d) for _d, _m in
zip(self._data.tolist(), m)))
else:
return str(self._data)
elif m:
return str(f)
else:
return str(self._data)
# convert to object array to make filled work
names = self.dtype.names
if names is None:
res = self._data.astype("O")
res.view(ndarray)[m] = f
else:
rdtype = _recursive_make_descr(self.dtype, "O")
res = self._data.astype(rdtype)
_recursive_printoption(res, m, f)
else:
res = self.filled(self.fill_value)
return str(res)
def __repr__(self):
"""Literal string representation.
"""
n = len(self.shape)
if self._baseclass is np.ndarray:
name = 'array'
else:
name = self._baseclass.__name__
parameters = dict(name=name, nlen=" " * len(name),
data=str(self), mask=str(self._mask),
fill=str(self.fill_value), dtype=str(self.dtype))
if self.dtype.names:
if n <= 1:
return _print_templates['short_flx'] % parameters
return _print_templates['long_flx'] % parameters
elif n <= 1:
return _print_templates['short_std'] % parameters
return _print_templates['long_std'] % parameters
def __eq__(self, other):
"Check whether other equals self elementwise"
if self is masked:
return masked
omask = getattr(other, '_mask', nomask)
if omask is nomask:
check = ndarray.__eq__(self.filled(0), other)
try:
check = check.view(type(self))
check._mask = self._mask
except AttributeError:
# Dang, we have a bool instead of an array: return the bool
return check
else:
odata = filled(other, 0)
check = ndarray.__eq__(self.filled(0), odata).view(type(self))
if self._mask is nomask:
check._mask = omask
else:
mask = mask_or(self._mask, omask)
if mask.dtype.names:
if mask.size > 1:
axis = 1
else:
axis = None
try:
mask = mask.view((bool_, len(self.dtype))).all(axis)
except ValueError:
mask = np.all([[f[n].all() for n in mask.dtype.names]
for f in mask], axis=axis)
check._mask = mask
return check
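# Comparison sketch (illustrative only): the result's mask is the OR of the
# two operands' masks:
#
#   >>> a = np.ma.array([1, 2, 3], mask=[0, 1, 0])
#   >>> b = np.ma.array([1, 0, 3], mask=[0, 0, 1])
#   >>> (a == b).mask
#   array([False,  True,  True])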
#
def __ne__(self, other):
"Check whether other doesn't equal self elementwise"
if self is masked:
return masked
omask = getattr(other, '_mask', nomask)
if omask is nomask:
check = ndarray.__ne__(self.filled(0), other)
try:
check = check.view(type(self))
check._mask = self._mask
except AttributeError:
# In case check is a boolean (or a numpy.bool)
return check
else:
odata = filled(other, 0)
check = ndarray.__ne__(self.filled(0), odata).view(type(self))
if self._mask is nomask:
check._mask = omask
else:
mask = mask_or(self._mask, omask)
if mask.dtype.names:
if mask.size > 1:
axis = 1
else:
axis = None
try:
mask = mask.view((bool_, len(self.dtype))).all(axis)
except ValueError:
mask = np.all([[f[n].all() for n in mask.dtype.names]
for f in mask], axis=axis)
check._mask = mask
return check
#
def __add__(self, other):
"Add other to self, and return a new masked array."
return add(self, other)
#
def __radd__(self, other):
"Add other to self, and return a new masked array."
return add(self, other)
#
def __sub__(self, other):
"Subtract other to self, and return a new masked array."
return subtract(self, other)
#
def __rsub__(self, other):
"Subtract other to self, and return a new masked array."
return subtract(other, self)
#
def __mul__(self, other):
"Multiply other by self, and return a new masked array."
return multiply(self, other)
#
def __rmul__(self, other):
"Multiply other by self, and return a new masked array."
return multiply(self, other)
#
def __div__(self, other):
"Divide other into self, and return a new masked array."
return divide(self, other)
#
def __truediv__(self, other):
"Divide other into self, and return a new masked array."
return true_divide(self, other)
#
def __rtruediv__(self, other):
"Divide other into self, and return a new masked array."
return true_divide(other, self)
#
def __floordiv__(self, other):
"Divide other into self, and return a new masked array."
return floor_divide(self, other)
#
def __rfloordiv__(self, other):
"Divide other into self, and return a new masked array."
return floor_divide(other, self)
#
def __pow__(self, other):
"Raise self to the power other, masking the potential NaNs/Infs"
return power(self, other)
#
def __rpow__(self, other):
"Raise self to the power other, masking the potential NaNs/Infs"
return power(other, self)
#............................................
def __iadd__(self, other):
"Add other to self in-place."
m = getmask(other)
if self._mask is nomask:
if m is not nomask and m.any():
self._mask = make_mask_none(self.shape, self.dtype)
self._mask += m
else:
if m is not nomask:
self._mask += m
ndarray.__iadd__(self._data, np.where(self._mask, 0, getdata(other)))
return self
#....
def __isub__(self, other):
"Subtract other from self in-place."
m = getmask(other)
if self._mask is nomask:
if m is not nomask and m.any():
self._mask = make_mask_none(self.shape, self.dtype)
self._mask += m
elif m is not nomask:
self._mask += m
ndarray.__isub__(self._data, np.where(self._mask, 0, getdata(other)))
return self
#....
def __imul__(self, other):
"Multiply self by other in-place."
m = getmask(other)
if self._mask is nomask:
if m is not nomask and m.any():
self._mask = make_mask_none(self.shape, self.dtype)
self._mask += m
elif m is not nomask:
self._mask += m
ndarray.__imul__(self._data, np.where(self._mask, 1, getdata(other)))
return self
#....
def __idiv__(self, other):
"Divide self by other in-place."
other_data = getdata(other)
dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
other_mask = getmask(other)
new_mask = mask_or(other_mask, dom_mask)
# The following 3 lines control the domain filling
if dom_mask.any():
(_, fval) = ufunc_fills[np.divide]
other_data = np.where(dom_mask, fval, other_data)
# self._mask = mask_or(self._mask, new_mask)
self._mask |= new_mask
ndarray.__idiv__(self._data, np.where(self._mask, 1, other_data))
return self
#....
def __ifloordiv__(self, other):
"Floor divide self by other in-place."
other_data = getdata(other)
dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
other_mask = getmask(other)
new_mask = mask_or(other_mask, dom_mask)
# The following 3 lines control the domain filling
if dom_mask.any():
(_, fval) = ufunc_fills[np.floor_divide]
other_data = np.where(dom_mask, fval, other_data)
# self._mask = mask_or(self._mask, new_mask)
self._mask |= new_mask
ndarray.__ifloordiv__(self._data, np.where(self._mask, 1, other_data))
return self
#....
def __itruediv__(self, other):
"True divide self by other in-place."
other_data = getdata(other)
dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
other_mask = getmask(other)
new_mask = mask_or(other_mask, dom_mask)
# The following 3 lines control the domain filling
if dom_mask.any():
(_, fval) = ufunc_fills[np.true_divide]
other_data = np.where(dom_mask, fval, other_data)
# self._mask = mask_or(self._mask, new_mask)
self._mask |= new_mask
ndarray.__itruediv__(self._data, np.where(self._mask, 1, other_data))
return self
#...
def __ipow__(self, other):
"Raise self to the power other, in place."
other_data = getdata(other)
other_mask = getmask(other)
with np.errstate(divide='ignore', invalid='ignore'):
ndarray.__ipow__(self._data, np.where(self._mask, 1, other_data))
invalid = np.logical_not(np.isfinite(self._data))
if invalid.any():
if self._mask is not nomask:
self._mask |= invalid
else:
self._mask = invalid
np.copyto(self._data, self.fill_value, where=invalid)
new_mask = mask_or(other_mask, invalid)
self._mask = mask_or(self._mask, new_mask)
return self
#............................................
def __float__(self):
"Convert to float."
if self.size > 1:
raise TypeError("Only length-1 arrays can be converted "
"to Python scalars")
elif self._mask:
warnings.warn("Warning: converting a masked element to nan.")
return np.nan
return float(self.item())
def __int__(self):
"Convert to int."
if self.size > 1:
raise TypeError("Only length-1 arrays can be converted "
"to Python scalars")
elif self._mask:
raise MaskError('Cannot convert masked element to a Python int.')
return int(self.item())
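# Scalar-conversion sketch (illustrative only): converting a masked scalar
# to float warns and yields nan, while int conversion raises MaskError:
#
#   >>> x = np.ma.array([1, 2], mask=[0, 1])
#   >>> float(x[1])                  # warns about converting to nan
#   nan
#   >>> int(x[1])                    # raises MaskError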
def get_imag(self):
"""
Return the imaginary part of the masked array.
The returned array is a view on the imaginary part of the `MaskedArray`
whose `get_imag` method is called.
Parameters
----------
None
Returns
-------
result : MaskedArray
The imaginary part of the masked array.
See Also
--------
get_real, real, imag
Examples
--------
>>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False])
>>> x.get_imag()
masked_array(data = [1.0 -- 1.6],
mask = [False True False],
fill_value = 1e+20)
"""
result = self._data.imag.view(type(self))
result.__setmask__(self._mask)
return result
imag = property(fget=get_imag, doc="Imaginary part.")
def get_real(self):
"""
Return the real part of the masked array.
The returned array is a view on the real part of the `MaskedArray`
whose `get_real` method is called.
Parameters
----------
None
Returns
-------
result : MaskedArray
The real part of the masked array.
See Also
--------
get_imag, real, imag
Examples
--------
>>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False])
>>> x.get_real()
masked_array(data = [1.0 -- 3.45],
mask = [False True False],
fill_value = 1e+20)
"""
result = self._data.real.view(type(self))
result.__setmask__(self._mask)
return result
real = property(fget=get_real, doc="Real part")
#............................................
def count(self, axis=None):
"""
Count the non-masked elements of the array along the given axis.
Parameters
----------
axis : int, optional
Axis along which to count the non-masked elements. If `axis` is
`None`, all non-masked elements are counted.
Returns
-------
result : int or ndarray
If `axis` is `None`, an integer count is returned. When `axis` is
not `None`, an array with shape determined by the lengths of the
remaining axes is returned.
See Also
--------
count_masked : Count masked elements in array or along a given axis.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.arange(6).reshape((2, 3))
>>> a[1, :] = ma.masked
>>> a
masked_array(data =
[[0 1 2]
[-- -- --]],
mask =
[[False False False]
[ True True True]],
fill_value = 999999)
>>> a.count()
3
When the `axis` keyword is specified an array of appropriate size is
returned.
>>> a.count(axis=0)
array([1, 1, 1])
>>> a.count(axis=1)
array([3, 0])
"""
m = self._mask
s = self.shape
if m is nomask:
if axis is None:
return self.size
else:
n = s[axis]
t = list(s)
del t[axis]
return np.full(t, n, dtype=np.intp)
n1 = np.size(m, axis)
n2 = np.sum(m, axis=axis, dtype=np.intp)
if axis is None:
return (n1 - n2)
else:
return narray(n1 - n2)
#............................................
flatten = _arraymethod('flatten')
#
def ravel(self):
"""
Returns a 1D version of self, as a view.
Returns
-------
MaskedArray
Output view is of shape ``(self.size,)`` (or
``(np.ma.product(self.shape),)``).
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> print x
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
>>> print x.ravel()
[1 -- 3 -- 5 -- 7 -- 9]
"""
r = ndarray.ravel(self._data).view(type(self))
r._update_from(self)
if self._mask is not nomask:
r._mask = ndarray.ravel(self._mask).reshape(r.shape)
else:
r._mask = nomask
return r
#
repeat = _arraymethod('repeat')
#
def reshape(self, *s, **kwargs):
"""
Give a new shape to the array without changing its data.
Returns a masked array containing the same data, but with a new shape.
The result is a view on the original array; if this is not possible, a
ValueError is raised.
Parameters
----------
shape : int or tuple of ints
The new shape should be compatible with the original shape. If an
integer is supplied, then the result will be a 1-D array of that
length.
order : {'C', 'F'}, optional
Determines whether the array data should be viewed as in C
(row-major) or FORTRAN (column-major) order.
Returns
-------
reshaped_array : array
A new view on the array.
See Also
--------
reshape : Equivalent function in the masked array module.
numpy.ndarray.reshape : Equivalent method on ndarray object.
numpy.reshape : Equivalent function in the NumPy module.
Notes
-----
The reshaping operation cannot guarantee that a copy will not be made;
to modify the shape in place, use ``a.shape = s``.
Examples
--------
>>> x = np.ma.array([[1,2],[3,4]], mask=[1,0,0,1])
>>> print x
[[-- 2]
[3 --]]
>>> x = x.reshape((4,1))
>>> print x
[[--]
[2]
[3]
[--]]
"""
kwargs.update(order=kwargs.get('order', 'C'))
result = self._data.reshape(*s, **kwargs).view(type(self))
result._update_from(self)
mask = self._mask
if mask is not nomask:
result._mask = mask.reshape(*s, **kwargs)
return result
#
def resize(self, newshape, refcheck=True, order=False):
"""
.. warning::
This method does nothing, except raise a ValueError exception. A
masked array does not own its data and therefore cannot safely be
resized in place. Use the `numpy.ma.resize` function instead.
This method is difficult to implement safely and may be deprecated in
future releases of NumPy.
"""
# Note : the 'order' keyword looks broken, let's just drop it
# try:
# ndarray.resize(self, newshape, refcheck=refcheck)
# if self.mask is not nomask:
# self._mask.resize(newshape, refcheck=refcheck)
# except ValueError:
# raise ValueError("Cannot resize an array that has been referenced "
# "or is referencing another array in this way.\n"
# "Use the numpy.ma.resize function.")
# return None
errmsg = "A masked array does not own its data "\
"and therefore cannot be resized.\n" \
"Use the numpy.ma.resize function instead."
raise ValueError(errmsg)
#
def put(self, indices, values, mode='raise'):
"""
Set storage-indexed locations to corresponding values.
Sets self._data.flat[n] = values[n] for each n in indices.
If `values` is shorter than `indices` then it will repeat.
If `values` has some masked values, the initial mask is updated
accordingly; otherwise, the corresponding values are unmasked.
Parameters
----------
indices : 1-D array_like
Target indices, interpreted as integers.
values : array_like
Values to place in self._data copy at target indices.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
'raise' : raise an error.
'wrap' : wrap around.
'clip' : clip to the range.
Notes
-----
`values` can be a scalar or length 1 array.
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> print x
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
>>> x.put([0,4,8],[10,20,30])
>>> print x
[[10 -- 3]
[-- 20 --]
[7 -- 30]]
>>> x.put(4,999)
>>> print x
[[10 -- 3]
[-- 999 --]
[7 -- 30]]
"""
m = self._mask
# Hard mask: Get rid of the values/indices that fall on masked data
if self._hardmask and self._mask is not nomask:
mask = self._mask[indices]
indices = narray(indices, copy=False)
values = narray(values, copy=False, subok=True)
values.resize(indices.shape)
indices = indices[~mask]
values = values[~mask]
#....
self._data.put(indices, values, mode=mode)
#....
if m is nomask:
m = getmask(values)
else:
m = m.copy()
if getmask(values) is nomask:
m.put(indices, False, mode=mode)
else:
m.put(indices, values._mask, mode=mode)
m = make_mask(m, copy=False, shrink=True)
self._mask = m
#............................................
def ids(self):
"""
Return the addresses of the data and mask areas.
Parameters
----------
None
Examples
--------
>>> x = np.ma.array([1, 2, 3], mask=[0, 1, 1])
>>> x.ids()
(166670640, 166659832)
If the array has no mask, the address of `nomask` is returned. This address
is typically not close to the data in memory:
>>> x = np.ma.array([1, 2, 3])
>>> x.ids()
(166691080, 3083169284L)
"""
if self._mask is nomask:
return (self.ctypes.data, id(nomask))
return (self.ctypes.data, self._mask.ctypes.data)
def iscontiguous(self):
"""
Return a boolean indicating whether the data is contiguous.
Parameters
----------
None
Examples
--------
>>> x = np.ma.array([1, 2, 3])
>>> x.iscontiguous()
True
`iscontiguous` returns one of the flags of the masked array:
>>> x.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : True
OWNDATA : False
WRITEABLE : True
ALIGNED : True
UPDATEIFCOPY : False
"""
return self.flags['CONTIGUOUS']
#............................................
def all(self, axis=None, out=None):
"""
Check if all of the elements of `a` are true.
Performs a :func:`logical_and` over the given axis and returns the result.
Masked values are considered as True during computation.
For convenience, the output array is masked where ALL the values along the
current axis are masked: if the output would have been a scalar and all
the values are masked, then the output is `masked`.
Parameters
----------
axis : {None, integer}
Axis to perform the operation over.
If None, perform over flattened array.
out : {None, array}, optional
Array into which the result can be placed. Its type is preserved
and it must be of the right shape to hold the output.
See Also
--------
all : equivalent function
Examples
--------
>>> np.ma.array([1,2,3]).all()
True
>>> a = np.ma.array([1,2,3], mask=True)
>>> (a.all() is np.ma.masked)
True
"""
mask = _check_mask_axis(self._mask, axis)
if out is None:
d = self.filled(True).all(axis=axis).view(type(self))
if d.ndim:
d.__setmask__(mask)
elif mask:
return masked
return d
self.filled(True).all(axis=axis, out=out)
if isinstance(out, MaskedArray):
if out.ndim or mask:
out.__setmask__(mask)
return out
def any(self, axis=None, out=None):
"""
Check if any of the elements of `a` are true.
Performs a logical_or over the given axis and returns the result.
Masked values are considered as False during computation.
Parameters
----------
axis : {None, integer}
Axis to perform the operation over.
If None, perform over flattened array and return a scalar.
out : {None, array}, optional
Array into which the result can be placed. Its type is preserved
and it must be of the right shape to hold the output.
See Also
--------
any : equivalent function
"""
mask = _check_mask_axis(self._mask, axis)
if out is None:
d = self.filled(False).any(axis=axis).view(type(self))
if d.ndim:
d.__setmask__(mask)
elif mask:
d = masked
return d
self.filled(False).any(axis=axis, out=out)
if isinstance(out, MaskedArray):
if out.ndim or mask:
out.__setmask__(mask)
return out
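# Usage sketch for ``any`` above (illustrative only): masked values count as
# False during the computation:
#
#   >>> x = np.ma.array([0, 0, 1], mask=[0, 0, 1])
#   >>> bool(x.any())                # the only nonzero entry is masked
#   False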
def nonzero(self):
"""
Return the indices of unmasked elements that are not zero.
Returns a tuple of arrays, one for each dimension, containing the
indices of the non-zero elements in that dimension. The corresponding
non-zero values can be obtained with::
a[a.nonzero()]
To group the indices by element, rather than dimension, use
instead::
np.transpose(a.nonzero())
The result of this is always a 2d array, with a row for each non-zero
element.
Parameters
----------
None
Returns
-------
tuple_of_arrays : tuple
Indices of elements that are non-zero.
See Also
--------
numpy.nonzero :
Function operating on ndarrays.
flatnonzero :
Return indices that are non-zero in the flattened version of the input
array.
ndarray.nonzero :
Equivalent ndarray method.
count_nonzero :
Counts the number of non-zero elements in the input array.
Examples
--------
>>> import numpy.ma as ma
>>> x = ma.array(np.eye(3))
>>> x
masked_array(data =
[[ 1. 0. 0.]
[ 0. 1. 0.]
[ 0. 0. 1.]],
mask =
False,
fill_value=1e+20)
>>> x.nonzero()
(array([0, 1, 2]), array([0, 1, 2]))
Masked elements are ignored.
>>> x[1, 1] = ma.masked
>>> x
masked_array(data =
[[1.0 0.0 0.0]
[0.0 -- 0.0]
[0.0 0.0 1.0]],
mask =
[[False False False]
[False True False]
[False False False]],
fill_value=1e+20)
>>> x.nonzero()
(array([0, 2]), array([0, 2]))
Indices can also be grouped by element.
>>> np.transpose(x.nonzero())
array([[0, 0],
[2, 2]])
A common use for ``nonzero`` is to find the indices of an array, where
a condition is True. Given an array `a`, the condition `a` > 3 is a
boolean array and since False is interpreted as 0, ma.nonzero(a > 3)
yields the indices of `a` where the condition is true.
>>> a = ma.array([[1,2,3],[4,5,6],[7,8,9]])
>>> a > 3
masked_array(data =
[[False False False]
[ True True True]
[ True True True]],
mask =
False,
fill_value=999999)
>>> ma.nonzero(a > 3)
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
The ``nonzero`` method of the condition array can also be called.
>>> (a > 3).nonzero()
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
"""
return narray(self.filled(0), copy=False).nonzero()
def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):
"""
(this docstring should be overwritten)
"""
#!!!: implement out + test!
m = self._mask
if m is nomask:
result = super(MaskedArray, self).trace(offset=offset, axis1=axis1,
axis2=axis2, out=out)
return result.astype(dtype)
else:
D = self.diagonal(offset=offset, axis1=axis1, axis2=axis2)
return D.astype(dtype).filled(0).sum(axis=None, out=out)
trace.__doc__ = ndarray.trace.__doc__
def sum(self, axis=None, dtype=None, out=None):
"""
Return the sum of the array elements over the given axis.
Masked elements are set to 0 internally.
Parameters
----------
axis : {None, -1, int}, optional
Axis along which the sum is computed. The default
(`axis` = None) is to compute over the flattened array.
dtype : {None, dtype}, optional
Determines the type of the returned array and of the accumulator
where the elements are summed. If dtype has the value None and
the type of a is an integer type of precision less than the default
platform integer, then the default platform integer precision is
used. Otherwise, the dtype is the same as that of a.
out : {None, ndarray}, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary.
Returns
-------
sum_along_axis : MaskedArray or scalar
An array with the same shape as self, with the specified
axis removed. If self is a 0-d array, or if `axis` is None, a scalar
is returned. If an output array is specified, a reference to
`out` is returned.
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> print x
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
>>> print x.sum()
25
>>> print x.sum(axis=1)
[4 5 16]
>>> print x.sum(axis=0)
[8 5 12]
>>> print type(x.sum(axis=0, dtype=np.int64)[0])
<type 'numpy.int64'>
"""
_mask = ndarray.__getattribute__(self, '_mask')
newmask = _check_mask_axis(_mask, axis)
# No explicit output
if out is None:
result = self.filled(0).sum(axis, dtype=dtype)
rndim = getattr(result, 'ndim', 0)
if rndim:
result = result.view(type(self))
result.__setmask__(newmask)
elif newmask:
result = masked
return result
# Explicit output
result = self.filled(0).sum(axis, dtype=dtype, out=out)
if isinstance(out, MaskedArray):
outmask = getattr(out, '_mask', nomask)
if (outmask is nomask):
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
return out
def cumsum(self, axis=None, dtype=None, out=None):
"""
Return the cumulative sum of the elements along the given axis.
The cumulative sum is calculated over the flattened array by
default, otherwise over the specified axis.
Masked values are set to 0 internally during the computation.
However, their position is saved, and the result will be masked at
the same locations.
Parameters
----------
axis : {None, -1, int}, optional
Axis along which the sum is computed. The default (`axis` = None) is to
compute over the flattened array. `axis` may be negative, in which case
it counts from the last to the first axis.
dtype : {None, dtype}, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults
to the dtype of `a`, unless `a` has an integer dtype with a
precision less than that of the default platform integer. In
that case, the default platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary.
Returns
-------
cumsum : ndarray
A new array holding the result is returned unless ``out`` is
specified, in which case a reference to ``out`` is returned.
Notes
-----
The mask is lost if `out` is not a valid :class:`MaskedArray` !
Arithmetic is modular when using integer types, and no error is
raised on overflow.
Examples
--------
>>> marr = np.ma.array(np.arange(10), mask=[0,0,0,1,1,1,0,0,0,0])
>>> print marr.cumsum()
[0 1 3 -- -- -- 9 16 24 33]
"""
result = self.filled(0).cumsum(axis=axis, dtype=dtype, out=out)
if out is not None:
if isinstance(out, MaskedArray):
out.__setmask__(self.mask)
return out
result = result.view(type(self))
result.__setmask__(self._mask)
return result
def prod(self, axis=None, dtype=None, out=None):
"""
Return the product of the array elements over the given axis.
Masked elements are set to 1 internally for computation.
Parameters
----------
axis : {None, int}, optional
Axis over which the product is taken. If None is used, then the
product is over all the array elements.
dtype : {None, dtype}, optional
Determines the type of the returned array and of the accumulator
where the elements are multiplied. If ``dtype`` has the value ``None``
and the type of a is an integer type of precision less than the default
platform integer, then the default platform integer precision is
used. Otherwise, the dtype is the same as that of a.
out : {None, array}, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type will be cast if
necessary.
Returns
-------
product_along_axis : {array, scalar}, see dtype parameter above.
Returns an array whose shape is the same as a with the specified
axis removed. Returns a 0d array when a is 1d or axis=None.
Returns a reference to the specified output array if specified.
See Also
--------
prod : equivalent function
Notes
-----
Arithmetic is modular when using integer types, and no error is raised
on overflow.
Examples
--------
>>> np.prod([1.,2.])
2.0
>>> np.prod([1.,2.], dtype=np.int32)
2
>>> np.prod([[1.,2.],[3.,4.]])
24.0
>>> np.prod([[1.,2.],[3.,4.]], axis=1)
array([ 2., 12.])
"""
_mask = ndarray.__getattribute__(self, '_mask')
newmask = _check_mask_axis(_mask, axis)
# No explicit output
if out is None:
result = self.filled(1).prod(axis, dtype=dtype)
rndim = getattr(result, 'ndim', 0)
if rndim:
result = result.view(type(self))
result.__setmask__(newmask)
elif newmask:
result = masked
return result
# Explicit output
result = self.filled(1).prod(axis, dtype=dtype, out=out)
if isinstance(out, MaskedArray):
outmask = getattr(out, '_mask', nomask)
if (outmask is nomask):
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
return out
product = prod
def cumprod(self, axis=None, dtype=None, out=None):
"""
Return the cumulative product of the elements along the given axis.
The cumulative product is taken over the flattened array by
default, otherwise over the specified axis.
Masked values are set to 1 internally during the computation.
However, their position is saved, and the result will be masked at
the same locations.
Parameters
----------
axis : {None, -1, int}, optional
Axis along which the product is computed. The default
(`axis` = None) is to compute over the flattened array.
dtype : {None, dtype}, optional
Determines the type of the returned array and of the accumulator
where the elements are multiplied. If ``dtype`` has the value ``None``
and the type of ``a`` is an integer type of precision less than the
default platform integer, then the default platform integer precision
is used. Otherwise, the dtype is the same as that of ``a``.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary.
Returns
-------
cumprod : ndarray
A new array holding the result is returned unless out is specified,
in which case a reference to out is returned.
Notes
-----
The mask is lost if `out` is not a valid MaskedArray!
Arithmetic is modular when using integer types, and no error is
raised on overflow.
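Examples
--------
An illustrative example (masked positions are treated as 1 for the
computation, but stay masked in the result):
>>> marr = np.ma.array(np.arange(1, 6), mask=[0, 0, 1, 0, 0])
>>> print marr.cumprod()
[1 2 -- 8 40]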
"""
result = self.filled(1).cumprod(axis=axis, dtype=dtype, out=out)
if out is not None:
if isinstance(out, MaskedArray):
out.__setmask__(self._mask)
return out
result = result.view(type(self))
result.__setmask__(self._mask)
return result
def mean(self, axis=None, dtype=None, out=None):
"""
Returns the average of the array elements.
Masked entries are ignored.
The average is taken over the flattened array by default, otherwise over
the specified axis. Refer to `numpy.mean` for the full documentation.
Parameters
----------
a : array_like
Array containing numbers whose mean is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the means are computed. The default is to compute
the mean of the flattened array.
dtype : dtype, optional
Type to use in computing the mean. For integer inputs, the default
is float64; for floating point inputs, it is the same as the input
dtype.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type will be cast if
necessary.
Returns
-------
mean : ndarray, see dtype parameter above
If `out=None`, returns a new array containing the mean values,
otherwise a reference to the output array is returned.
See Also
--------
numpy.ma.mean : Equivalent function.
numpy.mean : Equivalent function on non-masked arrays.
numpy.ma.average: Weighted average.
Examples
--------
>>> a = np.ma.array([1,2,3], mask=[False, False, True])
>>> a
masked_array(data = [1 2 --],
mask = [False False True],
fill_value = 999999)
>>> a.mean()
1.5
"""
if self._mask is nomask:
result = super(MaskedArray, self).mean(axis=axis, dtype=dtype)
else:
dsum = self.sum(axis=axis, dtype=dtype)
cnt = self.count(axis=axis)
if cnt.shape == () and (cnt == 0):
result = masked
else:
result = dsum * 1. / cnt
if out is not None:
out.flat = result
if isinstance(out, MaskedArray):
outmask = getattr(out, '_mask', nomask)
if (outmask is nomask):
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = getattr(result, '_mask', nomask)
return out
return result
def anom(self, axis=None, dtype=None):
"""
Compute the anomalies (deviations from the arithmetic mean)
along the given axis.
Returns an array of anomalies, with the same shape as the input and
where the arithmetic mean is computed along the given axis.
Parameters
----------
axis : int, optional
Axis over which the anomalies are taken.
The default is to use the mean of the flattened array as reference.
dtype : dtype, optional
Type to use in computing the variance. For arrays of integer type
the default is float32; for arrays of float types it is the same as
the array type.
See Also
--------
mean : Compute the mean of the array.
Examples
--------
>>> a = np.ma.array([1,2,3])
>>> a.anom()
masked_array(data = [-1. 0. 1.],
mask = False,
fill_value = 1e+20)
"""
m = self.mean(axis, dtype)
if not axis:
return (self - m)
else:
return (self - expand_dims(m, axis))
def var(self, axis=None, dtype=None, out=None, ddof=0):
""
# Easy case: nomask, business as usual
if self._mask is nomask:
return self._data.var(axis=axis, dtype=dtype, out=out, ddof=ddof)
# Some data are masked, yay!
cnt = self.count(axis=axis) - ddof
danom = self.anom(axis=axis, dtype=dtype)
if iscomplexobj(self):
danom = umath.absolute(danom) ** 2
else:
danom *= danom
dvar = divide(danom.sum(axis), cnt).view(type(self))
# Apply the mask if it's not a scalar
if dvar.ndim:
dvar._mask = mask_or(self._mask.all(axis), (cnt <= 0))
dvar._update_from(self)
elif getattr(dvar, '_mask', False):
# Make sure that masked is returned when the scalar is masked.
dvar = masked
if out is not None:
if isinstance(out, MaskedArray):
out.flat = 0
out.__setmask__(True)
elif out.dtype.kind in 'biu':
errmsg = "Masked data information would be lost in one or "\
"more location."
raise MaskError(errmsg)
else:
out.flat = np.nan
return out
# In case we have an explicit output
if out is not None:
# Set the data
out.flat = dvar
# Set the mask if needed
if isinstance(out, MaskedArray):
out.__setmask__(dvar.mask)
return out
return dvar
var.__doc__ = np.var.__doc__
def std(self, axis=None, dtype=None, out=None, ddof=0):
""
dvar = self.var(axis=axis, dtype=dtype, out=out, ddof=ddof)
if dvar is not masked:
if out is not None:
np.power(out, 0.5, out=out, casting='unsafe')
return out
dvar = sqrt(dvar)
return dvar
std.__doc__ = np.std.__doc__
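# A small numeric sanity check for the masked ``var``/``std`` above; a
# sketch, with values chosen only for illustration:
# >>> x = masked_array([1., 2., 3., 4.], mask=[0, 0, 1, 0])
# >>> print x.var()   # mean of [1, 2, 4] is 7/3, so the variance is 14/9
# 1.55555555556
# >>> print x.std()   # sqrt(14/9)
# 1.24721912892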
#............................................
def round(self, decimals=0, out=None):
"""
Return `a` rounded to the given number of decimals.
Refer to `numpy.around` for full documentation.
See Also
--------
numpy.around : equivalent function
"""
result = self._data.round(decimals=decimals, out=out).view(type(self))
result._mask = self._mask
result._update_from(self)
# No explicit output: we're done
if out is None:
return result
if isinstance(out, MaskedArray):
out.__setmask__(self._mask)
return out
round.__doc__ = ndarray.round.__doc__
#............................................
def argsort(self, axis=None, kind='quicksort', order=None, fill_value=None):
"""
Return an ndarray of indices that sort the array along the
specified axis. Masked values are filled beforehand to
`fill_value`.
Parameters
----------
axis : int, optional
Axis along which to sort. If None (the default), the flattened array
is used.
fill_value : var, optional
Value used to fill the array before sorting.
The default is the `fill_value` attribute of the input array.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
order : list, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. Not all fields need be
specified.
Returns
-------
index_array : ndarray, int
Array of indices that sort `a` along the specified axis.
In other words, ``a[index_array]`` yields a sorted `a`.
See Also
--------
sort : Describes sorting algorithms used.
lexsort : Indirect stable sort with multiple keys.
ndarray.sort : Inplace sort.
Notes
-----
See `sort` for notes on the different sorting algorithms.
Examples
--------
>>> a = np.ma.array([3,2,1], mask=[False, False, True])
>>> a
masked_array(data = [3 2 --],
mask = [False False True],
fill_value = 999999)
>>> a.argsort()
array([1, 0, 2])
"""
if fill_value is None:
fill_value = default_fill_value(self)
d = self.filled(fill_value).view(ndarray)
return d.argsort(axis=axis, kind=kind, order=order)
def argmin(self, axis=None, fill_value=None, out=None):
"""
Return array of indices to the minimum values along the given axis.
Parameters
----------
axis : {None, integer}
If None, the index is into the flattened array, otherwise along
the specified axis
fill_value : {var}, optional
Value used to fill in the masked values. If None, the output of
minimum_fill_value(self._data) is used instead.
out : {None, array}, optional
Array into which the result can be placed. Its type is preserved
and it must be of the right shape to hold the output.
Returns
-------
{ndarray, scalar}
For multi-dimensional input, returns a new ndarray of indices to the
minimum values along the given axis. Otherwise, returns a scalar
index of the minimum value along the given axis.
Examples
--------
>>> x = np.ma.array(np.arange(4), mask=[1,1,0,0])
>>> x.shape = (2,2)
>>> print x
[[-- --]
[2 3]]
>>> print x.argmin(axis=0, fill_value=-1)
[0 0]
>>> print x.argmin(axis=0, fill_value=9)
[1 1]
"""
if fill_value is None:
fill_value = minimum_fill_value(self)
d = self.filled(fill_value).view(ndarray)
return d.argmin(axis, out=out)
def argmax(self, axis=None, fill_value=None, out=None):
"""
Returns array of indices of the maximum values along the given axis.
Masked values are treated as if they had the value fill_value.
Parameters
----------
axis : {None, integer}
If None, the index is into the flattened array, otherwise along
the specified axis
fill_value : {var}, optional
Value used to fill in the masked values. If None, the output of
maximum_fill_value(self._data) is used instead.
out : {None, array}, optional
Array into which the result can be placed. Its type is preserved
and it must be of the right shape to hold the output.
Returns
-------
index_array : {integer_array}
Examples
--------
>>> a = np.arange(6).reshape(2,3)
>>> a.argmax()
5
>>> a.argmax(0)
array([1, 1, 1])
>>> a.argmax(1)
array([2, 2])
"""
if fill_value is None:
fill_value = maximum_fill_value(self._data)
d = self.filled(fill_value).view(ndarray)
return d.argmax(axis, out=out)
def sort(self, axis= -1, kind='quicksort', order=None,
endwith=True, fill_value=None):
"""
Sort the array, in-place.
Parameters
----------
a : array_like
Array to be sorted.
axis : int, optional
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm. Default is 'quicksort'.
order : list, optional
When `a` is a structured array, this argument specifies which fields
to compare first, second, and so on. This list does not need to
include all of the fields.
endwith : {True, False}, optional
Whether missing values (if any) should be forced to the upper indices
(at the end of the array, True) or the lower indices (at the
beginning, False). When the array contains unmasked values equal to
the largest (or smallest, if False) representable value of the
datatype, the ordering of these values and the masked values is
undefined. To ensure the masked values end up at the end (or the
beginning) in this case, one must sort the mask explicitly.
fill_value : {var}, optional
Value used internally for the masked values.
If ``fill_value`` is not None, it supersedes ``endwith``.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
ndarray.sort : Method to sort an array in-place.
argsort : Indirect sort.
lexsort : Indirect stable sort on multiple keys.
searchsorted : Find elements in a sorted array.
Notes
-----
See ``sort`` for notes on the different sorting algorithms.
Examples
--------
>>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0])
>>> # Default
>>> a.sort()
>>> print a
[1 3 5 -- --]
>>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0])
>>> # Put missing values in the front
>>> a.sort(endwith=False)
>>> print a
[-- -- 1 3 5]
>>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0])
>>> # fill_value takes over endwith
>>> a.sort(endwith=False, fill_value=3)
>>> print a
[1 -- -- 3 5]
"""
if self._mask is nomask:
ndarray.sort(self, axis=axis, kind=kind, order=order)
else:
if self is masked:
return self
if fill_value is None:
if endwith:
# nan > inf
if np.issubdtype(self.dtype, np.floating):
filler = np.nan
else:
filler = minimum_fill_value(self)
else:
filler = maximum_fill_value(self)
else:
filler = fill_value
sidx = self.filled(filler).argsort(axis=axis, kind=kind,
order=order)
# save meshgrid memory for 1d arrays
if self.ndim == 1:
idx = sidx
else:
idx = np.meshgrid(*[np.arange(x) for x in self.shape], sparse=True,
indexing='ij')
idx[axis] = sidx
tmp_mask = self._mask[idx].flat
tmp_data = self._data[idx].flat
self._data.flat = tmp_data
self._mask.flat = tmp_mask
return
#............................................
def min(self, axis=None, out=None, fill_value=None):
"""
Return the minimum along a given axis.
Parameters
----------
axis : {None, int}, optional
Axis along which to operate. By default, ``axis`` is None and the
flattened input is used.
out : array_like, optional
Alternative output array in which to place the result. Must be of
the same shape and buffer length as the expected output.
fill_value : {var}, optional
Value used to fill in the masked values.
If None, use the output of `minimum_fill_value`.
Returns
-------
amin : array_like
New array holding the result.
If ``out`` was specified, ``out`` is returned.
See Also
--------
minimum_fill_value
Returns the minimum filling value for a given datatype.
"""
_mask = ndarray.__getattribute__(self, '_mask')
newmask = _check_mask_axis(_mask, axis)
if fill_value is None:
fill_value = minimum_fill_value(self)
# No explicit output
if out is None:
result = self.filled(fill_value).min(axis=axis, out=out).view(type(self))
if result.ndim:
# Set the mask
result.__setmask__(newmask)
# Get rid of Infs
if newmask.ndim:
np.copyto(result, result.fill_value, where=newmask)
elif newmask:
result = masked
return result
# Explicit output
result = self.filled(fill_value).min(axis=axis, out=out)
if isinstance(out, MaskedArray):
outmask = getattr(out, '_mask', nomask)
if (outmask is nomask):
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
else:
if out.dtype.kind in 'biu':
errmsg = "Masked data information would be lost in one or more"\
" location."
raise MaskError(errmsg)
np.copyto(out, np.nan, where=newmask)
return out
def mini(self, axis=None):
"""
Return the array minimum along the specified axis.
Parameters
----------
axis : int, optional
The axis along which to find the minima. Default is None, in which case
the minimum value in the whole array is returned.
Returns
-------
min : scalar or MaskedArray
If `axis` is None, the result is a scalar. Otherwise, if `axis` is
given and the array is at least 2-D, the result is a masked array with
dimension one smaller than the array on which `mini` is called.
Examples
--------
>>> x = np.ma.array(np.arange(6), mask=[0 ,1, 0, 0, 0 ,1]).reshape(3, 2)
>>> print x
[[0 --]
[2 3]
[4 --]]
>>> x.mini()
0
>>> x.mini(axis=0)
masked_array(data = [0 3],
mask = [False False],
fill_value = 999999)
>>> print x.mini(axis=1)
[0 2 4]
"""
if axis is None:
return minimum(self)
else:
return minimum.reduce(self, axis)
#........................
def max(self, axis=None, out=None, fill_value=None):
"""
Return the maximum along a given axis.
Parameters
----------
axis : {None, int}, optional
Axis along which to operate. By default, ``axis`` is None and the
flattened input is used.
out : array_like, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
fill_value : {var}, optional
Value used to fill in the masked values.
If None, use the output of maximum_fill_value().
Returns
-------
amax : array_like
New array holding the result.
If ``out`` was specified, ``out`` is returned.
See Also
--------
maximum_fill_value
Returns the maximum filling value for a given datatype.
"""
_mask = ndarray.__getattribute__(self, '_mask')
newmask = _check_mask_axis(_mask, axis)
if fill_value is None:
fill_value = maximum_fill_value(self)
# No explicit output
if out is None:
result = self.filled(fill_value).max(axis=axis, out=out).view(type(self))
if result.ndim:
# Set the mask
result.__setmask__(newmask)
# Get rid of Infs
if newmask.ndim:
np.copyto(result, result.fill_value, where=newmask)
elif newmask:
result = masked
return result
# Explicit output
result = self.filled(fill_value).max(axis=axis, out=out)
if isinstance(out, MaskedArray):
outmask = getattr(out, '_mask', nomask)
if (outmask is nomask):
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
else:
if out.dtype.kind in 'biu':
errmsg = "Masked data information would be lost in one or more"\
" location."
raise MaskError(errmsg)
np.copyto(out, np.nan, where=newmask)
return out
def ptp(self, axis=None, out=None, fill_value=None):
"""
Return (maximum - minimum) along the given dimension
(i.e. peak-to-peak value).
Parameters
----------
axis : {None, int}, optional
Axis along which to find the peaks. If None (default) the
flattened array is used.
out : {None, array_like}, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary.
fill_value : {var}, optional
Value used to fill in the masked values.
Returns
-------
ptp : ndarray.
A new array holding the result, unless ``out`` was
specified, in which case a reference to ``out`` is returned.
"""
if out is None:
result = self.max(axis=axis, fill_value=fill_value)
result -= self.min(axis=axis, fill_value=fill_value)
return result
out.flat = self.max(axis=axis, out=out, fill_value=fill_value)
min_value = self.min(axis=axis, fill_value=fill_value)
np.subtract(out, min_value, out=out, casting='unsafe')
return out
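# Illustrative note for ``ptp``: with x = masked_array([4, 1, 9], mask=[0, 0, 1]),
# the masked entry is ignored, so x.ptp() == x.max() - x.min() == 4 - 1 == 3.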
def take(self, indices, axis=None, out=None, mode='raise'):
"""
"""
(_data, _mask) = (self._data, self._mask)
cls = type(self)
# Make sure the indices are not masked
maskindices = getattr(indices, '_mask', nomask)
if maskindices is not nomask:
indices = indices.filled(0)
# Get the data
if out is None:
out = _data.take(indices, axis=axis, mode=mode).view(cls)
else:
np.take(_data, indices, axis=axis, mode=mode, out=out)
# Get the mask
if isinstance(out, MaskedArray):
if _mask is nomask:
outmask = maskindices
else:
outmask = _mask.take(indices, axis=axis, mode=mode)
outmask |= maskindices
out.__setmask__(outmask)
return out
# Array methods ---------------------------------------
copy = _arraymethod('copy')
diagonal = _arraymethod('diagonal')
transpose = _arraymethod('transpose')
T = property(fget=lambda self:self.transpose())
swapaxes = _arraymethod('swapaxes')
clip = _arraymethod('clip', onmask=False)
squeeze = _arraymethod('squeeze')
#--------------------------------------------
def tolist(self, fill_value=None):
"""
Return the data portion of the masked array as a hierarchical Python list.
Data items are converted to the nearest compatible Python type.
Masked values are converted to `fill_value`. If `fill_value` is None,
the corresponding entries in the output list will be ``None``.
Parameters
----------
fill_value : scalar, optional
The value to use for invalid entries. Default is None.
Returns
-------
result : list
The Python list representation of the masked array.
Examples
--------
>>> x = np.ma.array([[1,2,3], [4,5,6], [7,8,9]], mask=[0] + [1,0]*4)
>>> x.tolist()
[[1, None, 3], [None, 5, None], [7, None, 9]]
>>> x.tolist(-999)
[[1, -999, 3], [-999, 5, -999], [7, -999, 9]]
"""
_mask = self._mask
# No mask ? Just return .data.tolist ?
if _mask is nomask:
return self._data.tolist()
# Explicit fill_value: fill the array and get the list
if fill_value is not None:
return self.filled(fill_value).tolist()
# Structured array .............
names = self.dtype.names
if names:
result = self._data.astype([(_, object) for _ in names])
for n in names:
result[n][_mask[n]] = None
return result.tolist()
# Standard arrays ...............
if _mask is nomask:
return [None]
# Set temps to save time when dealing w/ marrays...
inishape = self.shape
result = np.array(self._data.ravel(), dtype=object)
result[_mask.ravel()] = None
result.shape = inishape
return result.tolist()
# if fill_value is not None:
# return self.filled(fill_value).tolist()
# result = self.filled().tolist()
# # Set temps to save time when dealing w/ mrecarrays...
# _mask = self._mask
# if _mask is nomask:
# return result
# nbdims = self.ndim
# dtypesize = len(self.dtype)
# if nbdims == 0:
# return tuple([None] * dtypesize)
# elif nbdims == 1:
# maskedidx = _mask.nonzero()[0].tolist()
# if dtypesize:
# nodata = tuple([None] * dtypesize)
# else:
# nodata = None
# [operator.setitem(result, i, nodata) for i in maskedidx]
# else:
# for idx in zip(*[i.tolist() for i in _mask.nonzero()]):
# tmp = result
# for i in idx[:-1]:
# tmp = tmp[i]
# tmp[idx[-1]] = None
# return result
#........................
def tostring(self, fill_value=None, order='C'):
"""
This function is a compatibility alias for tobytes. Despite its name,
it returns bytes, not strings.
"""
return self.tobytes(fill_value, order=order)
#........................
def tobytes(self, fill_value=None, order='C'):
"""
Return the array data as a string containing the raw bytes in the array.
The array is filled with a fill value before the string conversion.
.. versionadded:: 1.9.0
Parameters
----------
fill_value : scalar, optional
Value used to fill in the masked values. Default is None, in which
case `MaskedArray.fill_value` is used.
order : {'C','F','A'}, optional
Order of the data item in the copy. Default is 'C'.
- 'C' -- C order (row major).
- 'F' -- Fortran order (column major).
- 'A' -- Any, current order of array.
- None -- Same as 'A'.
See Also
--------
ndarray.tobytes
tolist, tofile
Notes
-----
As for `ndarray.tobytes`, information about the shape, dtype, etc.,
but also about `fill_value`, will be lost.
Examples
--------
>>> x = np.ma.array(np.array([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]])
>>> x.tobytes()
'\\x01\\x00\\x00\\x00?B\\x0f\\x00?B\\x0f\\x00\\x04\\x00\\x00\\x00'
"""
return self.filled(fill_value).tobytes(order=order)
#........................
def tofile(self, fid, sep="", format="%s"):
"""
Save a masked array to a file in binary format.
.. warning::
This function is not implemented yet.
Raises
------
NotImplementedError
When `tofile` is called.
"""
raise NotImplementedError("Not implemented yet, sorry...")
def toflex(self):
"""
Transforms a masked array into a flexible-type array.
The flexible type array that is returned will have two fields:
* the ``_data`` field stores the ``_data`` part of the array.
* the ``_mask`` field stores the ``_mask`` part of the array.
Parameters
----------
None
Returns
-------
record : ndarray
A new flexible-type `ndarray` with two fields: the first element
containing a value, the second element containing the corresponding
mask boolean. The returned record shape matches self.shape.
Notes
-----
A side-effect of transforming a masked array into a flexible `ndarray` is
that meta information (``fill_value``, ...) will be lost.
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> print x
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
>>> print x.toflex()
[[(1, False) (2, True) (3, False)]
[(4, True) (5, False) (6, True)]
[(7, False) (8, True) (9, False)]]
"""
# Get the basic dtype ....
ddtype = self.dtype
# Make sure we have a mask
_mask = self._mask
if _mask is nomask:
_mask = make_mask_none(self.shape, ddtype)
# And get its dtype
mdtype = _mask.dtype
#
record = np.ndarray(shape=self.shape,
dtype=[('_data', ddtype), ('_mask', mdtype)])
record['_data'] = self._data
record['_mask'] = _mask
return record
torecords = toflex
#--------------------------------------------
# Pickling
def __getstate__(self):
"""Return the internal state of the masked array, for pickling
purposes.
"""
cf = 'CF'[self.flags.fnc]
state = (1,
self.shape,
self.dtype,
self.flags.fnc,
self._data.tobytes(cf),
#self._data.tolist(),
getmaskarray(self).tobytes(cf),
#getmaskarray(self).tolist(),
self._fill_value,
)
return state
#
def __setstate__(self, state):
"""Restore the internal state of the masked array, for
pickling purposes. ``state`` is typically the output of
``__getstate__``, and is a 7-tuple:
- a version number
- a tuple giving the shape of the data
- a typecode for the data
- a flag for Fortran-contiguity
- a binary string for the data
- a binary string for the mask
- the fill value.
"""
(_, shp, typ, isf, raw, msk, flv) = state
ndarray.__setstate__(self, (shp, typ, isf, raw))
self._mask.__setstate__((shp, make_mask_descr(typ), isf, msk))
self.fill_value = flv
#
def __reduce__(self):
"""Return a 3-tuple for pickling a MaskedArray.
"""
return (_mareconstruct,
(self.__class__, self._baseclass, (0,), 'b',),
self.__getstate__())
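# A minimal pickling round-trip sketch for the three methods above,
# assuming the standard ``pickle`` module:
# >>> import pickle
# >>> x = masked_array([1, 2, 3], mask=[0, 1, 0], fill_value=-999)
# >>> y = pickle.loads(pickle.dumps(x))
# >>> print y, y.fill_value
# [1 -- 3] -999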
#
def __deepcopy__(self, memo=None):
from copy import deepcopy
copied = MaskedArray.__new__(type(self), self, copy=True)
if memo is None:
memo = {}
memo[id(self)] = copied
for (k, v) in self.__dict__.items():
copied.__dict__[k] = deepcopy(v, memo)
return copied
def _mareconstruct(subtype, baseclass, baseshape, basetype,):
"""Internal function that builds a new MaskedArray from the
information stored in a pickle.
"""
_data = ndarray.__new__(baseclass, baseshape, basetype)
_mask = ndarray.__new__(ndarray, baseshape, make_mask_descr(basetype))
return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,)
class mvoid(MaskedArray):
"""
Fake a 'void' object to use for masked arrays with structured dtypes.
"""
#
def __new__(self, data, mask=nomask, dtype=None, fill_value=None,
hardmask=False, copy=False, subok=True):
_data = np.array(data, copy=copy, subok=subok, dtype=dtype)
_data = _data.view(self)
_data._hardmask = hardmask
if mask is not nomask:
if isinstance(mask, np.void):
_data._mask = mask
else:
try:
# Mask is already a 0D array
_data._mask = np.void(mask)
except TypeError:
# Transform the mask to a void
mdtype = make_mask_descr(dtype)
_data._mask = np.array(mask, dtype=mdtype)[()]
if fill_value is not None:
_data.fill_value = fill_value
return _data
def _get_data(self):
# Make sure that the _data part is a np.void
return self.view(ndarray)[()]
_data = property(fget=_get_data)
def __getitem__(self, indx):
"Get the index..."
m = self._mask
if m is not nomask and m[indx]:
return masked
return self._data[indx]
def __setitem__(self, indx, value):
self._data[indx] = value
if self._hardmask:
self._mask[indx] |= getattr(value, "_mask", False)
else:
self._mask[indx] = getattr(value, "_mask", False)
def __str__(self):
m = self._mask
if (m is nomask):
return self._data.__str__()
m = tuple(m)
if (not any(m)):
return self._data.__str__()
r = self._data.tolist()
p = masked_print_option
if not p.enabled():
p = 'N/A'
else:
p = str(p)
r = [(str(_), p)[int(_m)] for (_, _m) in zip(r, m)]
return "(%s)" % ", ".join(r)
def __repr__(self):
m = self._mask
if (m is nomask):
return self._data.__repr__()
m = tuple(m)
if not any(m):
return self._data.__repr__()
p = masked_print_option
if not p.enabled():
return self.filled(self.fill_value).__repr__()
p = str(p)
r = [(str(_), p)[int(_m)] for (_, _m) in zip(self._data.tolist(), m)]
return "(%s)" % ", ".join(r)
def __iter__(self):
"Defines an iterator for mvoid"
(_data, _mask) = (self._data, self._mask)
if _mask is nomask:
for d in _data:
yield d
else:
for (d, m) in zip(_data, _mask):
if m:
yield masked
else:
yield d
def __len__(self):
return self._data.__len__()
def filled(self, fill_value=None):
"""
Return a copy with masked fields filled with a given value.
Parameters
----------
fill_value : scalar, optional
The value to use for invalid entries (None by default).
If None, the `fill_value` attribute is used instead.
Returns
-------
filled_void
A `np.void` object
See Also
--------
MaskedArray.filled
"""
return asarray(self).filled(fill_value)[()]
def tolist(self):
"""
Transforms the mvoid object into a tuple.
Masked fields are replaced by None.
Returns
-------
returned_tuple
Tuple of fields
"""
_mask = self._mask
if _mask is nomask:
return self._data.tolist()
result = []
for (d, m) in zip(self._data, self._mask):
if m:
result.append(None)
else:
# .item() makes sure we return a standard Python object
result.append(d.item())
return tuple(result)
#####--------------------------------------------------------------------------
#---- --- Shortcuts ---
#####---------------------------------------------------------------------------
def isMaskedArray(x):
"""
Test whether input is an instance of MaskedArray.
This function returns True if `x` is an instance of MaskedArray
and returns False otherwise. Any object is accepted as input.
Parameters
----------
x : object
Object to test.
Returns
-------
result : bool
True if `x` is a MaskedArray.
See Also
--------
isMA : Alias to isMaskedArray.
isarray : Alias to isMaskedArray.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.eye(3, 3)
>>> a
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
>>> m = ma.masked_values(a, 0)
>>> m
masked_array(data =
[[1.0 -- --]
[-- 1.0 --]
[-- -- 1.0]],
mask =
[[False True True]
[ True False True]
[ True True False]],
fill_value=0.0)
>>> ma.isMaskedArray(a)
False
>>> ma.isMaskedArray(m)
True
>>> ma.isMaskedArray([0, 1, 2])
False
"""
return isinstance(x, MaskedArray)
isarray = isMaskedArray
isMA = isMaskedArray #backward compatibility
# We define the masked singleton as a float for higher precedence...
# Note that it can sometimes be tricky with type comparisons
class MaskedConstant(MaskedArray):
#
_data = data = np.array(0.)
_mask = mask = np.array(True)
_baseclass = ndarray
#
def __new__(self):
return self._data.view(self)
#
def __array_finalize__(self, obj):
return
#
def __array_wrap__(self, obj):
return self
#
def __str__(self):
return str(masked_print_option._display)
#
def __repr__(self):
return 'masked'
#
def flatten(self):
return masked_array([self._data], dtype=float, mask=[True])
def __reduce__(self):
"""Override of MaskedArray's __reduce__.
"""
return (self.__class__, ())
masked = masked_singleton = MaskedConstant()
masked_array = MaskedArray
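# Assigning the ``masked`` singleton to an element masks it in place, e.g.:
# >>> x = masked_array([1, 2, 3])
# >>> x[1] = masked
# >>> print x
# [1 -- 3]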
def array(data, dtype=None, copy=False, order=False,
mask=nomask, fill_value=None,
keep_mask=True, hard_mask=False, shrink=True, subok=True, ndmin=0,
):
"""array(data, dtype=None, copy=False, order=False, mask=nomask,
fill_value=None, keep_mask=True, hard_mask=False, shrink=True,
subok=True, ndmin=0)
Acts as a shortcut to MaskedArray, with options in a different order
for convenience and backwards compatibility.
"""
#!!!: we should try to put 'order' somewhere
return MaskedArray(data, mask=mask, dtype=dtype, copy=copy, subok=subok,
keep_mask=keep_mask, hard_mask=hard_mask,
fill_value=fill_value, ndmin=ndmin, shrink=shrink)
array.__doc__ = masked_array.__doc__
def is_masked(x):
"""
Determine whether input has masked values.
Accepts any object as input, but always returns False unless the
input is a MaskedArray containing masked values.
Parameters
----------
x : array_like
Array to check for masked values.
Returns
-------
result : bool
True if `x` is a MaskedArray with masked values, False otherwise.
Examples
--------
>>> import numpy.ma as ma
>>> x = ma.masked_equal([0, 1, 0, 2, 3], 0)
>>> x
masked_array(data = [-- 1 -- 2 3],
mask = [ True False True False False],
fill_value=999999)
>>> ma.is_masked(x)
True
>>> x = ma.masked_equal([0, 1, 0, 2, 3], 42)
>>> x
masked_array(data = [0 1 0 2 3],
mask = False,
fill_value=999999)
>>> ma.is_masked(x)
False
Always returns False if `x` isn't a MaskedArray.
>>> x = [False, True, False]
>>> ma.is_masked(x)
False
>>> x = 'a string'
>>> ma.is_masked(x)
False
"""
m = getmask(x)
if m is nomask:
return False
elif m.any():
return True
return False
#####---------------------------------------------------------------------------
#---- --- Extrema functions ---
#####---------------------------------------------------------------------------
class _extrema_operation(object):
"""
Generic class for maximum/minimum functions.
.. note::
This is the base class for `_maximum_operation` and
`_minimum_operation`.
"""
def __call__(self, a, b=None):
"Executes the call behavior."
if b is None:
return self.reduce(a)
return where(self.compare(a, b), a, b)
#.........
def reduce(self, target, axis=None):
"Reduce target along the given axis."
target = narray(target, copy=False, subok=True)
m = getmask(target)
if axis is not None:
kargs = { 'axis' : axis }
else:
kargs = {}
target = target.ravel()
if not (m is nomask):
m = m.ravel()
if m is nomask:
t = self.ufunc.reduce(target, **kargs)
else:
target = target.filled(self.fill_value_func(target)).view(type(target))
t = self.ufunc.reduce(target, **kargs)
m = umath.logical_and.reduce(m, **kargs)
if hasattr(t, '_mask'):
t._mask = m
elif m:
t = masked
return t
#.........
def outer (self, a, b):
"Return the function applied to the outer product of a and b."
ma = getmask(a)
mb = getmask(b)
if ma is nomask and mb is nomask:
m = nomask
else:
ma = getmaskarray(a)
mb = getmaskarray(b)
m = logical_or.outer(ma, mb)
result = self.ufunc.outer(filled(a), filled(b))
if not isinstance(result, MaskedArray):
result = result.view(MaskedArray)
result._mask = m
return result
#............................
class _minimum_operation(_extrema_operation):
"Object to calculate minima"
def __init__ (self):
"""minimum(a, b) or minimum(a)
In one argument case, returns the scalar minimum.
"""
self.ufunc = umath.minimum
self.afunc = amin
self.compare = less
self.fill_value_func = minimum_fill_value
#............................
class _maximum_operation(_extrema_operation):
"Object to calculate maxima"
def __init__ (self):
"""maximum(a, b) or maximum(a)
In one argument case returns the scalar maximum.
"""
self.ufunc = umath.maximum
self.afunc = amax
self.compare = greater
self.fill_value_func = maximum_fill_value
#..........................................................
def min(obj, axis=None, out=None, fill_value=None):
try:
return obj.min(axis=axis, fill_value=fill_value, out=out)
except (AttributeError, TypeError):
# If obj doesn't have a min method,
# ...or if the method doesn't accept a fill_value argument
return asanyarray(obj).min(axis=axis, fill_value=fill_value, out=out)
min.__doc__ = MaskedArray.min.__doc__
def max(obj, axis=None, out=None, fill_value=None):
try:
return obj.max(axis=axis, fill_value=fill_value, out=out)
except (AttributeError, TypeError):
# If obj doesn't have a max method,
# ...or if the method doesn't accept a fill_value argument
return asanyarray(obj).max(axis=axis, fill_value=fill_value, out=out)
max.__doc__ = MaskedArray.max.__doc__
def ptp(obj, axis=None, out=None, fill_value=None):
"""a.ptp(axis=None) = a.max(axis)-a.min(axis)"""
try:
return obj.ptp(axis, out=out, fill_value=fill_value)
except (AttributeError, TypeError):
# If obj doesn't have a ptp method,
# ...or if the method doesn't accept a fill_value argument
return asanyarray(obj).ptp(axis=axis, fill_value=fill_value, out=out)
ptp.__doc__ = MaskedArray.ptp.__doc__
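# A quick illustration of the functional forms above (``min``/``max`` here
# shadow the builtins within this module):
# >>> x = masked_array([5, -1, 3], mask=[0, 1, 0])
# >>> print min(x), max(x), ptp(x)
# 3 5 2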
#####---------------------------------------------------------------------------
#---- --- Definition of functions from the corresponding methods ---
#####---------------------------------------------------------------------------
class _frommethod:
"""
Define functions from existing MaskedArray methods.
Parameters
----------
methodname : str
Name of the method to transform.
"""
def __init__(self, methodname, reversed=False):
self.__name__ = methodname
self.__doc__ = self.getdoc()
self.reversed = reversed
#
def getdoc(self):
"Return the doc of the function (from the doc of the method)."
meth = getattr(MaskedArray, self.__name__, None) or\
getattr(np, self.__name__, None)
signature = self.__name__ + get_object_signature(meth)
if meth is not None:
doc = """ %s\n%s""" % (signature, getattr(meth, '__doc__', None))
return doc
#
def __call__(self, a, *args, **params):
if self.reversed:
args = list(args)
arr = args[0]
args[0] = a
a = arr
# Get the method from the array (if possible)
method_name = self.__name__
method = getattr(a, method_name, None)
if method is not None:
return method(*args, **params)
# Still here ? Then a is not a MaskedArray
method = getattr(MaskedArray, method_name, None)
if method is not None:
return method(MaskedArray(a), *args, **params)
# Still here ? OK, let's call the corresponding np function
method = getattr(np, method_name)
return method(a, *args, **params)
all = _frommethod('all')
anomalies = anom = _frommethod('anom')
any = _frommethod('any')
compress = _frommethod('compress', reversed=True)
cumprod = _frommethod('cumprod')
cumsum = _frommethod('cumsum')
copy = _frommethod('copy')
diagonal = _frommethod('diagonal')
harden_mask = _frommethod('harden_mask')
ids = _frommethod('ids')
maximum = _maximum_operation()
mean = _frommethod('mean')
minimum = _minimum_operation()
nonzero = _frommethod('nonzero')
prod = _frommethod('prod')
product = _frommethod('prod')
ravel = _frommethod('ravel')
repeat = _frommethod('repeat')
shrink_mask = _frommethod('shrink_mask')
soften_mask = _frommethod('soften_mask')
std = _frommethod('std')
sum = _frommethod('sum')
swapaxes = _frommethod('swapaxes')
#take = _frommethod('take')
trace = _frommethod('trace')
var = _frommethod('var')
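# Each _frommethod instance delegates to the corresponding array method
# when it exists, e.g. (illustrative):
# >>> print cumsum(masked_array([1, 2, 3], mask=[0, 1, 0]))
# [1 -- 4]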
def take(a, indices, axis=None, out=None, mode='raise'):
"""
"""
a = masked_array(a)
return a.take(indices, axis=axis, out=out, mode=mode)
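# For example (illustrative):
# >>> print take([10, 20, 30], [0, 2])
# [10 30]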
#..............................................................................
def power(a, b, third=None):
"""
Returns element-wise base array raised to power from second array.
This is the masked array version of `numpy.power`. For details see
`numpy.power`.
See Also
--------
numpy.power
Notes
-----
The *out* argument to `numpy.power` is not supported, `third` has to be
None.
"""
if third is not None:
raise MaskError("3-argument power not supported.")
# Get the masks
ma = getmask(a)
mb = getmask(b)
m = mask_or(ma, mb)
# Get the rawdata
fa = getdata(a)
fb = getdata(b)
# Get the type of the result (so that we preserve subclasses)
if isinstance(a, MaskedArray):
basetype = type(a)
else:
basetype = MaskedArray
# Get the result and view it as a (subclass of) MaskedArray
with np.errstate(divide='ignore', invalid='ignore'):
result = np.where(m, fa, umath.power(fa, fb)).view(basetype)
result._update_from(a)
# Find where we're in trouble w/ NaNs and Infs
invalid = np.logical_not(np.isfinite(result.view(ndarray)))
# Add the initial mask
if m is not nomask:
if not (result.ndim):
return masked
result._mask = np.logical_or(m, invalid)
# Fix the invalid parts
if invalid.any():
if not result.ndim:
return masked
elif result._mask is nomask:
result._mask = invalid
result._data[invalid] = result.fill_value
return result
# if fb.dtype.char in typecodes["Integer"]:
# return masked_array(umath.power(fa, fb), m)
# m = mask_or(m, (fa < 0) & (fb != fb.astype(int)))
# if m is nomask:
# return masked_array(umath.power(fa, fb))
# else:
# fa = fa.copy()
# if m.all():
# fa.flat = 1
# else:
# np.copyto(fa, 1, where=m)
# return masked_array(umath.power(fa, fb), m)
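# Illustrative behavior of ``power``: invalid results (NaN/Inf) are masked
# in addition to the input mask.
# >>> x = masked_array([-1., 4., 9.], mask=[0, 0, 1])
# >>> print power(x, 0.5)
# [-- 2.0 --]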
#..............................................................................
def argsort(a, axis=None, kind='quicksort', order=None, fill_value=None):
"Function version of the eponymous method."
if fill_value is None:
fill_value = default_fill_value(a)
d = filled(a, fill_value)
if axis is None:
return d.argsort(kind=kind, order=order)
return d.argsort(axis, kind=kind, order=order)
argsort.__doc__ = MaskedArray.argsort.__doc__
def argmin(a, axis=None, fill_value=None):
"Function version of the eponymous method."
if fill_value is None:
fill_value = default_fill_value(a)
d = filled(a, fill_value)
return d.argmin(axis=axis)
argmin.__doc__ = MaskedArray.argmin.__doc__
def argmax(a, axis=None, fill_value=None):
"Function version of the eponymous method."
if fill_value is None:
fill_value = default_fill_value(a)
try:
fill_value = -fill_value
except TypeError:
# fill_value cannot be negated (e.g. for string dtypes); keep it as-is
pass
d = filled(a, fill_value)
return d.argmax(axis=axis)
argmax.__doc__ = MaskedArray.argmax.__doc__
def sort(a, axis= -1, kind='quicksort', order=None, endwith=True, fill_value=None):
"Function version of the eponymous method."
a = narray(a, copy=True, subok=True)
if axis is None:
a = a.flatten()
axis = 0
if fill_value is None:
if endwith:
# nan > inf
if np.issubdtype(a.dtype, np.floating):
filler = np.nan
else:
filler = minimum_fill_value(a)
else:
filler = maximum_fill_value(a)
else:
filler = fill_value
sindx = filled(a, filler).argsort(axis=axis, kind=kind, order=order)
# save meshgrid memory for 1d arrays
if a.ndim == 1:
indx = sindx
else:
indx = np.meshgrid(*[np.arange(x) for x in a.shape], sparse=True,
indexing='ij')
indx[axis] = sindx
return a[indx]
sort.__doc__ = MaskedArray.sort.__doc__
def compressed(x):
"""
Return all the non-masked data as a 1-D array.
This function is equivalent to calling the "compressed" method of a
`MaskedArray`, see `MaskedArray.compressed` for details.
See Also
--------
MaskedArray.compressed
Equivalent method.
"""
if not isinstance(x, MaskedArray):
x = asanyarray(x)
return x.compressed()
def concatenate(arrays, axis=0):
"""
Concatenate a sequence of arrays along the given axis.
Parameters
----------
arrays : sequence of array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int, optional
The axis along which the arrays will be joined. Default is 0.
Returns
-------
result : MaskedArray
The concatenated array with any masked entries preserved.
See Also
--------
numpy.concatenate : Equivalent function in the top-level NumPy module.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.arange(3)
>>> a[1] = ma.masked
>>> b = ma.arange(2, 5)
>>> a
masked_array(data = [0 -- 2],
mask = [False True False],
fill_value = 999999)
>>> b
masked_array(data = [2 3 4],
mask = False,
fill_value = 999999)
>>> ma.concatenate([a, b])
masked_array(data = [0 -- 2 2 3 4],
mask = [False True False False False False],
fill_value = 999999)
"""
d = np.concatenate([getdata(a) for a in arrays], axis)
rcls = get_masked_subclass(*arrays)
data = d.view(rcls)
# Check whether one of the arrays has a non-empty mask...
for x in arrays:
if getmask(x) is not nomask:
break
else:
return data
# OK, so we have to concatenate the masks
dm = np.concatenate([getmaskarray(a) for a in arrays], axis)
# If we decide to keep a '_shrinkmask' option, we want to check that ...
# ... all of them are True, and then check for dm.any()
# shrink = numpy.logical_or.reduce([getattr(a,'_shrinkmask',True) for a in arrays])
# if shrink and not dm.any():
if not dm.dtype.fields and not dm.any():
data._mask = nomask
else:
data._mask = dm.reshape(d.shape)
return data
def count(a, axis=None):
if isinstance(a, MaskedArray):
return a.count(axis)
return masked_array(a, copy=False).count(axis)
count.__doc__ = MaskedArray.count.__doc__
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
This function is the equivalent of `numpy.diag` that takes masked
values into account, see `numpy.diag` for details.
See Also
--------
numpy.diag : Equivalent function for ndarrays.
"""
output = np.diag(v, k).view(MaskedArray)
if getmask(v) is not nomask:
output._mask = np.diag(v._mask, k)
return output
def expand_dims(x, axis):
"""
Expand the shape of an array.
Expands the shape of the array by including a new axis before the one
specified by the `axis` parameter. This function behaves the same as
`numpy.expand_dims` but preserves masked elements.
See Also
--------
numpy.expand_dims : Equivalent function in top-level NumPy module.
Examples
--------
>>> import numpy.ma as ma
>>> x = ma.array([1, 2, 4])
>>> x[1] = ma.masked
>>> x
masked_array(data = [1 -- 4],
mask = [False True False],
fill_value = 999999)
>>> np.expand_dims(x, axis=0)
array([[1, 2, 4]])
>>> ma.expand_dims(x, axis=0)
masked_array(data =
[[1 -- 4]],
mask =
[[False True False]],
fill_value = 999999)
The same result can be achieved using slicing syntax with `np.newaxis`.
>>> x[np.newaxis, :]
masked_array(data =
[[1 -- 4]],
mask =
[[False True False]],
fill_value = 999999)
"""
result = n_expand_dims(x, axis)
if isinstance(x, MaskedArray):
new_shape = result.shape
result = x.view()
result.shape = new_shape
if result._mask is not nomask:
result._mask.shape = new_shape
return result
#......................................
def left_shift (a, n):
"""
Shift the bits of an integer to the left.
This is the masked array version of `numpy.left_shift`, for details
see that function.
See Also
--------
numpy.left_shift
"""
m = getmask(a)
if m is nomask:
d = umath.left_shift(filled(a), n)
return masked_array(d)
else:
d = umath.left_shift(filled(a, 0), n)
return masked_array(d, mask=m)
def right_shift (a, n):
"""
Shift the bits of an integer to the right.
This is the masked array version of `numpy.right_shift`, for details
see that function.
See Also
--------
numpy.right_shift
"""
m = getmask(a)
if m is nomask:
d = umath.right_shift(filled(a), n)
return masked_array(d)
else:
d = umath.right_shift(filled(a, 0), n)
return masked_array(d, mask=m)
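# Illustrative use of the shift functions above:
# >>> x = masked_array([1, 2, 4], mask=[0, 0, 1])
# >>> print left_shift(x, 1)
# [2 4 --]
# >>> print right_shift(x, 1)
# [0 1 --]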
#......................................
def put(a, indices, values, mode='raise'):
"""
Set storage-indexed locations to corresponding values.
This function is equivalent to `MaskedArray.put`, see that method
for details.
See Also
--------
MaskedArray.put
"""
# We can't use 'frommethod', the order of arguments is different
try:
return a.put(indices, values, mode=mode)
except AttributeError:
return narray(a, copy=False).put(indices, values, mode=mode)
def putmask(a, mask, values): #, mode='raise'):
"""
Changes elements of an array based on conditional and input values.
This is the masked array version of `numpy.putmask`, for details see
`numpy.putmask`.
See Also
--------
numpy.putmask
Notes
-----
Using a masked array as `values` will **not** transform a `ndarray` into
a `MaskedArray`.
"""
# We can't use 'frommethod', the order of arguments is different
if not isinstance(a, MaskedArray):
a = a.view(MaskedArray)
(valdata, valmask) = (getdata(values), getmask(values))
if getmask(a) is nomask:
if valmask is not nomask:
a._sharedmask = True
a._mask = make_mask_none(a.shape, a.dtype)
np.copyto(a._mask, valmask, where=mask)
elif a._hardmask:
if valmask is not nomask:
m = a._mask.copy()
np.copyto(m, valmask, where=mask)
a.mask |= m
else:
if valmask is nomask:
valmask = getmaskarray(values)
np.copyto(a._mask, valmask, where=mask)
np.copyto(a._data, valdata, where=mask)
return
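# A short sketch of ``putmask`` (modifies ``a`` in place):
# >>> a = masked_array([1, 2, 3, 4])
# >>> putmask(a, [0, 1, 0, 1], [10, 20, 30, 40])
# >>> print a
# [1 20 3 40]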
def transpose(a, axes=None):
"""
Permute the dimensions of an array.
This function is exactly equivalent to `numpy.transpose`.
See Also
--------
numpy.transpose : Equivalent function in top-level NumPy module.
Examples
--------
>>> import numpy.ma as ma
>>> x = ma.arange(4).reshape((2,2))
>>> x[1, 1] = ma.masked
>>> x
masked_array(data =
[[0 1]
[2 --]],
mask =
[[False False]
[False True]],
fill_value = 999999)
>>> ma.transpose(x)
masked_array(data =
[[0 2]
[1 --]],
mask =
[[False False]
[False True]],
fill_value = 999999)
"""
# We can't use 'frommethod', as 'transpose' doesn't take keywords
try:
return a.transpose(axes)
except AttributeError:
return narray(a, copy=False).transpose(axes).view(MaskedArray)
def reshape(a, new_shape, order='C'):
"""
Returns an array containing the same data with a new shape.
Refer to `MaskedArray.reshape` for full documentation.
See Also
--------
MaskedArray.reshape : equivalent function
"""
# We can't use 'frommethod', it whines about some parameters.
try:
return a.reshape(new_shape, order=order)
except AttributeError:
_tmp = narray(a, copy=False).reshape(new_shape, order=order)
return _tmp.view(MaskedArray)
def resize(x, new_shape):
"""
Return a new masked array with the specified size and shape.
This is the masked equivalent of the `numpy.resize` function. The new
array is filled with repeated copies of `x` (in the order that the
data are stored in memory). If `x` is masked, the new array will be
masked, and the new mask will be a repetition of the old one.
See Also
--------
numpy.resize : Equivalent function in the top level NumPy module.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.array([[1, 2] ,[3, 4]])
>>> a[0, 1] = ma.masked
>>> a
masked_array(data =
[[1 --]
[3 4]],
mask =
[[False True]
[False False]],
fill_value = 999999)
>>> np.resize(a, (3, 3))
array([[1, 2, 3],
[4, 1, 2],
[3, 4, 1]])
>>> ma.resize(a, (3, 3))
masked_array(data =
[[1 -- 3]
[4 1 --]
[3 4 1]],
mask =
[[False True False]
[False False True]
[False False False]],
fill_value = 999999)
A MaskedArray is always returned, regardless of the input type.
>>> a = np.array([[1, 2] ,[3, 4]])
>>> ma.resize(a, (3, 3))
masked_array(data =
[[1 2 3]
[4 1 2]
[3 4 1]],
mask =
False,
fill_value = 999999)
"""
# We can't use _frommethods here, as N.resize is notoriously whiny.
m = getmask(x)
if m is not nomask:
m = np.resize(m, new_shape)
result = np.resize(x, new_shape).view(get_masked_subclass(x))
if result.ndim:
result._mask = m
return result
#................................................
def rank(obj):
"maskedarray version of the numpy function."
return np.rank(getdata(obj))
rank.__doc__ = np.rank.__doc__
#
def shape(obj):
"maskedarray version of the numpy function."
return np.shape(getdata(obj))
shape.__doc__ = np.shape.__doc__
#
def size(obj, axis=None):
"maskedarray version of the numpy function."
return np.size(getdata(obj), axis)
size.__doc__ = np.size.__doc__
#................................................
#####--------------------------------------------------------------------------
#---- --- Extra functions ---
#####--------------------------------------------------------------------------
def where (condition, x=None, y=None):
"""
Return a masked array with elements from x or y, depending on condition.
Returns a masked array, shaped like condition, where the elements
are from `x` when `condition` is True, and from `y` otherwise.
If neither `x` nor `y` are given, the function returns a tuple of
indices where `condition` is True (the result of
``condition.nonzero()``).
Parameters
----------
condition : array_like, bool
The condition to meet. For each True element, yield the corresponding
element from `x`, otherwise from `y`.
x, y : array_like, optional
Values from which to choose. `x` and `y` need to have the same shape
as `condition`, or be broadcastable to that shape.
Returns
-------
out : MaskedArray or tuple of ndarrays
The resulting masked array if `x` and `y` were given, otherwise
the result of ``condition.nonzero()``.
See Also
--------
numpy.where : Equivalent function in the top-level NumPy module.
Examples
--------
>>> x = np.ma.array(np.arange(9.).reshape(3, 3), mask=[[0, 1, 0],
... [1, 0, 1],
... [0, 1, 0]])
>>> print x
[[0.0 -- 2.0]
[-- 4.0 --]
[6.0 -- 8.0]]
>>> np.ma.where(x > 5) # return the indices where x > 5
(array([2, 2]), array([0, 2]))
>>> print np.ma.where(x > 5, x, -3.1416)
[[-3.1416 -- -3.1416]
[-- -3.1416 --]
[6.0 -- 8.0]]
"""
if x is None and y is None:
return filled(condition, 0).nonzero()
elif x is None or y is None:
raise ValueError("Either both or neither x and y should be given.")
# Get the condition ...............
fc = filled(condition, 0).astype(MaskType)
notfc = np.logical_not(fc)
# Get the data ......................................
xv = getdata(x)
yv = getdata(y)
if x is masked:
ndtype = yv.dtype
elif y is masked:
ndtype = xv.dtype
else:
ndtype = np.find_common_type([xv.dtype, yv.dtype], [])
# Construct an empty array and fill it
d = np.empty(fc.shape, dtype=ndtype).view(MaskedArray)
_data = d._data
np.copyto(_data, xv.astype(ndtype), where=fc)
np.copyto(_data, yv.astype(ndtype), where=notfc)
# Create an empty mask and fill it
_mask = d._mask = np.zeros(fc.shape, dtype=MaskType)
np.copyto(_mask, getmask(x), where=fc)
np.copyto(_mask, getmask(y), where=notfc)
_mask |= getmaskarray(condition)
if not _mask.any():
d._mask = nomask
return d
def choose (indices, choices, out=None, mode='raise'):
"""
Use an index array to construct a new array from a set of choices.
Given an array of integers and a set of n choice arrays, this method
will create a new array that merges each of the choice arrays. Where a
value in `indices` is i, the new array will have the value that choices[i]
contains in the same place.
Parameters
----------
indices : ndarray of ints
This array must contain integers in ``[0, n-1]``, where n is the
number of choices.
choices : sequence of arrays
Choice arrays. The index array and all of the choices should be
broadcastable to the same shape.
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and `dtype`.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
* 'raise' : raise an error
* 'wrap' : wrap around
* 'clip' : clip to the range
Returns
-------
merged_array : array
See Also
--------
choose : equivalent function
Examples
--------
>>> choice = np.array([[1,1,1], [2,2,2], [3,3,3]])
>>> a = np.array([2, 1, 0])
>>> np.ma.choose(a, choice)
masked_array(data = [3 2 1],
mask = False,
fill_value=999999)
"""
def fmask (x):
"Returns the filled array, or True if masked."
if x is masked:
return True
return filled(x)
def nmask (x):
"Returns the mask, True if ``masked``, False if ``nomask``."
if x is masked:
return True
return getmask(x)
# Get the indices......
c = filled(indices, 0)
# Get the masks........
masks = [nmask(x) for x in choices]
data = [fmask(x) for x in choices]
# Construct the mask
outputmask = np.choose(c, masks, mode=mode)
outputmask = make_mask(mask_or(outputmask, getmask(indices)),
copy=0, shrink=True)
# Get the choices......
d = np.choose(c, data, mode=mode, out=out).view(MaskedArray)
if out is not None:
if isinstance(out, MaskedArray):
out.__setmask__(outputmask)
return out
d.__setmask__(outputmask)
return d
def round_(a, decimals=0, out=None):
"""
Return a copy of a, rounded to 'decimals' places.
When 'decimals' is negative, it specifies the number of positions
to the left of the decimal point. The real and imaginary parts of
complex numbers are rounded separately. Nothing is done if the
array is not of float type and 'decimals' is greater than or equal
to 0.
Parameters
----------
decimals : int
Number of decimals to round to. May be negative.
out : array_like
Existing array to use for output.
If not given, returns a default copy of a.
Notes
-----
If out is given and does not have a mask attribute, the mask of a
is lost!
"""
if out is None:
return np.round_(a, decimals, out)
else:
np.round_(getdata(a), decimals, out)
if hasattr(out, '_mask'):
out._mask = getmask(a)
return out
round = round_
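# Illustrative use of ``round_`` (the mask is preserved when out is None):
# >>> x = masked_array([1.234, 5.678], mask=[0, 1])
# >>> print round_(x, 1)
# [1.2 --]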
def inner(a, b):
"""
Returns the inner product of a and b for arrays of floating point types.
Like the generic NumPy equivalent the product sum is over the last dimension
of a and b.
Notes
-----
The first argument is not conjugated.
"""
fa = filled(a, 0)
fb = filled(b, 0)
if len(fa.shape) == 0:
fa.shape = (1,)
if len(fb.shape) == 0:
fb.shape = (1,)
return np.inner(fa, fb).view(MaskedArray)
inner.__doc__ = doc_note(np.inner.__doc__,
"Masked values are replaced by 0.")
innerproduct = inner
def outer(a, b):
"maskedarray version of the numpy function."
fa = filled(a, 0).ravel()
fb = filled(b, 0).ravel()
d = np.outer(fa, fb)
ma = getmask(a)
mb = getmask(b)
if ma is nomask and mb is nomask:
return masked_array(d)
ma = getmaskarray(a)
mb = getmaskarray(b)
m = make_mask(1 - np.outer(1 - ma, 1 - mb), copy=0)
return masked_array(d, mask=m)
outer.__doc__ = doc_note(np.outer.__doc__,
"Masked values are replaced by 0.")
outerproduct = outer
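# Quick sketch (values are hypothetical) of the doc notes above: inner() and
# outer() fill masked entries with 0 before taking the product, and outer()
# rebuilds a combined mask for its result.
def _demo_inner_outer():
    import numpy.ma as ma
    a = ma.array([1, 2, 3], mask=[0, 1, 0])
    b = ma.array([4, 5, 6])
    ip = ma.inner(a, b)   # 1*4 + 0*5 + 3*6 = 22 (masked term becomes 0)
    op = ma.outer(a, b)   # middle row fully masked, inherited from a's mask
    return ip, op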
def allequal(a, b, fill_value=True):
"""
Return True if all entries of a and b are equal, using
fill_value as a truth value where either or both are masked.
Parameters
----------
a, b : array_like
Input arrays to compare.
fill_value : bool, optional
Whether masked values in a or b are considered equal (True) or not
(False).
Returns
-------
y : bool
Returns True if all corresponding entries of `a` and `b` are equal
(with masked entries handled according to `fill_value`), False
otherwise.
See Also
--------
all, any
numpy.ma.allclose
Examples
--------
>>> a = ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1])
>>> a
masked_array(data = [10000000000.0 1e-07 --],
mask = [False False True],
fill_value=1e+20)
>>> b = array([1e10, 1e-7, -42.0])
>>> b
array([ 1.00000000e+10, 1.00000000e-07, -4.20000000e+01])
>>> ma.allequal(a, b, fill_value=False)
False
>>> ma.allequal(a, b)
True
"""
m = mask_or(getmask(a), getmask(b))
if m is nomask:
x = getdata(a)
y = getdata(b)
d = umath.equal(x, y)
return d.all()
elif fill_value:
x = getdata(a)
y = getdata(b)
d = umath.equal(x, y)
dm = array(d, mask=m, copy=False)
return dm.filled(True).all(None)
else:
return False
def allclose(a, b, masked_equal=True, rtol=1e-5, atol=1e-8):
"""
Returns True if two arrays are element-wise equal within a tolerance.
This function is equivalent to `allclose` except that masked values
are treated as equal (default) or unequal, depending on the `masked_equal`
argument.
Parameters
----------
a, b : array_like
Input arrays to compare.
masked_equal : bool, optional
Whether masked values in `a` and `b` are considered equal (True) or not
(False). They are considered equal by default.
rtol : float, optional
Relative tolerance. The relative difference is equal to ``rtol * b``.
Default is 1e-5.
atol : float, optional
Absolute tolerance. The absolute difference is equal to `atol`.
Default is 1e-8.
Returns
-------
y : bool
Returns True if the two arrays are equal within the given
tolerance, False otherwise. If either array contains NaN, then
False is returned.
See Also
--------
all, any
numpy.allclose : the non-masked `allclose`.
Notes
-----
If the following equation is element-wise True, then `allclose` returns
True::
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
Return True if all elements of `a` and `b` are equal subject to
given tolerances.
Examples
--------
>>> a = ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1])
>>> a
masked_array(data = [10000000000.0 1e-07 --],
mask = [False False True],
fill_value = 1e+20)
>>> b = ma.array([1e10, 1e-8, -42.0], mask=[0, 0, 1])
>>> ma.allclose(a, b)
False
>>> a = ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1])
>>> b = ma.array([1.00001e10, 1e-9, -42.0], mask=[0, 0, 1])
>>> ma.allclose(a, b)
True
>>> ma.allclose(a, b, masked_equal=False)
False
Masked values are not compared directly.
>>> a = ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1])
>>> b = ma.array([1.00001e10, 1e-9, 42.0], mask=[0, 0, 1])
>>> ma.allclose(a, b)
True
>>> ma.allclose(a, b, masked_equal=False)
False
"""
x = masked_array(a, copy=False)
y = masked_array(b, copy=False)
# make sure y is an inexact type to avoid abs(MIN_INT); will cause
# casting of x later.
dtype = np.result_type(y, 1.)
if y.dtype != dtype:
y = masked_array(y, dtype=dtype, copy=False)
m = mask_or(getmask(x), getmask(y))
xinf = np.isinf(masked_array(x, copy=False, mask=m)).filled(False)
# If we have some infs, they should fall at the same place.
if not np.all(xinf == filled(np.isinf(y), False)):
return False
# No infs at all
if not np.any(xinf):
d = filled(umath.less_equal(umath.absolute(x - y),
atol + rtol * umath.absolute(y)),
masked_equal)
return np.all(d)
if not np.all(filled(x[xinf] == y[xinf], masked_equal)):
return False
x = x[~xinf]
y = y[~xinf]
d = filled(umath.less_equal(umath.absolute(x - y),
atol + rtol * umath.absolute(y)),
masked_equal)
return np.all(d)
#..............................................................................
def asarray(a, dtype=None, order=None):
"""
Convert the input to a masked array of the given data-type.
No copy is performed if the input is already an `ndarray`. If `a` is
a subclass of `MaskedArray`, a base class `MaskedArray` is returned.
Parameters
----------
a : array_like
Input data, in any form that can be converted to a masked array. This
includes lists, lists of tuples, tuples, tuples of tuples, tuples
of lists, ndarrays and masked arrays.
dtype : dtype, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major ('C') or column-major ('F', Fortran-style) memory
representation. Default is 'C'.
Returns
-------
out : MaskedArray
Masked array interpretation of `a`.
See Also
--------
asanyarray : Similar to `asarray`, but conserves subclasses.
Examples
--------
>>> x = np.arange(10.).reshape(2, 5)
>>> x
array([[ 0., 1., 2., 3., 4.],
[ 5., 6., 7., 8., 9.]])
>>> np.ma.asarray(x)
masked_array(data =
[[ 0. 1. 2. 3. 4.]
[ 5. 6. 7. 8. 9.]],
mask =
False,
fill_value = 1e+20)
>>> type(np.ma.asarray(x))
<class 'numpy.ma.core.MaskedArray'>
"""
return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=False)
def asanyarray(a, dtype=None):
"""
Convert the input to a masked array, conserving subclasses.
If `a` is a subclass of `MaskedArray`, its class is conserved.
No copy is performed if the input is already an `ndarray`.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array.
dtype : dtype, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major ('C') or column-major ('F', Fortran-style) memory
representation. Default is 'C'.
Returns
-------
out : MaskedArray
MaskedArray interpretation of `a`.
See Also
--------
asarray : Similar to `asanyarray`, but does not conserve subclass.
Examples
--------
>>> x = np.arange(10.).reshape(2, 5)
>>> x
array([[ 0., 1., 2., 3., 4.],
[ 5., 6., 7., 8., 9.]])
>>> np.ma.asanyarray(x)
masked_array(data =
[[ 0. 1. 2. 3. 4.]
[ 5. 6. 7. 8. 9.]],
mask =
False,
fill_value = 1e+20)
>>> type(np.ma.asanyarray(x))
<class 'numpy.ma.core.MaskedArray'>
"""
return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True)
#####--------------------------------------------------------------------------
#---- --- Pickling ---
#####--------------------------------------------------------------------------
def dump(a, F):
"""
Pickle a masked array to a file.
This is a wrapper around ``cPickle.dump``.
Parameters
----------
a : MaskedArray
The array to be pickled.
F : str or file-like object
The file to pickle `a` to. If a string, the full path to the file.
"""
if not hasattr(F, 'readline'):
F = open(F, 'wb')
return pickle.dump(a, F)
def dumps(a):
"""
Return a string corresponding to the pickling of a masked array.
This is a wrapper around ``cPickle.dumps``.
Parameters
----------
a : MaskedArray
The array for which the string representation of the pickle is
returned.
"""
return pickle.dumps(a)
def load(F):
"""
Wrapper around ``cPickle.load`` which accepts either a file-like object
or a filename.
Parameters
----------
F : str or file
The file or file name to load.
See Also
--------
dump : Pickle an array
Notes
-----
This is different from `numpy.load`, which does not use cPickle but loads
the NumPy binary .npy format.
"""
if not hasattr(F, 'readline'):
F = open(F, 'rb')
return pickle.load(F)
def loads(strg):
"""
Load a pickle from the given string.
The result of ``cPickle.loads(strg)`` is returned.
Parameters
----------
strg : str
The string to load.
See Also
--------
dumps : Return a string corresponding to the pickling of a masked array.
"""
return pickle.loads(strg)
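# Round-trip sketch for the pickling helpers above; the mask and fill_value
# survive a dumps()/loads() cycle because MaskedArray supports pickling.
def _demo_pickle_roundtrip():
    import numpy.ma as ma
    x = ma.array([1, 2, 3], mask=[0, 1, 0])
    y = loads(dumps(x))
    assert (x == y).all() and (x.mask == y.mask).all()
    return y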
################################################################################
def fromfile(file, dtype=float, count=-1, sep=''):
raise NotImplementedError("Not yet implemented. Sorry")
def fromflex(fxarray):
"""
Build a masked array from a suitable flexible-type array.
The input array has to have a data-type with ``_data`` and ``_mask``
fields. This type of array is output by `MaskedArray.toflex`.
Parameters
----------
fxarray : ndarray
The structured input array, containing ``_data`` and ``_mask``
fields. If present, other fields are discarded.
Returns
-------
result : MaskedArray
The constructed masked array.
See Also
--------
MaskedArray.toflex : Build a flexible-type array from a masked array.
Examples
--------
>>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[0] + [1, 0] * 4)
>>> rec = x.toflex()
>>> rec
array([[(0, False), (1, True), (2, False)],
[(3, True), (4, False), (5, True)],
[(6, False), (7, True), (8, False)]],
dtype=[('_data', '<i4'), ('_mask', '|b1')])
>>> x2 = np.ma.fromflex(rec)
>>> x2
masked_array(data =
[[0 -- 2]
[-- 4 --]
[6 -- 8]],
mask =
[[False True False]
[ True False True]
[False True False]],
fill_value = 999999)
Extra fields can be present in the structured array but are discarded:
>>> dt = [('_data', '<i4'), ('_mask', '|b1'), ('field3', '<f4')]
>>> rec2 = np.zeros((2, 2), dtype=dt)
>>> rec2
array([[(0, False, 0.0), (0, False, 0.0)],
[(0, False, 0.0), (0, False, 0.0)]],
dtype=[('_data', '<i4'), ('_mask', '|b1'), ('field3', '<f4')])
>>> y = np.ma.fromflex(rec2)
>>> y
masked_array(data =
[[0 0]
[0 0]],
mask =
[[False False]
[False False]],
fill_value = 999999)
"""
return masked_array(fxarray['_data'], mask=fxarray['_mask'])
class _convert2ma:
"""
Convert functions from numpy to numpy.ma.
Parameters
----------
funcname : string
Name of the numpy function to wrap.
"""
__doc__ = None
#
def __init__(self, funcname, params=None):
self._func = getattr(np, funcname)
self.__doc__ = self.getdoc()
self._extras = params or {}
#
def getdoc(self):
"Return the doc of the function (from the doc of the method)."
doc = getattr(self._func, '__doc__', None)
sig = get_object_signature(self._func)
if doc:
# Add the signature of the function at the beginning of the doc
if sig:
sig = "%s%s\n" % (self._func.__name__, sig)
doc = sig + doc
return doc
#
def __call__(self, a, *args, **params):
# Find the common parameters to the call and the definition
_extras = self._extras
common_params = set(params).intersection(_extras)
# Drop the common parameters from the call
for p in common_params:
_extras[p] = params.pop(p)
# Get the result
result = self._func.__call__(a, *args, **params).view(MaskedArray)
if "fill_value" in common_params:
result.fill_value = _extras.get("fill_value", None)
if "hardmask" in common_params:
result._hardmask = bool(_extras.get("hardmask", False))
return result
arange = _convert2ma('arange', params=dict(fill_value=None, hardmask=False))
clip = np.clip
diff = np.diff
empty = _convert2ma('empty', params=dict(fill_value=None, hardmask=False))
empty_like = _convert2ma('empty_like')
frombuffer = _convert2ma('frombuffer')
fromfunction = _convert2ma('fromfunction')
identity = _convert2ma('identity', params=dict(fill_value=None, hardmask=False))
indices = np.indices
ones = _convert2ma('ones', params=dict(fill_value=None, hardmask=False))
ones_like = np.ones_like
squeeze = np.squeeze
zeros = _convert2ma('zeros', params=dict(fill_value=None, hardmask=False))
zeros_like = np.zeros_like
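# Sketch of the _convert2ma wrappers above: the extra `fill_value` keyword is
# intercepted by __call__ and applied to the resulting MaskedArray instead of
# being forwarded to the underlying numpy constructor.
def _demo_convert2ma():
    z = zeros(3, fill_value=-999)   # ma.zeros, defined just above
    return z.fill_value             # -999.0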
###############################################################################
def append(a, b, axis=None):
"""Append values to the end of an array.
.. versionadded:: 1.9.0
Parameters
----------
arr : array_like
Values are appended to a copy of this array.
values : array_like
These values are appended to a copy of `arr`. It must be of the
correct shape (the same shape as `arr`, excluding `axis`). If `axis`
is not specified, `values` can be any shape and will be flattened
before use.
axis : int, optional
The axis along which `values` are appended. If `axis` is not given,
both `arr` and `values` are flattened before use.
Returns
-------
append : MaskedArray
A copy of `arr` with `values` appended to `axis`. Note that `append`
does not occur in-place: a new array is allocated and filled. If
`axis` is None, the result is a flattened array.
See Also
--------
numpy.append : Equivalent function in the top-level NumPy module.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.masked_values([1, 2, 3], 2)
>>> b = ma.masked_values([[4, 5, 6], [7, 8, 9]], 7)
>>> print(ma.append(a, b))
[1 -- 3 4 5 6 -- 8 9]
"""
return concatenate([a, b], axis)
| bsd-3-clause |
alexiip/lolapichallengepredict | RiotChallengeWeb/requests/packages/urllib3/util/connection.py | 252 | 3265 | import socket
try:
from select import poll, POLLIN
except ImportError: # `poll` doesn't exist on OSX and other platforms
poll = False
try:
from select import select
except ImportError: # `select` doesn't exist on AppEngine.
select = False
def is_connection_dropped(conn): # Platform-specific
"""
Returns True if the connection is dropped and should be closed.
:param conn:
:class:`httplib.HTTPConnection` object.
Note: For platforms like AppEngine, this will always return ``False`` to
let the platform handle connection recycling transparently for us.
"""
sock = getattr(conn, 'sock', False)
if sock is False: # Platform-specific: AppEngine
return False
if sock is None: # Connection already closed (such as by httplib).
return True
if not poll:
if not select: # Platform-specific: AppEngine
return False
try:
return select([sock], [], [], 0.0)[0]
except socket.error:
return True
# This version is better on platforms that support it.
p = poll()
p.register(sock, POLLIN)
for (fno, ev) in p.poll(0.0):
if fno == sock.fileno():
# Either data is buffered (bad), or the connection is dropped.
return True
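# Hypothetical usage sketch: a connection pool would call this before reusing
# an idle connection (`httplib` is Python 2, matching this module's era).
def _demo_is_connection_dropped():
    import httplib
    conn = httplib.HTTPConnection('example.com')
    conn.connect()
    return is_connection_dropped(conn)  # False while the socket is healthy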
# This function is copied from socket.py in the Python 2.7 standard
# library test suite. Added to its signature is only `socket_options`.
def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None, socket_options=None):
"""Connect to *address* and return the socket object.
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
*timeout* parameter will set the timeout on the socket instance
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by :func:`getdefaulttimeout`
is used. If *source_address* is set it must be a tuple of (host, port)
for the socket to bind as a source address before making the connection.
A host of '' or port 0 tells the OS to use the default.
"""
host, port = address
err = None
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
# If provided, set socket level options before connecting.
# This is the only addition urllib3 makes to this function.
_set_socket_options(sock, socket_options)
if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
if source_address:
sock.bind(source_address)
sock.connect(sa)
return sock
except socket.error as _:
err = _
if sock is not None:
sock.close()
if err is not None:
raise err
else:
raise socket.error("getaddrinfo returns an empty list")
def _set_socket_options(sock, options):
if options is None:
return
for opt in options:
sock.setsockopt(*opt)
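# Usage sketch: `socket_options` is a list of setsockopt() argument tuples,
# e.g. disabling Nagle's algorithm on the new connection (host and timeout
# values are illustrative).
def _demo_create_connection():
    import socket
    return create_connection(('example.com', 80), timeout=5.0,
                             socket_options=[(socket.IPPROTO_TCP,
                                              socket.TCP_NODELAY, 1)])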
| apache-2.0 |
wagnerand/olympia | src/olympia/translations/fields.py | 2 | 11109 | from collections import namedtuple
from django import forms
from django.conf import settings
from django.db import models
from django.db.models.fields import related
from django.utils import translation as translation_utils
from django.utils.translation.trans_real import to_language
import six
from .hold import add_translation, make_key, save_translations
from .widgets import TransInput, TransTextarea
LocaleErrorMessage = namedtuple('LocaleErrorMessage', 'message locale')
class TranslationDescriptor(related.ForwardManyToOneDescriptor):
"""
Descriptor that handles creating and updating Translations given strings.
"""
def __get__(self, instance, cls=None):
if instance is None:
return self
# If Django doesn't find the value in the cache (which would only
# happen if the field was set or accessed already), it does a db query
# to follow the foreign key. We expect translations to be set by
# queryset transforms, so doing a query is the wrong thing here.
try:
return getattr(instance, self.cache_name)
except AttributeError:
return None
def __set__(self, instance, value):
lang = translation_utils.get_language()
if isinstance(value, six.string_types):
value = self.translation_from_string(instance, lang, value)
elif hasattr(value, 'items'):
value = self.translation_from_dict(instance, lang, value)
# Don't let this be set to None, because Django will then blank out the
# foreign key for this object. That's incorrect for translations.
if value is not None:
# We always get these back from the database as Translations, but
# we may want them to be a more specific Purified/Linkified child
# class.
if not isinstance(value, self.field.related_model):
value = switch(value, self.field.related_model)
super(TranslationDescriptor, self).__set__(instance, value)
elif getattr(instance, self.field.name, None) is None:
super(TranslationDescriptor, self).__set__(instance, None)
def translation_from_string(self, instance, lang, string):
"""Create, save, and return a Translation from a string."""
trans_id = instance.__dict__.get(self.field.attname)
if trans_id is None:
# We don't have a translation for this field in any language,
# create a brand new one.
translation = self.field.related_model.new(string, lang)
else:
try:
trans = getattr(instance, self.field.name)
except (AttributeError, instance.DoesNotExist):
trans = None
if trans is not None and to_language(trans.locale) == lang.lower():
# Replace the translation in the current language.
trans.localized_string = string
translation = trans
else:
# We either could not find a translation (but know that one
# already exist, because trans_id is set) or are looking to
# create one in a different language anyway.
translation = self.field.related_model.new(
string, lang, id=trans_id)
# A new translation has been created and it might need to be saved.
# This adds the translation to the queue of translation that need
# to be saved for this instance.
add_translation(make_key(instance), translation)
return translation
def translation_from_dict(self, instance, lang, dict_):
"""
Create Translations from a {'locale': 'string'} mapping.
If one of the locales matches lang, that Translation will be returned.
"""
from olympia.amo.utils import to_language as amo_to_language
rv = None
for locale, string in dict_.items():
loc = amo_to_language(locale)
if loc not in settings.AMO_LANGUAGES:
continue
# The Translation is created and saved in here.
trans = self.translation_from_string(instance, locale, string)
# Set the Translation on the object because translation_from_string
# doesn't expect Translations to be created but not attached.
self.__set__(instance, trans)
# If we're setting the current locale, set it to the object so
# callers see the expected effect.
if to_language(locale) == lang:
rv = trans
return rv
class TranslatedField(models.ForeignKey):
"""
A foreign key to the translations table.
If require_locale=False, the fallback join will not use a locale. Instead,
we will look for 1) a translation in the current locale and 2) fallback
with any translation matching the foreign key.
"""
to = 'translations.Translation'
requires_unique_target = False
forward_related_accessor_class = TranslationDescriptor
def __init__(self, **kwargs):
# to_field: The field on the related object that the relation is to.
# Django wants to default to translations.autoid, but we need id.
kwargs.update({
'null': True,
'to_field': 'id',
'unique': True,
'blank': True,
'on_delete': models.SET_NULL
})
self.short = kwargs.pop('short', True)
self.require_locale = kwargs.pop('require_locale', True)
# "to" is passed here from the migration framework; we ignore it
# since it's the same for every instance.
kwargs.pop('to', None)
super(TranslatedField, self).__init__(self.to, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(TranslatedField, self).deconstruct()
kwargs['to'] = self.to
kwargs['short'] = self.short
kwargs['require_locale'] = self.require_locale
return (name, path, args, kwargs)
@property
def db_column(self):
# Django wants to call the db_column ('%s_id' % self.name), but our
# translations foreign keys aren't set up that way.
return self._db_column if hasattr(self, '_db_column') else self.name
@db_column.setter
def db_column(self, value):
# Django sets db_column=None to initialize it. I don't think anyone
# would set the db_column otherwise.
if value is not None:
self._db_column = value
def contribute_to_class(self, cls, name, private_only=False, **kwargs):
"""Add this Translation to ``cls._meta.translated_fields``."""
super(TranslatedField, self).contribute_to_class(
cls, name, private_only=private_only, **kwargs)
# Add self to the list of translated fields.
if hasattr(cls._meta, 'translated_fields'):
cls._meta.translated_fields.append(self)
else:
cls._meta.translated_fields = [self]
# Set up a unique related name. The + means it's hidden.
self.remote_field.related_name = '%s_%s_set+' % (cls.__name__, name)
def formfield(self, **kw):
widget = TransInput if self.short else TransTextarea
defaults = {'form_class': TransField, 'widget': widget}
defaults.update(kw)
return super(TranslatedField, self).formfield(**defaults)
def validate(self, value, model_instance):
# Skip ForeignKey.validate since that expects only one Translation when
# doing .get(id=id)
return models.Field.validate(self, value, model_instance)
class PurifiedField(TranslatedField):
to = 'translations.PurifiedTranslation'
class LinkifiedField(TranslatedField):
to = 'translations.LinkifiedTranslation'
class NoLinksNoMarkupField(TranslatedField):
to = 'translations.NoLinksNoMarkupTranslation'
def switch(obj, new_model):
"""Switch between Translation and Purified/Linkified Translations."""
fields = [
(f.name, getattr(obj, f.name, None))
for f in new_model._meta.fields]
return new_model(**dict(fields))
class _TransField(object):
def __init__(self, *args, **kwargs):
self.default_locale = settings.LANGUAGE_CODE
self.widget = kwargs.pop('widget', TransInput)
# Our translation fields behave mostly like a `CharField` but
# do inherit `ForeignKey`. Django does forward `queryset`,
# `to_field_name` and `limit_choices_to` to the form field
# for `ForeignKey`. So let's drop them before forwarding
# since `CharField` doesn't accept them.
for k in ('queryset', 'to_field_name', 'limit_choices_to'):
if k in kwargs:
del kwargs[k]
super(_TransField, self).__init__(*args, **kwargs)
def set_default_values(self, field_name, parent_form, default_locale):
self.parent_form = parent_form
self.default_locale = default_locale
self.widget.default_locale = default_locale
self._field_name = field_name
def clean(self, value):
value = {k: v.strip() if v else v for (k, v) in value.items()}
# Raise an exception if the default locale is required and not present
if self.default_locale.lower() not in value:
value[self.default_locale.lower()] = None
# Now, loop through them and validate them separately.
for locale, val in value.items():
try:
# Only the default locale can be required; all non-default
# fields are automatically optional.
if self.default_locale.lower() == locale:
super(_TransField, self).validate(val)
super(_TransField, self).run_validators(val)
except forms.ValidationError as e:
for message in e.messages:
self.parent_form.add_error(
self._field_name,
LocaleErrorMessage(message=message, locale=locale))
return value
def has_changed(self, initial, data):
# This used to be called on the field's widget and always returned
# False!
return False
class TransField(_TransField, forms.CharField):
"""
A CharField subclass that can deal with multiple locales.
Most validators are run over the data for each locale. The required
validator is only run on the default_locale, which is hooked up to the
instance with TranslationFormMixin.
"""
@staticmethod
def adapt(cls, opts=None):
"""Get a new TransField that subclasses cls instead of CharField."""
if opts is None:
opts = {}
return type('Trans%s' % cls.__name__, (_TransField, cls), opts)
def save_signal(sender, instance, **kw):
"""
Use this signal on a model to iterate through all the translations added
to the hold queue and save them all. Hook this up to the pre_save signal
on the model.
"""
if not kw.get('raw'):
save_translations(make_key(instance))
| bsd-3-clause |
david12341235/django_selenium_phantomjs_optimize | nose-picker.py | 1 | 2992 | # Copyright (c) 2014, Eventbrite and Contributors
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# Neither the name of Eventbrite nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import multiprocessing
import subprocess
import sys
import threading
TEST_CMD_TEMPLATE = 'python manage.py test -x --nologcapture \
--with-nose-picker --which-process=%d --total-processes=%d --noinput'
def main():
num_processes = int(multiprocessing.cpu_count())
tests = []
for i in range(num_processes):
test_command = TEST_CMD_TEMPLATE % (
i,
num_processes,
)
tests.append(TestWatcher(test_command))
returncode = 0
for test_watcher in tests:
test_watcher.join()
if test_watcher.returncode > 0:
returncode += test_watcher.returncode
for line in test_watcher.stderr.splitlines():
if not (
line.endswith(' ... ok') or
'... SKIP' in line
):
sys.stderr.write(line + '\n')
return returncode
class TestWatcher(threading.Thread):
def __init__(self, command):
super(TestWatcher, self).__init__()
self.command = command
self.stdout = ''
self.stderr = ''
self.returncode = 0
self.start()
def run(self):
p = subprocess.Popen(
self.command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
self.stdout, self.stderr = p.communicate()
self.returncode = p.returncode
if __name__ == '__main__':
sys.exit(main())
| mit |
NikNitro/Python-iBeacon-Scan | sympy/utilities/memoization.py | 92 | 1533 | from __future__ import print_function, division
from sympy.core.decorators import wraps
from sympy.core.compatibility import range
def recurrence_memo(initial):
"""
Memo decorator for sequences defined by recurrence
See usage examples e.g. in the specfun/combinatorial module
"""
cache = initial
def decorator(f):
@wraps(f)
def g(n):
L = len(cache)
if n <= L - 1:
return cache[n]
for i in range(L, n + 1):
cache.append(f(i, cache))
return cache[-1]
return g
return decorator
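# Usage sketch for recurrence_memo: Fibonacci numbers, seeded with the two
# initial values and extended one term at a time from the cache.
@recurrence_memo([0, 1])
def _fib(n, cache):
    return cache[n - 1] + cache[n - 2]
# _fib(10) == 55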
def assoc_recurrence_memo(base_seq):
"""
Memo decorator for associated sequences defined by recurrence starting from base
base_seq(n) -- callable to get base sequence elements
XXX works only for Pn0 = base_seq(0) cases
XXX works only for m <= n cases
"""
cache = []
def decorator(f):
@wraps(f)
def g(n, m):
L = len(cache)
if n < L:
return cache[n][m]
for i in range(L, n + 1):
# get base sequence
F_i0 = base_seq(i)
F_i_cache = [F_i0]
cache.append(F_i_cache)
# XXX only works for m <= n cases
# generate assoc sequence
for j in range(1, i + 1):
F_ij = f(i, j, cache)
F_i_cache.append(F_ij)
return cache[n][m]
return g
return decorator
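# Usage sketch for assoc_recurrence_memo: binomial coefficients C(n, m) as an
# associated sequence with base C(n, 0) = 1 (respecting the m <= n caveat).
@assoc_recurrence_memo(lambda n: 1)
def _binomial(n, m, cache):
    if m == n:
        return 1
    return cache[n - 1][m - 1] + cache[n - 1][m]
# _binomial(5, 2) == 10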
| gpl-3.0 |
sebgoa/kubeless | vendor/github.com/prometheus/client_model/python/prometheus/client/model/metrics_pb2.py | 250 | 20064 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: metrics.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='metrics.proto',
package='io.prometheus.client',
serialized_pb=_b('\n\rmetrics.proto\x12\x14io.prometheus.client\"(\n\tLabelPair\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\x16\n\x05Gauge\x12\r\n\x05value\x18\x01 \x01(\x01\"\x18\n\x07\x43ounter\x12\r\n\x05value\x18\x01 \x01(\x01\"+\n\x08Quantile\x12\x10\n\x08quantile\x18\x01 \x01(\x01\x12\r\n\x05value\x18\x02 \x01(\x01\"e\n\x07Summary\x12\x14\n\x0csample_count\x18\x01 \x01(\x04\x12\x12\n\nsample_sum\x18\x02 \x01(\x01\x12\x30\n\x08quantile\x18\x03 \x03(\x0b\x32\x1e.io.prometheus.client.Quantile\"\x18\n\x07Untyped\x12\r\n\x05value\x18\x01 \x01(\x01\"c\n\tHistogram\x12\x14\n\x0csample_count\x18\x01 \x01(\x04\x12\x12\n\nsample_sum\x18\x02 \x01(\x01\x12,\n\x06\x62ucket\x18\x03 \x03(\x0b\x32\x1c.io.prometheus.client.Bucket\"7\n\x06\x42ucket\x12\x18\n\x10\x63umulative_count\x18\x01 \x01(\x04\x12\x13\n\x0bupper_bound\x18\x02 \x01(\x01\"\xbe\x02\n\x06Metric\x12.\n\x05label\x18\x01 \x03(\x0b\x32\x1f.io.prometheus.client.LabelPair\x12*\n\x05gauge\x18\x02 \x01(\x0b\x32\x1b.io.prometheus.client.Gauge\x12.\n\x07\x63ounter\x18\x03 \x01(\x0b\x32\x1d.io.prometheus.client.Counter\x12.\n\x07summary\x18\x04 \x01(\x0b\x32\x1d.io.prometheus.client.Summary\x12.\n\x07untyped\x18\x05 \x01(\x0b\x32\x1d.io.prometheus.client.Untyped\x12\x32\n\thistogram\x18\x07 \x01(\x0b\x32\x1f.io.prometheus.client.Histogram\x12\x14\n\x0ctimestamp_ms\x18\x06 \x01(\x03\"\x88\x01\n\x0cMetricFamily\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04help\x18\x02 \x01(\t\x12.\n\x04type\x18\x03 \x01(\x0e\x32 .io.prometheus.client.MetricType\x12,\n\x06metric\x18\x04 \x03(\x0b\x32\x1c.io.prometheus.client.Metric*M\n\nMetricType\x12\x0b\n\x07\x43OUNTER\x10\x00\x12\t\n\x05GAUGE\x10\x01\x12\x0b\n\x07SUMMARY\x10\x02\x12\x0b\n\x07UNTYPED\x10\x03\x12\r\n\tHISTOGRAM\x10\x04\x42\x16\n\x14io.prometheus.client')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_METRICTYPE = _descriptor.EnumDescriptor(
name='MetricType',
full_name='io.prometheus.client.MetricType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='COUNTER', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GAUGE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SUMMARY', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNTYPED', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='HISTOGRAM', index=4, number=4,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=923,
serialized_end=1000,
)
_sym_db.RegisterEnumDescriptor(_METRICTYPE)
MetricType = enum_type_wrapper.EnumTypeWrapper(_METRICTYPE)
COUNTER = 0
GAUGE = 1
SUMMARY = 2
UNTYPED = 3
HISTOGRAM = 4
_LABELPAIR = _descriptor.Descriptor(
name='LabelPair',
full_name='io.prometheus.client.LabelPair',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='io.prometheus.client.LabelPair.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='io.prometheus.client.LabelPair.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=39,
serialized_end=79,
)
_GAUGE = _descriptor.Descriptor(
name='Gauge',
full_name='io.prometheus.client.Gauge',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='io.prometheus.client.Gauge.value', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=81,
serialized_end=103,
)
_COUNTER = _descriptor.Descriptor(
name='Counter',
full_name='io.prometheus.client.Counter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='io.prometheus.client.Counter.value', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=105,
serialized_end=129,
)
_QUANTILE = _descriptor.Descriptor(
name='Quantile',
full_name='io.prometheus.client.Quantile',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='quantile', full_name='io.prometheus.client.Quantile.quantile', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='io.prometheus.client.Quantile.value', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=131,
serialized_end=174,
)
_SUMMARY = _descriptor.Descriptor(
name='Summary',
full_name='io.prometheus.client.Summary',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='sample_count', full_name='io.prometheus.client.Summary.sample_count', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sample_sum', full_name='io.prometheus.client.Summary.sample_sum', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='quantile', full_name='io.prometheus.client.Summary.quantile', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=176,
serialized_end=277,
)
_UNTYPED = _descriptor.Descriptor(
name='Untyped',
full_name='io.prometheus.client.Untyped',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='io.prometheus.client.Untyped.value', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=279,
serialized_end=303,
)
_HISTOGRAM = _descriptor.Descriptor(
name='Histogram',
full_name='io.prometheus.client.Histogram',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='sample_count', full_name='io.prometheus.client.Histogram.sample_count', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sample_sum', full_name='io.prometheus.client.Histogram.sample_sum', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bucket', full_name='io.prometheus.client.Histogram.bucket', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=305,
serialized_end=404,
)
_BUCKET = _descriptor.Descriptor(
name='Bucket',
full_name='io.prometheus.client.Bucket',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='cumulative_count', full_name='io.prometheus.client.Bucket.cumulative_count', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='upper_bound', full_name='io.prometheus.client.Bucket.upper_bound', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=406,
serialized_end=461,
)
_METRIC = _descriptor.Descriptor(
name='Metric',
full_name='io.prometheus.client.Metric',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='label', full_name='io.prometheus.client.Metric.label', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='gauge', full_name='io.prometheus.client.Metric.gauge', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='counter', full_name='io.prometheus.client.Metric.counter', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='summary', full_name='io.prometheus.client.Metric.summary', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='untyped', full_name='io.prometheus.client.Metric.untyped', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='histogram', full_name='io.prometheus.client.Metric.histogram', index=5,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='timestamp_ms', full_name='io.prometheus.client.Metric.timestamp_ms', index=6,
number=6, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=464,
serialized_end=782,
)
_METRICFAMILY = _descriptor.Descriptor(
name='MetricFamily',
full_name='io.prometheus.client.MetricFamily',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='io.prometheus.client.MetricFamily.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='help', full_name='io.prometheus.client.MetricFamily.help', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type', full_name='io.prometheus.client.MetricFamily.type', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='metric', full_name='io.prometheus.client.MetricFamily.metric', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=785,
serialized_end=921,
)
_SUMMARY.fields_by_name['quantile'].message_type = _QUANTILE
_HISTOGRAM.fields_by_name['bucket'].message_type = _BUCKET
_METRIC.fields_by_name['label'].message_type = _LABELPAIR
_METRIC.fields_by_name['gauge'].message_type = _GAUGE
_METRIC.fields_by_name['counter'].message_type = _COUNTER
_METRIC.fields_by_name['summary'].message_type = _SUMMARY
_METRIC.fields_by_name['untyped'].message_type = _UNTYPED
_METRIC.fields_by_name['histogram'].message_type = _HISTOGRAM
_METRICFAMILY.fields_by_name['type'].enum_type = _METRICTYPE
_METRICFAMILY.fields_by_name['metric'].message_type = _METRIC
DESCRIPTOR.message_types_by_name['LabelPair'] = _LABELPAIR
DESCRIPTOR.message_types_by_name['Gauge'] = _GAUGE
DESCRIPTOR.message_types_by_name['Counter'] = _COUNTER
DESCRIPTOR.message_types_by_name['Quantile'] = _QUANTILE
DESCRIPTOR.message_types_by_name['Summary'] = _SUMMARY
DESCRIPTOR.message_types_by_name['Untyped'] = _UNTYPED
DESCRIPTOR.message_types_by_name['Histogram'] = _HISTOGRAM
DESCRIPTOR.message_types_by_name['Bucket'] = _BUCKET
DESCRIPTOR.message_types_by_name['Metric'] = _METRIC
DESCRIPTOR.message_types_by_name['MetricFamily'] = _METRICFAMILY
DESCRIPTOR.enum_types_by_name['MetricType'] = _METRICTYPE
LabelPair = _reflection.GeneratedProtocolMessageType('LabelPair', (_message.Message,), dict(
DESCRIPTOR = _LABELPAIR,
__module__ = 'metrics_pb2'
# @@protoc_insertion_point(class_scope:io.prometheus.client.LabelPair)
))
_sym_db.RegisterMessage(LabelPair)
Gauge = _reflection.GeneratedProtocolMessageType('Gauge', (_message.Message,), dict(
DESCRIPTOR = _GAUGE,
__module__ = 'metrics_pb2'
# @@protoc_insertion_point(class_scope:io.prometheus.client.Gauge)
))
_sym_db.RegisterMessage(Gauge)
Counter = _reflection.GeneratedProtocolMessageType('Counter', (_message.Message,), dict(
DESCRIPTOR = _COUNTER,
__module__ = 'metrics_pb2'
# @@protoc_insertion_point(class_scope:io.prometheus.client.Counter)
))
_sym_db.RegisterMessage(Counter)
Quantile = _reflection.GeneratedProtocolMessageType('Quantile', (_message.Message,), dict(
DESCRIPTOR = _QUANTILE,
__module__ = 'metrics_pb2'
# @@protoc_insertion_point(class_scope:io.prometheus.client.Quantile)
))
_sym_db.RegisterMessage(Quantile)
Summary = _reflection.GeneratedProtocolMessageType('Summary', (_message.Message,), dict(
DESCRIPTOR = _SUMMARY,
__module__ = 'metrics_pb2'
# @@protoc_insertion_point(class_scope:io.prometheus.client.Summary)
))
_sym_db.RegisterMessage(Summary)
Untyped = _reflection.GeneratedProtocolMessageType('Untyped', (_message.Message,), dict(
DESCRIPTOR = _UNTYPED,
__module__ = 'metrics_pb2'
# @@protoc_insertion_point(class_scope:io.prometheus.client.Untyped)
))
_sym_db.RegisterMessage(Untyped)
Histogram = _reflection.GeneratedProtocolMessageType('Histogram', (_message.Message,), dict(
DESCRIPTOR = _HISTOGRAM,
__module__ = 'metrics_pb2'
# @@protoc_insertion_point(class_scope:io.prometheus.client.Histogram)
))
_sym_db.RegisterMessage(Histogram)
Bucket = _reflection.GeneratedProtocolMessageType('Bucket', (_message.Message,), dict(
DESCRIPTOR = _BUCKET,
__module__ = 'metrics_pb2'
# @@protoc_insertion_point(class_scope:io.prometheus.client.Bucket)
))
_sym_db.RegisterMessage(Bucket)
Metric = _reflection.GeneratedProtocolMessageType('Metric', (_message.Message,), dict(
DESCRIPTOR = _METRIC,
__module__ = 'metrics_pb2'
# @@protoc_insertion_point(class_scope:io.prometheus.client.Metric)
))
_sym_db.RegisterMessage(Metric)
MetricFamily = _reflection.GeneratedProtocolMessageType('MetricFamily', (_message.Message,), dict(
DESCRIPTOR = _METRICFAMILY,
__module__ = 'metrics_pb2'
# @@protoc_insertion_point(class_scope:io.prometheus.client.MetricFamily)
))
_sym_db.RegisterMessage(MetricFamily)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\024io.prometheus.client'))
# @@protoc_insertion_point(module_scope)
| apache-2.0 |
lnliuxing/Impala | thirdparty/hive-1.1.0-cdh5.7.0-SNAPSHOT/lib/py/thrift/server/THttpServer.py | 71 | 2301 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import BaseHTTPServer
from thrift.server import TServer
from thrift.transport import TTransport
class THttpServer(TServer.TServer):
"""A simple HTTP-based Thrift server
This class is not very performant, but it is useful (for example) for
acting as a mock version of an Apache-based PHP Thrift endpoint."""
def __init__(self, processor, server_address,
inputProtocolFactory, outputProtocolFactory = None):
"""Set up protocol factories and HTTP server.
See BaseHTTPServer for server_address.
See TServer for protocol factories."""
if outputProtocolFactory is None:
outputProtocolFactory = inputProtocolFactory
TServer.TServer.__init__(self, processor, None, None, None,
inputProtocolFactory, outputProtocolFactory)
thttpserver = self
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_POST(self):
# Don't care about the request path.
self.send_response(200)
self.send_header("content-type", "application/x-thrift")
self.end_headers()
itrans = TTransport.TFileObjectTransport(self.rfile)
otrans = TTransport.TFileObjectTransport(self.wfile)
iprot = thttpserver.inputProtocolFactory.getProtocol(itrans)
oprot = thttpserver.outputProtocolFactory.getProtocol(otrans)
thttpserver.processor.process(iprot, oprot)
otrans.flush()
self.httpd = BaseHTTPServer.HTTPServer(server_address, RequestHandler)
def serve(self):
self.httpd.serve_forever()
| apache-2.0 |
sanjeevtripurari/hue | desktop/core/ext-py/requests-2.6.0/requests/packages/chardet/cp949prober.py | 2801 | 1782 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import CP949SMModel
class CP949Prober(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(CP949SMModel)
# NOTE: CP949 is a superset of EUC-KR, so the distribution should not
# be different.
self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
self.reset()
def get_charset_name(self):
return "CP949"
| apache-2.0 |
alrusdi/python-social-auth | social/backends/soundcloud.py | 83 | 2156 | """
Soundcloud OAuth2 backend, docs at:
http://psa.matiasaguirre.net/docs/backends/soundcloud.html
"""
from social.p3 import urlencode
from social.backends.oauth import BaseOAuth2
class SoundcloudOAuth2(BaseOAuth2):
"""Soundcloud OAuth authentication backend"""
name = 'soundcloud'
AUTHORIZATION_URL = 'https://soundcloud.com/connect'
ACCESS_TOKEN_URL = 'https://api.soundcloud.com/oauth2/token'
ACCESS_TOKEN_METHOD = 'POST'
SCOPE_SEPARATOR = ','
REDIRECT_STATE = False
EXTRA_DATA = [
('id', 'id'),
('refresh_token', 'refresh_token'),
('expires', 'expires')
]
def get_user_details(self, response):
"""Return user details from Soundcloud account"""
fullname, first_name, last_name = self.get_user_names(
response.get('full_name')
)
return {'username': response.get('username'),
'email': response.get('email') or '',
'fullname': fullname,
'first_name': first_name,
'last_name': last_name}
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service"""
return self.get_json('https://api.soundcloud.com/me.json',
params={'oauth_token': access_token})
def auth_url(self):
"""Return redirect url"""
state = None
if self.STATE_PARAMETER or self.REDIRECT_STATE:
# Store state in session for further request validation. The state
# value is passed as state parameter (as specified in OAuth2 spec),
# but also added to redirect_uri, that way we can still verify the
# request if the provider doesn't implement the state parameter.
# Reuse token if any.
name = self.name + '_state'
state = self.strategy.session_get(name) or self.state_token()
self.strategy.session_set(name, state)
params = self.auth_params(state)
params.update(self.get_scope_argument())
params.update(self.auth_extra_arguments())
return self.AUTHORIZATION_URL + '?' + urlencode(params)
| bsd-3-clause |
demon-ru/iml-crm | addons/product/_common.py | 111 | 1418 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
import math
def rounding(f, r):
# TODO for trunk: log deprecation warning
# _logger.warning("Deprecated rounding method, please use tools.float_round to round floats.")
return tools.float_round(f, precision_rounding=r)
# TODO for trunk: add rounding method parameter to tools.float_round and use this method as hook
def ceiling(f, r):
if not r:
return f
return math.ceil(f / r) * r
| agpl-3.0 |
project-voodoo/linux_samsung | Documentation/target/tcm_mod_builder.py | 3119 | 42754 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: nab@kernel.org
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
buf += " u64 nport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* SCSI protocol the lport is providing */\n"
buf += " u8 lport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
	p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
buf += " u64 iport_wwpn;\n"
buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
	p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* ASCII formatted InitiatorName */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
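# Summary of the include builders above (derived from the code, for reference):
#   FC    -> fabric_mod_port = "lport", fabric_mod_init_port = "nport"
#   SAS   -> fabric_mod_port = "tport", fabric_mod_init_port = "iport"
#   iSCSI -> fabric_mod_port = "tport", fabric_mod_init_port = "iport"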
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
	p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_transport.h>\n"
buf += "#include <target/target_core_fabric_ops.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_fabric_lib.h>\n"
buf += "#include <target/target_core_device.h>\n"
buf += "#include <target/target_core_tpg.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!(se_nacl_new))\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
buf += " * when converting a NodeACL from demo mode -> explict\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!(tpg)) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
buf += " &tpg->se_tpg, (void *)tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!(" + fabric_mod_port + ")) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "__NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd_to_pool = " + fabric_mod_name + "_release_cmd,\n"
buf += " .release_cmd_direct = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .new_cmd_failure = " + fabric_mod_name + "_new_cmd_failure,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
buf += " .pack_lun = " + fabric_mod_name + "_pack_lun,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "};\n\n"
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
buf += "{\n"
buf += " struct target_fabric_configfs *fabric;\n"
buf += " int ret;\n\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
buf += " if (!(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return -ENOMEM;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
buf += " */\n"
buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
buf += " ret = target_fabric_configfs_register(fabric);\n"
buf += " if (ret < 0) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
buf += " return ret;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup our local pointer to *fabric\n"
buf += " */\n"
buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!(" + fabric_mod_name + "_fabric_configfs))\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " int ret;\n\n"
buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
buf += " if (ret < 0)\n"
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "#ifdef MODULE\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
buf += "#endif\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric_ops.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
	process_fo = 0
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
			process_fo = 1
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_transport.h>\n"
buf += "#include <target/target_core_fabric_ops.h>\n"
buf += "#include <target/target_core_fabric_lib.h>\n"
buf += "#include <target/target_core_device.h>\n"
buf += "#include <target/target_core_tpg.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!(nacl)) {\n"
buf += " printk(KERN_ERR \"Unable to alocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('release_cmd_to_pool', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('stop_session\)\(', fo):
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
if re.search('fall_back_to_erl0\)\(', fo):
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
if re.search('sess_logged_in\)\(', fo):
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('new_cmd_failure\)\(', fo):
buf += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('get_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
if re.search('set_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
if re.search('is_state_remove\)\(', fo):
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
if re.search('pack_lun\)\(', fo):
buf += "u64 " + fabric_mod_name + "_pack_lun(unsigned int lun)\n"
buf += "{\n"
buf += " WARN_ON(lun >= 256);\n"
buf += " /* Caller wants this byte-swapped */\n"
buf += " return cpu_to_le64((lun & 0xff) << 8);\n"
buf += "}\n\n"
bufi += "u64 " + fabric_mod_name + "_pack_lun(unsigned int);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
	tcm_dir = os.getcwd()
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
exit(-1)
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
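# Illustrative invocation (the module name is hypothetical), run from
# Documentation/target/ inside a kernel tree so that tcm_dir resolves to the
# tree root:
#
#   python tcm_mod_builder.py -m tcm_nab5000 -p iSCSI
#
# This emits drivers/target/tcm_nab5000/ containing tcm_nab5000_base.h,
# tcm_nab5000_fabric.c, tcm_nab5000_fabric.h, tcm_nab5000_configfs.c, Kconfig
# and Makefile, then optionally appends the new module to
# drivers/target/Kconfig and drivers/target/Makefile.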
| gpl-2.0 |
yamahata/neutron | neutron/tests/unit/bigswitch/test_restproxy_agent.py | 4 | 6634 | # Copyright 2014 Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Kevin Benton, Big Switch Networks
from contextlib import nested
import mock
from neutron.openstack.common import importutils
from neutron.tests import base
OVSBRIDGE = 'neutron.agent.linux.ovs_lib.OVSBridge'
PLUGINAPI = 'neutron.plugins.bigswitch.agent.restproxy_agent.PluginApi'
CONTEXT = 'neutron.context'
CONSUMERCREATE = 'neutron.agent.rpc.create_consumers'
SGRPC = 'neutron.agent.securitygroups_rpc'
SGAGENT = 'neutron.plugins.bigswitch.agent.restproxy_agent.SecurityGroupAgent'
AGENTMOD = 'neutron.plugins.bigswitch.agent.restproxy_agent'
NEUTRONCFG = 'neutron.common.config'
PLCONFIG = 'neutron.plugins.bigswitch.config'
class BaseAgentTestCase(base.BaseTestCase):
def setUp(self):
super(BaseAgentTestCase, self).setUp()
self.mod_agent = importutils.import_module(AGENTMOD)
class TestRestProxyAgentOVS(BaseAgentTestCase):
def setUp(self):
super(TestRestProxyAgentOVS, self).setUp()
self.plapi = mock.patch(PLUGINAPI).start()
self.ovsbridge = mock.patch(OVSBRIDGE).start()
self.context = mock.patch(CONTEXT).start()
self.rpc = mock.patch(CONSUMERCREATE).start()
self.sg_rpc = mock.patch(SGRPC).start()
self.sg_agent = mock.patch(SGAGENT).start()
def mock_agent(self):
mock_context = mock.Mock(return_value='abc')
self.context.get_admin_context_without_session = mock_context
return self.mod_agent.RestProxyAgent('int-br', 2, 'helper')
def mock_port_update(self, **kwargs):
agent = self.mock_agent()
agent.port_update(mock.Mock(), **kwargs)
def test_port_update(self):
port = {'id': 1, 'security_groups': 'default'}
with mock.patch.object(self.ovsbridge.return_value,
'get_vif_port_by_id',
return_value=1) as get_vif:
self.mock_port_update(port=port)
get_vif.assert_called_once_with(1)
self.sg_agent.assert_has_calls([
mock.call().refresh_firewall()
])
def test_port_update_not_vifport(self):
port = {'id': 1, 'security_groups': 'default'}
with mock.patch.object(self.ovsbridge.return_value,
'get_vif_port_by_id',
return_value=0) as get_vif:
self.mock_port_update(port=port)
get_vif.assert_called_once_with(1)
self.assertFalse(self.sg_agent.return_value.refresh_firewall.called)
def test_port_update_without_secgroup(self):
port = {'id': 1}
with mock.patch.object(self.ovsbridge.return_value,
'get_vif_port_by_id',
return_value=1) as get_vif:
self.mock_port_update(port=port)
get_vif.assert_called_once_with(1)
self.assertFalse(self.sg_agent.return_value.refresh_firewall.called)
def mock_update_ports(self, vif_port_set=None, registered_ports=None):
with mock.patch.object(self.ovsbridge.return_value,
'get_vif_port_set',
return_value=vif_port_set):
agent = self.mock_agent()
return agent._update_ports(registered_ports)
def test_update_ports_unchanged(self):
self.assertIsNone(self.mock_update_ports())
def test_update_ports_changed(self):
vif_port_set = set([1, 3])
registered_ports = set([1, 2])
expected = dict(current=vif_port_set,
added=set([3]),
removed=set([2]))
actual = self.mock_update_ports(vif_port_set, registered_ports)
self.assertEqual(expected, actual)
def mock_process_devices_filter(self, port_info):
agent = self.mock_agent()
agent._process_devices_filter(port_info)
def test_process_devices_filter_add(self):
port_info = {'added': 1}
self.mock_process_devices_filter(port_info)
self.sg_agent.assert_has_calls([
mock.call().prepare_devices_filter(1)
])
def test_process_devices_filter_remove(self):
port_info = {'removed': 2}
self.mock_process_devices_filter(port_info)
self.sg_agent.assert_has_calls([
mock.call().remove_devices_filter(2)
])
def test_process_devices_filter_both(self):
port_info = {'added': 1, 'removed': 2}
self.mock_process_devices_filter(port_info)
self.sg_agent.assert_has_calls([
mock.call().prepare_devices_filter(1),
mock.call().remove_devices_filter(2)
])
def test_process_devices_filter_none(self):
port_info = {}
self.mock_process_devices_filter(port_info)
self.assertFalse(
self.sg_agent.return_value.prepare_devices_filter.called)
self.assertFalse(
self.sg_agent.return_value.remove_devices_filter.called)
class TestRestProxyAgent(BaseAgentTestCase):
def mock_main(self):
cfg_attrs = {'CONF.RESTPROXYAGENT.integration_bridge': 'integ_br',
'CONF.RESTPROXYAGENT.polling_interval': 5,
'CONF.RESTPROXYAGENT.virtual_switch_type': 'ovs',
'CONF.AGENT.root_helper': 'helper'}
with nested(
mock.patch(AGENTMOD + '.cfg', **cfg_attrs),
mock.patch(NEUTRONCFG),
mock.patch(PLCONFIG),
) as (mock_conf, mock_log_conf, mock_pluginconf):
self.mod_agent.main()
mock_log_conf.assert_has_calls([
mock.call(mock_conf),
])
def test_main(self):
agent_attrs = {'daemon_loop.side_effect': SystemExit(0)}
with mock.patch(AGENTMOD + '.RestProxyAgent',
**agent_attrs) as mock_agent:
self.assertRaises(SystemExit, self.mock_main)
mock_agent.assert_has_calls([
mock.call('integ_br', 5, 'helper', 'ovs'),
mock.call().daemon_loop()
])
| apache-2.0 |
vrenaville/OCB | openerp/report/render/render.py | 457 | 2524 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Why do some multi-threading here instead of using OSE capabilities?
# For the progress bar.
#
# Adds a transparent multi-thread layer to all report rendering layers.
#
# TODO: method to store the result on disk
class render(object):
""" Represents a report job being rendered.
@param bin_datas a dictionary of name:<binary content> of images etc.
@param path the path in which binary files can be discovered, useful
for components (images) of the report. It can be:
- a string, relative or absolute path to images
- a list, containing strings of paths.
If a string is absolute path, it will be opened as such, else
it will be passed to tools.file_open() which also considers zip
addons.
Reporting classes must subclass this class and redefine the __init__ and
_render methods (not the other methods).
"""
def __init__(self, bin_datas=None, path='.'):
self.done = False
if bin_datas is None:
self.bin_datas = {}
else:
self.bin_datas = bin_datas
self.path = path
def _render(self):
return None
def render(self):
self.done = False
self._result = self._render()
self.done = True
return True
def is_done(self):
return self.done
def get(self):
if self.is_done():
return self._result
else:
return None
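# Minimal illustrative subclass (not part of OpenERP itself), showing the
# subclassing contract described in the docstring: redefine __init__ and
# _render only.
#
#   class echo_render(render):
#       def __init__(self, text, bin_datas=None, path='.'):
#           render.__init__(self, bin_datas, path)
#           self.text = text
#
#       def _render(self):
#           return self.text
#
#   r = echo_render('hello')
#   r.render()
#   assert r.is_done() and r.get() == 'hello'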
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
rahuldhote/scikit-learn | sklearn/metrics/cluster/supervised.py | 207 | 27395 | """Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Wei LI <kuantkid@gmail.com>
# Diego Molla <dmolla-aliod@gmail.com>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
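# e.g. comb2(4) == 6, the number of unordered pairs among four samples.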
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays"""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
"""Build a contengency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
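# Worked example (illustrative): labels_true=[0, 0, 1], labels_pred=[1, 1, 0]
# gives classes=[0, 1], clusters=[0, 1] and the matrix [[0, 2], [1, 0]]: both
# true-class-0 samples land in predicted cluster 1, and the single class-1
# sample in predicted cluster 0.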
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
    Perfectly matching labelings have a score of 1 even
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Labelings that assign all class members to the same clusters
    are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
If classes members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://www.springerlink.com/content/x64124718341j1j0/
.. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
or classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
# Compute the ARI using the contingency data
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
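# Worked arithmetic (illustrative) for the docstring example
# adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]):
#   contingency = [[2, 0], [0, 1], [0, 1]]
#   sum_comb_c = comb2(2) + comb2(1) + comb2(1) = 1
#   sum_comb_k = comb2(2) + comb2(2) = 2
#   sum_comb = comb2(2) = 1, with comb(4, 2) = 6
#   prod_comb = 1 * 2 / 6.0 = 1/3, mean_comb = (2 + 1) / 2. = 1.5
#   ARI = (1 - 1/3) / (1.5 - 1/3) = 4/7 ~= 0.57...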
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once
Those metrics are based on normalized conditional entropy measures of
    the clustering labeling to evaluate, given the knowledge of the ground
    truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness
/ (homogeneity + completeness))
return homogeneity, completeness, v_measure_score
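# Worked numbers (illustrative, natural-log entropies) for
# labels_true=[0, 0, 1, 2], labels_pred=[0, 0, 1, 1]:
#   entropy_C ~= 1.0397, entropy_K ~= 0.6931, MI ~= 0.6931
#   homogeneity = MI / entropy_C ~= 0.6667 (one cluster mixes classes 1 and 2)
#   completeness = MI / entropy_K = 1.0 (each class sits in a single cluster)
#   v_measure = 2 * 0.6667 * 1.0 / (0.6667 + 1.0) ~= 0.8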
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for an
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``labels_true`` with ``labels_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness : float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that assign all class members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
If class members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is the mutual information normalized by the arithmetic mean of the
label entropies (cf. :func:`normalized_mutual_info_score`, which normalizes by
their geometric mean).
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``labels_true`` with
``labels_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignment strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure : float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all class members to the same clusters
are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
classes are homogeneous, but unnecessary splits harm completeness
and thus penalize the V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
If class members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from entirely different classes
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^{R} \sum_{j=1}^{C} P(i,j)\log\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``labels_true`` with
``labels_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignment strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency : None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi : float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
# log(a / b) should be calculated as log(a) - log(b) to avoid
# possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
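# A hedged cross-check (added; the helper name `_manual_mi` is ours, not part
# of the original API, and it assumes numpy is importable as np as elsewhere
# in this module): computing MI directly from the probability formula in the
# docstring above should agree with mutual_info_score on small inputs.
def _manual_mi(labels_true, labels_pred):
    labels_true = np.asarray(labels_true)
    labels_pred = np.asarray(labels_pred)
    n = float(len(labels_true))
    mi = 0.0
    for i in np.unique(labels_true):
        for j in np.unique(labels_pred):
            # joint and marginal probabilities of the (class, cluster) pair
            p_ij = np.sum((labels_true == i) & (labels_pred == j)) / n
            p_i = np.sum(labels_true == i) / n
            p_j = np.sum(labels_pred == j) / n
            if p_ij > 0.0:
                mi += p_ij * np.log(p_ij / (p_i * p_j))
    return mi
# e.g. _manual_mi([0, 0, 1, 1], [0, 1, 1, 1]) agrees with
# mutual_info_score([0, 0, 1, 1], [0, 1, 1, 1]) up to floating-point error.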
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``labels_true`` with
``labels_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignment strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
ami : float (upper bound of 1.0)
The AMI returns a value of 1 when the two partitions are identical
(i.e. perfectly matched). Random partitions (independent labellings) have
an expected AMI of around 0 on average and can therefore be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
mutual_info_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If class members are completely split across different clusters,
the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings
Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``.
This measure is not adjusted for chance. Therefore
:func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``labels_true`` with
``labels_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignment strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi : float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If class members are completely split across different clusters,
the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
# log(a / b) should be calculated as log(a) - log(b) to avoid
# possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
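# A worked example (added): for the labeling [0, 0, 1, 1] each label has
# probability 0.5, so entropy([0, 0, 1, 1]) returns
# -(0.5 * log(0.5) + 0.5 * log(0.5)) = log(2) ~= 0.693147.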
| bsd-3-clause |
izadorozhna/tempest | tempest/api/compute/admin/test_simple_tenant_usage.py | 2 | 3369 | # Copyright 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from tempest.api.compute import base
from tempest import test
from tempest_lib import exceptions as e
# Time to wait until a valid response is returned
# TODO(takmatsu): Ideally this value would come from configuration.
VALID_WAIT = 30
class TenantUsagesTestJSON(base.BaseV2ComputeAdminTest):
@classmethod
def setup_clients(cls):
super(TenantUsagesTestJSON, cls).setup_clients()
cls.adm_client = cls.os_adm.tenant_usages_client
cls.client = cls.os.tenant_usages_client
@classmethod
def resource_setup(cls):
super(TenantUsagesTestJSON, cls).resource_setup()
cls.tenant_id = cls.client.tenant_id
# Create a server in the demo tenant
cls.create_test_server(wait_until='ACTIVE')
now = datetime.datetime.now()
cls.start = cls._parse_strtime(now - datetime.timedelta(days=1))
cls.end = cls._parse_strtime(now + datetime.timedelta(days=1))
@classmethod
def _parse_strtime(cls, at):
# Returns formatted datetime
return at.strftime('%Y-%m-%dT%H:%M:%S.%f')
def call_until_valid(self, func, duration, *args, **kwargs):
# Call until we get a valid response, for up to "duration" seconds,
# because tenant usage doesn't become available immediately
# after creating a VM.
def is_valid():
try:
self.resp = func(*args, **kwargs)
return True
except e.InvalidHTTPResponseBody:
return False
test.call_until_true(is_valid, duration, 1)
return self.resp
@test.idempotent_id('062c8ae9-9912-4249-8b51-e38d664e926e')
def test_list_usage_all_tenants(self):
# Get usage for all tenants
tenant_usage = self.call_until_valid(
self.adm_client.list_tenant_usages, VALID_WAIT,
start=self.start, end=self.end, detailed="1")['tenant_usages'][0]
self.assertEqual(len(tenant_usage), 8)
@test.idempotent_id('94135049-a4c5-4934-ad39-08fa7da4f22e')
def test_get_usage_tenant(self):
# Get usage for a specific tenant
tenant_usage = self.call_until_valid(
self.adm_client.show_tenant_usage, VALID_WAIT,
self.tenant_id, start=self.start, end=self.end)['tenant_usage']
self.assertEqual(len(tenant_usage), 8)
@test.idempotent_id('9d00a412-b40e-4fd9-8eba-97b496316116')
def test_get_usage_tenant_with_non_admin_user(self):
# Get usage for a specific tenant with non admin user
tenant_usage = self.call_until_valid(
self.client.show_tenant_usage, VALID_WAIT,
self.tenant_id, start=self.start, end=self.end)['tenant_usage']
self.assertEqual(len(tenant_usage), 8)
| apache-2.0 |
vijayanant/kunai | kunai/ts.py | 1 | 19445 | import os
import leveldb
import time
import threading
import random
import shutil
import socket
import base64
import cPickle
import marshal
import json
import hashlib
import numpy as np  # used by TSListener.compute_stats below
SERIALIZER = cPickle
from kunai.stats import STATS
from kunai.log import logger
from kunai.threadmgr import threader
from kunai.now import NOW
# Will have to forward to graphite instead
class UDPSender(object):
def __init__(self, clust):
self.clust = clust
def push_key(self, k, v, ttl=0):
T0 = time.time()
STATS.incr('ts.graphite.push-key', 1)
v64 = base64.b64encode(v)
logger.debug("PUSH KEY", k, "and value", len(v64))
#self.clust.put_key(k, v64, allow_udp=True, ttl=ttl)
self.clust.stack_put_key(k, v64, ttl=ttl)
STATS.timer('ts.graphite.push-key', (time.time() - T0)*1000)
return
class TSBackend(object):
def __init__(self, usender, clust):
#self.its = its
self.usender = usender
self.data = {}
self.data_lock = threading.RLock()
self.max_data_age = 5
# Get a db with all our metrics listed
# We will save all the metrics we have
self.clust = clust
self.db_dir = os.path.join(clust.data_dir, 'ts')
self.db = leveldb.LevelDB(self.db_dir)
# Ok start our inner thread
self.launch_reaper_thread()
# push a name, and return whether the name was already there or not
def set_name_if_unset(self, key):
try:
self.db.Get(key, fill_cache=False)
except KeyError:
self.db.Put(key, '')
logger.debug('TS propagating a new key', key)
# now propagate the key to the other ts nodes
self.clust.stack_new_ts_broadcast(key)
return False
# List all keys in our name db that match the beginning of key
def list_keys(self, key):
if key:
start = key
l = chr(ord(key[-1]) + 1)
end = key[:-1]+l
print "LOOKUP from %s to %s" % (start, end)
r = self.db.RangeIter(start, end, include_value=False, fill_cache=False)
else: # Full scan? are you crazy?
print "LOOKUP full scan"
r = self.db.RangeIter(include_value=False, fill_cache=False)
return list(r)
# We consume data and create a new data entry if needed
def add_value(self, t, key, v):
# do nothing, will have to forward to graphite instead
return
# The reaper thread looks at old minute objects that have not been updated
# for a long time, and forces them to be archived
def launch_reaper_thread(self):
threader.create_and_launch(self.do_reaper_thread, name='TS-reaper-thread')
def do_reaper_thread(self):
while True:
time.sleep(1) # no longer needed, as we will forward the ts
class TSListener(object):
def __init__(self, clust):
self.clust = clust
self.addr = '0.0.0.0'
self.statsd_port = 8125
self.graphite_port = 2003
self.last_write = time.time()
self.nb_data = 0
self.stats_interval = 10
# Do not step on your own foot...
self.stats_lock = threading.RLock()
# our main data structs
self.gauges = {}
self.timers = {}
self.histograms = {}
self.counters = {}
# our helper objects
#self.its = IdxTSDatabase(self.clust)
self.usender = UDPSender(self.clust)
self.tsb = TSBackend(self.usender, self.clust)
# Now start our threads
# STATSD
threader.create_and_launch(self.launch_statsd_udp_listener, name='TSL_statsd_thread')
threader.create_and_launch(self.launch_compute_stats_thread, name='TSC_thread')
# GRAPHITE
threader.create_and_launch(self.launch_graphite_udp_listener, name='TSL_graphite_udp_thread')
threader.create_and_launch(self.launch_graphite_tcp_listener, name='TSL_graphite_tcp_thread')
self.graphite_queue = []
threader.create_and_launch(self.graphite_reaper, name='graphite-reaper-thread')
def log(self, *args):
logger.log(*args)
# push a name, and return whether the name was already there or not
def set_name_if_unset(self, key):
return self.tsb.set_name_if_unset(key)
# list all keys that start with key
def list_keys(self, key):
return self.tsb.list_keys(key)
def to_best_int_float(self, val):
try:
i = int(float(val))
f = float(val)
except ValueError:
return None
# If the f is a .0 value,
# best match is int
if i == f:
return i
return f
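# Behaviour sketch (added; inputs are illustrative):
#   self.to_best_int_float('3.0') -> 3     (whole-valued floats become ints)
#   self.to_best_int_float('3.5') -> 3.5   (true fractions stay floats)
#   self.to_best_int_float('abc') -> None  (unparsable input)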
# The compute stats thread computes the STATSD values every X
# seconds and pushes them into the classic TS part
def launch_compute_stats_thread(self):
while True:
now = time.time()
if now > self.last_write + self.stats_interval:
self.compute_stats()
self.last_write = now
time.sleep(0.1)
def compute_stats(self):
now = int(time.time())
logger.debug("Computing stats")
names = []
# First gauges: we take the data and swap in an empty dict so the other thread can keep working
with self.stats_lock:
gauges = self.gauges
self.gauges = {}
for mname in gauges:
_sum, nb, _min, _max = gauges[mname]
_avg = _sum / float(nb)
key = 'stats.gauges.'+mname
self.tsb.add_value(now, key, _avg)
key = 'stats.gauges.'+mname+'.min'
self.tsb.add_value(now, key, _min)
key = 'stats.gauges.'+mname+'.max'
self.tsb.add_value(now, key, _max)
# Now counters
with self.stats_lock:
counters = self.counters
self.counters = {}
for mname in counters:
cvalue, ccount = counters[mname]
# count
key = 'stats.gauges.'+mname+'.count'
self.tsb.add_value(now, key, cvalue)
# rate
key = 'stats.gauges.'+mname+'.rate'
self.tsb.add_value(now, key, cvalue / self.stats_interval)
# Now timers, lot of funs :)
with self.stats_lock:
timers = self.timers
self.timers = {}
_t = time.time()
for (mname, timer) in timers.iteritems():
# We will need to compute the mean_99, count_99, upper_99, sum_99, sum_squares_99
# but also std, upper, lower, count, count_ps, sum, sum_square, mean, median
_t = time.time()
npvalues = np.array(timer)
# Mean
mean = np.mean(npvalues)
key = 'stats.timers.'+mname+'.mean'
self.tsb.add_value(now, key, mean)
# Upper 99th, percentile
upper_99 = np.percentile(npvalues, 99)
key = 'stats.timers.'+mname+'.upper_99'
self.tsb.add_value(now, key, upper_99)
# Sum 99
sum_99 = npvalues[:(npvalues<upper_99).argmin()].sum()
key = 'stats.timers.'+mname+'.sum_99'
self.tsb.add_value(now, key, sum_99)
# Standard deviation
std = np.std(npvalues)
key = 'stats.timers.'+mname+'.std'
self.tsb.add_value(now, key, std)
# Simple count
count = len(timer)
key = 'stats.timers.'+mname+'.count'
self.tsb.add_value(now, key, count)
# Sum of all
_sum = np.sum(npvalues)
key = 'stats.timers.'+mname+'.sum'
self.tsb.add_value(now, key, _sum)
# Median of all
median = np.percentile(npvalues, 50)
key = 'stats.timers.'+mname+'.median'
self.tsb.add_value(now, key, median)
# Upper of all
upper = np.max(npvalues)
key = 'stats.timers.'+mname+'.upper'
self.tsb.add_value(now, key, upper)
# Lower of all
lower = np.min(npvalues)
key = 'stats.timers.'+mname+'.lower'
self.tsb.add_value(now, key, lower)
# This is the main STATSD UDP listener thread. It should not block and
# be as fast as possible
def launch_statsd_udp_listener(self):
self.udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
self.udp_sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1048576)
self.udp_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.log(self.udp_sock.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF))
self.udp_sock.bind((self.addr, self.statsd_port))
self.log("TS UDP port open", self.statsd_port)
self.log("UDP RCVBUF", self.udp_sock.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF))
while True:#not self.interrupted:
try:
data, addr = self.udp_sock.recvfrom(65535) # buffer size is 65535 bytes
except socket.timeout: # loop until we get something
continue
self.log("UDP: received message:", data, addr)
# No data? bail out :)
if len(data) == 0:
continue
self.log("GETDATA", data)
for line in data.splitlines():
# avoid invalid lines
if not '|' in line:
continue
elts = line.split('|', 1)
# invalid, no type in the right part
if len(elts) == 1:
continue
_name_value = elts[0].strip()
# maybe it's an invalid name...
if not ':' in _name_value:
continue
_nvs = _name_value.split(':')
if len(_nvs) != 2:
continue
mname = _nvs[0].strip()
# Two cases: it's for me or not
hkey = hashlib.sha1(mname).hexdigest()
ts_node_manager = self.clust.find_ts_node(hkey)
# if I am the one managing this key, I add it in my backend
if ts_node_manager != self.clust.uuid:
node = self.clust.nodes.get(ts_node_manager, None)
# threads are dangerous things...
if node is None:
continue
# TODO: do bulk send of this, like for graphite
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# do NOT use the node['port'], it's the internal communication, not the graphite one!
sock.sendto(line, (node['addr'], self.statsd_port))
sock.close()
continue
# Here we are sure it's really for us, so manage it :)
value = self.to_best_int_float(_nvs[1].strip())
if not mname or value is None:
continue
# Look at the type of the data
_type = elts[1].strip()
if len(_type) == 0:
continue
## Gauge: <metric name>:<value>|g
elif _type == 'g':
self.nb_data += 1
self.log('GAUGE', mname, value)
with self.stats_lock:
gentry = self.gauges.get(mname, None)
if gentry is None:
# sum, nb, min, max
gentry = (0.0, 0, None, None)
_sum, nb, _min, _max = gentry
_sum += value
nb += 1
if _min is None or value < _min:
_min = value
if _max is None or value > _max:
_max = value
self.gauges[mname] = (_sum, nb, _min, _max)
self.log('NEW GAUGE', mname, self.gauges[mname])
## Timers: <metric name>:<value>|ms
## But also
## Histograms: <metric name>:<value>|h
elif _type == 'ms' or _type == 'h':
self.log('timers', mname, value)
# TODO: avoid the SET each time
timer = self.timers.get(mname, [])
timer.append(value)
self.timers[mname] = timer
## Counters: <metric name>:<value>|c[|@<sample rate>]
elif _type == 'c':
self.nb_data += 1
self.log('COUNTER', mname, value, "rate", 1)
with self.stats_lock:
cvalue, ccount = self.counters.get(mname, (0,0))
self.counters[mname] = (cvalue + value, ccount + 1)
self.log('NEW COUNTER', mname, self.counters[mname])
## Meters: <metric name>:<value>|m
elif _type == 'm':
self.log('METERs', mname, value)
else: # unknown type, maybe a c[|@<sample rate>]
if _type[0] == 'c':
self.nb_data += 1
if not '|' in _type:
continue
srate = _type.split('|')[1].strip()
if len(srate) == 0 or srate[0] != '@':
continue
try:
rate = float(srate[1:])
except ValueError:
continue
# Invalid rate, 0.0 is invalid too ;)
if rate <= 0.0 or rate > 1.0:
continue
self.log('COUNTER', mname, value, "rate", rate)
with self.stats_lock:
cvalue, ccount = self.counters.get(mname, (0,0))
self.log('INCR counter', (value/rate))
self.counters[mname] = (cvalue + (value/rate), ccount + 1/rate)
self.log('NEW COUNTER', mname, self.counters[mname])
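# Example datagrams accepted by the parser above (added illustration; the
# metric names are made up):
#   "api.latency:320|ms"  -> appended to the 'api.latency' timer list
#   "cpu.load:1.5|g"      -> folded into the 'cpu.load' gauge (sum, nb, min, max)
#   "hits:3|c"            -> counter value +3, count +1
#   "hits:3|c|@0.1"       -> sampled counter: value scaled by 1/0.1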
# Thread for listening to the graphite port in UDP (2003)
def launch_graphite_udp_listener(self):
self.graphite_udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
self.graphite_udp_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.graphite_udp_sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1048576)
self.log(self.graphite_udp_sock.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF))
self.graphite_udp_sock.bind((self.addr, self.graphite_port))
self.log("TS Graphite UDP port open", self.graphite_port)
self.log("UDP RCVBUF", self.graphite_udp_sock.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF))
while True:#not self.interrupted:
try:
data, addr = self.graphite_udp_sock.recvfrom(65535)
except socket.timeout: # loop until we get some data
continue
self.log("UDP Graphite: received message:", len(data), addr)
STATS.incr('ts.graphite.udp.receive', 1)
self.graphite_queue.append(data)
# Same but for the TCP connections
# TODO: use a real daemon part for this, this is not ok for fast receive
def launch_graphite_tcp_listener(self):
self.graphite_tcp_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.graphite_tcp_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.graphite_tcp_sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1048576)
self.graphite_tcp_sock.bind((self.addr, self.graphite_port))
self.graphite_tcp_sock.listen(5)
self.log("TS Graphite TCP port open", self.graphite_port)
while True:
try:
conn, addr = self.graphite_tcp_sock.accept()
except socket.timeout: # loop until we get a connection
continue
conn.settimeout(5.0)
logger.debug('TCP Graphite Connection address:', addr)
data = ''
while 1:
try:
ldata = conn.recv(1024)
except Exception, exp:
print "TIMEOUT", exp
break
if not ldata:
break
# Look at only full lines, and not the last part
# So we look at the position of the last \n
lst_nidx = ldata.rfind('\n')
# take all finished lines
data += ldata[:lst_nidx+1]
STATS.incr('ts.graphite.tcp.receive', 1)
self.graphite_queue.append(data)
# stack the data with the garbage so we will continue it
# on the next turn
data = ldata[lst_nidx+1:]
conn.close()
# Also stack whatever was left over from the last recv
self.graphite_queue.append(data)
# Main graphite reaper thread, that will get data from both the TCP and UDP flows
# and dispatch it to the other daemons if needed
def graphite_reaper(self):
while True:
graphite_queue = self.graphite_queue
self.graphite_queue = []
if len(graphite_queue) > 0:
logger.info("Graphite queue", len(graphite_queue))
for data in graphite_queue:
T0 = time.time()
self.grok_graphite_data(data)
STATS.timer('ts.graphite.grok-graphite-data', (time.time() - T0)*1000)
time.sleep(0.1)
# Parse graphite-compatible lines; runs in the graphite-reaper thread
def grok_graphite_data(self, data):
STATS.incr('ts.graphite.grok.data', 1)
forwards = {}
for line in data.splitlines():
elts = line.split(' ')
elts = [s.strip() for s in elts if s.strip()]
if len(elts) != 3:
continue  # skip this malformed line instead of dropping the whole batch
mname, value, timestamp = elts[0], elts[1], elts[2]
hkey = hashlib.sha1(mname).hexdigest()
ts_node_manager = self.clust.find_ts_node(hkey)
# if it's me that manage this key, I add it in my backend
if ts_node_manager == self.clust.uuid:
logger.debug("I am the TS node manager")
try:
timestamp = int(timestamp)
except ValueError:
continue  # skip this line's bad timestamp instead of aborting the batch
value = self.to_best_int_float(value)
if value is None:
continue
self.tsb.add_value(timestamp, mname, value)
# not me? stack a forwarder
else:
logger.debug("The node manager for this Ts is ", ts_node_manager)
l = forwards.get(ts_node_manager, [])
l.append(line)
forwards[ts_node_manager] = l
for (uuid, lst) in forwards.iteritems():
node = self.clust.nodes.get(uuid, None)
# maybe the node disappeared? bail out, we are not lucky
if node is None:
continue
packets = []
# first compute the packets
buf = ''
for line in lst:
buf += line+'\n'
if len(buf) > 1024:
packets.append(buf)
buf = ''
if buf != '':
packets.append(buf)
# UDP
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
for packet in packets:
# do NOT use the node['port'], it's the internal communication, not the graphite one!
sock.sendto(packet, (node['addr'], self.graphite_port))
sock.close()
'''
# TCP mode
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect( (node['addr'], self.graphite_port) )
for packet in packets:
sock.sendall(packet)
sock.close()
'''
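# The graphite plaintext protocol handled above is one metric per line
# (added example; the metric path is invented):
#   "servers.web01.cpu.user 42.5 1315574123\n"
# i.e. "<metric path> <value> <unix timestamp>", split on whitespace and then
# routed to the owning node by sha1(metric path).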
| mit |
scotty007/libavg | src/python/app/settings.py | 2 | 8225 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# libavg - Media Playback Engine.
# Copyright (C) 2003-2014 Ulrich von Zadow
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Current versions can be found at www.libavg.de
#
# Original author of this file is OXullo Interecans <x at brainrapers dot org>
import sys
import re
import optparse
import libavg
class Option(object):
def __init__(self, key, value, help=None):
if not isinstance(key, str):
raise ValueError('The type of %s key is not string (value=%s)' % (key, value))
self.__key = key
self.value = value
self.__help = help
def __repr__(self):
return '<%s key=%s value=%s help=%s>' % (self.__class__.__name__,
self.key, self.value, self.help)
@property
def key(self):
return self.__key
@property
def value(self):
return self.__value
@value.setter
def value(self, value):
if not isinstance(value, str):
raise ValueError('The type of %s value (%s) '
'must be string instead of %s' % (self.__key, value, type(value)))
self.__value = value
@property
def group(self):
components = self.__getComponents()
if len(components) == 1:
return 'DEFAULT'
else:
return components[0]
@property
def tail(self):
components = self.__getComponents()
if len(components) == 1:
return self.key
else:
return components[1]
@property
def help(self):
return self.__help
def __getComponents(self):
return self.key.split('_', 1)
class KargsExtender(object):
def __init__(self, optionsKargs):
self.__optionsKargs = optionsKargs
def __call__(self, optionsList):
optionsKeyset = set([option.key for option in optionsList])
kaKeyset = set(self.__optionsKargs.keys())
if not optionsKeyset.issuperset(kaKeyset):
raise libavg.Exception(
'No such option/s: %s' % list(kaKeyset - optionsKeyset))
for option in optionsList:
if option.key in self.__optionsKargs:
option.value = self.__optionsKargs[option.key]
return optionsList
class HelpPrintingOptionParser(optparse.OptionParser):
def error(self, *args, **kargs):
self.print_help()
optparse.OptionParser.error(self, *args, **kargs)
class ArgvExtender(object):
def __init__(self, appVersionInfo, args=None):
self.__appVersionInfo = appVersionInfo
self.__parser = HelpPrintingOptionParser()
self.__args = args
self.__parsedArgs = None
def __call__(self, optionsList):
self.__parser.add_option('-v', '--version', dest='version', action='store_true',
help='print libavg and application version information')
groups = self.__groupOptionsKeys(optionsList)
for group in sorted(groups):
parserGroup = optparse.OptionGroup(self.__parser,
'%s section' % group.title())
keys = sorted(groups[group])
for option in [option for option in optionsList if option.key in keys]:
cliKey = '--%s' % option.key.replace('_', '-').lower()
currentValue = option.value if option.value else '<undefined>'
help = '[Default: %s]' % currentValue
if option.help:
help = '%s %s' % (option.help, help)
parserGroup.add_option(cliKey, help=help)
self.__parser.add_option_group(parserGroup)
if self.__args is None:
self.__args = sys.argv[1:]
self.__parsedArgs = self.__parser.parse_args(args=self.__args)
parsedOptions = self.__parsedArgs[0]
if parsedOptions.version:
print 'libavg'
vi = libavg.VersionInfo()
print ' version : %s' % vi.full
print ' builder : %s (%s)' % (vi.builder, vi.buildtime)
print
print 'application'
print ' version: %s' % self.__appVersionInfo
sys.exit(0)
for key, value in parsedOptions.__dict__.iteritems():
if value is not None:
for option in optionsList:
if option.key == key:
option.value = value
return optionsList
@property
def parsedArgs(self):
if self.__parsedArgs is None:
raise libavg.Exception(
'Cannot provide parsedArgs before applying the extender')
return self.__parsedArgs
@property
def parser(self):
return self.__parser
def __groupOptionsKeys(self, optionsList):
groups = {}
for option in optionsList:
if not option.group in groups:
groups[option.group] = []
groups[option.group].append(option.key)
return groups
class Settings(object):
def __init__(self, defaults=[]):
if (type(defaults) not in (tuple, list) or
not all([isinstance(opt, Option) for opt in defaults])):
raise ValueError('Settings must be initialized with a list '
'of Option instances')
self.__options = []
for option in defaults:
self.addOption(option)
def __iter__(self):
return self.__options.__iter__()
def applyExtender(self, extender):
self.__options = extender(self.__options)
def hasOption(self, key):
return self.__getOptionOrNone(key) is not None
def getOption(self, key):
option = self.__getOptionOrNone(key)
if option is None:
raise libavg.Exception('Cannot find key %s in the settings' % key)
return option
def get(self, key, convertFunc=lambda v: v):
option = self.getOption(key)
try:
return convertFunc(option.value)
except (TypeError, ValueError), e:
raise ValueError('%s (option=%s)' % (e, option))
def getJson(self, key):
import json
return self.get(key, json.loads)
def getPoint2D(self, key):
value = self.get(key)
maybeTuple = re.split(r'\s*[,xX]\s*', value)
if len(maybeTuple) != 2:
raise ValueError('Cannot convert key %s value %s to Point2D' % (key, value))
return libavg.Point2D(map(float, maybeTuple))
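# Accepted spellings for a Point2D setting (added; values are illustrative):
# "640, 480", "640x480" and "640X480" all parse to Point2D(640, 480), since
# the value is split on ',', 'x' or 'X' with optional surrounding whitespace.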
def getInt(self, key):
return self.get(key, int)
def getFloat(self, key):
return self.get(key, float)
def getBoolean(self, key):
value = self.get(key).lower()
if value in ('yes', 'true'):
return True
elif value in ('no', 'false'):
return False
else:
raise ValueError('Cannot convert %s to boolean' % value)
def set(self, key, value):
option = self.getOption(key)
option.value = value
def addOption(self, option):
if not isinstance(option, Option):
raise TypeError('Must be an instance of Option')
if self.__getOptionOrNone(option.key):
raise libavg.Exception('Option %s has been already defined' % option.key)
self.__options.append(option)
def __getOptionOrNone(self, key):
for option in self.__options:
if option.key == key:
return option
return None
| lgpl-2.1 |
mypinballs/whirlwind | loader.py | 1 | 3413 | # Loader
__author__="jim"
__date__ ="$Jan 18, 2011 1:36:37 PM$"
import procgame
import locale
from procgame import *
import random
import os
import logging
from procgame._version import __version_info__
base_path = config.value_for_key_path('base_path')
game_path = base_path+"games/whirlwind/"
speech_path = game_path +"speech/"
sound_path = game_path +"sound/"
music_path = game_path +"music/"
machine_config_path = game_path + "config/machine.yaml"
class Loader(game.Mode):
def __init__(self, game, priority):
super(Loader, self).__init__(game, priority)
self.log = logging.getLogger('testrig.loader')
self.game.sound.register_sound('up_down', sound_path+"gun_shot.aiff")
self.game.sound.register_sound('select', sound_path+"evil_laugh.aiff")
proc_game_version = '.'.join(map(str, __version_info__))
self.selection=0
spacer = ' '
self.choices=['Whitewood Tester v'+str(config.value_for_key_path('build_version'))+' p'+proc_game_version] #improve to pull versions from config files
self.dates=[str(config.value_for_key_path('build_date'))+spacer]
self.reset()
def reset(self):
self.bgnd_layer = dmd.FrameLayer(opaque=False, frame=dmd.Animation().load(game_path+"dmd/testrig_bgnd.dmd").frames[0])
self.text_layer = dmd.TextLayer(3, 7, self.game.fonts['tiny7'], "left", opaque=False)
self.date_layer = dmd.TextLayer(3, 14, self.game.fonts['tiny7'], "left", opaque=False)
self.layer = dmd.GroupedLayer(128, 32, [self.bgnd_layer, self.text_layer, self.date_layer])#set clear time
def mode_started(self):
self.select(0)
def mode_tick(self):
pass
def select(self,number=None):
if number:
self.selection=number
else:
self.selection+=1
if self.selection>=len(self.choices):
self.selection=0
self.text_layer.set_text(self.choices[self.selection],blink_frames=10)
self.date_layer.set_text(self.dates[self.selection])
self.game.sound.play('up_down')
def sw_start_active(self, sw):
self.log.info(self.selection)
self.game.sound.play('select')
if self.selection == 0:
self.launch_testrig()
def sw_flipperLwL_active(self, sw):
self.select()
def sw_flipperLwR_active(self, sw):
self.select()
def launch_testrig(self):
self.stop_proc()
# Import and run the startup script, further execution of this script is halted until the run_loop is stopped.
import base
base.main()
# Reset mode & restart P-ROC / pyprocgame
self.mode_started()
self.restart_proc()
def stop_proc(self):
self.game.sound.stop_music()
self.game.end_run_loop()
while len(self.game.dmd.frame_handlers) > 0:
del self.game.dmd.frame_handlers[0]
del self.game.proc
def restart_proc(self):
self.game.proc = self.game.create_pinproc()
self.game.proc.reset(1)
self.game.load_config(self.game.yamlpath)
#self.game.enable_flippers(True)
self.game.dmd.frame_handlers.append(self.game.proc.dmd_draw)
self.game.dmd.frame_handlers.append(self.game.set_last_frame)
self.game.run_loop() | gpl-3.0 |
borysiasty/QGIS | python/plugins/processing/gui/PointSelectionPanel.py | 45 | 3329 | # -*- coding: utf-8 -*-
"""
***************************************************************************
PointSelectionPanel.py
---------------------
Date : February 2016
Copyright : (C) 2016 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'February 2016'
__copyright__ = '(C) 2016, Alexander Bruy'
import os
import warnings
from qgis.core import (QgsProject,
QgsReferencedPointXY,
QgsPointXY)
from qgis.PyQt import uic
from qgis.utils import iface
from processing.gui.PointMapTool import PointMapTool
pluginPath = os.path.split(os.path.dirname(__file__))[0]
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
WIDGET, BASE = uic.loadUiType(
os.path.join(pluginPath, 'ui', 'widgetBaseSelector.ui'))
class PointSelectionPanel(BASE, WIDGET):
def __init__(self, dialog, default=None):
super(PointSelectionPanel, self).__init__(None)
self.setupUi(self)
self.btnSelect.clicked.connect(self.selectOnCanvas)
self.dialog = dialog
self.crs = QgsProject.instance().crs()
if iface is not None:
canvas = iface.mapCanvas()
self.prevMapTool = canvas.mapTool()
self.tool = PointMapTool(canvas)
self.tool.canvasClicked.connect(self.updatePoint)
self.tool.complete.connect(self.pointPicked)
else:
self.prevMapTool = None
self.tool = None
if default:
tokens = str(default).split(',')
if len(tokens) == 2:
try:
float(tokens[0])
float(tokens[1])
self.leText.setText(str(default))
except:
pass
def selectOnCanvas(self):
canvas = iface.mapCanvas()
canvas.setMapTool(self.tool)
self.dialog.showMinimized()
def updatePoint(self, point, button):
s = '{},{}'.format(point.x(), point.y())
self.crs = QgsProject.instance().crs()
if self.crs.isValid():
s += ' [' + self.crs.authid() + ']'
self.leText.setText(s)
def pointPicked(self):
canvas = iface.mapCanvas()
canvas.setMapTool(self.prevMapTool)
self.dialog.showNormal()
self.dialog.raise_()
self.dialog.activateWindow()
def getValue(self):
if str(self.leText.text()).strip() != '':
return str(self.leText.text())
else:
return None
def setPointFromString(self, s):
self.leText.setText(s)
| gpl-2.0 |
viranch/exodus | resources/lib/modules/directstream.py | 2 | 8802 | # -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,os,urllib,urlparse,json,binascii
from resources.lib.modules import client
def google(url):
try:
netloc = urlparse.urlparse(url.strip().lower()).netloc
netloc = netloc.split('.google')[0]
if netloc == 'docs' or netloc == 'drive':
url = url.split('/preview', 1)[0]
url = url.replace('drive.google.com', 'docs.google.com')
headers = {'User-Agent': client.agent()}
result = client.request(url, output='extended', headers=headers)
try: headers['Cookie'] = result[2]['Set-Cookie']
except: pass
result = result[0]
if netloc == 'docs' or netloc == 'drive':
result = re.compile('"fmt_stream_map",(".+?")').findall(result)[0]
result = json.loads(result)
result = [i.split('|')[-1] for i in result.split(',')]
result = sum([googletag(i) for i in result], [])
elif netloc == 'photos':
result = result.replace('\r','').replace('\n','').replace('\t','')
result = re.compile('"\d*/\d*x\d*.+?","(.+?)"').findall(result)[0]
result = result.replace('\\u003d','=').replace('\\u0026','&')
result = re.compile('url=(.+?)&').findall(result)
result = [urllib.unquote(i) for i in result]
result = [googletag(i)[0] for i in result]
elif netloc == 'picasaweb':
id = re.compile('#(\d*)').findall(url)[0]
result = re.search('feedPreload:\s*(.*}]}})},', result, re.DOTALL).group(1)
result = json.loads(result)['feed']['entry']
if len(result) > 1: result = [i for i in result if str(id) in i['link'][0]['href']][0]
elif len(result) == 1: result = result[0]
result = result['media']['content']
result = [i['url'] for i in result if 'video' in i['type']]
result = sum([googletag(i) for i in result], [])
elif netloc == 'plus':
id = (urlparse.urlparse(url).path).split('/')[-1]
result = result.replace('\r','').replace('\n','').replace('\t','')
result = result.split('"%s"' % id)[-1].split(']]')[0]
result = result.replace('\\u003d','=').replace('\\u0026','&')
result = re.compile('url=(.+?)&').findall(result)
result = [urllib.unquote(i) for i in result]
result = [googletag(i)[0] for i in result]
url = []
try: url += [[i for i in result if i['quality'] == '1080p'][0]]
except: pass
try: url += [[i for i in result if i['quality'] == 'HD'][0]]
except: pass
try: url += [[i for i in result if i['quality'] == 'SD'][0]]
except: pass
for i in url: i.update({'url': i['url'] + '|%s' % urllib.urlencode(headers)})
if url == []: return
return url
except:
return
def googletag(url):
quality = re.compile('itag=(\d*)').findall(url)
quality += re.compile('=m(\d*)$').findall(url)
try: quality = quality[0]
except: return []
if quality in ['266', '272', '313']:
return [{'quality': '4K', 'url': url}]
if quality in ['264', '271']:
return [{'quality': '1440p', 'url': url}]
if quality in ['37', '137', '299', '96', '248', '303', '46']:
return [{'quality': '1080p', 'url': url}]
elif quality in ['22', '84', '136', '298', '120', '95', '247', '302', '45', '102']:
return [{'quality': 'HD', 'url': url}]
elif quality in ['35', '44', '59','135', '244', '94']:
return [{'quality': 'SD', 'url': url}]
elif quality in ['18', '34', '43', '82', '100', '101', '134', '243', '93']:
return [{'quality': 'SD', 'url': url}]
elif quality in ['5', '6', '36', '83', '133', '242', '92', '132']:
return [{'quality': 'SD', 'url': url}]
else:
return []
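# Quality mapping sketch (added; the URLs are made up):
#   googletag('http://host/videoplayback?itag=137') -> [{'quality': '1080p', 'url': ...}]
#   googletag('http://host/videoplayback?itag=22')  -> [{'quality': 'HD', 'url': ...}]
#   an unrecognised itag yields [].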
def googlepass(url):
try:
try: headers = dict(urlparse.parse_qsl(url.rsplit('|', 1)[1]))
except: headers = None
url = url.split('|')[0].replace('\\', '')
url = client.request(url, headers=headers, output='geturl')
if 'requiressl=yes' in url: url = url.replace('http://', 'https://')
else: url = url.replace('https://', 'http://')
if headers: url += '|%s' % urllib.urlencode(headers)
return url
except:
return
def vk(url):
try:
query = urlparse.parse_qs(urlparse.urlparse(url).query)
try: oid, video_id = query['oid'][0], query['id'][0]
except: oid, video_id = re.findall('\/video(.*)_(.*)', url)[0]
sources_url = 'http://vk.com/al_video.php?act=show_inline&al=1&video=%s_%s' % (oid, video_id)
html = client.request(sources_url)
html = re.sub(r'[^\x00-\x7F]+', ' ', html)
sources = re.findall('(\d+)x\d+.+?(http.+?\.m3u8.+?)n', html)
if not sources:
sources = re.findall('"url(\d+)"\s*:\s*"(.+?)"', html)
sources = [(i[0], i[1].replace('\\', '')) for i in sources]
sources = dict(sources)
url = []
try: url += [{'quality': 'HD', 'url': sources['720']}]
except: pass
try: url += [{'quality': 'SD', 'url': sources['540']}]
except: pass
try: url += [{'quality': 'SD', 'url': sources['480']}]
except: pass
if not url == []: return url
try: url += [{'quality': 'SD', 'url': sources['360']}]
except: pass
if not url == []: return url
try: url += [{'quality': 'SD', 'url': sources['240']}]
except: pass
if not url == []: return url
except:
return
def odnoklassniki(url):
try:
url = re.compile('//.+?/.+?/([\w]+)').findall(url)[0]
url = 'http://ok.ru/dk?cmd=videoPlayerMetadata&mid=%s' % url
result = client.request(url)
result = re.sub(r'[^\x00-\x7F]+',' ', result)
result = json.loads(result)['videos']
try: hd = [{'quality': '4K', 'url': i['url']} for i in result if i['name'] == 'ultra']
except: pass
try: hd += [{'quality': '1440p', 'url': i['url']} for i in result if i['name'] == 'quad']
except: pass
try: hd += [{'quality': '1080p', 'url': i['url']} for i in result if i['name'] == 'full']
except: pass
try: hd += [{'quality': 'HD', 'url': i['url']} for i in result if i['name'] == 'hd']
except: pass
try: sd = [{'quality': 'SD', 'url': i['url']} for i in result if i['name'] == 'sd']
except: pass
try: sd += [{'quality': 'SD', 'url': i['url']} for i in result if i['name'] == 'low']
except: pass
try: sd += [{'quality': 'SD', 'url': i['url']} for i in result if i['name'] == 'lowest']
except: pass
try: sd += [{'quality': 'SD', 'url': i['url']} for i in result if i['name'] == 'mobile']
except: pass
url = hd + sd[:1]
if not url == []: return url
except:
return
def cldmailru(url):
try:
v = url.split('public')[-1]
r = client.request(url)
r = re.sub(r'[^\x00-\x7F]+',' ', r)
tok = re.findall('"tokens"\s*:\s*{\s*"download"\s*:\s*"([^"]+)', r)[0]
url = re.findall('"weblink_get"\s*:\s*\[.+?"url"\s*:\s*"([^"]+)', r)[0]
url = '%s%s?key=%s' % (url, v, tok)
return url
except:
return
def yandex(url):
try:
cookie = client.request(url, output='cookie')
r = client.request(url, cookie=cookie)
r = re.sub(r'[^\x00-\x7F]+',' ', r)
sk = re.findall('"sk"\s*:\s*"([^"]+)', r)[0]
idstring = re.findall('"id"\s*:\s*"([^"]+)', r)[0]
idclient = binascii.b2a_hex(os.urandom(16))
post = {'idClient': idclient, 'version': '3.9.2', 'sk': sk, '_model.0': 'do-get-resource-url', 'id.0': idstring}
post = urllib.urlencode(post)
r = client.request('https://yadi.sk/models/?_m=do-get-resource-url', post=post, cookie=cookie)
r = json.loads(r)
url = r['models'][0]['data']['file']
return url
except:
return
| gpl-3.0 |
oasiswork/odoo | addons/portal_project_issue/tests/__init__.py | 260 | 1086 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_access_rights
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jakirkham/ilastik | bin/convert_predictions_to_segmentation.py | 3 | 4606 | import os
import copy
import h5py
from lazyflow.utility import PathComponents
from lazyflow.graph import Graph
from lazyflow.operators.ioOperators import OpInputDataReader, OpFormattedDataExport
from ilastik.applets.pixelClassification.opPixelClassification import OpArgmaxChannel
from ilastik.applets.dataExport.dataExportApplet import DataExportApplet
def convert_predictions_to_segmentation( input_paths, parsed_export_args ):
"""
Read exported pixel predictions and calculate/export the segmentation.
input_paths: The paths to the prediction output files. If hdf5, each must include the internal dataset name.
parsed_export_args: The already-parsed cmd-line arguments generated from a DataExportApplet-compatible ArgumentParser.
"""
graph = Graph()
opReader = OpInputDataReader(graph=graph)
opReader.WorkingDirectory.setValue( os.getcwd() )
opArgmaxChannel = OpArgmaxChannel( graph=graph )
opArgmaxChannel.Input.connect( opReader.Output )
opExport = OpFormattedDataExport( graph=graph )
opExport.Input.connect( opArgmaxChannel.Output )
# Apply command-line arguments.
DataExportApplet._configure_operator_with_parsed_args(parsed_export_args, opExport)
last_progress = [-1]
def print_progress(progress_percent):
if progress_percent != last_progress[0]:
last_progress[0] = progress_percent
sys.stdout.write( " {}".format(progress_percent) )
opExport.progressSignal.subscribe(print_progress)
for input_path in input_paths:
opReader.FilePath.setValue(input_path)
input_pathcomp = PathComponents(input_path)
opExport.OutputFilenameFormat.setValue(str(input_pathcomp.externalPath))
output_path = opExport.ExportPath.value
output_pathcomp = PathComponents( output_path )
output_pathcomp.filenameBase += "_Segmentation"
opExport.OutputFilenameFormat.setValue(str(output_pathcomp.externalPath))
print "Exporting results to : {}".format( opExport.ExportPath.value )
sys.stdout.write("Progress:")
# Begin export
opExport.run_export()
sys.stdout.write("\n")
print "DONE."
def all_dataset_internal_paths(f):
"""
Return a list of all the internal datasets in an hdf5 file.
"""
allkeys = []
f.visit(allkeys.append)
dataset_keys = filter(lambda key: isinstance(f[key], h5py.Dataset),
allkeys)
return dataset_keys
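# Usage sketch (added; the file name is hypothetical):
#
#   with h5py.File('/tmp/predictions.h5', 'r') as f:
#       print all_dataset_internal_paths(f)  # e.g. ['exported_data']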
if __name__ == "__main__":
import sys
import argparse
#sys.argv += "/tmp/example_slice.h5/data /tmp/example_slice2.h5/data --export_drange=(0,255) --output_format=png --pipeline_result_drange=(1,2)".split()
# Construct a parser with all the 'normal' export options, and add arg for prediction_image_paths.
parser = DataExportApplet.make_cmdline_parser( argparse.ArgumentParser() )
parser.add_argument("prediction_image_paths", nargs='+', help="Path(s) to your exported predictions.")
parsed_args = parser.parse_args()
parsed_args, unused_args = DataExportApplet.parse_known_cmdline_args( sys.argv[1:], parsed_args )
# As a convenience, auto-determine the internal dataset path if possible.
for index, input_path in enumerate(parsed_args.prediction_image_paths):
path_comp = PathComponents(input_path, os.getcwd())
if not parsed_args.output_internal_path:
parsed_args.output_internal_path = "segmentation"
if path_comp.extension in PathComponents.HDF5_EXTS and path_comp.internalDatasetName == "":
with h5py.File(path_comp.externalPath, 'r') as f:
all_internal_paths = all_dataset_internal_paths(f)
if len(all_internal_paths) == 1:
path_comp.internalPath = all_internal_paths[0]
parsed_args.prediction_image_paths[index] = path_comp.totalPath()
elif len(all_internal_paths) == 0:
sys.stderr.write("Could not find any datasets in your input file:\n"
"{}\n".format(input_path))
sys.exit(1)
else:
sys.stderr.write("Found more than one dataset in your input file:\n"
"{}\n".format(input_path) +
"Please specify the dataset name, e.g. /path/to/myfile.h5/internal/dataset_name\n")
sys.exit(1)
sys.exit( convert_predictions_to_segmentation( parsed_args.prediction_image_paths,
parsed_args ) )
| gpl-3.0 |
neoteric-cm/neoteric | neoteric/modules/openssh/sshd.py | 2 | 2122 | """
"""
import os
from neoteric.util.files import read_file, dir_of_file
from neoteric.parsers.flat import FlatParser
from neoteric.resource.package import Package
from neoteric.resource.service import Service
from neoteric.util.type import type_assert
__all__ = ['SshdConfig', 'configure', 'SSHD_CONFIG_TEMPLATE']
SSHD_CONFIG_TEMPLATE = os.path.join(
dir_of_file(__file__), 'files', 'etc', 'ssh', 'sshd_config')
class SshdConfig(FlatParser):
def __init__(
self,
path='/etc/ssh/sshd_config',
content=read_file(SSHD_CONFIG_TEMPLATE),
comment='#',
split=None,
callbacks=None,
owner='root',
group='root',
mode='0600'):
""" An sshd config file. See FlatParser for methods to modify
the keys and values
@path: str, The destination path that the file will be
written to
@content: str, The contents of the file
@comment: str, Lines beginning with this string are ignored
@split: str, Passed to Python str.split() to split the key
from the value
@callbacks: list or None, the callbacks to pass to the File
@owner: str, The name of the UNIX owner
@group: str, The name of the UNIX group
@mode: str, the file mode to set
"""
FlatParser.__init__(
self, path, content, comment, split, callbacks,
owner=owner, group=group, mode=mode)
class SshdServerRedHat:
def __init__(self):
self.service = Service('sshd')
callbacks = [self.service.restart_callback]
self.package = Package('openssh-server', callbacks=callbacks)
self.sshd_config = SshdConfig(callbacks=callbacks)
def configure(self):
configure(self.sshd_config, self.service, self.package)
def configure(sshd_config, service, package):
type_assert(sshd_config, SshdConfig)
type_assert(package, Package)
type_assert(service, Service)
package.action()
sshd_config.action()
service.action()
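# Illustrative usage (a sketch; assumes a RedHat-family host and that each
# resource's action() applies it, per the neoteric resource model):
#
#     server = SshdServerRedHat()
#     server.configure()  # install openssh-server, write sshd_config, restart sshd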
| gpl-3.0 |
smartscheduling/scikit-learn-categorical-tree | examples/tree/plot_tree_regression_multioutput.py | 43 | 1791 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with decision trees.
The :ref:`decision trees <tree>`
are used to simultaneously predict the noisy x and y observations of a circle
given a single underlying feature. As a result, the model learns local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn overly fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
clf_1 = DecisionTreeRegressor(max_depth=2)
clf_2 = DecisionTreeRegressor(max_depth=5)
clf_3 = DecisionTreeRegressor(max_depth=8)
clf_1.fit(X, y)
clf_2.fit(X, y)
clf_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = clf_1.predict(X_test)
y_2 = clf_2.predict(X_test)
y_3 = clf_3.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(y[:, 0], y[:, 1], c="k", label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="g", label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="r", label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="b", label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
bluevoda/BloggyBlog | lib/python3.4/site-packages/wheel/pep425tags.py | 233 | 5341 | """Generate and work with PEP 425 Compatibility Tags."""
import sys
import warnings
try:
import sysconfig
except ImportError: # pragma nocover
# Python < 2.7
import distutils.sysconfig as sysconfig
import distutils.util
def get_config_var(var):
try:
return sysconfig.get_config_var(var)
except IOError as e: # pip Issue #1074
warnings.warn("{0}".format(e), RuntimeWarning)
return None
def get_abbr_impl():
"""Return abbreviated implementation name."""
if hasattr(sys, 'pypy_version_info'):
pyimpl = 'pp'
elif sys.platform.startswith('java'):
pyimpl = 'jy'
elif sys.platform == 'cli':
pyimpl = 'ip'
else:
pyimpl = 'cp'
return pyimpl
def get_impl_ver():
"""Return implementation version."""
impl_ver = get_config_var("py_version_nodot")
if not impl_ver or get_abbr_impl() == 'pp':
impl_ver = ''.join(map(str, get_impl_version_info()))
return impl_ver
def get_impl_version_info():
"""Return sys.version_info-like tuple for use in decrementing the minor
version."""
if get_abbr_impl() == 'pp':
# as per https://github.com/pypa/pip/issues/2882
return (sys.version_info[0], sys.pypy_version_info.major,
sys.pypy_version_info.minor)
else:
return sys.version_info[0], sys.version_info[1]
def get_flag(var, fallback, expected=True, warn=True):
"""Use a fallback method for determining SOABI flags if the needed config
var is unset or unavailable."""
val = get_config_var(var)
if val is None:
if warn:
warnings.warn("Config variable '{0}' is unset, Python ABI tag may "
"be incorrect".format(var), RuntimeWarning, 2)
return fallback()
return val == expected
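# Illustrative example (hypothetical values): if sysconfig exposes no
# 'Py_DEBUG' variable, then
#     get_flag('Py_DEBUG', lambda: hasattr(sys, 'gettotalrefcount'))
# falls back to probing the interpreter (True only on debug builds); when the
# variable is set, the flag is simply compared against `expected`.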
def get_abi_tag():
"""Return the ABI tag based on SOABI (if available) or emulate SOABI
(CPython 2, PyPy)."""
soabi = get_config_var('SOABI')
impl = get_abbr_impl()
if not soabi and impl in ('cp', 'pp') and hasattr(sys, 'maxunicode'):
d = ''
m = ''
u = ''
if get_flag('Py_DEBUG',
lambda: hasattr(sys, 'gettotalrefcount'),
warn=(impl == 'cp')):
d = 'd'
if get_flag('WITH_PYMALLOC',
lambda: impl == 'cp',
warn=(impl == 'cp')):
m = 'm'
if get_flag('Py_UNICODE_SIZE',
lambda: sys.maxunicode == 0x10ffff,
expected=4,
warn=(impl == 'cp' and
sys.version_info < (3, 3))) \
and sys.version_info < (3, 3):
u = 'u'
abi = '%s%s%s%s%s' % (impl, get_impl_ver(), d, m, u)
elif soabi and soabi.startswith('cpython-'):
abi = 'cp' + soabi.split('-')[1]
elif soabi:
abi = soabi.replace('.', '_').replace('-', '_')
else:
abi = None
return abi
def get_platform():
"""Return our platform name 'win32', 'linux_x86_64'"""
# XXX remove distutils dependency
return distutils.util.get_platform().replace('.', '_').replace('-', '_')
def get_supported(versions=None, supplied_platform=None):
"""Return a list of supported tags for each version specified in
`versions`.
:param versions: a list of string versions, of the form ["33", "32"],
or None. The first version will be assumed to support our ABI.
"""
supported = []
# Versions must be given with respect to the preference
if versions is None:
versions = []
version_info = get_impl_version_info()
major = version_info[:-1]
# Support all previous minor Python versions.
for minor in range(version_info[-1], -1, -1):
versions.append(''.join(map(str, major + (minor,))))
impl = get_abbr_impl()
abis = []
abi = get_abi_tag()
if abi:
abis[0:0] = [abi]
abi3s = set()
import imp
for suffix in imp.get_suffixes():
if suffix[0].startswith('.abi'):
abi3s.add(suffix[0].split('.', 2)[1])
abis.extend(sorted(list(abi3s)))
abis.append('none')
platforms = []
if supplied_platform:
platforms.append(supplied_platform)
platforms.append(get_platform())
# Current version, current API (built specifically for our Python):
for abi in abis:
for arch in platforms:
supported.append(('%s%s' % (impl, versions[0]), abi, arch))
# No abi / arch, but requires our implementation:
for i, version in enumerate(versions):
supported.append(('%s%s' % (impl, version), 'none', 'any'))
if i == 0:
# Tagged specifically as being cross-version compatible
# (with just the major version specified)
supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any'))
# Major Python version + platform; e.g. binaries not using the Python API
supported.append(('py%s' % (versions[0][0]), 'none', arch))
# No abi / arch, generic Python
for i, version in enumerate(versions):
supported.append(('py%s' % (version,), 'none', 'any'))
if i == 0:
supported.append(('py%s' % (version[0]), 'none', 'any'))
return supported
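# Illustrative output (a sketch; values depend on the running interpreter):
# on CPython 3.4 with SOABI 'cpython-34m' on linux_x86_64, get_supported()
# starts with ('cp34', 'cp34m', 'linux_x86_64') and includes generic tags
# such as ('py34', 'none', 'any') and ('py3', 'none', 'any') toward the end.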
| gpl-3.0 |
manastech/de-bee | gdata/oauth/rsa.py | 225 | 4528 | #!/usr/bin/python
"""
requires tlslite - http://trevp.net/tlslite/
"""
import base64
import binascii
from gdata.tlslite.utils import keyfactory
from gdata.tlslite.utils import cryptomath
# XXX andy: ugly local import due to module name, oauth.oauth
import gdata.oauth as oauth
class OAuthSignatureMethod_RSA_SHA1(oauth.OAuthSignatureMethod):
def get_name(self):
return "RSA-SHA1"
def _fetch_public_cert(self, oauth_request):
# not implemented yet, ideas are:
# (1) do a lookup in a table of trusted certs keyed off of consumer
# (2) fetch via http using a url provided by the requester
# (3) some sort of specific discovery code based on request
#
# either way should return a string representation of the certificate
raise NotImplementedError
def _fetch_private_cert(self, oauth_request):
# not implemented yet, ideas are:
# (1) do a lookup in a table of trusted certs keyed off of consumer
#
# either way should return a string representation of the certificate
raise NotImplementedError
def build_signature_base_string(self, oauth_request, consumer, token):
sig = (
oauth.escape(oauth_request.get_normalized_http_method()),
oauth.escape(oauth_request.get_normalized_http_url()),
oauth.escape(oauth_request.get_normalized_parameters()),
)
key = ''
raw = '&'.join(sig)
return key, raw
def build_signature(self, oauth_request, consumer, token):
key, base_string = self.build_signature_base_string(oauth_request,
consumer,
token)
# Fetch the private key cert based on the request
cert = self._fetch_private_cert(oauth_request)
# Pull the private key from the certificate
privatekey = keyfactory.parsePrivateKey(cert)
# Convert base_string to bytes
#base_string_bytes = cryptomath.createByteArraySequence(base_string)
# Sign using the key
signed = privatekey.hashAndSign(base_string)
return binascii.b2a_base64(signed)[:-1]
def check_signature(self, oauth_request, consumer, token, signature):
decoded_sig = base64.b64decode(signature)
key, base_string = self.build_signature_base_string(oauth_request,
consumer,
token)
# Fetch the public key cert based on the request
cert = self._fetch_public_cert(oauth_request)
# Pull the public key from the certificate
publickey = keyfactory.parsePEMKey(cert, public=True)
# Check the signature
ok = publickey.hashAndVerify(decoded_sig, base_string)
return ok
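# Illustrative round trip (a sketch; uses the test subclass below, whose
# embedded certificates form a matching key pair):
#
#     method = TestOAuthSignatureMethod_RSA_SHA1()
#     sig = method.build_signature(oauth_request, consumer, token=None)
#     assert method.check_signature(oauth_request, consumer, None, sig)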
class TestOAuthSignatureMethod_RSA_SHA1(OAuthSignatureMethod_RSA_SHA1):
def _fetch_public_cert(self, oauth_request):
cert = """
-----BEGIN CERTIFICATE-----
MIIBpjCCAQ+gAwIBAgIBATANBgkqhkiG9w0BAQUFADAZMRcwFQYDVQQDDA5UZXN0
IFByaW5jaXBhbDAeFw03MDAxMDEwODAwMDBaFw0zODEyMzEwODAwMDBaMBkxFzAV
BgNVBAMMDlRlc3QgUHJpbmNpcGFsMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKB
gQC0YjCwIfYoprq/FQO6lb3asXrxLlJFuCvtinTF5p0GxvQGu5O3gYytUvtC2JlY
zypSRjVxwxrsuRcP3e641SdASwfrmzyvIgP08N4S0IFzEURkV1wp/IpH7kH41Etb
mUmrXSwfNZsnQRE5SYSOhh+LcK2wyQkdgcMv11l4KoBkcwIDAQABMA0GCSqGSIb3
DQEBBQUAA4GBAGZLPEuJ5SiJ2ryq+CmEGOXfvlTtEL2nuGtr9PewxkgnOjZpUy+d
4TvuXJbNQc8f4AMWL/tO9w0Fk80rWKp9ea8/df4qMq5qlFWlx6yOLQxumNOmECKb
WpkUQDIDJEoFUzKMVuJf4KO/FJ345+BNLGgbJ6WujreoM1X/gYfdnJ/J
-----END CERTIFICATE-----
"""
return cert
def _fetch_private_cert(self, oauth_request):
cert = """
-----BEGIN PRIVATE KEY-----
MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBALRiMLAh9iimur8V
A7qVvdqxevEuUkW4K+2KdMXmnQbG9Aa7k7eBjK1S+0LYmVjPKlJGNXHDGuy5Fw/d
7rjVJ0BLB+ubPK8iA/Tw3hLQgXMRRGRXXCn8ikfuQfjUS1uZSatdLB81mydBETlJ
hI6GH4twrbDJCR2Bwy/XWXgqgGRzAgMBAAECgYBYWVtleUzavkbrPjy0T5FMou8H
X9u2AC2ry8vD/l7cqedtwMPp9k7TubgNFo+NGvKsl2ynyprOZR1xjQ7WgrgVB+mm
uScOM/5HVceFuGRDhYTCObE+y1kxRloNYXnx3ei1zbeYLPCHdhxRYW7T0qcynNmw
rn05/KO2RLjgQNalsQJBANeA3Q4Nugqy4QBUCEC09SqylT2K9FrrItqL2QKc9v0Z
zO2uwllCbg0dwpVuYPYXYvikNHHg+aCWF+VXsb9rpPsCQQDWR9TT4ORdzoj+Nccn
qkMsDmzt0EfNaAOwHOmVJ2RVBspPcxt5iN4HI7HNeG6U5YsFBb+/GZbgfBT3kpNG
WPTpAkBI+gFhjfJvRw38n3g/+UeAkwMI2TJQS4n8+hid0uus3/zOjDySH3XHCUno
cn1xOJAyZODBo47E+67R4jV1/gzbAkEAklJaspRPXP877NssM5nAZMU0/O/NGCZ+
3jPgDUno6WbJn5cqm8MqWhW1xGkImgRk+fkDBquiq4gPiT898jusgQJAd5Zrr6Q8
AO/0isr/3aa6O6NLQxISLKcPDk2NOccAfS/xOtfOz4sJYM3+Bs4Io9+dZGSDCA54
Lw03eHTNQghS0A==
-----END PRIVATE KEY-----
"""
return cert
| mit |
chyeh727/django | django/middleware/common.py | 26 | 6926 | import logging
import re
from django import http
from django.conf import settings
from django.core import urlresolvers
from django.core.exceptions import PermissionDenied
from django.core.mail import mail_managers
from django.utils.cache import get_conditional_response, set_response_etag
from django.utils.encoding import force_text
logger = logging.getLogger('django.request')
class CommonMiddleware(object):
"""
"Common" middleware for taking care of some basic operations:
- Forbids access to User-Agents in settings.DISALLOWED_USER_AGENTS
- URL rewriting: Based on the APPEND_SLASH and PREPEND_WWW settings,
this middleware appends missing slashes and/or prepends missing
"www."s.
- If APPEND_SLASH is set and the initial URL doesn't end with a
slash, and it is not found in urlpatterns, a new URL is formed by
appending a slash at the end. If this new URL is found in
urlpatterns, then an HTTP-redirect is returned to this new URL;
otherwise the initial URL is processed as usual.
This behavior can be customized by subclassing CommonMiddleware and
overriding the response_redirect_class attribute.
- ETags: If the USE_ETAGS setting is set, ETags will be calculated from
the entire page content and Not Modified responses will be returned
appropriately.
"""
response_redirect_class = http.HttpResponsePermanentRedirect
def process_request(self, request):
"""
Check for denied User-Agents and rewrite the URL based on
settings.APPEND_SLASH and settings.PREPEND_WWW
"""
# Check for denied User-Agents
if 'HTTP_USER_AGENT' in request.META:
for user_agent_regex in settings.DISALLOWED_USER_AGENTS:
if user_agent_regex.search(request.META['HTTP_USER_AGENT']):
raise PermissionDenied('Forbidden user agent')
# Check for a redirect based on settings.PREPEND_WWW
host = request.get_host()
if settings.PREPEND_WWW and host and not host.startswith('www.'):
host = 'www.' + host
# Check if we also need to append a slash so we can do it all
# with a single redirect.
if self.should_redirect_with_slash(request):
path = self.get_full_path_with_slash(request)
else:
path = request.get_full_path()
return self.response_redirect_class('%s://%s%s' % (request.scheme, host, path))
def should_redirect_with_slash(self, request):
"""
Return True if settings.APPEND_SLASH is True and appending a slash to
the request path turns an invalid path into a valid one.
"""
if settings.APPEND_SLASH and not request.get_full_path().endswith('/'):
urlconf = getattr(request, 'urlconf', None)
return (
not urlresolvers.is_valid_path(request.path_info, urlconf)
and urlresolvers.is_valid_path('%s/' % request.path_info, urlconf)
)
return False
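# Illustrative example (hypothetical urlconf): with APPEND_SLASH = True and
# only '/about/' routed, a request for '/about' is invalid as-is but valid
# with a slash appended, so this returns True and the middleware issues a
# permanent redirect to '/about/'.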
def get_full_path_with_slash(self, request):
"""
Return the full path of the request with a trailing slash appended.
Raise a RuntimeError if settings.DEBUG is True and request.method is
GET, PUT, or PATCH.
"""
new_path = request.get_full_path(force_append_slash=True)
if settings.DEBUG and request.method in ('POST', 'PUT', 'PATCH'):
raise RuntimeError(
"You called this URL via %(method)s, but the URL doesn't end "
"in a slash and you have APPEND_SLASH set. Django can't "
"redirect to the slash URL while maintaining %(method)s data. "
"Change your form to point to %(url)s (note the trailing "
"slash), or set APPEND_SLASH=False in your Django settings." % {
'method': request.method,
'url': request.get_host() + new_path,
}
)
return new_path
def process_response(self, request, response):
"""
Calculate the ETag, if needed.
When the status code of the response is 404, it may redirect to a path
with an appended slash if should_redirect_with_slash() returns True.
"""
# If the given URL is "Not Found", then check if we should redirect to
# a path with a slash appended.
if response.status_code == 404:
if self.should_redirect_with_slash(request):
return self.response_redirect_class(self.get_full_path_with_slash(request))
if settings.USE_ETAGS:
if not response.has_header('ETag'):
set_response_etag(response)
if response.has_header('ETag'):
return get_conditional_response(
request,
etag=response['ETag'],
response=response,
)
return response
class BrokenLinkEmailsMiddleware(object):
def process_response(self, request, response):
"""
Send broken link emails for relevant 404 NOT FOUND responses.
"""
if response.status_code == 404 and not settings.DEBUG:
domain = request.get_host()
path = request.get_full_path()
referer = force_text(request.META.get('HTTP_REFERER', ''), errors='replace')
if not self.is_ignorable_request(request, path, domain, referer):
ua = force_text(request.META.get('HTTP_USER_AGENT', '<none>'), errors='replace')
ip = request.META.get('REMOTE_ADDR', '<none>')
mail_managers(
"Broken %slink on %s" % (
('INTERNAL ' if self.is_internal_request(domain, referer) else ''),
domain
),
"Referrer: %s\nRequested URL: %s\nUser agent: %s\n"
"IP address: %s\n" % (referer, path, ua, ip),
fail_silently=True)
return response
def is_internal_request(self, domain, referer):
"""
Returns True if the referring URL is the same domain as the current request.
"""
# Different subdomains are treated as different domains.
return bool(re.match("^https?://%s/" % re.escape(domain), referer))
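# Illustrative example (hypothetical values):
#     is_internal_request('example.com', 'https://example.com/page') -> True
#     is_internal_request('example.com', 'https://sub.example.com/') -> False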
def is_ignorable_request(self, request, uri, domain, referer):
"""
Returns True if the given request *shouldn't* notify the site managers.
"""
# '?' in referer is identified as search engine source
if (not referer or
(not self.is_internal_request(domain, referer) and '?' in referer)):
return True
return any(pattern.search(uri) for pattern in settings.IGNORABLE_404_URLS)
| bsd-3-clause |
googleapis/python-monitoring-dashboards | google/monitoring/dashboard_v1/types/widget.py | 1 | 2098 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.monitoring.dashboard_v1.types import scorecard as gmd_scorecard
from google.monitoring.dashboard_v1.types import text as gmd_text
from google.monitoring.dashboard_v1.types import xychart
from google.protobuf import empty_pb2 as empty # type: ignore
__protobuf__ = proto.module(
package="google.monitoring.dashboard.v1", manifest={"Widget",},
)
class Widget(proto.Message):
r"""Widget contains a single dashboard component and
configuration of how to present the component in the dashboard.
Attributes:
title (str):
Optional. The title of the widget.
xy_chart (~.xychart.XyChart):
A chart of time series data.
scorecard (~.gmd_scorecard.Scorecard):
A scorecard summarizing time series data.
text (~.gmd_text.Text):
A raw string or markdown displaying textual
content.
blank (~.empty.Empty):
A blank space.
"""
title = proto.Field(proto.STRING, number=1)
xy_chart = proto.Field(
proto.MESSAGE, number=2, oneof="content", message=xychart.XyChart,
)
scorecard = proto.Field(
proto.MESSAGE, number=3, oneof="content", message=gmd_scorecard.Scorecard,
)
text = proto.Field(proto.MESSAGE, number=4, oneof="content", message=gmd_text.Text,)
blank = proto.Field(proto.MESSAGE, number=5, oneof="content", message=empty.Empty,)
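# Illustrative construction (a sketch; assumes the Text message in text.py
# exposes a `content` field). Only one member of the "content" oneof may be
# set at a time:
#
#     w = Widget(title="Notes", text=gmd_text.Text(content="hello"))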
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 |
abadger/ansible-modules-core | network/nxos/nxos_smu.py | 19 | 12064 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: nxos_smu
version_added: "2.2"
short_description: Perform SMUs on Cisco NX-OS devices.
description:
- Perform software maintenance upgrades (SMUs) on Cisco NX-OS devices.
extends_documentation_fragment: nxos
author: Gabriele Gerbino (@GGabriele)
notes:
- The module can only activate and commit a package,
not remove or deactivate it.
- Use C(transport=nxapi) to avoid connection timeouts.
options:
pkg:
description:
- Name of the remote package.
required: true
file_system:
description:
- The remote file system of the device. If omitted,
devices that support a file_system parameter will use
their default values.
required: false
default: null
'''
EXAMPLES = '''
- nxos_smu:
pkg: "nxos.CSCuz65185-n9k_EOR-1.0.0-7.0.3.I2.2d.lib32_n9000.rpm"
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
'''
RETURN = '''
file_system:
description: The remote file system of the device.
returned: always
type: string
sample: "bootflash:"
pkg:
description: Name of the remote package
type: string
returned: always
sample: "nxos.CSCuz65185-n9k_EOR-1.0.0-7.0.3.I2.2d.lib32_n9000.rpm"
updates:
description: commands sent to the device
returned: always
type: list
sample: ["install add bootflash:nxos.CSCuz65185-n9k_EOR-1.0.0-7.0.3.I2.2d.lib32_n9000.rpm",
"install activate bootflash:nxos.CSCuz65185-n9k_EOR-1.0.0-7.0.3.I2.2d.lib32_n9000.rpm force",
"install commit bootflash:nxos.CSCuz65185-n9k_EOR-1.0.0-7.0.3.I2.2d.lib32_n9000.rpm"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import time
import json
import collections
# COMMON CODE FOR MIGRATION
import re
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
from ansible.module_utils.shell import ShellError
try:
from ansible.module_utils.nxos import get_module
except ImportError:
from ansible.module_utils.nxos import NetworkModule
def to_list(val):
if isinstance(val, (list, tuple)):
return list(val)
elif val is not None:
return [val]
else:
return list()
class CustomNetworkConfig(NetworkConfig):
def expand_section(self, configobj, S=None):
if S is None:
S = list()
S.append(configobj)
for child in configobj.children:
if child in S:
continue
self.expand_section(child, S)
return S
def get_object(self, path):
for item in self.items:
if item.text == path[-1]:
parents = [p.text for p in item.parents]
if parents == path[:-1]:
return item
def to_block(self, section):
return '\n'.join([item.raw for item in section])
def get_section(self, path):
try:
section = self.get_section_objects(path)
return self.to_block(section)
except ValueError:
return list()
def get_section_objects(self, path):
if not isinstance(path, list):
path = [path]
obj = self.get_object(path)
if not obj:
raise ValueError('path does not exist in config')
return self.expand_section(obj)
def add(self, lines, parents=None):
"""Adds one or lines of configuration
"""
ancestors = list()
offset = 0
obj = None
## global config command
if not parents:
for line in to_list(lines):
item = ConfigLine(line)
item.raw = line
if item not in self.items:
self.items.append(item)
else:
for index, p in enumerate(parents):
try:
i = index + 1
obj = self.get_section_objects(parents[:i])[0]
ancestors.append(obj)
except ValueError:
# add parent to config
offset = index * self.indent
obj = ConfigLine(p)
obj.raw = p.rjust(len(p) + offset)
if ancestors:
obj.parents = list(ancestors)
ancestors[-1].children.append(obj)
self.items.append(obj)
ancestors.append(obj)
# add child objects
for line in to_list(lines):
# check if child already exists
for child in ancestors[-1].children:
if child.text == line:
break
else:
offset = len(parents) * self.indent
item = ConfigLine(line)
item.raw = line.rjust(len(line) + offset)
item.parents = ancestors
ancestors[-1].children.append(item)
self.items.append(item)
def get_network_module(**kwargs):
try:
return get_module(**kwargs)
except NameError:
return NetworkModule(**kwargs)
def get_config(module, include_defaults=False):
config = module.params['config']
if not config:
try:
config = module.get_config()
except AttributeError:
defaults = module.params['include_defaults']
config = module.config.get_config(include_defaults=defaults)
return CustomNetworkConfig(indent=2, contents=config)
def load_config(module, candidate):
config = get_config(module)
commands = candidate.difference(config)
commands = [str(c).strip() for c in commands]
save_config = module.params['save']
result = dict(changed=False)
if commands:
if not module.check_mode:
try:
module.configure(commands)
except AttributeError:
module.config(commands)
if save_config:
try:
module.config.save_config()
except AttributeError:
module.execute(['copy running-config startup-config'])
result['changed'] = True
result['updates'] = commands
return result
# END OF COMMON CODE
def execute_show(cmds, module, command_type=None):
command_type_map = {
'cli_show': 'json',
'cli_show_ascii': 'text'
}
try:
if command_type:
response = module.execute(cmds, command_type=command_type)
else:
response = module.execute(cmds)
except ShellError:
clie = get_exception()
module.fail_json(msg='Error sending {0}'.format(cmds),
error=str(clie))
except AttributeError:
try:
if command_type:
command_type = command_type_map.get(command_type)
module.cli.add_commands(cmds, output=command_type)
response = module.cli.run_commands()
else:
module.cli.add_commands(cmds, raw=True)
response = module.cli.run_commands()
except ShellError:
clie = get_exception()
module.fail_json(msg='Error sending {0}'.format(cmds),
error=str(clie))
return response
def execute_show_command(command, module, command_type='cli_show'):
if module.params['transport'] == 'cli':
cmds = [command]
body = execute_show(cmds, module)
elif module.params['transport'] == 'nxapi':
cmds = [command]
body = execute_show(cmds, module, command_type=command_type)
return body
def remote_file_exists(module, dst, file_system='bootflash:'):
command = 'dir {0}/{1}'.format(file_system, dst)
body = execute_show_command(command, module, command_type='cli_show_ascii')
if 'No such file' in body[0]:
return False
return True
def execute_config_command(commands, module):
try:
output = module.configure(commands)
except ShellError:
clie = get_exception()
module.fail_json(msg='Error sending CLI commands',
error=str(clie), commands=commands)
except AttributeError:
try:
commands.insert(0, 'configure')
module.cli.add_commands(commands, output='config')
output = module.cli.run_commands()
except ShellError:
clie = get_exception()
module.fail_json(msg='Error sending CLI commands',
error=str(clie), commands=commands)
return output
def apply_patch(module, commands):
for command in commands:
response = execute_config_command([command], module)
time.sleep(5)
if 'failed' in response:
module.fail_json(msg="Operation failed!", response=response)
def get_commands(module, pkg, file_system):
commands = []
splitted_pkg = pkg.split('.')
fixed_pkg = '.'.join(splitted_pkg[0:-1])
command = 'show install inactive'
inactive_body = execute_show_command(command, module,
command_type='cli_show_ascii')
command = 'show install active'
active_body = execute_show_command(command, module,
command_type='cli_show_ascii')
if fixed_pkg not in inactive_body[0] and fixed_pkg not in active_body[0]:
commands.append('install add {0}{1}'.format(file_system, pkg))
if fixed_pkg not in active_body[0]:
commands.append('install activate {0}{1} force'.format(
file_system, pkg))
command = 'show install committed'
install_body = execute_show_command(command, module,
command_type='cli_show_ascii')
if fixed_pkg not in install_body[0]:
commands.append('install commit {0}{1}'.format(file_system, pkg))
return commands
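# Illustrative outcome (hypothetical package name; assumes the package does
# not yet appear in the inactive, active, or committed lists):
# get_commands() returns the full three-step sequence
#     ['install add bootflash:<pkg>',
#      'install activate bootflash:<pkg> force',
#      'install commit bootflash:<pkg>']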
def main():
argument_spec = dict(
pkg=dict(required=True),
file_system=dict(required=False, default='bootflash:'),
include_defaults=dict(default=False),
config=dict(),
save=dict(type='bool', default=False)
)
module = get_network_module(argument_spec=argument_spec,
supports_check_mode=True)
pkg = module.params['pkg']
file_system = module.params['file_system']
changed = False
remote_exists = remote_file_exists(module, pkg, file_system=file_system)
if not remote_exists:
module.fail_json(msg="The requested package doesn't exist "
"on the device")
commands = get_commands(module, pkg, file_system)
if not module.check_mode and commands:
try:
apply_patch(module, commands)
changed = True
except ShellError:
e = get_exception()
module.fail_json(msg=str(e))
if 'configure' in commands:
commands.pop(0)
module.exit_json(changed=changed,
pkg=pkg,
file_system=file_system,
updates=commands)
if __name__ == '__main__':
main()
| gpl-3.0 |
proversity-org/edx-platform | lms/djangoapps/class_dashboard/dashboard_data.py | 15 | 23597 | """
Computes the data to display on the Instructor Dashboard
"""
import json
from django.db.models import Count
from django.utils.translation import ugettext as _
from opaque_keys.edx.locator import BlockUsageLocator
from six import text_type
from courseware import models
from instructor_analytics.csvs import create_csv_response
from util.json_request import JsonResponse
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.inheritance import own_metadata
# Used to limit the length of list displayed to the screen.
MAX_SCREEN_LIST_LENGTH = 250
def get_problem_grade_distribution(course_id):
"""
Returns the grade distribution per problem for the course
`course_id` the course ID for the course interested in
Output is 2 dicts:
'prob-grade_distrib' where the key is the problem 'module_id' and the value is a dict with:
'max_grade' - max grade for this problem
'grade_distrib' - array of tuples (`grade`,`count`).
'total_student_count' where the key is problem 'module_id' and the value is number of students
attempting the problem
"""
# Aggregate query on studentmodule table for grade data for all problems in course
db_query = models.StudentModule.objects.filter(
course_id__exact=course_id,
grade__isnull=False,
module_type__exact="problem",
).values('module_state_key', 'grade', 'max_grade').annotate(count_grade=Count('grade'))
prob_grade_distrib = {}
total_student_count = {}
# Loop through resultset building data for each problem
for row in db_query:
curr_problem = row['module_state_key'].map_into_course(course_id)
# Build set of grade distributions for each problem that has student responses
if curr_problem in prob_grade_distrib:
prob_grade_distrib[curr_problem]['grade_distrib'].append((row['grade'], row['count_grade']))
if (prob_grade_distrib[curr_problem]['max_grade'] != row['max_grade']) and \
(prob_grade_distrib[curr_problem]['max_grade'] < row['max_grade']):
prob_grade_distrib[curr_problem]['max_grade'] = row['max_grade']
else:
prob_grade_distrib[curr_problem] = {
'max_grade': row['max_grade'],
'grade_distrib': [(row['grade'], row['count_grade'])]
}
# Build set of total students attempting each problem
total_student_count[curr_problem] = total_student_count.get(curr_problem, 0) + row['count_grade']
return prob_grade_distrib, total_student_count
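# Illustrative shapes (hypothetical values; keys are problem usage keys
# mapped into the course):
#     prob_grade_distrib[key] == {'max_grade': 10.0,
#                                 'grade_distrib': [(5.0, 3), (10.0, 12)]}
#     total_student_count[key] == 15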
def get_sequential_open_distrib(course_id):
"""
Returns the number of students that opened each subsection/sequential of the course
`course_id` the course ID for the course interested in
Outputs a dict mapping the 'module_id' to the number of students that have opened that subsection/sequential.
"""
# Aggregate query on studentmodule table for "opening a subsection" data
db_query = models.StudentModule.objects.filter(
course_id__exact=course_id,
module_type__exact="sequential",
).values('module_state_key').annotate(count_sequential=Count('module_state_key'))
# Build set of "opened" data for each subsection that has "opened" data
sequential_open_distrib = {}
for row in db_query:
row_loc = row['module_state_key'].map_into_course(course_id)
sequential_open_distrib[row_loc] = row['count_sequential']
return sequential_open_distrib
def get_problem_set_grade_distrib(course_id, problem_set):
"""
Returns the grade distribution for the problems specified in `problem_set`.
`course_id` the course ID for the course interested in
`problem_set` an array of UsageKeys representing problem module_id's.
Requests from the database a count of each grade for each problem in the `problem_set`.
Returns a dict, where the key is the problem 'module_id' and the value is a dict with two parts:
'max_grade' - the maximum grade possible for the course
'grade_distrib' - array of tuples (`grade`,`count`) ordered by `grade`
"""
# Aggregate query on studentmodule table for grade data for set of problems in course
db_query = models.StudentModule.objects.filter(
course_id__exact=course_id,
grade__isnull=False,
module_type__exact="problem",
module_state_key__in=problem_set,
).values(
'module_state_key',
'grade',
'max_grade',
).annotate(count_grade=Count('grade')).order_by('module_state_key', 'grade')
prob_grade_distrib = {}
# Loop through resultset building data for each problem
for row in db_query:
row_loc = row['module_state_key'].map_into_course(course_id)
if row_loc not in prob_grade_distrib:
prob_grade_distrib[row_loc] = {
'max_grade': 0,
'grade_distrib': [],
}
curr_grade_distrib = prob_grade_distrib[row_loc]
curr_grade_distrib['grade_distrib'].append((row['grade'], row['count_grade']))
if curr_grade_distrib['max_grade'] < row['max_grade']:
curr_grade_distrib['max_grade'] = row['max_grade']
return prob_grade_distrib
def get_d3_problem_grade_distrib(course_id):
"""
Returns problem grade distribution information for each section, data already in format for d3 function.
`course_id` the course ID for the course interested in
Returns an array of dicts in the order of the sections. Each dict has:
'display_name' - display name for the section
'data' - data for the d3_stacked_bar_graph function of the grade distribution for that problem
"""
prob_grade_distrib, total_student_count = get_problem_grade_distribution(course_id)
d3_data = []
# Retrieve course object down to problems
course = modulestore().get_course(course_id, depth=4)
# Iterate through sections, subsections, units, problems
for section in course.get_children():
curr_section = {}
curr_section['display_name'] = own_metadata(section).get('display_name', '')
data = []
c_subsection = 0
for subsection in section.get_children():
c_subsection += 1
c_unit = 0
for unit in subsection.get_children():
c_unit += 1
c_problem = 0
for child in unit.get_children():
# Student data is at the problem level
if child.location.block_type == 'problem':
c_problem += 1
stack_data = []
# Construct label to display for this problem
label = "P{0}.{1}.{2}".format(c_subsection, c_unit, c_problem)
# Only problems in prob_grade_distrib have had a student submission.
if child.location in prob_grade_distrib:
# Get max_grade, grade_distribution for this problem
problem_info = prob_grade_distrib[child.location]
# Get problem_name for tooltip
problem_name = own_metadata(child).get('display_name', '')
# Compute percent of this grade over max_grade
max_grade = float(problem_info['max_grade'])
for (grade, count_grade) in problem_info['grade_distrib']:
percent = 0.0
if max_grade > 0:
percent = round((grade * 100.0) / max_grade, 1)
# Compute percent of students with this grade
student_count_percent = 0
if total_student_count.get(child.location, 0) > 0:
student_count_percent = count_grade * 100 / total_student_count[child.location]
# Tooltip parameters for problem in grade distribution view
tooltip = {
'type': 'problem',
'label': label,
'problem_name': problem_name,
'count_grade': count_grade,
'percent': percent,
'grade': grade,
'max_grade': max_grade,
'student_count_percent': student_count_percent,
}
# Construct data to be sent to d3
stack_data.append({
'color': percent,
'value': count_grade,
'tooltip': tooltip,
'module_url': text_type(child.location),
})
problem = {
'xValue': label,
'stackData': stack_data,
}
data.append(problem)
curr_section['data'] = data
d3_data.append(curr_section)
return d3_data
def get_d3_sequential_open_distrib(course_id):
"""
Returns how many students opened a sequential/subsection for each section, data already in format for d3 function.
`course_id` the course ID for the course interested in
Returns an array in the order of the sections and each dict has:
'display_name' - display name for the section
'data' - data for the d3_stacked_bar_graph function of how many students opened each sequential/subsection
"""
sequential_open_distrib = get_sequential_open_distrib(course_id)
d3_data = []
# Retrieve course object down to subsection
course = modulestore().get_course(course_id, depth=2)
# Iterate through sections, subsections
for section in course.get_children():
curr_section = {}
curr_section['display_name'] = own_metadata(section).get('display_name', '')
data = []
c_subsection = 0
# Construct data for each subsection to be sent to d3
for subsection in section.get_children():
c_subsection += 1
subsection_name = own_metadata(subsection).get('display_name', '')
num_students = 0
if subsection.location in sequential_open_distrib:
num_students = sequential_open_distrib[subsection.location]
stack_data = []
# Tooltip parameters for subsection in open_distribution view
tooltip = {
'type': 'subsection',
'num_students': num_students,
'subsection_num': c_subsection,
'subsection_name': subsection_name
}
stack_data.append({
'color': 0,
'value': num_students,
'tooltip': tooltip,
'module_url': text_type(subsection.location),
})
subsection = {
'xValue': "SS {0}".format(c_subsection),
'stackData': stack_data,
}
data.append(subsection)
curr_section['data'] = data
d3_data.append(curr_section)
return d3_data
def get_d3_section_grade_distrib(course_id, section):
"""
Returns the grade distribution for the problems in the `section` section in a format for the d3 code.
`course_id` a string that is the course's ID.
`section` an int that is a zero-based index into the course's list of sections.
Navigates to the section specified to find all the problems associated with that section and then finds the grade
distribution for those problems. Finally returns an object formatted the way d3_stacked_bar_graph.js expects its
data object to be.
If this is requested multiple times quickly for the same course, it is better to call
get_d3_problem_grade_distrib and pick out the sections of interest.
Returns an array of dicts with the following keys (taken from d3_stacked_bar_graph.js's documentation)
'xValue' - Corresponding value for the x-axis
'stackData' - Array of objects with key, value pairs that represent a bar:
'color' - Defines what "color" the bar will map to
'value' - Maps to the height of the bar, along the y-axis
'tooltip' - (Optional) Text to display on mouse hover
"""
# Retrieve course object down to problems
course = modulestore().get_course(course_id, depth=4)
problem_set = []
problem_info = {}
c_subsection = 0
for subsection in course.get_children()[section].get_children():
c_subsection += 1
c_unit = 0
for unit in subsection.get_children():
c_unit += 1
c_problem = 0
for child in unit.get_children():
if child.location.block_type == 'problem':
c_problem += 1
problem_set.append(child.location)
problem_info[child.location] = {
'id': text_type(child.location),
'x_value': "P{0}.{1}.{2}".format(c_subsection, c_unit, c_problem),
'display_name': own_metadata(child).get('display_name', ''),
}
# Retrieve grade distribution for these problems
grade_distrib = get_problem_set_grade_distrib(course_id, problem_set)
d3_data = []
# Construct data for each problem to be sent to d3
for problem in problem_set:
stack_data = []
if problem in grade_distrib: # Some problems have no data because students have not tried them yet.
max_grade = float(grade_distrib[problem]['max_grade'])
for (grade, count_grade) in grade_distrib[problem]['grade_distrib']:
percent = 0.0
if max_grade > 0:
percent = round((grade * 100.0) / max_grade, 1)
# Construct tooltip for problem in grade distribution view
tooltip = {
'type': 'problem',
'problem_info_x': problem_info[problem]['x_value'],
'count_grade': count_grade,
'percent': percent,
'problem_info_n': problem_info[problem]['display_name'],
'grade': grade,
'max_grade': max_grade,
}
stack_data.append({
'color': percent,
'value': count_grade,
'tooltip': tooltip,
})
d3_data.append({
'xValue': problem_info[problem]['x_value'],
'stackData': stack_data,
})
return d3_data
def get_section_display_name(course_id):
"""
Returns an array of the display names for each section in the course.
`course_id` the course ID for the course interested in
The ith string in the array is the display name of the ith section in the course.
"""
course = modulestore().get_course(course_id, depth=4)
section_display_name = [""] * len(course.get_children())
for i, section in enumerate(course.get_children()):
section_display_name[i] = own_metadata(section).get('display_name', '')
return section_display_name
def get_array_section_has_problem(course_id):
"""
Returns an array of true/false whether each section has problems.
`course_id` the course ID for the course interested in
The ith value in the array is true if the ith section in the course contains problems and false otherwise.
"""
course = modulestore().get_course(course_id, depth=4)
b_section_has_problem = [False] * len(course.get_children())
i = 0
for section in course.get_children():
for subsection in section.get_children():
for unit in subsection.get_children():
for child in unit.get_children():
if child.location.block_type == 'problem':
b_section_has_problem[i] = True
break # out of child loop
if b_section_has_problem[i]:
break # out of unit loop
if b_section_has_problem[i]:
break # out of subsection loop
i += 1
return b_section_has_problem
def get_students_opened_subsection(request, csv=False):
"""
Get a list of students that opened a particular subsection.
If 'csv' is False, returns a dict of student's name: username.
If 'csv' is True, returns a header array, and an array of arrays in the format:
student names, usernames for CSV download.
"""
module_state_key = BlockUsageLocator.from_string(request.GET.get('module_id'))
csv = request.GET.get('csv')
# Query for "opened a subsection" students
students = models.StudentModule.objects.select_related('student').filter(
module_state_key__exact=module_state_key,
module_type__exact='sequential',
).values('student__username', 'student__profile__name').order_by('student__profile__name')
results = []
if not csv:
# Restrict screen list length
# Adding 1 so can tell if list is larger than MAX_SCREEN_LIST_LENGTH
# without doing another select.
for student in students[0:MAX_SCREEN_LIST_LENGTH + 1]:
results.append({
'name': student['student__profile__name'],
'username': student['student__username'],
})
max_exceeded = False
if len(results) > MAX_SCREEN_LIST_LENGTH:
# Remove the last item so list length is exactly MAX_SCREEN_LIST_LENGTH
del results[-1]
max_exceeded = True
response_payload = {
'results': results,
'max_exceeded': max_exceeded,
}
return JsonResponse(response_payload)
else:
tooltip = request.GET.get('tooltip')
# Subsection name is everything after 3rd space in tooltip
filename = sanitize_filename(' '.join(tooltip.split(' ')[3:]))
header = [_("Name"), _("Username")]
for student in students:
results.append([student['student__profile__name'], student['student__username']])
response = create_csv_response(filename, header, results)
return response
def get_students_problem_grades(request, csv=False):
"""
Get a list of students and grades for a particular problem.
If 'csv' is False, returns a dict of student's name: username: grade: percent.
If 'csv' is True, returns a header array, and an array of arrays in the format:
student names, usernames, grades, percents for CSV download.
"""
module_state_key = BlockUsageLocator.from_string(request.GET.get('module_id'))
csv = request.GET.get('csv')
# Query for "problem grades" students
students = models.StudentModule.objects.select_related('student').filter(
module_state_key=module_state_key,
module_type__exact='problem',
grade__isnull=False,
).values('student__username', 'student__profile__name', 'grade', 'max_grade').order_by('student__profile__name')
results = []
if not csv:
# Restrict screen list length
# Adding 1 so can tell if list is larger than MAX_SCREEN_LIST_LENGTH
# without doing another select.
for student in students[0:MAX_SCREEN_LIST_LENGTH + 1]:
student_dict = {
'name': student['student__profile__name'],
'username': student['student__username'],
'grade': student['grade'],
}
student_dict['percent'] = 0
if student['max_grade'] > 0:
student_dict['percent'] = round(student['grade'] * 100 / student['max_grade'])
results.append(student_dict)
max_exceeded = False
if len(results) > MAX_SCREEN_LIST_LENGTH:
# Remove the last item so list length is exactly MAX_SCREEN_LIST_LENGTH
del results[-1]
max_exceeded = True
response_payload = {
'results': results,
'max_exceeded': max_exceeded,
}
return JsonResponse(response_payload)
else:
tooltip = request.GET.get('tooltip')
filename = sanitize_filename(tooltip[:tooltip.rfind(' - ')])
header = [_("Name"), _("Username"), _("Grade"), _("Percent")]
for student in students:
percent = 0
if student['max_grade'] > 0:
percent = round(student['grade'] * 100 / student['max_grade'])
results.append([student['student__profile__name'], student['student__username'], student['grade'], percent])
response = create_csv_response(filename, header, results)
return response
def post_metrics_data_csv(request):
"""
Generate a list of opened subsections or problems for the entire course for CSV download.
Returns a header array, and an array of arrays in the format:
section, subsection, count of students for subsections
or section, problem, name, count of students, percent of students, score for problems.
"""
data = json.loads(request.POST['data'])
sections = json.loads(data['sections'])
tooltips = json.loads(data['tooltips'])
course_id = data['course_id']
data_type = data['data_type']
results = []
if data_type == 'subsection':
header = [_("Section"), _("Subsection"), _("Opened by this number of students")]
filename = sanitize_filename(_('subsections') + '_' + course_id)
elif data_type == 'problem':
header = [
_("Section"), _("Problem"), _("Name"), _("Count of Students"),
_("Percent of Students"), _("Score"),
]
filename = sanitize_filename(_('problems') + '_' + course_id)
for index, section in enumerate(sections):
results.append([section])
# tooltips array is array of dicts for subsections and
# array of array of dicts for problems.
if data_type == 'subsection':
for tooltip_dict in tooltips[index]:
num_students = tooltip_dict['num_students']
subsection = tooltip_dict['subsection_name']
# Append to results offsetting 1 column to the right.
results.append(['', subsection, num_students])
elif data_type == 'problem':
for tooltip in tooltips[index]:
for tooltip_dict in tooltip:
label = tooltip_dict['label']
problem_name = tooltip_dict['problem_name']
count_grade = tooltip_dict['count_grade']
student_count_percent = tooltip_dict['student_count_percent']
percent = tooltip_dict['percent']
# Append to results offsetting 1 column to the right.
results.append(['', label, problem_name, count_grade, student_count_percent, percent])
response = create_csv_response(filename, header, results)
return response
def sanitize_filename(filename):
"""
Replace spaces with underscores, truncate to the first 25 bytes, and append '.csv'.
"""
filename = filename.replace(" ", "_")
filename = filename.encode('utf-8')
filename = filename[0:25] + '.csv'
return filename
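# Illustrative example: sanitize_filename('my report') -> 'my_report.csv';
# longer names are cut to their first 25 bytes before '.csv' is appended.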
| agpl-3.0 |
marmyshev/bug_1117098 | openlp/plugins/songs/forms/__init__.py | 2 | 3331 | # -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
Forms in OpenLP are made up of two classes. One class holds all the graphical
elements, like buttons and lists, and the other class holds all the functional
code, like slots and loading and saving.
The first class, commonly known as the **Dialog** class, is typically named
``Ui_<name>Dialog``. It is a slightly modified version of the class that the
``pyuic4`` command produces from Qt4's .ui file. Typical modifications include
converting most strings from "" to u'' and using OpenLP's ``translate()``
function for translating strings.
The second class, commonly known as the **Form** class, is typically named
``<name>Form``. This class is the one which is instantiated and used. It uses
dual inheritance to inherit from (usually) QtGui.QDialog and the Ui class
mentioned above, like so::
class AuthorsForm(QtGui.QDialog, Ui_AuthorsDialog):
def __init__(self, parent=None):
super(AuthorsForm, self).__init__(parent)
self.setupUi(self)
This allows OpenLP to use ``self.object`` for all the GUI elements while keeping
them separate from the functionality, so that it is easier to recreate the GUI
from the .ui files later if necessary.
"""
from .editsongform import EditSongForm
| gpl-2.0 |
baryon5/pylights | pylights/entities/etcfiles/__init__.py | 1 | 3037 | import struct
class Readable(object):
@classmethod
def from_string(cls,string):
"""Takes a bytestring and returns an object loaded from that bytestring."""
t = struct.unpack(cls._struct_format,string)
d = {}
for n,i in enumerate(cls._struct_fields):
if i:
o = t[n]
if i.startswith("*"):
i = i[1:]
o = cls._struct_structs[i].from_string(o)
if d.get(i,False):
if type(d[i]) == list:
d[i].append(o)
else:
d[i] = [d[i]]
d[i].append(o)
else:
d[i] = o
try:
return cls(**d)
except TypeError:
return cls.from_struct(**d)
@classmethod
def from_file(cls,fn,writeable=True):
"""
A wrapper around :meth:`from_string`, this takes a filename and an optional boolean
flag. It reads from the specified file and then calls :meth:`from_string` on its
contents, and returns the result.
Additionally, if the optional boolean `writeable` is True and the class inherits from
Writeable, the specified filename will be remembered for when the object is written
to a file.
"""
fd = open(fn,"rb")
s = fd.read()
fd.close()
c = cls.from_string(s)
if writeable and isinstance(c,Writeable):
c._Writeable__filename = fn
return c
@classmethod
def from_struct(cls,**args):
"""
This classmethod must be overridden. It will be called to create a new object
from a file with arguments as defined in :attr:`_struct_fields`.
"""
raise NotImplementedError("Subclasses must implement from_struct!")
class Writeable(object):
def to_string(self):
"""Returns a :mod:`struct` packed bytestring of the object."""
t = list(self.to_struct())
if getattr(self,"_struct_fields"):
for n,i in enumerate(self._struct_fields):
if i == None:
t[n] = 0
if i and i.startswith("*"):
t[n] = t[n].to_string()
return struct.pack(self._struct_format,*t)
def write(self,fn=None):
"""
A wrapper around :meth:`to_string`, this writes the data returned by :meth:`to_string`
to the specified file, or, if none is specified and a filename was remembered when
loading the object from a file, that remembered filename will be used.
Returns True on success, for no particular reason.
"""
if not fn:
try:
fn = self.__filename
except AttributeError:
raise TypeError("Object has no filename, please provide one!")
else:
self.__filename = fn
fd = open(fn,"wb")
fd.write(self.to_string())
fd.close()
return True
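# Illustrative sketch of a concrete subclass (hypothetical names and format;
# not part of this module). from_string() unpacks _struct_format, maps the
# tuple onto _struct_fields, and calls cls(**fields); to_struct() supplies the
# tuple packed back by to_string():
#
#     class Header(Readable, Writeable):
#         _struct_format = "<4sI"
#         _struct_fields = ("magic", "length")
#         _struct_structs = {}
#         def __init__(self, magic, length):
#             self.magic, self.length = magic, length
#         def to_struct(self):
#             return (self.magic, self.length)
#
#     Header.from_file("show.dat").write()  # round-trips the file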
| gpl-2.0 |
carnotweat/cpupimp | libs/rtorrent/tracker.py | 173 | 5212 | # Copyright (c) 2013 Chris Lucas, <chris@chrisjlucas.com>
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# from rtorrent.rpc import Method
import rtorrent.rpc
from rtorrent.common import safe_repr
Method = rtorrent.rpc.Method
class Tracker:
"""Represents an individual tracker within a L{Torrent} instance."""
def __init__(self, _rt_obj, info_hash, **kwargs):
self._rt_obj = _rt_obj
self.info_hash = info_hash #: info hash for the torrent using this tracker
for k in kwargs.keys():
setattr(self, k, kwargs.get(k, None))
# for clarity's sake...
self.index = self.group #: position of tracker within the torrent's tracker list
self.rpc_id = "{0}:t{1}".format(
self.info_hash, self.index) #: unique id to pass to rTorrent
def __repr__(self):
return safe_repr("Tracker(index={0}, url=\"{1}\")",
self.index, self.url)
def enable(self):
"""Alias for set_enabled("yes")"""
self.set_enabled("yes")
def disable(self):
"""Alias for set_enabled("no")"""
self.set_enabled("no")
def update(self):
"""Refresh tracker data
@note: All fields are stored as attributes to self.
@return: None
"""
multicall = rtorrent.rpc.Multicall(self)
retriever_methods = [m for m in methods
if m.is_retriever() and m.is_available(self._rt_obj)]
for method in retriever_methods:
multicall.add(method, self.rpc_id)
multicall.call()
methods = [
# RETRIEVERS
Method(Tracker, 'is_enabled', 't.is_enabled', boolean=True),
Method(Tracker, 'get_id', 't.get_id'),
Method(Tracker, 'get_scrape_incomplete', 't.get_scrape_incomplete'),
Method(Tracker, 'is_open', 't.is_open', boolean=True),
Method(Tracker, 'get_min_interval', 't.get_min_interval'),
Method(Tracker, 'get_scrape_downloaded', 't.get_scrape_downloaded'),
Method(Tracker, 'get_group', 't.get_group'),
Method(Tracker, 'get_scrape_time_last', 't.get_scrape_time_last'),
Method(Tracker, 'get_type', 't.get_type'),
Method(Tracker, 'get_normal_interval', 't.get_normal_interval'),
Method(Tracker, 'get_url', 't.get_url'),
Method(Tracker, 'get_scrape_complete', 't.get_scrape_complete',
min_version=(0, 8, 9),
),
Method(Tracker, 'get_activity_time_last', 't.activity_time_last',
min_version=(0, 8, 9),
),
Method(Tracker, 'get_activity_time_next', 't.activity_time_next',
min_version=(0, 8, 9),
),
Method(Tracker, 'get_failed_time_last', 't.failed_time_last',
min_version=(0, 8, 9),
),
Method(Tracker, 'get_failed_time_next', 't.failed_time_next',
min_version=(0, 8, 9),
),
Method(Tracker, 'get_success_time_last', 't.success_time_last',
min_version=(0, 8, 9),
),
Method(Tracker, 'get_success_time_next', 't.success_time_next',
min_version=(0, 8, 9),
),
Method(Tracker, 'can_scrape', 't.can_scrape',
min_version=(0, 9, 1),
boolean=True
),
Method(Tracker, 'get_failed_counter', 't.failed_counter',
min_version=(0, 8, 9)
),
Method(Tracker, 'get_scrape_counter', 't.scrape_counter',
min_version=(0, 8, 9)
),
Method(Tracker, 'get_success_counter', 't.success_counter',
min_version=(0, 8, 9)
),
Method(Tracker, 'is_usable', 't.is_usable',
min_version=(0, 9, 1),
boolean=True
),
Method(Tracker, 'is_busy', 't.is_busy',
min_version=(0, 9, 1),
boolean=True
),
Method(Tracker, 'is_extra_tracker', 't.is_extra_tracker',
min_version=(0, 9, 1),
boolean=True,
),
Method(Tracker, "get_latest_sum_peers", "t.latest_sum_peers",
min_version=(0, 9, 0)
),
Method(Tracker, "get_latest_new_peers", "t.latest_new_peers",
min_version=(0, 9, 0)
),
# MODIFIERS
Method(Tracker, 'set_enabled', 't.set_enabled'),
]
| gpl-3.0 |
zombo/p2pool | p2pool/bitcoin/script.py | 282 | 2589 | from p2pool.util import math, pack
def reads_nothing(f):
return None, f
def protoPUSH(length):
return lambda f: pack.read(f, length)
def protoPUSHDATA(size_len):
def _(f):
length_str, f = pack.read(f, size_len)
length = math.string_to_natural(length_str[::-1].lstrip(chr(0)))
data, f = pack.read(f, length)
return data, f
return _
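# Worked sketch (values made up): the stream is a (data, position) pair, as
# parse() constructs below, so a PUSHDATA1-style read proceeds as
#     protoPUSHDATA(1)(('\x02ab', 0)) -> ('ab', ('\x02ab', 3))
# i.e. one little-endian length byte (here 2) followed by that many data bytes.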
opcodes = {}
for i in xrange(256):
opcodes[i] = 'UNK_' + str(i), reads_nothing
opcodes[0] = 'PUSH', lambda f: ('', f)
for i in xrange(1, 76):
opcodes[i] = 'PUSH', protoPUSH(i)
opcodes[76] = 'PUSH', protoPUSHDATA(1)
opcodes[77] = 'PUSH', protoPUSHDATA(2)
opcodes[78] = 'PUSH', protoPUSHDATA(4)
opcodes[79] = 'PUSH', lambda f: ('\x81', f)
for i in xrange(81, 97):
opcodes[i] = 'PUSH', lambda f, _i=i: (chr(_i - 80), f)
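# i.e. opcodes 81..96 (OP_1..OP_16) decode as a push of the single byte 1..16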
opcodes[172] = 'CHECKSIG', reads_nothing
opcodes[173] = 'CHECKSIGVERIFY', reads_nothing
opcodes[174] = 'CHECKMULTISIG', reads_nothing
opcodes[175] = 'CHECKMULTISIGVERIFY', reads_nothing
def parse(script):
f = script, 0
while pack.size(f):
opcode_str, f = pack.read(f, 1)
opcode = ord(opcode_str)
opcode_name, read_func = opcodes[opcode]
opcode_arg, f = read_func(f)
yield opcode_name, opcode_arg
def get_sigop_count(script):
weights = {
'CHECKSIG': 1,
'CHECKSIGVERIFY': 1,
'CHECKMULTISIG': 20,
'CHECKMULTISIGVERIFY': 20,
}
return sum(weights.get(opcode_name, 0) for opcode_name, opcode_arg in parse(script))
def create_push_script(datums): # datums can be ints or strs
res = []
for datum in datums:
if isinstance(datum, (int, long)):
if datum == -1 or 1 <= datum <= 16:
res.append(chr(datum + 80))
continue
negative = datum < 0
datum = math.natural_to_string(abs(datum))
if datum and ord(datum[0]) & 128:
datum = '\x00' + datum
if negative:
datum = chr(ord(datum[0]) + 128) + datum[1:]
datum = datum[::-1]
if len(datum) < 76:
res.append(chr(len(datum)))
elif len(datum) <= 0xff:
res.append(chr(76))
res.append(chr(len(datum)))
elif len(datum) <= 0xffff:
res.append(chr(77))
res.append(pack.IntType(16).pack(len(datum)))
elif len(datum) <= 0xffffffff:
res.append(chr(78))
res.append(pack.IntType(32).pack(len(datum)))
else:
raise ValueError('string too long')
res.append(datum)
return ''.join(res)
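# Round-trip sketch (not part of the original module); the script bytes are
# illustrative, and like the rest of this file it assumes Python 2.
if __name__ == '__main__':
    s = create_push_script(['hello', 5])    # data push + small-integer push
    for opcode_name, opcode_arg in parse(s):
        print opcode_name, repr(opcode_arg) # PUSH 'hello', then PUSH '\x05'
    p2pk = '\x21' + 33 * '\x02' + '\xac'    # 33-byte key push + CHECKSIG
    assert get_sigop_count(p2pk) == 1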
| gpl-3.0 |
sdague/home-assistant | tests/helpers/test_script.py | 5 | 65412 | """The tests for the Script component."""
# pylint: disable=protected-access
import asyncio
from contextlib import contextmanager
from datetime import timedelta
import logging
from types import MappingProxyType
from unittest import mock
import pytest
import voluptuous as vol
# Otherwise can't test just this file (import order issue)
from homeassistant import exceptions
import homeassistant.components.scene as scene
from homeassistant.const import ATTR_ENTITY_ID, SERVICE_TURN_ON
from homeassistant.core import Context, CoreState, callback
from homeassistant.helpers import config_validation as cv, script
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.async_mock import patch
from tests.common import (
async_capture_events,
async_fire_time_changed,
async_mock_service,
)
ENTITY_ID = "script.test"
def async_watch_for_action(script_obj, message):
"""Watch for message in last_action."""
flag = asyncio.Event()
@callback
def check_action():
if script_obj.last_action and message in script_obj.last_action:
flag.set()
script_obj.change_listener = check_action
assert script_obj.change_listener is check_action
return flag
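# A sketch, not one of the suite's own tests: the pattern the tests below
# follow is to start the script, then await the watcher flag with a timeout.
async def test_watch_for_action_sketch(hass):
    """Sketch: drive async_watch_for_action against a delay step."""
    sequence = cv.SCRIPT_SCHEMA({"delay": {"seconds": 5}, "alias": "delay step"})
    script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
    flag = async_watch_for_action(script_obj, "delay step")
    hass.async_create_task(script_obj.async_run(context=Context()))
    await asyncio.wait_for(flag.wait(), 1)  # set once last_action hits the delay
    await script_obj.async_stop()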
async def test_firing_event_basic(hass, caplog):
"""Test the firing of events."""
event = "test_event"
context = Context()
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA({"event": event, "event_data": {"hello": "world"}})
script_obj = script.Script(
hass, sequence, "Test Name", "test_domain", running_description="test script"
)
await script_obj.async_run(context=context)
await hass.async_block_till_done()
assert len(events) == 1
assert events[0].context is context
assert events[0].data.get("hello") == "world"
assert ".test_name:" in caplog.text
assert "Test Name: Running test script" in caplog.text
async def test_firing_event_template(hass):
"""Test the firing of events."""
event = "test_event"
context = Context()
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
{
"event": event,
"event_data": {
"dict": {
1: "{{ is_world }}",
2: "{{ is_world }}{{ is_world }}",
3: "{{ is_world }}{{ is_world }}{{ is_world }}",
},
"list": ["{{ is_world }}", "{{ is_world }}{{ is_world }}"],
},
"event_data_template": {
"dict2": {
1: "{{ is_world }}",
2: "{{ is_world }}{{ is_world }}",
3: "{{ is_world }}{{ is_world }}{{ is_world }}",
},
"list2": ["{{ is_world }}", "{{ is_world }}{{ is_world }}"],
},
}
)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
await script_obj.async_run(MappingProxyType({"is_world": "yes"}), context=context)
await hass.async_block_till_done()
assert len(events) == 1
assert events[0].context is context
assert events[0].data == {
"dict": {1: "yes", 2: "yesyes", 3: "yesyesyes"},
"list": ["yes", "yesyes"],
"dict2": {1: "yes", 2: "yesyes", 3: "yesyesyes"},
"list2": ["yes", "yesyes"],
}
async def test_calling_service_basic(hass):
"""Test the calling of a service."""
context = Context()
calls = async_mock_service(hass, "test", "script")
sequence = cv.SCRIPT_SCHEMA({"service": "test.script", "data": {"hello": "world"}})
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
await script_obj.async_run(context=context)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].context is context
assert calls[0].data.get("hello") == "world"
async def test_calling_service_template(hass):
"""Test the calling of a service."""
context = Context()
calls = async_mock_service(hass, "test", "script")
sequence = cv.SCRIPT_SCHEMA(
{
"service_template": """
{% if True %}
test.script
{% else %}
test.not_script
{% endif %}""",
"data_template": {
"hello": """
{% if is_world == 'yes' %}
world
{% else %}
not world
{% endif %}
"""
},
}
)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
await script_obj.async_run(MappingProxyType({"is_world": "yes"}), context=context)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].context is context
assert calls[0].data.get("hello") == "world"
async def test_data_template_with_templated_key(hass):
"""Test the calling of a service with a data_template with a templated key."""
context = Context()
calls = async_mock_service(hass, "test", "script")
sequence = cv.SCRIPT_SCHEMA(
{"service": "test.script", "data_template": {"{{ hello_var }}": "world"}}
)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
await script_obj.async_run(
MappingProxyType({"hello_var": "hello"}), context=context
)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].context is context
assert "hello" in calls[0].data
async def test_multiple_runs_no_wait(hass):
"""Test multiple runs with no wait in script."""
logger = logging.getLogger("TEST")
calls = []
heard_event = asyncio.Event()
async def async_simulate_long_service(service):
"""Simulate a service that takes a not insignificant time."""
fire = service.data.get("fire")
listen = service.data.get("listen")
service_done = asyncio.Event()
@callback
def service_done_cb(event):
logger.debug("simulated service (%s:%s) done", fire, listen)
service_done.set()
calls.append(service)
logger.debug("simulated service (%s:%s) started", fire, listen)
unsub = hass.bus.async_listen(str(listen), service_done_cb)
hass.bus.async_fire(str(fire))
await service_done.wait()
unsub()
hass.services.async_register("test", "script", async_simulate_long_service)
@callback
def heard_event_cb(event):
logger.debug("heard: %s", event)
heard_event.set()
sequence = cv.SCRIPT_SCHEMA(
[
{
"service": "test.script",
"data_template": {"fire": "{{ fire1 }}", "listen": "{{ listen1 }}"},
},
{
"service": "test.script",
"data_template": {"fire": "{{ fire2 }}", "listen": "{{ listen2 }}"},
},
]
)
script_obj = script.Script(
hass, sequence, "Test Name", "test_domain", script_mode="parallel", max_runs=2
)
# Start script twice in such a way that second run will be started while first run
# is in the middle of the first service call.
unsub = hass.bus.async_listen("1", heard_event_cb)
logger.debug("starting 1st script")
hass.async_create_task(
script_obj.async_run(
MappingProxyType(
{"fire1": "1", "listen1": "2", "fire2": "3", "listen2": "4"}
),
Context(),
)
)
await asyncio.wait_for(heard_event.wait(), 1)
unsub()
logger.debug("starting 2nd script")
await script_obj.async_run(
MappingProxyType({"fire1": "2", "listen1": "3", "fire2": "4", "listen2": "4"}),
Context(),
)
await hass.async_block_till_done()
assert len(calls) == 4
async def test_activating_scene(hass):
"""Test the activation of a scene."""
context = Context()
calls = async_mock_service(hass, scene.DOMAIN, SERVICE_TURN_ON)
sequence = cv.SCRIPT_SCHEMA({"scene": "scene.hello"})
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
await script_obj.async_run(context=context)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].context is context
assert calls[0].data.get(ATTR_ENTITY_ID) == "scene.hello"
@pytest.mark.parametrize("count", [1, 3])
async def test_stop_no_wait(hass, count):
"""Test stopping script."""
service_started_sem = asyncio.Semaphore(0)
finish_service_event = asyncio.Event()
event = "test_event"
events = async_capture_events(hass, event)
async def async_simulate_long_service(service):
"""Simulate a service that takes a not insignificant time."""
service_started_sem.release()
await finish_service_event.wait()
hass.services.async_register("test", "script", async_simulate_long_service)
sequence = cv.SCRIPT_SCHEMA([{"service": "test.script"}, {"event": event}])
script_obj = script.Script(
hass,
sequence,
"Test Name",
"test_domain",
script_mode="parallel",
max_runs=count,
)
# Get script started specified number of times and wait until the test.script
# service has started for each run.
tasks = []
for _ in range(count):
hass.async_create_task(script_obj.async_run(context=Context()))
tasks.append(hass.async_create_task(service_started_sem.acquire()))
await asyncio.wait_for(asyncio.gather(*tasks), 1)
# Can't assert just yet because stopping hasn't been verified to work;
# if an assert failed here, the test could hang.
script_was_running = script_obj.is_running
were_no_events = len(events) == 0
were_no_events = len(events) == 0
# Begin the process of stopping the script (which should stop all runs), and then
# let the service calls complete.
hass.async_create_task(script_obj.async_stop())
finish_service_event.set()
await hass.async_block_till_done()
assert script_was_running
assert were_no_events
assert not script_obj.is_running
assert len(events) == 0
async def test_delay_basic(hass):
"""Test the delay."""
delay_alias = "delay step"
sequence = cv.SCRIPT_SCHEMA({"delay": {"seconds": 5}, "alias": delay_alias})
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
delay_started_flag = async_watch_for_action(script_obj, delay_alias)
try:
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(delay_started_flag.wait(), 1)
assert script_obj.is_running
assert script_obj.last_action == delay_alias
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=5))
await hass.async_block_till_done()
assert not script_obj.is_running
assert script_obj.last_action is None
async def test_multiple_runs_delay(hass):
"""Test multiple runs with delay in script."""
event = "test_event"
events = async_capture_events(hass, event)
delay = timedelta(seconds=5)
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event, "event_data": {"value": 1}},
{"delay": delay},
{"event": event, "event_data": {"value": 2}},
]
)
script_obj = script.Script(
hass, sequence, "Test Name", "test_domain", script_mode="parallel", max_runs=2
)
delay_started_flag = async_watch_for_action(script_obj, "delay")
try:
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(delay_started_flag.wait(), 1)
assert script_obj.is_running
assert len(events) == 1
assert events[-1].data["value"] == 1
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
# Start second run of script while first run is in a delay.
script_obj.sequence[1]["alias"] = "delay run 2"
delay_started_flag = async_watch_for_action(script_obj, "delay run 2")
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(delay_started_flag.wait(), 1)
async_fire_time_changed(hass, dt_util.utcnow() + delay)
await hass.async_block_till_done()
assert not script_obj.is_running
assert len(events) == 4
assert events[-3].data["value"] == 1
assert events[-2].data["value"] == 2
assert events[-1].data["value"] == 2
async def test_delay_template_ok(hass):
"""Test the delay as a template."""
sequence = cv.SCRIPT_SCHEMA({"delay": "00:00:{{ 5 }}"})
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
delay_started_flag = async_watch_for_action(script_obj, "delay")
try:
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(delay_started_flag.wait(), 1)
assert script_obj.is_running
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=5))
await hass.async_block_till_done()
assert not script_obj.is_running
async def test_delay_template_invalid(hass, caplog):
"""Test the delay as a template that fails."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event},
{"delay": "{{ invalid_delay }}"},
{"delay": {"seconds": 5}},
{"event": event},
]
)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
start_idx = len(caplog.records)
await script_obj.async_run(context=Context())
await hass.async_block_till_done()
assert any(
rec.levelname == "ERROR" and "Error rendering" in rec.message
for rec in caplog.records[start_idx:]
)
assert not script_obj.is_running
assert len(events) == 1
async def test_delay_template_complex_ok(hass):
"""Test the delay with a working complex template."""
sequence = cv.SCRIPT_SCHEMA({"delay": {"seconds": "{{ 5 }}"}})
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
delay_started_flag = async_watch_for_action(script_obj, "delay")
try:
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(delay_started_flag.wait(), 1)
assert script_obj.is_running
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=5))
await hass.async_block_till_done()
assert not script_obj.is_running
async def test_delay_template_complex_invalid(hass, caplog):
"""Test the delay with a complex template that fails."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event},
{"delay": {"seconds": "{{ invalid_delay }}"}},
{"delay": {"seconds": 5}},
{"event": event},
]
)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
start_idx = len(caplog.records)
await script_obj.async_run(context=Context())
await hass.async_block_till_done()
assert any(
rec.levelname == "ERROR" and "Error rendering" in rec.message
for rec in caplog.records[start_idx:]
)
assert not script_obj.is_running
assert len(events) == 1
async def test_cancel_delay(hass):
"""Test the cancelling while the delay is present."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA([{"delay": {"seconds": 5}}, {"event": event}])
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
delay_started_flag = async_watch_for_action(script_obj, "delay")
try:
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(delay_started_flag.wait(), 1)
assert script_obj.is_running
assert len(events) == 0
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
await script_obj.async_stop()
assert not script_obj.is_running
# Make sure the script is really stopped.
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=5))
await hass.async_block_till_done()
assert not script_obj.is_running
assert len(events) == 0
@pytest.mark.parametrize("action_type", ["template", "trigger"])
async def test_wait_basic(hass, action_type):
"""Test wait actions."""
wait_alias = "wait step"
action = {"alias": wait_alias}
if action_type == "template":
action["wait_template"] = "{{ states.switch.test.state == 'off' }}"
else:
action["wait_for_trigger"] = {
"platform": "state",
"entity_id": "switch.test",
"to": "off",
}
sequence = cv.SCRIPT_SCHEMA(action)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
wait_started_flag = async_watch_for_action(script_obj, wait_alias)
try:
hass.states.async_set("switch.test", "on")
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
assert script_obj.last_action == wait_alias
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
hass.states.async_set("switch.test", "off")
await hass.async_block_till_done()
assert not script_obj.is_running
assert script_obj.last_action is None
@pytest.mark.parametrize("action_type", ["template", "trigger"])
async def test_multiple_runs_wait(hass, action_type):
"""Test multiple runs with wait in script."""
event = "test_event"
events = async_capture_events(hass, event)
if action_type == "template":
action = {"wait_template": "{{ states.switch.test.state == 'off' }}"}
else:
action = {
"wait_for_trigger": {
"platform": "state",
"entity_id": "switch.test",
"to": "off",
}
}
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event, "event_data": {"value": 1}},
action,
{"event": event, "event_data": {"value": 2}},
]
)
script_obj = script.Script(
hass, sequence, "Test Name", "test_domain", script_mode="parallel", max_runs=2
)
wait_started_flag = async_watch_for_action(script_obj, "wait")
try:
hass.states.async_set("switch.test", "on")
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
assert len(events) == 1
assert events[-1].data["value"] == 1
# Start second run of script while first run is in wait_template.
wait_started_flag.clear()
hass.async_create_task(script_obj.async_run())
await asyncio.wait_for(wait_started_flag.wait(), 1)
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
hass.states.async_set("switch.test", "off")
await hass.async_block_till_done()
assert not script_obj.is_running
assert len(events) == 4
assert events[-3].data["value"] == 1
assert events[-2].data["value"] == 2
assert events[-1].data["value"] == 2
@pytest.mark.parametrize("action_type", ["template", "trigger"])
async def test_cancel_wait(hass, action_type):
"""Test the cancelling while wait is present."""
event = "test_event"
events = async_capture_events(hass, event)
if action_type == "template":
action = {"wait_template": "{{ states.switch.test.state == 'off' }}"}
else:
action = {
"wait_for_trigger": {
"platform": "state",
"entity_id": "switch.test",
"to": "off",
}
}
sequence = cv.SCRIPT_SCHEMA([action, {"event": event}])
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
wait_started_flag = async_watch_for_action(script_obj, "wait")
try:
hass.states.async_set("switch.test", "on")
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
assert len(events) == 0
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
await script_obj.async_stop()
assert not script_obj.is_running
# Make sure the script is really stopped.
hass.states.async_set("switch.test", "off")
await hass.async_block_till_done()
assert not script_obj.is_running
assert len(events) == 0
async def test_wait_template_not_schedule(hass):
"""Test the wait template with correct condition."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event},
{"wait_template": "{{ states.switch.test.state == 'on' }}"},
{"event": event},
]
)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
hass.states.async_set("switch.test", "on")
await script_obj.async_run(context=Context())
await hass.async_block_till_done()
assert not script_obj.is_running
assert len(events) == 2
@pytest.mark.parametrize(
"timeout_param", [5, "{{ 5 }}", {"seconds": 5}, {"seconds": "{{ 5 }}"}]
)
@pytest.mark.parametrize("action_type", ["template", "trigger"])
async def test_wait_timeout(hass, caplog, timeout_param, action_type):
"""Test the wait timeout option."""
event = "test_event"
events = async_capture_events(hass, event)
if action_type == "template":
action = {"wait_template": "{{ states.switch.test.state == 'off' }}"}
else:
action = {
"wait_for_trigger": {
"platform": "state",
"entity_id": "switch.test",
"to": "off",
}
}
action["timeout"] = timeout_param
action["continue_on_timeout"] = True
sequence = cv.SCRIPT_SCHEMA([action, {"event": event}])
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
wait_started_flag = async_watch_for_action(script_obj, "wait")
try:
hass.states.async_set("switch.test", "on")
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
assert len(events) == 0
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
cur_time = dt_util.utcnow()
async_fire_time_changed(hass, cur_time + timedelta(seconds=4))
await asyncio.sleep(0)
assert len(events) == 0
async_fire_time_changed(hass, cur_time + timedelta(seconds=5))
await hass.async_block_till_done()
assert not script_obj.is_running
assert len(events) == 1
assert "(timeout: 0:00:05)" in caplog.text
@pytest.mark.parametrize(
"continue_on_timeout,n_events", [(False, 0), (True, 1), (None, 1)]
)
@pytest.mark.parametrize("action_type", ["template", "trigger"])
async def test_wait_continue_on_timeout(
hass, continue_on_timeout, n_events, action_type
):
"""Test the wait continue_on_timeout option."""
event = "test_event"
events = async_capture_events(hass, event)
if action_type == "template":
action = {"wait_template": "{{ states.switch.test.state == 'off' }}"}
else:
action = {
"wait_for_trigger": {
"platform": "state",
"entity_id": "switch.test",
"to": "off",
}
}
action["timeout"] = 5
if continue_on_timeout is not None:
action["continue_on_timeout"] = continue_on_timeout
sequence = cv.SCRIPT_SCHEMA([action, {"event": event}])
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
wait_started_flag = async_watch_for_action(script_obj, "wait")
try:
hass.states.async_set("switch.test", "on")
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
assert len(events) == 0
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=5))
await hass.async_block_till_done()
assert not script_obj.is_running
assert len(events) == n_events
async def test_wait_template_variables_in(hass):
"""Test the wait template with input variables."""
sequence = cv.SCRIPT_SCHEMA({"wait_template": "{{ is_state(data, 'off') }}"})
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
wait_started_flag = async_watch_for_action(script_obj, "wait")
try:
hass.states.async_set("switch.test", "on")
hass.async_create_task(
script_obj.async_run(MappingProxyType({"data": "switch.test"}), Context())
)
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
hass.states.async_set("switch.test", "off")
await hass.async_block_till_done()
assert not script_obj.is_running
@pytest.mark.parametrize("mode", ["no_timeout", "timeout_finish", "timeout_not_finish"])
@pytest.mark.parametrize("action_type", ["template", "trigger"])
async def test_wait_variables_out(hass, mode, action_type):
"""Test the wait output variable."""
event = "test_event"
events = async_capture_events(hass, event)
if action_type == "template":
action = {"wait_template": "{{ states.switch.test.state == 'off' }}"}
event_key = "completed"
else:
action = {
"wait_for_trigger": {
"platform": "state",
"entity_id": "switch.test",
"to": "off",
}
}
event_key = "trigger"
if mode != "no_timeout":
action["timeout"] = 5
action["continue_on_timeout"] = True
sequence = [
action,
{
"event": event,
"event_data_template": {
event_key: f"{{{{ wait.{event_key} }}}}",
"remaining": "{{ wait.remaining }}",
},
},
]
sequence = cv.SCRIPT_SCHEMA(sequence)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
wait_started_flag = async_watch_for_action(script_obj, "wait")
try:
hass.states.async_set("switch.test", "on")
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
assert len(events) == 0
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
if mode == "timeout_not_finish":
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=5))
else:
hass.states.async_set("switch.test", "off")
await hass.async_block_till_done()
assert not script_obj.is_running
assert len(events) == 1
if action_type == "template":
assert events[0].data["completed"] == (mode != "timeout_not_finish")
elif mode != "timeout_not_finish":
assert "'to_state': <state switch.test=off" in events[0].data["trigger"]
else:
assert events[0].data["trigger"] is None
remaining = events[0].data["remaining"]
if mode == "no_timeout":
assert remaining is None
elif mode == "timeout_finish":
assert 0.0 < float(remaining) < 5
else:
assert float(remaining) == 0.0
async def test_wait_for_trigger_bad(hass, caplog):
"""Test bad wait_for_trigger."""
script_obj = script.Script(
hass,
cv.SCRIPT_SCHEMA(
{"wait_for_trigger": {"platform": "state", "entity_id": "sensor.abc"}}
),
"Test Name",
"test_domain",
)
async def async_attach_trigger_mock(*args, **kwargs):
return None
with mock.patch(
"homeassistant.components.homeassistant.triggers.state.async_attach_trigger",
wraps=async_attach_trigger_mock,
):
hass.async_create_task(script_obj.async_run())
await hass.async_block_till_done()
assert "Error setting up trigger" in caplog.text
async def test_condition_basic(hass):
"""Test if we can use conditions in a script."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event},
{
"condition": "template",
"value_template": "{{ states.test.entity.state == 'hello' }}",
},
{"event": event},
]
)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
hass.states.async_set("test.entity", "hello")
await script_obj.async_run(context=Context())
await hass.async_block_till_done()
assert len(events) == 2
hass.states.async_set("test.entity", "goodbye")
await script_obj.async_run(context=Context())
await hass.async_block_till_done()
assert len(events) == 3
@patch("homeassistant.helpers.script.condition.async_from_config")
async def test_condition_created_once(async_from_config, hass):
"""Test that the conditions do not get created multiple times."""
sequence = cv.SCRIPT_SCHEMA(
{
"condition": "template",
"value_template": '{{ states.test.entity.state == "hello" }}',
}
)
script_obj = script.Script(
hass, sequence, "Test Name", "test_domain", script_mode="parallel", max_runs=2
)
async_from_config.reset_mock()
hass.states.async_set("test.entity", "hello")
await script_obj.async_run(context=Context())
await script_obj.async_run(context=Context())
await hass.async_block_till_done()
async_from_config.assert_called_once()
assert len(script_obj._config_cache) == 1
async def test_condition_all_cached(hass):
"""Test that multiple conditions get cached."""
sequence = cv.SCRIPT_SCHEMA(
[
{
"condition": "template",
"value_template": '{{ states.test.entity.state == "hello" }}',
},
{
"condition": "template",
"value_template": '{{ states.test.entity.state != "hello" }}',
},
]
)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
hass.states.async_set("test.entity", "hello")
await script_obj.async_run(context=Context())
await hass.async_block_till_done()
assert len(script_obj._config_cache) == 2
async def test_repeat_count(hass):
"""Test repeat action w/ count option."""
event = "test_event"
events = async_capture_events(hass, event)
count = 3
sequence = cv.SCRIPT_SCHEMA(
{
"repeat": {
"count": count,
"sequence": {
"event": event,
"event_data_template": {
"first": "{{ repeat.first }}",
"index": "{{ repeat.index }}",
"last": "{{ repeat.last }}",
},
},
}
}
)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
await script_obj.async_run(context=Context())
await hass.async_block_till_done()
assert len(events) == count
for index, event in enumerate(events):
assert event.data.get("first") == (index == 0)
assert event.data.get("index") == index + 1
assert event.data.get("last") == (index == count - 1)
@pytest.mark.parametrize("condition", ["while", "until"])
@pytest.mark.parametrize("direct_template", [False, True])
async def test_repeat_conditional(hass, condition, direct_template):
"""Test repeat action w/ while option."""
event = "test_event"
events = async_capture_events(hass, event)
count = 3
sequence = {
"repeat": {
"sequence": [
{
"event": event,
"event_data_template": {
"first": "{{ repeat.first }}",
"index": "{{ repeat.index }}",
},
},
{"wait_template": "{{ is_state('sensor.test', 'next') }}"},
{"wait_template": "{{ not is_state('sensor.test', 'next') }}"},
],
}
}
if condition == "while":
template = "{{ not is_state('sensor.test', 'done') }}"
if direct_template:
sequence["repeat"]["while"] = template
else:
sequence["repeat"]["while"] = {
"condition": "template",
"value_template": template,
}
else:
template = "{{ is_state('sensor.test', 'done') }}"
if direct_template:
sequence["repeat"]["until"] = template
else:
sequence["repeat"]["until"] = {
"condition": "template",
"value_template": template,
}
script_obj = script.Script(
hass, cv.SCRIPT_SCHEMA(sequence), "Test Name", "test_domain"
)
wait_started = async_watch_for_action(script_obj, "wait")
hass.states.async_set("sensor.test", "1")
hass.async_create_task(script_obj.async_run(context=Context()))
try:
for index in range(2, count + 1):
await asyncio.wait_for(wait_started.wait(), 1)
wait_started.clear()
hass.states.async_set("sensor.test", "next")
await asyncio.wait_for(wait_started.wait(), 1)
wait_started.clear()
hass.states.async_set("sensor.test", index)
await asyncio.wait_for(wait_started.wait(), 1)
wait_started.clear()
hass.states.async_set("sensor.test", "next")
await asyncio.wait_for(wait_started.wait(), 1)
wait_started.clear()
hass.states.async_set("sensor.test", "done")
await asyncio.wait_for(hass.async_block_till_done(), 1)
except asyncio.TimeoutError:
await script_obj.async_stop()
raise
assert len(events) == count
for index, event in enumerate(events):
assert event.data.get("first") == (index == 0)
assert event.data.get("index") == index + 1
@pytest.mark.parametrize("condition", ["while", "until"])
async def test_repeat_var_in_condition(hass, condition):
"""Test repeat action w/ while option."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = {"repeat": {"sequence": {"event": event}}}
if condition == "while":
sequence["repeat"]["while"] = {
"condition": "template",
"value_template": "{{ repeat.index <= 2 }}",
}
else:
sequence["repeat"]["until"] = {
"condition": "template",
"value_template": "{{ repeat.index == 2 }}",
}
script_obj = script.Script(
hass, cv.SCRIPT_SCHEMA(sequence), "Test Name", "test_domain"
)
with mock.patch(
"homeassistant.helpers.condition._LOGGER.error",
side_effect=AssertionError("Template Error"),
):
await script_obj.async_run(context=Context())
assert len(events) == 2
@pytest.mark.parametrize(
"variables,first_last,inside_x",
[
(None, {"repeat": None, "x": None}, None),
(MappingProxyType({"x": 1}), {"repeat": None, "x": 1}, 1),
],
)
async def test_repeat_nested(hass, variables, first_last, inside_x):
"""Test nested repeats."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
[
{
"event": event,
"event_data_template": {
"repeat": "{{ None if repeat is not defined else repeat }}",
"x": "{{ None if x is not defined else x }}",
},
},
{
"repeat": {
"count": 2,
"sequence": [
{
"event": event,
"event_data_template": {
"first": "{{ repeat.first }}",
"index": "{{ repeat.index }}",
"last": "{{ repeat.last }}",
"x": "{{ None if x is not defined else x }}",
},
},
{
"repeat": {
"count": 2,
"sequence": {
"event": event,
"event_data_template": {
"first": "{{ repeat.first }}",
"index": "{{ repeat.index }}",
"last": "{{ repeat.last }}",
"x": "{{ None if x is not defined else x }}",
},
},
}
},
{
"event": event,
"event_data_template": {
"first": "{{ repeat.first }}",
"index": "{{ repeat.index }}",
"last": "{{ repeat.last }}",
"x": "{{ None if x is not defined else x }}",
},
},
],
}
},
{
"event": event,
"event_data_template": {
"repeat": "{{ None if repeat is not defined else repeat }}",
"x": "{{ None if x is not defined else x }}",
},
},
]
)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
with mock.patch(
"homeassistant.helpers.condition._LOGGER.error",
side_effect=AssertionError("Template Error"),
):
await script_obj.async_run(variables, Context())
assert len(events) == 10
assert events[0].data == first_last
assert events[-1].data == first_last
for index, result in enumerate(
(
(True, 1, False, inside_x),
(True, 1, False, inside_x),
(False, 2, True, inside_x),
(True, 1, False, inside_x),
(False, 2, True, inside_x),
(True, 1, False, inside_x),
(False, 2, True, inside_x),
(False, 2, True, inside_x),
),
1,
):
assert events[index].data == {
"first": result[0],
"index": result[1],
"last": result[2],
"x": result[3],
}
@pytest.mark.parametrize("var,result", [(1, "first"), (2, "second"), (3, "default")])
async def test_choose(hass, var, result):
"""Test choose action."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
{
"choose": [
{
"conditions": {
"condition": "template",
"value_template": "{{ var == 1 }}",
},
"sequence": {"event": event, "event_data": {"choice": "first"}},
},
{
"conditions": "{{ var == 2 }}",
"sequence": {"event": event, "event_data": {"choice": "second"}},
},
],
"default": {"event": event, "event_data": {"choice": "default"}},
}
)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
await script_obj.async_run(MappingProxyType({"var": var}), Context())
await hass.async_block_till_done()
assert len(events) == 1
assert events[0].data["choice"] == result
@pytest.mark.parametrize(
"action",
[
{"repeat": {"count": 1, "sequence": {"event": "abc"}}},
{"choose": {"conditions": [], "sequence": {"event": "abc"}}},
{"choose": [], "default": {"event": "abc"}},
],
)
async def test_multiple_runs_repeat_choose(hass, caplog, action):
"""Test parallel runs with repeat & choose actions & max_runs > default."""
max_runs = script.DEFAULT_MAX + 1
script_obj = script.Script(
hass,
cv.SCRIPT_SCHEMA(action),
"Test Name",
"test_domain",
script_mode="parallel",
max_runs=max_runs,
)
events = async_capture_events(hass, "abc")
for _ in range(max_runs):
hass.async_create_task(script_obj.async_run(context=Context()))
await hass.async_block_till_done()
assert "WARNING" not in caplog.text
assert "ERROR" not in caplog.text
assert len(events) == max_runs
async def test_last_triggered(hass):
"""Test the last_triggered."""
event = "test_event"
sequence = cv.SCRIPT_SCHEMA({"event": event})
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
assert script_obj.last_triggered is None
time = dt_util.utcnow()
with mock.patch("homeassistant.helpers.script.utcnow", return_value=time):
await script_obj.async_run(context=Context())
await hass.async_block_till_done()
assert script_obj.last_triggered == time
async def test_propagate_error_service_not_found(hass):
"""Test that a script aborts when a service is not found."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA([{"service": "test.script"}, {"event": event}])
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
with pytest.raises(exceptions.ServiceNotFound):
await script_obj.async_run(context=Context())
assert len(events) == 0
assert not script_obj.is_running
async def test_propagate_error_invalid_service_data(hass):
"""Test that a script aborts when we send invalid service data."""
event = "test_event"
events = async_capture_events(hass, event)
calls = async_mock_service(hass, "test", "script", vol.Schema({"text": str}))
sequence = cv.SCRIPT_SCHEMA(
[{"service": "test.script", "data": {"text": 1}}, {"event": event}]
)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
with pytest.raises(vol.Invalid):
await script_obj.async_run(context=Context())
assert len(events) == 0
assert len(calls) == 0
assert not script_obj.is_running
async def test_propagate_error_service_exception(hass):
"""Test that a script aborts when a service throws an exception."""
event = "test_event"
events = async_capture_events(hass, event)
@callback
def record_call(service):
"""Add recorded event to set."""
raise ValueError("BROKEN")
hass.services.async_register("test", "script", record_call)
sequence = cv.SCRIPT_SCHEMA([{"service": "test.script"}, {"event": event}])
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
with pytest.raises(ValueError):
await script_obj.async_run(context=Context())
assert len(events) == 0
assert not script_obj.is_running
async def test_referenced_entities(hass):
"""Test referenced entities."""
script_obj = script.Script(
hass,
cv.SCRIPT_SCHEMA(
[
{
"service": "test.script",
"data": {"entity_id": "light.service_not_list"},
},
{
"service": "test.script",
"data": {"entity_id": ["light.service_list"]},
},
{
"service": "test.script",
"data": {"entity_id": "{{ 'light.service_template' }}"},
},
{
"condition": "state",
"entity_id": "sensor.condition",
"state": "100",
},
{"service": "test.script", "data": {"without": "entity_id"}},
{"scene": "scene.hello"},
{"event": "test_event"},
{"delay": "{{ delay_period }}"},
]
),
"Test Name",
"test_domain",
)
assert script_obj.referenced_entities == {
"light.service_not_list",
"light.service_list",
"sensor.condition",
"scene.hello",
}
# Test we cache results.
assert script_obj.referenced_entities is script_obj.referenced_entities
async def test_referenced_devices(hass):
"""Test referenced entities."""
script_obj = script.Script(
hass,
cv.SCRIPT_SCHEMA(
[
{"domain": "light", "device_id": "script-dev-id"},
{
"condition": "device",
"device_id": "condition-dev-id",
"domain": "switch",
},
]
),
"Test Name",
"test_domain",
)
assert script_obj.referenced_devices == {"script-dev-id", "condition-dev-id"}
# Test we cache results.
assert script_obj.referenced_devices is script_obj.referenced_devices
@contextmanager
def does_not_raise():
"""Indicate no exception is expected."""
yield
async def test_script_mode_single(hass, caplog):
"""Test overlapping runs with max_runs = 1."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event, "event_data": {"value": 1}},
{"wait_template": "{{ states.switch.test.state == 'off' }}"},
{"event": event, "event_data": {"value": 2}},
]
)
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
wait_started_flag = async_watch_for_action(script_obj, "wait")
try:
hass.states.async_set("switch.test", "on")
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
assert len(events) == 1
assert events[0].data["value"] == 1
# Start second run of script while first run is suspended in wait_template.
await script_obj.async_run(context=Context())
assert "Already running" in caplog.text
assert script_obj.is_running
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
hass.states.async_set("switch.test", "off")
await hass.async_block_till_done()
assert not script_obj.is_running
assert len(events) == 2
assert events[1].data["value"] == 2
@pytest.mark.parametrize("max_exceeded", [None, "WARNING", "INFO", "ERROR", "SILENT"])
@pytest.mark.parametrize(
"script_mode,max_runs", [("single", 1), ("parallel", 2), ("queued", 2)]
)
async def test_max_exceeded(hass, caplog, max_exceeded, script_mode, max_runs):
"""Test max_exceeded option."""
sequence = cv.SCRIPT_SCHEMA(
{"wait_template": "{{ states.switch.test.state == 'off' }}"}
)
if max_exceeded is None:
script_obj = script.Script(
hass,
sequence,
"Test Name",
"test_domain",
script_mode=script_mode,
max_runs=max_runs,
)
else:
script_obj = script.Script(
hass,
sequence,
"Test Name",
"test_domain",
script_mode=script_mode,
max_runs=max_runs,
max_exceeded=max_exceeded,
)
hass.states.async_set("switch.test", "on")
for _ in range(max_runs + 1):
hass.async_create_task(script_obj.async_run(context=Context()))
hass.states.async_set("switch.test", "off")
await hass.async_block_till_done()
if max_exceeded is None:
max_exceeded = "WARNING"
if max_exceeded == "SILENT":
assert not any(
any(
message in rec.message
for message in ("Already running", "Maximum number of runs exceeded")
)
for rec in caplog.records
)
else:
assert any(
rec.levelname == max_exceeded
and any(
message in rec.message
for message in ("Already running", "Maximum number of runs exceeded")
)
for rec in caplog.records
)
@pytest.mark.parametrize(
"script_mode,messages,last_events",
[("restart", ["Restarting"], [2]), ("parallel", [], [2, 2])],
)
async def test_script_mode_2(hass, caplog, script_mode, messages, last_events):
"""Test overlapping runs with max_runs > 1."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event, "event_data": {"value": 1}},
{"wait_template": "{{ states.switch.test.state == 'off' }}"},
{"event": event, "event_data": {"value": 2}},
]
)
logger = logging.getLogger("TEST")
max_runs = 1 if script_mode == "restart" else 2
script_obj = script.Script(
hass,
sequence,
"Test Name",
"test_domain",
script_mode=script_mode,
max_runs=max_runs,
logger=logger,
)
wait_started_flag = async_watch_for_action(script_obj, "wait")
try:
hass.states.async_set("switch.test", "on")
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
assert len(events) == 1
assert events[0].data["value"] == 1
# Start second run of script while first run is suspended in wait_template.
wait_started_flag.clear()
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
assert len(events) == 2
assert events[1].data["value"] == 1
assert all(
any(
rec.levelname == "INFO"
and rec.name == "TEST"
and message in rec.message
for rec in caplog.records
)
for message in messages
)
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
hass.states.async_set("switch.test", "off")
await hass.async_block_till_done()
assert not script_obj.is_running
assert len(events) == 2 + len(last_events)
for idx, value in enumerate(last_events, start=2):
assert events[idx].data["value"] == value
async def test_script_mode_queued(hass):
"""Test overlapping runs with script_mode = 'queued' & max_runs > 1."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event, "event_data": {"value": 1}},
{
"wait_template": "{{ states.switch.test.state == 'off' }}",
"alias": "wait_1",
},
{"event": event, "event_data": {"value": 2}},
{
"wait_template": "{{ states.switch.test.state == 'on' }}",
"alias": "wait_2",
},
]
)
logger = logging.getLogger("TEST")
script_obj = script.Script(
hass,
sequence,
"Test Name",
"test_domain",
script_mode="queued",
max_runs=2,
logger=logger,
)
watch_messages = []
@callback
def check_action():
for message, flag in watch_messages:
if script_obj.last_action and message in script_obj.last_action:
flag.set()
script_obj.change_listener = check_action
wait_started_flag_1 = asyncio.Event()
watch_messages.append(("wait_1", wait_started_flag_1))
wait_started_flag_2 = asyncio.Event()
watch_messages.append(("wait_2", wait_started_flag_2))
try:
assert not script_obj.is_running
assert script_obj.runs == 0
hass.states.async_set("switch.test", "on")
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(wait_started_flag_1.wait(), 1)
assert script_obj.is_running
assert script_obj.runs == 1
assert len(events) == 1
assert events[0].data["value"] == 1
# Start second run of script while first run is suspended in wait_template.
# This second run should not start until the first run has finished.
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.sleep(0)
assert script_obj.is_running
assert script_obj.runs == 2
assert len(events) == 1
hass.states.async_set("switch.test", "off")
await asyncio.wait_for(wait_started_flag_2.wait(), 1)
assert script_obj.is_running
assert script_obj.runs == 2
assert len(events) == 2
assert events[1].data["value"] == 2
wait_started_flag_1.clear()
hass.states.async_set("switch.test", "on")
await asyncio.wait_for(wait_started_flag_1.wait(), 1)
assert script_obj.is_running
assert script_obj.runs == 1
assert len(events) == 3
assert events[2].data["value"] == 1
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
hass.states.async_set("switch.test", "off")
await asyncio.sleep(0)
hass.states.async_set("switch.test", "on")
await hass.async_block_till_done()
assert not script_obj.is_running
assert script_obj.runs == 0
assert len(events) == 4
assert events[3].data["value"] == 2
async def test_script_mode_queued_cancel(hass):
"""Test canceling with a queued run."""
script_obj = script.Script(
hass,
cv.SCRIPT_SCHEMA({"wait_template": "{{ false }}"}),
"Test Name",
"test_domain",
script_mode="queued",
max_runs=2,
)
wait_started_flag = async_watch_for_action(script_obj, "wait")
try:
assert not script_obj.is_running
assert script_obj.runs == 0
task1 = hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(wait_started_flag.wait(), 1)
task2 = hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.sleep(0)
assert script_obj.is_running
assert script_obj.runs == 2
with pytest.raises(asyncio.CancelledError):
task2.cancel()
await task2
assert script_obj.is_running
assert script_obj.runs == 1
with pytest.raises(asyncio.CancelledError):
task1.cancel()
await task1
assert not script_obj.is_running
assert script_obj.runs == 0
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
async def test_script_logging(hass, caplog):
"""Test script logging."""
script_obj = script.Script(hass, [], "Script with % Name", "test_domain")
script_obj._log("Test message with name %s", 1)
assert "Script with % Name: Test message with name 1" in caplog.text
async def test_shutdown_at(hass, caplog):
"""Test stopping scripts at shutdown."""
delay_alias = "delay step"
sequence = cv.SCRIPT_SCHEMA({"delay": {"seconds": 120}, "alias": delay_alias})
script_obj = script.Script(hass, sequence, "test script", "test_domain")
delay_started_flag = async_watch_for_action(script_obj, delay_alias)
try:
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(delay_started_flag.wait(), 1)
assert script_obj.is_running
assert script_obj.last_action == delay_alias
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
hass.bus.async_fire("homeassistant_stop")
await hass.async_block_till_done()
assert not script_obj.is_running
assert "Stopping scripts running at shutdown: test script" in caplog.text
async def test_shutdown_after(hass, caplog):
"""Test stopping scripts at shutdown."""
delay_alias = "delay step"
sequence = cv.SCRIPT_SCHEMA({"delay": {"seconds": 120}, "alias": delay_alias})
script_obj = script.Script(hass, sequence, "test script", "test_domain")
delay_started_flag = async_watch_for_action(script_obj, delay_alias)
hass.state = CoreState.stopping
hass.bus.async_fire("homeassistant_stop")
await hass.async_block_till_done()
try:
hass.async_create_task(script_obj.async_run(context=Context()))
await asyncio.wait_for(delay_started_flag.wait(), 1)
assert script_obj.is_running
assert script_obj.last_action == delay_alias
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=60))
await hass.async_block_till_done()
assert not script_obj.is_running
assert (
"Stopping scripts running too long after shutdown: test script"
in caplog.text
)
async def test_update_logger(hass, caplog):
"""Test updating logger."""
sequence = cv.SCRIPT_SCHEMA({"event": "test_event"})
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
await script_obj.async_run(context=Context())
await hass.async_block_till_done()
assert script.__name__ in caplog.text
log_name = "testing.123"
script_obj.update_logger(logging.getLogger(log_name))
await script_obj.async_run(context=Context())
await hass.async_block_till_done()
assert log_name in caplog.text
async def test_started_action(hass, caplog):
"""Test the callback of started_action."""
event = "test_event"
log_message = "The script started!"
logger = logging.getLogger("TEST")
sequence = cv.SCRIPT_SCHEMA({"event": event})
script_obj = script.Script(hass, sequence, "Test Name", "test_domain")
@callback
def started_action():
logger.info(log_message)
await script_obj.async_run(context=Context(), started_action=started_action)
await hass.async_block_till_done()
assert log_message in caplog.text
async def test_set_variable(hass, caplog):
"""Test setting variables in scripts."""
sequence = cv.SCRIPT_SCHEMA(
[
{"variables": {"variable": "value"}},
{"service": "test.script", "data": {"value": "{{ variable }}"}},
]
)
script_obj = script.Script(hass, sequence, "test script", "test_domain")
mock_calls = async_mock_service(hass, "test", "script")
await script_obj.async_run(context=Context())
await hass.async_block_till_done()
assert mock_calls[0].data["value"] == "value"
async def test_set_redefines_variable(hass, caplog):
"""Test setting variables based on their current value."""
sequence = cv.SCRIPT_SCHEMA(
[
{"variables": {"variable": "1"}},
{"service": "test.script", "data": {"value": "{{ variable }}"}},
{"variables": {"variable": "{{ variable | int + 1 }}"}},
{"service": "test.script", "data": {"value": "{{ variable }}"}},
]
)
script_obj = script.Script(hass, sequence, "test script", "test_domain")
mock_calls = async_mock_service(hass, "test", "script")
await script_obj.async_run(context=Context())
await hass.async_block_till_done()
assert mock_calls[0].data["value"] == 1
assert mock_calls[1].data["value"] == 2
async def test_validate_action_config(hass):
"""Validate action config."""
configs = {
cv.SCRIPT_ACTION_CALL_SERVICE: {"service": "light.turn_on"},
cv.SCRIPT_ACTION_DELAY: {"delay": 5},
cv.SCRIPT_ACTION_WAIT_TEMPLATE: {
"wait_template": "{{ states.light.kitchen.state == 'on' }}"
},
cv.SCRIPT_ACTION_FIRE_EVENT: {"event": "my_event"},
cv.SCRIPT_ACTION_CHECK_CONDITION: {
"condition": "{{ states.light.kitchen.state == 'on' }}"
},
cv.SCRIPT_ACTION_DEVICE_AUTOMATION: {
"domain": "light",
"entity_id": "light.kitchen",
"device_id": "abcd",
"type": "turn_on",
},
cv.SCRIPT_ACTION_ACTIVATE_SCENE: {"scene": "scene.relax"},
cv.SCRIPT_ACTION_REPEAT: {
"repeat": {"count": 3, "sequence": [{"event": "repeat_event"}]}
},
cv.SCRIPT_ACTION_CHOOSE: {
"choose": [
{
"condition": "{{ states.light.kitchen.state == 'on' }}",
"sequence": [{"event": "choose_event"}],
}
],
"default": [{"event": "choose_default_event"}],
},
cv.SCRIPT_ACTION_WAIT_FOR_TRIGGER: {
"wait_for_trigger": [
{"platform": "event", "event_type": "wait_for_trigger_event"}
]
},
cv.SCRIPT_ACTION_VARIABLES: {"variables": {"hello": "world"}},
}
for key in cv.ACTION_TYPE_SCHEMAS:
assert key in configs, f"No validate config test found for {key}"
# Verify we raise if we don't know the action type
with patch(
"homeassistant.helpers.config_validation.determine_script_action",
return_value="non-existing",
), pytest.raises(ValueError):
await script.async_validate_action_config(hass, {})
for action_type, config in configs.items():
assert cv.determine_script_action(config) == action_type
try:
await script.async_validate_action_config(hass, config)
except vol.Invalid as err:
assert False, f"{action_type} config invalid: {err}"
async def test_embedded_wait_for_trigger_in_automation(hass):
"""Test an embedded wait for trigger."""
assert await async_setup_component(
hass,
"automation",
{
"automation": {
"trigger": {"platform": "event", "event_type": "test_event"},
"action": {
"repeat": {
"while": [
{
"condition": "template",
"value_template": '{{ is_state("test.value1", "trigger-while") }}',
}
],
"sequence": [
{"event": "trigger_wait_event"},
{
"wait_for_trigger": [
{
"platform": "template",
"value_template": '{{ is_state("test.value2", "trigger-wait") }}',
}
]
},
{"service": "test.script"},
],
}
},
}
},
)
hass.states.async_set("test.value1", "trigger-while")
hass.states.async_set("test.value2", "not-trigger-wait")
mock_calls = async_mock_service(hass, "test", "script")
async def trigger_wait_event(_):
# give the script time to attach the trigger.
await asyncio.sleep(0)
hass.states.async_set("test.value1", "not-trigger-while")
hass.states.async_set("test.value2", "trigger-wait")
hass.bus.async_listen("trigger_wait_event", trigger_wait_event)
# Start automation
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(mock_calls) == 1
| apache-2.0 |
mlperf/training_results_v0.6 | NVIDIA/benchmarks/gnmt/implementations/pytorch/seq2seq/data/tokenizer.py | 5 | 3344 | import logging
from collections import defaultdict
from functools import partial
import seq2seq.data.config as config
class Tokenizer:
"""
Tokenizer class.
"""
def __init__(self, vocab_fname=None, pad=1, separator='@@'):
"""
Constructor for the Tokenizer class.
:param vocab_fname: path to the file with vocabulary
:param pad: pads vocabulary to a multiple of 'pad' tokens
:param separator: tokenization separator
"""
if vocab_fname:
self.separator = separator
logging.info(f'Building vocabulary from {vocab_fname}')
vocab = [config.PAD_TOKEN, config.UNK_TOKEN,
config.BOS_TOKEN, config.EOS_TOKEN]
with open(vocab_fname) as vfile:
for line in vfile:
vocab.append(line.strip())
self.pad_vocabulary(vocab, pad)
self.vocab_size = len(vocab)
logging.info(f'Size of vocabulary: {self.vocab_size}')
self.tok2idx = defaultdict(partial(int, config.UNK))
for idx, token in enumerate(vocab):
self.tok2idx[token] = idx
self.idx2tok = {}
for key, value in self.tok2idx.items():
self.idx2tok[value] = key
def pad_vocabulary(self, vocab, pad):
"""
Pads vocabulary to a multiple of 'pad' tokens.
:param vocab: list with vocabulary
:param pad: integer
"""
vocab_size = len(vocab)
padded_vocab_size = (vocab_size + pad - 1) // pad * pad
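# e.g. vocab_size=31997, pad=8 -> padded_vocab_size=32000 (three filler tokens)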
for i in range(0, padded_vocab_size - vocab_size):
token = f'madeupword{i:04d}'
vocab.append(token)
assert len(vocab) % pad == 0
def get_state(self):
logging.info('Saving state of the tokenizer')
state = {
'separator': self.separator,
'vocab_size': self.vocab_size,
'tok2idx': self.tok2idx,
'idx2tok': self.idx2tok,
}
return state
def set_state(self, state):
logging.info('Restoring state of the tokenizer')
self.separator = state['separator']
self.vocab_size = state['vocab_size']
self.tok2idx = state['tok2idx']
self.idx2tok = state['idx2tok']
def segment(self, line):
"""
Tokenizes a single sentence and adds special BOS and EOS tokens.
:param line: sentence
:returns: list representing the tokenized sentence
"""
line = line.strip().split()
entry = [self.tok2idx[i] for i in line]
entry = [config.BOS] + entry + [config.EOS]
return entry
def detokenize(self, inputs, delim=' '):
"""
Detokenizes a single sentence and removes token separator characters.
:param inputs: sequence of tokens
:param delim: tokenization delimiter
:returns: string representing the detokenized sentence
"""
detok = delim.join([self.idx2tok[idx] for idx in inputs])
detok = detok.replace(self.separator + ' ', '')
detok = detok.replace(self.separator, '')
detok = detok.replace(config.BOS_TOKEN, '')
detok = detok.replace(config.EOS_TOKEN, '')
detok = detok.replace(config.PAD_TOKEN, '')
detok = detok.strip()
return detok
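# End-to-end sketch (not part of the original module): the vocabulary file,
# its contents, and the sentence are made up; '@@' is the default separator.
if __name__ == '__main__':
    tok = Tokenizer('vocab.bpe.32000')       # hypothetical vocab path
    ids = tok.segment('how@@ ever are you')  # [BOS, ..., EOS]; pieces missing
                                             # from the vocabulary map to UNK
    print(tok.detokenize(ids))               # -> 'however are you'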
| apache-2.0 |
terkkila/scikit-learn | sklearn/feature_extraction/tests/test_text.py | 75 | 34122 | from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from nose import SkipTest
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_almost_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.testing import (assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
return strip_accents_unicode(s).upper()
def strip_eacute(s):
return s.replace('\xe9', 'e')
def split_tokenize(s):
return s.split()
def lazy_analyze(s):
return ['the_ultimate_feature']
def test_strip_accents():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_unicode(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_unicode(a), expected)
# check some arabic
a = '\u0625' # alef with a hamza below
expected = '\u0627' # simple alef
assert_equal(strip_accents_unicode(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_ascii(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_ascii(a), expected)
# check some arabic
a = '\u0625' # alef with a hamza below
expected = '' # alef has no direct ascii match
assert_equal(strip_accents_ascii(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
for Vectorizer in (CountVectorizer, HashingVectorizer):
wa = Vectorizer(strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon']
assert_equal(wa(text), expected)
text = "This is a test, really.\n\n I met Harry yesterday."
expected = ['this', 'is', 'test', 'really', 'met', 'harry',
'yesterday']
assert_equal(wa(text), expected)
wa = Vectorizer(input='file').build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['this', 'is', 'test', 'with', 'file', 'like',
'object']
assert_equal(wa(text), expected)
# with custom preprocessor
wa = Vectorizer(preprocessor=uppercase).build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
" c'\xe9tait pas tr\xeas bon.")
expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
'ETAIT', 'PAS', 'TRES', 'BON']
assert_equal(wa(text), expected)
# with custom tokenizer
wa = Vectorizer(tokenizer=split_tokenize,
strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
"c'etait", 'pas', 'tres', 'bon.']
assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
wa = CountVectorizer(analyzer="word", strip_accents='unicode',
ngram_range=(1, 2)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
'etait pas', 'pas tres', 'tres bon']
assert_equal(wa(text), expected)
def test_unicode_decode_error():
# decode_error defaults to strict, so this should fail
# First, encode (as bytes) a unicode string.
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
text_bytes = text.encode('utf-8')
# Then let the Analyzer try to decode it as ascii. It should fail,
# because we have given it an incorrect encoding.
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, wa, text_bytes)
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
assert_equal(cnga(text)[:5], expected)
expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
assert_equal(cnga(text)[-5:], expected)
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = [' th', 'thi', 'his', 'is ', ' thi']
assert_equal(cnga(text)[:5], expected)
expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char_wb',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("A test with a file-like object!")
expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
vocab = {"pizza": 0, "beer": 1}
terms = set(vocab.keys())
# Try a few of the supported types.
for typ in [dict, list, iter, partial(defaultdict, int)]:
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
vect.fit(JUNK_FOOD_DOCS)
if isinstance(v, Mapping):
assert_equal(vect.vocabulary_, vocab)
else:
assert_equal(set(vect.vocabulary_), terms)
X = vect.transform(JUNK_FOOD_DOCS)
assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
what_we_like = ["pizza", "beer"]
pipe = Pipeline([
('count', CountVectorizer(vocabulary=what_we_like)),
('tfidf', TfidfTransformer())])
X = pipe.fit_transform(ALL_FOOD_DOCS)
assert_equal(set(pipe.named_steps['count'].vocabulary_),
set(what_we_like))
assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indeces():
vocab = {"pizza": 0, "beer": 0}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
vocab = {"pizza": 1, "beer": 2}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
cv = CountVectorizer()
cv.set_params(stop_words='english')
assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
cv.set_params(stop_words='_bad_str_stop_')
assert_raises(ValueError, cv.get_stop_words)
cv.set_params(stop_words='_bad_unicode_stop_')
assert_raises(ValueError, cv.get_stop_words)
stoplist = ['some', 'other', 'words']
cv.set_params(stop_words=stoplist)
assert_equal(cv.get_stop_words(), stoplist)
def test_countvectorizer_empty_vocabulary():
try:
vect = CountVectorizer(vocabulary=[])
vect.fit(["foo"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
try:
v = CountVectorizer(max_df=1.0, stop_words="english")
# fit on stopwords only
v.fit(["to be or not to be", "and me too", "and so do you"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
cv = CountVectorizer()
X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# this is robust to features with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
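# Background note (sketch of the formula sklearn uses here): smooth_idf
# acts as if one extra document containing every term were present, i.e.
# idf(t) = log((1 + n_samples) / (1 + df(t))) + 1, which keeps the idf
# finite and non-negative even for a feature with only zeros -- the
# robustness asserted just above.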
def test_tfidf_no_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# the lack of smoothing makes IDF fragile in the presence of features with
# only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
1. / np.array([0.])
numpy_provides_div0_warning = len(w) == 1
in_warning_message = 'divide by zero'
tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
tr.fit_transform, X).toarray()
if not numpy_provides_div0_warning:
raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
X = [[1], [2], [3]]
tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
tfidf = tr.fit_transform(X).toarray()
assert_equal(tfidf[0], 1)
assert_greater(tfidf[1], tfidf[0])
assert_greater(tfidf[2], tfidf[1])
assert_less(tfidf[1], 2)
assert_less(tfidf[2], 3)
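# Background note (formula assumed from the sklearn docs): sublinear_tf
# replaces tf with 1 + log(tf), so counts 1, 2, 3 become 1.0, ~1.69 and
# ~2.10 -- strictly increasing but sub-linear, which is exactly what the
# assertions above check.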
def test_vectorizer():
# raw documents as an iterator
train_data = iter(ALL_FOOD_DOCS[:-1])
test_data = [ALL_FOOD_DOCS[-1]]
n_train = len(ALL_FOOD_DOCS) - 1
# test without vocabulary
v1 = CountVectorizer(max_df=0.5)
counts_train = v1.fit_transform(train_data)
if hasattr(counts_train, 'tocsr'):
counts_train = counts_train.tocsr()
assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
# build a vectorizer v2 with the same vocabulary as the one fitted by v1
v2 = CountVectorizer(vocabulary=v1.vocabulary_)
# compare that the two vectorizers give the same output on the test sample
for v in (v1, v2):
counts_test = v.transform(test_data)
if hasattr(counts_test, 'tocsr'):
counts_test = counts_test.tocsr()
vocabulary = v.vocabulary_
assert_equal(counts_test[0, vocabulary["salad"]], 1)
assert_equal(counts_test[0, vocabulary["tomato"]], 1)
assert_equal(counts_test[0, vocabulary["water"]], 1)
# stop word from the fixed list
assert_false("the" in vocabulary)
# stop word found automatically by the vectorizer DF thresholding
# words that are high frequent across the complete corpus are likely
# to be not informative (either real stop words or extraction
# artifacts)
assert_false("copyright" in vocabulary)
# not present in the sample
assert_equal(counts_test[0, vocabulary["coke"]], 0)
assert_equal(counts_test[0, vocabulary["burger"]], 0)
assert_equal(counts_test[0, vocabulary["beer"]], 0)
assert_equal(counts_test[0, vocabulary["pizza"]], 0)
# test tf-idf
t1 = TfidfTransformer(norm='l1')
tfidf = t1.fit(counts_train).transform(counts_train).toarray()
assert_equal(len(t1.idf_), len(v1.vocabulary_))
assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
# test tf-idf with new data
tfidf_test = t1.transform(counts_test).toarray()
assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
# test tf alone
t2 = TfidfTransformer(norm='l1', use_idf=False)
tf = t2.fit(counts_train).transform(counts_train).toarray()
assert_equal(t2.idf_, None)
# test idf transform with unlearned idf vector
t3 = TfidfTransformer(use_idf=True)
assert_raises(ValueError, t3.transform, counts_train)
# test idf transform with incompatible n_features
X = [[1, 1, 5],
[1, 1, 0]]
t3.fit(X)
X_incompt = [[1, 3],
[1, 3]]
assert_raises(ValueError, t3.transform, X_incompt)
# L1-normalized term frequencies sum to one
assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
# test the direct tfidf vectorizer
# (equivalent to term count vectorizer + tfidf transformer)
train_data = iter(ALL_FOOD_DOCS[:-1])
tv = TfidfVectorizer(norm='l1')
tv.max_df = v1.max_df
tfidf2 = tv.fit_transform(train_data).toarray()
assert_false(tv.fixed_vocabulary_)
assert_array_almost_equal(tfidf, tfidf2)
# test the direct tfidf vectorizer with new data
tfidf_test2 = tv.transform(test_data).toarray()
assert_array_almost_equal(tfidf_test, tfidf_test2)
# test transform on unfitted vectorizer with empty vocabulary
v3 = CountVectorizer(vocabulary=None)
assert_raises(ValueError, v3.transform, train_data)
# ascii preprocessor?
v3.set_params(strip_accents='ascii', lowercase=False)
assert_equal(v3.build_preprocessor(), strip_accents_ascii)
# error on bad strip_accents param
v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
assert_raises(ValueError, v3.build_preprocessor)
# error with bad analyzer type
v3.set_params(analyzer='_invalid_analyzer_type_')
assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
sublinear_tf=False)
tv.norm = 'l1'
assert_equal(tv._tfidf.norm, 'l1')
tv.use_idf = True
assert_true(tv._tfidf.use_idf)
tv.smooth_idf = True
assert_true(tv._tfidf.smooth_idf)
tv.sublinear_tf = True
assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
v = HashingVectorizer()
X = v.transform(ALL_FOOD_DOCS)
token_nnz = X.nnz
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# By default the hashed values receive a random sign and l2 normalization
# makes the feature values bounded
assert_true(np.min(X.data) > -1)
assert_true(np.min(X.data) < 0)
assert_true(np.max(X.data) > 0)
assert_true(np.max(X.data) < 1)
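# Background note: HashingVectorizer maps each token to a column via a
# hash modulo n_features and multiplies the count by a pseudo-random
# +1/-1 sign derived from the same hash, so colliding tokens tend to
# cancel in expectation rather than accumulate; that is why negative
# values appear here.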
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[i].data, 2), 1.0)
# Check vectorization with some non-default parameters
v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
X = v.transform(ALL_FOOD_DOCS)
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# ngrams generate more non zeros
ngrams_nnz = X.nnz
assert_true(ngrams_nnz > token_nnz)
assert_true(ngrams_nnz < 2 * token_nnz)
# makes the feature values bounded
assert_true(np.min(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[i].data, 1), 1.0)
def test_feature_names():
cv = CountVectorizer(max_df=0.5)
# test for Value error on unfitted/empty vocabulary
assert_raises(ValueError, cv.get_feature_names)
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert_equal(len(cv.vocabulary_), n_features)
feature_names = cv.get_feature_names()
assert_equal(len(feature_names), n_features)
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
vec_factories = (
CountVectorizer,
TfidfVectorizer,
)
expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
u'sparkling', u'water', u'the'])
for vec_factory in vec_factories:
# test bounded number of extracted features
vectorizer = vec_factory(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
# Regression test: max_features didn't work correctly in 0.14.
cv_1 = CountVectorizer(max_features=1)
cv_3 = CountVectorizer(max_features=3)
cv_None = CountVectorizer(max_features=None)
counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
features_1 = cv_1.get_feature_names()
features_3 = cv_3.get_feature_names()
features_None = cv_None.get_feature_names()
# The most common feature is "the", with frequency 7.
assert_equal(7, counts_1.max())
assert_equal(7, counts_3.max())
assert_equal(7, counts_None.max())
# The most common feature should be the same
assert_equal("the", features_1[np.argmax(counts_1)])
assert_equal("the", features_3[np.argmax(counts_3)])
assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', max_df=1.0)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
vect.max_df = 1
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', min_df=1)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.min_df = 2
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored
assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 4)
vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored
assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = CountVectorizer(analyzer='char', max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
assert_array_equal([[3, 1, 1, 0, 0],
[1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0],
[1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = HashingVectorizer(analyzer='char', non_negative=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X[0:1].data), 3)
assert_equal(np.max(X[1:2].data), 2)
assert_equal(X.dtype, np.float64)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X.data), 1)
assert_equal(X.dtype, np.float64)
# check the ability to change the dtype
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None, dtype=np.float64)
X = vect.transform(test_data)
assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
# raw documents
data = ALL_FOOD_DOCS
for vectorizer in (TfidfVectorizer(), CountVectorizer()):
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.2, random_state=0)
pipeline = Pipeline([('vect', CountVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('hinge', 'squared_hinge')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset every parameter combination converges to a 100%
# accuracy model, so grid search keeps the first candidate in the grid
# (the unigram representation) as the best estimator
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.1, random_state=0)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__norm': ('l1', 'l2'),
'svc__loss': ('hinge', 'squared_hinge'),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset every parameter combination converges to a 100%
# accuracy model, so grid search keeps the first candidate in the grid
# (the unigram representation) as the best estimator
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
assert_equal(best_vectorizer.norm, 'l2')
assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
cv_scores = cross_val_score(pipeline, data, target, cv=3)
assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
# tests that the count vectorizer works with Cyrillic.
document = (
"\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
"\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
"\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
"\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
"\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
"\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
"\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
"\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
"\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
"\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
"\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
"\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
"\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
"\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
"\x8f.")
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert_equal(X_counted.shape, (1, 15))
vect = HashingVectorizer(norm=None, non_negative=True)
X_hashed = vect.transform([document])
assert_equal(X_hashed.shape, (1, 2 ** 20))
# No collisions on such a small dataset
assert_equal(X_counted.nnz, X_hashed.nnz)
# When norm is None and non_negative, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
# non regression smoke test for inheritance issues
vocabulary = ['pizza', 'celeri']
vect = TfidfVectorizer(vocabulary=vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_equal(copy.get_params(), orig.get_params())
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_stop_words_removal():
# Ensure that deleting the stop_words_ attribute doesn't affect transform
fitted_vectorizers = (
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)
)
for vect in fitted_vectorizers:
vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
vect.stop_words_ = None
stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
delattr(vect, 'stop_words_')
stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
assert_array_equal(stop_None_transform, vect_transform)
assert_array_equal(stop_del_transform, vect_transform)
def test_pickling_transformer():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_array_equal(
copy.fit_transform(X).toarray(),
orig.fit_transform(X).toarray())
def test_non_unique_vocab():
vocab = ['a', 'b', 'c', 'a', 'a']
vect = CountVectorizer(vocabulary=vocab)
assert_raises(ValueError, vect.fit, [])
def test_hashingvectorizer_nan_in_docs():
# np.nan can appear when using pandas to load text fields from a csv file
# with missing values.
message = "np.nan is an invalid document, expected byte or unicode string."
exception = ValueError
def func():
hv = HashingVectorizer()
hv.fit_transform(['hello world', np.nan, 'hello hello'])
assert_raise_message(exception, message, func)
def test_tfidfvectorizer_binary():
# Non-regression test: TfidfVectorizer used to ignore its "binary" param.
v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
assert_true(v.binary)
X = v.fit_transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X.ravel(), [1, 1, 1, 0])
X2 = v.transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
vect = TfidfVectorizer(use_idf=True)
vect.fit(JUNK_FOOD_DOCS)
assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
def test_vectorizer_vocab_clone():
vect_vocab = TfidfVectorizer(vocabulary=["the"])
vect_vocab_clone = clone(vect_vocab)
vect_vocab.fit(ALL_FOOD_DOCS)
vect_vocab_clone.fit(ALL_FOOD_DOCS)
assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)
| bsd-3-clause |
huonw/servo | tests/wpt/css-tests/tools/html5lib/html5lib/treebuilders/etree_lxml.py | 1724 | 14031 | """Module for supporting the lxml.etree library. The idea here is to use as much
of the native library as possible, without using fragile hacks like custom element
names that break between releases. The downside of this is that we cannot represent
all possible trees; specifically the following are known to cause problems:
Text or comments as siblings of the root element
Doctypes with no name
When any of these things occur, we emit a DataLossWarning
"""
from __future__ import absolute_import, division, unicode_literals
import warnings
import re
import sys
from . import _base
from ..constants import DataLossWarning
from .. import constants
from . import etree as etree_builders
from .. import ihatexml
import lxml.etree as etree
fullTree = True
tag_regexp = re.compile("{([^}]*)}(.*)")
comment_type = etree.Comment("asd").tag
class DocumentType(object):
def __init__(self, name, publicId, systemId):
self.name = name
self.publicId = publicId
self.systemId = systemId
class Document(object):
def __init__(self):
self._elementTree = None
self._childNodes = []
def appendChild(self, element):
self._elementTree.getroot().addnext(element._element)
def _getChildNodes(self):
return self._childNodes
childNodes = property(_getChildNodes)
def testSerializer(element):
rv = []
finalText = None
infosetFilter = ihatexml.InfosetFilter()
def serializeElement(element, indent=0):
if not hasattr(element, "tag"):
if hasattr(element, "getroot"):
# Full tree case
rv.append("#document")
if element.docinfo.internalDTD:
if not (element.docinfo.public_id or
element.docinfo.system_url):
dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name
else:
dtd_str = """<!DOCTYPE %s "%s" "%s">""" % (
element.docinfo.root_name,
element.docinfo.public_id,
element.docinfo.system_url)
rv.append("|%s%s" % (' ' * (indent + 2), dtd_str))
next_element = element.getroot()
while next_element.getprevious() is not None:
next_element = next_element.getprevious()
while next_element is not None:
serializeElement(next_element, indent + 2)
next_element = next_element.getnext()
elif isinstance(element, str) or isinstance(element, bytes):
# Text in a fragment
assert isinstance(element, str) or sys.version_info.major == 2
rv.append("|%s\"%s\"" % (' ' * indent, element))
else:
# Fragment case
rv.append("#document-fragment")
for next_element in element:
serializeElement(next_element, indent + 2)
elif element.tag == comment_type:
rv.append("|%s<!-- %s -->" % (' ' * indent, element.text))
if hasattr(element, "tail") and element.tail:
rv.append("|%s\"%s\"" % (' ' * indent, element.tail))
else:
assert isinstance(element, etree._Element)
nsmatch = etree_builders.tag_regexp.match(element.tag)
if nsmatch is not None:
ns = nsmatch.group(1)
tag = nsmatch.group(2)
prefix = constants.prefixes[ns]
rv.append("|%s<%s %s>" % (' ' * indent, prefix,
infosetFilter.fromXmlName(tag)))
else:
rv.append("|%s<%s>" % (' ' * indent,
infosetFilter.fromXmlName(element.tag)))
if hasattr(element, "attrib"):
attributes = []
for name, value in element.attrib.items():
nsmatch = tag_regexp.match(name)
if nsmatch is not None:
ns, name = nsmatch.groups()
name = infosetFilter.fromXmlName(name)
prefix = constants.prefixes[ns]
attr_string = "%s %s" % (prefix, name)
else:
attr_string = infosetFilter.fromXmlName(name)
attributes.append((attr_string, value))
for name, value in sorted(attributes):
rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
if element.text:
rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text))
indent += 2
for child in element:
serializeElement(child, indent)
if hasattr(element, "tail") and element.tail:
rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail))
serializeElement(element, 0)
if finalText is not None:
rv.append("|%s\"%s\"" % (' ' * 2, finalText))
return "\n".join(rv)
def tostring(element):
"""Serialize an element and its child nodes to a string"""
rv = []
finalText = None
def serializeElement(element):
if not hasattr(element, "tag"):
if element.docinfo.internalDTD:
if element.docinfo.doctype:
dtd_str = element.docinfo.doctype
else:
dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name
rv.append(dtd_str)
serializeElement(element.getroot())
elif element.tag == comment_type:
rv.append("<!--%s-->" % (element.text,))
else:
# This is assumed to be an ordinary element
if not element.attrib:
rv.append("<%s>" % (element.tag,))
else:
attr = " ".join(["%s=\"%s\"" % (name, value)
for name, value in element.attrib.items()])
rv.append("<%s %s>" % (element.tag, attr))
if element.text:
rv.append(element.text)
for child in element:
serializeElement(child)
rv.append("</%s>" % (element.tag,))
if hasattr(element, "tail") and element.tail:
rv.append(element.tail)
serializeElement(element)
if finalText is not None:
rv.append("%s\"%s\"" % (' ' * 2, finalText))  # finalText is never set in tostring, so this branch is effectively dead
return "".join(rv)
class TreeBuilder(_base.TreeBuilder):
documentClass = Document
doctypeClass = DocumentType
elementClass = None
commentClass = None
fragmentClass = Document
implementation = etree
def __init__(self, namespaceHTMLElements, fullTree=False):
builder = etree_builders.getETreeModule(etree, fullTree=fullTree)
infosetFilter = self.infosetFilter = ihatexml.InfosetFilter()
self.namespaceHTMLElements = namespaceHTMLElements
class Attributes(dict):
def __init__(self, element, value={}):
self._element = element
dict.__init__(self, value)
for key, value in self.items():
if isinstance(key, tuple):
name = "{%s}%s" % (key[2], infosetFilter.coerceAttribute(key[1]))
else:
name = infosetFilter.coerceAttribute(key)
self._element._element.attrib[name] = value
def __setitem__(self, key, value):
dict.__setitem__(self, key, value)
if isinstance(key, tuple):
name = "{%s}%s" % (key[2], infosetFilter.coerceAttribute(key[1]))
else:
name = infosetFilter.coerceAttribute(key)
self._element._element.attrib[name] = value
class Element(builder.Element):
def __init__(self, name, namespace):
name = infosetFilter.coerceElement(name)
builder.Element.__init__(self, name, namespace=namespace)
self._attributes = Attributes(self)
def _setName(self, name):
self._name = infosetFilter.coerceElement(name)
self._element.tag = self._getETreeTag(
self._name, self._namespace)
def _getName(self):
return infosetFilter.fromXmlName(self._name)
name = property(_getName, _setName)
def _getAttributes(self):
return self._attributes
def _setAttributes(self, attributes):
self._attributes = Attributes(self, attributes)
attributes = property(_getAttributes, _setAttributes)
def insertText(self, data, insertBefore=None):
data = infosetFilter.coerceCharacters(data)
builder.Element.insertText(self, data, insertBefore)
def appendChild(self, child):
builder.Element.appendChild(self, child)
class Comment(builder.Comment):
def __init__(self, data):
data = infosetFilter.coerceComment(data)
builder.Comment.__init__(self, data)
def _setData(self, data):
data = infosetFilter.coerceComment(data)
self._element.text = data
def _getData(self):
return self._element.text
data = property(_getData, _setData)
self.elementClass = Element
self.commentClass = Comment  # bind the infoset-filtering Comment subclass defined above
# self.fragmentClass = builder.DocumentFragment
_base.TreeBuilder.__init__(self, namespaceHTMLElements)
def reset(self):
_base.TreeBuilder.reset(self)
self.insertComment = self.insertCommentInitial
self.initial_comments = []
self.doctype = None
def testSerializer(self, element):
return testSerializer(element)
def getDocument(self):
if fullTree:
return self.document._elementTree
else:
return self.document._elementTree.getroot()
def getFragment(self):
fragment = []
element = self.openElements[0]._element
if element.text:
fragment.append(element.text)
fragment.extend(list(element))
if element.tail:
fragment.append(element.tail)
return fragment
def insertDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
if not name:
warnings.warn("lxml cannot represent empty doctype", DataLossWarning)
self.doctype = None
else:
coercedName = self.infosetFilter.coerceElement(name)
if coercedName != name:
warnings.warn("lxml cannot represent non-xml doctype", DataLossWarning)
doctype = self.doctypeClass(coercedName, publicId, systemId)
self.doctype = doctype
def insertCommentInitial(self, data, parent=None):
self.initial_comments.append(data)
def insertCommentMain(self, data, parent=None):
if (parent == self.document and
self.document._elementTree.getroot()[-1].tag == comment_type):
warnings.warn("lxml cannot represent adjacent comments beyond the root elements", DataLossWarning)
super(TreeBuilder, self).insertComment(data, parent)
def insertRoot(self, token):
"""Create the document root"""
# Because of the way libxml2 works, it doesn't seem to be possible to
# alter information like the doctype after the tree has been parsed.
# Therefore we need to use the built-in parser to create our initial
# tree, after which we can add elements like normal
docStr = ""
if self.doctype:
assert self.doctype.name
docStr += "<!DOCTYPE %s" % self.doctype.name
if (self.doctype.publicId is not None or
self.doctype.systemId is not None):
docStr += (' PUBLIC "%s" ' %
(self.infosetFilter.coercePubid(self.doctype.publicId or "")))
if self.doctype.systemId:
sysid = self.doctype.systemId
if sysid.find("'") >= 0 and sysid.find('"') >= 0:
warnings.warn("DOCTYPE system cannot contain single and double quotes", DataLossWarning)
sysid = sysid.replace("'", 'U00027')
if sysid.find("'") >= 0:
docStr += '"%s"' % sysid
else:
docStr += "'%s'" % sysid
else:
docStr += "''"
docStr += ">"
if self.doctype.name != token["name"]:
warnings.warn("lxml cannot represent doctype with a different name to the root element", DataLossWarning)
docStr += "<THIS_SHOULD_NEVER_APPEAR_PUBLICLY/>"
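# Illustrative example: for a plain <!DOCTYPE html> token stream, docStr
# becomes '<!DOCTYPE html><THIS_SHOULD_NEVER_APPEAR_PUBLICLY/>'; lxml
# parses that string, and the placeholder root is renamed to the real
# root tag a few lines below.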
root = etree.fromstring(docStr)
# Append the initial comments:
for comment_token in self.initial_comments:
root.addprevious(etree.Comment(comment_token["data"]))
# Create the root document and add the ElementTree to it
self.document = self.documentClass()
self.document._elementTree = root.getroottree()
# Give the root element the right name
name = token["name"]
namespace = token.get("namespace", self.defaultNamespace)
if namespace is None:
etree_tag = name
else:
etree_tag = "{%s}%s" % (namespace, name)
root.tag = etree_tag
# Add the root element to the internal child/open data structures
root_element = self.elementClass(name, namespace)
root_element._element = root
self.document._childNodes.append(root_element)
self.openElements.append(root_element)
# Reset to the default insert comment function
self.insertComment = self.insertCommentMain
| mpl-2.0 |
christopher-gillies/VCFScripts | make_vcfs_for_imputation_server.py | 1 | 14155 | #!/usr/bin/python
import argparse
import re
import gzip
import sys
import string
import os
from make import MakeEntry
from make import MakeFile
##############
# INPUT PROCESSING
##############
calling_dir = os.path.dirname(os.path.realpath(__file__))
print "########################################################"
print "Genotype File formatting pipeline for imputation server"
print "########################################################"
parser = argparse.ArgumentParser(description='This program takes a plink genotype file and formats it for the Imputation Server')
parser.add_argument('--bfile', help='plink formatted binary genotype file prefix (same as plink)', required=True)
parser.add_argument('--outdir', help='the output directory ', required=True)
parser.add_argument('--strandfile', help='Download from http://www.well.ox.ac.uk/~wrayner/strand/', required=False, default=None)
parser.add_argument('--checkVCF', help='checkVCF https://github.com/zhanxw/checkVCF', required=True)
parser.add_argument('--refseq', help='Reference fasta file with checkVCF.py', required=True)
parser.add_argument('--hrcsites', help='Download from ftp://ngs.sanger.ac.uk/production/hrc/HRC.r1-1/HRC.r1-1.GRCh37.wgs.mac5.sites.tab.gz', required=True)
parser.add_argument('--hrccheck', help='Download from http://www.well.ox.ac.uk/~wrayner/tools/#Checking', required=True)
# DEFAULTS
parser.add_argument('--skip_strand', help='ignore strand file', required=False, action="store_true")
parser.add_argument('--out_vcf_prefix', help='the prefix for the final vcf files', required=False, default="final")
parser.add_argument('--sort_buffer', help='Buffer size for sort program', required=False, default="4G")
parser.add_argument('--plink', help='plink executable location', required=False, default="plink")
parser.add_argument('--plink_threads', help='Number of threads for plink', required=False, default=1, type=int)
parser.add_argument('--tabix', help='tabix executable location', required=False, default="tabix")
parser.add_argument('--flip_ref_alt', help='flip_ref_alt.py executable location', required=False, default="{0}/{1}".format(calling_dir,"flip_ref_alt.py"))
parser.add_argument('--mind', help='plink --mind parameter (default 0.05)', type=float, default=0.05)
parser.add_argument('--geno', help='plink --geno parameter (default 0.05)', type=float, default=0.05)
args = parser.parse_args()
if not args.skip_strand and args.strandfile == None:
raise ValueError("Please specify a strandfile")
print
print "########################################################"
print "OPTIONS"
print "########################################################"
print
for attr, value in args.__dict__.iteritems():
print "{0:>15}\t\t{1!s}".format( "--{0}".format(attr), str(value))
##############
# Store inputs
##############
bfile = args.bfile
outdir = args.outdir
if outdir.endswith("/"):
outdir = outdir.rstrip("/")
skip_strand = args.skip_strand
outdir_temp = "{0}/tmp".format(outdir)
strandfile = args.strandfile
checkVCF = args.checkVCF
refseq = args.refseq
hrcsites = args.hrcsites
hrccheck = args.hrccheck
out_vcf_prefix = args.out_vcf_prefix
sort_buffer = args.sort_buffer
plink = args.plink
plink_threads = args.plink_threads
#vcfsort = args.vcfsort
tabix = args.tabix
flip_ref_alt = args.flip_ref_alt
mind = args.mind
geno = args.geno
#mk_test = MakeEntry("11111", ["print a","print b"], [])
#mk_test_a = MakeEntry("2222", ["print c","print d"], [mk_test])
#mk_file = MakeFile("all.ok", [ mk_test,mk_test_a ])
#print mk_file
print
print "CREATING MAKEFILE"
print
make_entries = []
############
# Create Commands
############
########
# Update positions, extract SNPs in the strand file, filter by genotype/individual missingness, flip strands, make frequency file
########
bfile_base = os.path.basename(bfile)
make_outdir_tmp_cmd = "mkdir -p {0}".format(outdir_temp)
pre_hrc_check_entry = None
if not skip_strand:
snp_id_file = "{0}/snp.ids".format(outdir_temp)
snp_id_command = "cat {0} | cut -f1 > {1}".format(strandfile,snp_id_file)
minus_strand_file = "{0}/minus.snp.ids".format(outdir_temp)
minus_strand_cmd = "cat {strandfile} | perl -lane 'print $$F[0] if $$F[4] eq \"-\"' > {minus_strand_file}".format(strandfile=strandfile, minus_strand_file=minus_strand_file)
bfile_plink_geno_prefix = "{0}/extract.geno".format(outdir_temp)
extract_snps_geno_filter_cmd = "{plink} --bfile {bfile} --geno {geno} --extract {snp_id_file} --make-bed --out {out} --threads {plink_threads} --allow-extra-chr".format(plink=plink, bfile=bfile, geno=geno, snp_id_file=snp_id_file, out=bfile_plink_geno_prefix,plink_threads=plink_threads)
bfile_plink_geno_pos_ind_prefix = "{0}/extract.geno.pos.ind".format(outdir_temp)
update_pos_ind_cmd = "{plink} --bfile {bfile} --mind {mind} --update-map {strandfile} 3 1 --make-bed --out {out} --threads {plink_threads} --allow-extra-chr".format(plink=plink, bfile=bfile_plink_geno_prefix, strandfile=strandfile, mind=mind, out=bfile_plink_geno_pos_ind_prefix, plink_threads=plink_threads)
bfile_plink_geno_pos_ind_flip_prefix = "{0}/extract.geno.pos.ind.flip".format(outdir_temp)
flip_cmd = "{plink} --bfile {bfile} --flip {minus_strand_file} --make-bed --freq --out {out} --threads {plink_threads} --allow-extra-chr".format(plink=plink, bfile=bfile_plink_geno_prefix, minus_strand_file=minus_strand_file, out=bfile_plink_geno_pos_ind_flip_prefix,plink_threads=plink_threads)
pre_hrc_check_entry = MakeEntry( "{0}/pre_hrc_check.OK".format(outdir_temp), [make_outdir_tmp_cmd, snp_id_command, minus_strand_cmd, extract_snps_geno_filter_cmd, update_pos_ind_cmd, flip_cmd], [], comment = "PRE-HRC CHECKING FORMATING" )
else:
bfile_plink_geno_prefix = "{0}/extract.geno".format(outdir_temp)
extract_snps_geno_filter_cmd = "{plink} --bfile {bfile} --geno {geno} --make-bed --out {out} --threads {plink_threads} --allow-extra-chr".format(plink=plink, bfile=bfile, geno=geno, out=bfile_plink_geno_prefix,plink_threads=plink_threads)
bfile_plink_geno_pos_ind_prefix = "{0}/extract.geno.pos.ind".format(outdir_temp)
update_pos_ind_cmd = "{plink} --bfile {bfile} --mind {mind} --make-bed --out {out} --threads {plink_threads} --allow-extra-chr".format(plink=plink, bfile=bfile_plink_geno_prefix, mind=mind, out=bfile_plink_geno_pos_ind_prefix, plink_threads=plink_threads)
bfile_plink_geno_pos_ind_flip_prefix = "{0}/extract.geno.pos.ind.flip".format(outdir_temp)
flip_cmd = "{plink} --bfile {bfile} --make-bed --freq --out {out} --threads {plink_threads} --allow-extra-chr".format(plink=plink, bfile=bfile_plink_geno_prefix, out=bfile_plink_geno_pos_ind_flip_prefix,plink_threads=plink_threads)
pre_hrc_check_entry = MakeEntry( "{0}/pre_hrc_check.OK".format(outdir_temp), [make_outdir_tmp_cmd, extract_snps_geno_filter_cmd, update_pos_ind_cmd, flip_cmd], [], comment = "PRE-HRC CHECKING FORMATING" )
flip_freq_file = "{0}/extract.geno.pos.ind.flip.frq".format(outdir_temp)
flip_bim_file = "{0}/extract.geno.pos.ind.flip.bim".format(outdir_temp)
make_entries.append(pre_hrc_check_entry)
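# Background note (assumed layout of the Wrayner strand files): column 1
# is the SNP id, column 3 the genome position (hence '--update-map ... 3 1'
# above) and column 5 the strand; SNPs listed as '-' are flipped with
# plink --flip so every genotype ends up on the forward strand before the
# HRC check.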
#######
# HRC-Check
#######
hrc_cmd = "cd {outdir_temp}; {hrccheck} --bim {flip_bim_file} --freq {flip_freq_file} --ref {hrcsites} --hrc -v".format(outdir_temp=outdir_temp,hrccheck=hrccheck, flip_bim_file=flip_bim_file, flip_freq_file=flip_freq_file, hrcsites=hrcsites)
hrc_check_entry = MakeEntry( "{0}/hrc_check.OK".format(outdir_temp), [hrc_cmd], [pre_hrc_check_entry], comment = "HRC CHECK " )
make_entries.append(hrc_check_entry)
#######
# HRC FORMAT
#######
# append the plink thread count and --allow-extra-chr to each plink command in the HRC-generated script
hrc_format_script = "cat {outdir_temp}/Run-plink.sh | perl -lane 'print $$_.\" --threads {plink_threads} --allow-extra-chr \" if $$_ =~ /^plink/' > {outdir_temp}/Run-plink-updated.sh".format(outdir_temp=outdir_temp,plink_threads=plink_threads)
hrc_run_cmd = "cd {outdir_temp}; sh {outdir_temp}/Run-plink-updated.sh".format(outdir_temp=outdir_temp)
hrc_format_entry = MakeEntry( "{0}/hrc_format.OK".format(outdir_temp), [hrc_format_script , hrc_run_cmd], [hrc_check_entry], comment = "HRC FORMAT" )
make_entries.append(hrc_format_entry)
#######
# Convert Plink Files to VCF sort and tabix
#######
# {outdir_temp}/extract.geno.pos.ind.flip-updated-chr7
hrc_bfile_prefix = []
hrc_vcf_file_prefix = []
hrc_vcf_file_out = []
hrc_vcf_file_out_sorted = []
hrc_vcf_file_out_sorted_prefix = []
vcf_convert_targets = []
vcf_sort_targets = []
# Chr start at 1
for i in range(1,24):
bfile_path = "{0}/extract.geno.pos.ind.flip-updated-chr{1}".format(outdir_temp,i)
vcf_path = "{0}/extract.geno.pos.ind.flip.updated.chr{1}.vcf.gz".format(outdir_temp,i)
vcf_sorted_path = "{0}/extract.geno.pos.ind.flip.updated.chr{1}.sorted.vcf.gz".format(outdir_temp,i)
vcf_sorted_prefix = "{0}/extract.geno.pos.ind.flip.updated.chr{1}.sorted".format(outdir_temp,i)
vcf_convert = "{0}/vcf.convert.chr{1}.OK".format(outdir_temp,i)
vcf_sort_tar = "{0}/vcf.sort.chr{1}.OK".format(outdir_temp,i)
vcf_prefix = "{0}/extract.geno.pos.ind.flip.updated.chr{1}".format(outdir_temp,i)
#hrc_bfile_prefix.append( os.path.splitext(bfile_path)[0] )
hrc_bfile_prefix.append( bfile_path )
hrc_vcf_file_prefix.append(vcf_prefix)
hrc_vcf_file_out.append(vcf_path)
hrc_vcf_file_out_sorted.append(vcf_sorted_path)
hrc_vcf_file_out_sorted_prefix.append(vcf_sorted_prefix)
vcf_convert_targets.append(vcf_convert)
vcf_sort_targets.append(vcf_sort_tar)
####
# Create Sort and Convert commands
####
convert_entries = []
sort_entries = []
# Array indices start at 0
for i in range(0,23):
plink_to_vcf_cmd = "{plink} --bfile {bfile} --recode vcf bgz --out {vcf_prefix} --threads {plink_threads}".format(plink=plink,bfile=hrc_bfile_prefix[i],vcf_prefix=hrc_vcf_file_prefix[i], plink_threads=plink_threads)
#vcf_sort_cmd = "{vcfsort} {vcf} -t {tmp} | bgzip -c > {sorted_vcf}".format(vcfsort=vcfsort,vcf=hrc_vcf_file_out[i],sorted_vcf=hrc_vcf_file_out_sorted[i],tmp=outdir_temp)
vcf_header_cmd = "zcat {vcf} | head -10000 | grep '^#' > {sorted_vcf}.header".format(vcf=hrc_vcf_file_out[i],sorted_vcf=hrc_vcf_file_out_sorted[i])
#vcf_sort_cmd = "( cat {sorted_vcf}.header; (zcat {vcf} | grep -v '^#' | sort -t$$'\\t' -T {tmp} -S {sort_buffer} -k1,1 -k2,2n) ) | bgzip -c > {sorted_vcf}".format(sort_buffer=sort_buffer,vcf=hrc_vcf_file_out[i],sorted_vcf=hrc_vcf_file_out_sorted[i],tmp=outdir_temp)
vcf_sort_cmd = "( cat {sorted_vcf}.header; (zcat {vcf} | grep -v '^#' | sort -t$$'\\t' -S {sort_buffer} -k1,1 -k2,2n) ) | bgzip -c > {sorted_vcf}".format(sort_buffer=sort_buffer,vcf=hrc_vcf_file_out[i],sorted_vcf=hrc_vcf_file_out_sorted[i],tmp=outdir_temp)
tabix_cmd = "{tabix} -pvcf {vcf}".format(tabix=tabix,vcf=hrc_vcf_file_out_sorted[i])
convert_to_vcf_entry = MakeEntry(vcf_convert_targets[i], [ plink_to_vcf_cmd ], [ hrc_format_entry ], comment="VCF conversion from plink for chr{0}".format(i + 1))
sort_entry = MakeEntry(vcf_sort_targets[i], [ vcf_header_cmd, vcf_sort_cmd, tabix_cmd ], [ convert_to_vcf_entry ], comment="Sort VCF from plink for chr{0}".format(i + 1))
make_entries.append(convert_to_vcf_entry)
make_entries.append(sort_entry)
convert_entries.append(convert_to_vcf_entry)
sort_entries.append(sort_entry)
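# Note on the sort commands above: the header is written out first so
# that only the VCF body is piped through sort (-k1,1 chromosome, -k2,2n
# position); the doubled '$$' escapes '$' because these strings are
# emitted into a Makefile, where a single '$' starts a variable reference.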
#######
# run checkVCF.py
#######
check_vcf_out = []
check_vcf_out_ref_flips = []
check_vcf_target = []
check_vcf_entries = []
for i in range(0,23):
vcf = hrc_vcf_file_out_sorted[i]
target = "{0}/checkVCF.chr{1}.OK".format(outdir_temp,i + 1)
check_vcf_target.append(target)
prefix = hrc_vcf_file_out_sorted_prefix[i]
check_vcf_out.append(prefix)
check_vcf_ref_flips = "{0}.check.ref".format(prefix)
check_vcf_out_ref_flips.append(check_vcf_ref_flips)
check_vcf_cmd = "python {checkVCF} -r {refseq} -o {prefix} {vcf}".format(checkVCF =checkVCF, refseq = refseq, prefix = prefix, vcf = vcf)
check_entry = MakeEntry(target, [ check_vcf_cmd ], [ convert_entries[i], sort_entries[i] ], comment = "checkVCF for chr{0}".format(i + 1))
make_entries.append(check_entry)
check_vcf_entries.append(check_entry)
#######
# run flip remaining variants
#######
flip_ref_alt_vcf_out = []
flip_ref_alt_entries = []
for i in range(0,23):
in_vcf = hrc_vcf_file_out_sorted[i]
target = "{0}/flip_ref_alt.chr{1}.OK".format(outdir_temp,i + 1)
out_vcf = "{0}/extract.geno.pos.ind.flip.updated.chr{1}.sorted.fix_ref_alts.vcf.gz".format(outdir,i + 1)
flip_ref_alt_vcf_out.append(out_vcf)
flip_ref_alt_cmd = "{flip_ref_alt} --vcf {in_vcf} --ref_flips {flips} | bgzip -c > {out_vcf}".format(flip_ref_alt=flip_ref_alt,in_vcf=in_vcf,out_vcf=out_vcf,flips=check_vcf_out_ref_flips[i])
tabix_cmd = "{tabix} -pvcf {out_vcf}".format(tabix=tabix, out_vcf=out_vcf)
flip_ref_alt_entry = MakeEntry(target, [ flip_ref_alt_cmd, tabix_cmd], [ check_vcf_entries[i] ], comment = "flip ref and alt for chr{0}".format(i + 1))
flip_ref_alt_entries.append(flip_ref_alt_entry)
make_entries.append(flip_ref_alt_entry)
final_vcfs = []
for i in range(0,23):
target = "{0}/final.vcf.chr{1}.OK".format(outdir_temp,i + 1)
in_vcf = flip_ref_alt_vcf_out[i]
out_vcf = "{outdir}/{out_vcf_prefix}.chr{chr}.vcf.gz".format( outdir=outdir, out_vcf_prefix=out_vcf_prefix, chr=i + 1)
final_vcfs.append(out_vcf)
final_cmd = "mv {in_vcf} {out_vcf}; mv {in_vcf}.tbi {out_vcf}.tbi".format(in_vcf=in_vcf,out_vcf=out_vcf)
final_entry = MakeEntry(target, [final_cmd], [ flip_ref_alt_entries[i] ], comment="Rename final vcfs")
make_entries.append(final_entry)
######
######
#########
# Clean-up
#########
clean_cmd = "rm -r {outdir_temp}".format(outdir_temp=outdir_temp)
clean_entry = MakeEntry("clean", [ clean_cmd ], [], comment ="Clean command")
###
# write out final vcf list
###
out_vcf_file_list = open( "{0}/vcf_list.tsv".format(outdir), "w")
for i in range(0,23):
out_vcf_file_list.write("chr{chr}\t{vcf}\n".format(chr=i+1,vcf=final_vcfs[i]))
out_vcf_file_list.close()
####
# Create Makefile
####
makefile = MakeFile("{0}/all.OK".format(outdir), make_entries)
makefile.set_clean_entry(clean_entry)
makefile_loc = "{0}/Makefile".format(outdir)
out_makefile = open(makefile_loc, "w")
out_makefile.write(str(makefile))
out_makefile.write("\n")
out_makefile.close()
print "MAKEFILE CREATED"
print "Please run:"
print "cd " + outdir + ";" + "make"
| apache-2.0 |
wangxiya123/liupengyuan.github.io | chapter1/homework/computer/201611680672.py | 27 | 2720 |
# coding: utf-8
# - Exercise 1: Write a program that reads the user's name from the keyboard (e.g. Mr. right) and the user's birth month and day, then determines the user's zodiac sign. Assuming the user is a Taurus, output: Mr. right, you are a Taurus with a strong personality!
# In[1]:
name = input('What is your name? ')
print('When is your birthday? For example, enter May 21 as: 521')
birthday = int(input())
if 420 <= birthday <= 520:  # Taurus runs from April 20 to May 20
    print('Mr.', name, 'you are a Taurus with a strong personality!')
# - Exercise 2: Write a program that reads two integers m and n (n != 0) from the keyboard and asks the user what to do: for a sum, compute and print the sum from m to n; for a product, compute and print the product from m to n; for a remainder, print the remainder of m divided by n; otherwise print m integer-divided by n.
# In[2]:
m = int(input('please enter an integer m: '))
n = int(input('please enter a nonzero integer n: '))
print('Please enter the operation to perform: +, *, %, or anything else for integer division')
operation = input()
if operation == '+':
    print(sum(range(m, n + 1)))  # sum from m to n (assumes m <= n)
elif operation == '*':
    product = 1
    for i in range(m, n + 1):  # product from m to n (assumes m <= n)
        product *= i
    print(product)
elif operation == '%':
    print(m % n)
else:
    print(m // n)  # m integer-divided by n, as the task asks
# - Exercise 3: Write a program that gives protective advice based on Beijing's PM2.5 smog reading, e.g. when the PM2.5 value is above 500, advise turning on an air purifier, wearing an anti-smog mask, and so on.
# In[3]:
value = int(input("Please enter today's PM2.5 reading for Beijing: "))
if value > 500:
    print('Advice: turn on the air purifier and wear a mask when going out')
# - Exercise 4: Singular to plural for English words. Read an English verb (singular form) and output its plural form, or give advice on the conversion (hint: some_string.endswith(some_letter) checks what a string ends with; try running 'myname'.endswith('me') and 'liupengyuan'.endswith('n')).
# In[6]:
word = input('Please enter a word: ')
if word.endswith('s') or word.endswith('x') or word.endswith('ch') or word.endswith('sh') or word.endswith('o'):
    print(word, 'es', sep='')
else:
    print(word, 's', sep='')  # This answer is incomplete: how do we handle a consonant followed by 'y' with what we have learned so far? See the sketch below.
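# A possible extension for the consonant + 'y' case (a sketch, not part
# of the original answer): look at the letter before a final 'y'.
# if word.endswith('y') and len(word) > 1 and word[-2] not in 'aeiou':
#     print(word[:-1] + 'ies')  # e.g. 'fly' -> 'flies'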
# - Trial exercise: write a program that can display a blank line on the screen.
# In[8]:
print('name')
print()  # print() with no arguments outputs just a newline, i.e. a blank line
print('name')
# In[10]:
n = int(input('How many numbers will you enter in total? '))
max_num = int(input('Please enter an integer: '))
secmax_num = int(input('Please enter an integer: '))
# order the first two numbers before reading the rest, so the result is
# right even when n == 2
if max_num < secmax_num:
    max_num, secmax_num = secmax_num, max_num
i = 2
while i < n:
    num = int(input('Please enter an integer: '))
    if num > max_num:
        secmax_num = max_num
        max_num = num
    elif num > secmax_num:
        secmax_num = num
    i += 1
print('The max number is:', max_num)
print('The second max number is:', secmax_num)
| mit |
sbreen94/Zeus_S4_VZW | tools/perf/scripts/python/syscall-counts.py | 11181 | 1522 | # system call counts
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts.py [comm]\n";
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
print "%-40s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
mxOBS/deb-pkg_xbmc-imx6 | lib/libUPnP/Platinum/Build/Tools/SCons/gcc-generic.py | 283 | 1317 | import os
def generate(env, gcc_cross_prefix=None, gcc_strict=True, gcc_stop_on_warning=None, gcc_extra_options=''):
if gcc_stop_on_warning == None: gcc_stop_on_warning = env['stop_on_warning']
### compiler flags
if gcc_strict:
env.AppendUnique(CCFLAGS = ['-pedantic', '-Wall', '-W', '-Wundef', '-Wno-long-long'])
env.AppendUnique(CFLAGS = ['-Wmissing-prototypes', '-Wmissing-declarations'])
else:
env.AppendUnique(CCFLAGS = ['-Wall'])
compiler_defines = ['-D_REENTRANT']
env.AppendUnique(CCFLAGS = compiler_defines)
env.AppendUnique(CPPFLAGS = compiler_defines)
if env['build_config'] == 'Debug':
env.AppendUnique(CCFLAGS = '-g')
else:
env.AppendUnique(CCFLAGS = '-O3')
if gcc_stop_on_warning:
env.AppendUnique(CCFLAGS = ['-Werror'])
env['STRIP'] = 'strip'
if gcc_cross_prefix:
env['ENV']['PATH'] += os.environ['PATH']
env['AR'] = gcc_cross_prefix+'-ar'
env['RANLIB'] = gcc_cross_prefix+'-ranlib'
env['CC'] = gcc_cross_prefix+'-gcc ' + gcc_extra_options
env['CXX'] = gcc_cross_prefix+'-g++ ' + gcc_extra_options
env['LINK'] = gcc_cross_prefix+'-g++ ' + gcc_extra_options
env['STRIP'] = gcc_cross_prefix+'-strip'
| gpl-2.0 |
nolanliou/tensorflow | tensorflow/contrib/gan/python/eval/python/sliced_wasserstein_test.py | 21 | 4991 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Sliced Wasserstein Distance."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import ndimage
from tensorflow.contrib.gan.python.eval.python import sliced_wasserstein_impl as swd
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
class ClassifierMetricsTest(test.TestCase):
def test_laplacian_pyramid(self):
# The numpy/scipy code for reference estimation comes from:
# https://github.com/tkarras/progressive_growing_of_gans
gaussian_filter = np.float32([[1, 4, 6, 4, 1], [4, 16, 24, 16, 4], [
6, 24, 36, 24, 6
], [4, 16, 24, 16, 4], [1, 4, 6, 4, 1]]) / 256.0
def np_pyr_down(minibatch): # matches cv2.pyrDown()
assert minibatch.ndim == 4
return ndimage.convolve(
minibatch,
gaussian_filter[np.newaxis, np.newaxis, :, :],
mode='mirror')[:, :, ::2, ::2]
def np_pyr_up(minibatch): # matches cv2.pyrUp()
assert minibatch.ndim == 4
s = minibatch.shape
res = np.zeros((s[0], s[1], s[2] * 2, s[3] * 2), minibatch.dtype)
res[:, :, ::2, ::2] = minibatch
return ndimage.convolve(
res,
gaussian_filter[np.newaxis, np.newaxis, :, :] * 4.0,
mode='mirror')
def np_laplacian_pyramid(minibatch, num_levels):
# Note: there's a bug in the original SWD, fixed repeatability.
pyramid = [minibatch.astype('f').copy()]
for _ in range(1, num_levels):
pyramid.append(np_pyr_down(pyramid[-1]))
pyramid[-2] -= np_pyr_up(pyramid[-1])
return pyramid
data = np.random.normal(size=[256, 3, 32, 32]).astype('f')
pyramid = np_laplacian_pyramid(data, 3)
data_tf = array_ops.placeholder(dtypes.float32, [256, 32, 32, 3])
pyramid_tf = swd._laplacian_pyramid(data_tf, 3)
with self.test_session() as sess:
pyramid_tf = sess.run(
pyramid_tf, feed_dict={
data_tf: data.transpose(0, 2, 3, 1)
})
for x in range(3):
self.assertAllClose(
pyramid[x].transpose(0, 2, 3, 1), pyramid_tf[x], atol=1e-6)
def test_sliced_wasserstein_distance(self):
"""Test the distance."""
d1 = random_ops.random_uniform([256, 32, 32, 3])
d2 = random_ops.random_normal([256, 32, 32, 3])
wfunc = swd.sliced_wasserstein_distance(d1, d2)
with self.test_session() as sess:
wscores = [sess.run(x) for x in wfunc]
self.assertAllClose(
np.array([0.014, 0.014], 'f'),
np.array([x[0] for x in wscores], 'f'),
rtol=0.15)
self.assertAllClose(
np.array([0.014, 0.020], 'f'),
np.array([x[1] for x in wscores], 'f'),
rtol=0.15)
def test_sliced_wasserstein_distance_svd(self):
"""Test the distance."""
d1 = random_ops.random_uniform([256, 32, 32, 3])
d2 = random_ops.random_normal([256, 32, 32, 3])
wfunc = swd.sliced_wasserstein_distance(d1, d2, use_svd=True)
with self.test_session() as sess:
wscores = [sess.run(x) for x in wfunc]
self.assertAllClose(
np.array([0.013, 0.013], 'f'),
np.array([x[0] for x in wscores], 'f'),
rtol=0.15)
self.assertAllClose(
np.array([0.014, 0.019], 'f'),
np.array([x[1] for x in wscores], 'f'),
rtol=0.15)
def test_swd_mismatched(self):
"""Test the inputs mismatched shapes are detected."""
d1 = random_ops.random_uniform([256, 32, 32, 3])
d2 = random_ops.random_normal([256, 32, 31, 3])
d3 = random_ops.random_normal([256, 31, 32, 3])
d4 = random_ops.random_normal([255, 32, 32, 3])
with self.assertRaises(ValueError):
swd.sliced_wasserstein_distance(d1, d2)
with self.assertRaises(ValueError):
swd.sliced_wasserstein_distance(d1, d3)
with self.assertRaises(ValueError):
swd.sliced_wasserstein_distance(d1, d4)
def test_swd_not_rgb(self):
"""Test that only RGB is supported."""
d1 = random_ops.random_uniform([256, 32, 32, 1])
d2 = random_ops.random_normal([256, 32, 32, 1])
with self.assertRaises(ValueError):
swd.sliced_wasserstein_distance(d1, d2)
if __name__ == '__main__':
test.main()
| apache-2.0 |
lulufei/youtube-dl | youtube_dl/extractor/mailru.py | 81 | 3096 | # encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class MailRuIE(InfoExtractor):
IE_NAME = 'mailru'
IE_DESC = 'Видео@Mail.Ru'
_VALID_URL = r'http://(?:www\.)?my\.mail\.ru/(?:video/.*#video=/?(?P<idv1>(?:[^/]+/){3}\d+)|(?:(?P<idv2prefix>(?:[^/]+/){2})video/(?P<idv2suffix>[^/]+/\d+))\.html)'
_TESTS = [
{
'url': 'http://my.mail.ru/video/top#video=/mail/sonypicturesrus/75/76',
'md5': 'dea205f03120046894db4ebb6159879a',
'info_dict': {
'id': '46301138_76',
'ext': 'mp4',
'title': 'Новый Человек-Паук. Высокое напряжение. Восстание Электро',
'timestamp': 1393232740,
'upload_date': '20140224',
'uploader': 'sonypicturesrus',
'uploader_id': 'sonypicturesrus@mail.ru',
'duration': 184,
},
},
{
'url': 'http://my.mail.ru/corp/hitech/video/news_hi-tech_mail_ru/1263.html',
'md5': '00a91a58c3402204dcced523777b475f',
'info_dict': {
'id': '46843144_1263',
'ext': 'mp4',
'title': 'Samsung Galaxy S5 Hammer Smash Fail Battery Explosion',
'timestamp': 1397217632,
'upload_date': '20140411',
'uploader': 'hitech',
'uploader_id': 'hitech@corp.mail.ru',
'duration': 245,
},
},
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('idv1')
if not video_id:
video_id = mobj.group('idv2prefix') + mobj.group('idv2suffix')
video_data = self._download_json(
'http://api.video.mail.ru/videos/%s.json?new=1' % video_id, video_id, 'Downloading video JSON')
author = video_data['author']
uploader = author['name']
uploader_id = author.get('id') or author.get('email')
view_count = video_data.get('views_count')
meta_data = video_data['meta']
content_id = '%s_%s' % (
meta_data.get('accId', ''), meta_data['itemId'])
title = meta_data['title']
if title.endswith('.mp4'):
title = title[:-4]
thumbnail = meta_data['poster']
duration = meta_data['duration']
timestamp = meta_data['timestamp']
formats = [
{
'url': video['url'],
'format_id': video['key'],
'height': int(video['key'].rstrip('p'))
} for video in video_data['videos']
]
self._sort_formats(formats)
return {
'id': content_id,
'title': title,
'thumbnail': thumbnail,
'timestamp': timestamp,
'uploader': uploader,
'uploader_id': uploader_id,
'duration': duration,
'view_count': view_count,
'formats': formats,
}
| unlicense |
olysonek/tuned | tuned/plugins/plugin_video.py | 2 | 2874 | from . import base
from .decorators import *
import tuned.logs
from tuned.utils.commands import commands
import os
import re
log = tuned.logs.get()
class VideoPlugin(base.Plugin):
"""
Plugin for tuning powersave options for some graphic cards.
"""
def _init_devices(self):
self._devices_supported = True
self._free_devices = set()
self._assigned_devices = set()
# FIXME: this is a blind shot, needs testing
for device in self._hardware_inventory.get_devices("drm").match_sys_name("card*").match_property("DEVTYPE", "drm_minor"):
self._free_devices.add(device.sys_name)
self._cmd = commands()
def _get_device_objects(self, devices):
return [self._hardware_inventory.get_device("drm", x) for x in devices]
@classmethod
def _get_config_options(self):
return {
"radeon_powersave" : None,
}
def _instance_init(self, instance):
instance._has_dynamic_tuning = False
instance._has_static_tuning = True
def _instance_cleanup(self, instance):
pass
def _radeon_powersave_files(self, device):
return {
"method" : "/sys/class/drm/%s/device/power_method" % device,
"profile": "/sys/class/drm/%s/device/power_profile" % device,
"dpm_state": "/sys/class/drm/%s/device/power_dpm_state" % device
}
@command_set("radeon_powersave", per_device=True)
def _set_radeon_powersave(self, value, device, sim):
sys_files = self._radeon_powersave_files(device)
va = str(re.sub(r"(\s*:\s*)|(\s+)|(\s*;\s*)|(\s*,\s*)", " ", value)).split()
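# The substitution above normalises ':', ';', ',' and runs of whitespace into
# single spaces, so e.g. "default; dynpm ,high" splits into
# ['default', 'dynpm', 'high'] before each token is applied in order.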
if not os.path.exists(sys_files["method"]):
if not sim:
log.warn("radeon_powersave is not supported on '%s'" % device)
return None
for v in va:
if v in ["default", "auto", "low", "mid", "high"]:
if not sim:
if (self._cmd.write_to_file(sys_files["method"], "profile") and
self._cmd.write_to_file(sys_files["profile"], v)):
return v
elif v == "dynpm":
if not sim:
if (self._cmd.write_to_file(sys_files["method"], "dynpm")):
return "dynpm"
# new DPM profiles, recommended to use if supported
elif v in ["dpm-battery", "dpm-balanced", "dpm-performance"]:
if not sim:
state = v[len("dpm-"):]
if (self._cmd.write_to_file(sys_files["method"], "dpm") and
self._cmd.write_to_file(sys_files["dpm_state"], state)):
return v
else:
if not sim:
log.warn("Invalid option for radeon_powersave.")
return None
return None
@command_get("radeon_powersave")
def _get_radeon_powersave(self, device, ignore_missing = False):
sys_files = self._radeon_powersave_files(device)
method = self._cmd.read_file(sys_files["method"], no_error=ignore_missing).strip()
if method == "profile":
return self._cmd.read_file(sys_files["profile"]).strip()
elif method == "dynpm":
return method
elif method == "dpm":
return "dpm-" + self._cmd.read_file(sys_files["dpm_state"]).strip()
else:
return None
| gpl-2.0 |
chiffa/PolyPharma | bioflow/algorithms_bank/clustering_routines.py | 1 | 13185 | """
Mostly deprecated clustering and data structural analysis routines.
"""
from typing import Tuple
import numpy as np
from matplotlib import pyplot as plt
from scipy import sparse as spmat
from bioflow.annotation_network.knowledge_access_analysis import ref_param_set, \
get_go_interface_instance
from bioflow.configs.main_configs import NewOutputs
from bioflow.utils.dataviz import kde_compute, render_2d_matrix
from bioflow.utils.linalg_routines import cluster_nodes, normalize_laplacian, \
average_off_diag_in_sub_matrix, average_interset_linkage
def local_indexed_select(tri_array, array_column, selection_span):
"""
Convenience function: selects from tri_array all the columns whose value in
row array_column lies within selection_span.
:param tri_array: the matrix on which the selection is performed
:param array_column: index of the row whose values are tested against the span
:param selection_span: (low, high) interval; columns whose value falls inside it are kept
:return: the selected columns, or a single zero column if nothing matches
"""
selector = np.logical_and(
selection_span[0] < tri_array[
array_column, :], tri_array[
array_column, :] < selection_span[1])
if not any(selector):
return np.array([[0.0, 0.0, 0.0]])
decvec = tri_array[:, selector]
return decvec
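# Minimal usage sketch (illustrative values): keep the columns of a 3xN
# tri-array whose confusion potential (row 2) lies in (1, 10):
#   tri = np.array([[0.1, 0.5, 0.9],
#                   [1.0, 2.0, 3.0],
#                   [0.5, 5.0, 50.0]])
#   local_indexed_select(tri, 2, (1, 10))  # -> array([[0.5], [2.0], [5.0]])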
# REFACTOR: Legacy code containing static analysis and clustering logic
def deprecated_show_correlations(
background_curr_deg_conf,
mean_correlations,
eigenvalues,
selector,
true_sample_tri_corr_array,
test_mean_correlation,
eigenvalue,
re_samples,
go_interface_instance=None,
sparse=False,
param_set=ref_param_set,
save_path: NewOutputs = None):
# TODO: there is a lot of repetition depending on which values are the biggest,
# the test-set ones or the real-set ones. Overall, this should reduce to two functions,
# scatterplot and histogram, each taking two data sets, that belong in the dataviz module
"""
A general function that performs demonstration of an example of random samples of the
same size as our sample
and of our sample and conducts the statistical tests on whether any of nodes or
functional groups in our sample are non-random
:param background_curr_deg_conf: [[current, informativity, confusion_potential], ...] -
characteristics of the random samples
:param mean_correlations: [[cluster size, average internode connection], ...] -
characteristics of clustering random samples with the same parameters
:param eigenvalues: eigenvalues associated to the interconnection matrix of random samples
:param selector: range on which we would like to visually zoom and plot a histogram
:param true_sample_tri_corr_array: [[current, informativity, confusion_potential], ...] -
characteristics of the true sample. If none, nothing happens
:param test_mean_correlation: [[cluster size, average internode connection], ...] -
characteristics of clustering the true sample
:param eigenvalue: eigenvalues associated to the interconnection matrix of the true sample
:param re_samples: how many random samples we analyzed for the default model
:param go_interface_instance:
:param sparse:
:param param_set:
:return:
"""
if go_interface_instance is None:
go_interface_instance = get_go_interface_instance(param_set)
inf_sel = (go_interface_instance.calculate_informativity(selector[0]),
go_interface_instance.calculate_informativity(selector[1]))
fig = plt.figure()
fig.set_size_inches(30, 20)
# trivect: [0, :] - current; [1, :] - informativity; [2, :] - confusion potential
plt.subplot(331)
plt.title('current through nodes')
bins = np.linspace(background_curr_deg_conf[0, :].min(),
background_curr_deg_conf[0, :].max(),
100)
if true_sample_tri_corr_array is not None:
bins = np.linspace(min(background_curr_deg_conf[0, :].min(),
true_sample_tri_corr_array[0, :].min()),
max(background_curr_deg_conf[0, :].max(),
true_sample_tri_corr_array[0, :].max()),
100)
plt.hist(background_curr_deg_conf[0, :],
bins=bins, histtype='step', log=True, color='b')
if true_sample_tri_corr_array is not None:
plt.hist(true_sample_tri_corr_array[0, :],
bins=bins, histtype='step', log=True, color='r')
plt.subplot(332)
plt.title('test current vs pure informativity')
plt.scatter(background_curr_deg_conf[1, :],
background_curr_deg_conf[0, :], color='b', alpha=0.1)
if true_sample_tri_corr_array is not None:
plt.scatter(
true_sample_tri_corr_array[1, :],
true_sample_tri_corr_array[0, :],
color='r', alpha=0.5)
plt.axvspan(inf_sel[0], inf_sel[1], facecolor='0.5', alpha=0.3)
plt.subplot(333)
plt.title('test current v.s. confusion potential')
plt.scatter(background_curr_deg_conf[2, :], background_curr_deg_conf[0, :])
if true_sample_tri_corr_array is not None:
plt.scatter(
true_sample_tri_corr_array[2, :],
true_sample_tri_corr_array[0, :],
color='r', alpha=0.5)
plt.axvspan(selector[0], selector[1], facecolor='0.5', alpha=0.3)
plt.subplot(334)
plt.title('Gaussian KDE current_info')
estimator_function = kde_compute(background_curr_deg_conf[(1, 0), :], 50, re_samples)
current_info_rel = None
if true_sample_tri_corr_array is not None:
# Used to be the way to compute the p-values
current_info_rel = estimator_function(true_sample_tri_corr_array[(1, 0), :])
plt.subplot(335)
plt.title('GO_term pure informativity distribution')
# REFACTOR: this needs to be moved elsewhere - this is a structural analysis
bins = np.linspace(
background_curr_deg_conf[1, :].min(),
background_curr_deg_conf[1, :].max(),
100)
if true_sample_tri_corr_array is not None:
bins = np.linspace(min(background_curr_deg_conf[1, :].min(),
true_sample_tri_corr_array[1, :].min()),
max(background_curr_deg_conf[1, :].max(),
true_sample_tri_corr_array[1, :].max()),
100)
plt.hist(background_curr_deg_conf[1, :],
bins=bins, histtype='step', log=True, color='b')
if true_sample_tri_corr_array is not None:
plt.hist(true_sample_tri_corr_array[1, :],
bins=bins, histtype='step', log=True, color='r')
plt.subplot(336)
plt.title('Density of current in the highlighted area')
bins = np.linspace(local_indexed_select(background_curr_deg_conf, 2, selector)[0, :].min(),
local_indexed_select(background_curr_deg_conf, 2, selector)[0, :].max(),
100)
if true_sample_tri_corr_array is not None:
bins = np.linspace(
min(local_indexed_select(background_curr_deg_conf, 2, selector)[0, :].min(),
local_indexed_select(true_sample_tri_corr_array, 2, selector)[0, :].min()),
max(local_indexed_select(background_curr_deg_conf, 2, selector)[0, :].max(),
local_indexed_select(true_sample_tri_corr_array, 2, selector)[0, :].max()),
100)
plt.hist(local_indexed_select(background_curr_deg_conf, 2, selector)[0, :],
bins=bins, histtype='step', log=True, color='b')
if true_sample_tri_corr_array is not None:
plt.hist(local_indexed_select(true_sample_tri_corr_array, 2, selector)[0, :],
bins=bins, histtype='step', log=True, color='r')
cluster_props = None
plt.subplot(337)
plt.title('Clustering correlation')
# REFACTOR: that's the clustering logic to be extracted elsewhere
if not sparse:
# plt.scatter(mean_correlations[0, :], mean_correlations[1, :], color = 'b')
estimator_function = kde_compute(mean_correlations[(0, 1), :], 50, re_samples)
cluster_props = None
if test_mean_correlation is not None:
plt.scatter(test_mean_correlation[0, :],
test_mean_correlation[1, :],
color='k', alpha=0.8)
cluster_props = estimator_function(test_mean_correlation[(0, 1), :])
plt.subplot(338)
plt.title('Eigvals_hist')
# REFACTOR: this needs to be moved elsewhere - this is a structural analysis
if not sparse:
bins = np.linspace(eigenvalues.min(), eigenvalues.max(), 100)
if true_sample_tri_corr_array is not None:
bins = np.linspace(min(eigenvalues.min(), eigenvalue.min()),
max(eigenvalues.max(), eigenvalue.max()),
100)
plt.hist(eigenvalues, bins=bins, histtype='step', color='b')
if eigenvalue is not None:
plt.hist(eigenvalue.tolist() * 3, bins=bins, histtype='step', color='r')
plt.subplot(339)
plt.title('confusion potential')
bins = np.linspace(background_curr_deg_conf[2, :].min(),
background_curr_deg_conf[2, :].max(),
100)
if true_sample_tri_corr_array is not None:
bins = np.linspace(min(background_curr_deg_conf[2, :].min(),
true_sample_tri_corr_array[2, :].min()),
max(background_curr_deg_conf[2, :].max(),
true_sample_tri_corr_array[2, :].max()),
100)
plt.hist(background_curr_deg_conf[2, :],
bins=bins, histtype='step', log=True, color='b')
if true_sample_tri_corr_array is not None:
plt.hist(true_sample_tri_corr_array[2, :],
bins=bins, histtype='step', log=True, color='r')
# # plt.show()
plt.savefig(save_path.knowledge_network_scatterplot)
# pull the groups corresponding to non-random associations.
return current_info_rel, cluster_props
def deprecated_perform_clustering(inter_node_tension: spmat.csc_matrix,
cluster_number: int,
show: str = 'undefined clustering') -> Tuple[np.array, np.float64,
np.array, np.array]:
"""
Performs spectral clustering on the pairwise tensions (voltages) between nodes.
:param inter_node_tension: dict mapping (node1, node2) pairs to tension values
:param cluster_number: number of clusters to extract
:param show: name under which to render the relations matrix; falsy to skip rendering
:return: per-cluster stats, mean inter-cluster linkage, cluster size/correlation array, eigenvalues
"""
index_group = list(set([item
for key in inter_node_tension.keys()
for item in key]))
local_index = dict((UP, i) for i, UP in enumerate(index_group))
rev_idx = dict((i, UP) for i, UP in enumerate(index_group))
relations_matrix = spmat.lil_matrix((len(index_group), len(index_group)))
for (UP1, UP2), tension in inter_node_tension.items():
# TODO: change the metric used to cluster the nodes.
relations_matrix[local_index[UP1], local_index[UP2]] = -1.0 / tension
relations_matrix[local_index[UP2], local_index[UP1]] = -1.0 / tension
relations_matrix[local_index[UP2], local_index[UP2]] += 1.0 / tension
relations_matrix[local_index[UP1], local_index[UP1]] += 1.0 / tension
# underlying method is spectral clustering: do we really lie in a good zone for that?
# NOPE - we need a dynamic clusters number
# TODO: change clustering method to a different one
groups = cluster_nodes(relations_matrix, cluster_number)
relations_matrix = normalize_laplacian(relations_matrix)
if relations_matrix.shape[0] < 5:
eigenvals, _ = spmat.linalg.eigsh(relations_matrix, k=2)
elif relations_matrix.shape[0] < 10:
eigenvals, _ = spmat.linalg.eigsh(relations_matrix, k=4)
else:
eigenvals, _ = spmat.linalg.eigsh(relations_matrix)
relations_matrix = - relations_matrix
relations_matrix.setdiag(1)
group_sets = []
group_2_mean_off_diag = []
for i in range(0, cluster_number):
group_selector = groups == i
group_indexes = group_selector.nonzero()[0].tolist()
group_2_mean_off_diag.append(
(tuple(rev_idx[idx] for idx in group_indexes),
len(group_indexes),
average_off_diag_in_sub_matrix(relations_matrix, group_indexes)))
group_sets.append(group_indexes)
remainder = average_interset_linkage(relations_matrix, group_sets)
clustidx = np.array([item for itemset in group_sets for item in itemset])
relations_matrix = relations_matrix[:, clustidx]
relations_matrix = relations_matrix[clustidx, :]
mean_corr_array = np.array([[items, mean_corr]
for _, items, mean_corr in group_2_mean_off_diag])
if show:
render_2d_matrix(relations_matrix.toarray(), name=show, destination='')
return np.array(group_2_mean_off_diag), \
remainder, \
mean_corr_array, \
eigenvals | bsd-3-clause |
kawamon/hue | desktop/core/ext-py/simple-salesforce-0.74.2/simple_salesforce/util.py | 3 | 2515 | """Utility functions for simple-salesforce"""
import xml.dom.minidom
from simple_salesforce.exceptions import (
SalesforceGeneralError, SalesforceExpiredSession,
SalesforceMalformedRequest, SalesforceMoreThanOneRecord,
SalesforceRefusedRequest, SalesforceResourceNotFound
)
# pylint: disable=invalid-name
def getUniqueElementValueFromXmlString(xmlString, elementName):
"""
Extracts an element value from an XML string.
For example, invoking
getUniqueElementValueFromXmlString(
'<?xml version="1.0" encoding="UTF-8"?><foo>bar</foo>', 'foo')
should return the value 'bar'.
"""
xmlStringAsDom = xml.dom.minidom.parseString(xmlString)
elementsByName = xmlStringAsDom.getElementsByTagName(elementName)
elementValue = None
if len(elementsByName) > 0:
elementValue = elementsByName[0].toxml().replace(
'<' + elementName + '>', '').replace('</' + elementName + '>', '')
return elementValue
def date_to_iso8601(date):
"""Returns an ISO8601 string from a date"""
datetimestr = date.strftime('%Y-%m-%dT%H:%M:%S')
timezone_sign = date.strftime('%z')[0:1]
timezone_str = '%s:%s' % (
date.strftime('%z')[1:3], date.strftime('%z')[3:5])
return '{datetimestr}{tzsign}{timezone}'.format(
datetimestr=datetimestr,
tzsign=timezone_sign,
timezone=timezone_str
).replace(':', '%3A').replace('+', '%2B')
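# Illustrative behaviour (assumed, based on the formatting above): a
# timezone-aware datetime at 2014-03-22 00:35:00 in UTC-8 is rendered as
# '2014-03-22T00%3A35%3A00-08%3A00' -- colons URL-encoded, '+' offsets as '%2B'.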
def exception_handler(result, name=""):
"""Exception router. Determines which error to raise for bad results"""
try:
response_content = result.json()
# pylint: disable=broad-except
except Exception:
response_content = result.text
exc_map = {
300: SalesforceMoreThanOneRecord,
400: SalesforceMalformedRequest,
401: SalesforceExpiredSession,
403: SalesforceRefusedRequest,
404: SalesforceResourceNotFound,
}
exc_cls = exc_map.get(result.status_code, SalesforceGeneralError)
raise exc_cls(result.url, result.status_code, name, response_content)
def call_salesforce(url, method, session, headers, **kwargs):
"""Utility method for performing HTTP call to Salesforce.
Returns a `requests.result` object.
"""
additional_headers = kwargs.pop('additional_headers', dict())
headers.update(additional_headers or dict())
result = session.request(method, url, headers=headers, **kwargs)
if result.status_code >= 300:
exception_handler(result)
return result
| apache-2.0 |
mF2C/COMPSs | tests/sources/python/8_argument_error/src/modules/testArgumentError.py | 1 | 3731 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
PyCOMPSs Testbench
========================
"""
# Imports
import unittest
from pycompss.api.task import task
from pycompss.api.parameter import *
from pycompss.api.binary import binary
from pycompss.api.compss import compss
from pycompss.api.constraint import constraint
from pycompss.api.decaf import decaf
from pycompss.api.implement import implement
from pycompss.api.mpi import mpi
from pycompss.api.ompss import ompss
# from pycompss.api.opencl import opencl
# All of the following tasks do not include an mandatory argument or
# include it wrong.
# An exception has to be raised by the binding in this case.
# If not found the exception, then raise an exception to the test.
try:
# Missing binary
@binary(working_dir="/tmp")
@task()
def binary_task(dprefix, param):
pass
except Exception:
print("Exception thrown by @binary SUCCESSFULLY detected")
else:
raise Exception("ERROR: Exception expected from @binary not found")
try:
# Wrong app_name name (without underscore) - it should behave as if it is missing
@compss(runcompss="${RUNCOMPSS}", flags="-d", appname="${APP_DIR}/src/simple_compss_nested.py", worker_in_master="false", computing_nodes="2")
@constraint(computing_units="2")
@task(returns=int)
def compss_task(value):
pass
except Exception:
print("Exception thrown by @compss SUCCESSFULLY detected")
else:
raise Exception("ERROR: Exception expected from @compss not found")
try:
# Wrong df_script name (without underscore) - it should behave as if it is missing
@decaf(working_dir=".", Runner="mpirun", dfscript="${APP_DIR}/src/test_decaf.py", df_executor="test.sh", df_lib="lib")
@task(param=FILE_OUT)
def my_decaf_task(param):
pass
except Exception:
print("Exception thrown by @decaf SUCCESSFULLY detected")
else:
raise Exception("ERROR: Exception expected from @decaf not found")
@task(returns=int)
def slow_task(value):
return value * value * value
try:
# Missing method argument
@implement(source_class="modules.testArgumentError")
@constraint(computing_units="1")
@task(returns=list)
def better_task(value):
return value ** 3
except Exception:
print("Exception thrown by @implement SUCCESSFULLY detected")
else:
raise Exception("ERROR: Exception expected from @implement not found")
try:
# Missing runner
@mpi(binary="date", workingDir="/tmp")
@task()
def mpi_task(dprefix, param):
pass
except Exception:
print("Exception thrown by @mpi SUCCESSFULLY detected")
else:
raise Exception("ERROR: Exception expected from @mpi not found")
try:
# Missing binary
@ompss(workingDir="/tmp")
@task()
def ompss_task(dprefix, param):
pass
except Exception:
print("Exception thrown by @ompss SUCCESSFULLY detected")
else:
raise Exception("ERROR: Exception expected from @ompss not found")
# try:
# # Missing kernel
# @opencl(workingDir="/tmp")
# @task()
# def opencl_task(dprefix, param):
# pass
# except Exception:
# print("Exception thrown by @opencl SUCCESSFULLY detected")
# else:
# raise Exception("ERROR: Exception expected from @opencl not found")
class testArgumentError(unittest.TestCase):
# This test will never reach the execution of the tasks:
# - if the exception is caught: the call to a task will raise a
# NameError exception since it will not be defined (previous try
# except)
# - if the exception is not caught: the exception thrown will
# stop the test
def testErrors(self):
# Symbolic test. Everything is tested when loading the test
pass
| apache-2.0 |
rajarshd/TextKBQA | code/scratch/check_nick_coverage.py | 2 | 2028 | import json
vocab_file="/iesl/canvas/nmonath/data/wikipedia/20160305/en/enwiki-20160305-lstm-vocab.tsv"
entity_to_freebase_mapping="/iesl/canvas/nmonath/data/freebase/20160513/en-freebase_wiki_cat_title_map.txt"
#Read the entity_to_freebase_mapping file
print('Read the entity_to_freebase_mapping file')
name_to_mid = {}
with open(entity_to_freebase_mapping) as input_file:
for line in input_file:
line = line.strip()
name, mid = line.split('\t')
name_to_mid[name] = mid
#read the vocab file
# print('read nick\'s vocab file')
# vocab_set = set()
# with open(vocab_file) as vocab_in:
# for line in vocab_in:
# line = line.strip()
# _, name = line.split(' ')
# if name in name_to_mid:
# vocab_set.add(name_to_mid[name].replace('/', '.'))
# else:
# vocab_set.add(name)
#read the embedding file and read only the words (1st column)
embeddings_file_from_nick = "/iesl/canvas/nmonath/data/wikipedia/20160305/en/embeddings/aabt/context/target.tsv"
vocab_set = set()
with open(embeddings_file_from_nick) as embedding_file:
for line in embedding_file:
split = line.split('\t')
name = split[0]
if name in name_to_mid:
vocab_set.add(name_to_mid[name].replace('/', '.'))
else:
name = name.replace('W_SLUG_', '')
name = name.replace('_lang_EN', '')
vocab_set.add(name.lower())
#read entity vocab and check coverage
print('Reading entity vocab')
entity_vocab_file = "/home/rajarshi/research/joint-text-and-kb-inference-semantic-parsing/vocab/entity_vocab.json"
entity_vocab = {}
with open(entity_vocab_file) as entity_vocab_in:
entity_vocab = json.load(entity_vocab_in)
counter = 0
mid_counter = 0
for k, _ in entity_vocab.iteritems():
k = k.lower()
if k in vocab_set:
if k.startswith('m.'):
mid_counter += 1
counter += 1
print('Total overlap is {}'.format(counter))
print('Overlap of entities is {}'.format(mid_counter)) | bsd-3-clause |
alexandrul-ci/robotframework | utest/model/test_message.py | 7 | 1560 | import unittest
from robot.model import Message
from robot.utils.asserts import assert_equal, assert_raises
from robot.utils import PY2, PY3
if PY3:
unicode = str
class TestHtmlMessage(unittest.TestCase):
def test_empty(self):
assert_equal(Message().html_message, '')
assert_equal(Message(html=True).html_message, '')
def test_no_html(self):
assert_equal(Message('Hello, Kitty!').html_message, 'Hello, Kitty!')
assert_equal(Message('<b> & ftp://url').html_message,
'<b> & <a href="ftp://url">ftp://url</a>')
def test_html(self):
assert_equal(Message('Hello, Kitty!', html=True).html_message, 'Hello, Kitty!')
assert_equal(Message('<b> & ftp://x', html=True).html_message, '<b> & ftp://x')
class TestStringRepresentation(unittest.TestCase):
def setUp(self):
self.empty = Message()
self.ascii = Message('Kekkonen')
self.non_ascii = Message(u'hyv\xe4 nimi')
def test_unicode(self):
assert_equal(unicode(self.empty), '')
assert_equal(unicode(self.ascii), 'Kekkonen')
assert_equal(unicode(self.non_ascii), u'hyv\xe4 nimi')
if PY2:
def test_str(self):
assert_equal(str(self.empty), '')
assert_equal(str(self.ascii), 'Kekkonen')
assert_equal(str(self.non_ascii), u'hyv\xe4 nimi'.encode('UTF-8'))
def test_slots(self):
assert_raises(AttributeError, setattr, Message(), 'attr', 'value')
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
benjamin-recht/benjamin-recht.github.io | code/lqrpols_original.py | 2 | 7132 | import numpy as np
import scipy.linalg as LA
def lqr_gain(A,B,Q,R):
'''
Arguments:
State transition matrices (A,B)
LQR Costs (Q,R)
Outputs:
K: optimal infinite-horizon LQR gain matrix given
'''
# solve DARE:
M=LA.solve_discrete_are(A,B,Q,R)
# K=(B'MB + R)^(-1)*(B'MA)
return np.dot(LA.inv(np.dot(np.dot(B.T,M),B)+R),(np.dot(np.dot(B.T,M),A)))
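# Minimal usage sketch (illustrative system, not taken from any experiment):
# a discrete-time double integrator with unit costs. Note the sign convention:
# lqr_gain returns K for the control law u = -K x, so A - B.dot(K) is stable.
#   A = np.array([[1.0, 1.0], [0.0, 1.0]])
#   B = np.array([[0.0], [1.0]])
#   Q = np.eye(2); R = np.eye(1)
#   K = lqr_gain(A, B, Q, R)   # 1x2 gain matrix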
def cost_inf_K(A,B,Q,R,K):
'''
Arguments:
State transition matrices (A,B)
LQR Costs (Q,R)
Control Gain K
Outputs:
cost: Infinite time horizon LQR cost of static gain K
'''
cl_map = A+B.dot(K)
if np.amax(np.abs(LA.eigvals(cl_map)))<(1.0-1.0e-6):
cost = np.trace(LA.solve_discrete_lyapunov(cl_map.T,Q+np.dot(K.T,R.dot(K))))
else:
cost = float("inf")
return cost
def cost_finite_model(A_true,B_true,Q,R,x0,T,A_dat,B_dat):
'''
Arguments:
True Model state transition matrices (A_true,B_true)
LQR Costs (Q,R)
Initial State x0
Time Horizon T
Nominal Model state transition matrices (A_dat,B_dat)
Outputs:
cost: finite time horizon LQR cost when control is computed using
(A_dat,B_dat) but executed on system (A_true,B_true)
'''
d,p = B_true.shape
# Ricatti recursion
M = np.zeros((d,d,T))
M[:,:,-1]=Q
for k in range(T-2,-1,-1):
AMA = np.dot(A_dat.T,M[:,:,k+1].dot(A_dat))
AMB = np.dot(A_dat.T,M[:,:,k+1].dot(B_dat))
BMB = np.dot(B_dat.T,M[:,:,k+1].dot(B_dat))
M[:,:,k] = Q + AMA - np.dot(AMB,LA.inv(R+BMB).dot(AMB.T))
# compute contols and costs using these Ricatti iterates
cost = 0
x = x0
for k in range(T):
AMB = np.dot(A_dat.T,M[:,:,k].dot(B_dat))
BMB = np.dot(B_dat.T,M[:,:,k].dot(B_dat))
u = -np.dot(LA.inv(R+BMB),np.dot(AMB.T,x))
x = A_true.dot(x)+B_true.dot(u)
cost = cost+np.dot(x.T,Q.dot(x))+np.dot(u.T,R.dot(u))
return cost.flatten()[0]
def cost_finite_K(A_true,B_true,Q,R,x0,T,K):
'''
Arguments:
True Model state transition matrices (A_true,B_true)
LQR Costs (Q,R)
Initial State x0
Time Horizon T
Static Control Gain K
Outputs:
cost: finite time horizon LQR cost when control is static gain K on
system (A_true,B_true)
'''
d,p = B_true.shape
cost = 0
x = x0
for k in range(T):
u = np.dot(K,x)
x = A_true.dot(x)+B_true.dot(u)
cost = cost+np.dot(x.T,Q.dot(x))+np.dot(u.T,R.dot(u))
return cost.flatten()
def lsqr_estimator(A,B,Q,R,x0,eq_err,N,T):
'''
Arguments:
state transition matrices (A,B)
LQR Costs (Q,R)
Initial State x0
magnitude of noise in dynamics eq_err
Number of rollouts N
Time Horizon T
Outputs:
Estimated State Transition Matrices (A_nom,B_nom) from least squares
'''
d,p = B.shape
# storage matrices
X_store = np.zeros((d,N,T+1))
U_store = np.zeros((p,N,T))
# simulate
for k in range(N):
x = x0
X_store[:,k,0] = x0.flatten()
for t in range(T):
u = np.random.randn(p,1)
x = A.dot(x)+B.dot(u)+eq_err*np.random.randn(d,1)
X_store[:,k,t+1] = x.flatten()
U_store[:,k,t] = u.flatten()
### Solve for nominal model
tmp = np.linalg.lstsq(np.vstack((X_store[:,:,0:T].reshape(d,N*T),
U_store.reshape(p,N*T))).T,
X_store[:,:,1:(T+1)].reshape(d,N*T).T)[0]
A_nom = tmp[0:d,:].T
B_nom = tmp[d:(d+p),:].T
return (A_nom,B_nom)
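# Sketch of the intended identification-then-design workflow (an assumed usage,
# pieced together from the functions in this file):
#   A_nom, B_nom = lsqr_estimator(A, B, Q, R, x0, eq_err=0.1, N=50, T=10)
#   K_nom = lqr_gain(A_nom, B_nom, Q, R)      # designed on the nominal model
#   cost = cost_inf_K(A, B, Q, R, -K_nom)     # evaluated on the true system;
#   # note the sign flip: cost_inf_K closes the loop as A + B.dot(K).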
def random_search_linear_policy(A,B,Q,R,x0,eq_err,N,T,
explore_mag = 4e-2, step_size = 5e-1, batch_size = 4):
'''
Arguments:
state transition matrices (A,B)
LQR Costs (Q,R)
Initial State x0
magnitude of noise in dynamics eq_err
Number of rollouts N
Time Horizon T
hyperparameters:
explore_mag = magnitude of the noise to explore
step_size
batch_size = number of directions per minibatch
Outputs:
Static Control Gain K optimized on LQR cost by random search
'''
d,p = B.shape
# initial condition for K
K0 = 1e-3*np.random.randn(p,d)
###
#### ALGORITHM
K = K0
for k in range(N):
reward_store = []
mini_batch = np.zeros((p,d))
for j in range(batch_size):
V = explore_mag*np.random.randn(p,d)
for sign in [-1,1]:
x = x0
reward = 0
for t in range(T):
u = np.dot(K+sign*V,x)
x = A.dot(x)+B.dot(u)+eq_err*np.random.randn(d,1)
reward += -np.dot(x.T,Q.dot(x))-np.dot(u.T,R.dot(u))
mini_batch += (reward*sign)*V
reward_store.append(reward)
K += (step_size/np.std(reward_store)/batch_size)*mini_batch
return K
def uniform_random_linear_policy(A,B,Q,R,x0,eq_err,N,T,linf_norm=3):
'''
Arguments:
state transition matrices (A,B)
LQR Costs (Q,R)
Initial State x0
magnitude of noise in dynamics eq_err
Number of rollouts N
Time Horizon T
hyperparameters
linf_norm = maximum absolute value of entries of controller gain
Outputs:
Static Control Gain K optimized on LQR cost by uniformly sampling policies
in bounded region
'''
d,p = B.shape
#### "ALGORITHM"
best_K = np.empty((p,d))
best_reward = -float("inf")
for k in range(N):
K = np.random.uniform(-linf_norm,linf_norm,(p,d))
x = x0
reward = 0
for t in range(T):
u = np.dot(K,x)
x = A.dot(x)+B.dot(u)+eq_err*np.random.randn(d,1)
reward += -np.dot(x.T,Q.dot(x))-np.dot(u.T,R.dot(u))
if reward>best_reward:
best_reward = reward
best_K = K
return best_K
def policy_gradient_linear_policy(A,B,Q,R,x0,eq_err,N,T,
explore_mag = 5e-2,step_size = 2, batch_size = 40, safeguard = 2):
'''
Arguments:
state transition matrices (A,B)
LQR Costs (Q,R)
Initial State x0
magnitude of noise in dynamics eq_err
Number of rollouts N
Time Horizon T
hyperparameters
explore_mag magnitude of the noise to explore
step_size
batch_size: number of stochastic gradients per minibatch
safeguard: maximum absolute value of entries of controller gain
Outputs:
Static Control Gain K optimized on LQR cost by Policy Gradient
'''
d,p = B.shape
# initial condition for K
K0 = 1e-3*np.random.randn(p,d)
###
X_store = np.zeros((d,T))
V_store = np.zeros((p,T))
#### ALGORITHM
K = K0
baseline = 0
for k in range(N):
new_baseline = 0
mini_batch = np.zeros((p,d))
for j in range(batch_size):
x = x0
reward = 0
for t in range(T):
v = explore_mag*np.random.randn(p,1)
X_store[:,t] = x.flatten()
V_store[:,t] = v.flatten()
u = np.dot(K,x)+v
x = A.dot(x)+B.dot(u)+eq_err*np.random.randn(d,1)
reward += -np.dot(x.T,Q.dot(x))-np.dot(u.T,R.dot(u))
mini_batch += ((reward-baseline)/batch_size)*np.dot(V_store,X_store.T)
new_baseline += reward/batch_size
K += step_size*mini_batch
K = np.minimum(np.maximum(K,-safeguard),safeguard)
baseline = new_baseline
return K
| mit |
PythonNut/servo | tests/wpt/web-platform-tests/tools/html5lib/setup.py | 418 | 1694 | from distutils.core import setup
import os
import codecs
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Processing :: Markup :: HTML'
]
packages = ['html5lib'] + ['html5lib.'+name
for name in os.listdir(os.path.join('html5lib'))
if os.path.isdir(os.path.join('html5lib', name)) and
not name.startswith('.') and name != 'tests']
current_dir = os.path.dirname(__file__)
with codecs.open(os.path.join(current_dir, 'README.rst'), 'r', 'utf8') as readme_file:
with codecs.open(os.path.join(current_dir, 'CHANGES.rst'), 'r', 'utf8') as changes_file:
long_description = readme_file.read() + '\n' + changes_file.read()
setup(name='html5lib',
version='0.9999-dev',
url='https://github.com/html5lib/html5lib-python',
license="MIT License",
description='HTML parser based on the WHATWG HTML specification',
long_description=long_description,
classifiers=classifiers,
maintainer='James Graham',
maintainer_email='james@hoppipolla.co.uk',
packages=packages,
install_requires=[
'six',
],
)
| mpl-2.0 |
blueboxgroup/horizon | openstack_dashboard/dashboards/admin/volumes/volume_types/views.py | 33 | 9790 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Admin views for managing volumes.
"""
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from django.views import generic
from horizon import exceptions
from horizon import forms
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.volumes.volume_types \
import forms as volume_types_forms
from openstack_dashboard.dashboards.admin.volumes.volumes \
import forms as volumes_forms
class CreateVolumeTypeView(forms.ModalFormView):
form_class = volumes_forms.CreateVolumeType
modal_header = _("Create Volume Type")
modal_id = "create_volume_type_modal"
template_name = 'admin/volumes/volume_types/create_volume_type.html'
submit_label = _("Create Volume Type")
submit_url = reverse_lazy("horizon:admin:volumes:volume_types:create_type")
success_url = 'horizon:admin:volumes:volume_types_tab'
page_title = _("Create a Volume Type")
def get_success_url(self):
return reverse(self.success_url)
class VolumeTypeEncryptionDetailView(generic.TemplateView):
template_name = ("admin/volumes/volume_types"
"/volume_encryption_type_detail.html")
page_title = _("Volume Type Encryption Details")
def get_context_data(self, **kwargs):
context = super(VolumeTypeEncryptionDetailView, self).\
get_context_data(**kwargs)
context["volume_type_encryption"] = self.get_data()
return context
@memoized.memoized_method
def get_data(self):
try:
volume_type_id = self.kwargs['volume_type_id']
self._volume_type_encryption = api.cinder.\
volume_encryption_type_get(self.request, volume_type_id)
volume_type_list = api.cinder.volume_type_list(self.request)
for volume_type in volume_type_list:
if volume_type.id == volume_type_id:
self.name = volume_type.name
self._volume_type_encryption.name = self.name
except Exception:
redirect = reverse('horizon:admin:volumes:index')
exceptions.handle(self.request,
_('Unable to retrieve volume type encryption'
' details.'),
redirect=redirect)
return None
return self._volume_type_encryption
class CreateVolumeTypeEncryptionView(forms.ModalFormView):
form_class = volume_types_forms.CreateVolumeTypeEncryption
form_id = "create_volume_form"
modal_header = _("Create Volume Type Encryption")
modal_id = "create_volume_type_modal"
template_name = ("admin/volumes/volume_types/"
"create_volume_type_encryption.html")
submit_label = _("Create Volume Type Encryption")
submit_url = "horizon:admin:volumes:volume_types:create_type_encryption"
success_url = reverse_lazy('horizon:admin:volumes:index')
page_title = _("Create an Encrypted Volume Type")
@memoized.memoized_method
def get_name(self):
try:
volume_type_list = api.cinder.volume_type_list(self.request)
for volume_type in volume_type_list:
if volume_type.id == self.kwargs['volume_type_id']:
self.name = volume_type.name
except Exception:
msg = _('Unable to retrieve volume type name.')
url = reverse('horizon:admin:volumes:index')
exceptions.handle(self.request, msg, redirect=url)
return self.name
def get_context_data(self, **kwargs):
context = super(CreateVolumeTypeEncryptionView, self).\
get_context_data(**kwargs)
context['volume_type_id'] = self.kwargs['volume_type_id']
args = (self.kwargs['volume_type_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
def get_initial(self):
name = self.get_name()
return {'name': name,
'volume_type_id': self.kwargs['volume_type_id']}
class CreateQosSpecView(forms.ModalFormView):
form_class = volumes_forms.CreateQosSpec
modal_header = _("Create QoS Spec")
modal_id = "create_volume_type_modal"
template_name = 'admin/volumes/volume_types/create_qos_spec.html'
success_url = 'horizon:admin:volumes:volume_types_tab'
page_title = _("Create a QoS Spec")
submit_label = _("Create")
submit_url = reverse_lazy(
"horizon:admin:volumes:volume_types:create_qos_spec")
def get_success_url(self):
return reverse(self.success_url)
class EditQosSpecConsumerView(forms.ModalFormView):
form_class = volume_types_forms.EditQosSpecConsumer
modal_header = _("Edit Consumer of QoS Spec")
modal_id = "edit_qos_spec_modal"
template_name = 'admin/volumes/volume_types/edit_qos_spec_consumer.html'
submit_label = _("Modify Consumer")
submit_url = "horizon:admin:volumes:volume_types:edit_qos_spec_consumer"
success_url = 'horizon:admin:volumes:volume_types_tab'
page_title = _("Edit QoS Spec Consumer")
def get_success_url(self):
return reverse(self.success_url)
def get_context_data(self, **kwargs):
context = super(EditQosSpecConsumerView, self).\
get_context_data(**kwargs)
context['qos_spec_id'] = self.kwargs["qos_spec_id"]
args = (self.kwargs['qos_spec_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
@memoized.memoized_method
def get_object(self, *args, **kwargs):
qos_spec_id = self.kwargs['qos_spec_id']
try:
self._object = api.cinder.qos_spec_get(self.request, qos_spec_id)
except Exception:
msg = _('Unable to retrieve QoS Spec details.')
exceptions.handle(self.request, msg)
return self._object
def get_initial(self):
qos_spec = self.get_object()
qos_spec_id = self.kwargs['qos_spec_id']
return {'qos_spec_id': qos_spec_id,
'qos_spec': qos_spec}
class ManageQosSpecAssociationView(forms.ModalFormView):
form_class = volume_types_forms.ManageQosSpecAssociation
modal_header = _("Associate QoS Spec with Volume Type")
modal_id = "associate_qos_spec_modal"
template_name = 'admin/volumes/volume_types/associate_qos_spec.html'
submit_label = _("Associate")
submit_url = "horizon:admin:volumes:volume_types:"\
"manage_qos_spec_association"
success_url = 'horizon:admin:volumes:volume_types_tab'
page_title = _("Associate QoS Spec with Volume Type")
def get_success_url(self):
return reverse(self.success_url)
def get_context_data(self, **kwargs):
context = super(ManageQosSpecAssociationView, self).\
get_context_data(**kwargs)
context['type_id'] = self.kwargs["type_id"]
args = (self.kwargs['type_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
@memoized.memoized_method
def get_object(self, *args, **kwargs):
type_id = self.kwargs['type_id']
try:
self._object = api.cinder.volume_type_get(self.request, type_id)
except Exception:
msg = _('Unable to retrieve volume type details.')
exceptions.handle(self.request, msg)
return self._object
@memoized.memoized_method
def get_qos_specs(self, *args, **kwargs):
try:
return api.cinder.qos_spec_list(self.request)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve QoS Specs.'))
def find_current_qos_spec_association(self, vol_type_id):
qos_specs = self.get_qos_specs()
if qos_specs:
try:
# find out which QOS Spec is currently associated with this
# volume type, if any
# NOTE - volume type can only have ONE QOS Spec association
for qos_spec in qos_specs:
type_ids = \
api.cinder.qos_spec_get_associations(self.request,
qos_spec.id)
for vtype in type_ids:
if vtype.id == vol_type_id:
return qos_spec
except Exception:
exceptions.handle(
self.request,
_('Unable to retrieve QoS Spec association.'))
return None
def get_initial(self):
volume_type = self.get_object()
vol_type_id = self.kwargs['type_id']
cur_qos_spec_id = None
cur_qos_spec_name = None
qos_spec = self.find_current_qos_spec_association(vol_type_id)
if qos_spec:
cur_qos_spec_id = qos_spec.id
cur_qos_spec_name = qos_spec.name
return {'type_id': vol_type_id,
'name': getattr(volume_type, 'name', None),
'cur_qos_spec_id': cur_qos_spec_id,
'cur_qos_spec_name': cur_qos_spec_name,
'qos_specs': self.get_qos_specs()}
| apache-2.0 |
entropia/no-bot-is-perfect | nbip_server/nbip/models.py | 1 | 20182 | # coding=utf-8
import random
import re
from django.db import models, transaction
from django.db.models import Count, Q
from django.core.exceptions import ValidationError
from django.contrib.auth.models import User
from django.conf import settings
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
import django.utils.http
# Enumeration type
class Guess(int):
# int is immutable, so the value must be validated in __new__;
# rebinding ``self`` inside __init__ (as the old code did) had no effect.
def __new__(cls, value):
value = int(value)
assert 0 <= value < 3, "value %s out of range" % value
return super(Guess, cls).__new__(cls, value)
# Breaks the admin form
#def __unicode__(self):
# return {
# 0: 'Richtig',
# 1: 'Mensch',
# 2: 'Computer',
# }[int(self)]
# Constants
CORRECT = Guess(0)
HUMAN = Guess(1)
COMPUTER = Guess(2)
# Custom fields
class GuessField(models.PositiveSmallIntegerField):
description = "Choice: Correct, Human or Bot"
__metaclass__ = models.SubfieldBase
def __init__(self, *args, **kwargs):
kwargs['choices'] = [
(CORRECT, 'Richtig')
, (HUMAN, 'Mensch')
, (COMPUTER, 'Computer')
]
kwargs['null'] = True
super(GuessField, self).__init__(*args, **kwargs)
def to_python(self, value):
if value is None:
return None
if isinstance(value, Guess):
return value
return Guess(value)
def formfield(self, **kwargs):
defaults = {'coerce': lambda x: Guess(x)}
defaults.update(kwargs)
return super(GuessField, self).formfield(**defaults)
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], ["^nbip\.models\.GuessField"])
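# Illustrative coercion behaviour (assumed, following the definitions above):
#   Guess(1) == HUMAN                      # a validated int subclass
#   GuessField().to_python(2) == COMPUTER
#   GuessField().to_python(None) is None   # NULL stays None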
# Exceptions
class NotEnoughWordsException(Exception):
pass
class NotEnoughExplanationsException(Exception):
def missing_human_explanations(self):
return self.args[0]
def missing_bot_explanations(self):
return self.args[1]
# Models
class Bot(models.Model):
class Meta:
verbose_name = "Bot"
verbose_name_plural = u"Bots"
owner = models.ForeignKey(User, verbose_name="Benutzer", related_name = "bots")
name = models.CharField(max_length=200, verbose_name="Bot-Name")
apikey = models.CharField(max_length=100, unique=True, verbose_name="API-Key")
explaining = models.ForeignKey('Word', verbose_name="Erklärt gerade", null=True)
def __unicode__(self):
return "%s by %s" % (self.name, self.owner)
def word_to_explain(self):
if not self.explaining:
word = Word.random_for_bot(bot = self)
self.explaining = word
self.save()
return self.explaining
@transaction.atomic
def explain_word(self, expl):
expl = Explanation(
word = self.explaining,
explanation = expl,
bot = self);
expl.save()
self.explaining = None
self.save()
class Word(models.Model):
class Meta:
verbose_name = "Wort"
verbose_name_plural = u"Wörter"
lemma = models.CharField(max_length=200)
correct_explanation = models.CharField(max_length=1000,
verbose_name = "Korrekte Erklärung",
help_text= u"<i>Wort</i> ist ein/eine <i>Erklärung</i>")
reference = models.URLField(blank=True,
verbose_name = "Referenz",
help_text = u"URL zu Wikipedia o.ä.")
created = models.DateTimeField(auto_now_add=True)
author = models.ForeignKey(User, verbose_name="Autor", related_name="submitted_words")
n_human_explanations = models.PositiveIntegerField(
verbose_name = "Anzahl menschliche Erklärungen",
default = 0)
n_bot_explanations = models.PositiveIntegerField(
verbose_name = "Anzahl Computer-Erklärungen",
default = 0)
# query components (Q objects)
@classmethod
def q_owner(cls, player):
return Q(author__exact = player.id)
@classmethod
def q_explained(cls, player):
explained = Explanation.objects.filter(author__exact = player).values("word")
return Q(id__in = explained)
@classmethod
def q_bot_explained(cls, bot):
explained = Explanation.objects.filter(bot__exact = bot).values("word")
return Q(id__in = explained)
@classmethod
def q_guessed(cls, player):
guessed = GameRound.objects.filter(player__exact = player).values("word")
return Q(id__in = guessed)
@classmethod
def q_unseen(cls, player):
return Q(~cls.q_owner(player) & ~cls.q_explained(player) & ~cls.q_guessed(player))
@classmethod
def q_answer_unseen(cls, player):
return Q(~cls.q_owner(player) & ~cls.q_guessed(player))
@classmethod
def q_possibly_complete(cls):
return Q(n_human_explanations__gte = settings.HUMAN_EXPLANATIONS) & \
Q(n_bot_explanations__gte = settings.BOT_EXPLANATIONS)
@classmethod
def random(cls, player):
''' Fetch a random to be explained '''
words = cls.objects \
.filter(cls.q_unseen(player))
if len(words) < 1:
raise NotEnoughWordsException()
needy_words = words \
.filter(n_human_explanations__lte = settings.HUMAN_EXPLANATIONS) \
.order_by('-n_human_explanations','-n_bot_explanations')
# If there are words with insufficient answers, return the one that is closest
# to having sufficient
if needy_words:
return needy_words.first()
# Otherwise return a random word
return random.choice(words)
@classmethod
def random_for_bot(cls, bot):
''' Fetch a random to be explained by a bot '''
words = cls.objects \
.exclude(cls.q_bot_explained(bot))
if len(words) < 1:
raise NotEnoughWordsException()
needy_words = words \
.filter(n_bot_explanations__lte = settings.BOT_EXPLANATIONS) \
.order_by('-n_bot_explanations','-n_human_explanations')
# If there are words with insufficient answers, return the one that is closest
# to having sufficient
if needy_words:
return needy_words.first()
# Otherwise return a random word
return random.choice(words)
def usable_for(self, player):
usable_human_explanations = \
self.explanation_set \
.filter(author__isnull = False) \
.exclude(author = player) \
.count()
usable_bot_explanations = \
self.explanation_set \
.filter(bot__isnull = False) \
.exclude(bot__in = player.bots.all()) \
.count()
return (usable_human_explanations >= settings.HUMAN_EXPLANATIONS and
usable_bot_explanations >= settings.BOT_EXPLANATIONS)
@classmethod
# similar to random, but only consider words with enough explanations
def random_explained(cls, player):
candidates = cls.objects \
.filter(cls.q_answer_unseen(player))
# This could possibly be rewritten with SQL, but seems to be tricky
words = filter(lambda w: w.usable_for(player),
candidates.filter(cls.q_possibly_complete()))
# fetches everything; be smarter if required
if not words:
best = candidates \
.order_by('-n_human_explanations', '-n_bot_explanations') \
.first()
if best:
# This is only an approximation, if the current user has explained that
# word already.
raise NotEnoughExplanationsException(
settings.HUMAN_EXPLANATIONS - best.n_human_explanations,
settings.BOT_EXPLANATIONS - best.n_bot_explanations
)
else:
raise NotEnoughWordsException()
return random.choice(words)
def update_cached_fields(self):
self.n_human_explanations = \
Explanation.objects.filter(author__isnull = False, word__exact = self.id).count()
self.n_bot_explanations = \
Explanation.objects.filter(bot__isnull = False, word__exact = self.id).count()
self.save()
def clean_an_explanation(self, e):
r = r"(eine?)? ?(" + re.escape(self.lemma) + \
r")? ?(ist)? ?(eine?) ?"
m = re.match(r, e, re.IGNORECASE)
if m:
return e[m.end(0):]
else:
return e
def clean_explanation(self):
return self.clean_an_explanation(self.correct_explanation)
def link(self):
if self.reference:
return self.reference
else:
return "https://duckduckgo.com/?q=%s" % django.utils.http.urlquote(self.lemma)
def __unicode__(self):
return self.lemma
class Explanation(models.Model):
class Meta:
verbose_name = u"Erklärung"
verbose_name_plural = u"Erklärungen"
word = models.ForeignKey(Word, verbose_name="Wort")
explanation = models.CharField(max_length=1000,
verbose_name= u"Erklärung",
help_text= u"<i>Wort</i> ist ein/eine <i>Erklärung</i>")
# exactly one of these should be not null
author = models.ForeignKey(User, verbose_name="Autor", blank=True, null=True, related_name="submitted_explanations")
bot = models.ForeignKey(Bot, verbose_name="Bot", blank=True, null=True, related_name="submitted_explanations")
def clean(self):
if self.author is None and self.bot is None:
raise ValidationError('Autor oder Bot müssen gesetzt sein.')
if self.author is not None and self.bot is not None:
raise ValidationError('Autor und Bot dürfen nicht beide gesetzt sein.')
def clean_explanation(self):
return self.word.clean_an_explanation(self.explanation)
def type(self):
'''HUMAN or COMPUTER'''
if self.author is not None:
return HUMAN
else:
return COMPUTER
def author_name(self):
if self.author is not None:
return self.author
else:
return u"„%s“ by %s" % (self.bot.name, self.bot.owner)
def __unicode__(self):
return "%s ist ein/eine %s" % (self.word.lemma, self.explanation)
# Keep Word.n_*_explanations up-to-date
@receiver(post_save, sender=Explanation)
@receiver(post_delete, sender=Explanation)
def update_word(sender, instance, **kwargs):
instance.word.update_cached_fields()
class GameRound(models.Model):
class Meta:
verbose_name = u"Spielrunde"
verbose_name_plural = u"Spielrunde"
word = models.ForeignKey(Word, verbose_name="Wort")
explanations = models.ManyToManyField(Explanation, related_name='explanation+', through='GameRoundEntry')
pos = models.PositiveSmallIntegerField()
# What the user guessed for the correct result
guess = GuessField()
player = models.ForeignKey(User, verbose_name="Spieler", related_name="gamerounds")
def __unicode__(self):
return "%s (%d)" % (self.word.lemma, self.id)
@classmethod
@transaction.atomic
def start_new_round(cls, player):
# pick a valid word where the user has not seen the answer before
word = Word.random_explained(player=player)
# fetch all possible explanations
# (excludes those by the current player, or his bots)
human_expls = word.explanation_set \
.filter(author__isnull = False) \
.exclude(author = player)
bot_expls = word.explanation_set.filter(bot__isnull = False) \
.exclude(bot__in = player.bots.all())
assert len(human_expls) >= settings.HUMAN_EXPLANATIONS, \
"n_human_explanations was not up to date? Bug in Word.useable_for()?"
assert len(bot_expls) >= settings.BOT_EXPLANATIONS, \
"n_bot_explanations was not up to date? Bug in Word.useable_for()?"
expl = random.sample(human_expls, settings.HUMAN_EXPLANATIONS) + \
random.sample(bot_expls, settings.BOT_EXPLANATIONS)
poss = range(1 + len(expl))
random.shuffle(poss)
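# poss is now a random permutation of the display positions 0..len(expl);
# the first pop() below becomes the slot of the correct explanation, the
# remaining pops are assigned to the decoy explanations.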
round = GameRound(
word = word,
guess = None,
pos = poss.pop(),
player = player,
)
round.save()
for e in expl:
GameRoundEntry(
gameround = round,
explanation = e,
pos = poss.pop(),
guess = None,
).save()
return round
def get_explanations(self):
entries = self.entries.select_related('explanation', 'explanation__author', 'explanation__word','explanation__bot','explanation__bot__owner').all()
expls = [None] * (1 + len(entries))
expls[self.pos] = {
'text': self.word.clean_explanation(),
'author' : self.word.author,
'guess': self.guess,
'actual': CORRECT,
}
for e in entries:
expls[e.pos] = {
'text': e.explanation.clean_explanation(),
'guess': e.guess,
'author': e.explanation.author_name(),
'actual': e.explanation.type(),
}
return expls
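# Illustrative result: a list indexed by display position, with the correct
# explanation sitting at index self.pos, e.g.
# [{'text': ..., 'actual': HUMAN, ...}, {'text': ..., 'actual': CORRECT, ...}]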
def get_counts(self):
counts = {
CORRECT: 1,
HUMAN: 0,
COMPUTER: 0,
}
for e in GameRoundEntry.objects.filter(gameround=self):
counts[e.explanation.type()] += 1
return counts
@transaction.atomic
def set_guesses(self, guesses):
# TODO: These assertions should be enforced by the client code (JS)
assert self.guess is None
entries = GameRoundEntry.objects.filter(gameround=self)
required = [CORRECT] + [e.explanation.type() for e in entries]
assert len(guesses) == len(required), \
"Wrong number of answers"
assert guesses.count(CORRECT) == required.count(CORRECT), \
"Wrong number of answers 'correct'"
assert guesses.count(HUMAN) == required.count(HUMAN), \
"Wrong number of answers 'human'"
assert guesses.count(COMPUTER) == required.count(COMPUTER), \
"Wrong number of answers 'computer'"
self.guess = guesses[self.pos]
for e in entries:
assert e.pos < len(guesses)
e.guess = guesses[e.pos]
e.save()
self.save()
class GameRoundEntry(models.Model):
class Meta:
verbose_name = u"Spielrunden-Erkärung"
verbose_name_plural = u"Spielrunden-Erkärung"
ordering = ['pos']
unique_together = ('gameround','explanation','pos')
gameround = models.ForeignKey(GameRound, related_name="entries")
explanation = models.ForeignKey(Explanation)
pos = models.PositiveSmallIntegerField()
guess = GuessField()
class Stats(models.Model):
user = models.OneToOneField(User, primary_key=True)
n_words = models.PositiveIntegerField(
verbose_name = "Eingereichte Wörter",
default = 0)
n_explanations = models.PositiveIntegerField(
verbose_name = "Eingereichte Erklärungen",
default = 0)
n_games = models.PositiveIntegerField(
verbose_name = "Spielrunden",
default = 0)
n_correct = models.PositiveIntegerField(
verbose_name = "Korrekt geraten",
default = 0)
n_wrong = models.PositiveIntegerField(
verbose_name = "Falsch geraten",
default = 0)
n_detected_human = models.PositiveIntegerField(
verbose_name = "Mensch erkannt",
default = 0)
n_detected_bot = models.PositiveIntegerField(
verbose_name = "Computer erkannt",
default = 0)
n_tricked = models.PositiveIntegerField(
verbose_name = "Andere reingelegt",
default = 0)
n_not_tricked = models.PositiveIntegerField(
verbose_name = "Andere nicht reingelegt",
default = 0)
def attrs(self):
for field in self._meta.fields:
if type(field) == models.PositiveIntegerField:
yield field.verbose_name, getattr(self, field.name)
def update(self):
self.n_words = \
self.user.submitted_words.count()
self.n_explanations = \
self.user.submitted_explanations.count()
self.n_games = \
self.user.gamerounds.exclude(guess__exact = None).count()
self.n_correct = \
self.user.gamerounds.exclude(guess__exact = None).filter(guess = CORRECT).count()
self.n_wrong = \
self.user.gamerounds.exclude(guess__exact = None).exclude(guess = CORRECT).count()
self.n_detected_human = \
GameRoundEntry.objects \
.filter(explanation__bot__exact = None) \
.filter(gameround__player = self.user) \
.filter(guess = HUMAN) \
.count()
self.n_detected_bot = \
GameRoundEntry.objects \
.filter(explanation__author__exact = None) \
.filter(gameround__player = self.user) \
.filter(guess = COMPUTER) \
.count()
self.n_tricked = \
GameRoundEntry.objects \
.filter(explanation__author = self.user) \
.exclude(guess__exact = None) \
.filter(guess = CORRECT) \
.count()
self.n_not_tricked = \
GameRoundEntry.objects \
.filter(explanation__author = self.user) \
.exclude(guess__exact = None) \
.exclude(guess = CORRECT) \
.count()
self.save()
class BotStats(models.Model):
bot = models.OneToOneField(Bot, primary_key=True)
n_tricked = models.PositiveIntegerField(
verbose_name = "Andere reingelegt",
default = 0)
n_not_tricked = models.PositiveIntegerField(
verbose_name = "Andere nicht reingelegt",
default = 0)
def attrs(self):
for field in self._meta.fields:
if type(field) == models.PositiveIntegerField:
yield field.verbose_name, getattr(self, field.name)
def update(self):
self.n_tricked = \
GameRoundEntry.objects \
.filter(explanation__bot = self.bot) \
.exclude(guess__exact = None) \
.exclude(guess = COMPUTER) \
.count()
self.n_not_tricked = \
GameRoundEntry.objects \
.filter(explanation__bot = self.bot) \
.exclude(guess__exact = None) \
.filter(guess = COMPUTER) \
.count()
self.save()
# Keep Stats up-to-date
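# Note: no sender= is given below, so these handlers fire for every
# post_save/post_delete signal and dispatch on type(instance) themselves.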
@receiver(post_save, dispatch_uid="stats update")
@receiver(post_delete, dispatch_uid="stats update 2")
def update_stats(sender, instance, **kwargs):
affected_users = set()
affected_bots = set()
if type(instance) == Word:
affected_users.add(instance.author)
elif type(instance) == Explanation:
affected_users.add(instance.author)
elif type(instance) == GameRound:
affected_users.add(instance.player)
affected_users.add(instance.word.author)
for e in instance.entries.all():
if e.explanation.type() == HUMAN:
affected_users.add(e.explanation.author)
else:
affected_bots.add(e.explanation.bot)
affected_users.add(e.explanation.bot.owner)
for u in affected_users:
if u:
# Create stats object
if not(hasattr(u, 'stats')):
u.stats = Stats(user=u)
u.stats.save()
u.stats.update()
for b in affected_bots:
if b:
# Create stats object
if not(hasattr(b, 'stats')):
b.stats = BotStats(bot=b)
b.stats.save()
b.stats.update()
| mit |
aliev/dj-stripe | tests/test_middleware.py | 14 | 4537 | import datetime
import decimal
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
from django.test import TestCase
from django.test.client import RequestFactory
from django.test.utils import override_settings
from django.utils import timezone
from djstripe.models import Customer, CurrentSubscription
from djstripe.middleware import SubscriptionPaymentMiddleware
class MiddlewareURLTest(TestCase):
def setUp(self):
self.settings(ROOT_URLCONF='tests.test_urls')
self.factory = RequestFactory()
self.user = get_user_model().objects.create_user(username="pydanny",
email="pydanny@gmail.com")
self.middleware = SubscriptionPaymentMiddleware()
def test_appname(self):
request = self.factory.get("/admin/")
request.user = self.user
response = self.middleware.process_request(request)
self.assertEqual(response, None)
def test_namespace(self):
request = self.factory.get("/djstripe/")
request.user = self.user
response = self.middleware.process_request(request)
self.assertEqual(response, None)
def test_namespace_and_url(self):
request = self.factory.get("/testapp_namespaced/")
request.user = self.user
response = self.middleware.process_request(request)
self.assertEqual(response, None)
def test_url(self):
request = self.factory.get("/testapp/")
request.user = self.user
response = self.middleware.process_request(request)
self.assertEqual(response, None)
@override_settings(DEBUG=True)
def test_djdt(self):
request = self.factory.get("/__debug__/")
request.user = self.user
response = self.middleware.process_request(request)
self.assertEqual(response, None)
class MiddlewareLogicTest(TestCase):
urls = 'tests.test_urls'
def setUp(self):
period_start = datetime.datetime(2013, 4, 1, tzinfo=timezone.utc)
period_end = datetime.datetime(2013, 4, 30, tzinfo=timezone.utc)
start = datetime.datetime(2013, 1, 1, tzinfo=timezone.utc)
self.factory = RequestFactory()
self.user = get_user_model().objects.create_user(username="pydanny",
email="pydanny@gmail.com")
self.customer = Customer.objects.create(
subscriber=self.user,
stripe_id="cus_xxxxxxxxxxxxxxx",
card_fingerprint="YYYYYYYY",
card_last_4="2342",
card_kind="Visa"
)
self.subscription = CurrentSubscription.objects.create(
customer=self.customer,
plan="test",
current_period_start=period_start,
current_period_end=period_end,
amount=(500 / decimal.Decimal("100.0")),
status="active",
start=start,
quantity=1,
cancel_at_period_end=True
)
self.middleware = SubscriptionPaymentMiddleware()
def test_anonymous(self):
request = self.factory.get("/djstripe/")
request.user = AnonymousUser()
response = self.middleware.process_request(request)
self.assertEqual(response, None)
def test_is_staff(self):
self.user.is_staff = True
self.user.save()
request = self.factory.get("/djstripe/")
request.user = self.user
response = self.middleware.process_request(request)
self.assertEqual(response, None)
def test_is_superuser(self):
self.user.is_superuser = True
self.user.save()
request = self.factory.get("/djstripe/")
request.user = self.user
response = self.middleware.process_request(request)
self.assertEqual(response, None)
def test_customer_has_inactive_subscription(self):
request = self.factory.get("/testapp_content/")
request.user = self.user
response = self.middleware.process_request(request)
self.assertEqual(response.status_code, 302)
def test_customer_has_active_subscription(self):
end_date = datetime.datetime(2100, 4, 30, tzinfo=timezone.utc)
self.subscription.current_period_end = end_date
self.subscription.save()
request = self.factory.get("/testapp_content/")
request.user = self.user
response = self.middleware.process_request(request)
self.assertEqual(response, None)
| bsd-3-clause |
charbeljc/OCB | addons/base_report_designer/wizard/__init__.py | 421 | 1081 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base_report_designer_modify
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
amar-sharma/selenium | py/test/selenium/webdriver/chrome/chrome_launcher_tests.py | 12 | 1761 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import logging
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
class ChromeLauncherTests (unittest.TestCase):
def testLaunchAndCloseBrowser(self):
self.webdriver = webdriver.Chrome()
self.webdriver.quit()
def test_we_can_launch_multiple_chrome_instances(self):
self.webdriver1 = webdriver.Chrome()
self.webdriver2 = webdriver.Chrome()
self.webdriver3 = webdriver.Chrome()
self.webdriver1.quit()
self.webdriver2.quit()
self.webdriver3.quit()
def test_launch_chrome_do_not_affect_default_capabilities(self):
expected = DesiredCapabilities.CHROME.copy()
self.webdriver1 = webdriver.Chrome()
actual = DesiredCapabilities.CHROME.copy()
self.webdriver1.quit()
assert actual == expected
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
unittest.main()
| apache-2.0 |
ifduyue/polysh | polysh/control_commands_helpers.py | 3 | 4066 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# See the COPYING file for license information.
#
# Copyright (c) 2006 Guillaume Chazarain <guichaz@gmail.com>
import os
from fnmatch import fnmatch
import readline
from polysh.host_syntax import expand_syntax
from polysh.console import console_output
from polysh import dispatchers
from polysh import remote_dispatcher
def toggle_shells(command, enable):
"""Enable or disable the specified shells. If the command would have
no effect, it changes all other shells to the inverse enable value."""
selection = list(selected_shells(command))
if command and command != '*' and selection:
for i in selection:
if i.state != remote_dispatcher.STATE_DEAD and i.enabled != enable:
break
else:
toggle_shells('*', not enable)
for i in selection:
if i.state != remote_dispatcher.STATE_DEAD:
i.set_enabled(enable)
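# Illustrative (hypothetical pattern): toggle_shells('web*', True) enables all
# shells matching web*; if every match is already enabled (the call would be a
# no-op), all other shells are disabled instead, leaving only the matches on.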
def selected_shells(command):
"""Iterator over the shells with names matching the patterns.
An empty pattern matches all the shells"""
if not command or command == '*':
for i in dispatchers.all_instances():
yield i
return
selected = set()
instance_found = False
for pattern in command.split():
found = False
for expanded_pattern in expand_syntax(pattern):
for i in dispatchers.all_instances():
instance_found = True
if fnmatch(i.display_name, expanded_pattern):
found = True
if i not in selected:
selected.add(i)
yield i
if instance_found and not found:
console_output('%s not found\n' % pattern)
def complete_shells(line, text, predicate=lambda i: True):
"""Return the shell names to include in the completion"""
res = [i.display_name + ' ' for i in dispatchers.all_instances() if \
i.display_name.startswith(text) and \
predicate(i) and \
' ' + i.display_name + ' ' not in line]
return res
def expand_local_path(path):
return os.path.expanduser(os.path.expandvars(path) or '~')
def list_control_commands():
from polysh import control_commands
return [c[3:] for c in dir(control_commands) if c.startswith('do_')]
def get_control_command(name):
from polysh import control_commands
func = getattr(control_commands, 'do_' + name)
return func
def complete_control_command(line, text):
from polysh import control_commands
if readline.get_begidx() == 0:
# Completing control command name
cmds = list_control_commands()
prefix = text[1:]
matches = [':' + cmd + ' ' for cmd in cmds if cmd.startswith(prefix)]
else:
# Completing control command parameters
cmd = line.split()[0][1:]
def_compl = lambda line: []
compl_func = getattr(control_commands, 'complete_' + cmd, def_compl)
matches = compl_func(line, text)
return matches
def handle_control_command(line):
if not line:
return
cmd_name = line.split()[0]
try:
cmd_func = get_control_command(cmd_name)
except AttributeError:
console_output('Unknown control command: %s\n' % cmd_name)
else:
parameters = line[len(cmd_name) + 1:]
cmd_func(parameters)
| gpl-2.0 |
opoplawski/ansible | test/units/executor/test_task_executor.py | 25 | 11015 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.executor.task_executor import TaskExecutor
from ansible.playbook.play_context import PlayContext
from ansible.plugins import action_loader, lookup_loader
from units.mock.loader import DictDataLoader
class TestTaskExecutor(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_task_executor_init(self):
fake_loader = DictDataLoader({})
mock_host = MagicMock()
mock_task = MagicMock()
mock_play_context = MagicMock()
mock_shared_loader = MagicMock()
new_stdin = None
job_vars = dict()
te = TaskExecutor(
host = mock_host,
task = mock_task,
job_vars = job_vars,
play_context = mock_play_context,
new_stdin = new_stdin,
loader = fake_loader,
shared_loader_obj = mock_shared_loader,
)
def test_task_executor_run(self):
fake_loader = DictDataLoader({})
mock_host = MagicMock()
mock_task = MagicMock()
mock_task._role._role_path = '/path/to/role/foo'
mock_play_context = MagicMock()
mock_shared_loader = MagicMock()
new_stdin = None
job_vars = dict()
te = TaskExecutor(
host = mock_host,
task = mock_task,
job_vars = job_vars,
play_context = mock_play_context,
new_stdin = new_stdin,
loader = fake_loader,
shared_loader_obj = mock_shared_loader,
)
te._get_loop_items = MagicMock(return_value=None)
te._execute = MagicMock(return_value=dict())
res = te.run()
te._get_loop_items = MagicMock(return_value=[])
res = te.run()
te._get_loop_items = MagicMock(return_value=['a','b','c'])
te._run_loop = MagicMock(return_value=[dict(item='a', changed=True), dict(item='b', failed=True), dict(item='c')])
res = te.run()
te._get_loop_items = MagicMock(side_effect=AnsibleError(""))
res = te.run()
self.assertIn("failed", res)
def test_task_executor_get_loop_items(self):
fake_loader = DictDataLoader({})
mock_host = MagicMock()
mock_task = MagicMock()
mock_task.loop = 'items'
mock_task.loop_args = ['a', 'b', 'c']
mock_play_context = MagicMock()
mock_shared_loader = MagicMock()
mock_shared_loader.lookup_loader = lookup_loader
new_stdin = None
job_vars = dict()
te = TaskExecutor(
host = mock_host,
task = mock_task,
job_vars = job_vars,
play_context = mock_play_context,
new_stdin = new_stdin,
loader = fake_loader,
shared_loader_obj = mock_shared_loader,
)
items = te._get_loop_items()
self.assertEqual(items, ['a', 'b', 'c'])
def test_task_executor_run_loop(self):
items = ['a', 'b', 'c']
fake_loader = DictDataLoader({})
mock_host = MagicMock()
def _copy():
new_item = MagicMock()
return new_item
mock_task = MagicMock()
mock_task.copy.side_effect = _copy
mock_play_context = MagicMock()
mock_shared_loader = MagicMock()
new_stdin = None
job_vars = dict()
te = TaskExecutor(
host = mock_host,
task = mock_task,
job_vars = job_vars,
play_context = mock_play_context,
new_stdin = new_stdin,
loader = fake_loader,
shared_loader_obj = mock_shared_loader,
)
def _execute(variables):
return dict(item=variables.get('item'))
te._squash_items = MagicMock(return_value=items)
te._execute = MagicMock(side_effect=_execute)
res = te._run_loop(items)
self.assertEqual(len(res), 3)
def test_task_executor_squash_items(self):
items = ['a', 'b', 'c']
fake_loader = DictDataLoader({})
mock_host = MagicMock()
def _evaluate_conditional(templar, variables):
item = variables.get('item')
if item == 'b':
return False
return True
mock_task = MagicMock()
mock_task.evaluate_conditional.side_effect = _evaluate_conditional
mock_play_context = MagicMock()
mock_shared_loader = None
new_stdin = None
job_vars = dict(pkg_mgr='yum')
te = TaskExecutor(
host = mock_host,
task = mock_task,
job_vars = job_vars,
play_context = mock_play_context,
new_stdin = new_stdin,
loader = fake_loader,
shared_loader_obj = mock_shared_loader,
)
mock_task.action = 'foo'
new_items = te._squash_items(items=items, variables=job_vars)
self.assertEqual(new_items, ['a', 'b', 'c'])
mock_task.action = 'yum'
new_items = te._squash_items(items=items, variables=job_vars)
self.assertEqual(new_items, [['a','c']])
mock_task.action = '{{pkg_mgr}}'
new_items = te._squash_items(items=items, variables=job_vars)
self.assertEqual(new_items, [['a', 'c']])
# Smoketests -- these won't optimize but make sure that they don't
# traceback either
mock_task.action = '{{unknown}}'
new_items = te._squash_items(items=items, variables=job_vars)
self.assertEqual(new_items, ['a', 'b', 'c'])
items = [dict(name='a', state='present'),
dict(name='b', state='present'),
dict(name='c', state='present')]
mock_task.action = 'yum'
new_items = te._squash_items(items=items, variables=job_vars)
self.assertEqual(new_items, items)
def test_task_executor_execute(self):
fake_loader = DictDataLoader({})
mock_host = MagicMock()
mock_task = MagicMock()
mock_task.args = dict()
mock_task.retries = 0
mock_task.delay = -1
mock_task.register = 'foo'
mock_task.until = None
mock_task.changed_when = None
mock_task.failed_when = None
mock_task.post_validate.return_value = None
# mock_task.async cannot be left unset, because on Python 3 evaluating
# MagicMock() > 0 raises a TypeError. There are two reasons for using the
# value 1 here: on Python 2 comparing MagicMock() > 0 returns True, and
# if 0 is specified here, the test fails. ;)
mock_task.async = 1
mock_play_context = MagicMock()
mock_play_context.post_validate.return_value = None
mock_play_context.update_vars.return_value = None
mock_connection = MagicMock()
mock_connection.set_host_overrides.return_value = None
mock_connection._connect.return_value = None
mock_action = MagicMock()
shared_loader = None
new_stdin = None
job_vars = dict(omit="XXXXXXXXXXXXXXXXXXX")
te = TaskExecutor(
host = mock_host,
task = mock_task,
job_vars = job_vars,
play_context = mock_play_context,
new_stdin = new_stdin,
loader = fake_loader,
shared_loader_obj = shared_loader,
)
te._get_connection = MagicMock(return_value=mock_connection)
te._get_action_handler = MagicMock(return_value=mock_action)
mock_action.run.return_value = dict(ansible_facts=dict())
res = te._execute()
mock_task.changed_when = "1 == 1"
res = te._execute()
mock_task.changed_when = None
mock_task.failed_when = "1 == 1"
res = te._execute()
mock_task.failed_when = None
mock_task.evaluate_conditional.return_value = False
res = te._execute()
mock_task.evaluate_conditional.return_value = True
mock_task.args = dict(_raw_params='foo.yml', a='foo', b='bar')
mock_task.action = 'include'
res = te._execute()
def test_task_executor_poll_async_result(self):
fake_loader = DictDataLoader({})
mock_host = MagicMock()
mock_task = MagicMock()
mock_task.async = 3
mock_task.poll = 1
mock_play_context = MagicMock()
mock_connection = MagicMock()
mock_action = MagicMock()
shared_loader = MagicMock()
shared_loader.action_loader = action_loader
new_stdin = None
job_vars = dict(omit="XXXXXXXXXXXXXXXXXXX")
te = TaskExecutor(
host = mock_host,
task = mock_task,
job_vars = job_vars,
play_context = mock_play_context,
new_stdin = new_stdin,
loader = fake_loader,
shared_loader_obj = shared_loader,
)
te._connection = MagicMock()
def _get(*args, **kwargs):
mock_action = MagicMock()
mock_action.run.return_value = dict(stdout='')
return mock_action
# testing with some bad values in the result passed to poll async,
# and with a bad value returned from the mock action
with patch.object(action_loader, 'get', _get):
mock_templar = MagicMock()
res = te._poll_async_result(result=dict(), templar=mock_templar)
self.assertIn('failed', res)
res = te._poll_async_result(result=dict(ansible_job_id=1), templar=mock_templar)
self.assertIn('failed', res)
def _get(*args, **kwargs):
mock_action = MagicMock()
mock_action.run.return_value = dict(finished=1)
return mock_action
# now testing with good values
with patch.object(action_loader, 'get', _get):
mock_templar = MagicMock()
res = te._poll_async_result(result=dict(ansible_job_id=1), templar=mock_templar)
self.assertEqual(res, dict(finished=1))
| gpl-3.0 |
MattDevo/edk2 | AppPkg/Applications/Python/Python-2.7.2/Tools/scripts/pickle2db.py | 12 | 4089 | #!/usr/bin/env python
"""
Synopsis: %(prog)s [-h|-b|-g|-r|-a|-d] [ picklefile ] dbfile
Read the given picklefile as a series of key/value pairs and write to a new
database. If the database already exists, any contents are deleted. The
optional flags indicate the type of the output database:
-a - open using anydbm
-b - open as bsddb btree file
-d - open as dbm file
-g - open as gdbm file
-h - open as bsddb hash file
-r - open as bsddb recno file
The default is hash. If a pickle file is named it is opened for read
access. If no pickle file is named, the pickle input is read from standard
input.
Note that recno databases can only contain integer keys, so you can't dump a
hash or btree database using db2pickle.py and reconstitute it to a recno
database with %(prog)s unless your keys are integers.
"""
import getopt
try:
import bsddb
except ImportError:
bsddb = None
try:
import dbm
except ImportError:
dbm = None
try:
import gdbm
except ImportError:
gdbm = None
try:
import anydbm
except ImportError:
anydbm = None
import sys
try:
import cPickle as pickle
except ImportError:
import pickle
prog = sys.argv[0]
def usage():
sys.stderr.write(__doc__ % globals())
def main(args):
try:
opts, args = getopt.getopt(args, "hbrdag",
["hash", "btree", "recno", "dbm", "anydbm",
"gdbm"])
except getopt.error:
usage()
return 1
if len(args) == 0 or len(args) > 2:
usage()
return 1
elif len(args) == 1:
pfile = sys.stdin
dbfile = args[0]
else:
try:
pfile = open(args[0], 'rb')
except IOError:
sys.stderr.write("Unable to open %s\n" % args[0])
return 1
dbfile = args[1]
dbopen = None
for opt, arg in opts:
if opt in ("-h", "--hash"):
try:
dbopen = bsddb.hashopen
except AttributeError:
sys.stderr.write("bsddb module unavailable.\n")
return 1
elif opt in ("-b", "--btree"):
try:
dbopen = bsddb.btopen
except AttributeError:
sys.stderr.write("bsddb module unavailable.\n")
return 1
elif opt in ("-r", "--recno"):
try:
dbopen = bsddb.rnopen
except AttributeError:
sys.stderr.write("bsddb module unavailable.\n")
return 1
elif opt in ("-a", "--anydbm"):
try:
dbopen = anydbm.open
except AttributeError:
sys.stderr.write("anydbm module unavailable.\n")
return 1
elif opt in ("-g", "--gdbm"):
try:
dbopen = gdbm.open
except AttributeError:
sys.stderr.write("gdbm module unavailable.\n")
return 1
elif opt in ("-d", "--dbm"):
try:
dbopen = dbm.open
except AttributeError:
sys.stderr.write("dbm module unavailable.\n")
return 1
if dbopen is None:
if bsddb is None:
sys.stderr.write("bsddb module unavailable - ")
sys.stderr.write("must specify dbtype.\n")
return 1
else:
dbopen = bsddb.hashopen
try:
db = dbopen(dbfile, 'c')
except bsddb.error:
sys.stderr.write("Unable to open %s. " % dbfile)
sys.stderr.write("Check for format or version mismatch.\n")
return 1
else:
for k in db.keys():
del db[k]
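# Copy pickled (key, value) tuples into the database until EOF is reached.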
while 1:
try:
(key, val) = pickle.load(pfile)
except EOFError:
break
db[key] = val
db.close()
pfile.close()
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| bsd-2-clause |
mortonjt/scipy | scipy/interpolate/tests/test_fitpack.py | 100 | 14248 | from __future__ import division, print_function, absolute_import
import os
import numpy as np
from numpy.testing import (assert_equal, assert_allclose, assert_,
TestCase, assert_raises, run_module_suite, assert_almost_equal,
assert_raises, assert_array_almost_equal)
from numpy import array, asarray, pi, sin, cos, arange, dot, ravel, sqrt, round
from scipy import interpolate
from scipy.interpolate.fitpack import (splrep, splev, bisplrep, bisplev,
sproot, splprep, splint, spalde, splder, splantider, insert, dblint)
def data_file(basename):
return os.path.join(os.path.abspath(os.path.dirname(__file__)),
'data', basename)
def norm2(x):
return sqrt(dot(x.T,x))
def f1(x,d=0):
if d is None:
return "sin"
if x is None:
return "sin(x)"
if d % 4 == 0:
return sin(x)
if d % 4 == 1:
return cos(x)
if d % 4 == 2:
return -sin(x)
if d % 4 == 3:
return -cos(x)
def f2(x,y=0,dx=0,dy=0):
if x is None:
return "sin(x+y)"
d = dx+dy
if d % 4 == 0:
return sin(x+y)
if d % 4 == 1:
return cos(x+y)
if d % 4 == 2:
return -sin(x+y)
if d % 4 == 3:
return -cos(x+y)
def makepairs(x, y):
"""Helper function to create an array of pairs of x and y."""
# Or itertools.product (>= python 2.6)
xy = array([[a, b] for a in asarray(x) for b in asarray(y)])
return xy.T
def put(*a):
"""Produce some output if file run directly"""
import sys
if hasattr(sys.modules['__main__'], '__put_prints'):
sys.stderr.write("".join(map(str, a)) + "\n")
class TestSmokeTests(TestCase):
"""
Smoke tests (with a few asserts) for fitpack routines -- mostly
check that they are runnable
"""
def check_1(self,f=f1,per=0,s=0,a=0,b=2*pi,N=20,at=0,xb=None,xe=None):
if xb is None:
xb = a
if xe is None:
xe = b
x = a+(b-a)*arange(N+1,dtype=float)/float(N) # nodes
x1 = a+(b-a)*arange(1,N,dtype=float)/float(N-1) # middle points of the nodes
v,v1 = f(x),f(x1)
nk = []
def err_est(k, d):
# Assume f has all derivatives < 1
h = 1.0/float(N)
tol = 5 * h**(.75*(k-d))
if s > 0:
tol += 1e5*s
return tol
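# The tolerance mirrors the expected convergence rate of a degree-k spline's
# d-th derivative, roughly O(h**(k-d)); the 0.75 exponent factor and the
# factor 5 act as heuristic safety margins, with an extra penalty when
# smoothing (s > 0) is used.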
for k in range(1,6):
tck = splrep(x,v,s=s,per=per,k=k,xe=xe)
if at:
t = tck[0][k:-k]
else:
t = x1
nd = []
for d in range(k+1):
tol = err_est(k, d)
err = norm2(f(t,d)-splev(t,tck,d)) / norm2(f(t,d))
assert_(err < tol, (k, d, err, tol))
nd.append((err, tol))
nk.append(nd)
put("\nf = %s s=S_k(x;t,c) x in [%s, %s] > [%s, %s]" % (f(None),
repr(round(xb,3)),repr(round(xe,3)),
repr(round(a,3)),repr(round(b,3))))
if at:
str = "at knots"
else:
str = "at the middle of nodes"
put(" per=%d s=%s Evaluation %s" % (per,repr(s),str))
put(" k : |f-s|^2 |f'-s'| |f''-.. |f'''-. |f''''- |f'''''")
k = 1
for l in nk:
put(' %d : ' % k)
for r in l:
put(' %.1e %.1e' % r)
put('\n')
k = k+1
def check_2(self,f=f1,per=0,s=0,a=0,b=2*pi,N=20,xb=None,xe=None,
ia=0,ib=2*pi,dx=0.2*pi):
if xb is None:
xb = a
if xe is None:
xe = b
x = a+(b-a)*arange(N+1,dtype=float)/float(N) # nodes
v = f(x)
def err_est(k, d):
# Assume f has all derivatives < 1
h = 1.0/float(N)
tol = 5 * h**(.75*(k-d))
if s > 0:
tol += 1e5*s
return tol
nk = []
for k in range(1,6):
tck = splrep(x,v,s=s,per=per,k=k,xe=xe)
nk.append([splint(ia,ib,tck),spalde(dx,tck)])
put("\nf = %s s=S_k(x;t,c) x in [%s, %s] > [%s, %s]" % (f(None),
repr(round(xb,3)),repr(round(xe,3)),
repr(round(a,3)),repr(round(b,3))))
put(" per=%d s=%s N=%d [a, b] = [%s, %s] dx=%s" % (per,repr(s),N,repr(round(ia,3)),repr(round(ib,3)),repr(round(dx,3))))
put(" k : int(s,[a,b]) Int.Error Rel. error of s^(d)(dx) d = 0, .., k")
k = 1
for r in nk:
if r[0] < 0:
sr = '-'
else:
sr = ' '
put(" %d %s%.8f %.1e " % (k,sr,abs(r[0]),
abs(r[0]-(f(ib,-1)-f(ia,-1)))))
d = 0
for dr in r[1]:
err = abs(1-dr/f(dx,d))
tol = err_est(k, d)
assert_(err < tol, (k, d))
put(" %.1e %.1e" % (err, tol))
d = d+1
put("\n")
k = k+1
def check_3(self,f=f1,per=0,s=0,a=0,b=2*pi,N=20,xb=None,xe=None,
ia=0,ib=2*pi,dx=0.2*pi):
if xb is None:
xb = a
if xe is None:
xe = b
x = a+(b-a)*arange(N+1,dtype=float)/float(N) # nodes
v = f(x)
put(" k : Roots of s(x) approx %s x in [%s,%s]:" %
(f(None),repr(round(a,3)),repr(round(b,3))))
for k in range(1,6):
tck = splrep(x, v, s=s, per=per, k=k, xe=xe)
if k == 3:
roots = sproot(tck)
assert_allclose(splev(roots, tck), 0, atol=1e-10, rtol=1e-10)
assert_allclose(roots, pi*array([1, 2, 3, 4]), rtol=1e-3)
put(' %d : %s' % (k, repr(roots.tolist())))
else:
assert_raises(ValueError, sproot, tck)
def check_4(self,f=f1,per=0,s=0,a=0,b=2*pi,N=20,xb=None,xe=None,
ia=0,ib=2*pi,dx=0.2*pi):
if xb is None:
xb = a
if xe is None:
xe = b
x = a+(b-a)*arange(N+1,dtype=float)/float(N) # nodes
x1 = a + (b-a)*arange(1,N,dtype=float)/float(N-1) # middle points of the nodes
v,v1 = f(x),f(x1)
put(" u = %s N = %d" % (repr(round(dx,3)),N))
put(" k : [x(u), %s(x(u))] Error of splprep Error of splrep " % (f(0,None)))
for k in range(1,6):
tckp,u = splprep([x,v],s=s,per=per,k=k,nest=-1)
tck = splrep(x,v,s=s,per=per,k=k)
uv = splev(dx,tckp)
err1 = abs(uv[1]-f(uv[0]))
err2 = abs(splev(uv[0],tck)-f(uv[0]))
assert_(err1 < 1e-2)
assert_(err2 < 1e-2)
put(" %d : %s %.1e %.1e" %
(k,repr([round(z,3) for z in uv]),
err1,
err2))
put("Derivatives of parametric cubic spline at u (first function):")
k = 3
tckp,u = splprep([x,v],s=s,per=per,k=k,nest=-1)
for d in range(1,k+1):
uv = splev(dx,tckp,d)
put(" %s " % (repr(uv[0])))
def check_5(self,f=f2,kx=3,ky=3,xb=0,xe=2*pi,yb=0,ye=2*pi,Nx=20,Ny=20,s=0):
x = xb+(xe-xb)*arange(Nx+1,dtype=float)/float(Nx)
y = yb+(ye-yb)*arange(Ny+1,dtype=float)/float(Ny)
xy = makepairs(x,y)
tck = bisplrep(xy[0],xy[1],f(xy[0],xy[1]),s=s,kx=kx,ky=ky)
tt = [tck[0][kx:-kx],tck[1][ky:-ky]]
t2 = makepairs(tt[0],tt[1])
v1 = bisplev(tt[0],tt[1],tck)
v2 = f2(t2[0],t2[1])
v2.shape = len(tt[0]),len(tt[1])
err = norm2(ravel(v1-v2))
assert_(err < 1e-2, err)
put(err)
def test_smoke_splrep_splev(self):
put("***************** splrep/splev")
self.check_1(s=1e-6)
self.check_1()
self.check_1(at=1)
self.check_1(per=1)
self.check_1(per=1,at=1)
self.check_1(b=1.5*pi)
self.check_1(b=1.5*pi,xe=2*pi,per=1,s=1e-1)
def test_smoke_splint_spalde(self):
put("***************** splint/spalde")
self.check_2()
self.check_2(per=1)
self.check_2(ia=0.2*pi,ib=pi)
self.check_2(ia=0.2*pi,ib=pi,N=50)
def test_smoke_sproot(self):
put("***************** sproot")
self.check_3(a=0.1,b=15)
def test_smoke_splprep_splrep_splev(self):
put("***************** splprep/splrep/splev")
self.check_4()
self.check_4(N=50)
def test_smoke_bisplrep_bisplev(self):
put("***************** bisplev")
self.check_5()
class TestSplev(TestCase):
def test_1d_shape(self):
x = [1,2,3,4,5]
y = [4,5,6,7,8]
tck = splrep(x, y)
z = splev([1], tck)
assert_equal(z.shape, (1,))
z = splev(1, tck)
assert_equal(z.shape, ())
def test_2d_shape(self):
x = [1, 2, 3, 4, 5]
y = [4, 5, 6, 7, 8]
tck = splrep(x, y)
t = np.array([[1.0, 1.5, 2.0, 2.5],
[3.0, 3.5, 4.0, 4.5]])
z = splev(t, tck)
z0 = splev(t[0], tck)
z1 = splev(t[1], tck)
assert_equal(z, np.row_stack((z0, z1)))
def test_extrapolation_modes(self):
# test extrapolation modes
# * if ext=0, return the extrapolated value.
# * if ext=1, return 0
# * if ext=2, raise a ValueError
# * if ext=3, return the boundary value.
x = [1,2,3]
y = [0,2,4]
tck = splrep(x, y, k=1)
rstl = [[-2, 6], [0, 0], None, [0, 4]]
for ext in (0, 1, 3):
assert_array_almost_equal(splev([0, 4], tck, ext=ext), rstl[ext])
assert_raises(ValueError, splev, [0, 4], tck, ext=2)
class TestSplder(object):
def __init__(self):
# non-uniform grid, just to make it sure
x = np.linspace(0, 1, 100)**3
y = np.sin(20 * x)
self.spl = splrep(x, y)
# double check that knots are non-uniform
assert_(np.diff(self.spl[0]).ptp() > 0)
def test_inverse(self):
# Check that antiderivative + derivative is identity.
for n in range(5):
spl2 = splantider(self.spl, n)
spl3 = splder(spl2, n)
assert_allclose(self.spl[0], spl3[0])
assert_allclose(self.spl[1], spl3[1])
assert_equal(self.spl[2], spl3[2])
def test_splder_vs_splev(self):
# Check derivative vs. FITPACK
for n in range(3+1):
# Also extrapolation!
xx = np.linspace(-1, 2, 2000)
if n == 3:
# ... except that FITPACK extrapolates strangely for
# order 0, so let's not check that.
xx = xx[(xx >= 0) & (xx <= 1)]
dy = splev(xx, self.spl, n)
spl2 = splder(self.spl, n)
dy2 = splev(xx, spl2)
if n == 1:
assert_allclose(dy, dy2, rtol=2e-6)
else:
assert_allclose(dy, dy2)
def test_splantider_vs_splint(self):
# Check antiderivative vs. FITPACK
spl2 = splantider(self.spl)
# no extrapolation, splint assumes function is zero outside
# range
xx = np.linspace(0, 1, 20)
for x1 in xx:
for x2 in xx:
y1 = splint(x1, x2, self.spl)
y2 = splev(x2, spl2) - splev(x1, spl2)
assert_allclose(y1, y2)
def test_order0_diff(self):
assert_raises(ValueError, splder, self.spl, 4)
def test_kink(self):
# Should refuse to differentiate splines with kinks
spl2 = insert(0.5, self.spl, m=2)
splder(spl2, 2) # Should work
assert_raises(ValueError, splder, spl2, 3)
spl2 = insert(0.5, self.spl, m=3)
splder(spl2, 1) # Should work
assert_raises(ValueError, splder, spl2, 2)
spl2 = insert(0.5, self.spl, m=4)
assert_raises(ValueError, splder, spl2, 1)
class TestBisplrep(object):
def test_overflow(self):
a = np.linspace(0, 1, 620)
b = np.linspace(0, 1, 620)
x, y = np.meshgrid(a, b)
z = np.random.rand(*x.shape)
assert_raises(OverflowError, bisplrep, x.ravel(), y.ravel(), z.ravel(), s=0)
def test_regression_1310(self):
# Regression test for gh-1310
data = np.load(data_file('bug-1310.npz'))['data']
# Shouldn't crash -- the input data triggers work array sizes
# that caused previously some data to not be aligned on
# sizeof(double) boundaries in memory, which made the Fortran
# code to crash when compiled with -O3
bisplrep(data[:,0], data[:,1], data[:,2], kx=3, ky=3, s=0,
full_output=True)
def test_dblint():
# Basic test to see it runs and gives the correct result on a trivial
# problem. Note that `dblint` is not exposed in the interpolate namespace.
x = np.linspace(0, 1)
y = np.linspace(0, 1)
xx, yy = np.meshgrid(x, y)
rect = interpolate.RectBivariateSpline(x, y, 4 * xx * yy)
tck = list(rect.tck)
tck.extend(rect.degrees)
assert_almost_equal(dblint(0, 1, 0, 1, tck), 1)
assert_almost_equal(dblint(0, 0.5, 0, 1, tck), 0.25)
assert_almost_equal(dblint(0.5, 1, 0, 1, tck), 0.75)
assert_almost_equal(dblint(-100, 100, -100, 100, tck), 1)
def test_splev_der_k():
# regression test for gh-2188: splev(x, tck, der=k) gives garbage or crashes
# for x outside of knot range
# test case from gh-2188
tck = (np.array([0., 0., 2.5, 2.5]),
np.array([-1.56679978, 2.43995873, 0., 0.]),
1)
t, c, k = tck
x = np.array([-3, 0, 2.5, 3])
# an explicit form of the linear spline
assert_allclose(splev(x, tck), c[0] + (c[1] - c[0]) * x/t[2])
assert_allclose(splev(x, tck, 1), (c[1]-c[0]) / t[2])
# now check a random spline vs splder
np.random.seed(1234)
x = np.sort(np.random.random(30))
y = np.random.random(30)
t, c, k = splrep(x, y)
x = [t[0] - 1., t[-1] + 1.]
tck2 = splder((t, c, k), k)
assert_allclose(splev(x, (t, c, k), k), splev(x, tck2))
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
dkarakats/edx-platform | common/djangoapps/student/forms.py | 54 | 9305 | """
Utility functions for validating forms
"""
from django import forms
from django.core.exceptions import ValidationError
from django.contrib.auth.models import User
from django.contrib.auth.forms import PasswordResetForm
from django.contrib.auth.hashers import UNUSABLE_PASSWORD
from django.contrib.auth.tokens import default_token_generator
from django.utils.http import int_to_base36
from django.utils.translation import ugettext_lazy as _
from django.template import loader
from django.conf import settings
from microsite_configuration import microsite
from util.password_policy_validators import (
validate_password_length,
validate_password_complexity,
validate_password_dictionary,
)
class PasswordResetFormNoActive(PasswordResetForm):
def clean_email(self):
"""
This is a literal copy of Django 1.4.5's django.contrib.auth.forms.PasswordResetForm,
except that the requirement that users be active has been removed.
Validates that a user exists with the given email address.
"""
email = self.cleaned_data["email"]
#The line below contains the only change, removing is_active=True
self.users_cache = User.objects.filter(email__iexact=email)
if not len(self.users_cache):
raise forms.ValidationError(self.error_messages['unknown'])
if any((user.password == UNUSABLE_PASSWORD)
for user in self.users_cache):
raise forms.ValidationError(self.error_messages['unusable'])
return email
def save(
self,
domain_override=None,
subject_template_name='registration/password_reset_subject.txt',
email_template_name='registration/password_reset_email.html',
use_https=False,
token_generator=default_token_generator,
from_email=settings.DEFAULT_FROM_EMAIL,
request=None
):
"""
Generates a one-use only link for resetting password and sends to the
user.
"""
# This import is here because we are copying and modifying the .save from Django 1.4.5's
# django.contrib.auth.forms.PasswordResetForm directly, which has this import in this place.
from django.core.mail import send_mail
for user in self.users_cache:
if not domain_override:
site_name = microsite.get_value(
'SITE_NAME',
settings.SITE_NAME
)
else:
site_name = domain_override
context = {
'email': user.email,
'site_name': site_name,
'uid': int_to_base36(user.id),
'user': user,
'token': token_generator.make_token(user),
'protocol': 'https' if use_https else 'http',
'platform_name': microsite.get_value('platform_name', settings.PLATFORM_NAME)
}
subject = loader.render_to_string(subject_template_name, context)
# Email subject *must not* contain newlines
subject = subject.replace('\n', '')
email = loader.render_to_string(email_template_name, context)
send_mail(subject, email, from_email, [user.email])
class TrueField(forms.BooleanField):
"""
A boolean field that only accepts "true" (case-insensitive) as true
"""
def to_python(self, value):
# CheckboxInput converts string to bool by case-insensitive match to "true" or "false"
if value is True:
return value
else:
return None
_USERNAME_TOO_SHORT_MSG = _("Username must be a minimum of two characters long")
_EMAIL_INVALID_MSG = _("A properly formatted e-mail is required")
_PASSWORD_INVALID_MSG = _("A valid password is required")
_NAME_TOO_SHORT_MSG = _("Your legal name must be a minimum of two characters long")
class AccountCreationForm(forms.Form):
"""
A form for account creation data. It is currently only used for
validation, not rendering.
"""
# TODO: Resolve repetition
username = forms.SlugField(
min_length=2,
max_length=30,
error_messages={
"required": _USERNAME_TOO_SHORT_MSG,
"invalid": _("Username should only consist of A-Z and 0-9, with no spaces."),
"min_length": _USERNAME_TOO_SHORT_MSG,
"max_length": _("Username cannot be more than %(limit_value)s characters long"),
}
)
email = forms.EmailField(
max_length=75, # Limit per RFCs is 254, but User's email field in django 1.4 only takes 75
error_messages={
"required": _EMAIL_INVALID_MSG,
"invalid": _EMAIL_INVALID_MSG,
"max_length": _("Email cannot be more than %(limit_value)s characters long"),
}
)
password = forms.CharField(
min_length=2,
error_messages={
"required": _PASSWORD_INVALID_MSG,
"min_length": _PASSWORD_INVALID_MSG,
}
)
name = forms.CharField(
min_length=2,
error_messages={
"required": _NAME_TOO_SHORT_MSG,
"min_length": _NAME_TOO_SHORT_MSG,
}
)
def __init__(
self,
data=None,
extra_fields=None,
extended_profile_fields=None,
enforce_username_neq_password=False,
enforce_password_policy=False,
tos_required=True
):
super(AccountCreationForm, self).__init__(data)
extra_fields = extra_fields or {}
self.extended_profile_fields = extended_profile_fields or {}
self.enforce_username_neq_password = enforce_username_neq_password
self.enforce_password_policy = enforce_password_policy
if tos_required:
self.fields["terms_of_service"] = TrueField(
error_messages={"required": _("You must accept the terms of service.")}
)
# TODO: These messages don't say anything about minimum length
error_message_dict = {
"level_of_education": _("A level of education is required"),
"gender": _("Your gender is required"),
"year_of_birth": _("Your year of birth is required"),
"mailing_address": _("Your mailing address is required"),
"goals": _("A description of your goals is required"),
"city": _("A city is required"),
"country": _("A country is required")
}
for field_name, field_value in extra_fields.items():
if field_name not in self.fields:
if field_name == "honor_code":
if field_value == "required":
self.fields[field_name] = TrueField(
error_messages={
"required": _("To enroll, you must follow the honor code.")
}
)
else:
required = field_value == "required"
min_length = 1 if field_name in ("gender", "level_of_education") else 2
error_message = error_message_dict.get(
field_name,
_("You are missing one or more required fields")
)
self.fields[field_name] = forms.CharField(
required=required,
min_length=min_length,
error_messages={
"required": error_message,
"min_length": error_message,
}
)
for field in self.extended_profile_fields:
if field not in self.fields:
self.fields[field] = forms.CharField(required=False)
def clean_password(self):
"""Enforce password policies (if applicable)"""
password = self.cleaned_data["password"]
if (
self.enforce_username_neq_password and
"username" in self.cleaned_data and
self.cleaned_data["username"] == password
):
raise ValidationError(_("Username and password fields cannot match"))
if self.enforce_password_policy:
try:
validate_password_length(password)
validate_password_complexity(password)
validate_password_dictionary(password)
except ValidationError, err:
raise ValidationError(_("Password: ") + "; ".join(err.messages))
return password
def clean_year_of_birth(self):
"""
Parse year_of_birth to an integer, but just use None instead of raising
an error if it is malformed
"""
try:
year_str = self.cleaned_data["year_of_birth"]
return int(year_str) if year_str is not None else None
except ValueError:
return None
@property
def cleaned_extended_profile(self):
"""
Return a dictionary containing the extended_profile_fields and values
"""
return {
key: value
for key, value in self.cleaned_data.items()
if key in self.extended_profile_fields and value is not None
}
| agpl-3.0 |
aemal/westcat | api/rest/filters.py | 2 | 11519 | ###########################################################################
# (C) Vrije Universiteit, Amsterdam (the Netherlands) #
# #
# This file is part of AmCAT - The Amsterdam Content Analysis Toolkit #
# #
# AmCAT is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Affero General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# AmCAT is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Affero General Public #
# License along with AmCAT. If not, see <http://www.gnu.org/licenses/>. #
###########################################################################
"""
AmCAT-specific adaptations to rest_framework filters
(using django_filters)
activated by settings.REST_FRAMEWORK['FILTER_BACKEND']
"""
from api.rest import count
from rest_framework import filters
from django_filters import filterset
from django_filters.filters import Filter
from django_filters.filters import NumberFilter
from django.db import models
from django import forms
# Monkey patch filterset for autofield - no idea why it's not in that list
filterset.FILTER_FOR_DBFIELD_DEFAULTS[models.AutoField] = dict(filter_class=NumberFilter)
from django.forms import ValidationError
import logging; log = logging.getLogger(__name__)
ORDER_BY_FIELD = "order_by"
class InFilter(filterset.ModelMultipleChoiceFilter):
"""Filter for {'pk':[1,2,3]} / pk=1&pk=2 queries"""
def filter(self, qs, value):
if not value: return qs
values = [obj.id for obj in value]
return qs.filter(**{'%s__in' % (self.name): value})
class AmCATFilterSet(filterset.FilterSet):
"""
- Allow descending / ascending order
- Allow filtering on pk and ordering by default
"""
pk = InFilter(name='id', queryset=None)
# This overrides the default FilterSet value
order_by_field = ORDER_BY_FIELD
def __init__(self, *args, **kargs):
super(AmCATFilterSet, self).__init__(*args, **kargs)
self.filters["pk"].field.queryset = self.queryset
def __len__(self):
"""Default implementation does len(self.qs) which runs the whole query..."""
return count.count(self.qs)
def get_ordering_field(self):
"""
- Add descending order (-field) to the list of valid choices
- Use a MultipleChoiceField to allow ordering on more than one field
(requires changing the order_by behaviour in qs below)
"""
field = super(AmCATFilterSet, self).get_ordering_field()
if field:
choices = field.choices + [("-"+key, label + " (Desc)") for (key, label) in field.choices]
return forms.MultipleChoiceField(label=field.label, required=field.required, choices=choices)
return field
@property
def qs(self):
# 'Monkey patch' to allow order_by to take multiple values from the MultipleChoiceField.
# The function is copied from filterset.py except for the import, the order_by handling,
# and validation errors, which are re-raised here instead of silenced.
from django.utils import six
if not hasattr(self, '_qs'):
qs = self.queryset.all()
for name, filter_ in six.iteritems(self.filters):
try:
if self.is_bound:
data = self.form[name].data
else:
data = self.form.initial.get(
name, self.form[name].field.initial)
# change
val = None if data == 'null' else self.form.fields[name].clean(data)
# end of change
qs = filter_.filter(qs, val)
except forms.ValidationError:
raise  # was: pass
if self._meta.order_by:
try:
order_field = self.form.fields[self.order_by_field]
data = self.form[self.order_by_field].data
# changes
if data and data != ['']:
value = order_field.clean(data)
if value:
qs = qs.order_by(*value)
# end of changes
except forms.ValidationError:
raise  # was: pass
self._qs = qs
return self._qs
class AmCATFilterBackend(filters.DjangoFilterBackend):
default_filter_set = AmCATFilterSet
def get_filter_class(self, view, *args, **kargs):
filter_class = super(AmCATFilterBackend, self).get_filter_class(view, *args, **kargs)
filter_class._meta.order_by = True
return filter_class
###########################################################################
# U N I T T E S T S #
###########################################################################
from amcat.tools import amcattest
from api.rest.apitestcase import ApiTestCase
class TestFilters(ApiTestCase):
def _get_ids(self, resource, rtype=set, **filters):
result = self.get(resource, **filters)
return rtype(row['id'] for row in result['results'])
def test_uniqueness(self):
from amcat.models import ArticleSet
from api.rest.resources import ArticleResource
a1 = amcattest.create_test_article()
as1 = ArticleSet.objects.create(name="foo", project=a1.project)
as2 = ArticleSet.objects.create(name="bar", project=a1.project)
as1.add(a1)
as2.add(a1)
arts = self._get_ids(ArticleResource, list, articlesets_set__id=[as1.id, as2.id])
self.assertEquals(1, len(arts))
def test_order_by(self):
from api.rest.resources import ProjectResource
p = amcattest.create_test_project(name="a", active=True)
p2 = amcattest.create_test_project(name="b", active=True)
p3 = amcattest.create_test_project(name="c", active=False)
# Ascending order
res = self.get(ProjectResource, order_by="name")
self.assertEqual([p["name"] for p in res['results']], ["a", "b", "c"])
# Descending order
res = self.get(ProjectResource, order_by="-name")
self.assertEqual([p["name"] for p in res['results']], ["c", "b", "a"])
# Multiple order by
res = self.get(ProjectResource, order_by=["active", "name"])
self.assertEqual([p["name"] for p in res['results']], ["c", "a", "b"])
res = self.get(ProjectResource, order_by=["active", "-name"])
self.assertEqual([p["name"] for p in res['results']], ["c", "b", "a"])
def test_filter(self):
from amcat.models import Role
from api.rest.resources import ProjectResource
r = Role.objects.get(label='admin', projectlevel=True)
p = amcattest.create_test_project(name="test")
p2 = amcattest.create_test_project(name="not a test", guest_role=r)
p3 = amcattest.create_test_project(name="anothertest")
# no filter
self.assertEqual(self._get_ids(ProjectResource), {p.id, p2.id, p3.id})
# Filter on simple fields: id, pk, and name
self.assertEqual(self._get_ids(ProjectResource, id=p2.id), {p2.id})
self.assertEqual(self._get_ids(ProjectResource, name=p.name), {p.id})
self.assertEqual(self._get_ids(ProjectResource, pk=p.id), {p.id})
# Filter on directly related fields
self.assertEqual(self._get_ids(ProjectResource, guest_role__id=r.id), {p2.id})
# Filter on 1-to-many field
#aset = amcattest.create_test_set(project=p)
#self.assertEqual(self._get_ids(ProjectResource, articlesets_set__id=aset.id), {p.id})
# Filter on more n-on-m field: project roles
u = amcattest.create_test_user()
self.assertEqual(self._get_ids(ProjectResource, projectrole__user__id=u.id), set())
from amcat.models import ProjectRole
ProjectRole.objects.create(project=p3, user=u, role=r)
self.assertEqual(self._get_ids(ProjectResource, projectrole__user__id=u.id), {p3.id})
# Filter on multiple values of same key. Expect them to be OR'ed.
#self.assertEqual(self._get_ids(ProjectResource, id=[p.id, p2.id]), {p2.id, p.id})
self.assertEqual(self._get_ids(ProjectResource, pk=[p.id, p2.id]), {p2.id, p.id})
def test_filter_articlemeta(self):
# Filter on date ranges and make sure normal filters still work
p = amcattest.create_test_project(name="test")
a1 = amcattest.create_test_article(project=p, date="2012-01-01")
a2 = amcattest.create_test_article(project=p, date="2012-02-01")
a3 = amcattest.create_test_article(project=p, date="2012-03-01")
from api.rest.resources import ArticleMetaResource
# filter on article set
s = amcattest.create_test_set(articles=[a1, a2])
self.assertEqual(self._get_ids(ArticleMetaResource, articleset=s.id), {a1.id, a2.id})
# filter on dates
self.assertEqual(self._get_ids(ArticleMetaResource, project=p.id), {a1.id, a2.id, a3.id})
self.assertEqual(self._get_ids(ArticleMetaResource, project=p.id, date='2012-01-01'), {a1.id})
self.assertEqual(self._get_ids(ArticleMetaResource, project=p.id, date_from='2012-01-15'), {a2.id, a3.id})
self.assertEqual(self._get_ids(ArticleMetaResource, project=p.id, date_to='2012-01-15'), {a1.id})
# Filter on multiple pk values
#self.assertEqual(self._get_ids(ArticleMetaResource, pk_in=",".join(map(str, [a1.id, a2.id]))), {a1.id, a2.id})
self.assertEqual(self._get_ids(ArticleMetaResource, pk=[a1.id, a2.id]), {a1.id, a2.id})
def _assertEqualIDs(self, resource, ids, **filters):
self.assertEqual(self._get_ids(resource, **filters), ids)
def test_datatables_echo(self):
from api.rest.resources import ProjectResource
res = self.get(ProjectResource, datatables_options='{"sEcho":"3"}')
self.assertEqual(res['echo'], "3")
def todo_test_datatables_search(self):
"""
Not yet implemented.
"""
from api.rest.resources import ProjectResource
p = amcattest.create_test_project(name="test")
p2 = amcattest.create_test_project(name="not a test")
self._assertEqualIDs(
ProjectResource, {p2.id},
datatables_options='{"sSearch":"not"}'
)
# Test totals
res = self.get(ProjectResource, datatables_options='{}')
self.assertEqual(res['total'], 2)
self.assertEqual(res['subtotal'], 2)
res = self.get(ProjectResource, datatables_options='{"sSearch":"not"}')
self.assertEqual(res['total'], 2)
self.assertEqual(res['subtotal'], 1)
| agpl-3.0 |
zxjzxj9/FlaskBoard | web/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/markers.py | 72 | 7573 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import operator
import os
import platform
import sys
from pkg_resources.extern.pyparsing import ParseException, ParseResults, stringStart, stringEnd
from pkg_resources.extern.pyparsing import ZeroOrMore, Group, Forward, QuotedString
from pkg_resources.extern.pyparsing import Literal as L # noqa
from ._compat import string_types
from .specifiers import Specifier, InvalidSpecifier
__all__ = [
"InvalidMarker", "UndefinedComparison", "UndefinedEnvironmentName",
"Marker", "default_environment",
]
class InvalidMarker(ValueError):
"""
An invalid marker was found, users should refer to PEP 508.
"""
class UndefinedComparison(ValueError):
"""
An invalid operation was attempted on a value that doesn't support it.
"""
class UndefinedEnvironmentName(ValueError):
"""
A name was attempted to be used that does not exist inside of the
environment.
"""
class Node(object):
def __init__(self, value):
self.value = value
def __str__(self):
return str(self.value)
def __repr__(self):
return "<{0}({1!r})>".format(self.__class__.__name__, str(self))
class Variable(Node):
pass
class Value(Node):
pass
VARIABLE = (
L("implementation_version") |
L("platform_python_implementation") |
L("implementation_name") |
L("python_full_version") |
L("platform_release") |
L("platform_version") |
L("platform_machine") |
L("platform_system") |
L("python_version") |
L("sys_platform") |
L("os_name") |
L("os.name") | # PEP-345
L("sys.platform") | # PEP-345
L("platform.version") | # PEP-345
L("platform.machine") | # PEP-345
L("platform.python_implementation") | # PEP-345
L("extra")
)
VARIABLE.setParseAction(lambda s, l, t: Variable(t[0].replace('.', '_')))
VERSION_CMP = (
L("===") |
L("==") |
L(">=") |
L("<=") |
L("!=") |
L("~=") |
L(">") |
L("<")
)
MARKER_OP = VERSION_CMP | L("not in") | L("in")
MARKER_VALUE = QuotedString("'") | QuotedString('"')
MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))
BOOLOP = L("and") | L("or")
MARKER_VAR = VARIABLE | MARKER_VALUE
MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))
LPAREN = L("(").suppress()
RPAREN = L(")").suppress()
MARKER_EXPR = Forward()
MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)
MARKER = stringStart + MARKER_EXPR + stringEnd
def _coerce_parse_result(results):
if isinstance(results, ParseResults):
return [_coerce_parse_result(i) for i in results]
else:
return results
def _format_marker(marker, first=True):
assert isinstance(marker, (list, tuple, string_types))
# Sometimes we have a structure like [[...]] which is a single item list
# where the single item is itself its own list. In that case we want to skip
# the rest of this function so that we don't get extraneous () on the
# outside.
if (isinstance(marker, list) and len(marker) == 1 and
isinstance(marker[0], (list, tuple))):
return _format_marker(marker[0])
if isinstance(marker, list):
inner = (_format_marker(m, first=False) for m in marker)
if first:
return " ".join(inner)
else:
return "(" + " ".join(inner) + ")"
elif isinstance(marker, tuple):
return '{0} {1} "{2}"'.format(*marker)
else:
return marker
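# For example, a parsed marker [(Variable('os_name'), '==', Value('posix'))]
# formats back to the string: os_name == "posix".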
_operators = {
"in": lambda lhs, rhs: lhs in rhs,
"not in": lambda lhs, rhs: lhs not in rhs,
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def _eval_op(lhs, op, rhs):
try:
spec = Specifier("".join([op, rhs]))
except InvalidSpecifier:
pass
else:
return spec.contains(lhs)
oper = _operators.get(op)
if oper is None:
raise UndefinedComparison(
"Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs)
)
return oper(lhs, rhs)
_undefined = object()
def _get_env(environment, name):
value = environment.get(name, _undefined)
if value is _undefined:
raise UndefinedEnvironmentName(
"{0!r} does not exist in evaluation environment.".format(name)
)
return value
def _evaluate_markers(markers, environment):
groups = [[]]
for marker in markers:
assert isinstance(marker, (list, tuple, string_types))
if isinstance(marker, list):
groups[-1].append(_evaluate_markers(marker, environment))
elif isinstance(marker, tuple):
lhs, op, rhs = marker
if isinstance(lhs, Variable):
lhs_value = _get_env(environment, lhs.value)
rhs_value = rhs.value
else:
lhs_value = lhs.value
rhs_value = _get_env(environment, rhs.value)
groups[-1].append(_eval_op(lhs_value, op, rhs_value))
else:
assert marker in ["and", "or"]
if marker == "or":
groups.append([])
return any(all(item) for item in groups)
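# For example, a marker parsed from 'a and b or c' arrives here as
# [a, 'and', b, 'or', c]; the loop buckets it into groups == [[a, b], [c]],
# so the return value evaluates as (a and b) or c.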
def format_full_version(info):
version = '{0.major}.{0.minor}.{0.micro}'.format(info)
kind = info.releaselevel
if kind != 'final':
version += kind[0] + str(info.serial)
return version
def default_environment():
if hasattr(sys, 'implementation'):
iver = format_full_version(sys.implementation.version)
implementation_name = sys.implementation.name
else:
iver = '0'
implementation_name = ''
return {
"implementation_name": implementation_name,
"implementation_version": iver,
"os_name": os.name,
"platform_machine": platform.machine(),
"platform_release": platform.release(),
"platform_system": platform.system(),
"platform_version": platform.version(),
"python_full_version": platform.python_version(),
"platform_python_implementation": platform.python_implementation(),
"python_version": platform.python_version()[:3],
"sys_platform": sys.platform,
}
class Marker(object):
def __init__(self, marker):
try:
self._markers = _coerce_parse_result(MARKER.parseString(marker))
except ParseException as e:
err_str = "Invalid marker: {0!r}, parse error at {1!r}".format(
marker, marker[e.loc:e.loc + 8])
raise InvalidMarker(err_str)
def __str__(self):
return _format_marker(self._markers)
def __repr__(self):
return "<Marker({0!r})>".format(str(self))
def evaluate(self, environment=None):
"""Evaluate a marker.
Return the boolean from evaluating the given marker against the
environment. environment is an optional argument to override all or
part of the determined environment.
The environment is determined from the current Python process.
"""
current_environment = default_environment()
if environment is not None:
current_environment.update(environment)
return _evaluate_markers(self._markers, current_environment)
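# Hedged usage sketch (illustrative only, assuming this vendored module is
# importable): parse a PEP 508 marker, then evaluate it against the running
# interpreter and against a partially overridden environment.
if __name__ == '__main__':
    marker = Marker('python_version >= "2.7" and os_name == "posix"')
    print(str(marker))                         # canonical form of the marker
    print(marker.evaluate())                   # against default_environment()
    print(marker.evaluate({'os_name': 'nt'}))  # override just os_name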
| apache-2.0 |
pompiduskus/scikit-learn | sklearn/feature_extraction/hashing.py | 183 | 6155 | # Author: Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
import numbers
import numpy as np
import scipy.sparse as sp
from . import _hashing
from ..base import BaseEstimator, TransformerMixin
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
class FeatureHasher(BaseEstimator, TransformerMixin):
"""Implements feature hashing, aka the hashing trick.
This class turns sequences of symbolic feature names (strings) into
scipy.sparse matrices, using a hash function to compute the matrix column
corresponding to a name. The hash function employed is the signed 32-bit
version of Murmurhash3.
Feature names of type byte string are used as-is. Unicode strings are
converted to UTF-8 first, but no Unicode normalization is done.
Feature values must be (finite) numbers.
This class is a low-memory alternative to DictVectorizer and
CountVectorizer, intended for large-scale (online) learning and situations
where memory is tight, e.g. when running prediction code on embedded
devices.
Read more in the :ref:`User Guide <feature_hashing>`.
Parameters
----------
n_features : integer, optional
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
dtype : numpy type, optional
The type of feature values. Passed to scipy.sparse matrix constructors
as the dtype argument. Do not set this to bool, np.bool_ or any
unsigned integer type.
input_type : string, optional
Either "dict" (the default) to accept dictionaries over
(feature_name, value); "pair" to accept pairs of (feature_name, value);
or "string" to accept single strings.
feature_name should be a string, while value should be a number.
In the case of "string", a value of 1 is implied.
The feature_name is hashed to find the appropriate column for the
feature. The value's sign might be flipped in the output (but see
non_negative, below).
non_negative : boolean, optional, default False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
Examples
--------
>>> from sklearn.feature_extraction import FeatureHasher
>>> h = FeatureHasher(n_features=10)
>>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
>>> f = h.transform(D)
>>> f.toarray()
array([[ 0., 0., -4., -1., 0., 0., 0., 0., 0., 2.],
[ 0., 0., 0., -2., -5., 0., 0., 0., 0., 0.]])
See also
--------
DictVectorizer : vectorizes string-valued features using a hash table.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, n_features=(2 ** 20), input_type="dict",
dtype=np.float64, non_negative=False):
self._validate_params(n_features, input_type)
self.dtype = dtype
self.input_type = input_type
self.n_features = n_features
self.non_negative = non_negative
@staticmethod
def _validate_params(n_features, input_type):
# strangely, np.int16 instances are not instances of Integral,
# while np.int64 instances are...
if not isinstance(n_features, (numbers.Integral, np.integer)):
raise TypeError("n_features must be integral, got %r (%s)."
% (n_features, type(n_features)))
elif n_features < 1 or n_features >= 2 ** 31:
raise ValueError("Invalid number of features (%d)." % n_features)
if input_type not in ("dict", "pair", "string"):
raise ValueError("input_type must be 'dict', 'pair' or 'string',"
" got %r." % input_type)
def fit(self, X=None, y=None):
"""No-op.
This method doesn't do anything. It exists purely for compatibility
with the scikit-learn transformer API.
Returns
-------
self : FeatureHasher
"""
# repeat input validation for grid search (which calls set_params)
self._validate_params(self.n_features, self.input_type)
return self
def transform(self, raw_X, y=None):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
Samples. Each sample must be an iterable (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Feature matrix, for use with estimators or further transformers.
"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
raw_X = (((f, 1) for f in x) for x in raw_X)
indices, indptr, values = \
_hashing.transform(raw_X, self.n_features, self.dtype)
n_samples = indptr.shape[0] - 1
if n_samples == 0:
raise ValueError("Cannot vectorize empty sequence.")
X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
shape=(n_samples, self.n_features))
X.sum_duplicates() # also sorts the indices
if self.non_negative:
np.abs(X.data, X.data)
return X
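# Hedged sketch (illustrative only): with input_type="string" each sample is a
# sequence of feature names and every name carries an implied value of 1, as
# described in the class docstring above.
if __name__ == '__main__':
    hasher = FeatureHasher(n_features=8, input_type="string")
    X = hasher.transform([["dog", "cat", "cat"], ["run"]])
    # X is a (2, 8) scipy.sparse CSR matrix; repeated names within a sample
    # accumulate, and individual signs may be flipped by the hash.
    print(X.toarray())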
| bsd-3-clause |
mgogoulos/libcloud | libcloud/test/compute/test_vcloud.py | 5 | 36861 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
try:
from lxml import etree as ET
except ImportError:
from xml.etree import ElementTree as ET
from libcloud.utils.py3 import httplib, b
from libcloud.compute.drivers.vcloud import TerremarkDriver, VCloudNodeDriver, Subject
from libcloud.compute.drivers.vcloud import VCloud_1_5_NodeDriver, ControlAccess
from libcloud.compute.drivers.vcloud import VCloud_5_1_NodeDriver
from libcloud.compute.drivers.vcloud import VCloud_5_5_NodeDriver
from libcloud.compute.drivers.vcloud import Vdc
from libcloud.compute.base import Node, NodeImage
from libcloud.compute.types import NodeState
from libcloud.test import MockHttp
from libcloud.test.compute import TestCaseMixin
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.test.secrets import VCLOUD_PARAMS
class TerremarkTests(unittest.TestCase, TestCaseMixin):
def setUp(self):
VCloudNodeDriver.connectionCls.host = "test"
VCloudNodeDriver.connectionCls.conn_class = TerremarkMockHttp
TerremarkMockHttp.type = None
self.driver = TerremarkDriver(*VCLOUD_PARAMS)
def test_list_images(self):
ret = self.driver.list_images()
self.assertEqual(
ret[0].id, 'https://services.vcloudexpress.terremark.com/api/v0.8/vAppTemplate/5')
def test_list_sizes(self):
ret = self.driver.list_sizes()
self.assertEqual(ret[0].ram, 512)
def test_create_node(self):
image = self.driver.list_images()[0]
size = self.driver.list_sizes()[0]
node = self.driver.create_node(
name='testerpart2',
image=image,
size=size,
vdc='https://services.vcloudexpress.terremark.com/api/v0.8/vdc/224',
network='https://services.vcloudexpress.terremark.com/api/v0.8/network/725',
cpus=2,
)
self.assertTrue(isinstance(node, Node))
self.assertEqual(
node.id, 'https://services.vcloudexpress.terremark.com/api/v0.8/vapp/14031')
self.assertEqual(node.name, 'testerpart2')
def test_list_nodes(self):
ret = self.driver.list_nodes()
node = ret[0]
self.assertEqual(
node.id, 'https://services.vcloudexpress.terremark.com/api/v0.8/vapp/14031')
self.assertEqual(node.name, 'testerpart2')
self.assertEqual(node.state, NodeState.RUNNING)
self.assertEqual(node.public_ips, [])
self.assertEqual(node.private_ips, ['10.112.78.69'])
def test_reboot_node(self):
node = self.driver.list_nodes()[0]
ret = self.driver.reboot_node(node)
self.assertTrue(ret)
def test_destroy_node(self):
node = self.driver.list_nodes()[0]
ret = self.driver.destroy_node(node)
self.assertTrue(ret)
class VCloud_1_5_Tests(unittest.TestCase, TestCaseMixin):
def setUp(self):
VCloudNodeDriver.connectionCls.host = 'test'
VCloudNodeDriver.connectionCls.conn_class = VCloud_1_5_MockHttp
VCloud_1_5_MockHttp.type = None
self.driver = VCloud_1_5_NodeDriver(*VCLOUD_PARAMS)
def test_list_images(self):
ret = self.driver.list_images()
self.assertEqual(
'https://vm-vcloud/api/vAppTemplate/vappTemplate-ac1bc027-bf8c-4050-8643-4971f691c158', ret[0].id)
def test_list_sizes(self):
ret = self.driver.list_sizes()
self.assertEqual(ret[0].ram, 512)
def test_networks(self):
ret = self.driver.networks
self.assertEqual(
ret[0].get('href'), 'https://vm-vcloud/api/network/dca8b667-6c8f-4c3e-be57-7a9425dba4f4')
def test_create_node(self):
image = self.driver.list_images()[0]
size = self.driver.list_sizes()[0]
node = self.driver.create_node(
name='testNode',
image=image,
size=size,
ex_vdc='MyVdc',
ex_network='vCloud - Default',
cpus=2,
)
self.assertTrue(isinstance(node, Node))
self.assertEqual(
'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6a', node.id)
self.assertEqual('testNode', node.name)
def test_create_node_clone(self):
image = self.driver.list_nodes()[0]
node = self.driver.create_node(name='testNode', image=image)
self.assertTrue(isinstance(node, Node))
self.assertEqual(
'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6a', node.id)
self.assertEqual('testNode', node.name)
def test_list_nodes(self):
ret = self.driver.list_nodes()
node = ret[0]
self.assertEqual(
node.id, 'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6a')
self.assertEqual(node.name, 'testNode')
self.assertEqual(node.state, NodeState.RUNNING)
self.assertEqual(node.public_ips, ['65.41.67.2'])
self.assertEqual(node.private_ips, ['65.41.67.2'])
self.assertEqual(node.extra, {'vdc': 'MyVdc',
'vms': [{
'id': 'https://vm-vcloud/api/vApp/vm-dd75d1d3-5b7b-48f0-aff3-69622ab7e045',
'name': 'testVm',
'state': NodeState.RUNNING,
'public_ips': ['65.41.67.2'],
'private_ips': ['65.41.67.2'],
'os_type': 'rhel5_64Guest'
}]})
node = ret[1]
self.assertEqual(
node.id, 'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6b')
self.assertEqual(node.name, 'testNode2')
self.assertEqual(node.state, NodeState.RUNNING)
self.assertEqual(node.public_ips, ['192.168.0.103'])
self.assertEqual(node.private_ips, ['192.168.0.100'])
self.assertEqual(node.extra, {'vdc': 'MyVdc',
'vms': [{
'id': 'https://vm-vcloud/api/vApp/vm-dd75d1d3-5b7b-48f0-aff3-69622ab7e046',
'name': 'testVm2',
'state': NodeState.RUNNING,
'public_ips': ['192.168.0.103'],
'private_ips': ['192.168.0.100'],
'os_type': 'rhel5_64Guest'
}]})
def test_reboot_node(self):
node = self.driver.list_nodes()[0]
ret = self.driver.reboot_node(node)
self.assertTrue(ret)
def test_destroy_node(self):
node = self.driver.list_nodes()[0]
ret = self.driver.destroy_node(node)
self.assertTrue(ret)
def test_validate_vm_names(self):
# valid inputs
self.driver._validate_vm_names(['host-n-ame-name'])
self.driver._validate_vm_names(['tc-mybuild-b1'])
self.driver._validate_vm_names(None)
# invalid inputs
self.assertRaises(
ValueError, self.driver._validate_vm_names, ['invalid.host'])
self.assertRaises(
ValueError, self.driver._validate_vm_names, ['inv-alid.host'])
self.assertRaises(
ValueError, self.driver._validate_vm_names, ['hostnametoooolong'])
self.assertRaises(
ValueError, self.driver._validate_vm_names, ['host$name'])
def test_change_vm_names(self):
self.driver._change_vm_names(
'/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6a', ['changed1', 'changed2'])
def test_change_vm_admin_password(self):
self.driver.ex_change_vm_admin_password(
'/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6a', "12345678")
def test_is_node(self):
self.assertTrue(self.driver._is_node(
Node('testId', 'testNode', state=0, public_ips=[], private_ips=[], driver=self.driver)))
self.assertFalse(self.driver._is_node(
NodeImage('testId', 'testNode', driver=self.driver)))
def test_ex_deploy(self):
node = self.driver.ex_deploy_node(
Node('/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6a', 'testNode', state=0,
public_ips=[], private_ips=[], driver=self.driver),
ex_force_customization=False)
self.assertEqual(node.state, NodeState.RUNNING)
def test_ex_undeploy(self):
node = self.driver.ex_undeploy_node(
Node('https://test/api/vApp/undeployTest', 'testNode', state=0,
public_ips=[], private_ips=[], driver=self.driver))
self.assertEqual(node.state, NodeState.STOPPED)
def test_ex_undeploy_with_error(self):
node = self.driver.ex_undeploy_node(
Node('https://test/api/vApp/undeployErrorTest', 'testNode',
state=0, public_ips=[], private_ips=[], driver=self.driver))
self.assertEqual(node.state, NodeState.STOPPED)
def test_ex_find_node(self):
node = self.driver.ex_find_node('testNode')
self.assertEqual(node.name, "testNode")
node = self.driver.ex_find_node('testNode', self.driver.vdcs[0])
self.assertEqual(node.name, "testNode")
node = self.driver.ex_find_node('testNonExisting', self.driver.vdcs[0])
self.assertEqual(node, None)
def test_ex_add_vm_disk__with_invalid_values(self):
self.assertRaises(
ValueError, self.driver.ex_add_vm_disk, 'dummy', 'invalid value')
self.assertRaises(
ValueError, self.driver.ex_add_vm_disk, 'dummy', '-1')
def test_ex_add_vm_disk(self):
self.driver.ex_add_vm_disk('https://test/api/vApp/vm-test', '20')
def test_ex_set_vm_cpu__with_invalid_values(self):
self.assertRaises(ValueError, self.driver.ex_set_vm_cpu, 'dummy', 50)
self.assertRaises(ValueError, self.driver.ex_set_vm_cpu, 'dummy', -1)
def test_ex_set_vm_cpu(self):
self.driver.ex_set_vm_cpu('https://test/api/vApp/vm-test', 4)
def test_ex_set_vm_memory__with_invalid_values(self):
self.assertRaises(
ValueError, self.driver.ex_set_vm_memory, 'dummy', 777)
self.assertRaises(
ValueError, self.driver.ex_set_vm_memory, 'dummy', -1024)
def test_ex_set_vm_memory(self):
self.driver.ex_set_vm_memory('https://test/api/vApp/vm-test', 1024)
def test_vdcs(self):
vdcs = self.driver.vdcs
self.assertEqual(len(vdcs), 1)
self.assertEqual(
vdcs[0].id, 'https://vm-vcloud/api/vdc/3d9ae28c-1de9-4307-8107-9356ff8ba6d0')
self.assertEqual(vdcs[0].name, 'MyVdc')
self.assertEqual(vdcs[0].allocation_model, 'AllocationPool')
self.assertEqual(vdcs[0].storage.limit, 5120000)
self.assertEqual(vdcs[0].storage.used, 1984512)
self.assertEqual(vdcs[0].storage.units, 'MB')
self.assertEqual(vdcs[0].cpu.limit, 160000)
self.assertEqual(vdcs[0].cpu.used, 0)
self.assertEqual(vdcs[0].cpu.units, 'MHz')
self.assertEqual(vdcs[0].memory.limit, 527360)
self.assertEqual(vdcs[0].memory.used, 130752)
self.assertEqual(vdcs[0].memory.units, 'MB')
def test_ex_list_nodes(self):
self.assertEqual(
len(self.driver.ex_list_nodes()), len(self.driver.list_nodes()))
def test_ex_list_nodes__masked_exception(self):
"""
Test that we don't mask other exceptions.
"""
brokenVdc = Vdc('/api/vdc/brokenVdc', 'brokenVdc', self.driver)
self.assertRaises(AnotherError, self.driver.ex_list_nodes, (brokenVdc))
def test_ex_power_off(self):
node = Node(
'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6b',
'testNode', NodeState.RUNNING, [], [], self.driver)
self.driver.ex_power_off_node(node)
def test_ex_query(self):
results = self.driver.ex_query(
'user', filter='name==jrambo', page=2, page_size=30, sort_desc='startDate')
self.assertEqual(len(results), 1)
self.assertEqual(results[0]['type'], 'UserRecord')
self.assertEqual(results[0]['name'], 'jrambo')
self.assertEqual(results[0]['isLdapUser'], 'true')
def test_ex_get_control_access(self):
node = Node(
'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6b',
'testNode', NodeState.RUNNING, [], [], self.driver)
control_access = self.driver.ex_get_control_access(node)
self.assertEqual(
control_access.everyone_access_level, ControlAccess.AccessLevel.READ_ONLY)
self.assertEqual(len(control_access.subjects), 1)
self.assertEqual(control_access.subjects[0].type, 'group')
self.assertEqual(control_access.subjects[0].name, 'MyGroup')
self.assertEqual(control_access.subjects[
0].id, 'https://vm-vcloud/api/admin/group/b8202c48-7151-4e61-9a6c-155474c7d413')
self.assertEqual(control_access.subjects[
0].access_level, ControlAccess.AccessLevel.FULL_CONTROL)
def test_ex_set_control_access(self):
node = Node(
'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6b',
'testNode', NodeState.RUNNING, [], [], self.driver)
control_access = ControlAccess(node, None, [Subject(
name='MyGroup',
type='group',
access_level=ControlAccess.AccessLevel.FULL_CONTROL)])
self.driver.ex_set_control_access(node, control_access)
def test_ex_get_metadata(self):
node = Node(
'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6b',
'testNode', NodeState.RUNNING, [], [], self.driver)
metadata = self.driver.ex_get_metadata(node)
self.assertEqual(metadata, {'owners': 'msamia@netsuite.com'})
def test_ex_set_metadata_entry(self):
node = Node(
'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6b',
'testNode', NodeState.RUNNING, [], [], self.driver)
self.driver.ex_set_metadata_entry(node, 'foo', 'bar')
class VCloud_5_1_Tests(unittest.TestCase, TestCaseMixin):
def setUp(self):
VCloudNodeDriver.connectionCls.host = 'test'
VCloudNodeDriver.connectionCls.conn_class = VCloud_1_5_MockHttp
VCloud_1_5_MockHttp.type = None
self.driver = VCloudNodeDriver(
*VCLOUD_PARAMS, **{'api_version': '5.1'})
self.assertTrue(isinstance(self.driver, VCloud_5_1_NodeDriver))
def _test_create_node_valid_ex_vm_memory(self):
# TODO: Hook up the fixture
values = [4, 1024, 4096]
image = self.driver.list_images()[0]
size = self.driver.list_sizes()[0]
for value in values:
self.driver.create_node(
name='testerpart2',
image=image,
size=size,
vdc='https://services.vcloudexpress.terremark.com/api/v0.8/vdc/224',
network='https://services.vcloudexpress.terremark.com/api/v0.8/network/725',
cpus=2,
ex_vm_memory=value
)
def test_create_node_invalid_ex_vm_memory(self):
values = [1, 3, 7]
image = self.driver.list_images()[0]
size = self.driver.list_sizes()[0]
for value in values:
try:
self.driver.create_node(
name='testerpart2',
image=image,
size=size,
vdc='https://services.vcloudexpress.terremark.com/api/v0.8/vdc/224',
network='https://services.vcloudexpress.terremark.com/api/v0.8/network/725',
cpus=2,
ex_vm_memory=value
)
except ValueError:
pass
else:
self.fail('Exception was not thrown')
def test_list_images(self):
ret = self.driver.list_images()
self.assertEqual(
'https://vm-vcloud/api/vAppTemplate/vappTemplate-ac1bc027-bf8c-4050-8643-4971f691c158', ret[0].id)
class VCloud_5_5_Tests(unittest.TestCase, TestCaseMixin):
def setUp(self):
VCloudNodeDriver.connectionCls.host = 'test'
VCloudNodeDriver.connectionCls.conn_class = VCloud_5_5_MockHttp
VCloud_5_5_MockHttp.type = None
self.driver = VCloudNodeDriver(
*VCLOUD_PARAMS, **{'api_version': '5.5'})
self.assertTrue(isinstance(self.driver, VCloud_5_5_NodeDriver))
def test_ex_create_snapshot(self):
node = Node(
'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6b',
'testNode', NodeState.RUNNING, [], [], self.driver)
self.driver.ex_create_snapshot(node)
def test_ex_remove_snapshots(self):
node = Node(
'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6b',
'testNode', NodeState.RUNNING, [], [], self.driver)
self.driver.ex_remove_snapshots(node)
def test_ex_revert_to_snapshot(self):
node = Node(
'https://vm-vcloud/api/vApp/vapp-8c57a5b6-e61b-48ca-8a78-3b70ee65ef6b',
'testNode', NodeState.RUNNING, [], [], self.driver)
self.driver.ex_revert_to_snapshot(node)
def test_ex_acquire_mks_ticket(self):
node = self.driver.ex_find_node('testNode')
self.driver.ex_acquire_mks_ticket(node.id)
class TerremarkMockHttp(MockHttp):
fixtures = ComputeFileFixtures('terremark')
def _api_v0_8_login(self, method, url, body, headers):
headers['set-cookie'] = 'vcloud-token=testtoken'
body = self.fixtures.load('api_v0_8_login.xml')
return (httplib.OK, body, headers, httplib.responses[httplib.OK])
def _api_v0_8_org_240(self, method, url, body, headers):
body = self.fixtures.load('api_v0_8_org_240.xml')
return (httplib.OK, body, headers, httplib.responses[httplib.OK])
def _api_v0_8_vdc_224(self, method, url, body, headers):
body = self.fixtures.load('api_v0_8_vdc_224.xml')
return (httplib.OK, body, headers, httplib.responses[httplib.OK])
def _api_v0_8_vdc_224_catalog(self, method, url, body, headers):
body = self.fixtures.load('api_v0_8_vdc_224_catalog.xml')
return (httplib.OK, body, headers, httplib.responses[httplib.OK])
def _api_v0_8_catalogItem_5(self, method, url, body, headers):
body = self.fixtures.load('api_v0_8_catalogItem_5.xml')
return (httplib.OK, body, headers, httplib.responses[httplib.OK])
def _api_v0_8_vdc_224_action_instantiateVAppTemplate(self, method, url, body, headers):
body = self.fixtures.load(
'api_v0_8_vdc_224_action_instantiateVAppTemplate.xml')
return (httplib.OK, body, headers, httplib.responses[httplib.OK])
def _api_v0_8_vapp_14031_action_deploy(self, method, url, body, headers):
body = self.fixtures.load('api_v0_8_vapp_14031_action_deploy.xml')
return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED])
def _api_v0_8_task_10496(self, method, url, body, headers):
body = self.fixtures.load('api_v0_8_task_10496.xml')
return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED])
def _api_v0_8_vapp_14031_power_action_powerOn(self, method, url, body, headers):
body = self.fixtures.load(
'api_v0_8_vapp_14031_power_action_powerOn.xml')
return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED])
def _api_v0_8_vapp_14031(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('api_v0_8_vapp_14031_get.xml')
elif method == 'DELETE':
body = ''
return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED])
def _api_v0_8_vapp_14031_power_action_reset(self, method, url, body, headers):
body = self.fixtures.load('api_v0_8_vapp_14031_power_action_reset.xml')
return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED])
def _api_v0_8_vapp_14031_power_action_poweroff(self, method, url, body, headers):
body = self.fixtures.load(
'api_v0_8_vapp_14031_power_action_poweroff.xml')
return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED])
def _api_v0_8_task_11001(self, method, url, body, headers):
body = self.fixtures.load('api_v0_8_task_11001.xml')
return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED])
class AnotherErrorMember(Exception):
"""
helper class for the synthetic exception
"""
def __init__(self):
self.tag = 'Error'
def get(self, foo):
return 'ACCESS_TO_RESOURCE_IS_FORBIDDEN_1'
class AnotherError(Exception):
pass
class VCloud_1_5_MockHttp(MockHttp, unittest.TestCase):
fixtures = ComputeFileFixtures('vcloud_1_5')
def request(self, method, url, body=None, headers=None, raw=False, stream=False):
self.assertTrue(url.startswith('/api/'),
'"%s" is invalid. Needs to '
'start with "/api". The passed URL should be just '
'the path, not the full URL.' % url)
super(VCloud_1_5_MockHttp, self).request(method, url, body, headers,
raw, stream)
def _api_sessions(self, method, url, body, headers):
headers['x-vcloud-authorization'] = 'testtoken'
body = self.fixtures.load('api_sessions.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_org(self, method, url, body, headers):
body = self.fixtures.load('api_org.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_org_96726c78_4ae3_402f_b08b_7a78c6903d2a(self, method, url, body, headers):
body = self.fixtures.load(
'api_org_96726c78_4ae3_402f_b08b_7a78c6903d2a.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_network_dca8b667_6c8f_4c3e_be57_7a9425dba4f4(self, method, url, body, headers):
body = self.fixtures.load(
'api_network_dca8b667_6c8f_4c3e_be57_7a9425dba4f4.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0(self, method, url, body, headers):
body = self.fixtures.load(
'api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vdc_brokenVdc(self, method, url, body, headers):
body = self.fixtures.load('api_vdc_brokenVdc.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vApp_vapp_errorRaiser(self, method, url, body, headers):
m = AnotherErrorMember()
raise AnotherError(m)
def _api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0_action_instantiateVAppTemplate(self, method, url, body, headers):
body = self.fixtures.load(
'api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0_action_instantiateVAppTemplate.xml')
return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]
def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_power_action_powerOn(self, method, url, body, headers):
return self._api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_power_action_all(method, url, body, headers)
# Clone
def _api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0_action_cloneVApp(self, method, url, body, headers):
body = self.fixtures.load(
'api_vdc_3d9ae28c_1de9_4307_8107_9356ff8ba6d0_action_cloneVApp.xml')
return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]
def _api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045_networkConnectionSection(self, method, url, body, headers):
body = self.fixtures.load(
'api_task_b034df55_fe81_4798_bc81_1f0fd0ead450.xml')
return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]
def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a(self, method, url, body, headers):
status = httplib.OK
if method == 'GET':
body = self.fixtures.load(
'api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a.xml')
status = httplib.OK
elif method == 'DELETE':
body = self.fixtures.load(
'api_task_b034df55_fe81_4798_bc81_1f0fd0ead450.xml')
status = httplib.ACCEPTED
return status, body, headers, httplib.responses[status]
def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b(self, method, url, body, headers):
body = self.fixtures.load(
'api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6c(self, method, url, body, headers):
body = self.fixtures.load(
'api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6c.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045(self, method, url, body, headers):
body = self.fixtures.load(
'put_api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045_guestCustomizationSection.xml')
return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]
def _api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045_guestCustomizationSection(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load(
'get_api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045_guestCustomizationSection.xml')
status = httplib.OK
else:
body = self.fixtures.load(
'put_api_vApp_vm_dd75d1d3_5b7b_48f0_aff3_69622ab7e045_guestCustomizationSection.xml')
status = httplib.ACCEPTED
return status, body, headers, httplib.responses[status]
def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_power_action_reset(self, method, url, body, headers):
return self._api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_power_action_all(method, url, body, headers)
def _api_task_b034df55_fe81_4798_bc81_1f0fd0ead450(self, method, url, body, headers):
body = self.fixtures.load(
'api_task_b034df55_fe81_4798_bc81_1f0fd0ead450.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_catalog_cddb3cb2_3394_4b14_b831_11fbc4028da4(self, method, url, body, headers):
body = self.fixtures.load(
'api_catalog_cddb3cb2_3394_4b14_b831_11fbc4028da4.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_catalogItem_3132e037_759b_4627_9056_ca66466fa607(self, method, url, body, headers):
body = self.fixtures.load(
'api_catalogItem_3132e037_759b_4627_9056_ca66466fa607.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vApp_deployTest(self, method, url, body, headers):
body = self.fixtures.load('api_task_deploy.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_action_deploy(self, method, url, body, headers):
body = self.fixtures.load('api_task_deploy.xml')
return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]
def _api_task_deploy(self, method, url, body, headers):
body = self.fixtures.load('api_task_deploy.xml')
return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]
def _api_vApp_undeployTest(self, method, url, body, headers):
body = self.fixtures.load('api_vApp_undeployTest.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vApp_undeployTest_action_undeploy(self, method, url, body, headers):
body = self.fixtures.load('api_task_undeploy.xml')
return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]
def _api_task_undeploy(self, method, url, body, headers):
body = self.fixtures.load('api_task_undeploy.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vApp_undeployErrorTest(self, method, url, body, headers):
body = self.fixtures.load('api_vApp_undeployTest.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vApp_undeployErrorTest_action_undeploy(self, method, url, body, headers):
if b('shutdown') in b(body):
body = self.fixtures.load('api_task_undeploy_error.xml')
else:
body = self.fixtures.load('api_task_undeploy.xml')
return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]
def _api_task_undeployError(self, method, url, body, headers):
body = self.fixtures.load('api_task_undeploy_error.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vApp_vapp_access_to_resource_forbidden(self, method, url, body, headers):
raise Exception(
ET.fromstring(self.fixtures.load('api_vApp_vapp_access_to_resource_forbidden.xml')))
def _api_vApp_vm_test(self, method, url, body, headers):
body = self.fixtures.load('api_vApp_vm_test.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vApp_vm_test_virtualHardwareSection_disks(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load(
'get_api_vApp_vm_test_virtualHardwareSection_disks.xml')
status = httplib.OK
else:
body = self.fixtures.load(
'put_api_vApp_vm_test_virtualHardwareSection_disks.xml')
status = httplib.ACCEPTED
return status, body, headers, httplib.responses[status]
def _api_vApp_vm_test_virtualHardwareSection_cpu(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load(
'get_api_vApp_vm_test_virtualHardwareSection_cpu.xml')
status = httplib.OK
else:
body = self.fixtures.load(
'put_api_vApp_vm_test_virtualHardwareSection_cpu.xml')
status = httplib.ACCEPTED
return status, body, headers, httplib.responses[status]
def _api_vApp_vm_test_virtualHardwareSection_memory(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load(
'get_api_vApp_vm_test_virtualHardwareSection_memory.xml')
status = httplib.OK
else:
body = self.fixtures.load(
'put_api_vApp_vm_test_virtualHardwareSection_memory.xml')
status = httplib.ACCEPTED
return status, body, headers, httplib.responses[status]
def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_power_action_powerOff(self, method, url, body, headers):
return self._api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_power_action_all(method, url, body, headers)
def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_power_action_all(self, method, url, body, headers):
assert method == 'POST'
body = self.fixtures.load(
'api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_power_action_all.xml')
return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]
def _api_query(self, method, url, body, headers):
assert method == 'GET'
if 'type=user' in url:
self.assertTrue('page=2' in url)
self.assertTrue('filter=(name==jrambo)' in url)
self.assertTrue('sortDesc=startDate' in url)
body = self.fixtures.load('api_query_user.xml')
elif 'type=group' in url:
body = self.fixtures.load('api_query_group.xml')
else:
raise AssertionError('Unexpected query type')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_metadata(self, method, url, body, headers):
if method == 'POST':
body = self.fixtures.load('api_vapp_post_metadata.xml')
return httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]
else:
body = self.fixtures.load('api_vapp_get_metadata.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_controlAccess(self, method, url, body, headers):
body = self.fixtures.load(
'api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_controlAccess.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_action_controlAccess(self, method, url, body, headers):
body = str(body)
self.assertTrue(method == 'POST')
self.assertTrue(
'<IsSharedToEveryone>false</IsSharedToEveryone>' in body)
self.assertTrue(
'<Subject href="https://vm-vcloud/api/admin/group/b8202c48-7151-4e61-9a6c-155474c7d413" />' in body)
self.assertTrue('<AccessLevel>FullControl</AccessLevel>' in body)
body = self.fixtures.load(
'api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6a_controlAccess.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_admin_group_b8202c48_7151_4e61_9a6c_155474c7d413(self, method, url, body, headers):
body = self.fixtures.load(
'api_admin_group_b8202c48_7151_4e61_9a6c_155474c7d413.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
class VCloud_5_5_MockHttp(VCloud_1_5_MockHttp):
# TODO: Move 5.5 fixtures to their own folder
def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_action_createSnapshot(self, method, url, body, headers):
assert method == 'POST'
body = self.fixtures.load(
'api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_create_snapshot.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_task_fab4b26f_4f2e_4d49_ad01_ae9324bbfe48(self, method, url, body, headers):
body = self.fixtures.load(
'api_task_b034df55_fe81_4798_bc81_1f0fd0ead450.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_action_removeAllSnapshots(self, method, url, body, headers):
assert method == 'POST'
body = self.fixtures.load(
'api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_remove_snapshots.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_task_2518935e_b315_4d8e_9e99_9275f751877c(self, method, url, body, headers):
body = self.fixtures.load(
'api_task_2518935e_b315_4d8e_9e99_9275f751877c.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_action_revertToCurrentSnapshot(self, method, url, body, headers):
assert method == 'POST'
body = self.fixtures.load(
'api_vApp_vapp_8c57a5b6_e61b_48ca_8a78_3b70ee65ef6b_revert_snapshot.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
def _api_task_fe75d3af_f5a3_44a5_b016_ae0bdadfc32b(self, method, url, body, headers):
body = self.fixtures.load(
'api_task_fe75d3af_f5a3_44a5_b016_ae0bdadfc32b.xml')
return httplib.OK, body, headers, httplib.responses[httplib.OK]
if __name__ == '__main__':
sys.exit(unittest.main())
| apache-2.0 |
swcloud/api-client-staging | generated/python/proto-google-cloud-datastore-v1/google/cloud/proto/datastore/v1/datastore_pb2.py | 10 | 52972 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/proto/datastore/v1/datastore.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.cloud.proto.datastore.v1 import entity_pb2 as google_dot_cloud_dot_proto_dot_datastore_dot_v1_dot_entity__pb2
from google.cloud.proto.datastore.v1 import query_pb2 as google_dot_cloud_dot_proto_dot_datastore_dot_v1_dot_query__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/cloud/proto/datastore/v1/datastore.proto',
package='google.datastore.v1',
syntax='proto3',
serialized_pb=_b('\n/google/cloud/proto/datastore/v1/datastore.proto\x12\x13google.datastore.v1\x1a\x1cgoogle/api/annotations.proto\x1a,google/cloud/proto/datastore/v1/entity.proto\x1a+google/cloud/proto/datastore/v1/query.proto\"\x83\x01\n\rLookupRequest\x12\x12\n\nproject_id\x18\x08 \x01(\t\x12\x36\n\x0cread_options\x18\x01 \x01(\x0b\x32 .google.datastore.v1.ReadOptions\x12&\n\x04keys\x18\x03 \x03(\x0b\x32\x18.google.datastore.v1.Key\"\xa2\x01\n\x0eLookupResponse\x12\x30\n\x05\x66ound\x18\x01 \x03(\x0b\x32!.google.datastore.v1.EntityResult\x12\x32\n\x07missing\x18\x02 \x03(\x0b\x32!.google.datastore.v1.EntityResult\x12*\n\x08\x64\x65\x66\x65rred\x18\x03 \x03(\x0b\x32\x18.google.datastore.v1.Key\"\x84\x02\n\x0fRunQueryRequest\x12\x12\n\nproject_id\x18\x08 \x01(\t\x12\x36\n\x0cpartition_id\x18\x02 \x01(\x0b\x32 .google.datastore.v1.PartitionId\x12\x36\n\x0cread_options\x18\x01 \x01(\x0b\x32 .google.datastore.v1.ReadOptions\x12+\n\x05query\x18\x03 \x01(\x0b\x32\x1a.google.datastore.v1.QueryH\x00\x12\x32\n\tgql_query\x18\x07 \x01(\x0b\x32\x1d.google.datastore.v1.GqlQueryH\x00\x42\x0c\n\nquery_type\"s\n\x10RunQueryResponse\x12\x34\n\x05\x62\x61tch\x18\x01 \x01(\x0b\x32%.google.datastore.v1.QueryResultBatch\x12)\n\x05query\x18\x02 \x01(\x0b\x32\x1a.google.datastore.v1.Query\"-\n\x17\x42\x65ginTransactionRequest\x12\x12\n\nproject_id\x18\x08 \x01(\t\"/\n\x18\x42\x65ginTransactionResponse\x12\x13\n\x0btransaction\x18\x01 \x01(\x0c\":\n\x0fRollbackRequest\x12\x12\n\nproject_id\x18\x08 \x01(\t\x12\x13\n\x0btransaction\x18\x01 \x01(\x0c\"\x12\n\x10RollbackResponse\"\x83\x02\n\rCommitRequest\x12\x12\n\nproject_id\x18\x08 \x01(\t\x12\x35\n\x04mode\x18\x05 \x01(\x0e\x32\'.google.datastore.v1.CommitRequest.Mode\x12\x15\n\x0btransaction\x18\x01 \x01(\x0cH\x00\x12\x30\n\tmutations\x18\x06 \x03(\x0b\x32\x1d.google.datastore.v1.Mutation\"F\n\x04Mode\x12\x14\n\x10MODE_UNSPECIFIED\x10\x00\x12\x11\n\rTRANSACTIONAL\x10\x01\x12\x15\n\x11NON_TRANSACTIONAL\x10\x02\x42\x16\n\x14transaction_selector\"f\n\x0e\x43ommitResponse\x12=\n\x10mutation_results\x18\x03 \x03(\x0b\x32#.google.datastore.v1.MutationResult\x12\x15\n\rindex_updates\x18\x04 \x01(\x05\"P\n\x12\x41llocateIdsRequest\x12\x12\n\nproject_id\x18\x08 \x01(\t\x12&\n\x04keys\x18\x01 \x03(\x0b\x32\x18.google.datastore.v1.Key\"=\n\x13\x41llocateIdsResponse\x12&\n\x04keys\x18\x01 \x03(\x0b\x32\x18.google.datastore.v1.Key\"\x87\x02\n\x08Mutation\x12-\n\x06insert\x18\x04 \x01(\x0b\x32\x1b.google.datastore.v1.EntityH\x00\x12-\n\x06update\x18\x05 \x01(\x0b\x32\x1b.google.datastore.v1.EntityH\x00\x12-\n\x06upsert\x18\x06 \x01(\x0b\x32\x1b.google.datastore.v1.EntityH\x00\x12*\n\x06\x64\x65lete\x18\x07 \x01(\x0b\x32\x18.google.datastore.v1.KeyH\x00\x12\x16\n\x0c\x62\x61se_version\x18\x08 \x01(\x03H\x01\x42\x0b\n\toperationB\x1d\n\x1b\x63onflict_detection_strategy\"c\n\x0eMutationResult\x12%\n\x03key\x18\x03 \x01(\x0b\x32\x18.google.datastore.v1.Key\x12\x0f\n\x07version\x18\x04 \x01(\x03\x12\x19\n\x11\x63onflict_detected\x18\x05 \x01(\x08\"\xd5\x01\n\x0bReadOptions\x12L\n\x10read_consistency\x18\x01 \x01(\x0e\x32\x30.google.datastore.v1.ReadOptions.ReadConsistencyH\x00\x12\x15\n\x0btransaction\x18\x02 \x01(\x0cH\x00\"M\n\x0fReadConsistency\x12 \n\x1cREAD_CONSISTENCY_UNSPECIFIED\x10\x00\x12\n\n\x06STRONG\x10\x01\x12\x0c\n\x08\x45VENTUAL\x10\x02\x42\x12\n\x10\x63onsistency_type2\xdb\x06\n\tDatastore\x12~\n\x06Lookup\x12\".google.datastore.v1.LookupRequest\x1a#.google.datastore.v1.LookupResponse\"+\x82\xd3\xe4\x93\x02%\" /v1/projects/{project_id}:lookup:\x01*\x12\x86\x01\n\x08RunQuery\x12$.google.datastore.v1.RunQueryRequest\x1a%.google.datastore.v1.RunQueryResponse\"-\x82\xd3\xe4\x93\x02\'\"\"/v1/projects/{project_id}:runQuery:\x01*\x12\xa6\x01\n\x10\x42\x65ginTransaction\x12,.google.datastore.v1.BeginTransactionRequest\x1a-.google.datastore.v1.BeginTransactionResponse\"5\x82\xd3\xe4\x93\x02/\"*/v1/projects/{project_id}:beginTransaction:\x01*\x12~\n\x06\x43ommit\x12\".google.datastore.v1.CommitRequest\x1a#.google.datastore.v1.CommitResponse\"+\x82\xd3\xe4\x93\x02%\" /v1/projects/{project_id}:commit:\x01*\x12\x86\x01\n\x08Rollback\x12$.google.datastore.v1.RollbackRequest\x1a%.google.datastore.v1.RollbackResponse\"-\x82\xd3\xe4\x93\x02\'\"\"/v1/projects/{project_id}:rollback:\x01*\x12\x92\x01\n\x0b\x41llocateIds\x12\'.google.datastore.v1.AllocateIdsRequest\x1a(.google.datastore.v1.AllocateIdsResponse\"0\x82\xd3\xe4\x93\x02*\"%/v1/projects/{project_id}:allocateIds:\x01*B\x85\x01\n\x17\x63om.google.datastore.v1B\x0e\x44\x61tastoreProtoP\x01Z<google.golang.org/genproto/googleapis/datastore/v1;datastore\xaa\x02\x19Google.Cloud.Datastore.V1b\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_cloud_dot_proto_dot_datastore_dot_v1_dot_entity__pb2.DESCRIPTOR,google_dot_cloud_dot_proto_dot_datastore_dot_v1_dot_query__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_COMMITREQUEST_MODE = _descriptor.EnumDescriptor(
name='Mode',
full_name='google.datastore.v1.CommitRequest.Mode',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='MODE_UNSPECIFIED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TRANSACTIONAL', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NON_TRANSACTIONAL', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1214,
serialized_end=1284,
)
_sym_db.RegisterEnumDescriptor(_COMMITREQUEST_MODE)
_READOPTIONS_READCONSISTENCY = _descriptor.EnumDescriptor(
name='ReadConsistency',
full_name='google.datastore.v1.ReadOptions.ReadConsistency',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='READ_CONSISTENCY_UNSPECIFIED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STRONG', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EVENTUAL', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=2043,
serialized_end=2120,
)
_sym_db.RegisterEnumDescriptor(_READOPTIONS_READCONSISTENCY)
_LOOKUPREQUEST = _descriptor.Descriptor(
name='LookupRequest',
full_name='google.datastore.v1.LookupRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.datastore.v1.LookupRequest.project_id', index=0,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='read_options', full_name='google.datastore.v1.LookupRequest.read_options', index=1,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='keys', full_name='google.datastore.v1.LookupRequest.keys', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=194,
serialized_end=325,
)
_LOOKUPRESPONSE = _descriptor.Descriptor(
name='LookupResponse',
full_name='google.datastore.v1.LookupResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='found', full_name='google.datastore.v1.LookupResponse.found', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='missing', full_name='google.datastore.v1.LookupResponse.missing', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='deferred', full_name='google.datastore.v1.LookupResponse.deferred', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=328,
serialized_end=490,
)
_RUNQUERYREQUEST = _descriptor.Descriptor(
name='RunQueryRequest',
full_name='google.datastore.v1.RunQueryRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.datastore.v1.RunQueryRequest.project_id', index=0,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='partition_id', full_name='google.datastore.v1.RunQueryRequest.partition_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='read_options', full_name='google.datastore.v1.RunQueryRequest.read_options', index=2,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='query', full_name='google.datastore.v1.RunQueryRequest.query', index=3,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='gql_query', full_name='google.datastore.v1.RunQueryRequest.gql_query', index=4,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='query_type', full_name='google.datastore.v1.RunQueryRequest.query_type',
index=0, containing_type=None, fields=[]),
],
serialized_start=493,
serialized_end=753,
)
_RUNQUERYRESPONSE = _descriptor.Descriptor(
name='RunQueryResponse',
full_name='google.datastore.v1.RunQueryResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='batch', full_name='google.datastore.v1.RunQueryResponse.batch', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='query', full_name='google.datastore.v1.RunQueryResponse.query', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=755,
serialized_end=870,
)
_BEGINTRANSACTIONREQUEST = _descriptor.Descriptor(
name='BeginTransactionRequest',
full_name='google.datastore.v1.BeginTransactionRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.datastore.v1.BeginTransactionRequest.project_id', index=0,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=872,
serialized_end=917,
)
_BEGINTRANSACTIONRESPONSE = _descriptor.Descriptor(
name='BeginTransactionResponse',
full_name='google.datastore.v1.BeginTransactionResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='transaction', full_name='google.datastore.v1.BeginTransactionResponse.transaction', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=919,
serialized_end=966,
)
_ROLLBACKREQUEST = _descriptor.Descriptor(
name='RollbackRequest',
full_name='google.datastore.v1.RollbackRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.datastore.v1.RollbackRequest.project_id', index=0,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='transaction', full_name='google.datastore.v1.RollbackRequest.transaction', index=1,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=968,
serialized_end=1026,
)
_ROLLBACKRESPONSE = _descriptor.Descriptor(
name='RollbackResponse',
full_name='google.datastore.v1.RollbackResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1028,
serialized_end=1046,
)
_COMMITREQUEST = _descriptor.Descriptor(
name='CommitRequest',
full_name='google.datastore.v1.CommitRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.datastore.v1.CommitRequest.project_id', index=0,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mode', full_name='google.datastore.v1.CommitRequest.mode', index=1,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='transaction', full_name='google.datastore.v1.CommitRequest.transaction', index=2,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mutations', full_name='google.datastore.v1.CommitRequest.mutations', index=3,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_COMMITREQUEST_MODE,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='transaction_selector', full_name='google.datastore.v1.CommitRequest.transaction_selector',
index=0, containing_type=None, fields=[]),
],
serialized_start=1049,
serialized_end=1308,
)
_COMMITRESPONSE = _descriptor.Descriptor(
name='CommitResponse',
full_name='google.datastore.v1.CommitResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='mutation_results', full_name='google.datastore.v1.CommitResponse.mutation_results', index=0,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='index_updates', full_name='google.datastore.v1.CommitResponse.index_updates', index=1,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1310,
serialized_end=1412,
)
_ALLOCATEIDSREQUEST = _descriptor.Descriptor(
name='AllocateIdsRequest',
full_name='google.datastore.v1.AllocateIdsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.datastore.v1.AllocateIdsRequest.project_id', index=0,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='keys', full_name='google.datastore.v1.AllocateIdsRequest.keys', index=1,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1414,
serialized_end=1494,
)
_ALLOCATEIDSRESPONSE = _descriptor.Descriptor(
name='AllocateIdsResponse',
full_name='google.datastore.v1.AllocateIdsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='keys', full_name='google.datastore.v1.AllocateIdsResponse.keys', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1496,
serialized_end=1557,
)
_MUTATION = _descriptor.Descriptor(
name='Mutation',
full_name='google.datastore.v1.Mutation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='insert', full_name='google.datastore.v1.Mutation.insert', index=0,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='update', full_name='google.datastore.v1.Mutation.update', index=1,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='upsert', full_name='google.datastore.v1.Mutation.upsert', index=2,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='delete', full_name='google.datastore.v1.Mutation.delete', index=3,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='base_version', full_name='google.datastore.v1.Mutation.base_version', index=4,
number=8, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='operation', full_name='google.datastore.v1.Mutation.operation',
index=0, containing_type=None, fields=[]),
_descriptor.OneofDescriptor(
name='conflict_detection_strategy', full_name='google.datastore.v1.Mutation.conflict_detection_strategy',
index=1, containing_type=None, fields=[]),
],
serialized_start=1560,
serialized_end=1823,
)
_MUTATIONRESULT = _descriptor.Descriptor(
name='MutationResult',
full_name='google.datastore.v1.MutationResult',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='google.datastore.v1.MutationResult.key', index=0,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='version', full_name='google.datastore.v1.MutationResult.version', index=1,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='conflict_detected', full_name='google.datastore.v1.MutationResult.conflict_detected', index=2,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1825,
serialized_end=1924,
)
_READOPTIONS = _descriptor.Descriptor(
name='ReadOptions',
full_name='google.datastore.v1.ReadOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='read_consistency', full_name='google.datastore.v1.ReadOptions.read_consistency', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='transaction', full_name='google.datastore.v1.ReadOptions.transaction', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_READOPTIONS_READCONSISTENCY,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='consistency_type', full_name='google.datastore.v1.ReadOptions.consistency_type',
index=0, containing_type=None, fields=[]),
],
serialized_start=1927,
serialized_end=2140,
)
_LOOKUPREQUEST.fields_by_name['read_options'].message_type = _READOPTIONS
_LOOKUPREQUEST.fields_by_name['keys'].message_type = google_dot_cloud_dot_proto_dot_datastore_dot_v1_dot_entity__pb2._KEY
_LOOKUPRESPONSE.fields_by_name['found'].message_type = google_dot_cloud_dot_proto_dot_datastore_dot_v1_dot_query__pb2._ENTITYRESULT
_LOOKUPRESPONSE.fields_by_name['missing'].message_type = google_dot_cloud_dot_proto_dot_datastore_dot_v1_dot_query__pb2._ENTITYRESULT
_LOOKUPRESPONSE.fields_by_name['deferred'].message_type = google_dot_cloud_dot_proto_dot_datastore_dot_v1_dot_entity__pb2._KEY
_RUNQUERYREQUEST.fields_by_name['partition_id'].message_type = google_dot_cloud_dot_proto_dot_datastore_dot_v1_dot_entity__pb2._PARTITIONID
_RUNQUERYREQUEST.fields_by_name['read_options'].message_type = _READOPTIONS
_RUNQUERYREQUEST.fields_by_name['query'].message_type = google_dot_cloud_dot_proto_dot_datastore_dot_v1_dot_query__pb2._QUERY
_RUNQUERYREQUEST.fields_by_name['gql_query'].message_type = google_dot_cloud_dot_proto_dot_datastore_dot_v1_dot_query__pb2._GQLQUERY
_RUNQUERYREQUEST.oneofs_by_name['query_type'].fields.append(
_RUNQUERYREQUEST.fields_by_name['query'])
_RUNQUERYREQUEST.fields_by_name['query'].containing_oneof = _RUNQUERYREQUEST.oneofs_by_name['query_type']
_RUNQUERYREQUEST.oneofs_by_name['query_type'].fields.append(
_RUNQUERYREQUEST.fields_by_name['gql_query'])
_RUNQUERYREQUEST.fields_by_name['gql_query'].containing_oneof = _RUNQUERYREQUEST.oneofs_by_name['query_type']
_RUNQUERYRESPONSE.fields_by_name['batch'].message_type = google_dot_cloud_dot_proto_dot_datastore_dot_v1_dot_query__pb2._QUERYRESULTBATCH
_RUNQUERYRESPONSE.fields_by_name['query'].message_type = google_dot_cloud_dot_proto_dot_datastore_dot_v1_dot_query__pb2._QUERY
_COMMITREQUEST.fields_by_name['mode'].enum_type = _COMMITREQUEST_MODE
_COMMITREQUEST.fields_by_name['mutations'].message_type = _MUTATION
_COMMITREQUEST_MODE.containing_type = _COMMITREQUEST
_COMMITREQUEST.oneofs_by_name['transaction_selector'].fields.append(
_COMMITREQUEST.fields_by_name['transaction'])
_COMMITREQUEST.fields_by_name['transaction'].containing_oneof = _COMMITREQUEST.oneofs_by_name['transaction_selector']
_COMMITRESPONSE.fields_by_name['mutation_results'].message_type = _MUTATIONRESULT
_ALLOCATEIDSREQUEST.fields_by_name['keys'].message_type = google_dot_cloud_dot_proto_dot_datastore_dot_v1_dot_entity__pb2._KEY
_ALLOCATEIDSRESPONSE.fields_by_name['keys'].message_type = google_dot_cloud_dot_proto_dot_datastore_dot_v1_dot_entity__pb2._KEY
_MUTATION.fields_by_name['insert'].message_type = google_dot_cloud_dot_proto_dot_datastore_dot_v1_dot_entity__pb2._ENTITY
_MUTATION.fields_by_name['update'].message_type = google_dot_cloud_dot_proto_dot_datastore_dot_v1_dot_entity__pb2._ENTITY
_MUTATION.fields_by_name['upsert'].message_type = google_dot_cloud_dot_proto_dot_datastore_dot_v1_dot_entity__pb2._ENTITY
_MUTATION.fields_by_name['delete'].message_type = google_dot_cloud_dot_proto_dot_datastore_dot_v1_dot_entity__pb2._KEY
_MUTATION.oneofs_by_name['operation'].fields.append(
_MUTATION.fields_by_name['insert'])
_MUTATION.fields_by_name['insert'].containing_oneof = _MUTATION.oneofs_by_name['operation']
_MUTATION.oneofs_by_name['operation'].fields.append(
_MUTATION.fields_by_name['update'])
_MUTATION.fields_by_name['update'].containing_oneof = _MUTATION.oneofs_by_name['operation']
_MUTATION.oneofs_by_name['operation'].fields.append(
_MUTATION.fields_by_name['upsert'])
_MUTATION.fields_by_name['upsert'].containing_oneof = _MUTATION.oneofs_by_name['operation']
_MUTATION.oneofs_by_name['operation'].fields.append(
_MUTATION.fields_by_name['delete'])
_MUTATION.fields_by_name['delete'].containing_oneof = _MUTATION.oneofs_by_name['operation']
_MUTATION.oneofs_by_name['conflict_detection_strategy'].fields.append(
_MUTATION.fields_by_name['base_version'])
_MUTATION.fields_by_name['base_version'].containing_oneof = _MUTATION.oneofs_by_name['conflict_detection_strategy']
_MUTATIONRESULT.fields_by_name['key'].message_type = google_dot_cloud_dot_proto_dot_datastore_dot_v1_dot_entity__pb2._KEY
_READOPTIONS.fields_by_name['read_consistency'].enum_type = _READOPTIONS_READCONSISTENCY
_READOPTIONS_READCONSISTENCY.containing_type = _READOPTIONS
_READOPTIONS.oneofs_by_name['consistency_type'].fields.append(
_READOPTIONS.fields_by_name['read_consistency'])
_READOPTIONS.fields_by_name['read_consistency'].containing_oneof = _READOPTIONS.oneofs_by_name['consistency_type']
_READOPTIONS.oneofs_by_name['consistency_type'].fields.append(
_READOPTIONS.fields_by_name['transaction'])
_READOPTIONS.fields_by_name['transaction'].containing_oneof = _READOPTIONS.oneofs_by_name['consistency_type']
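# Introspection sketch (editorial illustration, not generated output): after
# the cross-linking above, the Descriptor objects can be inspected at runtime.
# The lookups below only use names defined earlier in this module.
#
#   _MUTATION.fields_by_name['insert'].number                 # -> 4
#   _MUTATION.fields_by_name['insert'].containing_oneof.name  # -> 'operation'
#   _READOPTIONS.fields_by_name['read_consistency'].enum_type.name
#                                                             # -> 'ReadConsistency'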
DESCRIPTOR.message_types_by_name['LookupRequest'] = _LOOKUPREQUEST
DESCRIPTOR.message_types_by_name['LookupResponse'] = _LOOKUPRESPONSE
DESCRIPTOR.message_types_by_name['RunQueryRequest'] = _RUNQUERYREQUEST
DESCRIPTOR.message_types_by_name['RunQueryResponse'] = _RUNQUERYRESPONSE
DESCRIPTOR.message_types_by_name['BeginTransactionRequest'] = _BEGINTRANSACTIONREQUEST
DESCRIPTOR.message_types_by_name['BeginTransactionResponse'] = _BEGINTRANSACTIONRESPONSE
DESCRIPTOR.message_types_by_name['RollbackRequest'] = _ROLLBACKREQUEST
DESCRIPTOR.message_types_by_name['RollbackResponse'] = _ROLLBACKRESPONSE
DESCRIPTOR.message_types_by_name['CommitRequest'] = _COMMITREQUEST
DESCRIPTOR.message_types_by_name['CommitResponse'] = _COMMITRESPONSE
DESCRIPTOR.message_types_by_name['AllocateIdsRequest'] = _ALLOCATEIDSREQUEST
DESCRIPTOR.message_types_by_name['AllocateIdsResponse'] = _ALLOCATEIDSRESPONSE
DESCRIPTOR.message_types_by_name['Mutation'] = _MUTATION
DESCRIPTOR.message_types_by_name['MutationResult'] = _MUTATIONRESULT
DESCRIPTOR.message_types_by_name['ReadOptions'] = _READOPTIONS
LookupRequest = _reflection.GeneratedProtocolMessageType('LookupRequest', (_message.Message,), dict(
DESCRIPTOR = _LOOKUPREQUEST,
__module__ = 'google.cloud.proto.datastore.v1.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1.LookupRequest)
))
_sym_db.RegisterMessage(LookupRequest)
LookupResponse = _reflection.GeneratedProtocolMessageType('LookupResponse', (_message.Message,), dict(
DESCRIPTOR = _LOOKUPRESPONSE,
__module__ = 'google.cloud.proto.datastore.v1.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1.LookupResponse)
))
_sym_db.RegisterMessage(LookupResponse)
RunQueryRequest = _reflection.GeneratedProtocolMessageType('RunQueryRequest', (_message.Message,), dict(
DESCRIPTOR = _RUNQUERYREQUEST,
__module__ = 'google.cloud.proto.datastore.v1.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1.RunQueryRequest)
))
_sym_db.RegisterMessage(RunQueryRequest)
RunQueryResponse = _reflection.GeneratedProtocolMessageType('RunQueryResponse', (_message.Message,), dict(
DESCRIPTOR = _RUNQUERYRESPONSE,
__module__ = 'google.cloud.proto.datastore.v1.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1.RunQueryResponse)
))
_sym_db.RegisterMessage(RunQueryResponse)
BeginTransactionRequest = _reflection.GeneratedProtocolMessageType('BeginTransactionRequest', (_message.Message,), dict(
DESCRIPTOR = _BEGINTRANSACTIONREQUEST,
__module__ = 'google.cloud.proto.datastore.v1.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1.BeginTransactionRequest)
))
_sym_db.RegisterMessage(BeginTransactionRequest)
BeginTransactionResponse = _reflection.GeneratedProtocolMessageType('BeginTransactionResponse', (_message.Message,), dict(
DESCRIPTOR = _BEGINTRANSACTIONRESPONSE,
__module__ = 'google.cloud.proto.datastore.v1.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1.BeginTransactionResponse)
))
_sym_db.RegisterMessage(BeginTransactionResponse)
RollbackRequest = _reflection.GeneratedProtocolMessageType('RollbackRequest', (_message.Message,), dict(
DESCRIPTOR = _ROLLBACKREQUEST,
__module__ = 'google.cloud.proto.datastore.v1.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1.RollbackRequest)
))
_sym_db.RegisterMessage(RollbackRequest)
RollbackResponse = _reflection.GeneratedProtocolMessageType('RollbackResponse', (_message.Message,), dict(
DESCRIPTOR = _ROLLBACKRESPONSE,
__module__ = 'google.cloud.proto.datastore.v1.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1.RollbackResponse)
))
_sym_db.RegisterMessage(RollbackResponse)
CommitRequest = _reflection.GeneratedProtocolMessageType('CommitRequest', (_message.Message,), dict(
DESCRIPTOR = _COMMITREQUEST,
__module__ = 'google.cloud.proto.datastore.v1.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1.CommitRequest)
))
_sym_db.RegisterMessage(CommitRequest)
CommitResponse = _reflection.GeneratedProtocolMessageType('CommitResponse', (_message.Message,), dict(
DESCRIPTOR = _COMMITRESPONSE,
__module__ = 'google.cloud.proto.datastore.v1.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1.CommitResponse)
))
_sym_db.RegisterMessage(CommitResponse)
AllocateIdsRequest = _reflection.GeneratedProtocolMessageType('AllocateIdsRequest', (_message.Message,), dict(
DESCRIPTOR = _ALLOCATEIDSREQUEST,
__module__ = 'google.cloud.proto.datastore.v1.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1.AllocateIdsRequest)
))
_sym_db.RegisterMessage(AllocateIdsRequest)
AllocateIdsResponse = _reflection.GeneratedProtocolMessageType('AllocateIdsResponse', (_message.Message,), dict(
DESCRIPTOR = _ALLOCATEIDSRESPONSE,
__module__ = 'google.cloud.proto.datastore.v1.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1.AllocateIdsResponse)
))
_sym_db.RegisterMessage(AllocateIdsResponse)
Mutation = _reflection.GeneratedProtocolMessageType('Mutation', (_message.Message,), dict(
DESCRIPTOR = _MUTATION,
__module__ = 'google.cloud.proto.datastore.v1.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1.Mutation)
))
_sym_db.RegisterMessage(Mutation)
MutationResult = _reflection.GeneratedProtocolMessageType('MutationResult', (_message.Message,), dict(
DESCRIPTOR = _MUTATIONRESULT,
__module__ = 'google.cloud.proto.datastore.v1.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1.MutationResult)
))
_sym_db.RegisterMessage(MutationResult)
ReadOptions = _reflection.GeneratedProtocolMessageType('ReadOptions', (_message.Message,), dict(
DESCRIPTOR = _READOPTIONS,
__module__ = 'google.cloud.proto.datastore.v1.datastore_pb2'
# @@protoc_insertion_point(class_scope:google.datastore.v1.ReadOptions)
))
_sym_db.RegisterMessage(ReadOptions)
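# Construction sketch (an assumption for illustration, not generated output):
# the classes registered above behave like ordinary protobuf messages, and the
# 'operation' oneof wired earlier means setting one of insert/update/upsert/
# delete clears the others. The project ID below is hypothetical.
#
#   mutation = Mutation()
#   mutation.upsert.key.partition_id.project_id = 'my-project'
#   commit_req = CommitRequest(
#       project_id='my-project',
#       mode=CommitRequest.NON_TRANSACTIONAL,
#       mutations=[mutation],
#   )
#   assert mutation.WhichOneof('operation') == 'upsert'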
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\027com.google.datastore.v1B\016DatastoreProtoP\001Z<google.golang.org/genproto/googleapis/datastore/v1;datastore\252\002\031Google.Cloud.Datastore.V1'))
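# Note (editorial): the serialized FileOptions bytes above decode to the
# standard option fields -- java_package 'com.google.datastore.v1',
# java_outer_classname 'DatastoreProto', java_multiple_files, a go_package of
# 'google.golang.org/genproto/googleapis/datastore/v1;datastore', and the
# csharp_namespace 'Google.Cloud.Datastore.V1'.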
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
class DatastoreStub(object):
"""Each RPC normalizes the partition IDs of the keys in its input entities,
and always returns entities with keys with normalized partition IDs.
This applies to all keys and entities, including those in values, except keys
with both an empty path and an empty or unset partition ID. Normalization of
input keys sets the project ID (if not already set) to the project ID from
the request.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Lookup = channel.unary_unary(
'/google.datastore.v1.Datastore/Lookup',
request_serializer=LookupRequest.SerializeToString,
response_deserializer=LookupResponse.FromString,
)
self.RunQuery = channel.unary_unary(
'/google.datastore.v1.Datastore/RunQuery',
request_serializer=RunQueryRequest.SerializeToString,
response_deserializer=RunQueryResponse.FromString,
)
self.BeginTransaction = channel.unary_unary(
'/google.datastore.v1.Datastore/BeginTransaction',
request_serializer=BeginTransactionRequest.SerializeToString,
response_deserializer=BeginTransactionResponse.FromString,
)
self.Commit = channel.unary_unary(
'/google.datastore.v1.Datastore/Commit',
request_serializer=CommitRequest.SerializeToString,
response_deserializer=CommitResponse.FromString,
)
self.Rollback = channel.unary_unary(
'/google.datastore.v1.Datastore/Rollback',
request_serializer=RollbackRequest.SerializeToString,
response_deserializer=RollbackResponse.FromString,
)
self.AllocateIds = channel.unary_unary(
'/google.datastore.v1.Datastore/AllocateIds',
request_serializer=AllocateIdsRequest.SerializeToString,
response_deserializer=AllocateIdsResponse.FromString,
)
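# Client usage sketch (an assumption, not generated output): DatastoreStub
# wraps a grpc.Channel and exposes one callable per RPC. The endpoint below is
# hypothetical (e.g. a local Datastore emulator); credentials are omitted.
#
#   channel = grpc.insecure_channel('localhost:8081')
#   stub = DatastoreStub(channel)
#   response = stub.Lookup(LookupRequest(project_id='my-project'), timeout=10)
#   print(response.found, response.missing, response.deferred)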
class DatastoreServicer(object):
"""Each RPC normalizes the partition IDs of the keys in its input entities,
and always returns entities with keys with normalized partition IDs.
This applies to all keys and entities, including those in values, except keys
with both an empty path and an empty or unset partition ID. Normalization of
input keys sets the project ID (if not already set) to the project ID from
the request.
"""
def Lookup(self, request, context):
"""Looks up entities by key.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RunQuery(self, request, context):
"""Queries for entities.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def BeginTransaction(self, request, context):
"""Begins a new transaction.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Commit(self, request, context):
"""Commits a transaction, optionally creating, deleting or modifying some
entities.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Rollback(self, request, context):
"""Rolls back a transaction.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AllocateIds(self, request, context):
"""Allocates IDs for the given keys, which is useful for referencing an entity
before it is inserted.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_DatastoreServicer_to_server(servicer, server):
rpc_method_handlers = {
'Lookup': grpc.unary_unary_rpc_method_handler(
servicer.Lookup,
request_deserializer=LookupRequest.FromString,
response_serializer=LookupResponse.SerializeToString,
),
'RunQuery': grpc.unary_unary_rpc_method_handler(
servicer.RunQuery,
request_deserializer=RunQueryRequest.FromString,
response_serializer=RunQueryResponse.SerializeToString,
),
'BeginTransaction': grpc.unary_unary_rpc_method_handler(
servicer.BeginTransaction,
request_deserializer=BeginTransactionRequest.FromString,
response_serializer=BeginTransactionResponse.SerializeToString,
),
'Commit': grpc.unary_unary_rpc_method_handler(
servicer.Commit,
request_deserializer=CommitRequest.FromString,
response_serializer=CommitResponse.SerializeToString,
),
'Rollback': grpc.unary_unary_rpc_method_handler(
servicer.Rollback,
request_deserializer=RollbackRequest.FromString,
response_serializer=RollbackResponse.SerializeToString,
),
'AllocateIds': grpc.unary_unary_rpc_method_handler(
servicer.AllocateIds,
request_deserializer=AllocateIdsRequest.FromString,
response_serializer=AllocateIdsResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.datastore.v1.Datastore', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
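# Server usage sketch (an assumption, not generated output): subclass
# DatastoreServicer, override the RPCs you support, and register the instance
# on a grpc server. The port and class name below are hypothetical.
#
#   from concurrent import futures
#
#   class InMemoryDatastore(DatastoreServicer):
#       def Lookup(self, request, context):
#           return LookupResponse()  # trivial reply for illustration
#
#   server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
#   add_DatastoreServicer_to_server(InMemoryDatastore(), server)
#   server.add_insecure_port('[::]:50051')
#   server.start()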
class BetaDatastoreServicer(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.

Each RPC normalizes the partition IDs of the keys in its input entities,
and always returns entities with keys with normalized partition IDs.
This applies to all keys and entities, including those in values, except keys
with both an empty path and an empty or unset partition ID. Normalization of
input keys sets the project ID (if not already set) to the project ID from
the request.
"""
def Lookup(self, request, context):
"""Looks up entities by key.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def RunQuery(self, request, context):
"""Queries for entities.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def BeginTransaction(self, request, context):
"""Begins a new transaction.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def Commit(self, request, context):
"""Commits a transaction, optionally creating, deleting or modifying some
entities.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def Rollback(self, request, context):
"""Rolls back a transaction.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def AllocateIds(self, request, context):
"""Allocates IDs for the given keys, which is useful for referencing an entity
before it is inserted.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
class BetaDatastoreStub(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.

Each RPC normalizes the partition IDs of the keys in its input entities,
and always returns entities with keys with normalized partition IDs.
This applies to all keys and entities, including those in values, except keys
with both an empty path and an empty or unset partition ID. Normalization of
input keys sets the project ID (if not already set) to the project ID from
the request.
"""
def Lookup(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Looks up entities by key.
"""
raise NotImplementedError()
Lookup.future = None
def RunQuery(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Queries for entities.
"""
raise NotImplementedError()
RunQuery.future = None
def BeginTransaction(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Begins a new transaction.
"""
raise NotImplementedError()
BeginTransaction.future = None
def Commit(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Commits a transaction, optionally creating, deleting or modifying some
entities.
"""
raise NotImplementedError()
Commit.future = None
def Rollback(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Rolls back a transaction.
"""
raise NotImplementedError()
Rollback.future = None
def AllocateIds(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Allocates IDs for the given keys, which is useful for referencing an entity
before it is inserted.
"""
raise NotImplementedError()
AllocateIds.future = None
def beta_create_Datastore_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_deserializers = {
('google.datastore.v1.Datastore', 'AllocateIds'): AllocateIdsRequest.FromString,
('google.datastore.v1.Datastore', 'BeginTransaction'): BeginTransactionRequest.FromString,
('google.datastore.v1.Datastore', 'Commit'): CommitRequest.FromString,
('google.datastore.v1.Datastore', 'Lookup'): LookupRequest.FromString,
('google.datastore.v1.Datastore', 'Rollback'): RollbackRequest.FromString,
('google.datastore.v1.Datastore', 'RunQuery'): RunQueryRequest.FromString,
}
response_serializers = {
('google.datastore.v1.Datastore', 'AllocateIds'): AllocateIdsResponse.SerializeToString,
('google.datastore.v1.Datastore', 'BeginTransaction'): BeginTransactionResponse.SerializeToString,
('google.datastore.v1.Datastore', 'Commit'): CommitResponse.SerializeToString,
('google.datastore.v1.Datastore', 'Lookup'): LookupResponse.SerializeToString,
('google.datastore.v1.Datastore', 'Rollback'): RollbackResponse.SerializeToString,
('google.datastore.v1.Datastore', 'RunQuery'): RunQueryResponse.SerializeToString,
}
method_implementations = {
('google.datastore.v1.Datastore', 'AllocateIds'): face_utilities.unary_unary_inline(servicer.AllocateIds),
('google.datastore.v1.Datastore', 'BeginTransaction'): face_utilities.unary_unary_inline(servicer.BeginTransaction),
('google.datastore.v1.Datastore', 'Commit'): face_utilities.unary_unary_inline(servicer.Commit),
('google.datastore.v1.Datastore', 'Lookup'): face_utilities.unary_unary_inline(servicer.Lookup),
('google.datastore.v1.Datastore', 'Rollback'): face_utilities.unary_unary_inline(servicer.Rollback),
('google.datastore.v1.Datastore', 'RunQuery'): face_utilities.unary_unary_inline(servicer.RunQuery),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
def beta_create_Datastore_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_serializers = {
('google.datastore.v1.Datastore', 'AllocateIds'): AllocateIdsRequest.SerializeToString,
('google.datastore.v1.Datastore', 'BeginTransaction'): BeginTransactionRequest.SerializeToString,
('google.datastore.v1.Datastore', 'Commit'): CommitRequest.SerializeToString,
('google.datastore.v1.Datastore', 'Lookup'): LookupRequest.SerializeToString,
('google.datastore.v1.Datastore', 'Rollback'): RollbackRequest.SerializeToString,
('google.datastore.v1.Datastore', 'RunQuery'): RunQueryRequest.SerializeToString,
}
response_deserializers = {
('google.datastore.v1.Datastore', 'AllocateIds'): AllocateIdsResponse.FromString,
('google.datastore.v1.Datastore', 'BeginTransaction'): BeginTransactionResponse.FromString,
('google.datastore.v1.Datastore', 'Commit'): CommitResponse.FromString,
('google.datastore.v1.Datastore', 'Lookup'): LookupResponse.FromString,
('google.datastore.v1.Datastore', 'Rollback'): RollbackResponse.FromString,
('google.datastore.v1.Datastore', 'RunQuery'): RunQueryResponse.FromString,
}
cardinalities = {
'AllocateIds': cardinality.Cardinality.UNARY_UNARY,
'BeginTransaction': cardinality.Cardinality.UNARY_UNARY,
'Commit': cardinality.Cardinality.UNARY_UNARY,
'Lookup': cardinality.Cardinality.UNARY_UNARY,
'Rollback': cardinality.Cardinality.UNARY_UNARY,
'RunQuery': cardinality.Cardinality.UNARY_UNARY,
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'google.datastore.v1.Datastore', cardinalities, options=stub_options)
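# Beta usage sketch (an assumption; this path is deprecated in favour of the
# GA stub above and is kept only for grpcio<0.15.0 migrations; host and port
# are hypothetical):
#
#   beta_channel = beta_implementations.insecure_channel('localhost', 8081)
#   beta_stub = beta_create_Datastore_stub(beta_channel)
#   beta_stub.Lookup(LookupRequest(project_id='my-project'), timeout=10)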
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
| bsd-3-clause |