| code (string, 3–1.05M chars) | repo_name (string, 5–104 chars) | path (string, 4–251 chars) | language (1 value) | license (15 values) | size (int64, 3–1.05M) |
|---|---|---|---|---|---|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
from __future__ import absolute_import
from .adaptation import PulseAudioDriver
import pyglet
_debug = pyglet.options['debug_media']
def create_audio_driver():
driver = PulseAudioDriver()
driver.connect()
if _debug:
driver.dump_debug_info()
return driver
| nicememory/pie | pyglet/pyglet/media/drivers/pulse/__init__.py | Python | apache-2.0 | 1,997 |
from functools import wraps
from django.utils.decorators import decorator_from_middleware_with_args, available_attrs
from django.utils.cache import patch_cache_control, add_never_cache_headers
from django.middleware.cache import CacheMiddleware
def cache_page(*args, **kwargs):
"""
Decorator for views that tries getting the page from the cache and
populates the cache if the page isn't in the cache yet.
The cache is keyed by the URL and some data from the headers.
Additionally there is the key prefix that is used to distinguish different
cache areas in a multi-site setup. You could use the
get_current_site().domain, for example, as that is unique across a Django
project.
Additionally, all headers from the response's Vary header will be taken
into account on caching -- just like the middleware does.
"""
# We also add some asserts to give better error messages in case people are
# using other ways to call cache_page that no longer work.
if len(args) != 1 or callable(args[0]):
raise TypeError("cache_page has a single mandatory positional argument: timeout")
cache_timeout = args[0]
cache_alias = kwargs.pop('cache', None)
key_prefix = kwargs.pop('key_prefix', None)
if kwargs:
raise TypeError("cache_page has two optional keyword arguments: cache and key_prefix")
return decorator_from_middleware_with_args(CacheMiddleware)(cache_timeout=cache_timeout, cache_alias=cache_alias, key_prefix=key_prefix)
def cache_control(**kwargs):
def _cache_controller(viewfunc):
@wraps(viewfunc, assigned=available_attrs(viewfunc))
def _cache_controlled(request, *args, **kw):
response = viewfunc(request, *args, **kw)
patch_cache_control(response, **kwargs)
return response
return _cache_controlled
return _cache_controller
def never_cache(view_func):
"""
Decorator that adds headers to a response so that it will
never be cached.
"""
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view_func(request, *args, **kwargs):
response = view_func(request, *args, **kwargs)
add_never_cache_headers(response)
return response
return _wrapped_view_func
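# A minimal usage sketch of the three decorators above (the view functions
# are hypothetical, not part of this module):
#
#     @cache_page(60 * 15, key_prefix='site1')
#     def front_page(request):
#         ...
#
#     @cache_control(max_age=3600, public=True)
#     def article_detail(request, pk):
#         ...
#
#     @never_cache
#     def dashboard(request):
#         ...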
| 912/M-new | virtualenvironment/experimental/lib/python2.7/site-packages/django/views/decorators/cache.py | Python | gpl-2.0 | 2,280 |
# jsonclient.py
# A simple JSONRPC client library, created to work with Go servers
# Written by Stephen Day
# Modified by Bruce Eckel to work with both Python 2 & 3
import json, socket, itertools, time
from datetime import datetime
class JSONClient(object):
def __init__(self, addr, codec=json):
self._socket = socket.create_connection(addr)
self._id_iter = itertools.count()
self._codec = codec
def _message(self, name, *params):
return dict(id=next(self._id_iter),
params=list(params),
method=name)
def call(self, name, *params):
request = self._message(name, *params)
id = request.get('id')
msg = self._codec.dumps(request)
self._socket.sendall(msg.encode())
# This will actually have to loop if resp is bigger
response = self._socket.recv(4096)
response = self._codec.loads(response.decode())
if response.get('id') != id:
raise Exception("expected id=%s, received id=%s: %s"
%(id, response.get('id'),
response.get('error')))
if response.get('error') is not None:
raise Exception(response.get('error'))
return response.get('result')
def close(self):
self._socket.close()
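    # A sketch of a sturdier receive path for replies larger than 4096 bytes
    # (recv above reads only once): accumulate chunks until the buffer parses
    # as one complete JSON document, assuming the server sends exactly one
    # JSON object per reply.
    #
    # def _recv_json(self):
    #     buf = b''
    #     while True:
    #         chunk = self._socket.recv(4096)
    #         if not chunk:
    #             raise Exception("connection closed by server")
    #         buf += chunk
    #         try:
    #             return self._codec.loads(buf.decode())
    #         except ValueError:
    #             continue  # partial message, keep reading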
rpc = JSONClient(("127.0.0.1", 2012))
cd = {"ToR":"*voice",
"Category": "call",
"Tenant": "cgrates.org",
"Subject": "1001",
"Destination": "1002",
"TimeStart": "2014-04-03T11:12:23.190554134+02:00",
"TimeEnd": "2014-04-03T11:13:23.190554134+02:00",
"CallDuration": 60000000000,
}
# alternative to the above
#s = socket.create_connection(("127.0.0.1", 2012))
#s.sendall(json.dumps({"id": 1, "method": "Responder.GetCost", "params": [cd]}))
#print(s.recv(4096))
start_time = time.time()
i = 0
runs = 1e4
result = ""
for i in range(int(runs) + 1):
result = rpc.call("Responder.GetCost", cd)
print(i, result)
duration = time.time() - start_time
print("Elapsed: %ds, throughput: %d req/s." % (duration, runs / duration))
| cgrates/cgrates | data/tester/cgr-tester.py | Python | gpl-3.0 | 2,102 |
from apps.donations.models import MonthlyBatch
from django.utils.timezone import now, timedelta
class MonthlyBatchService(object):
    def __init__(self, date=None):
        batches = MonthlyBatch.objects.order_by('-date')
        if batches.count():
            last_batch = batches.all()[0]
        else:
            last_batch = None
        if date:
            self.batch, created = MonthlyBatch.objects.get_or_create(date=date)
            if created:
                self.generate_donations()
        elif last_batch and last_batch.date > (now() - timedelta(days=10)):
            # Reuse the most recent batch if it is less than ten days old.
            self.batch = last_batch
        else:
            # No explicit date and no recent batch: start a new batch dated now.
            # (The original passed date=date here, which is None on this path.)
            self.batch = MonthlyBatch.objects.create(date=now())

    def generate_donations(self):
        # The original body was a bare `self.batch` expression, a no-op;
        # donation generation for the new batch is still to be implemented.
        pass
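# A minimal usage sketch (the date value is illustrative):
#
#     service = MonthlyBatchService(date=now().date())
#     batch = service.batch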
| jfterpstra/bluebottle | bluebottle/recurring_donations/service.py | Python | bsd-3-clause | 759 |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Mapper for fusing spatial distance matrix to connectivity-based feature matrix.
Author: kongxiangzheng@gmail.com
Date: 07/23/2012
Editors: [plz add own name after edit here]
"""
__docformat__ = 'restructuredtext'
import numpy as np
from scipy.sparse import issparse
from mvpa2.mappers.base import Mapper
from mvpa2.datasets.base import Dataset
class FuseConstrainMapper(Mapper):
"""Mapper to add spatial distance matrix to connectivity-based feature matrix.
"""
def __init__(self, distmask=None, **kwargs):
"""
parameters
----------
distmask : ndarray-like matrix or sparse matrix, or a dataset.
The distmask of voxels to present their neighbors.
Usually we do not set it.
"""
Mapper.__init__(self, **kwargs)
self.__distmask = distmask
    def forward_dataset(self, cc_ds, dist_ds, con=0.2):
        # cc = cross-correlation
        out_ds = cc_ds.copy(sa=[])
        ccmtx = cc_ds.samples
        distmtx = dist_ds.samples
        if ccmtx.shape != distmtx.shape:
            raise ValueError('cc matrix and distance matrix must have the same shape')
        if self.__distmask is not None:
            if isinstance(self.__distmask, Dataset):
                distmask = self.__distmask.samples
                if issparse(distmask):
                    distmask = distmask.todense()
            elif issparse(self.__distmask):
                distmask = self.__distmask.todense()
            elif isinstance(self.__distmask, np.ndarray):
                distmask = self.__distmask
            else:
                raise RuntimeError('distmask should be a matrix or a dataset holding a matrix.')
            # Apply the mask before merging so it actually affects the output.
            distmtx = distmtx * distmask
        mergedmtx = self._tomergemtx(ccmtx, distmtx, con)
        out_ds.samples = mergedmtx
        return out_ds
def _tomergemtx(self, ccmtx, distmtx, con):
""" Ref.
Rogier B. Mars' Diffusion-Weighted Imaging Tractography-Based Parcellation
"""
distmtx = distmtx.max() - distmtx
ccmtx = ccmtx.max() - ccmtx
mindist = distmtx.min()
maxdist = distmtx.max()
mincc = ccmtx.min()
maxcc = ccmtx.max()
distmtx = ((distmtx-mindist)/(maxdist-mindist))*(maxcc-mincc)+mincc
print 'factor: ', con
ccmtx = np.sqrt((1-con)*np.dot(ccmtx.T,ccmtx)+con*np.dot(distmtx.T,distmtx))
return ccmtx
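# A small standalone check of the rescaling step in _tomergemtx: one matrix is
# mapped onto the value range of another before fusing (numbers are made up).
#
#     import numpy as np
#     a = np.array([[0., 5.], [10., 15.]])
#     lo, hi = 2.0, 4.0  # target range, e.g. mincc and maxcc
#     r = (a - a.min()) / (a.max() - a.min()) * (hi - lo) + lo
#     assert r.min() == lo and r.max() == hi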
| BNUCNL/FreeROI | froi/algorithm/unused/fuseconstrainmapper.py | Python | bsd-3-clause | 2,646 |
import pytest
from numpy.testing import assert_array_equal
from landlab import RasterModelGrid
from landlab.io.netcdf import from_netcdf, to_netcdf
@pytest.mark.parametrize("include", ((), [], set(), None))
def test_include_keyword_is_empty(tmpdir, format, include):
grid = RasterModelGrid((4, 3), xy_spacing=(2, 5), xy_of_lower_left=(-2.0, 10.0))
grid.add_ones("elev", at="node")
grid.add_zeros("elev", at="link")
grid.add_empty("temp", at="node")
with tmpdir.as_cwd():
to_netcdf(grid, "test.nc", format=format)
actual = from_netcdf("test.nc", include=include)
assert len(actual.at_node) == 0
assert len(actual.at_link) == 0
@pytest.mark.parametrize("include", ("*", ("*",), ("at_node:*", "at_link:*")))
@pytest.mark.parametrize("exclude", (None, ()))
def test_include_everything(tmpdir, format, include, exclude):
grid = RasterModelGrid((4, 3), xy_spacing=(2, 5), xy_of_lower_left=(-2.0, 10.0))
grid.add_ones("elev", at="node")
grid.add_zeros("elev", at="link")
grid.add_empty("temp", at="node")
with tmpdir.as_cwd():
to_netcdf(grid, "test.nc", format=format)
        actual = from_netcdf("test.nc", include=include, exclude=exclude)
assert set(actual.at_node) == set(["elev", "temp"])
assert set(actual.at_link) == set(["elev"])
@pytest.mark.parametrize(
    "include,exclude", [("*", "*"), (None, None), ([], None)]
)
def test_exclude_everything(tmpdir, format, include, exclude):
grid = RasterModelGrid((4, 3), xy_spacing=(2, 5), xy_of_lower_left=(-2.0, 10.0))
grid.add_ones("elev", at="node")
grid.add_zeros("elev", at="link")
grid.add_empty("temp", at="node")
with tmpdir.as_cwd():
to_netcdf(grid, "test.nc", format=format)
actual = from_netcdf("test.nc", include=include, exclude=exclude)
assert len(actual.at_node) == 0
assert len(actual.at_link) == 0
@pytest.mark.parametrize(
"grid_type", ["HexModelGrid", "RadialModelGrid", "RasterModelGrid"]
)
def test_from_grid(datadir, grid_type):
grid = from_netcdf(datadir / "test-{0}.nc".format(grid_type))
assert grid.__class__.__name__ == grid_type
assert_array_equal(grid.at_node["elev"], 1.0)
assert_array_equal(grid.at_node["temp"], 1.0)
assert_array_equal(grid.at_link["elev"], 0.0)
| cmshobe/landlab | tests/io/netcdf/test_from_netcdf.py | Python | mit | 2,314 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from PyQt4 import QtGui
from PyQt4 import QtCore
from logindialog import login
from exitdialog import exit
from msgdialog import MessageDialog
from msgdialog import msg
from ipaddressdialog import ipaddressinput
from urlinputdialog import urlinput
from numinputdialog import numinput
from confirmdialog import confirm
from confirmdialog import ConfirmDialog
from basedialog import DynamicTextWidget
__version__ = '0.1.0'
__all__ = ['DynamicTextWidget', 'ConfirmDialog', 'MessageDialog', 'login', 'exit', 'msg', 'ipaddressinput', 'urlinput', 'numinput', 'confirm']
__author__ = 'dragondjf(dragondjf@gmail.com)'
| jacklee0810/QMarkdowner | utildialog/__init__.py | Python | mit | 656 |
"""
Support to interface with Sonos players (via SoCo).
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.sonos/
"""
import datetime
import logging
from os import path
import socket
import urllib
import voluptuous as vol
from homeassistant.components.media_player import (
ATTR_MEDIA_ENQUEUE, DOMAIN, MEDIA_TYPE_MUSIC, SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE, SUPPORT_PLAY_MEDIA, SUPPORT_PREVIOUS_TRACK, SUPPORT_SEEK,
SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET, SUPPORT_CLEAR_PLAYLIST,
SUPPORT_SELECT_SOURCE, MediaPlayerDevice)
from homeassistant.const import (
STATE_IDLE, STATE_PAUSED, STATE_PLAYING, STATE_UNKNOWN, STATE_OFF,
ATTR_ENTITY_ID)
from homeassistant.config import load_yaml_config_file
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['SoCo==0.12']
_LOGGER = logging.getLogger(__name__)
# The soco library is excessively chatty when it comes to logging and
# causes a LOT of spam in the logs due to making a http connection to each
# speaker every 10 seconds. Quiet it down a bit to just actual problems.
_SOCO_LOGGER = logging.getLogger('soco')
_SOCO_LOGGER.setLevel(logging.ERROR)
_REQUESTS_LOGGER = logging.getLogger('requests')
_REQUESTS_LOGGER.setLevel(logging.ERROR)
SUPPORT_SONOS = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE |\
SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | SUPPORT_PLAY_MEDIA |\
SUPPORT_SEEK | SUPPORT_CLEAR_PLAYLIST | SUPPORT_SELECT_SOURCE
SERVICE_GROUP_PLAYERS = 'sonos_group_players'
SERVICE_UNJOIN = 'sonos_unjoin'
SERVICE_SNAPSHOT = 'sonos_snapshot'
SERVICE_RESTORE = 'sonos_restore'
SERVICE_SET_TIMER = 'sonos_set_sleep_timer'
SERVICE_CLEAR_TIMER = 'sonos_clear_sleep_timer'
SUPPORT_SOURCE_LINEIN = 'Line-in'
SUPPORT_SOURCE_TV = 'TV'
# Service call validation schemas
ATTR_SLEEP_TIME = 'sleep_time'
SONOS_SCHEMA = vol.Schema({
ATTR_ENTITY_ID: cv.entity_ids,
})
SONOS_SET_TIMER_SCHEMA = SONOS_SCHEMA.extend({
vol.Required(ATTR_SLEEP_TIME): vol.All(vol.Coerce(int),
vol.Range(min=0, max=86399))
})
# List of devices that have been registered
DEVICES = []
# pylint: disable=unused-argument, too-many-locals
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Sonos platform."""
import soco
global DEVICES
if discovery_info:
player = soco.SoCo(discovery_info)
        # if device already exists from config
if player.uid in DEVICES:
return True
if player.is_visible:
device = SonosDevice(hass, player)
add_devices([device])
if not DEVICES:
register_services(hass)
DEVICES.append(device)
return True
return False
players = None
hosts = config.get('hosts', None)
if hosts:
        # Support backward compatibility with a comma-separated list of hosts
        # from config
hosts = hosts.split(',') if isinstance(hosts, str) else hosts
players = []
for host in hosts:
players.append(soco.SoCo(socket.gethostbyname(host)))
if not players:
players = soco.discover(interface_addr=config.get('interface_addr',
None))
if not players:
_LOGGER.warning('No Sonos speakers found.')
return False
DEVICES = [SonosDevice(hass, p) for p in players]
add_devices(DEVICES)
register_services(hass)
_LOGGER.info('Added %s Sonos speakers', len(players))
return True
def register_services(hass):
"""Register all services for sonos devices."""
descriptions = load_yaml_config_file(
path.join(path.dirname(__file__), 'services.yaml'))
hass.services.register(DOMAIN, SERVICE_GROUP_PLAYERS,
_group_players_service,
descriptions.get(SERVICE_GROUP_PLAYERS),
schema=SONOS_SCHEMA)
hass.services.register(DOMAIN, SERVICE_UNJOIN,
_unjoin_service,
descriptions.get(SERVICE_UNJOIN),
schema=SONOS_SCHEMA)
hass.services.register(DOMAIN, SERVICE_SNAPSHOT,
_snapshot_service,
descriptions.get(SERVICE_SNAPSHOT),
schema=SONOS_SCHEMA)
hass.services.register(DOMAIN, SERVICE_RESTORE,
_restore_service,
descriptions.get(SERVICE_RESTORE),
schema=SONOS_SCHEMA)
hass.services.register(DOMAIN, SERVICE_SET_TIMER,
_set_sleep_timer_service,
descriptions.get(SERVICE_SET_TIMER),
schema=SONOS_SET_TIMER_SCHEMA)
hass.services.register(DOMAIN, SERVICE_CLEAR_TIMER,
_clear_sleep_timer_service,
descriptions.get(SERVICE_CLEAR_TIMER),
schema=SONOS_SCHEMA)
def _apply_service(service, service_func, *service_func_args):
"""Internal func for applying a service."""
entity_ids = service.data.get('entity_id')
if entity_ids:
_devices = [device for device in DEVICES
if device.entity_id in entity_ids]
else:
_devices = DEVICES
for device in _devices:
service_func(device, *service_func_args)
device.update_ha_state(True)
def _group_players_service(service):
"""Group media players, use player as coordinator."""
_apply_service(service, SonosDevice.group_players)
def _unjoin_service(service):
"""Unjoin the player from a group."""
_apply_service(service, SonosDevice.unjoin)
def _snapshot_service(service):
"""Take a snapshot."""
_apply_service(service, SonosDevice.snapshot)
def _restore_service(service):
"""Restore a snapshot."""
_apply_service(service, SonosDevice.restore)
def _set_sleep_timer_service(service):
"""Set a timer."""
_apply_service(service,
SonosDevice.set_sleep_timer,
service.data[ATTR_SLEEP_TIME])
def _clear_sleep_timer_service(service):
"""Set a timer."""
_apply_service(service,
SonosDevice.clear_sleep_timer)
def only_if_coordinator(func):
"""Decorator for coordinator.
If used as decorator, avoid calling the decorated method if player is not
a coordinator. If not, a grouped speaker (not in coordinator role) will
throw soco.exceptions.SoCoSlaveException.
Also, partially catch exceptions like:
soco.exceptions.SoCoUPnPException: UPnP Error 701 received:
Transition not available from <player ip address>
"""
def wrapper(*args, **kwargs):
"""Decorator wrapper."""
if args[0].is_coordinator:
from soco.exceptions import SoCoUPnPException
try:
func(*args, **kwargs)
except SoCoUPnPException:
_LOGGER.error('command "%s" for Sonos device "%s" '
'not available in this mode',
func.__name__, args[0].name)
else:
_LOGGER.debug('Ignore command "%s" for Sonos device "%s" (%s)',
func.__name__, args[0].name, 'not coordinator')
return wrapper
# pylint: disable=too-many-instance-attributes, too-many-public-methods
# pylint: disable=abstract-method
class SonosDevice(MediaPlayerDevice):
"""Representation of a Sonos device."""
# pylint: disable=too-many-arguments
def __init__(self, hass, player):
"""Initialize the Sonos device."""
from soco.snapshot import Snapshot
self.hass = hass
self.volume_increment = 5
self._player = player
self._speaker_info = None
self._name = None
self._coordinator = None
self._media_content_id = None
self._media_duration = None
self._media_image_url = None
self._media_artist = None
self._media_album_name = None
self._media_title = None
self.update()
self.soco_snapshot = Snapshot(self._player)
@property
def should_poll(self):
"""Polling needed."""
return True
def update_sonos(self, now):
"""Update state, called by track_utc_time_change."""
self.update_ha_state(True)
@property
def unique_id(self):
"""Return an unique ID."""
return self._player.uid
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
if self._status == 'PAUSED_PLAYBACK':
return STATE_PAUSED
if self._status == 'PLAYING':
return STATE_PLAYING
if self._status == 'STOPPED':
return STATE_IDLE
if self._status == 'OFF':
return STATE_OFF
return STATE_UNKNOWN
@property
def is_coordinator(self):
"""Return true if player is a coordinator."""
return self._player.is_coordinator
def update(self):
"""Retrieve latest state."""
self._speaker_info = self._player.get_speaker_info()
self._name = self._speaker_info['zone_name'].replace(
' (R)', '').replace(' (L)', '')
if self.available:
self._status = self._player.get_current_transport_info().get(
'current_transport_state')
trackinfo = self._player.get_current_track_info()
if trackinfo['uri'].startswith('x-rincon:'):
# this speaker is a slave, find the coordinator
# the uri of the track is 'x-rincon:{coordinator-id}'
coordinator_id = trackinfo['uri'][9:]
coordinators = [device for device in DEVICES
if device.unique_id == coordinator_id]
self._coordinator = coordinators[0] if coordinators else None
else:
self._coordinator = None
if not self._coordinator:
mediainfo = self._player.avTransport.GetMediaInfo([
('InstanceID', 0)
])
duration = trackinfo.get('duration', '0:00')
# if the speaker is playing from the "line-in" source, getting
# track metadata can return NOT_IMPLEMENTED, which breaks the
# volume logic below
if duration == 'NOT_IMPLEMENTED':
duration = None
else:
duration = sum(60 ** x[0] * int(x[1]) for x in enumerate(
reversed(duration.split(':'))))
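                    # e.g. '0:03:14' -> 14*1 + 3*60 + 0*3600 = 194 seconds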
media_image_url = trackinfo.get('album_art', None)
media_artist = trackinfo.get('artist', None)
media_album_name = trackinfo.get('album', None)
media_title = trackinfo.get('title', None)
if media_image_url in ('', 'NOT_IMPLEMENTED', None):
# fallback to asking the speaker directly
media_image_url = \
'http://{host}:{port}/getaa?s=1&u={uri}'.format(
host=self._player.ip_address,
port=1400,
uri=urllib.parse.quote(mediainfo['CurrentURI'])
)
if media_artist in ('', 'NOT_IMPLEMENTED', None):
# if listening to a radio stream the media_artist field
# will be empty and the title field will contain the
# filename that is being streamed
current_uri_metadata = mediainfo["CurrentURIMetaData"]
if current_uri_metadata not in \
('', 'NOT_IMPLEMENTED', None):
# currently soco does not have an API for this
import soco
current_uri_metadata = soco.xml.XML.fromstring(
soco.utils.really_utf8(current_uri_metadata))
md_title = current_uri_metadata.findtext(
'.//{http://purl.org/dc/elements/1.1/}title')
if md_title not in ('', 'NOT_IMPLEMENTED', None):
media_artist = ''
media_title = md_title
self._media_content_id = trackinfo.get('title', None)
self._media_duration = duration
self._media_image_url = media_image_url
self._media_artist = media_artist
self._media_album_name = media_album_name
self._media_title = media_title
else:
self._status = 'OFF'
self._coordinator = None
self._media_content_id = None
self._media_duration = None
self._media_image_url = None
self._media_artist = None
self._media_album_name = None
self._media_title = None
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._player.volume / 100.0
@property
def is_volume_muted(self):
"""Return true if volume is muted."""
return self._player.mute
@property
def media_content_id(self):
"""Content ID of current playing media."""
if self._coordinator:
return self._coordinator.media_content_id
else:
return self._media_content_id
@property
def media_content_type(self):
"""Content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
if self._coordinator:
return self._coordinator.media_duration
else:
return self._media_duration
@property
def media_image_url(self):
"""Image url of current playing media."""
if self._coordinator:
return self._coordinator.media_image_url
else:
return self._media_image_url
@property
def media_artist(self):
"""Artist of current playing media, music track only."""
if self._coordinator:
return self._coordinator.media_artist
else:
return self._media_artist
@property
def media_album_name(self):
"""Album name of current playing media, music track only."""
if self._coordinator:
return self._coordinator.media_album_name
else:
return self._media_album_name
@property
def media_title(self):
"""Title of current playing media."""
if self._player.is_playing_line_in:
return SUPPORT_SOURCE_LINEIN
if self._player.is_playing_tv:
return SUPPORT_SOURCE_TV
if self._coordinator:
return self._coordinator.media_title
else:
return self._media_title
@property
def supported_media_commands(self):
"""Flag of media commands that are supported."""
if not self.source_list:
# some devices do not allow source selection
return SUPPORT_SONOS ^ SUPPORT_SELECT_SOURCE
return SUPPORT_SONOS
def volume_up(self):
"""Volume up media player."""
self._player.volume += self.volume_increment
def volume_down(self):
"""Volume down media player."""
self._player.volume -= self.volume_increment
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self._player.volume = str(int(volume * 100))
def mute_volume(self, mute):
"""Mute (true) or unmute (false) media player."""
self._player.mute = mute
def select_source(self, source):
"""Select input source."""
if source == SUPPORT_SOURCE_LINEIN:
self._player.switch_to_line_in()
elif source == SUPPORT_SOURCE_TV:
self._player.switch_to_tv()
@property
def source_list(self):
"""List of available input sources."""
model_name = self._speaker_info['model_name']
if 'PLAY:5' in model_name:
return [SUPPORT_SOURCE_LINEIN]
elif 'PLAYBAR' in model_name:
return [SUPPORT_SOURCE_LINEIN, SUPPORT_SOURCE_TV]
@property
def source(self):
"""Name of the current input source."""
if self._player.is_playing_line_in:
return SUPPORT_SOURCE_LINEIN
if self._player.is_playing_tv:
return SUPPORT_SOURCE_TV
return None
@only_if_coordinator
def turn_off(self):
"""Turn off media player."""
self._player.pause()
def media_play(self):
"""Send play command."""
if self._coordinator:
self._coordinator.media_play()
else:
self._player.play()
def media_pause(self):
"""Send pause command."""
if self._coordinator:
self._coordinator.media_pause()
else:
self._player.pause()
def media_next_track(self):
"""Send next track command."""
if self._coordinator:
self._coordinator.media_next_track()
else:
self._player.next()
def media_previous_track(self):
"""Send next track command."""
if self._coordinator:
self._coordinator.media_previous_track()
else:
self._player.previous()
def media_seek(self, position):
"""Send seek command."""
if self._coordinator:
self._coordinator.media_seek(position)
else:
self._player.seek(str(datetime.timedelta(seconds=int(position))))
def clear_playlist(self):
"""Clear players playlist."""
if self._coordinator:
self._coordinator.clear_playlist()
else:
self._player.clear_queue()
@only_if_coordinator
def turn_on(self):
"""Turn the media player on."""
self._player.play()
def play_media(self, media_type, media_id, **kwargs):
"""
Send the play_media command to the media player.
If ATTR_MEDIA_ENQUEUE is True, add `media_id` to the queue.
"""
if self._coordinator:
self._coordinator.play_media(media_type, media_id, **kwargs)
else:
if kwargs.get(ATTR_MEDIA_ENQUEUE):
from soco.exceptions import SoCoUPnPException
try:
self._player.add_uri_to_queue(media_id)
except SoCoUPnPException:
_LOGGER.error('Error parsing media uri "%s", '
"please check it's a valid media resource "
'supported by Sonos', media_id)
else:
self._player.play_uri(media_id)
def group_players(self):
"""Group all players under this coordinator."""
if self._coordinator:
self._coordinator.group_players()
else:
self._player.partymode()
@only_if_coordinator
def unjoin(self):
"""Unjoin the player from a group."""
self._player.unjoin()
@only_if_coordinator
def snapshot(self):
"""Snapshot the player."""
self.soco_snapshot.snapshot()
@only_if_coordinator
def restore(self):
"""Restore snapshot for the player."""
self.soco_snapshot.restore(True)
@only_if_coordinator
def set_sleep_timer(self, sleep_time):
"""Set the timer on the player."""
self._player.set_sleep_timer(sleep_time)
@only_if_coordinator
def clear_sleep_timer(self):
"""Clear the timer on the player."""
self._player.set_sleep_timer(None)
@property
def available(self):
"""Return True if player is reachable, False otherwise."""
try:
sock = socket.create_connection(
address=(self._player.ip_address, 1443),
timeout=3)
sock.close()
return True
except socket.error:
return False
| betrisey/home-assistant | homeassistant/components/media_player/sonos.py | Python | mit | 20,355 |
"""initial migration
Revision ID: 3277cb11e991
Revises: None
Create Date: 2015-05-10 08:39:17.826382
"""
# revision identifiers, used by Alembic.
revision = '3277cb11e991'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('email', sa.String(length=64), nullable=True),
sa.Column('username', sa.String(length=64), nullable=True),
sa.Column('password_hash', sa.String(length=128), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
op.create_index(op.f('ix_users_username'), 'users', ['username'], unique=True)
op.create_table('posts',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('body', sa.Text(), nullable=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.Column('author_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['author_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_posts_timestamp'), 'posts', ['timestamp'], unique=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_posts_timestamp'), table_name='posts')
op.drop_table('posts')
op.drop_index(op.f('ix_users_username'), table_name='users')
op.drop_index(op.f('ix_users_email'), table_name='users')
op.drop_table('users')
### end Alembic commands ###
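# With a plain Alembic setup this revision is applied with
# `alembic upgrade 3277cb11e991` (or `alembic upgrade head`) and reverted with
# `alembic downgrade -1`.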
| Pritesh242/python | neo1218/0023/web/migrations/versions/3277cb11e991_initial_migration.py | Python | mit | 1,626 |
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'textthem.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
)
| dkua/textthem | textthem/urls.py | Python | mit | 299 |
from django.apps import AppConfig
class LocalidadesConfig(AppConfig):
name = 'localidades'
| rafaelferrero/sigcaw | localidades/apps.py | Python | gpl-3.0 | 97 |
#!/usr/bin/python
def app(environ, start_response):
request = environ['QUERY_STRING']
start_response("200 OK", [
("Content-Type", "text/plain"),
])
    return [request.replace('&', '\n')]
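# A minimal way to serve this app locally for testing, using the standard
# library's wsgiref (not part of the original script):
#
#     from wsgiref.simple_server import make_server
#     make_server('localhost', 8080, app).serve_forever()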
| smartybit/stepic_webtech1 | web/hello.py | Python | mit | 221 |
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation
# Snapshot configured backups
# Meant to be run once/day
# Each run creates a snapshot of indexname-epochtimestamp
# .conf file will determine what indexes are operated on
# Create a starter .conf file with backupDiscover.py
# You must create the s3 bucket (options.aws_bucket) first paying attention to
# the region assigned to the bucket.
# Snapshots will be placed in:
# options.aws_bucket/elasticsearch/YYYY-MM/servername/indices/indexname
import sys
import os
from datetime import datetime
from datetime import timedelta
from datetime import date
from configlib import getConfig, OptionParser
import calendar
import socket
import boto
import boto.s3
import requests
import json
from os.path import expanduser
from mozdef_util.utilities.logger import logger
def main():
logger.debug('started')
try:
esserver = options.esservers[0]
s3 = boto.connect_s3(
aws_access_key_id=options.aws_access_key_id,
aws_secret_access_key=options.aws_secret_access_key
)
idate = date.strftime(datetime.utcnow() - timedelta(days=1), '%Y%m%d')
bucketdate = date.strftime(datetime.utcnow() - timedelta(days=1), '%Y-%m')
hostname = socket.gethostname()
# Create or update snapshot configuration
logger.debug('Configuring snapshot repository')
snapshot_config = {
"type": "s3",
"settings": {
"bucket": options.aws_bucket,
"base_path": "elasticsearch/{0}/{1}".format(bucketdate, hostname),
"region": "{0}".format(options.aws_region)
}
}
r = requests.put('%s/_snapshot/s3backup' % esserver, data=json.dumps(snapshot_config))
if 'status' in r.json():
logger.error("Error while registering snapshot repo: %s" % r.text)
else:
logger.debug('snapshot repo registered')
# do the actual snapshotting
for (index, dobackup, rotation, pruning) in zip(options.indices, options.dobackup, options.rotation, options.pruning):
if dobackup == '1':
index_to_snapshot = index
if rotation == 'daily':
index_to_snapshot += '-%s' % idate
elif rotation == 'monthly':
index_to_snapshot += '-%s' % idate[:6]
logger.debug('Creating %s snapshot (this may take a while)...' % index_to_snapshot)
snapshot_config = {
'indices': index_to_snapshot
}
epoch = calendar.timegm(datetime.utcnow().utctimetuple())
r = requests.put(
'{0}/_snapshot/s3backup/{1}-{2}?wait_for_completion=true'.format(esserver, index_to_snapshot, epoch),
data=json.dumps(snapshot_config)
)
if 'status' in r.json():
logger.error('Error snapshotting %s: %s' % (index_to_snapshot, r.json()))
else:
logger.debug('snapshot %s finished' % index_to_snapshot)
# create a restore script
# referencing the latest snapshot
localpath = '%s/%s-restore.sh' % (expanduser("~"), index)
with open(localpath, 'w') as f:
logger.debug('Writing %s' % localpath)
f.write("""
#!/bin/bash
echo -n "Restoring the snapshot..."
curl -s -XPOST "%s/_snapshot/s3backup/%s-%s/_restore?wait_for_completion=true"
echo "DONE!"
""" % (esserver, index_to_snapshot, epoch))
# upload the restore script
bucket = s3.get_bucket(options.aws_bucket)
key = bucket.new_key('elasticsearch/%s/%s/%s-%s-%s-restore.sh' % (
bucketdate, hostname, index, idate, epoch))
key.set_contents_from_filename(localpath)
# removing local file
os.remove(localpath)
except boto.exception.NoAuthHandlerFound:
logger.error("No auth handler found, check your credentials")
except Exception as e:
logger.error("Unhandled exception, terminating: %r" % e)
def initConfig():
# output our log to stdout or syslog
options.output = getConfig(
'output',
'stdout',
options.configfile
)
# syslog hostname
options.sysloghostname = getConfig(
'sysloghostname',
'localhost',
options.configfile
)
options.syslogport = getConfig(
'syslogport',
514,
options.configfile
)
options.esservers = list(getConfig(
'esservers',
'http://localhost:9200',
options.configfile).split(',')
)
options.indices = list(getConfig(
'backup_indices',
'events,alerts,.kibana',
options.configfile).split(',')
)
options.dobackup = list(getConfig(
'backup_dobackup',
'1,1,1',
options.configfile).split(',')
)
options.rotation = list(getConfig(
'backup_rotation',
'daily,monthly,none',
options.configfile).split(',')
)
options.pruning = list(getConfig(
'backup_pruning',
'20,0,0',
options.configfile).split(',')
)
# aws credentials to use to send files to s3
options.aws_access_key_id = getConfig(
'aws_access_key_id',
'',
options.configfile
)
options.aws_secret_access_key = getConfig(
'aws_secret_access_key',
'',
options.configfile
)
options.aws_region = getConfig(
'aws_region',
'us-west-1',
options.configfile
)
options.aws_bucket = getConfig(
'aws_bucket',
'',
options.configfile
)
if __name__ == '__main__':
parser = OptionParser()
defaultconfigfile = sys.argv[0].replace('.py', '.conf')
parser.add_option("-c",
dest='configfile',
default=defaultconfigfile,
help="configuration file to use")
(options, args) = parser.parse_args()
initConfig()
main()
| Phrozyn/MozDef | cron/backupSnapshot.py | Python | mpl-2.0 | 6,390 |
__author__ = 'renhao.cui'
import utilities
import operator
def normalization(inputList):
    outputList = []
    # float() guards against integer division under Python 2
    total = float(sum(inputList))
    for score in inputList:
        outputList.append(score / total)
    return outputList
def mappingTrainer(labelSet1, labelSet2, limit):
set2 = []
for sets in labelSet2:
seg = sets.split(' ')
set2.append(seg)
corpus1 = [] #alchemy
corpus2 = [] #keyword
trainSet1 = []
trainSet2 = []
for topics in labelSet1:
trainSet1.append(topics)
for topic in topics:
if topic not in corpus1:
corpus1.append(topic)
for topics in set2:
trainSet2.append(topics)
for topic in topics:
if topic not in corpus2:
corpus2.append(topic)
(confScore, model, cand) = utilities.generateModel(corpus1, corpus2, trainSet1, trainSet2)
outputModel = utilities.filterModel(confScore, limit)
return outputModel, cand
def mappingTrainer2(labelSet1, labelSet2, limit):
set2 = []
for sets in labelSet2:
seg = sets.split(' ')
set2.append(seg)
corpus1 = [] # from domain
corpus2 = [] # to domain
trainSet1 = []
trainSet2 = []
for topics in labelSet1:
trainSet1.append(topics)
for topic in topics:
if topic not in corpus1:
corpus1.append(topic)
for topics in set2:
trainSet2.append(topics)
for topic in topics:
if topic not in corpus2:
corpus2.append(topic)
(confScore, model, cand, candProb) = utilities.generateModel(corpus1, corpus2, trainSet1, trainSet2)
outputModel = utilities.filterModel(confScore, limit)
return model, cand, candProb
def mappingTrainer3(labelSet1, labelSet2):
set2 = []
for sets in labelSet2:
seg = sets.split(' ')
set2.append(seg)
corpus1 = [] # from domain
corpus2 = [] # to domain
trainSet1 = []
trainSet2 = []
for topics in labelSet1:
topic = sorted(topics.items(), key=operator.itemgetter(1))[2][0]
topicList = [topic]
trainSet1.append(topicList)
for topic in topicList:
if topic not in corpus1:
corpus1.append(topic)
for topics in set2:
trainSet2.append(topics)
for topic in topics:
if topic not in corpus2:
corpus2.append(topic)
(confScore, model, cand, candProb) = utilities.generateModel2(corpus1, corpus2, trainSet1, trainSet2)
return model, cand, candProb
def mappingTrainer4(alchemyLabels, keywordLabels):
alchemyCorpus = [] # from domain
keywordCorpus = [] # to domain
alchemySet = []
keywordSet = []
for label in alchemyLabels:
tempList = []
for (topic, prob) in label.items():
if topic not in alchemyCorpus:
alchemyCorpus.append(topic)
tempList.append(prob)
normList = normalization(tempList)
tempDict = {}
for index, (topic, prob) in enumerate(label.items()):
tempDict[topic] = normList[index]
alchemySet.append(tempDict)
for label in keywordLabels:
if label not in keywordCorpus:
keywordCorpus.append(label)
keywordSet.append({label: 1.0})
if len(keywordSet) != len(alchemySet):
print 'Error in data size!'
(model, cand, candProb) = utilities.generateModel3(alchemyCorpus, keywordCorpus, alchemySet, keywordSet)
return model, cand, candProb
def mappingInfer(model, cand, testSet, testList):
predictions = utilities.outputMappingResult(model, cand, testSet, testList, 3)
return predictions
def mappingInfer2(model, cand, testSet, testList):
predictions = utilities.outputMappingResult2(model, cand, testSet, testList)
return predictions
def mappingInfer3(model, cand, candProb, testSet):
predictions = utilities.outputMappingResult3(model, cand, candProb, testSet, 3)
return predictions
| renhaocui/ensembleTopic | combinedMapping.py | Python | mit | 3,997 |
from functools import cmp_to_key
from typing import List
class Solution:
def largestNumber(self, nums: List[int]) -> str:
def cmp(s1, s2):
if s1 + s2 > s2 + s1:
return -1
elif s1 + s2 < s2 + s1:
return 1
else:
return 0
result = ''.join(sorted(map(str, nums), key=cmp_to_key(cmp)))
return '0' if result[0] == '0' else result
class Solution2:
def largestNumber(self, nums: List[int]) -> str:
class largerNumKey(str):
def __lt__(x, y):
return x + y > y + x
result = ''.join(sorted(map(str, nums), key=largerNumKey))
return '0' if result[0] == '0' else result
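# A quick sanity check of both solutions on classic LeetCode 179 cases:
#
#     print(Solution().largestNumber([3, 30, 34, 5, 9]))  # '9534330'
#     print(Solution2().largestNumber([0, 0]))            # '0'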
| jiadaizhao/LeetCode | 0101-0200/0179-Largest Number/0179-Largest Number.py | Python | mit | 727 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-08 18:31
from __future__ import unicode_literals
import django.contrib.postgres.fields
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='GeoKitTable',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.SlugField(max_length=250, unique=True)),
('description', models.TextField(blank=True, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('field_names', django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), null=True, size=None)),
],
),
migrations.CreateModel(
name='Record',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField(null=True)),
('properties', django.contrib.postgres.fields.jsonb.JSONField(null=True)),
('table', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='geokit_tables.GeoKitTable')),
],
),
]
| Applied-GeoSolutions/geokit | geokit_tables/migrations/0001_initial.py | Python | gpl-2.0 | 1,514 |
# encoding: utf-8
def word_calc(string):
    # map Russian number words and operators to calculator tokens
    numbers = {
'один': '1', 'два': '2', 'три': '3',
'четыре': '4', 'пять': '5', 'шесть': '6',
'семь': '7', 'восемь': '8', 'девять': '9',
'ноль': '0', 'плюс': '+', 'минус': '-',
'умножить': '*', 'разделить': '/', 'и': '.',
}
result = ''
string = string.lower().strip().replace(' на', '')
string = string.split(' ')[2:]
for num in string:
result += numbers[num]
return result
if __name__ == '__main__':
    # demo question: "how much is four point five times six point two" (in Russian)
    question = 'сколько будет четыре и пять умножить на шесть и два'
    if question.startswith('сколько будет'):
        print(word_calc(question))
| mightydok/moscowpy3 | homework1/word_calc.py | Python | gpl-3.0 | 840 |
import os
import uuid
import weakref
import collections
import functools
import numba
from numba.core import types, errors, utils, config
# Exported symbols
from numba.core.typing.typeof import typeof_impl # noqa: F401
from numba.core.typing.asnumbatype import as_numba_type # noqa: F401
from numba.core.typing.templates import infer, infer_getattr # noqa: F401
from numba.core.imputils import ( # noqa: F401
lower_builtin, lower_getattr, lower_getattr_generic, # noqa: F401
lower_setattr, lower_setattr_generic, lower_cast) # noqa: F401
from numba.core.datamodel import models # noqa: F401
from numba.core.datamodel import register_default as register_model # noqa: F401, E501
from numba.core.pythonapi import box, unbox, reflect, NativeValue # noqa: F401
from numba._helperlib import _import_cython_function # noqa: F401
from numba.core.serialize import ReduceMixin
def type_callable(func):
"""
Decorate a function as implementing typing for the callable *func*.
*func* can be a callable object (probably a global) or a string
    denoting a built-in operation (such as 'getitem' or '__array_wrap__')
"""
from numba.core.typing.templates import (CallableTemplate, infer,
infer_global)
if not callable(func) and not isinstance(func, str):
raise TypeError("`func` should be a function or string")
try:
func_name = func.__name__
except AttributeError:
func_name = str(func)
def decorate(typing_func):
def generic(self):
return typing_func(self.context)
name = "%s_CallableTemplate" % (func_name,)
bases = (CallableTemplate,)
class_dict = dict(key=func, generic=generic)
template = type(name, bases, class_dict)
infer(template)
if callable(func):
infer_global(func, types.Function(template))
return typing_func
return decorate
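# A minimal usage sketch (the function `clamp` and the chosen return type are
# hypothetical): register a typer stating that clamp(x, lo, hi) returns a
# float64 whenever all three arguments are floats.
#
#     @type_callable(clamp)
#     def type_clamp(context):
#         def typer(x, lo, hi):
#             if all(isinstance(t, types.Float) for t in (x, lo, hi)):
#                 return types.float64
#         return typer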
# By default, an *overload* does not have a cpython wrapper because it is not
# callable from python.
_overload_default_jit_options = {'no_cpython_wrapper': True}
def overload(func, jit_options={}, strict=True, inline='never',
prefer_literal=False, **kwargs):
"""
A decorator marking the decorated function as typing and implementing
*func* in nopython mode.
The decorated function will have the same formal parameters as *func*
and be passed the Numba types of those parameters. It should return
a function implementing *func* for the given types.
Here is an example implementing len() for tuple types::
@overload(len)
def tuple_len(seq):
if isinstance(seq, types.BaseTuple):
n = len(seq)
def len_impl(seq):
return n
return len_impl
    Compiler options can be passed as a dictionary using the **jit_options**
argument.
Overloading strictness (that the typing and implementing signatures match)
is enforced by the **strict** keyword argument, it is recommended that this
is set to True (default).
To handle a function that accepts imprecise types, an overload
definition can return 2-tuple of ``(signature, impl_function)``, where
the ``signature`` is a ``typing.Signature`` specifying the precise
signature to be used; and ``impl_function`` is the same implementation
function as in the simple case.
    The kwarg ``inline`` determines whether the overload is inlined into the
    calling function; it can be one of three values:
* 'never' (default) - the overload is never inlined.
* 'always' - the overload is always inlined.
* a function that takes two arguments, both of which are instances of a
namedtuple with fields:
* func_ir
* typemap
* calltypes
* signature
The first argument holds the information from the caller, the second
holds the information from the callee. The function should return Truthy
to determine whether to inline, this essentially permitting custom
inlining rules (typical use might be cost models).
The *prefer_literal* option allows users to control if literal types should
be tried first or last. The default (`False`) is to use non-literal types.
Implementations that can specialize based on literal values should set the
    option to `True`. Note, this option may be expanded in the near future to
allow for more control (e.g. disabling non-literal types).
**kwargs prescribes additional arguments passed through to the overload
template. The only accepted key at present is 'target' which is a string
corresponding to the target that this overload should be bound against.
"""
from numba.core.typing.templates import make_overload_template, infer_global
# set default options
opts = _overload_default_jit_options.copy()
opts.update(jit_options) # let user options override
# TODO: abort now if the kwarg 'target' relates to an unregistered target,
# this requires sorting out the circular imports first.
def decorate(overload_func):
template = make_overload_template(func, overload_func, opts, strict,
inline, prefer_literal, **kwargs)
infer(template)
if callable(func):
infer_global(func, types.Function(template))
return overload_func
return decorate
def register_jitable(*args, **kwargs):
"""
Register a regular python function that can be executed by the python
interpreter and can be compiled into a nopython function when referenced
by other jit'ed functions. Can be used as::
@register_jitable
def foo(x, y):
return x + y
Or, with compiler options::
@register_jitable(_nrt=False) # disable runtime allocation
def foo(x, y):
return x + y
"""
def wrap(fn):
# It is just a wrapper for @overload
inline = kwargs.pop('inline', 'never')
@overload(fn, jit_options=kwargs, inline=inline, strict=False)
def ov_wrap(*args, **kwargs):
return fn
return fn
if kwargs:
return wrap
else:
return wrap(*args)
def overload_attribute(typ, attr, **kwargs):
"""
A decorator marking the decorated function as typing and implementing
attribute *attr* for the given Numba type in nopython mode.
*kwargs* are passed to the underlying `@overload` call.
Here is an example implementing .nbytes for array types::
@overload_attribute(types.Array, 'nbytes')
def array_nbytes(arr):
def get(arr):
return arr.size * arr.itemsize
return get
"""
# TODO implement setters
from numba.core.typing.templates import make_overload_attribute_template
def decorate(overload_func):
template = make_overload_attribute_template(
typ, attr, overload_func,
inline=kwargs.get('inline', 'never'),
)
infer_getattr(template)
overload(overload_func, **kwargs)(overload_func)
return overload_func
return decorate
def _overload_method_common(typ, attr, **kwargs):
"""Common code for overload_method and overload_classmethod
"""
from numba.core.typing.templates import make_overload_method_template
def decorate(overload_func):
copied_kwargs = kwargs.copy() # avoid mutating parent dict
template = make_overload_method_template(
typ, attr, overload_func,
inline=copied_kwargs.pop('inline', 'never'),
prefer_literal=copied_kwargs.pop('prefer_literal', False),
**copied_kwargs,
)
infer_getattr(template)
overload(overload_func, **kwargs)(overload_func)
return overload_func
return decorate
def overload_method(typ, attr, **kwargs):
"""
A decorator marking the decorated function as typing and implementing
method *attr* for the given Numba type in nopython mode.
*kwargs* are passed to the underlying `@overload` call.
Here is an example implementing .take() for array types::
@overload_method(types.Array, 'take')
def array_take(arr, indices):
if isinstance(indices, types.Array):
def take_impl(arr, indices):
n = indices.shape[0]
res = np.empty(n, arr.dtype)
for i in range(n):
res[i] = arr[indices[i]]
return res
return take_impl
"""
return _overload_method_common(typ, attr, **kwargs)
def overload_classmethod(typ, attr, **kwargs):
"""
A decorator marking the decorated function as typing and implementing
classmethod *attr* for the given Numba type in nopython mode.
Similar to ``overload_method``.
Here is an example implementing a classmethod on the Array type to call
``np.arange()``::
@overload_classmethod(types.Array, "make")
def ov_make(cls, nitems):
def impl(cls, nitems):
return np.arange(nitems)
return impl
The above code will allow the following to work in jit-compiled code::
@njit
def foo(n):
return types.Array.make(n)
"""
return _overload_method_common(types.TypeRef(typ), attr, **kwargs)
def make_attribute_wrapper(typeclass, struct_attr, python_attr):
"""
Make an automatic attribute wrapper exposing member named *struct_attr*
as a read-only attribute named *python_attr*.
The given *typeclass*'s model must be a StructModel subclass.
"""
from numba.core.typing.templates import AttributeTemplate
from numba.core.datamodel import default_manager
from numba.core.datamodel.models import StructModel
from numba.core.imputils import impl_ret_borrowed
from numba.core import cgutils
if not isinstance(typeclass, type) or not issubclass(typeclass, types.Type):
raise TypeError("typeclass should be a Type subclass, got %s"
% (typeclass,))
def get_attr_fe_type(typ):
"""
Get the Numba type of member *struct_attr* in *typ*.
"""
model = default_manager.lookup(typ)
if not isinstance(model, StructModel):
raise TypeError("make_struct_attribute_wrapper() needs a type "
"with a StructModel, but got %s" % (model,))
return model.get_member_fe_type(struct_attr)
@infer_getattr
class StructAttribute(AttributeTemplate):
key = typeclass
def generic_resolve(self, typ, attr):
if attr == python_attr:
return get_attr_fe_type(typ)
@lower_getattr(typeclass, python_attr)
def struct_getattr_impl(context, builder, typ, val):
val = cgutils.create_struct_proxy(typ)(context, builder, value=val)
attrty = get_attr_fe_type(typ)
attrval = getattr(val, struct_attr)
return impl_ret_borrowed(context, builder, attrty, attrval)
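# A minimal usage sketch (IntervalType and its struct member 'lo' are
# hypothetical, mirroring the numba extension tutorial): expose the model
# member 'lo' as a read-only attribute `.lo` in nopython mode.
#
#     make_attribute_wrapper(IntervalType, 'lo', 'lo')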
class _Intrinsic(ReduceMixin):
"""
Dummy callable for intrinsic
"""
_memo = weakref.WeakValueDictionary()
# hold refs to last N functions deserialized, retaining them in _memo
# regardless of whether there is another reference
_recent = collections.deque(maxlen=config.FUNCTION_CACHE_SIZE)
__uuid = None
def __init__(self, name, defn, **kwargs):
self._ctor_kwargs = kwargs
self._name = name
self._defn = defn
functools.update_wrapper(self, defn)
@property
def _uuid(self):
"""
An instance-specific UUID, to avoid multiple deserializations of
a given instance.
Note this is lazily-generated, for performance reasons.
"""
u = self.__uuid
if u is None:
u = str(uuid.uuid1())
self._set_uuid(u)
return u
def _set_uuid(self, u):
assert self.__uuid is None
self.__uuid = u
self._memo[u] = self
self._recent.append(self)
def _register(self):
# _ctor_kwargs
from numba.core.typing.templates import (make_intrinsic_template,
infer_global)
template = make_intrinsic_template(self, self._defn, self._name,
self._ctor_kwargs)
infer(template)
infer_global(self, types.Function(template))
def __call__(self, *args, **kwargs):
"""
This is only defined to pretend to be a callable from CPython.
"""
msg = '{0} is not usable in pure-python'.format(self)
raise NotImplementedError(msg)
def __repr__(self):
return "<intrinsic {0}>".format(self._name)
def __deepcopy__(self, memo):
# NOTE: Intrinsic are immutable and we don't need to copy.
# This is triggered from deepcopy of statements.
return self
def _reduce_states(self):
"""
NOTE: part of ReduceMixin protocol
"""
return dict(uuid=self._uuid, name=self._name, defn=self._defn)
@classmethod
def _rebuild(cls, uuid, name, defn):
"""
NOTE: part of ReduceMixin protocol
"""
try:
return cls._memo[uuid]
except KeyError:
llc = cls(name=name, defn=defn)
llc._register()
llc._set_uuid(uuid)
return llc
def intrinsic(*args, **kwargs):
"""
A decorator marking the decorated function as typing and implementing
*func* in nopython mode using the llvmlite IRBuilder API. This is an escape
hatch for expert users to build custom LLVM IR that will be inlined to
the caller.
The first argument to *func* is the typing context. The rest of the
arguments corresponds to the type of arguments of the decorated function.
These arguments are also used as the formal argument of the decorated
function. If *func* has the signature ``foo(typing_context, arg0, arg1)``,
the decorated function will have the signature ``foo(arg0, arg1)``.
    The return value of *func* should be a 2-tuple of the expected type
    signature and a code-generation function that will be passed to
    ``lower_builtin``.
For unsupported operation, return None.
    Here is an example implementing a ``cast_int_to_byte_ptr`` that casts
any integer to a byte pointer::
@intrinsic
def cast_int_to_byte_ptr(typingctx, src):
# check for accepted types
if isinstance(src, types.Integer):
# create the expected type signature
result_type = types.CPointer(types.uint8)
sig = result_type(types.uintp)
# defines the custom code generation
def codegen(context, builder, signature, args):
# llvm IRBuilder code here
[src] = args
rtype = signature.return_type
llrtype = context.get_value_type(rtype)
return builder.inttoptr(src, llrtype)
return sig, codegen
"""
# Make inner function for the actual work
def _intrinsic(func):
name = getattr(func, '__name__', str(func))
llc = _Intrinsic(name, func, **kwargs)
llc._register()
return llc
if not kwargs:
# No option is given
return _intrinsic(*args)
else:
# options are given, create a new callable to recv the
# definition function
def wrapper(func):
return _intrinsic(func)
return wrapper
def get_cython_function_address(module_name, function_name):
"""
Get the address of a Cython function.
Args
----
module_name:
Name of the Cython module
function_name:
Name of the Cython function
Returns
-------
A Python int containing the address of the function
"""
return _import_cython_function(module_name, function_name)
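# Example (from the numba documentation; requires scipy): look up the address
# of the cython-exposed Bessel function j0.
#
#     addr = get_cython_function_address('scipy.special.cython_special', 'j0')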
def include_path():
"""Returns the C include directory path.
"""
include_dir = os.path.dirname(os.path.dirname(numba.__file__))
path = os.path.abspath(include_dir)
return path
def sentry_literal_args(pysig, literal_args, args, kwargs):
"""Ensures that the given argument types (in *args* and *kwargs*) are
literally typed for a function with the python signature *pysig* and the
list of literal argument names in *literal_args*.
Alternatively, this is the same as::
SentryLiteralArgs(literal_args).for_pysig(pysig).bind(*args, **kwargs)
"""
boundargs = pysig.bind(*args, **kwargs)
# Find literal argument positions and whether it is satisfied.
request_pos = set()
missing = False
for i, (k, v) in enumerate(boundargs.arguments.items()):
if k in literal_args:
request_pos.add(i)
if not isinstance(v, types.Literal):
missing = True
if missing:
# Yes, there are missing required literal arguments
e = errors.ForceLiteralArg(request_pos)
# A helper function to fold arguments
def folded(args, kwargs):
out = pysig.bind(*args, **kwargs).arguments.values()
return tuple(out)
raise e.bind_fold_arguments(folded)
class SentryLiteralArgs(collections.namedtuple(
'_SentryLiteralArgs', ['literal_args'])):
"""
Parameters
----------
literal_args : Sequence[str]
A sequence of names for literal arguments
Examples
--------
The following line:
>>> SentryLiteralArgs(literal_args).for_pysig(pysig).bind(*args, **kwargs)
is equivalent to:
>>> sentry_literal_args(pysig, literal_args, args, kwargs)
"""
def for_function(self, func):
"""Bind the sentry to the signature of *func*.
Parameters
----------
func : Function
A python function.
Returns
-------
obj : BoundLiteralArgs
"""
return self.for_pysig(utils.pysignature(func))
def for_pysig(self, pysig):
"""Bind the sentry to the given signature *pysig*.
Parameters
----------
pysig : inspect.Signature
Returns
-------
obj : BoundLiteralArgs
"""
return BoundLiteralArgs(
pysig=pysig,
literal_args=self.literal_args,
)
class BoundLiteralArgs(collections.namedtuple(
'BoundLiteralArgs', ['pysig', 'literal_args'])):
"""
This class is usually created by SentryLiteralArgs.
"""
def bind(self, *args, **kwargs):
"""Bind to argument types.
"""
return sentry_literal_args(
self.pysig,
self.literal_args,
args,
kwargs,
)
def is_jitted(function):
"""Returns True if a function is wrapped by one of the Numba @jit
decorators, for example: numba.jit, numba.njit
The purpose of this function is to provide a means to check if a function is
already JIT decorated.
"""
# don't want to export this so import locally
from numba.core.dispatcher import Dispatcher
return isinstance(function, Dispatcher)
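# A minimal sketch of is_jitted in action, assuming numba is installed; the
# function names below are illustrative, not part of this module.
#
#   from numba import njit
#
#   @njit
#   def _twice(x):
#       return x * 2
#
#   def _plain(x):
#       return x * 2
#
#   assert is_jitted(_twice)       # Dispatcher instance -> True
#   assert not is_jitted(_plain)   # plain Python function -> False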
|
cpcloud/numba
|
numba/core/extending.py
|
Python
|
bsd-2-clause
| 19,331
|
# -*- coding: utf-8 -*-
from .slowmatrix import SlowMatrix
from ..matrix import AbstractMatrix
import timeit
import numpy
class FastMatrix(SlowMatrix):
    """
    A matrix whose multiplication uses Strassen's algorithm.
    """
    def multiply(self, left, right):
        """
        Writes the product of the given matrices into the current matrix.
        The multiplication is performed with Strassen's algorithm.
        """
        assert left.ncol() == right.nrow(), \
            "The matrix dimensions do not allow multiplication!"
        assert self.nrow() == left.nrow() and right.ncol() == self.ncol(), \
            "The dimensions of the target matrix do not match those of the product!"
        l = right.ncol()  # number of columns of the second matrix (width of the product)
        n = left.nrow()  # number of rows of the first matrix
        m = right.nrow()  # number of rows of the second matrix (= columns of the first)
        # sketch
        #     m        l        l
        #   [   ]    [   ]    [   ]
        # n [   ] * m[   ] = n[   ]
        #   [   ]    [   ]    [   ]
        n2 = 0
        l2 = 0
        m2 = 0
        u = 1  # exponent used to find the powers of two below
        while (n2 == 0 or m2 == 0 or l2 == 0):  # n2, m2 and l2 become the largest powers of two not exceeding n, m and l
            if 2**u > n and n2 == 0:
                n2 = 2**(u-1)
            if 2**u > l and l2 == 0:
                l2 = 2**(u-1)
            if 2**u > m and m2 == 0:
                m2 = 2**(u-1)
            u += 1
        #print(n2, l2, m2)
        if l == 1 or n == 1 or m == 1:  # base case: one of l, m or n is 1, so fall back to naive multiplication
            C = SlowMatrix.multiply(self, left, right)
            return C
        else:  # general case, where none of the dimensions is 1
            C = AbstractMatrix([([0, ] * l), ] * n)  # target matrix of the right size
            #print(m2, n2, l2)
            A11 = left[0:(n2//2), 0:(m2//2)]  # split the power-of-two block of the left matrix into four equal blocks
            A12 = left[0:n2//2, (m2//2):m2]
            A21 = left[n2//2:n2, 0:m2//2]
            A22 = left[n2//2:n2, m2//2:m2]
            B11 = right[0:(m2//2), 0:(l2//2)]  # split the power-of-two block of the right matrix into four equal blocks
            B12 = right[0:m2//2, (l2//2):l2]
            B21 = right[m2//2:m2, 0:l2//2]
            B22 = right[m2//2:m2, l2//2:l2]
            #print(AbstractMatrix([([0, ] * (l2//2)), ] * (n2 // 2)))
            M1 = FastMatrix.multiply(AbstractMatrix([([0, ] * (l2//2)), ] * (n2 // 2)), (A11 + A22), (B11 + B22))  # recursive calls for the seven block products
            M2 = FastMatrix.multiply(AbstractMatrix([([0, ] * (l2//2)), ] * (n2 // 2)), (A21 + A22), B11)
            M3 = FastMatrix.multiply(AbstractMatrix([([0, ] * (l2//2)), ] * (n2 // 2)), A11, (B12 - B22))
            M4 = FastMatrix.multiply(AbstractMatrix([([0, ] * (l2//2)), ] * (n2 // 2)), A22, (B21 - B11))
            M5 = FastMatrix.multiply(AbstractMatrix([([0, ] * (l2//2)), ] * (n2 // 2)), (A11 + A12), B22)
            M6 = FastMatrix.multiply(AbstractMatrix([([0, ] * (l2//2)), ] * (n2 // 2)), (A21 - A11), (B11 + B12))
            M7 = FastMatrix.multiply(AbstractMatrix([([0, ] * (l2//2)), ] * (n2 // 2)), (A12 - A22), (B21 + B22))
            C11 = M1 + M4 - M5 + M7  # blocks of C obtained by combining the M terms
            C12 = M3 + M5
            C21 = M2 + M4
            C22 = M1 - M2 + M3 + M6
            C[0:n2 // 2, 0:l2 // 2] = C11[0:n2//2, 0:l2//2]  # assemble the blocks into C
            C[0:n2//2, (l2//2):l2] = C12[0:n2//2, 0:l2//2]
            C[n2//2:n2, 0:l2//2] = C21[0:n2//2, 0:l2//2]
            C[n2//2:n2, l2//2:l2] = C22[0:n2//2, 0:l2//2]
            # entries outside the power-of-two block are handled naively
            #print(n, m, l)
            for n1 in range(n):
                for l1 in range(l):
                    for m1 in range(m):
                        #print(n1, n - n2, m1, m - m2, l1, l - l2)
                        if not (n1 < n2 and m1 < m2 and l1 < l2):
                            C[n1, l1] += left[n1, m1] * right[m1, l1]
            return C
# A = AbstractMatrix([[1, 2, 3, 4],
# [5, 6, 7, 8],
# [9, 10, 11, 12],
# [13, 14, 15, 16],
# [3,3,3,3]])
# B = AbstractMatrix([[1, 2, 3, 4],
# [5, 6, 7, 8],
# [9, 10, 11, 12],
# [13, 14, 15, 16]])
# F = AbstractMatrix([[0, 0, 0, 0],
# [0, 0, 0, 0],
# [0, 0, 0, 0],
# [0, 0, 0, 0],
# [0,0,0,0]])
# #print(SlowMatrix.multiply(F,A,B))
# #print(FastMatrix.multiply(F,A,B))
#
# T = AbstractMatrix([[1,2,1,0,0,3,3,3,3,3,5],
# [2,1,0,1,0,4,5,6,7,8,8],
# [1,2,5,3,3,5,5,6,2,7,7]])
# S = AbstractMatrix([[2,8,2,0,0,1,2],
# [1,3,0,1,8,5,2],
# [0,2,0,1,0,1,2],
# [2,1,2,1,2,1,2],
# [3,4,6,2,3,1,3],
# [2, 1, 2, 1, 2, 1, 2],
# [2, 1, 2, 1, 2, 1, 2],
# [2, 1, 2, 1, 2, 1, 2],
# [2, 1, 2, 1, 2, 1, 2],
# [2, 1, 2, 1, 2, 1, 2],
# [2, 1, 2, 1, 2, 1, 2],])
# U = AbstractMatrix([([0, ] * 7), ] * 3)
#
# #print(SlowMatrix.multiply(U,T,S))
# #print(FastMatrix.multiply(U,T,S))
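# A self-contained sketch of the same seven-product Strassen recursion, written
# with the numpy import at the top of this file and restricted to square
# matrices whose size is a power of two; it is independent of the classes above
# and only illustrates the algorithm.
def _strassen_sketch(A, B):
    n = A.shape[0]
    if n == 1:
        return A * B  # 1x1 base case
    h = n // 2
    A11, A12, A21, A22 = A[:h, :h], A[:h, h:], A[h:, :h], A[h:, h:]
    B11, B12, B21, B22 = B[:h, :h], B[:h, h:], B[h:, :h], B[h:, h:]
    M1 = _strassen_sketch(A11 + A22, B11 + B22)
    M2 = _strassen_sketch(A21 + A22, B11)
    M3 = _strassen_sketch(A11, B12 - B22)
    M4 = _strassen_sketch(A22, B21 - B11)
    M5 = _strassen_sketch(A11 + A12, B22)
    M6 = _strassen_sketch(A21 - A11, B11 + B12)
    M7 = _strassen_sketch(A12 - A22, B21 + B22)
    C = numpy.empty((n, n), dtype=A.dtype)
    C[:h, :h] = M1 + M4 - M5 + M7
    C[:h, h:] = M3 + M5
    C[h:, :h] = M2 + M4
    C[h:, h:] = M1 - M2 + M3 + M6
    return C
# Quick self-check against numpy's built-in matmul:
#   A = numpy.arange(16).reshape(4, 4); B = numpy.eye(4, dtype=int)
#   assert (_strassen_sketch(A, B) == A @ B).all()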
|
markun9/PSA1
|
naloge/2016/dn1/matrix/JureMarkun/fastmatrix.py
|
Python
|
mit
| 5,195
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.modules import (
TransformerSentenceEncoderLayer
)
from fairseq.model_parallel.modules import ModelParallelMultiheadAttention
try:
from fairseq.model_parallel.megatron.mpu import (
ColumnParallelLinear,
RowParallelLinear,
)
has_megatron_submodule = True
except (ImportError, ModuleNotFoundError):
has_megatron_submodule = False
class ModelParallelTransformerSentenceEncoderLayer(TransformerSentenceEncoderLayer):
"""
Implements a Model Parallel Transformer Encoder Layer used in
BERT/XLM style pre-trained models.
"""
def build_fc1(self, input_dim, output_dim, **unused):
return ColumnParallelLinear(input_dim, output_dim, gather_output=False)
def build_fc2(self, input_dim, output_dim, **unused):
return RowParallelLinear(input_dim, output_dim, input_is_parallel=True)
def build_self_attention(
self,
embed_dim,
num_attention_heads,
dropout,
**kwargs,
):
return ModelParallelMultiheadAttention(
embed_dim,
num_attention_heads,
dropout=dropout,
self_attention=True
)
def forward(
self,
x: torch.Tensor,
self_attn_mask: torch.Tensor = None,
self_attn_padding_mask: torch.Tensor = None,
):
"""
LayerNorm is applied either before or after the self-attention/ffn
modules similar to the original Transformer imlementation.
"""
residual = x
x = self.self_attn_layer_norm(x)
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
need_weights=False,
attn_mask=self_attn_mask,
)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
residual = x
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = F.dropout(x, p=self.activation_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
return x, None
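# A minimal sketch of the same pre-LayerNorm residual pattern in plain PyTorch,
# without any model parallelism; the dimensions below are illustrative.
#
#   import torch.nn as nn
#
#   class PreNormFFN(nn.Module):
#       def __init__(self, dim=16, hidden=64, p=0.1):
#           super().__init__()
#           self.norm = nn.LayerNorm(dim)
#           self.fc1 = nn.Linear(dim, hidden)
#           self.fc2 = nn.Linear(hidden, dim)
#           self.drop = nn.Dropout(p)
#
#       def forward(self, x):
#           residual = x
#           x = self.norm(x)                    # normalize before the sublayer
#           x = self.fc2(self.drop(torch.relu(self.fc1(x))))
#           return residual + self.drop(x)      # residual around the FFN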
|
hfp/libxsmm
|
samples/deeplearning/sparse_training/fairseq/fairseq/model_parallel/modules/transformer_sentence_encoder_layer.py
|
Python
|
bsd-3-clause
| 2,446
|
"""
Tests for DatetimeArray
"""
import operator
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas.core.arrays import DatetimeArray
from pandas.core.arrays.datetimes import sequence_to_dt64ns
import pandas.util.testing as tm
class TestDatetimeArrayConstructor:
def test_only_1dim_accepted(self):
arr = np.array([0, 1, 2, 3], dtype="M8[h]").astype("M8[ns]")
with pytest.raises(ValueError, match="Only 1-dimensional"):
# 2-dim
DatetimeArray(arr.reshape(2, 2))
with pytest.raises(ValueError, match="Only 1-dimensional"):
# 0-dim
DatetimeArray(arr[[0]].squeeze())
def test_freq_validation(self):
# GH#24623 check that invalid instances cannot be created with the
# public constructor
arr = np.arange(5, dtype=np.int64) * 3600 * 10 ** 9
msg = (
"Inferred frequency H from passed values does not "
"conform to passed frequency W-SUN"
)
with pytest.raises(ValueError, match=msg):
DatetimeArray(arr, freq="W")
@pytest.mark.parametrize(
"meth",
[
DatetimeArray._from_sequence,
sequence_to_dt64ns,
pd.to_datetime,
pd.DatetimeIndex,
],
)
def test_mixing_naive_tzaware_raises(self, meth):
# GH#24569
arr = np.array([pd.Timestamp("2000"), pd.Timestamp("2000", tz="CET")])
msg = (
"Cannot mix tz-aware with tz-naive values|"
"Tz-aware datetime.datetime cannot be converted "
"to datetime64 unless utc=True"
)
for obj in [arr, arr[::-1]]:
# check that we raise regardless of whether naive is found
# before aware or vice-versa
with pytest.raises(ValueError, match=msg):
meth(obj)
def test_from_pandas_array(self):
arr = pd.array(np.arange(5, dtype=np.int64)) * 3600 * 10 ** 9
result = DatetimeArray._from_sequence(arr, freq="infer")
expected = pd.date_range("1970-01-01", periods=5, freq="H")._data
tm.assert_datetime_array_equal(result, expected)
def test_mismatched_timezone_raises(self):
arr = DatetimeArray(
np.array(["2000-01-01T06:00:00"], dtype="M8[ns]"),
dtype=DatetimeTZDtype(tz="US/Central"),
)
dtype = DatetimeTZDtype(tz="US/Eastern")
with pytest.raises(TypeError, match="Timezone of the array"):
DatetimeArray(arr, dtype=dtype)
def test_non_array_raises(self):
with pytest.raises(ValueError, match="list"):
DatetimeArray([1, 2, 3])
def test_other_type_raises(self):
with pytest.raises(
ValueError, match="The dtype of 'values' is incorrect.*bool"
):
DatetimeArray(np.array([1, 2, 3], dtype="bool"))
def test_incorrect_dtype_raises(self):
with pytest.raises(ValueError, match="Unexpected value for 'dtype'."):
DatetimeArray(np.array([1, 2, 3], dtype="i8"), dtype="category")
def test_freq_infer_raises(self):
with pytest.raises(ValueError, match="Frequency inference"):
DatetimeArray(np.array([1, 2, 3], dtype="i8"), freq="infer")
def test_copy(self):
data = np.array([1, 2, 3], dtype="M8[ns]")
arr = DatetimeArray(data, copy=False)
assert arr._data is data
arr = DatetimeArray(data, copy=True)
assert arr._data is not data
class TestDatetimeArrayComparisons:
# TODO: merge this into tests/arithmetic/test_datetime64 once it is
# sufficiently robust
def test_cmp_dt64_arraylike_tznaive(self, all_compare_operators):
# arbitrary tz-naive DatetimeIndex
opname = all_compare_operators.strip("_")
op = getattr(operator, opname)
dti = pd.date_range("2016-01-1", freq="MS", periods=9, tz=None)
arr = DatetimeArray(dti)
assert arr.freq == dti.freq
assert arr.tz == dti.tz
right = dti
expected = np.ones(len(arr), dtype=bool)
if opname in ["ne", "gt", "lt"]:
# for these the comparisons should be all-False
expected = ~expected
result = op(arr, arr)
tm.assert_numpy_array_equal(result, expected)
for other in [right, np.array(right)]:
# TODO: add list and tuple, and object-dtype once those
# are fixed in the constructor
result = op(arr, other)
tm.assert_numpy_array_equal(result, expected)
result = op(other, arr)
tm.assert_numpy_array_equal(result, expected)
class TestDatetimeArray:
def test_astype_to_same(self):
arr = DatetimeArray._from_sequence(["2000"], tz="US/Central")
result = arr.astype(DatetimeTZDtype(tz="US/Central"), copy=False)
assert result is arr
@pytest.mark.parametrize("dtype", [int, np.int32, np.int64, "uint32", "uint64"])
def test_astype_int(self, dtype):
arr = DatetimeArray._from_sequence([pd.Timestamp("2000"), pd.Timestamp("2001")])
result = arr.astype(dtype)
if np.dtype(dtype).kind == "u":
expected_dtype = np.dtype("uint64")
else:
expected_dtype = np.dtype("int64")
expected = arr.astype(expected_dtype)
assert result.dtype == expected_dtype
tm.assert_numpy_array_equal(result, expected)
def test_tz_setter_raises(self):
arr = DatetimeArray._from_sequence(["2000"], tz="US/Central")
with pytest.raises(AttributeError, match="tz_localize"):
arr.tz = "UTC"
def test_setitem_different_tz_raises(self):
data = np.array([1, 2, 3], dtype="M8[ns]")
arr = DatetimeArray(data, copy=False, dtype=DatetimeTZDtype(tz="US/Central"))
with pytest.raises(ValueError, match="None"):
arr[0] = pd.Timestamp("2000")
with pytest.raises(ValueError, match="US/Central"):
arr[0] = pd.Timestamp("2000", tz="US/Eastern")
def test_setitem_clears_freq(self):
a = DatetimeArray(pd.date_range("2000", periods=2, freq="D", tz="US/Central"))
a[0] = pd.Timestamp("2000", tz="US/Central")
assert a.freq is None
def test_repeat_preserves_tz(self):
dti = pd.date_range("2000", periods=2, freq="D", tz="US/Central")
arr = DatetimeArray(dti)
repeated = arr.repeat([1, 1])
# preserves tz and values, but not freq
expected = DatetimeArray(arr.asi8, freq=None, dtype=arr.dtype)
tm.assert_equal(repeated, expected)
def test_value_counts_preserves_tz(self):
dti = pd.date_range("2000", periods=2, freq="D", tz="US/Central")
arr = DatetimeArray(dti).repeat([4, 3])
result = arr.value_counts()
# Note: not tm.assert_index_equal, since `freq`s do not match
assert result.index.equals(dti)
arr[-2] = pd.NaT
result = arr.value_counts()
expected = pd.Series([1, 4, 2], index=[pd.NaT, dti[0], dti[1]])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("method", ["pad", "backfill"])
def test_fillna_preserves_tz(self, method):
dti = pd.date_range("2000-01-01", periods=5, freq="D", tz="US/Central")
arr = DatetimeArray(dti, copy=True)
arr[2] = pd.NaT
fill_val = dti[1] if method == "pad" else dti[3]
expected = DatetimeArray._from_sequence(
[dti[0], dti[1], fill_val, dti[3], dti[4]], freq=None, tz="US/Central"
)
result = arr.fillna(method=method)
tm.assert_extension_array_equal(result, expected)
# assert that arr and dti were not modified in-place
assert arr[2] is pd.NaT
assert dti[2] == pd.Timestamp("2000-01-03", tz="US/Central")
def test_array_interface_tz(self):
tz = "US/Central"
data = DatetimeArray(pd.date_range("2017", periods=2, tz=tz))
result = np.asarray(data)
expected = np.array(
[
pd.Timestamp("2017-01-01T00:00:00", tz=tz),
pd.Timestamp("2017-01-02T00:00:00", tz=tz),
],
dtype=object,
)
tm.assert_numpy_array_equal(result, expected)
result = np.asarray(data, dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = np.asarray(data, dtype="M8[ns]")
expected = np.array(
["2017-01-01T06:00:00", "2017-01-02T06:00:00"], dtype="M8[ns]"
)
tm.assert_numpy_array_equal(result, expected)
def test_array_interface(self):
data = DatetimeArray(pd.date_range("2017", periods=2))
expected = np.array(
["2017-01-01T00:00:00", "2017-01-02T00:00:00"], dtype="datetime64[ns]"
)
result = np.asarray(data)
tm.assert_numpy_array_equal(result, expected)
result = np.asarray(data, dtype=object)
expected = np.array(
[pd.Timestamp("2017-01-01T00:00:00"), pd.Timestamp("2017-01-02T00:00:00")],
dtype=object,
)
tm.assert_numpy_array_equal(result, expected)
class TestSequenceToDT64NS:
def test_tz_dtype_mismatch_raises(self):
arr = DatetimeArray._from_sequence(["2000"], tz="US/Central")
with pytest.raises(TypeError, match="data is already tz-aware"):
sequence_to_dt64ns(arr, dtype=DatetimeTZDtype(tz="UTC"))
def test_tz_dtype_matches(self):
arr = DatetimeArray._from_sequence(["2000"], tz="US/Central")
result, _, _ = sequence_to_dt64ns(arr, dtype=DatetimeTZDtype(tz="US/Central"))
tm.assert_numpy_array_equal(arr._data, result)
class TestReductions:
@pytest.mark.parametrize("tz", [None, "US/Central"])
def test_min_max(self, tz):
arr = DatetimeArray._from_sequence(
[
"2000-01-03",
"2000-01-03",
"NaT",
"2000-01-02",
"2000-01-05",
"2000-01-04",
],
tz=tz,
)
result = arr.min()
expected = pd.Timestamp("2000-01-02", tz=tz)
assert result == expected
result = arr.max()
expected = pd.Timestamp("2000-01-05", tz=tz)
assert result == expected
result = arr.min(skipna=False)
assert result is pd.NaT
result = arr.max(skipna=False)
assert result is pd.NaT
@pytest.mark.parametrize("tz", [None, "US/Central"])
@pytest.mark.parametrize("skipna", [True, False])
def test_min_max_empty(self, skipna, tz):
arr = DatetimeArray._from_sequence([], tz=tz)
result = arr.min(skipna=skipna)
assert result is pd.NaT
result = arr.max(skipna=skipna)
assert result is pd.NaT
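# A hedged sketch of the constructor behaviours exercised above, assuming the
# same pandas version as these tests; run interactively or under pytest.
#
#   data = np.array(["2000-01-01", "2000-01-02"], dtype="M8[ns]")
#   naive = DatetimeArray(data)                                      # tz-naive
#   aware = DatetimeArray._from_sequence(["2000"], tz="US/Central")  # tz-aware
#   naive.dtype, aware.dtype  # -> dtype('<M8[ns]'), datetime64[ns, US/Central]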
|
toobaz/pandas
|
pandas/tests/arrays/test_datetimes.py
|
Python
|
bsd-3-clause
| 10,859
|
# coding=utf-8
import urllib
import urllib.parse
import urllib.request
import threading
import queue
threads = 5
target_url = "http://testphp.vulnweb.com"
wordlist_file = "./tmp/all.txt"
resume = None
user_agent = "Mozilla/5.0 (X11; Linux x86_64; rv:19.0) Gecko/20100101 Firefox/19.0"
word_queue = None
def build_wordlist(wordlist_file):
global resume
fd = open(wordlist_file, "rb")
raw_words = fd.readlines()
fd.close()
found_resume = False
words = queue.Queue()
for word in raw_words:
        # rstrip() strips the given trailing characters from the string (whitespace by default)
word = word.decode().rstrip()
if resume is None:
resume = word
if resume is not None:
if found_resume:
words.put(word)
else:
if word == resume:
found_resume = True
print("Resuming wordlist from %s" % resume)
else:
words.put(word)
return words
def dir_bruter(extensions=None):
    global resume  # record progress in the module-level resume marker on errors
    while not word_queue.empty():
attempt = word_queue.get()
attempt_list = []
if "." not in attempt:
attempt_list.append("/%s/" % attempt)
else:
attempt_list.append("/%s" % attempt)
if extensions:
for extension in extensions:
attempt_list.append("/%s%s" % (attempt, extension))
for brute in attempt_list:
            # quote() percent-encodes special characters (e.g. spaces or non-ASCII text) in the URL
url = "%s%s" % (target_url, urllib.parse.quote(brute))
try:
headers = {}
headers["User-Agent"] = user_agent
r = urllib.request.Request(url, headers=headers)
response = urllib.request.urlopen(r)
if len(response.read()):
print("[%d]==> %s" % (response.code, url))
except urllib.request.HTTPError as e:
print("Failed [%d]==> %s" % (e.code, url))
resume = attempt
if e.code != 404:
print("!!! %d => %s" % (e.code, url))
pass
word_queue = build_wordlist(wordlist_file)
extensions = [".php",".bak",".orig",".inc"]
for i in range(threads):
t = threading.Thread(target=dir_bruter, args=(extensions,))
t.start()
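# A hedged variant of the startup above that also waits for the worker threads
# to finish before the script exits:
#
#   workers = [threading.Thread(target=dir_bruter, args=(extensions,))
#              for _ in range(threads)]
#   for w in workers:
#       w.start()
#   for w in workers:
#       w.join()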
|
xieyajie/BackHatPython
|
backhatpython05/content_bruter.py
|
Python
|
apache-2.0
| 2,398
|
"""
Tests for `kolibri.utils.options` module.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import logging
import os
import tempfile
import mock
import pytest
from kolibri.utils import conf
from kolibri.utils import options
logger = logging.getLogger(__name__)
LOG_LOGGER = []
def log_logger(logger_instance, LEVEL, msg, args, **kwargs):
"""
Monkeypatching for logging.Logger._log to scoop up log messages if we wanna
test something specific was logged.
"""
LOG_LOGGER.append(
(LEVEL, msg)
)
# Call the original function
logger_instance.__log(LEVEL, msg, args, **kwargs)
def activate_log_logger(monkeypatch):
"""
Activates logging everything to ``LOG_LOGGER`` with the monkeypatch pattern
of py.test (test accepts a ``monkeypatch`` argument)
"""
monkeypatch.setattr(logging.Logger, '__log', logging.Logger._log, raising=False)
monkeypatch.setattr(logging.Logger, '_log', log_logger)
def test_option_reading_and_precedence_rules():
"""
Checks that options can be read from a dummy options.ini file, and overridden by env vars.
"""
_CONTENT_DIR = "/mycontentdir"
_HTTP_PORT_INI = 7007
_HTTP_PORT_ENV = 9009
_, tmp_ini_path = tempfile.mkstemp(prefix='options', suffix='.ini')
with open(tmp_ini_path, "w") as f:
f.write("\n".join([
"[Paths]",
"CONTENT_DIR = {dir}".format(dir=_CONTENT_DIR),
"[Deployment]",
"HTTP_PORT = {port}".format(port=_HTTP_PORT_INI),
]))
# when env vars are empty, values are drawn from ini file
with mock.patch.dict(os.environ, {'KOLIBRI_CONTENT_DIR': '', 'KOLIBRI_HTTP_PORT': '', 'KOLIBRI_LISTEN_PORT': ''}):
OPTIONS = options.read_options_file(conf.KOLIBRI_HOME, ini_filename=tmp_ini_path)
assert OPTIONS["Paths"]["CONTENT_DIR"] == _CONTENT_DIR
assert OPTIONS["Deployment"]["HTTP_PORT"] == _HTTP_PORT_INI
# when an env var is set, use those instead of ini file values
with mock.patch.dict(os.environ, {'KOLIBRI_HTTP_PORT': '', 'KOLIBRI_LISTEN_PORT': str(_HTTP_PORT_ENV)}):
OPTIONS = options.read_options_file(conf.KOLIBRI_HOME, ini_filename=tmp_ini_path)
assert OPTIONS["Deployment"]["HTTP_PORT"] == _HTTP_PORT_ENV
# when a higher precedence env var is set, it overrides the lower precedence env var
with mock.patch.dict(os.environ, {'KOLIBRI_HTTP_PORT': str(_HTTP_PORT_ENV), 'KOLIBRI_LISTEN_PORT': '88888'}):
OPTIONS = options.read_options_file(conf.KOLIBRI_HOME, ini_filename=tmp_ini_path)
assert OPTIONS["Deployment"]["HTTP_PORT"] == _HTTP_PORT_ENV
def test_improper_settings_display_errors_and_exit(monkeypatch):
"""
Checks that options can be read from a dummy options.ini file, and overridden by env vars.
"""
activate_log_logger(monkeypatch)
_, tmp_ini_path = tempfile.mkstemp(prefix='options', suffix='.ini')
# non-numeric arguments for an integer option in the ini file cause it to bail
with open(tmp_ini_path, "w") as f:
f.write("\n".join([
"[Deployment]",
"HTTP_PORT = abba",
]))
with mock.patch.dict(os.environ, {'KOLIBRI_HTTP_PORT': '', 'KOLIBRI_LISTEN_PORT': ''}):
with pytest.raises(SystemExit):
options.read_options_file(conf.KOLIBRI_HOME, ini_filename=tmp_ini_path)
assert 'value "abba" is of the wrong type' in LOG_LOGGER[-2][1]
# non-numeric arguments for an integer option in the env var cause it to bail, even when ini file is ok
with open(tmp_ini_path, "w") as f:
f.write("\n".join([
"[Deployment]",
"HTTP_PORT = 1278",
]))
with mock.patch.dict(os.environ, {'KOLIBRI_HTTP_PORT': 'baba', 'KOLIBRI_LISTEN_PORT': ''}):
with pytest.raises(SystemExit):
options.read_options_file(conf.KOLIBRI_HOME, ini_filename=tmp_ini_path)
assert 'value "baba" is of the wrong type' in LOG_LOGGER[-2][1]
# invalid choice for "option" type causes it to bail
with open(tmp_ini_path, "w") as f:
f.write("\n".join([
"[Database]",
"DATABASE_ENGINE = penguin",
]))
with mock.patch.dict(os.environ, {'KOLIBRI_DATABASE_ENGINE': ''}):
with pytest.raises(SystemExit):
options.read_options_file(conf.KOLIBRI_HOME, ini_filename=tmp_ini_path)
assert 'value "penguin" is unacceptable' in LOG_LOGGER[-2][1]
def test_option_writing():
"""
Checks that options can be written to a dummy options.ini file, validated, and then read back.
"""
_OLD_CONTENT_DIR = "/mycontentdir"
_NEW_CONTENT_DIR = "/goodnessme"
_HTTP_PORT_GOOD = 7007
_HTTP_PORT_BAD = "abba"
_, tmp_ini_path = tempfile.mkstemp(prefix='options', suffix='.ini')
with open(tmp_ini_path, "w") as f:
f.write("\n".join([
"[Paths]",
"CONTENT_DIR = {dir}".format(dir=_OLD_CONTENT_DIR),
"[Deployment]",
"HTTP_PORT = {port}".format(port=_HTTP_PORT_GOOD),
]))
with mock.patch.dict(os.environ, {'KOLIBRI_HTTP_PORT': '', 'KOLIBRI_LISTEN_PORT': ''}):
# check that values are set correctly to begin with
OPTIONS = options.read_options_file(conf.KOLIBRI_HOME, ini_filename=tmp_ini_path)
assert OPTIONS["Paths"]["CONTENT_DIR"] == _OLD_CONTENT_DIR
assert OPTIONS["Deployment"]["HTTP_PORT"] == _HTTP_PORT_GOOD
# change the content directory to something new
options.update_options_file("Paths", "CONTENT_DIR", _NEW_CONTENT_DIR, conf.KOLIBRI_HOME, ini_filename=tmp_ini_path)
# try changing the port to something bad, which should throw an error
with pytest.raises(ValueError):
options.update_options_file("Deployment", "HTTP_PORT", _HTTP_PORT_BAD, conf.KOLIBRI_HOME, ini_filename=tmp_ini_path)
# check that the properly validated option was set correctly, and the invalid one wasn't
OPTIONS = options.read_options_file(conf.KOLIBRI_HOME, ini_filename=tmp_ini_path)
assert OPTIONS["Paths"]["CONTENT_DIR"] == _NEW_CONTENT_DIR
assert OPTIONS["Deployment"]["HTTP_PORT"] == _HTTP_PORT_GOOD
def test_path_expansion():
"""
Checks that options under [Path] have "~" expanded, and are relativized to the KOLIBRI_HOME directory.
"""
KOLIBRI_HOME_TEMP = tempfile.mkdtemp()
_, tmp_ini_path = tempfile.mkstemp(prefix='options', suffix='.ini')
with mock.patch.dict(os.environ, {'KOLIBRI_CONTENT_DIR': "/absolute"}):
OPTIONS = options.read_options_file(KOLIBRI_HOME_TEMP, ini_filename=tmp_ini_path)
assert OPTIONS["Paths"]["CONTENT_DIR"] == "/absolute"
with mock.patch.dict(os.environ, {'KOLIBRI_CONTENT_DIR': "relative"}):
OPTIONS = options.read_options_file(KOLIBRI_HOME_TEMP, ini_filename=tmp_ini_path)
assert OPTIONS["Paths"]["CONTENT_DIR"] == os.path.join(KOLIBRI_HOME_TEMP, "relative")
with mock.patch.dict(os.environ, {'KOLIBRI_CONTENT_DIR': "~/homeiswherethecatis"}):
OPTIONS = options.read_options_file(KOLIBRI_HOME_TEMP, ini_filename=tmp_ini_path)
assert OPTIONS["Paths"]["CONTENT_DIR"] == os.path.expanduser("~/homeiswherethecatis")
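# A minimal sketch of the precedence rule the first test encodes: env vars beat
# the ini file, and KOLIBRI_HTTP_PORT outranks the KOLIBRI_LISTEN_PORT alias.
#
#   def resolve_port(ini_value):
#       for var in ("KOLIBRI_HTTP_PORT", "KOLIBRI_LISTEN_PORT"):  # high to low
#           val = os.environ.get(var)
#           if val:
#               return int(val)
#       return int(ini_value)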
|
DXCanas/kolibri
|
kolibri/utils/tests/test_options.py
|
Python
|
mit
| 7,282
|
import collections
import datetime
import json
from django.urls import reverse
from django.utils import timezone
from wagtail.api.v2.tests.test_pages import TestPageDetail, TestPageListing
from wagtail.core.models import Locale, Page
from wagtail.tests.demosite import models
from wagtail.tests.testapp.models import SimplePage, StreamPage
from .utils import AdminAPITestCase
def get_total_page_count():
# Need to take away 1 as the root page is invisible over the API by default
return Page.objects.count() - 1
class TestAdminPageListing(AdminAPITestCase, TestPageListing):
fixtures = ['demosite.json']
def get_response(self, **params):
return self.client.get(reverse('wagtailadmin_api:pages:listing'), params)
def get_page_id_list(self, content):
return [page['id'] for page in content['items']]
# BASIC TESTS
def test_basic(self):
response = self.get_response()
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-type'], 'application/json')
# Will crash if the JSON is invalid
content = json.loads(response.content.decode('UTF-8'))
# Check that the meta section is there
self.assertIn('meta', content)
self.assertIsInstance(content['meta'], dict)
# Check that the total count is there and correct
self.assertIn('total_count', content['meta'])
self.assertIsInstance(content['meta']['total_count'], int)
self.assertEqual(content['meta']['total_count'], get_total_page_count())
# Check that the items section is there
self.assertIn('items', content)
self.assertIsInstance(content['items'], list)
# Check that each page has a meta section with type, detail_url, html_url, status and children attributes
for page in content['items']:
self.assertIn('meta', page)
self.assertIsInstance(page['meta'], dict)
self.assertEqual(set(page['meta'].keys()), {'type', 'detail_url', 'html_url', 'status', 'children', 'slug', 'first_published_at', 'latest_revision_created_at'})
# Check the type info
self.assertIsInstance(content['__types'], dict)
self.assertEqual(set(content['__types'].keys()), {
'demosite.EventPage',
'demosite.StandardIndexPage',
'demosite.PersonPage',
'demosite.HomePage',
'demosite.StandardPage',
'demosite.EventIndexPage',
'demosite.ContactPage',
'demosite.BlogEntryPage',
'demosite.BlogIndexPage',
})
self.assertEqual(set(content['__types']['demosite.EventPage'].keys()), {'verbose_name', 'verbose_name_plural'})
self.assertEqual(content['__types']['demosite.EventPage']['verbose_name'], 'event page')
self.assertEqual(content['__types']['demosite.EventPage']['verbose_name_plural'], 'event pages')
# Not applicable to the admin API
test_unpublished_pages_dont_appear_in_list = None
test_private_pages_dont_appear_in_list = None
def test_unpublished_pages_appear_in_list(self):
total_count = get_total_page_count()
page = models.BlogEntryPage.objects.get(id=16)
page.unpublish()
response = self.get_response()
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(content['meta']['total_count'], total_count)
def test_private_pages_appear_in_list(self):
total_count = get_total_page_count()
page = models.BlogIndexPage.objects.get(id=5)
page.view_restrictions.create(password='test')
        new_total_count = get_total_page_count()
        self.assertEqual(new_total_count, total_count)
response = self.get_response()
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(content['meta']['total_count'], new_total_count)
# FIELDS
# Not applicable to the admin API
test_parent_field_gives_error = None
def test_fields(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='title,date,feed_image')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'meta', 'title', 'admin_display_title', 'date', 'feed_image'})
def test_fields_default(self):
response = self.get_response(type='demosite.BlogEntryPage')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'meta', 'title', 'admin_display_title'})
self.assertEqual(set(page['meta'].keys()), {'type', 'detail_url', 'html_url', 'children', 'status', 'slug', 'first_published_at', 'latest_revision_created_at'})
def test_remove_meta_fields(self):
response = self.get_response(fields='-html_url')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'meta', 'title', 'admin_display_title'})
self.assertEqual(set(page['meta'].keys()), {'type', 'detail_url', 'slug', 'first_published_at', 'latest_revision_created_at', 'status', 'children'})
def test_remove_all_meta_fields(self):
response = self.get_response(fields='-type,-detail_url,-slug,-first_published_at,-html_url,-latest_revision_created_at,-status,-children')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'title', 'admin_display_title'})
def test_remove_fields(self):
response = self.get_response(fields='-title,-admin_display_title')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'meta'})
def test_remove_id_field(self):
response = self.get_response(fields='-id')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'meta', 'title', 'admin_display_title'})
def test_all_fields(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='*')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'meta', 'title', 'admin_display_title', 'date', 'related_links', 'tags', 'carousel_items', 'body', 'feed_image', 'feed_image_thumbnail'})
self.assertEqual(set(page['meta'].keys()), {'type', 'detail_url', 'show_in_menus', 'first_published_at', 'seo_title', 'slug', 'parent', 'html_url', 'search_description', 'locale', 'children', 'descendants', 'ancestors', 'translations', 'status', 'latest_revision_created_at'})
def test_all_fields_then_remove_something(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='*,-title,-admin_display_title,-date,-seo_title,-status')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'meta', 'related_links', 'tags', 'carousel_items', 'body', 'feed_image', 'feed_image_thumbnail'})
self.assertEqual(set(page['meta'].keys()), {'type', 'detail_url', 'show_in_menus', 'first_published_at', 'slug', 'parent', 'html_url', 'search_description', 'locale', 'children', 'descendants', 'ancestors', 'translations', 'latest_revision_created_at'})
def test_all_nested_fields(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='feed_image(*)')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page['feed_image'].keys()), {'id', 'meta', 'title', 'width', 'height', 'thumbnail'})
def test_fields_foreign_key(self):
        # Only the base of the detail_url differs here from the public API
response = self.get_response(type='demosite.BlogEntryPage', fields='title,date,feed_image')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
feed_image = page['feed_image']
if feed_image is not None:
self.assertIsInstance(feed_image, dict)
self.assertEqual(set(feed_image.keys()), {'id', 'meta', 'title'})
self.assertIsInstance(feed_image['id'], int)
self.assertIsInstance(feed_image['meta'], dict)
self.assertEqual(set(feed_image['meta'].keys()), {'type', 'detail_url', 'download_url'})
self.assertEqual(feed_image['meta']['type'], 'wagtailimages.Image')
self.assertEqual(feed_image['meta']['detail_url'], 'http://localhost/admin/api/main/images/%d/' % feed_image['id'])
def test_fields_parent(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='parent')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
parent = page['meta']['parent']
# All blog entry pages have the same parent
self.assertDictEqual(parent, {
'id': 5,
'meta': {
'type': 'demosite.BlogIndexPage',
'detail_url': 'http://localhost/admin/api/main/pages/5/',
'html_url': 'http://localhost/blog-index/',
},
'title': "Blog index"
})
def test_fields_descendants(self):
response = self.get_response(fields='descendants')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
descendants = page['meta']['descendants']
self.assertEqual(set(descendants.keys()), {'count', 'listing_url'})
self.assertIsInstance(descendants['count'], int)
self.assertEqual(descendants['listing_url'], 'http://localhost/admin/api/main/pages/?descendant_of=%d' % page['id'])
def test_fields_child_relation(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='title,related_links')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'meta', 'title', 'admin_display_title', 'related_links'})
self.assertIsInstance(page['related_links'], list)
def test_fields_ordering(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='date,title,feed_image,related_links')
# Will crash if the JSON is invalid
content = json.loads(response.content.decode('UTF-8'))
# Test field order
content = json.JSONDecoder(object_pairs_hook=collections.OrderedDict).decode(response.content.decode('UTF-8'))
field_order = [
'id',
'meta',
'title',
'admin_display_title',
'date',
'feed_image',
'related_links',
]
self.assertEqual(list(content['items'][0].keys()), field_order)
def test_fields_tags(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='tags')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'meta', 'tags', 'title', 'admin_display_title'})
self.assertIsInstance(page['tags'], list)
def test_fields_translations(self):
# Add a translation of the homepage
french = Locale.objects.create(language_code='fr')
homepage = Page.objects.get(depth=2)
french_homepage = homepage.copy_for_translation(french)
response = self.get_response(fields='translations')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
translations = page['meta']['translations']
if page['id'] == homepage.id:
self.assertEqual(len(translations), 1)
self.assertEqual(translations[0]['id'], french_homepage.id)
self.assertEqual(translations[0]['meta']['locale'], 'fr')
elif page['id'] == french_homepage.id:
self.assertEqual(len(translations), 1)
self.assertEqual(translations[0]['id'], homepage.id)
self.assertEqual(translations[0]['meta']['locale'], 'en')
else:
self.assertEqual(translations, [])
# CHILD OF FILTER
# Not applicable to the admin API
test_child_of_page_thats_not_in_same_site_gives_error = None
def test_child_of_root(self):
# Only return the homepage as that's the only child of the "root" node
# in the tree. This is different to the public API which pretends the
# homepage of the current site is the root page.
response = self.get_response(child_of='root')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [2])
def test_child_of_page_1(self):
# Public API doesn't allow this, as it's the root page
response = self.get_response(child_of=1)
json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 200)
# DESCENDANT OF FILTER
# Not applicable to the admin API
test_descendant_of_page_thats_not_in_same_site_gives_error = None
def test_descendant_of_root(self):
response = self.get_response(descendant_of='root')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [2, 4, 8, 9, 5, 16, 18, 19, 6, 10, 15, 17, 21, 22, 23, 20, 13, 14, 12])
def test_descendant_of_root_doesnt_give_error(self):
# Public API doesn't allow this
response = self.get_response(descendant_of=1)
json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 200)
# FOR EXPLORER FILTER
def make_simple_page(self, parent, title):
return parent.add_child(instance=SimplePage(title=title, content='Simple page'))
def test_for_explorer_filter(self):
movies = self.make_simple_page(Page.objects.get(pk=1), 'Movies')
visible_movies = [
self.make_simple_page(movies, 'The Way of the Dragon'),
self.make_simple_page(movies, 'Enter the Dragon'),
self.make_simple_page(movies, 'Dragons Forever'),
]
hidden_movies = [
self.make_simple_page(movies, 'The Hidden Fortress'),
self.make_simple_page(movies, 'Crouching Tiger, Hidden Dragon'),
self.make_simple_page(movies, 'Crouching Tiger, Hidden Dragon: Sword of Destiny'),
]
response = self.get_response(child_of=movies.pk, for_explorer=1)
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [page.pk for page in visible_movies])
response = self.get_response(child_of=movies.pk)
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [page.pk for page in visible_movies + hidden_movies])
def test_for_explorer_no_child_of(self):
response = self.get_response(for_explorer=1)
self.assertEqual(response.status_code, 400)
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(content, {
'message': 'filtering by for_explorer without child_of is not supported',
})
# HAS CHILDREN FILTER
def test_has_children_filter(self):
response = self.get_response(has_children='true')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [2, 4, 5, 6, 21, 20])
def test_has_children_filter_off(self):
response = self.get_response(has_children='false')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [8, 9, 16, 18, 19, 10, 15, 17, 22, 23, 13, 14, 12])
def test_has_children_filter_int(self):
response = self.get_response(has_children=1)
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [2, 4, 5, 6, 21, 20])
def test_has_children_filter_int_off(self):
response = self.get_response(has_children=0)
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [8, 9, 16, 18, 19, 10, 15, 17, 22, 23, 13, 14, 12])
def test_has_children_filter_invalid_integer(self):
response = self.get_response(has_children=3)
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "has_children must be 'true' or 'false'"})
def test_has_children_filter_invalid_value(self):
response = self.get_response(has_children='yes')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "has_children must be 'true' or 'false'"})
# TYPE FILTER
def test_type_filter_items_are_all_blog_entries(self):
response = self.get_response(type='demosite.BlogEntryPage')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(page['meta']['type'], 'demosite.BlogEntryPage')
# No specific fields available by default
self.assertEqual(set(page.keys()), {'id', 'meta', 'title', 'admin_display_title'})
def test_type_filter_multiple(self):
response = self.get_response(type='demosite.BlogEntryPage,demosite.EventPage')
content = json.loads(response.content.decode('UTF-8'))
blog_page_seen = False
event_page_seen = False
for page in content['items']:
self.assertIn(page['meta']['type'], ['demosite.BlogEntryPage', 'demosite.EventPage'])
if page['meta']['type'] == 'demosite.BlogEntryPage':
blog_page_seen = True
elif page['meta']['type'] == 'demosite.EventPage':
event_page_seen = True
# Only generic fields available
self.assertEqual(set(page.keys()), {'id', 'meta', 'title', 'admin_display_title'})
self.assertTrue(blog_page_seen, "No blog pages were found in the items")
self.assertTrue(event_page_seen, "No event pages were found in the items")
class TestAdminPageDetail(AdminAPITestCase, TestPageDetail):
fixtures = ['demosite.json']
def get_response(self, page_id, **params):
return self.client.get(reverse('wagtailadmin_api:pages:detail', args=(page_id, )), params)
def test_basic(self):
response = self.get_response(16)
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-type'], 'application/json')
# Will crash if the JSON is invalid
content = json.loads(response.content.decode('UTF-8'))
# Check the id field
self.assertIn('id', content)
self.assertEqual(content['id'], 16)
# Check that the meta section is there
self.assertIn('meta', content)
self.assertIsInstance(content['meta'], dict)
# Check the meta type
self.assertIn('type', content['meta'])
self.assertEqual(content['meta']['type'], 'demosite.BlogEntryPage')
# Check the meta detail_url
self.assertIn('detail_url', content['meta'])
self.assertEqual(content['meta']['detail_url'], 'http://localhost/admin/api/main/pages/16/')
# Check the meta html_url
self.assertIn('html_url', content['meta'])
self.assertEqual(content['meta']['html_url'], 'http://localhost/blog-index/blog-post/')
# Check the meta status
self.assertIn('status', content['meta'])
self.assertEqual(content['meta']['status'], {
'status': 'live',
'live': True,
'has_unpublished_changes': False
})
# Check the meta children
self.assertIn('children', content['meta'])
self.assertEqual(content['meta']['children'], {
'count': 0,
'listing_url': 'http://localhost/admin/api/main/pages/?child_of=16'
})
# Check the parent field
self.assertIn('parent', content['meta'])
self.assertIsInstance(content['meta']['parent'], dict)
self.assertEqual(set(content['meta']['parent'].keys()), {'id', 'meta', 'title'})
self.assertEqual(content['meta']['parent']['id'], 5)
self.assertIsInstance(content['meta']['parent']['meta'], dict)
self.assertEqual(set(content['meta']['parent']['meta'].keys()), {'type', 'detail_url', 'html_url'})
self.assertEqual(content['meta']['parent']['meta']['type'], 'demosite.BlogIndexPage')
self.assertEqual(content['meta']['parent']['meta']['detail_url'], 'http://localhost/admin/api/main/pages/5/')
self.assertEqual(content['meta']['parent']['meta']['html_url'], 'http://localhost/blog-index/')
# Check that the custom fields are included
self.assertIn('date', content)
self.assertIn('body', content)
self.assertIn('tags', content)
self.assertIn('feed_image', content)
self.assertIn('related_links', content)
self.assertIn('carousel_items', content)
# Check that the date was serialised properly
self.assertEqual(content['date'], '2013-12-02')
# Check that the tags were serialised properly
self.assertEqual(content['tags'], ['bird', 'wagtail'])
# Check that the feed image was serialised properly
self.assertIsInstance(content['feed_image'], dict)
self.assertEqual(set(content['feed_image'].keys()), {'id', 'meta', 'title'})
self.assertEqual(content['feed_image']['id'], 7)
self.assertIsInstance(content['feed_image']['meta'], dict)
self.assertEqual(set(content['feed_image']['meta'].keys()), {'type', 'detail_url', 'download_url'})
self.assertEqual(content['feed_image']['meta']['type'], 'wagtailimages.Image')
self.assertEqual(content['feed_image']['meta']['detail_url'], 'http://localhost/admin/api/main/images/7/')
# Check that the child relations were serialised properly
self.assertEqual(content['related_links'], [])
for carousel_item in content['carousel_items']:
self.assertEqual(set(carousel_item.keys()), {'id', 'meta', 'embed_url', 'link', 'caption', 'image'})
self.assertEqual(set(carousel_item['meta'].keys()), {'type'})
# Check the type info
self.assertIsInstance(content['__types'], dict)
self.assertEqual(set(content['__types'].keys()), {
'wagtailcore.Page',
'demosite.HomePage',
'demosite.BlogIndexPage',
'demosite.BlogEntryPageCarouselItem',
'demosite.BlogEntryPage',
'wagtailimages.Image'
})
self.assertEqual(set(content['__types']['demosite.BlogIndexPage'].keys()), {'verbose_name', 'verbose_name_plural'})
self.assertEqual(content['__types']['demosite.BlogIndexPage']['verbose_name'], 'blog index page')
self.assertEqual(content['__types']['demosite.BlogIndexPage']['verbose_name_plural'], 'blog index pages')
    # Overridden from the public API tests
def test_meta_parent_id_doesnt_show_root_page(self):
# Root page is visible in the admin API
response = self.get_response(2)
content = json.loads(response.content.decode('UTF-8'))
self.assertIsNotNone(content['meta']['parent'])
def test_field_ordering(self):
# Need to override this as the admin API has a __types field
response = self.get_response(16)
# Will crash if the JSON is invalid
content = json.loads(response.content.decode('UTF-8'))
# Test field order
content = json.JSONDecoder(object_pairs_hook=collections.OrderedDict).decode(response.content.decode('UTF-8'))
field_order = [
'id',
'meta',
'title',
'admin_display_title',
'body',
'tags',
'date',
'feed_image',
'feed_image_thumbnail',
'carousel_items',
'related_links',
'__types',
]
self.assertEqual(list(content.keys()), field_order)
def test_meta_status_draft(self):
# Unpublish the page
Page.objects.get(id=16).unpublish()
response = self.get_response(16)
content = json.loads(response.content.decode('UTF-8'))
self.assertIn('status', content['meta'])
self.assertEqual(content['meta']['status'], {
'status': 'draft',
'live': False,
'has_unpublished_changes': True
})
def test_meta_status_live_draft(self):
# Save revision without republish
Page.objects.get(id=16).save_revision()
response = self.get_response(16)
content = json.loads(response.content.decode('UTF-8'))
self.assertIn('status', content['meta'])
self.assertEqual(content['meta']['status'], {
'status': 'live + draft',
'live': True,
'has_unpublished_changes': True
})
def test_meta_status_scheduled(self):
# Unpublish and save revision with go live date in the future
Page.objects.get(id=16).unpublish()
tomorrow = timezone.now() + datetime.timedelta(days=1)
Page.objects.get(id=16).save_revision(approved_go_live_at=tomorrow)
response = self.get_response(16)
content = json.loads(response.content.decode('UTF-8'))
self.assertIn('status', content['meta'])
self.assertEqual(content['meta']['status'], {
'status': 'scheduled',
'live': False,
'has_unpublished_changes': True
})
def test_meta_status_expired(self):
# Unpublish and set expired flag
Page.objects.get(id=16).unpublish()
Page.objects.filter(id=16).update(expired=True)
response = self.get_response(16)
content = json.loads(response.content.decode('UTF-8'))
self.assertIn('status', content['meta'])
self.assertEqual(content['meta']['status'], {
'status': 'expired',
'live': False,
'has_unpublished_changes': True
})
def test_meta_children_for_parent(self):
# Homepage should have children
response = self.get_response(2)
content = json.loads(response.content.decode('UTF-8'))
self.assertIn('children', content['meta'])
self.assertEqual(content['meta']['children'], {
'count': 5,
'listing_url': 'http://localhost/admin/api/main/pages/?child_of=2'
})
def test_meta_descendants(self):
# Homepage should have children
response = self.get_response(2)
content = json.loads(response.content.decode('UTF-8'))
self.assertIn('descendants', content['meta'])
self.assertEqual(content['meta']['descendants'], {
'count': 18,
'listing_url': 'http://localhost/admin/api/main/pages/?descendant_of=2'
})
def test_meta_ancestors(self):
# Homepage should have children
response = self.get_response(16)
content = json.loads(response.content.decode('UTF-8'))
self.assertIn('ancestors', content['meta'])
self.assertIsInstance(content['meta']['ancestors'], list)
self.assertEqual(len(content['meta']['ancestors']), 3)
self.assertEqual(content['meta']['ancestors'][0].keys(), {'id', 'meta', 'title', 'admin_display_title'})
self.assertEqual(content['meta']['ancestors'][0]['title'], 'Root')
self.assertEqual(content['meta']['ancestors'][1]['title'], 'Home page')
self.assertEqual(content['meta']['ancestors'][2]['title'], 'Blog index')
# FIELDS
def test_remove_all_meta_fields(self):
response = self.get_response(16, fields='-type,-detail_url,-slug,-first_published_at,-html_url,-descendants,-latest_revision_created_at,-children,-ancestors,-show_in_menus,-seo_title,-parent,-status,-search_description')
content = json.loads(response.content.decode('UTF-8'))
self.assertNotIn('meta', set(content.keys()))
self.assertIn('id', set(content.keys()))
def test_remove_all_fields(self):
response = self.get_response(16, fields='_,id,type')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(set(content.keys()), {'id', 'meta', '__types'})
self.assertEqual(set(content['meta'].keys()), {'type'})
def test_all_nested_fields(self):
response = self.get_response(16, fields='feed_image(*)')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(set(content['feed_image'].keys()), {'id', 'meta', 'title', 'width', 'height', 'thumbnail'})
def test_fields_foreign_key(self):
response = self.get_response(16)
content = json.loads(response.content.decode('UTF-8'))
feed_image = content['feed_image']
self.assertIsInstance(feed_image, dict)
self.assertEqual(set(feed_image.keys()), {'id', 'meta', 'title'})
self.assertIsInstance(feed_image['id'], int)
self.assertIsInstance(feed_image['meta'], dict)
self.assertEqual(set(feed_image['meta'].keys()), {'type', 'detail_url', 'download_url'})
self.assertEqual(feed_image['meta']['type'], 'wagtailimages.Image')
self.assertEqual(feed_image['meta']['detail_url'], 'http://localhost/admin/api/main/images/%d/' % feed_image['id'])
class TestAdminPageDetailWithStreamField(AdminAPITestCase):
fixtures = ['test.json']
def setUp(self):
super().setUp()
self.homepage = Page.objects.get(url_path='/home/')
def make_stream_page(self, body):
stream_page = StreamPage(
title='stream page',
slug='stream-page',
body=body
)
return self.homepage.add_child(instance=stream_page)
def test_can_fetch_streamfield_content(self):
stream_page = self.make_stream_page('[{"type": "text", "value": "foo"}]')
response_url = reverse('wagtailadmin_api:pages:detail', args=(stream_page.id, ))
response = self.client.get(response_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response['content-type'], 'application/json')
content = json.loads(response.content.decode('utf-8'))
self.assertIn('id', content)
self.assertEqual(content['id'], stream_page.id)
self.assertIn('body', content)
self.assertEqual(len(content['body']), 1)
self.assertEqual(content['body'][0]['type'], 'text')
self.assertEqual(content['body'][0]['value'], 'foo')
self.assertTrue(content['body'][0]['id'])
def test_image_block(self):
stream_page = self.make_stream_page('[{"type": "image", "value": 1}]')
response_url = reverse('wagtailadmin_api:pages:detail', args=(stream_page.id, ))
response = self.client.get(response_url)
content = json.loads(response.content.decode('utf-8'))
# ForeignKeys in a StreamField shouldn't be translated into dictionary representation
self.assertEqual(content['body'][0]['type'], 'image')
self.assertEqual(content['body'][0]['value'], 1)
class TestCustomAdminDisplayTitle(AdminAPITestCase):
fixtures = ['test.json']
def setUp(self):
super().setUp()
self.event_page = Page.objects.get(url_path='/home/events/saint-patrick/')
def test_custom_admin_display_title_shown_on_detail_page(self):
api_url = reverse('wagtailadmin_api:pages:detail', args=(self.event_page.id, ))
response = self.client.get(api_url)
content = json.loads(response.content.decode('utf-8'))
self.assertEqual(content['title'], "Saint Patrick")
self.assertEqual(content['admin_display_title'], "Saint Patrick (single event)")
def test_custom_admin_display_title_shown_on_listing(self):
api_url = reverse('wagtailadmin_api:pages:listing')
response = self.client.get(api_url)
content = json.loads(response.content.decode('utf-8'))
matching_items = [item for item in content['items'] if item['id'] == self.event_page.id]
self.assertEqual(1, len(matching_items))
self.assertEqual(matching_items[0]['title'], "Saint Patrick")
self.assertEqual(matching_items[0]['admin_display_title'], "Saint Patrick (single event)")
# Overwrite imported test cases so Django doesn't run them
TestPageDetail = None
TestPageListing = None
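# A hedged sketch of the fields syntax these tests exercise, against a local
# Wagtail instance (host and page id are illustrative):
#
#   GET /admin/api/main/pages/?type=demosite.BlogEntryPage&fields=title,date
#   GET /admin/api/main/pages/16/?fields=*                # every field
#   GET /admin/api/main/pages/16/?fields=-title           # drop a field
#   GET /admin/api/main/pages/16/?fields=feed_image(*)    # expand a nested field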
|
FlipperPA/wagtail
|
wagtail/admin/tests/api/test_pages.py
|
Python
|
bsd-3-clause
| 33,496
|
from sysobjects.production.process_control import controlProcess
from sysdata.production.process_control_data import controlProcessData
from syscore.objects import arg_not_supplied, missing_data
from sysdata.mongodb.mongo_generic import mongoDataWithSingleKey
from syslogdiag.log_to_screen import logtoscreen
PROCESS_CONTROL_COLLECTION = "process_control"
PROCESS_CONTROL_KEY = "process_name"
class mongoControlProcessData(controlProcessData):
"""
Read and write data class to get process control data
"""
def __init__(
self, mongo_db=arg_not_supplied, log=logtoscreen("mongoControlProcessData")
):
super().__init__(log=log)
self._mongo_data = mongoDataWithSingleKey(
PROCESS_CONTROL_COLLECTION, PROCESS_CONTROL_KEY, mongo_db=mongo_db
)
@property
def mongo_data(self):
return self._mongo_data
def __repr__(self):
return "Data connection for process control, mongodb %s" % str(self.mongo_data)
def get_list_of_process_names(self):
return self.mongo_data.get_list_of_keys()
def _get_control_for_process_name_without_default(self, process_name):
result_dict = self.mongo_data.get_result_dict_for_key_without_key_value(
process_name
)
if result_dict is missing_data:
return missing_data
control_object = controlProcess.from_dict(result_dict)
return control_object
def _modify_existing_control_for_process_name(
self, process_name, new_control_object
):
self.mongo_data.add_data(
process_name, new_control_object.as_dict(), allow_overwrite=True
)
def _add_control_for_process_name(self, process_name, new_control_object):
self.mongo_data.add_data(
process_name, new_control_object.as_dict(), allow_overwrite=False
)
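# A minimal usage sketch, assuming a MongoDB instance reachable with the
# project's default connection settings; the process name is illustrative.
#
#   data = mongoControlProcessData()
#   print(data.get_list_of_process_names())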
|
robcarver17/pysystemtrade
|
sysdata/mongodb/mongo_process_control.py
|
Python
|
gpl-3.0
| 1,876
|
# this function will print a welcome message to the user
def welcome_message():
print("Hello! I'm going to ask you 10 maths questions.")
print("Let's see how many you can get right!")
# this function will ask a maths question and return the points awarded (1 or 0)
def ask_question(first_number, second_number):
print("What is", first_number, "x", second_number)
answer = input("Answer: ")
if int(answer) == first_number * second_number:
print("Correct!")
points_awarded = 1
else:
print("Wrong!")
points_awarded = 0
print("")
return points_awarded
# this function will look at the final scores and print the results
def print_final_scores(final_score):
    print("That's all the questions done. So...what was your score...?")
    print("You scored", final_score, "points out of a possible 10.")
    if final_score < 5:
        print("You need to practice your maths!")
    elif final_score < 8:
        print("That's pretty good!")
    elif final_score < 10:
        print("You did really well! Try and get 10 out of 10 next time!")
    elif final_score == 10:
        print("Wow! What a maths star you are!! I'm impressed!")
# display welcome message
welcome_message()
# set the score to zero
score = 0
# ask questions
score = score + ask_question(8,7)
score = score + ask_question(4,9)
score = score + ask_question(12,6)
score = score + ask_question(6,8)
score = score + ask_question(7,7)
score = score + ask_question(11,6)
score = score + ask_question(11,2)
score = score + ask_question(7,9)
score = score + ask_question(6,6)
score = score + ask_question(4,8)
# print the final scores
print_final_scores(score)
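# The ten calls above could equally be driven from a list of question pairs,
# a sketch of the same behaviour with less repetition:
#
#   questions = [(8, 7), (4, 9), (12, 6), (6, 8), (7, 7),
#                (11, 6), (11, 2), (7, 9), (6, 6), (4, 8)]
#   score = sum(ask_question(a, b) for a, b in questions)
#   print_final_scores(score)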
|
martinpeck/broken-python
|
mathsquiz/mathsquiz-step2.py
|
Python
|
mit
| 1,660
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import hr_timesheet_current
|
vileopratama/vitech
|
src/addons/hr_timesheet_sheet/wizard/__init__.py
|
Python
|
mit
| 128
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 6 10:25:22 2019
@author: ringhausen
"""
import volmdlr as vm
import volmdlr.primitives3d as p3d
import volmdlr.primitives2d as p2d
import math
#%%
p1=vm.Point2D((0, 0))
p2=vm.Point2D((0, 2))
p3=vm.Point2D((2, 4))
p4=vm.Point2D((4, 4))
p5=vm.Point2D((4, 3))
p6=vm.Point2D((3, 2))
p7=vm.Point2D((3, 0))
l1=p2d.OpenedRoundedLineSegments2D([p7, p1, p2], {})
l2=vm.Arc2D(p2, vm.Point2D((math.sqrt(2)/2, 3+math.sqrt(2)/2)), p3)
l3=p2d.OpenedRoundedLineSegments2D([p3, p4, p5, p6], {}, adapt_radius=True)
l4=vm.Arc2D(p6, vm.Point2D((4, 1)), p7)
#l4=vm.Arc2D(p7, vm.Point2D((4, 1)), p6)
c1=vm.Contour2D([l1, l2, l3, l4])
p8=vm.Point2D((1,1))
p9=vm.Point2D((2,1))
p10=vm.Point2D((2,2))
p11=vm.Point2D((1,2))
#inner=vm.Circle2D(vm.Point2D((2,2)), 0.5)
inner=p2d.ClosedRoundedLineSegments2D([p8, p9, p10, p11], {})
c2=vm.Contour2D([inner])
#ax = l1.MPLPlot()
#l2.MPLPlot(ax=ax)
#l3.MPLPlot(ax=ax)
#l4.MPLPlot(ax=ax)
profile = p3d.ExtrudedProfile(vm.o3D, vm.x3D, vm.y3D, c1, [c2], vm.Vector3D((0,0,1)))
profile.MPLPlot()
model = vm.VolumeModel([profile])
model.BabylonShow()
#%%
p1=vm.Point2D((0, 0))
p2=vm.Point2D((2, 0))
p3=vm.Point2D((2, 2))
p4=vm.Point2D((0, 2))
p5=vm.Point2D((0.5,0.5))
p6=vm.Point2D((1.5,0.5))
p7=vm.Point2D((1.5,1.5))
p8=vm.Point2D((0.5,1.5))
l1 = p2d.ClosedRoundedLineSegments2D([p1, p2, p3, p4], {})
c1 = vm.Contour2D(l1.primitives)
l2 = p2d.ClosedRoundedLineSegments2D([p5, p6, p7, p8], {})
c2 = vm.Contour2D(l2.primitives)
profile = p3d.ExtrudedProfile(vm.o3D, vm.x3D, vm.y3D, c1, [c2], vm.Vector3D((0,0,1)))
model = vm.VolumeModel([profile])
model.BabylonShow()
|
masfaraud/volmdlr
|
scripts/babylon_extrusion.py
|
Python
|
gpl-3.0
| 1,666
|
"""
Author: Shameer Sathar
"""
from ARFFcsvReader import ARFFcsvReader
import numpy as np
"""
Script to test the ARFF predictions output file.
"""
test = ARFFcsvReader('data/results_data.arff')
prediction = np.asarray(test.get_prediction())
"""
Positive change from 0 -> 1 is identified by taking a diff and checking for +1
"""
diff = np.diff(prediction)
linear_at = np.array(np.where(diff == 1))
"""
Predictions are a 1d array of size
(number of channels * sample points).
Here, we reshape the 1d data into 2d electrode-time maps.
"""
pos = []
for val in linear_at.transpose():
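    # predictions are flattened channel-major: 9001 sample points per channel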
pos.append([int(val/9001), int(val % 9001)])
print int(val/9001), int(val % 9001)
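# Hedged sketch (added; not part of this repo): the diff/where idiom above in
# isolation. A rising edge 0 -> 1 appears as +1 in np.diff, and np.where gives
# the index just before the transition.
demo = np.asarray([0, 0, 1, 1, 0, 1])
rising = np.where(np.diff(demo) == 1)[0]  # -> array([1, 4])
# i.e. demo[2] is the first 1 of a run, and demo[5] starts the next one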
|
ssat335/GuiPlotting
|
TestARFFcsvReader.py
|
Python
|
mit
| 677
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
import shutil
sys.path.append('..')
from functions import *
from config import *
from database import *
shutil.copyfile("gertrude.db", "../aiga.db")
database.init("../aiga.db")
def get_lines_splitted(filename):
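    # AIGA text exports use chr(169) ("©" in cp1252) as their field separator.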
result = []
with open(filename, "r") as f:
lines = f.readlines()
for line in lines:
result.append(line.split(chr(169)))
return result
def get_enfants():
enfants = {}
for fields in get_lines_splitted("aiga/enfant.txt"):
enfant = dict()
enfant['idx'] = int(fields[0])
enfant['nom'] = fields[1].decode('cp1252')
enfant['prenom'] = fields[2].decode('cp1252')
enfant['naissance'] = str2date(fields[3])
enfant['sexe'] = int(fields[4])
enfant['idx_parents'] = int(fields[5])
enfant['idx_medecin'] = int(fields[6])
enfant['entree_creche'] = str2date(fields[20])
enfants[enfant['idx']] = enfant
return enfants
def get_medecins():
medecins = {}
for fields in get_lines_splitted("aiga/medecin.txt"):
medecin = dict()
medecin['idx'] = int(fields[0])
medecin['nom'] = fields[1].decode('cp1252')
medecins[medecin['idx']] = medecin
return medecins
def get_inscriptions():
inscriptions = {}
for fields in get_lines_splitted("aiga/inscript.txt"):
inscription = dict()
inscription['idx'] = int(fields[0])
inscription['idx_enfant'] = int(fields[1])
inscription['debut'] = str2date(fields[2])
inscription['fin'] = str2date(fields[3])
inscription['heures_semaine'] = float(fields[67]) # int(fields[36])
inscription['tarif_horaire'] = float(fields[37])
inscriptions[inscription['idx']] = inscription
return inscriptions
def get_parents():
parents = {}
for fields in get_lines_splitted("aiga/parent.txt"):
# for i, field in enumerate(fields):
# if field:
# print i, ":", field,
# print
parent = dict()
parent['idx'] = int(fields[0])
parent['nom_papa'] = fields[1].decode('cp1252')
parent['prenom_papa'] = fields[2].decode('cp1252')
parent['adresse_papa'] = fields[3].decode('cp1252')
parent['code_postal_papa'] = int(fields[5]) if fields[5] else ''
parent['ville_papa'] = fields[6].decode('cp1252')
parent['telephone_domicile_papa'] = fields[7].replace(".", " ")
parent['telephone_portable_papa'] = fields[18].replace(".", " ")
parent['telephone_travail_papa'] = fields[23].replace(".", " ")
parent['email_papa'] = fields[68]
parent['allocataire'] = fields[9]
parent['revenu_papa'] = float(fields[11]) * 12
parent['nom_maman'] = fields[36].decode('cp1252')
parent['prenom_maman'] = fields[37].decode('cp1252')
parent['adresse_maman'] = fields[38]
parent['code_postal_maman'] = int(fields[39]) if fields[39] else ''
parent['ville_maman'] = fields[40]
parent['telephone_domicile_maman'] = fields[41].replace(".", " ")
parent['telephone_portable_maman'] = fields[67].replace(".", " ")
parent['email_maman'] = fields[69]
parent['revenu_maman'] = float(fields[47]) * 12
parents[parent['idx']] = parent
# print parent
return parents
def get_salaries():
salaries = []
for fields in get_lines_splitted("aiga/employe.txt"):
entry = dict()
entry['nom'] = fields[0].decode('cp1252')
entry['prenom'] = fields[2].decode('cp1252')
entry['adresse'] = fields[3].decode('cp1252')
try:
entry['code_postal'] = int(fields[5])
except:
entry['code_postal'] = ""
entry['ville'] = fields[6].decode('cp1252')
entry['telephone_domicile'] = fields[7].decode('cp1252').replace(".", " ")
salaries.append(entry)
return salaries
def get_semaines_conges():
semaines_conges = {}
for fields in get_lines_splitted("aiga/insczp.txt"):
entry = dict()
entry['idx_inscription'] = int(fields[0])
entry['heures'] = float(fields[1]) if len(fields) > 1 else 0.0
entry['semaines_conges'] = 52 - float(fields[2]) if len(fields) > 2 else 0
entry['mois_factures'] = float(fields[3]) if len(fields) > 3 else 12.0
semaines_conges[entry['idx_inscription']] = entry
return semaines_conges
def get_factures():
factures = {}
for fields in get_lines_splitted("aiga/facture.txt"):
entry = dict()
entry['idx'] = int(fields[0], 16)
entry['date'] = GetMonthStart(str2date(fields[1]))
entry['total'] = float(fields[4])
entry['idx_inscription'] = int(fields[10])
factures[entry['idx']] = entry
return factures
def insert_planning(obj, heures):
print("insert_planning(%d)" % heures)
jour = 0
while heures > 0:
count = min(heures, 10)
obj.reference[jour].InsertActivity(8 * 12, 8 * 12 + count * 12, 0)
heures -= count
jour += 1
def main():
print("Start import ...")
enfants = get_enfants()
parents = get_parents()
inscriptions = get_inscriptions()
semaines_conges = get_semaines_conges()
factures = get_factures()
medecins = get_medecins()
print("medecins", medecins)
salaries = get_salaries()
for salarie in salaries:
obj = Salarie()
obj.nom = salarie['nom']
obj.prenom = salarie['prenom']
obj.telephone_domicile = salarie['telephone_domicile']
for enfant in enfants.values():
parent = parents[enfant['idx_parents']]
obj = Inscrit()
obj.nom = enfant['nom']
obj.prenom = enfant['prenom']
obj.naissance = enfant['naissance']
obj.sexe = enfant['sexe']
obj.famille.adresse = parent['adresse_papa']
obj.famille.ville = parent['ville_papa']
obj.famille.code_postal = parent['code_postal_papa']
obj.famille.medecin_traitant = medecins[enfant['idx_medecin']]['nom']
obj.famille.numero_allocataire_caf = parent['allocataire']
# les parents
obj.famille.parents[0].nom = parent['nom_papa']
obj.famille.parents[0].prenom = parent['prenom_papa']
obj.famille.parents[0].telephone_domicile = parent['telephone_domicile_papa']
obj.famille.parents[0].telephone_portable = parent['telephone_portable_papa']
obj.famille.parents[0].telephone_travail = parent['telephone_travail_papa']
obj.famille.parents[0].email = parent['email_papa']
obj.famille.parents[0].adresse = parent['adresse_papa']
obj.famille.parents[0].ville = parent['ville_papa']
obj.famille.parents[0].code_postal = parent['code_postal_papa']
obj.famille.parents[0].revenus[0].debut = datetime.date(2010, 1, 1)
obj.famille.parents[0].revenus[0].revenu = parent['revenu_papa']
obj.famille.parents[1].nom = parent['nom_maman']
obj.famille.parents[1].prenom = parent['prenom_maman']
obj.famille.parents[1].telephone_domicile = parent['telephone_domicile_maman']
obj.famille.parents[1].telephone_portable = parent['telephone_portable_maman']
obj.famille.parents[1].email = parent['email_maman']
obj.famille.parents[1].adresse = parent['adresse_papa']
obj.famille.parents[1].ville = parent['ville_papa']
obj.famille.parents[1].code_postal = parent['code_postal_papa']
obj.famille.parents[1].revenus[0].debut = datetime.date(2010, 1, 1)
obj.famille.parents[1].revenus[0].revenu = parent['revenu_maman']
# les inscriptions
obj.inscriptions[0].delete()
del obj.inscriptions[0]
for inscription in inscriptions.values():
if inscription['idx_enfant'] == enfant['idx']:
obj_inscription = Inscription(obj)
obj_inscription.mode = MODE_TEMPS_PARTIEL
obj_inscription.debut = inscription['debut']
obj_inscription.fin = inscription['fin']
# horaires
conges = semaines_conges[inscription['idx']]
obj_inscription.semaines_conges = conges['semaines_conges']
insert_planning(obj_inscription, conges['heures'])
database.commit()
if __name__ == "__main__":
main()
|
studio1247/gertrude
|
tools/aigaimport.py
|
Python
|
gpl-3.0
| 8,430
|
"""
Fix the Sigmoid class so that it computes the sigmoid function
on the forward pass!
Scroll down to get started.
"""
import numpy as np
class Node(object):
def __init__(self, inbound_nodes=[]):
self.inbound_nodes = inbound_nodes
self.value = None
self.outbound_nodes = []
for node in inbound_nodes:
node.outbound_nodes.append(self)
    def forward(self):
        raise NotImplementedError
class Input(Node):
def __init__(self):
# An Input node has no inbound nodes,
# so no need to pass anything to the Node instantiator
Node.__init__(self)
def forward(self):
# Do nothing because nothing is calculated.
pass
class Linear(Node):
def __init__(self, X, W, b):
# Notice the ordering of the input nodes passed to the
# Node constructor.
Node.__init__(self, [X, W, b])
def forward(self):
X = self.inbound_nodes[0].value
W = self.inbound_nodes[1].value
b = self.inbound_nodes[2].value
self.value = np.dot(X, W) + b
class Sigmoid(Node):
"""
You need to fix the `_sigmoid` and `forward` methods.
"""
def __init__(self, node):
Node.__init__(self, [node])
def _sigmoid(self, x):
"""
This method is separate from `forward` because it
will be used later with `backward` as well.
`x`: A numpy array-like object.
Return the result of the sigmoid function.
Your code here!
"""
return 1 / (1 + np.exp(-x))
def forward(self):
"""
Set the value of this node to the result of the
sigmoid function, `_sigmoid`.
Your code here!
"""
# This is a dummy value to prevent numpy errors
# if you test without changing this method.
input_val = self.inbound_nodes[0].value
self.value = self._sigmoid(input_val)
def topological_sort(feed_dict):
"""
Sort the nodes in topological order using Kahn's Algorithm.
`feed_dict`: A dictionary where the key is a `Input` Node and the value is the respective value feed to that Node.
Returns a list of sorted nodes.
"""
input_nodes = [n for n in feed_dict.keys()]
G = {}
nodes = [n for n in input_nodes]
while len(nodes) > 0:
n = nodes.pop(0)
if n not in G:
G[n] = {'in': set(), 'out': set()}
for m in n.outbound_nodes:
if m not in G:
G[m] = {'in': set(), 'out': set()}
G[n]['out'].add(m)
G[m]['in'].add(n)
nodes.append(m)
L = []
S = set(input_nodes)
while len(S) > 0:
n = S.pop()
if isinstance(n, Input):
n.value = feed_dict[n]
L.append(n)
for m in n.outbound_nodes:
G[n]['out'].remove(m)
G[m]['in'].remove(n)
# if no other incoming edges add to S
if len(G[m]['in']) == 0:
S.add(m)
return L
def forward_pass(output_node, sorted_nodes):
"""
Performs a forward pass through a list of sorted Nodes.
Arguments:
`output_node`: A Node in the graph, should be the output node (have no outgoing edges).
`sorted_nodes`: a topologically sorted list of nodes.
Returns the output node's value
"""
for n in sorted_nodes:
n.forward()
return output_node.value
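# Hedged usage sketch (added): how the pieces above are typically wired together
# in this exercise; the values are the usual illustrative ones.
if __name__ == "__main__":
    X, W, b = Input(), Input(), Input()
    f = Linear(X, W, b)
    g = Sigmoid(f)
    X_ = np.array([[-1., -2.], [-1., -2.]])
    W_ = np.array([[2., -3.], [2., -3.]])
    b_ = np.array([-3., -5.])
    graph = topological_sort({X: X_, W: W_, b: b_})
    # prints sigmoid(X_ . W_ + b_), a 2x2 numpy array
    print(forward_pass(g, graph))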
|
nehal96/Deep-Learning-ND-Exercises
|
MiniFlow/4 - Sigmoid Function/miniflow.py
|
Python
|
mit
| 3,429
|
from django.conf.urls import url
from data_ingestion import views
urlpatterns = [
# ex: /data-ingestion-page/
url(r'^$', views.index, name='indexData'),
# ex: /data-ingestion-page/5/
url(r'^(?P<collection_id>[0-9]+)/$', views.detail, name='detail'),
# ex: /data-ingestion-page/5/edit
url(r'^(?P<collection_id>[0-9]+)/editData/$', views.editData, name='editData'),
url(r'^(?P<id>[0-9]+)/deleteData$', views.deleteData, name='deleteData'),
]
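# Hedged usage note (added; not part of this file): the name= arguments above
# allow URLs to be reversed instead of hard-coded. Assuming the include prefix
# shown in the "# ex:" comments, in a Django 1.x view:
#
#     from django.core.urlresolvers import reverse
#     reverse('detail', kwargs={'collection_id': 5})  # -> /data-ingestion-page/5/
#
# or in a template: {% url 'detail' collection_id=5 %}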
|
SISTEMAsw/TAMP
|
gui/data_ingestion/urls.py
|
Python
|
mit
| 468
|
import time
from struct import pack
from typing import Optional
from electrum_grs import ecc
from electrum_grs.i18n import _
from electrum_grs.util import UserCancelled
from electrum_grs.keystore import bip39_normalize_passphrase
from electrum_grs.bip32 import BIP32Node, convert_bip32_path_to_list_of_uint32
from electrum_grs.logging import Logger
from electrum_grs.plugin import runs_in_hwd_thread
from electrum_grs.plugins.hw_wallet.plugin import HardwareClientBase, HardwareHandlerBase
class GuiMixin(object):
# Requires: self.proto, self.device
handler: Optional[HardwareHandlerBase]
# ref: https://github.com/trezor/trezor-common/blob/44dfb07cfaafffada4b2ce0d15ba1d90d17cf35e/protob/types.proto#L89
messages = {
3: _("Confirm the transaction output on your {} device"),
4: _("Confirm internal entropy on your {} device to begin"),
5: _("Write down the seed word shown on your {}"),
6: _("Confirm on your {} that you want to wipe it clean"),
7: _("Confirm on your {} device the message to sign"),
8: _("Confirm the total amount spent and the transaction fee on your "
"{} device"),
10: _("Confirm wallet address on your {} device"),
14: _("Choose on your {} device where to enter your passphrase"),
'default': _("Check your {} device to continue"),
}
def callback_Failure(self, msg):
# BaseClient's unfortunate call() implementation forces us to
# raise exceptions on failure in order to unwind the stack.
# However, making the user acknowledge they cancelled
# gets old very quickly, so we suppress those. The NotInitialized
# one is misnamed and indicates a passphrase request was cancelled.
if msg.code in (self.types.FailureType.PinCancelled,
self.types.FailureType.ActionCancelled,
self.types.FailureType.NotInitialized):
raise UserCancelled()
raise RuntimeError(msg.message)
def callback_ButtonRequest(self, msg):
message = self.msg
if not message:
message = self.messages.get(msg.code, self.messages['default'])
self.handler.show_message(message.format(self.device), self.cancel)
return self.proto.ButtonAck()
def callback_PinMatrixRequest(self, msg):
show_strength = True
if msg.type == 2:
msg = _("Enter a new PIN for your {}:")
elif msg.type == 3:
msg = (_("Re-enter the new PIN for your {}.\n\n"
"NOTE: the positions of the numbers have changed!"))
else:
msg = _("Enter your current {} PIN:")
show_strength = False
pin = self.handler.get_pin(msg.format(self.device), show_strength=show_strength)
if len(pin) > 9:
self.handler.show_error(_('The PIN cannot be longer than 9 characters.'))
pin = '' # to cancel below
if not pin:
return self.proto.Cancel()
return self.proto.PinMatrixAck(pin=pin)
def callback_PassphraseRequest(self, req):
if req and hasattr(req, 'on_device') and req.on_device is True:
return self.proto.PassphraseAck()
if self.creating_wallet:
msg = _("Enter a passphrase to generate this wallet. Each time "
"you use this wallet your {} will prompt you for the "
"passphrase. If you forget the passphrase you cannot "
"access the groestlcoins in the wallet.").format(self.device)
else:
msg = _("Enter the passphrase to unlock this wallet:")
passphrase = self.handler.get_passphrase(msg, self.creating_wallet)
if passphrase is None:
return self.proto.Cancel()
passphrase = bip39_normalize_passphrase(passphrase)
ack = self.proto.PassphraseAck(passphrase=passphrase)
length = len(ack.passphrase)
if length > 50:
self.handler.show_error(_("Too long passphrase ({} > 50 chars).").format(length))
return self.proto.Cancel()
return ack
def callback_PassphraseStateRequest(self, msg):
return self.proto.PassphraseStateAck()
def callback_WordRequest(self, msg):
self.step += 1
msg = _("Step {}/24. Enter seed word as explained on "
"your {}:").format(self.step, self.device)
word = self.handler.get_word(msg)
# Unfortunately the device can't handle self.proto.Cancel()
return self.proto.WordAck(word=word)
class SafeTClientBase(HardwareClientBase, GuiMixin, Logger):
def __init__(self, handler, plugin, proto):
assert hasattr(self, 'tx_api') # ProtocolMixin already constructed?
HardwareClientBase.__init__(self, plugin=plugin)
self.proto = proto
self.device = plugin.device
self.handler = handler
self.tx_api = plugin
self.types = plugin.types
self.msg = None
self.creating_wallet = False
Logger.__init__(self)
self.used()
def __str__(self):
return "%s/%s" % (self.label(), self.features.device_id)
def label(self):
return self.features.label
def get_soft_device_id(self):
return self.features.device_id
def is_initialized(self):
return self.features.initialized
def is_pairable(self):
return not self.features.bootloader_mode
@runs_in_hwd_thread
def has_usable_connection_with_device(self):
try:
res = self.ping("electrum-GRS pinging device")
assert res == "electrum-GRS pinging device"
except BaseException:
return False
return True
def used(self):
self.last_operation = time.time()
def prevent_timeouts(self):
self.last_operation = float('inf')
@runs_in_hwd_thread
def timeout(self, cutoff):
'''Time out the client if the last operation was before cutoff.'''
if self.last_operation < cutoff:
self.logger.info("timed out")
self.clear_session()
@staticmethod
def expand_path(n):
return convert_bip32_path_to_list_of_uint32(n)
@runs_in_hwd_thread
def cancel(self):
        '''Provided here because it exists in keepkeylib but not in safetlib.'''
self.transport.write(self.proto.Cancel())
def i4b(self, x):
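        # Big-endian unsigned 32-bit serialization, as BIP32 uses for the
        # parent fingerprint and child number fields.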
return pack('>I', x)
@runs_in_hwd_thread
def get_xpub(self, bip32_path, xtype):
address_n = self.expand_path(bip32_path)
creating = False
node = self.get_public_node(address_n, creating).node
return BIP32Node(xtype=xtype,
eckey=ecc.ECPubkey(node.public_key),
chaincode=node.chain_code,
depth=node.depth,
fingerprint=self.i4b(node.fingerprint),
child_number=self.i4b(node.child_num)).to_xpub()
@runs_in_hwd_thread
def toggle_passphrase(self):
if self.features.passphrase_protection:
self.msg = _("Confirm on your {} device to disable passphrases")
else:
self.msg = _("Confirm on your {} device to enable passphrases")
enabled = not self.features.passphrase_protection
self.apply_settings(use_passphrase=enabled)
@runs_in_hwd_thread
def change_label(self, label):
self.msg = _("Confirm the new label on your {} device")
self.apply_settings(label=label)
@runs_in_hwd_thread
def change_homescreen(self, homescreen):
self.msg = _("Confirm on your {} device to change your home screen")
self.apply_settings(homescreen=homescreen)
@runs_in_hwd_thread
def set_pin(self, remove):
if remove:
self.msg = _("Confirm on your {} device to disable PIN protection")
elif self.features.pin_protection:
self.msg = _("Confirm on your {} device to change your PIN")
else:
self.msg = _("Confirm on your {} device to set a PIN")
self.change_pin(remove)
@runs_in_hwd_thread
def clear_session(self):
'''Clear the session to force pin (and passphrase if enabled)
re-entry. Does not leak exceptions.'''
self.logger.info(f"clear session: {self}")
self.prevent_timeouts()
try:
super(SafeTClientBase, self).clear_session()
except BaseException as e:
# If the device was removed it has the same effect...
self.logger.info(f"clear_session: ignoring error {e}")
@runs_in_hwd_thread
def get_public_node(self, address_n, creating):
self.creating_wallet = creating
return super(SafeTClientBase, self).get_public_node(address_n)
@runs_in_hwd_thread
def close(self):
        '''Called when our wallet was closed or the device was removed.'''
self.logger.info("closing client")
self.clear_session()
# Release the device
self.transport.close()
def firmware_version(self):
f = self.features
return (f.major_version, f.minor_version, f.patch_version)
def atleast_version(self, major, minor=0, patch=0):
return self.firmware_version() >= (major, minor, patch)
@staticmethod
def wrapper(func):
'''Wrap methods to clear any message box they opened.'''
def wrapped(self, *args, **kwargs):
try:
self.prevent_timeouts()
return func(self, *args, **kwargs)
finally:
self.used()
self.handler.finished()
self.creating_wallet = False
self.msg = None
return wrapped
@staticmethod
def wrap_methods(cls):
for method in ['apply_settings', 'change_pin',
'get_address', 'get_public_node',
'load_device_by_mnemonic', 'load_device_by_xprv',
'recovery_device', 'reset_device', 'sign_message',
'sign_tx', 'wipe_device']:
setattr(cls, method, cls.wrapper(getattr(cls, method)))
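# Hedged sketch (added; not part of electrum-grs): wrap_methods above is a generic
# "patch a fixed list of methods with a wrapper" idiom. A minimal standalone
# version of the same pattern:
#
#     def logged(func):
#         def wrapped(self, *args, **kwargs):
#             print("calling %s" % func.__name__)
#             return func(self, *args, **kwargs)
#         return wrapped
#
#     class Demo:
#         def ping(self):
#             return "pong"
#
#     for name in ['ping']:
#         setattr(Demo, name, logged(getattr(Demo, name)))
#
#     # Demo().ping() now logs "calling ping" before returning "pong".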
|
GroestlCoin/electrum-grs
|
electrum_grs/plugins/safe_t/clientbase.py
|
Python
|
gpl-3.0
| 10,155
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
GrassAlgorithm.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import time
import uuid
import importlib
import re
from PyQt4.QtGui import QIcon
from qgis.core import QgsRasterLayer
from qgis.utils import iface
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.ProcessingConfig import ProcessingConfig
from processing.core.ProcessingLog import ProcessingLog
from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
from processing.core.parameters import getParameterFromString, ParameterVector, ParameterMultipleInput, ParameterExtent, ParameterNumber, ParameterSelection, ParameterRaster, ParameterTable, ParameterBoolean, ParameterString
from processing.core.outputs import getOutputFromString, OutputRaster, OutputVector, OutputFile, OutputHTML
from GrassUtils import GrassUtils
from processing.tools import dataobjects, system
class GrassAlgorithm(GeoAlgorithm):
GRASS_OUTPUT_TYPE_PARAMETER = 'GRASS_OUTPUT_TYPE_PARAMETER'
GRASS_MIN_AREA_PARAMETER = 'GRASS_MIN_AREA_PARAMETER'
GRASS_SNAP_TOLERANCE_PARAMETER = 'GRASS_SNAP_TOLERANCE_PARAMETER'
GRASS_REGION_EXTENT_PARAMETER = 'GRASS_REGION_PARAMETER'
GRASS_REGION_CELLSIZE_PARAMETER = 'GRASS_REGION_CELLSIZE_PARAMETER'
GRASS_REGION_ALIGN_TO_RESOLUTION = '-a_r.region'
OUTPUT_TYPES = ['auto', 'point', 'line', 'area']
def __init__(self, descriptionfile):
GeoAlgorithm.__init__(self)
self.descriptionFile = descriptionfile
self.defineCharacteristicsFromFile()
self.numExportedLayers = 0
# GRASS console output, needed to do postprocessing in case GRASS
# dumps results to the console
self.consoleOutput = []
def getCopy(self):
newone = GrassAlgorithm(self.descriptionFile)
newone.provider = self.provider
return newone
def getIcon(self):
return QIcon(os.path.dirname(__file__) + '/../../images/grass.png')
def help(self):
return False, 'http://grass.osgeo.org/grass64/manuals/' + self.grassName + '.html'
def getParameterDescriptions(self):
descs = {}
_, helpfile = self.help()
try:
infile = open(helpfile)
lines = infile.readlines()
for i in range(len(lines)):
if lines[i].startswith('<DT><b>'):
for param in self.parameters:
searchLine = '<b>' + param.name + '</b>'
if searchLine in lines[i]:
i += 1
descs[param.name] = (lines[i])[4:-6]
break
infile.close()
except Exception:
pass
return descs
def defineCharacteristicsFromFile(self):
lines = open(self.descriptionFile)
line = lines.readline().strip('\n').strip()
self.grassName = line
line = lines.readline().strip('\n').strip()
self.name = line
line = lines.readline().strip('\n').strip()
self.group = line
hasRasterOutput = False
hasVectorInput = False
vectorOutputs = 0
line = lines.readline().strip('\n').strip()
while line != '':
try:
line = line.strip('\n').strip()
if line.startswith('Parameter'):
parameter = getParameterFromString(line)
self.addParameter(parameter)
if isinstance(parameter, ParameterVector):
hasVectorInput = True
if isinstance(parameter, ParameterMultipleInput) \
and parameter.datatype < 3:
hasVectorInput = True
elif line.startswith('*Parameter'):
param = getParameterFromString(line[1:])
param.isAdvanced = True
self.addParameter(param)
else:
output = getOutputFromString(line)
self.addOutput(output)
if isinstance(output, OutputRaster):
hasRasterOutput = True
elif isinstance(output, OutputVector):
vectorOutputs += 1
line = lines.readline().strip('\n').strip()
except Exception, e:
ProcessingLog.addToLog(ProcessingLog.LOG_ERROR,
self.tr('Could not open GRASS algorithm: %s.\n%s' % (self.descriptionFile, line)))
raise e
lines.close()
self.addParameter(ParameterExtent(self.GRASS_REGION_EXTENT_PARAMETER,
self.tr('GRASS region extent')))
if hasRasterOutput:
self.addParameter(ParameterNumber(
self.GRASS_REGION_CELLSIZE_PARAMETER,
self.tr('GRASS region cellsize (leave 0 for default)'),
0, None, 0.0))
if hasVectorInput:
param = ParameterNumber(self.GRASS_SNAP_TOLERANCE_PARAMETER,
'v.in.ogr snap tolerance (-1 = no snap)',
-1, None, -1.0)
param.isAdvanced = True
self.addParameter(param)
param = ParameterNumber(self.GRASS_MIN_AREA_PARAMETER,
'v.in.ogr min area', 0, None, 0.0001)
param.isAdvanced = True
self.addParameter(param)
if vectorOutputs == 1:
param = ParameterSelection(self.GRASS_OUTPUT_TYPE_PARAMETER,
'v.out.ogr output type',
self.OUTPUT_TYPES)
param.isAdvanced = True
self.addParameter(param)
def getDefaultCellsize(self):
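        # Default cellsize = the coarsest x resolution among the input rasters,
        # i.e. the maximum of (extent width / width in pixels); falls back to 1
        # if there are no raster inputs.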
cellsize = 0
for param in self.parameters:
if param.value:
if isinstance(param, ParameterRaster):
if isinstance(param.value, QgsRasterLayer):
layer = param.value
else:
layer = dataobjects.getObjectFromUri(param.value)
cellsize = max(cellsize, (layer.extent().xMaximum()
- layer.extent().xMinimum())
/ layer.width())
elif isinstance(param, ParameterMultipleInput):
layers = param.value.split(';')
for layername in layers:
layer = dataobjects.getObjectFromUri(layername)
if isinstance(layer, QgsRasterLayer):
cellsize = max(cellsize, (
layer.extent().xMaximum()
- layer.extent().xMinimum())
/ layer.width())
if cellsize == 0:
cellsize = 1
return cellsize
def processAlgorithm(self, progress):
if system.isWindows():
path = GrassUtils.grassPath()
if path == '':
raise GeoAlgorithmExecutionException(
self.tr('GRASS folder is not configured.\nPlease '
'configure it before running GRASS algorithms.'))
commands = []
self.exportedLayers = {}
outputCommands = []
# If GRASS session has been created outside of this algorithm then
# get the list of layers loaded in GRASS otherwise start a new
# session
existingSession = GrassUtils.sessionRunning
if existingSession:
self.exportedLayers = GrassUtils.getSessionLayers()
else:
GrassUtils.startGrassSession()
# 1: Export layer to grass mapset
for param in self.parameters:
if isinstance(param, ParameterRaster):
if param.value is None:
continue
value = param.value
# Check if the layer hasn't already been exported in, for
# example, previous GRASS calls in this session
if value in self.exportedLayers.keys():
continue
else:
self.setSessionProjectionFromLayer(value, commands)
commands.append(self.exportRasterLayer(value))
if isinstance(param, ParameterVector):
if param.value is None:
continue
value = param.value
if value in self.exportedLayers.keys():
continue
else:
self.setSessionProjectionFromLayer(value, commands)
commands.append(self.exportVectorLayer(value))
if isinstance(param, ParameterTable):
pass
if isinstance(param, ParameterMultipleInput):
if param.value is None:
continue
layers = param.value.split(';')
if layers is None or len(layers) == 0:
continue
if param.datatype == ParameterMultipleInput.TYPE_RASTER:
for layer in layers:
if layer in self.exportedLayers.keys():
continue
else:
self.setSessionProjectionFromLayer(layer, commands)
commands.append(self.exportRasterLayer(layer))
elif param.datatype == ParameterMultipleInput.TYPE_VECTOR_ANY:
for layer in layers:
if layer in self.exportedLayers.keys():
continue
else:
self.setSessionProjectionFromLayer(layer, commands)
commands.append(self.exportVectorLayer(layer))
self.setSessionProjectionFromProject(commands)
region = \
str(self.getParameterValue(self.GRASS_REGION_EXTENT_PARAMETER))
regionCoords = region.split(',')
command = 'g.region'
command += ' n=' + str(regionCoords[3])
command += ' s=' + str(regionCoords[2])
command += ' e=' + str(regionCoords[1])
command += ' w=' + str(regionCoords[0])
cellsize = self.getParameterValue(self.GRASS_REGION_CELLSIZE_PARAMETER)
if cellsize:
command += ' res=' + str(cellsize)
else:
command += ' res=' + str(self.getDefaultCellsize())
alignToResolution = \
self.getParameterValue(self.GRASS_REGION_ALIGN_TO_RESOLUTION)
if alignToResolution:
command += ' -a'
commands.append(command)
# 2: Set parameters and outputs
command = self.grassName
for param in self.parameters:
if param.value is None or param.value == '':
continue
            if param.name in [self.GRASS_REGION_CELLSIZE_PARAMETER,
                              self.GRASS_REGION_EXTENT_PARAMETER,
                              self.GRASS_MIN_AREA_PARAMETER,
                              self.GRASS_SNAP_TOLERANCE_PARAMETER,
                              self.GRASS_OUTPUT_TYPE_PARAMETER,
                              self.GRASS_REGION_ALIGN_TO_RESOLUTION]:
continue
if isinstance(param, (ParameterRaster, ParameterVector)):
value = param.value
if value in self.exportedLayers.keys():
command += ' ' + param.name + '=' \
+ self.exportedLayers[value]
else:
command += ' ' + param.name + '=' + value
elif isinstance(param, ParameterMultipleInput):
s = param.value
for layer in self.exportedLayers.keys():
s = s.replace(layer, self.exportedLayers[layer])
s = s.replace(';', ',')
command += ' ' + param.name + '=' + s
elif isinstance(param, ParameterBoolean):
if param.value:
command += ' ' + param.name
elif isinstance(param, ParameterSelection):
idx = int(param.value)
command += ' ' + param.name + '=' + str(param.options[idx])
elif isinstance(param, ParameterString):
command += ' ' + param.name + '="' + str(param.value) + '"'
else:
command += ' ' + param.name + '="' + str(param.value) + '"'
uniqueSufix = str(uuid.uuid4()).replace('-', '')
for out in self.outputs:
if isinstance(out, OutputFile):
if out.name == 'outputtext':
                    # The 'outputtext' file is generated by piping output
                    # from GRASS; it is not an actual GRASS command.
command += ' > ' + out.value
else:
command += ' ' + out.name + '="' + out.value + '"'
elif not isinstance(out, OutputHTML):
                # HTML files are not generated by GRASS, only by us to
                # decorate GRASS output, so we skip them. A unique suffix is
                # appended to the output name to make sure it is unique if the
                # session uses this algorithm several times.
uniqueOutputName = out.name + uniqueSufix
command += ' ' + out.name + '=' + uniqueOutputName
# Add output file to exported layers, to indicate that
# they are present in GRASS
self.exportedLayers[out.value] = uniqueOutputName
command += ' --overwrite'
commands.append(command)
# 3: Export resulting layers to a format that qgis can read
for out in self.outputs:
if isinstance(out, OutputRaster):
filename = out.value
# Raster layer output: adjust region to layer before
# exporting
commands.append('g.region rast=' + out.name + uniqueSufix)
outputCommands.append('g.region rast=' + out.name
+ uniqueSufix)
if self.grassName == 'r.composite':
command = 'r.out.tiff -t --verbose'
command += ' input='
command += out.name + uniqueSufix
command += ' output="' + filename + '"'
commands.append(command)
outputCommands.append(command)
else:
command = 'r.out.gdal -c createopt="TFW=YES,COMPRESS=LZW"'
command += ' input='
if self.grassName == 'r.horizon':
command += out.name + uniqueSufix + '_0'
else:
command += out.name + uniqueSufix
command += ' output="' + filename + '"'
commands.append(command)
outputCommands.append(command)
if isinstance(out, OutputVector):
filename = out.value
command = 'v.out.ogr -s -c -e -z input=' + out.name + uniqueSufix
command += ' dsn="' + os.path.dirname(out.value) + '"'
command += ' format=ESRI_Shapefile'
command += ' olayer=' + os.path.basename(out.value)[:-4]
typeidx = \
self.getParameterValue(self.GRASS_OUTPUT_TYPE_PARAMETER)
outtype = ('auto' if typeidx
is None else self.OUTPUT_TYPES[typeidx])
command += ' type=' + outtype
commands.append(command)
outputCommands.append(command)
# 4: Run GRASS
loglines = []
loglines.append(self.tr('GRASS execution commands'))
for line in commands:
progress.setCommand(line)
loglines.append(line)
if ProcessingConfig.getSetting(GrassUtils.GRASS_LOG_COMMANDS):
ProcessingLog.addToLog(ProcessingLog.LOG_INFO, loglines)
self.consoleOutput = GrassUtils.executeGrass(commands, progress,
outputCommands)
self.postProcessResults()
# If the session has been created outside of this algorithm, add
# the new GRASS layers to it otherwise finish the session
if existingSession:
GrassUtils.addSessionLayers(self.exportedLayers)
else:
GrassUtils.endGrassSession()
def postProcessResults(self):
name = self.commandLineName().replace('.', '_')[len('grass:'):]
try:
module = importlib.import_module('processing.algs.grass.ext.' + name)
except ImportError:
return
if hasattr(module, 'postProcessResults'):
func = getattr(module, 'postProcessResults')
func(self)
def exportVectorLayer(self, orgFilename):
# TODO: improve this. We are now exporting if it is not a shapefile,
# but the functionality of v.in.ogr could be used for this.
# We also export if there is a selection
if not os.path.exists(orgFilename) or not orgFilename.endswith('shp'):
layer = dataobjects.getObjectFromUri(orgFilename, False)
if layer:
filename = dataobjects.exportVectorLayer(layer)
else:
layer = dataobjects.getObjectFromUri(orgFilename, False)
if layer:
useSelection = \
ProcessingConfig.getSetting(ProcessingConfig.USE_SELECTED)
if useSelection and layer.selectedFeatureCount() != 0:
filename = dataobjects.exportVectorLayer(layer)
else:
filename = orgFilename
else:
filename = orgFilename
destFilename = self.getTempFilename()
self.exportedLayers[orgFilename] = destFilename
command = 'v.in.ogr'
min_area = self.getParameterValue(self.GRASS_MIN_AREA_PARAMETER)
command += ' min_area=' + str(min_area)
snap = self.getParameterValue(self.GRASS_SNAP_TOLERANCE_PARAMETER)
command += ' snap=' + str(snap)
command += ' dsn="' + os.path.dirname(filename) + '"'
command += ' layer=' + os.path.basename(filename)[:-4]
command += ' output=' + destFilename
command += ' --overwrite -o'
return command
def setSessionProjectionFromProject(self, commands):
if not GrassUtils.projectionSet:
proj4 = iface.mapCanvas().mapRenderer().destinationCrs().toProj4()
command = 'g.proj'
command += ' -c'
command += ' proj4="' + proj4 + '"'
commands.append(command)
GrassUtils.projectionSet = True
def setSessionProjectionFromLayer(self, layer, commands):
if not GrassUtils.projectionSet:
qGisLayer = dataobjects.getObjectFromUri(layer)
if qGisLayer:
proj4 = str(qGisLayer.crs().toProj4())
command = 'g.proj'
command += ' -c'
command += ' proj4="' + proj4 + '"'
commands.append(command)
GrassUtils.projectionSet = True
def exportRasterLayer(self, layer):
destFilename = self.getTempFilename()
self.exportedLayers[layer] = destFilename
if bool(re.match('netcdf', layer, re.I)) or bool(re.match('hdf', layer, re.I)):
command = 'r.in.gdal'
else:
command = 'r.external -r'
command += ' input="' + layer + '"'
command += ' band=1'
command += ' output=' + destFilename
command += ' --overwrite -o'
return command
def getTempFilename(self):
filename = 'tmp' + str(time.time()).replace('.', '') \
+ str(system.getNumExportedLayers())
return filename
def commandLineName(self):
return 'grass:' + self.name[:self.name.find(' ')]
def checkBeforeOpeningParametersDialog(self):
msg = GrassUtils.checkGrassIsInstalled()
if msg is not None:
html = self.tr(
'<p>This algorithm requires GRASS to be run. Unfortunately, '
'it seems that GRASS is not installed in your system, or it '
'is not correctly configured to be used from QGIS</p>'
'<p><a href="http://docs.qgis.org/testing/en/docs/user_manual/processing/3rdParty.html">Click here</a> '
'to know more about how to install and configure GRASS to be used with QGIS</p>')
return html
def checkParameterValuesBeforeExecuting(self):
name = self.commandLineName().replace('.', '_')[len('grass:'):]
try:
module = importlib.import_module('processing.algs.grass.ext.' + name)
except ImportError:
return
if hasattr(module, 'checkParameterValuesBeforeExecuting'):
func = getattr(module, 'checkParameterValuesBeforeExecuting')
return func(self)
def getPostProcessingErrorMessage(self, wrongLayers):
html = GeoAlgorithm.getPostProcessingErrorMessage(self, wrongLayers)
msg = GrassUtils.checkGrassIsInstalled(True)
html += self.tr(
'<p>This algorithm requires GRASS to be run. A test to check '
'if GRASS is correctly installed and configured in your system '
'has been performed, with the following result:</p><ul><i>')
if msg is None:
html += self.tr('GRASS seems to be correctly installed and '
'configured</i></li></ul>')
else:
html += msg + '</i></li></ul>'
html += self.tr(
'<p><a href="http://docs.qgis.org/testing/en/docs/user_manual/processing/3rdParty.html">Click here</a> '
'to know more about how to install and configure GRASS to be used with QGIS</p>')
return html
|
nextgis/NextGIS_QGIS_open
|
python/plugins/processing/algs/grass/GrassAlgorithm.py
|
Python
|
gpl-2.0
| 22,837
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-27 15:08
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('tracker', '0003_event_ispublic'),
]
operations = [
migrations.CreateModel(
name='Pin',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.AddField(
model_name='event',
name='isPermanent',
field=models.BooleanField(default=False, verbose_name=b'keep this event forever'),
),
migrations.AlterField(
model_name='event',
name='isPublic',
field=models.BooleanField(default=False, verbose_name=b'share this event with other users'),
),
migrations.AddField(
model_name='pin',
name='event',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tracker.Event'),
),
migrations.AddField(
model_name='pin',
name='owner',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
JakeWimberley/Weathredds
|
tracker/migrations/0004_auto_20160627_1108.py
|
Python
|
gpl-3.0
| 1,456
|
def Setup(Settings,DefaultModel):
# set8backinexpansionism/expand_lr_minlen30_kfold.py
Settings["experiment_name"] = "expand_lr_minlen30_kfold"
Settings["graph_histories"] = ['together']
n=0
from keras.preprocessing.image import ImageDataGenerator
from DatasetHandler.custom_image import ImageDataGenerator as custom_ImageDataGenerator
image_generator = custom_ImageDataGenerator(
featurewise_center = False, # set input mean to 0 over the dataset
samplewise_center = False, # set each sample mean to 0
featurewise_std_normalization = False, # divide inputs by std of the dataset
samplewise_std_normalization = False, # divide each input by its std
zca_whitening = False, # apply ZCA whitening
rotation_range = 0, # randomly rotate images in the range (degrees, 0 to 180)
width_shift_range = 0.1, # randomly shift images horizontally (fraction of total width)
height_shift_range = 0.1, # randomly shift images vertically (fraction of total height)
horizontal_flip = False, # randomly flip images
vertical_flip = True, # randomly flip images
)
# Set these values:
number_of_images_from_one = 2
source_dataset = "5556x_minlen30_640px"
target_dataset = "5556x_minlen30_640px_2x_expanded_lr"
pixels = 640
epochs = 500
use_dump_file = 'SegmentsData_marked_R100_4Tables.dump' # -> * new XYZ_expanded.dump
model_type = 'img_osm_mix'
# Feed the monkey and don't touch anything!
Settings["models"][n]["noncanon_dataset"] = 'expand_existing_dataset'
Settings["models"][n]["noncanon_dataset_imagegenerator"] = image_generator
Settings["models"][n]["noncanon_dataset_genfrom1"] = number_of_images_from_one
Settings["models"][n]["model_type"] = model_type
Settings["models"][n]["dataset_name"] = target_dataset
Settings["models"][n]["source_dataset"] = source_dataset
Settings["models"][n]["pixels"] = pixels
Settings["models"][n]["cnn_model"] = 'resnet50'
Settings["models"][n]["unique_id"] = 'expanded: ' + target_dataset
Settings["models"][n]["cooking_method"] = 'generators' # 'direct' or 'generators'
Settings["models"][n]["epochs"] = epochs
Settings["models"][n]["dump_file_override"] = use_dump_file
Settings["models"][n]["dump_file_expanded"] = use_dump_file[:-5] + '_expanded.dump'
Settings["models"][n]["k_fold_crossvalidation"] = True
Settings["models"][n]["crossvalidation_k"] = 10
Settings["graph_histories"] = []
return Settings
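# Hedged sketch (added; not part of this project): what a 2x dataset expansion
# with a Keras ImageDataGenerator looks like on its own, independent of the
# Settings machinery above. Shapes and parameter values are illustrative only.
def _expansion_demo():
    from keras.preprocessing.image import ImageDataGenerator
    import numpy as np
    gen = ImageDataGenerator(width_shift_range=0.1,
                             height_shift_range=0.1,
                             vertical_flip=True)
    x = np.random.rand(1, 640, 640, 3)  # one fake 640px RGB image
    flow = gen.flow(x, batch_size=1)
    # number_of_images_from_one = 2: draw two augmented variants of the image
    return [next(flow)[0] for _ in range(2)]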
|
previtus/MGR-Project-Code
|
Settings/set7_dataset-aggressive-expansion/expand_lr_minlen30_kfold.py
|
Python
|
mit
| 2,610
|
"""
The Wub Machine
Python web interface
started August 5 2011 by Peter Sobot (petersobot.com)
"""
__author__ = "Peter Sobot"
__copyright__ = "Copyright (C) 2011 Peter Sobot"
__version__ = "2.2"
import json, time, locale, traceback, gc, logging, os, database, urllib
import tornado.ioloop, tornado.web, tornado.template, tornado.httpclient, tornado.escape, tornado.websocket
import tornadio, tornadio.server
import config
from datetime import datetime, timedelta
from hashlib import md5
# Wubmachine-specific libraries
from helpers.remixqueue import RemixQueue
from helpers.soundcloud import SoundCloud
from helpers.cleanup import Cleanup
from helpers.daemon import Daemon
from helpers.web import *
# Kinds of remixers.
from remixers.dubstep import Dubstep
from remixers.electrohouse import ElectroHouse
remixers = {
'Dubstep': Dubstep,
'ElectroHouse': ElectroHouse
}
# Check dependencies...
# Check required version numbers
assert tornado.version_info >= (2, 0, 0), "Tornado v2 or greater is required!"
assert tornadio.__version__ >= (0, 0, 4), "Tornadio v0.0.4 or greater is required!"
# Instead of using xheaders, which doesn't seem to work under Tornadio, we do this:
if config.nginx:
class RequestHandler(tornado.web.RequestHandler):
"""
Patched Tornado RequestHandler to take care of Nginx ip proxying
"""
def __init__(self, application, request, **kwargs):
if 'X-Real-Ip' in request.headers:
request.remote_ip = request.headers['X-Real-Ip']
tornado.web.RequestHandler.__init__(self, application, request, **kwargs)
else:
RequestHandler = tornado.web.RequestHandler
# Handlers
class MainHandler(RequestHandler):
def get(self):
js = ("window.wubconfig = %s;" % json.dumps(config.javascript)) + javascripts
kwargs = {
"isOpen": r.isAccepting(),
"track": sc.frontPageTrack(),
"isErroring": r.errorRateExceeded(),
'count': locale.format("%d", trackCount, grouping=True),
'cleanup_timeout': time_in_words(config.cleanup_timeout),
'javascript': js,
'connectform': connectform
}
self.write(templates.load('index.html').generate(**kwargs))
def head(self):
self.finish()
class ProgressSocket(tornadio.SocketConnection):
listeners = {}
@classmethod
def update(self, uid, data):
try:
self.listeners[uid].send(data)
except:
pass
def on_open(self, *args, **kwargs):
try:
self.uid = kwargs['extra']
if self.uid in r.finished:
try:
self.close()
except:
pass
else:
self.listeners[self.uid] = self
if self.uid in r.remixers:
r.remixers[self.uid].being_watched = True
log.info("Remixer %s is now being watched..." % self.uid)
r.cleanup()
if r.isAvailable():
try:
r.start(self.uid)
except:
self.send({ 'status': -1, 'text': "Sorry, something went wrong. Please try again later!", 'progress': 0, 'uid': self.uid, 'time': time.time() })
self.close()
log.info("Opened progress socket for %s" % self.uid)
except:
log.error("Failed to open progress socket for %s because: %s" % (self.uid, traceback.format_exc()) )
def on_close(self):
try:
log.info("Progress socket for %s received on_close event. Stopping..." % self.uid)
try:
r.stop(self.uid)
except:
pass
if self.uid in r.remixers:
r.remixers[self.uid].being_watched = False
if self.uid in self.listeners:
del self.listeners[self.uid]
log.info("Closed progress socket for %s" % self.uid)
except:
log.warning("Failed to close progress socket for %s due to:\n%s" % (self.uid, traceback.format_exc()))
def on_message(self, message):
pass
class MonitorSocket(tornadio.SocketConnection):
monitors = set()
@classmethod
def update(self, uid):
try:
if self.monitors:
data = MonitorHandler.track(uid)
for m in self.monitors.copy():
try:
m.send(data.decode('utf-8'))
m.send(MonitorHandler.overview())
except:
log.error("Failed to send data to monitor.")
except:
log.error("Major failure in MonitorSocket.update.")
def on_open(self, *args, **kwargs):
log.info("Opened monitor socket.")
self.monitors.add(self)
def on_close(self):
log.info("Closed monitor socket.")
self.monitors.remove(self)
def on_message(self, message):
pass
class MonitorHandler(RequestHandler):
keys = ['upload', 'download', 'remixTrue', 'remixFalse', 'shareTrue', 'shareFalse']
@tornado.web.asynchronous
def get(self, sub=None, uid=None):
if sub:
sections = {
'graph': self.graph,
'overview': self.overview,
'latest': self.latest,
'remixqueue': self.remixqueue,
'timespan' : self.timespan
}
if sub in sections:
self.write(sections[sub]())
self.finish()
else:
raise tornado.web.HTTPError(404)
else:
kwargs = {
'overview': self.overview(),
'latest': self.latest(),
'config': "window.wubconfig = %s;" % json.dumps(config.javascript)
}
self.write(templates.load('monitor.html').generate(**kwargs))
self.finish()
def clearqueue(self):
del self.watchqueue[:]
@classmethod
def histogram(self, interval=None):
db = database.Session()
try:
query = db.query(database.Event).add_columns('count(*)', database.Event.action, database.Event.success).group_by('action', 'success')
if interval:
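                # timedelta(**{interval: 1}) builds e.g. timedelta(hours=1)
                # or timedelta(days=1), so the unit is parametrised by name.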
limit = datetime.now() - timedelta(**{ interval: 1 })
d = query.filter(database.Event.start > limit).all()
else:
d = query.all()
n = {}
for k in self.keys:
n[k] = 0
for a in d:
if a.action == 'upload' or a.action == 'download':
n[a.action] = int(a.__dict__['count(*)'])
elif a.action == 'remix' or a.action == 'share':
n["%s%s" % (a.action, a.success)] = int(a.__dict__['count(*)'])
return n
except:
log.error("DB read exception:\n%s" % traceback.format_exc())
return {}
def remixqueue(self):
self.set_header("Content-Type", 'text/plain')
return str("Remixers: %s\nFinished: %s\nQueue: %s\nRunning: %s" % (r.remixers, r.finished, r.queue, r.running))
@classmethod
def overview(self):
kwargs = {
'ct': str(datetime.now()),
'inqueue': len(r.queue),
'processing': len(r.running),
'maximum': config.maximum_concurrent_remixes,
'maximumexceeded': len(r.remixers) > config.maximum_concurrent_remixes,
'hourly': config.hourly_remix_limit,
'hourlyexceeded': r.countInHour() >= config.hourly_remix_limit,
'errorInterval': 1,
'errorRate': r.errorRate(),
'errorRateExceeded': r.errorRateExceeded(),
'isOpen': r.isAccepting(),
'hour': MonitorHandler.histogram('hours'),
'day': MonitorHandler.histogram('days'),
'ever': MonitorHandler.histogram(),
}
return templates.load('overview.html').generate(**kwargs)
def current(self):
running = [v for k, v in r.remixers.iteritems() if k in r.running]
return templates.load('current.html').generate(c=running)
def shared(self):
db = database.Session()
try:
d = db.query(database.Event).filter_by(action = "sharing", success = True).group_by(database.Event.uid).order_by(database.Event.id.desc()).limit(6).all()
except:
log.error("DB read exception:\n%s" % traceback.format_exc())
return templates.load('shared.html').generate(tracks=d)
@classmethod
def track(self, track):
db = database.Session()
if not track:
raise tornado.web.HTTPError(400)
if isinstance(track, database.Track):
try:
track = db.merge(track)
except:
log.error("DB read exception:\n%s" % traceback.format_exc())
db.rollback()
else:
if isinstance(track, dict) and 'uid' in track:
track = track['uid']
elif not isinstance(track, str) or len(track) != 32:
return ''
try:
tracks = db.query(database.Track).filter(database.Track.uid == track).all()
except:
log.error("DB read exception:\n%s" % traceback.format_exc())
db.rollback()
if not tracks:
return ''
else:
track = tracks[0]
for stat in ['upload', 'remix', 'share', 'download']:
track.__setattr__(stat, None)
events = {}
for event in track.events:
events[event.action] = event
track.upload = events.get('upload')
track.remix = events.get('remix')
track.share = events.get('share')
track.download = events.get('download')
track.running = track.uid in r.running or (track.share and track.share.start and not track.share.end and track.share.success is None)
track.failed = (track.remix and track.remix.success == False) or (track.share and track.share.success == False)
if track.failed:
if track.remix.success is False:
track.failure = track.remix.detail
elif track.share.detail is not None:
track.failure = track.share.detail
else:
track.failure = ''
try:
track.progress = r.remixers[track.uid].last['progress']
track.text = r.remixers[track.uid].last['text']
except:
track.progress = None
track.text = None
kwargs = {
'track': track,
'exists': os.path.exists,
'time_ago_in_words': time_ago_in_words,
'seconds_to_time': seconds_to_time,
'convert_bytes': convert_bytes
}
return templates.load('track.html').generate(**kwargs)
def latest(self):
db = database.Session()
try:
tracks = db.query(database.Track).order_by(database.Track.id.desc()).limit(config.monitor_limit).all()
except:
log.error("DB read exception, rolling back:\n%s" % traceback.format_exc())
db.rollback()
return ''.join([self.track(track) for track in tracks])
def timespan(self):
start = float(self.get_argument('start'))
end = float(self.get_argument('end'))
if end - start < 0:
raise tornado.web.HTTPError(400)
elif end - start > config.monitor_time_limit:
start = end - config.monitor_time_limit
db = database.Session()
try:
tracks = db.query(database.Track).filter(database.Track.time < datetime.fromtimestamp(end)).filter(database.Track.time > datetime.fromtimestamp(start)).order_by(database.Track.id.desc()).all()
except:
log.error("DB read exception, rolling back:\n%s" % traceback.format_exc())
db.rollback()
return ''.join([self.track(track) for track in tracks])
def graph(self):
history = {}
db = database.Session()
for i in xrange(1, 24*2): # last 3 days
low = datetime.now() - timedelta(hours = i)
high = low + timedelta(hours = 1)
timestamp = 1000 * time.mktime(high.timetuple())
try:
dayr = db.query(database.Event).add_columns('count(*)', database.Event.action, database.Event.success).group_by('action', 'success').filter(database.Event.start.between(low, high)).all()
n = {}
for daya in dayr:
if daya.action == 'download':
n[daya.action] = [timestamp , int(daya.__dict__['count(*)'])]
elif daya.action == 'remix' or daya.action == 'share':
n["%s%s" % (daya.action, daya.success)] = [timestamp, int(daya.__dict__['count(*)'])]
for k in self.keys:
if not k in history:
history[k] = []
if k in n:
history[k].append(n[k])
else:
history[k].append([timestamp, int(0)])
except:
log.error("DB read exception, rolling back:\n%s" % traceback.format_exc())
db.rollback()
return history
class ShareHandler(RequestHandler):
@tornado.web.asynchronous
def get(self, uid):
self.uid = uid
try:
token = str(self.get_argument('token'))
timeout = config.soundcloud_timeout
self.event = database.Event(uid, "share", ip = self.request.remote_ip)
if not uid in r.finished:
raise tornado.web.HTTPError(404)
t = r.finished[uid]['tag']
description = config.soundcloud_description
if 'artist' in t and 'album' in t and t['artist'].strip() != '' and t['album'].strip() != '':
description = ("Original song by %s, from the album \"%s\".<br />" % (t['artist'].strip(), t['album'].strip())) + description
elif 'artist' in t and t['artist'].strip() != '':
description = ("Original song by %s.<br />" % t['artist'].strip()) + description
form = MultiPartForm()
form.add_field('oauth_token', token)
form.add_field('track[title]', t['new_title'].encode('utf-8'))
form.add_field('track[genre]', t['style'])
form.add_field('track[license]', "no-rights-reserved")
form.add_field('track[tag_list]', ' '.join(['"%s"' % tag for tag in config.soundcloud_tag_list]))
form.add_field('track[description]', description.encode('utf-8'))
form.add_field('track[track_type]', 'remix')
form.add_field('track[downloadable]', 'true')
form.add_field('track[sharing_note]', config.soundcloud_sharing_note)
form.add_file('track[asset_data]', '%s.mp3' % uid, open(t['remixed']))
if 'tempo' in t:
form.add_field('track[bpm]', t['tempo'])
if 'art' in t:
form.add_file('track[artwork_data]', '%s.png' % uid, open(t['art']))
if 'key' in t:
form.add_field('track[key_signature]', t['key'])
MonitorSocket.update(self.uid)
self.ht = tornado.httpclient.AsyncHTTPClient()
self.ht.fetch(
"https://api.soundcloud.com/tracks.json",
self._get,
method = 'POST',
headers = {"Content-Type": form.get_content_type()},
body = str(form),
request_timeout = timeout,
connect_timeout = timeout
)
except:
self.write({ 'error': traceback.format_exc().splitlines()[-1] })
self.event.success = False
self.event.end = datetime.now()
self.event.detail = traceback.format_exc()
MonitorSocket.update(self.uid)
finally:
db = database.Session()
try:
db.add(self.event)
db.commit()
except:
log.error("DB exception, rolling back:\n%s" % traceback.format_exc())
db.rollback()
def _get(self, response):
self.write(response.body)
self.finish()
r = json.loads(response.body)
try:
db = database.Session()
self.event = db.merge(self.event)
self.event.success = True
self.event.end = datetime.now()
self.event.detail = r['permalink_url'].encode('ascii', 'ignore')
db.commit()
except:
log.error("DB exception, rolling back:\n%s" % traceback.format_exc())
db.rollback()
MonitorSocket.update(self.uid)
sc.fetchTracks()
class DownloadHandler(RequestHandler):
def get(self, uid):
if not uid in r.finished or not os.path.isfile('static/songs/%s.mp3' % uid):
raise tornado.web.HTTPError(404)
else:
db = database.Session()
try:
uploader = db.query(database.Event.ip).filter_by(uid = uid, action = "upload").first()[0]
except:
log.error("DB exception, rolling back:\n%s" % traceback.format_exc())
db.rollback()
uploader = self.request.remote_ip
if uploader != self.request.remote_ip:
log.error("Download attempt on remix %s by IP %s, not uploader %s!" % (uid, self.request.remote_ip, uploader))
raise tornado.web.HTTPError(403)
filename = "%s.mp3" % (r.finished[uid]['tag']['new_title'] if 'new_title' in r.finished[uid]['tag'] else uid)
self.set_header('Content-disposition', 'attachment; filename="%s"' % filename)
self.set_header('Content-type', 'audio/mpeg')
self.set_header('Content-Length', os.stat('static/songs/%s.mp3' % uid)[6])
self.write(open('static/songs/%s.mp3' % uid).read())
self.finish()
try:
db.add(database.Event(uid, "download", success = True, ip = self.request.remote_ip))
db.commit()
except:
log.error("DB exception, rolling back:\n%s" % traceback.format_exc())
db.rollback()
MonitorSocket.update(uid)
class UploadHandler(RequestHandler):
def trackDone(self, final):
# [TODO]: Why is this here? Move this somewhere more appropriate.
global trackCount
trackCount += 1
if self.uid in ProgressSocket.listeners:
log.info("Closing client connection for track %s..." % self.uid)
ProgressSocket.listeners[self.uid].close()
log.info("Closed client connection for track %s." % self.uid)
def post(self):
self.uid = config.uid()
        try:
            remixer = remixers[self.get_argument('style')]
        except:
            log.error("Error when trying to handle upload: %s" % traceback.format_exc())
            self.write({ "error" : "No remixer type specified!" })
            return
self.track = database.Track(self.uid, style=self.get_argument('style'))
self.event = database.Event(self.uid, "upload", None, self.request.remote_ip, urllib.unquote_plus(self.get_argument('qqfile').encode('ascii', 'ignore')))
try:
extension = os.path.splitext(self.get_argument('qqfile'))[1]
except:
extension = '.mp3'
self.track.extension = extension
targetPath = os.path.join('uploads/', '%s%s' % (self.uid, extension))
if extension not in config.allowed_file_extensions:
self.write({ 'error': "Sorry, but %s only works with %s." % (config.app_name, list_in_words([e[1:] for e in config.allowed_file_extensions])) })
return
try:
f = open(targetPath, 'w')
data = self.request.body if not self.request.files else self.request.files['upload'][0]['body']
f.write(data)
f.close()
self.track.hash = md5(data).hexdigest()
self.track.size = len(data)
del data
if not self.request.files:
del self.request.body
else:
del self.request.files['upload'][0]['body']
r.add(self.uid, extension, remixer, ProgressSocket.update, self.trackDone)
self.event.success = True
response = r.waitingResponse(self.uid)
response['success'] = True
self.write(response)
except Exception as e:
log.error("Error when trying to handle upload: %s" % traceback.format_exc())
self.write({ "error" : "Could not save file." })
self.event.success = False
self.event.end = datetime.now()
db = database.Session()
try:
db.add(self.track)
db.add(self.event)
db.commit()
except:
log.error("DB exception, rolling back:\n%s" % traceback.format_exc())
db.rollback()
MonitorSocket.update(self.uid)
gc.collect()
application = tornado.web.Application([
(r"/(favicon.ico)", tornado.web.StaticFileHandler, {"path": "static/img/"}),
(r"/static/(.*)", tornado.web.StaticFileHandler, {"path": "static/"}),
(r"/monitor[/]?([^/]+)?[/]?(.*)", MonitorHandler), #Fix this
(r"/upload", UploadHandler),
(r"/share/(%s)" % config.uid_re, ShareHandler),
(r"/download/(%s)" % config.uid_re, DownloadHandler),
(r"/", MainHandler),
tornadio.get_router(
MonitorSocket,
resource = config.monitor_resource
).route(),
tornadio.get_router(
ProgressSocket,
resource = config.progress_resource,
extra_re = config.uid_re,
extra_sep = config.socket_extra_sep
).route()],
socket_io_port = config.socket_io_port,
enabled_protocols = ['websocket', 'xhr-multipart', 'xhr-polling', 'jsonp-polling'],
)
if __name__ == "__main__":
Daemon()
log = logging.getLogger()
log.name = config.log_name
handler = logging.FileHandler(config.log_file)
handler.setFormatter(logging.Formatter(config.log_format))
handler.setLevel(logging.DEBUG)
log.addHandler(handler)
try:
log.info("Starting %s..." % config.app_name)
try:
locale.setlocale(locale.LC_ALL, 'en_US.utf8')
except:
locale.setlocale(locale.LC_ALL, 'en_US')
log.info("\tConnecting to MySQL...")
db = database.Session()
if not db:
log.critical("Can't connect to DB!")
exit(1)
log.info("\tGrabbing track count from DB...")
trackCount = db.query(database.Event).filter_by(action='remix', success = True).count()
log.info("\tClearing temp directories...")
cleanup = Cleanup(log, None)
cleanup.all()
log.info("\tStarting RemixQueue...")
r = RemixQueue(MonitorSocket)
cleanup.remixQueue = r
log.info("\tInstantiating SoundCloud object...")
sc = SoundCloud(log)
log.info("\tLoading templates...")
templates = tornado.template.Loader("templates/")
templates.autoescape = None
log.info("\tStarting cleanup timers...")
fileCleanupTimer = tornado.ioloop.PeriodicCallback(cleanup.active, 1000*config.cleanup_timeout)
fileCleanupTimer.start()
queueCleanupTimer = tornado.ioloop.PeriodicCallback(r.cleanup, 100*min(config.watch_timeout, config.remix_timeout, config.wait_timeout))
queueCleanupTimer.start()
log.info("\tCaching javascripts...")
javascripts = '\n'.join([
open('./static/js/jquery.fileupload.js').read(),
open('./static/js/front.js').read(),
open('./static/js/player.js').read(),
])
connectform = open('./static/js/connectform.js').read()
log.info("\tStarting Tornado...")
application.listen(8888)
log.info("...started!")
tornadio.server.SocketServer(application, xheaders=config.nginx)
except:
raise
finally:
log.critical("Error: %s" % traceback.format_exc())
log.critical("IOLoop instance stopped. About to shutdown...")
try:
cleanup.all()
except:
pass
log.critical("Shutting down!")
if os.path.exists('server.py.pid'):
os.remove('server.py.pid')
exit(0)
|
psobot/wub-machine
|
server.py
|
Python
|
mit
| 24,698
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:mod:`run_plotly`
==================
.. module:: run_plotly
:platform: Unix, Windows
:synopsis:
.. moduleauthor:: hbldh <henrik.blidh@nedomkull.com>
Created on 2015-08-17
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
# Assumes credentials file exists.
import plotly.plotly as py
import plotly.graph_objs as pygraph
from pyberryimu.client import BerryIMUClient
from pyberryimu.calibration.standard import StandardCalibration
def plot_data():
c = py.get_credentials()
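    # The credentials file is expected to expose three pre-provisioned stream
    # tokens (one per accelerometer axis); each trace below binds to one of them.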
trace1 = pygraph.Scatter(
x=[], y=[],
mode='lines',
line=pygraph.Line(color='rgba(31,119,180,0.15)'),
stream=dict(token=c.get('stream_ids')[0], maxpoints=100))
trace2 = pygraph.Scatter(
x=[], y=[],
mode='lines',
line=pygraph.Line(color='rgba(180,119,31,0.15)'),
stream=dict(token=c.get('stream_ids')[1], maxpoints=100))
trace3 = pygraph.Scatter(
x=[], y=[],
mode='lines',
line=pygraph.Line(color='rgba(119,180,31,0.15)'),
stream=dict(token=c.get('stream_ids')[2], maxpoints=100))
data = pygraph.Data([trace1, trace2, trace3])
layout = pygraph.Layout(
title='Streaming PyBerryIMU Data'
)
fig = pygraph.Figure(data=data, layout=layout)
print(py.plot(fig, filename='PyBerryIMU'))
s_x = py.Stream(c.get('stream_ids')[0])
s_y = py.Stream(c.get('stream_ids')[1])
s_z = py.Stream(c.get('stream_ids')[2])
s_x.open()
s_y.open()
s_z.open()
    # Use a distinct name so the plotly credentials object `c` above is not shadowed.
    with BerryIMUClient() as client:
        client.calibration_object = StandardCalibration.load()
        t_start = client.timestamp
        while (client.timestamp - t_start) < 60:
            t = client.timestamp - t_start
            acc = client.read_accelerometer()
s_x.write(dict(x=t, y=acc[0]))
s_y.write(dict(x=t, y=acc[1]))
s_z.write(dict(x=t, y=acc[2]))
s_x.close()
s_y.close()
s_z.close()
if __name__ == '__main__':
plot_data()
|
hbldh/wlmetrics
|
wlmetrics/plot/run_plotly.py
|
Python
|
mit
| 2,096
|
'''
K.I.S.T.I.E (Keep, It, Simple, Take, It, Easy)
Created on 1 Jan 2013
@author: Leonardo Bruni, leo.b2003@gmail.com
Kistie Core Module Library
This Kistie implementation is part of the project 'Kistie_Autorig' by Leonardo Bruni, leo.b2003@gmail.com
'''
|
Leopardob/Kistie
|
kcode/kcore/__init__.py
|
Python
|
bsd-3-clause
| 251
|
from django.contrib.auth.models import User
from fixture_generator import fixture_generator
from fixture_generator.tests.models import Author, Entry
@fixture_generator(Author)
def test_1():
Author.objects.create(name="Tom Clancy")
Author.objects.create(name="Daniel Pinkwater")
@fixture_generator(User)
def test_2():
pass
@fixture_generator(Entry)
def test_3():
Entry.objects.create(public=True)
Entry.objects.create(public=False)
|
alex/django-fixture-generator
|
fixture_generator/tests/fixture_gen.py
|
Python
|
bsd-3-clause
| 455
|
from __future__ import absolute_import, division, print_function
import re
from ...external.qt.QtGui import QDialog, QMessageBox
from ...external.qt import QtCore
from ... import core
from ...core import parse
from ...utils.qt import CompletionTextEdit
from ..qtutil import load_ui
def disambiguate(label, labels):
""" Changes name of label if it conflicts with labels list
Parameters
----------
label : string
labels : collection of strings
Returns
-------
label, perhaps appended with a suffix "_{number}". The output
does not appear in labels
"""
label = label.replace(' ', '_')
if label not in labels:
return label
suffix = 1
while label + ('_%i' % suffix) in labels:
suffix += 1
return label + ('_%i' % suffix)
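# A quick illustration of the behaviour above (added for clarity, not part of
# the original module): spaces are replaced and a numeric suffix is appended
# until the label no longer collides.
#
#   >>> disambiguate('data 1', ['data_1', 'data_1_1'])
#   'data_1_2'
#   >>> disambiguate('fresh', ['data_1'])
#   'fresh'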
class ColorizedCompletionTextEdit(CompletionTextEdit):
def insertPlainText(self, *args):
super(ColorizedCompletionTextEdit, self).insertPlainText(*args)
self.reformat_text()
def keyReleaseEvent(self, event):
super(ColorizedCompletionTextEdit, self).keyReleaseEvent(event)
self.reformat_text()
def reformat_text(self):
# Here every time a key is released, we re-colorize the expression.
# We show valid components in blue, and invalid ones in red. We
# recognized components because they contain a ":" which is not valid
# Python syntax (except if one considers lambda functions, but we can
# probably ignore that here)
text = self.toPlainText()
# If there are no : in the text we don't need to do anything
        if ":" not in text:
return
pattern = '[^\\s]*:[^\\s]*'
def format_components(m):
component = m.group(0)
if component in self.word_list:
return "<font color='#0072B2'><b>" + component + "</b></font> "
else:
return "<font color='#D55E00'><b>" + component + "</b></font> "
html = re.sub(pattern, format_components, text)
tc = self.textCursor()
pos = tc.position()
self.setHtml(html)
# Sometimes the HTML gets rid of double spaces so we have to make
# sure the position isn't greater than the text length.
text = self.toPlainText()
pos = min(pos, len(text))
tc.setPosition(pos)
self.setTextCursor(tc)
self.setAlignment(QtCore.Qt.AlignCenter)
class CustomComponentWidget(object):
"""
Dialog to add derived components to data via parsed commands.
"""
def __init__(self, collection, parent=None):
# Load in ui file to set up widget
self.ui = load_ui('custom_component_widget', parent)
# In the ui file we do not create the text field for the expression
# because we want to use a custom widget that supports auto-complete.
self.ui.expression = ColorizedCompletionTextEdit()
self.ui.verticalLayout_3.addWidget(self.ui.expression)
self.ui.expression.setAlignment(QtCore.Qt.AlignCenter)
self.ui.expression.setObjectName("expression")
self.ui.expression.setToolTip("Define a new component. You can either "
"type out the full name of a component\n"
"with the data:component syntax, or "
"start typing and press TAB to use "
"tab-completion.\n Blue-colored "
"components are valid, while "
"Red-colored components are invalid.")
self._labels = {}
self._data = {}
self._collection = collection
self._gather_components()
self._gather_data()
self._init_widgets()
self._connect()
# Set up auto-completion. While the auto-complete window is open, we
# cannot add/remove datasets or other components, so we can populate
# the auto_completer straight off.
self.ui.expression.set_word_list(list(self._labels.keys()))
def _connect(self):
cl = self.ui.component_list
cl.itemDoubleClicked.connect(self._add_to_expression)
def _init_widgets(self):
""" Set up default state of widget """
comps = self.ui.component_list
comps.addItems(sorted(self._labels.keys()))
data = self.ui.data_list
data.addItems(sorted(self._data.keys()))
def _gather_components(self):
""" Build a mapping from unique labels -> componentIDs """
comps = set()
for data in self._collection:
for c in data.components:
if c in comps:
continue
label = "%s:%s" % (data.label, c)
label = disambiguate(label, self._labels)
self._labels[label] = c
comps.add(c)
def _gather_data(self):
""" Build a mapping from unique labels -> data objects """
for data in self._collection:
label = data.label
label = disambiguate(label, self._data)
self._data[label] = data
def _selected_data(self):
""" Yield all data objects that are selected in the DataList """
for items in self.ui.data_list.selectedItems():
yield self._data[str(items.text())]
def _create_link(self):
""" Create a ComponentLink form the state of the GUI
Returns
-------
A new component link
"""
expression = str(self.ui.expression.toPlainText())
# To maintain backward compatibility with previous versions of glue,
# we add curly brackets around the components in the expression.
pattern = '[^\\s]*:[^\\s]*'
def add_curly(m):
return "{" + m.group(0) + "}"
expression = re.sub(pattern, add_curly, expression)
pc = parse.ParsedCommand(expression, self._labels)
label = str(self.ui.new_label.text()) or 'new component'
new_id = core.data.ComponentID(label)
link = parse.ParsedComponentLink(new_id, pc)
return link
@property
def _number_targets(self):
"""
How many targets are selected
"""
return len(self.ui.data_list.selectedItems())
def _add_link_to_targets(self, link):
""" Add a link to all the selected data """
for target in self._selected_data():
target.add_component_link(link)
def _add_to_expression(self, item):
""" Add a component list item to the expression editor """
addition = '%s ' % item.text()
expression = self.ui.expression
expression.insertPlainText(addition)
@staticmethod
def create_component(collection):
"""Present user with a dialog to define and add new components.
Parameters
----------
collection : A `DataCollection` to edit
"""
# pylint: disable=W0212
widget = CustomComponentWidget(collection)
while True:
widget.ui.show()
if widget.ui.exec_() == QDialog.Accepted:
if len(str(widget.ui.expression.toPlainText())) == 0:
QMessageBox.critical(widget.ui, "Error", "No expression set",
buttons=QMessageBox.Ok)
elif widget._number_targets == 0:
QMessageBox.critical(widget.ui, "Error", "Please specify the target dataset(s)",
buttons=QMessageBox.Ok)
elif len(widget.ui.new_label.text()) == 0:
QMessageBox.critical(widget.ui, "Error", "Please specify the new component name",
buttons=QMessageBox.Ok)
else:
link = widget._create_link()
if link:
widget._add_link_to_targets(link)
break
else:
break
def main():
from glue.core.data import Data
from glue.core.data_collection import DataCollection
import numpy as np
x = np.random.random((5, 5))
y = x * 3
data = DataCollection(Data(label='test', x=x, y=y))
CustomComponentWidget.create_component(data)
for d in data:
print(d.label)
for c in d.components:
print('\t%s' % c)
if __name__ == "__main__":
main()
|
JudoWill/glue
|
glue/qt/widgets/custom_component_widget.py
|
Python
|
bsd-3-clause
| 8,477
|
# -*- coding: utf-8 -*-
import os
from datetime import datetime
from pytz import timezone
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "schedule_twitter.settings")
import django
from django.conf import settings
from apscheduler.schedulers.blocking import BlockingScheduler
django.setup()
# Models can only be imported once the Django app registry is ready.
from tweet.models import Tweet
def tweet():
tz = timezone(settings.TIME_ZONE)
now = datetime.now(tz)
tweets = Tweet.objects.filter(agendado_para__lte=now, publicado=False)
for tweet in tweets:
tweet.send()
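# Illustrative shape of the SCHEDULE setting consumed by start_scheduler below
# (an assumption about this project's settings, shown for clarity):
#   SCHEDULE = {'type': 'interval', 'interval': 1}  # check for due tweets every minute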
def start_scheduler():
scheduler = BlockingScheduler()
scheduler.add_job(tweet,
settings.SCHEDULE['type'],
minutes=settings.SCHEDULE['interval'],
id='tweet_jobs')
scheduler.start()
start_scheduler()
|
aoqfonseca/scheduler_tweet
|
clock.py
|
Python
|
mit
| 811
|
# Preprocessing: From JPEG to HKL
import os
import glob
import sys
import yaml
import scipy.misc
import numpy as np
import hickle as hkl
def get_img(img_name, img_size=256, batch_size=256):
target_shape = (img_size, img_size, 3)
img = scipy.misc.imread(img_name) # x*x*3
assert img.dtype == 'uint8', img_name
# assert False
if len(img.shape) == 2:
img = scipy.misc.imresize(img, (img_size, img_size))
img = np.asarray([img, img, img])
else:
if img.shape[2] > 3:
img = img[:, :, :3]
img = scipy.misc.imresize(img, target_shape)
img = np.rollaxis(img, 2)
if img.shape[0] != 3:
print img_name
return img
def save_batches(file_list, tar_dir, img_size=256, batch_size=256,
flag_avg=False, num_sub_batch=1):
'''
num_sub_batch is for parallelling using multiple gpus, it should be
2, 4, or 8,
where the indexing is reverted binary number
when 2, the files ends with _0.pkl and _1.pkl
when 4, with _00.pkl, _10.pkl, _01.pkl and _11.pkl
'''
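    # Worked example of the naming scheme above (illustrative): with
    # batch_size=256 and num_sub_batch=2, the first batch is written as
    # 0000_0A.hkl / 0000_1A.hkl (plus the matching *B.hkl files), each
    # holding 128 images.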
if not os.path.exists(tar_dir):
os.makedirs(tar_dir)
imgA_batch = np.zeros((3, img_size, img_size, batch_size), np.uint8)
imgB_batch = np.zeros((3, img_size, img_size, batch_size), np.uint8)
if flag_avg:
img_sum = np.zeros((3, img_size, img_size))
batch_count = 0
count = 0
for file_names in file_list:
imgA_batch[:, :, :, count % batch_size] = \
get_img(file_names[0], img_size=img_size, batch_size=batch_size)
imgB_batch[:, :, :, count % batch_size] = \
get_img(file_names[1], img_size=img_size, batch_size=batch_size)
count += 1
if count % batch_size == 0:
batch_count += 1
if flag_avg:
img_sum += imgA_batch.mean(axis=3)
img_sum += imgB_batch.mean(axis=3)
if num_sub_batch == 1:
save_name = '%04d' % (batch_count - 1) + 'A.hkl'
hkl.dump(imgA_batch, os.path.join(tar_dir, save_name), mode='w')
save_name = '%04d' % (batch_count - 1) + 'B.hkl'
hkl.dump(imgB_batch, os.path.join(tar_dir, save_name), mode='w')
elif num_sub_batch == 2:
half_size = batch_size / 2
save_name = '%04d' % (batch_count - 1) + '_0A.hkl'
hkl.dump(imgA_batch[:, :, :, :half_size],
os.path.join(tar_dir, save_name), mode='w')
save_name = '%04d' % (batch_count - 1) + '_0B.hkl'
hkl.dump(imgB_batch[:, :, :, :half_size],
os.path.join(tar_dir, save_name), mode='w')
save_name = '%04d' % (batch_count - 1) + '_1A.hkl'
hkl.dump(imgA_batch[:, :, :, half_size:],
os.path.join(tar_dir, save_name), mode='w')
save_name = '%04d' % (batch_count - 1) + '_1B.hkl'
hkl.dump(imgB_batch[:, :, :, half_size:],
os.path.join(tar_dir, save_name), mode='w')
elif num_sub_batch == 4:
q1 = batch_size / 4
q2 = batch_size / 2
q3 = batch_size / 4 * 3
save_name = '%04d' % (batch_count - 1) + '_00A.hkl'
hkl.dump(imgA_batch[:, :, :, :q1],
os.path.join(tar_dir, save_name), mode='w')
save_name = '%04d' % (batch_count - 1) + '_10A.hkl'
hkl.dump(imgA_batch[:, :, :, q1:q2],
os.path.join(tar_dir, save_name), mode='w')
save_name = '%04d' % (batch_count - 1) + '_01A.hkl'
hkl.dump(imgA_batch[:, :, :, q2:q3],
os.path.join(tar_dir, save_name), mode='w')
save_name = '%04d' % (batch_count - 1) + '_11A.hkl'
hkl.dump(imgA_batch[:, :, :, q3:],
os.path.join(tar_dir, save_name), mode='w')
save_name = '%04d' % (batch_count - 1) + '_00B.hkl'
hkl.dump(imgB_batch[:, :, :, :q1],
os.path.join(tar_dir, save_name), mode='w')
save_name = '%04d' % (batch_count - 1) + '_10B.hkl'
hkl.dump(imgB_batch[:, :, :, q1:q2],
os.path.join(tar_dir, save_name), mode='w')
save_name = '%04d' % (batch_count - 1) + '_01B.hkl'
hkl.dump(imgB_batch[:, :, :, q2:q3],
os.path.join(tar_dir, save_name), mode='w')
save_name = '%04d' % (batch_count - 1) + '_11B.hkl'
hkl.dump(imgB_batch[:, :, :, q3:],
os.path.join(tar_dir, save_name), mode='w')
else:
            raise NotImplementedError("num_sub_batch has to be 1, 2, or 4")
return img_sum / (2*batch_count) if flag_avg and batch_count!=0 else None
def get_filenames(src_dir, misc_dir, istrain=True, seed=None):
save_filename = 'shuffled_train_filenames.npy' if istrain else 'shuffled_val_filenames.npy'
if os.path.exists(os.path.join(misc_dir, save_filename)):
filenames = np.load(os.path.join(misc_dir, save_filename))
return filenames
if not os.path.exists(misc_dir):
os.makedirs(misc_dir)
print save_filename + ' not found, generating ...'
# Each subfolder contains a group of colour variants
subfolders = []
for pathA in os.listdir(src_dir):
pathAfull = os.path.join(src_dir, pathA)
if not os.path.isdir(pathAfull):
continue
for pathB in os.listdir(pathAfull):
pathBfull = os.path.join(pathAfull, pathB)
if not os.path.isdir(pathBfull):
continue
subfolders.append(pathBfull)
np.random.seed(seed)
np.random.shuffle(subfolders)
# Choose two images at random from each subfolder
filenames = []
count = 0
for subfolder in subfolders:
# Add two images that are colour variants
subfolder_images = glob.glob(subfolder + '/*jpg')
if len(subfolder_images)<=1:
continue
subfolder_images = np.asarray(sorted(subfolder_images)) # Probably doesn't need to be sorted, but the original code had this
np.random.shuffle(subfolder_images)
# Choose images of distinct products, rather than different shots of
# the exact same product
# Try to choose images that are the same shots, but of different
# colour variants
imageA, imageB, productA, productB, shotA, shotB = None, None, None, None, None, None
for i in xrange(len(subfolder_images)-1):
imageA = subfolder_images[i]
filenameA = imageA.split("/")[-1]
productA = filenameA.split("_")[0]
shotA = filenameA.split("_")[1]
for j in xrange(i+1, len(subfolder_images)):
imageB = subfolder_images[j]
filenameB = imageB.split("/")[-1]
productB = filenameB.split("_")[0]
shotB = filenameB.split("_")[1]
if shotA != shotB:
continue
if productA != productB:
break
if productA != productB and shotA == shotB:
break
if productA == productB or imageA is None or imageB is None:
continue
filenames.append((imageA, imageB))
count += 1
if count%100==0:
sys.stdout.write(str(count)+" image pairs found\r")
sys.stdout.flush()
filenames = np.asarray(filenames)
np.save(os.path.join(misc_dir, save_filename),
filenames)
return filenames
if __name__ == '__main__':
with open('paths.yaml', 'r') as f:
paths = yaml.load(f)
train_img_dir = paths['train_img_dir']
val_img_dir = paths['val_img_dir']
misc_dir = paths['misc_dir']
if len(sys.argv) < 2:
gen_type = 'full'
else:
gen_type = sys.argv[1]
if gen_type == 'full':
print 'generating full dataset ...'
elif gen_type == 'toy':
print 'generating toy dataset ...'
else:
        raise NotImplementedError("gen_type (2nd argument of make_hkl.py) can only be full or toy")
train_filenames = get_filenames(train_img_dir, misc_dir, istrain=True)
val_filenames = get_filenames(val_img_dir, misc_dir, istrain=False)
img_size = 256
batch_size = 256
if gen_type == 'toy':
# generate 10 batches each
train_filenames = train_filenames[:2560]
val_filenames = val_filenames[:2560]
for num_sub_batch in [1]:
tar_train_dir = paths['tar_train_dir']
tar_val_dir = paths['tar_val_dir']
tar_train_dir += '_b' + str(batch_size) + \
'_b_' + str(batch_size / num_sub_batch)
tar_val_dir += '_b' + str(batch_size) + \
'_b_' + str(batch_size / num_sub_batch)
# training data
img_mean = save_batches(train_filenames, tar_train_dir,
img_size=img_size, batch_size=batch_size,
flag_avg=True, num_sub_batch=num_sub_batch)
np.save(os.path.join(misc_dir, 'img_mean.npy'), img_mean)
# validation data
save_batches(val_filenames, tar_val_dir,
img_size=img_size, batch_size=batch_size,
num_sub_batch=num_sub_batch)
|
momiah/cvariants_theano
|
preprocessing/make_hkl.py
|
Python
|
bsd-3-clause
| 9,500
|
from __future__ import annotations
import datetime
from functools import partial
from textwrap import dedent
from typing import TYPE_CHECKING
import warnings
import numpy as np
from pandas._libs.tslibs import Timedelta
import pandas._libs.window.aggregations as window_aggregations
from pandas._typing import (
Axis,
TimedeltaConvertibleTypes,
)
if TYPE_CHECKING:
from pandas import DataFrame, Series
from pandas.core.generic import NDFrame
from pandas.compat.numpy import function as nv
from pandas.util._decorators import doc
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import is_datetime64_ns_dtype
from pandas.core.dtypes.missing import isna
import pandas.core.common as common # noqa: PDF018
from pandas.core.indexers.objects import (
BaseIndexer,
ExponentialMovingWindowIndexer,
GroupbyIndexer,
)
from pandas.core.util.numba_ import maybe_use_numba
from pandas.core.window.common import zsqrt
from pandas.core.window.doc import (
_shared_docs,
args_compat,
create_section_header,
kwargs_compat,
numba_notes,
template_header,
template_returns,
template_see_also,
window_agg_numba_parameters,
)
from pandas.core.window.numba_ import (
generate_numba_ewm_func,
generate_numba_ewm_table_func,
)
from pandas.core.window.online import (
EWMMeanState,
generate_online_numba_ewma_func,
)
from pandas.core.window.rolling import (
BaseWindow,
BaseWindowGroupby,
)
def get_center_of_mass(
comass: float | None,
span: float | None,
halflife: float | None,
alpha: float | None,
) -> float:
valid_count = common.count_not_none(comass, span, halflife, alpha)
if valid_count > 1:
raise ValueError("comass, span, halflife, and alpha are mutually exclusive")
# Convert to center of mass; domain checks ensure 0 < alpha <= 1
if comass is not None:
if comass < 0:
raise ValueError("comass must satisfy: comass >= 0")
elif span is not None:
if span < 1:
raise ValueError("span must satisfy: span >= 1")
comass = (span - 1) / 2
elif halflife is not None:
if halflife <= 0:
raise ValueError("halflife must satisfy: halflife > 0")
decay = 1 - np.exp(np.log(0.5) / halflife)
comass = 1 / decay - 1
elif alpha is not None:
if alpha <= 0 or alpha > 1:
raise ValueError("alpha must satisfy: 0 < alpha <= 1")
comass = (1 - alpha) / alpha
else:
raise ValueError("Must pass one of comass, span, halflife, or alpha")
return float(comass)
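# Worked example of the conversions above (illustrative, not from the pandas
# docs): span=9 gives comass = (9 - 1) / 2 = 4.0, i.e. alpha = 1 / (1 + 4) = 0.2,
# which matches the span parametrisation alpha = 2 / (span + 1).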
def _calculate_deltas(
times: str | np.ndarray | NDFrame | None,
halflife: float | TimedeltaConvertibleTypes | None,
) -> np.ndarray:
"""
Return the diff of the times divided by the half-life. These values are used in
the calculation of the ewm mean.
Parameters
----------
times : str, np.ndarray, Series, default None
Times corresponding to the observations. Must be monotonically increasing
and ``datetime64[ns]`` dtype.
halflife : float, str, timedelta, optional
Half-life specifying the decay
Returns
-------
np.ndarray
Diff of the times divided by the half-life
"""
# error: Item "str" of "Union[str, ndarray, NDFrameT, None]" has no
# attribute "view"
# error: Item "None" of "Union[str, ndarray, NDFrameT, None]" has no
# attribute "view"
_times = np.asarray(
times.view(np.int64), dtype=np.float64 # type: ignore[union-attr]
)
_halflife = float(Timedelta(halflife).value)
return np.diff(_times) / _halflife
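# Sketch of the delta computation above (illustrative values; assumes a
# datetime64[ns] Series):
#   times = pd.Series(pd.to_datetime(["2020-01-01", "2020-01-03", "2020-01-10"]))
#   _calculate_deltas(times, halflife="2 days")  # -> array([1. , 3.5])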
class ExponentialMovingWindow(BaseWindow):
r"""
Provide exponentially weighted (EW) calculations.
Exactly one parameter: ``com``, ``span``, ``halflife``, or ``alpha`` must be
provided.
Parameters
----------
com : float, optional
Specify decay in terms of center of mass
:math:`\alpha = 1 / (1 + com)`, for :math:`com \geq 0`.
span : float, optional
Specify decay in terms of span
:math:`\alpha = 2 / (span + 1)`, for :math:`span \geq 1`.
halflife : float, str, timedelta, optional
Specify decay in terms of half-life
:math:`\alpha = 1 - \exp\left(-\ln(2) / halflife\right)`, for
:math:`halflife > 0`.
If ``times`` is specified, the time unit (str or timedelta) over which an
observation decays to half its value. Only applicable to ``mean()``,
and halflife value will not apply to the other functions.
.. versionadded:: 1.1.0
alpha : float, optional
Specify smoothing factor :math:`\alpha` directly
:math:`0 < \alpha \leq 1`.
min_periods : int, default 0
Minimum number of observations in window required to have a value;
otherwise, result is ``np.nan``.
adjust : bool, default True
Divide by decaying adjustment factor in beginning periods to account
for imbalance in relative weightings (viewing EWMA as a moving average).
- When ``adjust=True`` (default), the EW function is calculated using weights
:math:`w_i = (1 - \alpha)^i`. For example, the EW moving average of the series
[:math:`x_0, x_1, ..., x_t`] would be:
.. math::
y_t = \frac{x_t + (1 - \alpha)x_{t-1} + (1 - \alpha)^2 x_{t-2} + ... + (1 -
\alpha)^t x_0}{1 + (1 - \alpha) + (1 - \alpha)^2 + ... + (1 - \alpha)^t}
- When ``adjust=False``, the exponentially weighted function is calculated
recursively:
.. math::
\begin{split}
y_0 &= x_0\\
y_t &= (1 - \alpha) y_{t-1} + \alpha x_t,
\end{split}
ignore_na : bool, default False
Ignore missing values when calculating weights.
- When ``ignore_na=False`` (default), weights are based on absolute positions.
For example, the weights of :math:`x_0` and :math:`x_2` used in calculating
the final weighted average of [:math:`x_0`, None, :math:`x_2`] are
:math:`(1-\alpha)^2` and :math:`1` if ``adjust=True``, and
:math:`(1-\alpha)^2` and :math:`\alpha` if ``adjust=False``.
- When ``ignore_na=True``, weights are based
on relative positions. For example, the weights of :math:`x_0` and :math:`x_2`
used in calculating the final weighted average of
[:math:`x_0`, None, :math:`x_2`] are :math:`1-\alpha` and :math:`1` if
``adjust=True``, and :math:`1-\alpha` and :math:`\alpha` if ``adjust=False``.
axis : {0, 1}, default 0
If ``0`` or ``'index'``, calculate across the rows.
If ``1`` or ``'columns'``, calculate across the columns.
times : str, np.ndarray, Series, default None
.. versionadded:: 1.1.0
Only applicable to ``mean()``.
Times corresponding to the observations. Must be monotonically increasing and
``datetime64[ns]`` dtype.
If 1-D array like, a sequence with the same shape as the observations.
.. deprecated:: 1.4.0
If str, the name of the column in the DataFrame representing the times.
method : str {'single', 'table'}, default 'single'
.. versionadded:: 1.4.0
Execute the rolling operation per single column or row (``'single'``)
or over the entire object (``'table'``).
This argument is only implemented when specifying ``engine='numba'``
in the method call.
Only applicable to ``mean()``
Returns
-------
``ExponentialMovingWindow`` subclass
See Also
--------
rolling : Provides rolling window calculations.
expanding : Provides expanding transformations.
Notes
-----
See :ref:`Windowing Operations <window.exponentially_weighted>`
for further usage details and examples.
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
>>> df.ewm(com=0.5).mean()
B
0 0.000000
1 0.750000
2 1.615385
3 1.615385
4 3.670213
>>> df.ewm(alpha=2 / 3).mean()
B
0 0.000000
1 0.750000
2 1.615385
3 1.615385
4 3.670213
**adjust**
>>> df.ewm(com=0.5, adjust=True).mean()
B
0 0.000000
1 0.750000
2 1.615385
3 1.615385
4 3.670213
>>> df.ewm(com=0.5, adjust=False).mean()
B
0 0.000000
1 0.666667
2 1.555556
3 1.555556
4 3.650794
**ignore_na**
>>> df.ewm(com=0.5, ignore_na=True).mean()
B
0 0.000000
1 0.750000
2 1.615385
3 1.615385
4 3.225000
>>> df.ewm(com=0.5, ignore_na=False).mean()
B
0 0.000000
1 0.750000
2 1.615385
3 1.615385
4 3.670213
**times**
Exponentially weighted mean with weights calculated with a timedelta ``halflife``
relative to ``times``.
>>> times = ['2020-01-01', '2020-01-03', '2020-01-10', '2020-01-15', '2020-01-17']
>>> df.ewm(halflife='4 days', times=pd.DatetimeIndex(times)).mean()
B
0 0.000000
1 0.585786
2 1.523889
3 1.523889
4 3.233686
"""
_attributes = [
"com",
"span",
"halflife",
"alpha",
"min_periods",
"adjust",
"ignore_na",
"axis",
"times",
"method",
]
def __init__(
self,
obj: NDFrame,
com: float | None = None,
span: float | None = None,
halflife: float | TimedeltaConvertibleTypes | None = None,
alpha: float | None = None,
min_periods: int | None = 0,
adjust: bool = True,
ignore_na: bool = False,
axis: Axis = 0,
times: str | np.ndarray | NDFrame | None = None,
method: str = "single",
*,
selection=None,
):
super().__init__(
obj=obj,
min_periods=1 if min_periods is None else max(int(min_periods), 1),
on=None,
center=False,
closed=None,
method=method,
axis=axis,
selection=selection,
)
self.com = com
self.span = span
self.halflife = halflife
self.alpha = alpha
self.adjust = adjust
self.ignore_na = ignore_na
self.times = times
if self.times is not None:
if not self.adjust:
raise NotImplementedError("times is not supported with adjust=False.")
if isinstance(self.times, str):
warnings.warn(
(
"Specifying times as a string column label is deprecated "
"and will be removed in a future version. Pass the column "
"into times instead."
),
FutureWarning,
stacklevel=find_stack_level(),
)
self.times = self._selected_obj[self.times]
if not is_datetime64_ns_dtype(self.times):
raise ValueError("times must be datetime64[ns] dtype.")
# error: Argument 1 to "len" has incompatible type "Union[str, ndarray,
# NDFrameT, None]"; expected "Sized"
if len(self.times) != len(obj): # type: ignore[arg-type]
raise ValueError("times must be the same length as the object.")
if not isinstance(self.halflife, (str, datetime.timedelta)):
raise ValueError(
"halflife must be a string or datetime.timedelta object"
)
if isna(self.times).any():
raise ValueError("Cannot convert NaT values to integer")
self._deltas = _calculate_deltas(self.times, self.halflife)
# Halflife is no longer applicable when calculating COM
# But allow COM to still be calculated if the user passes other decay args
if common.count_not_none(self.com, self.span, self.alpha) > 0:
self._com = get_center_of_mass(self.com, self.span, None, self.alpha)
else:
self._com = 1.0
else:
if self.halflife is not None and isinstance(
self.halflife, (str, datetime.timedelta)
):
raise ValueError(
"halflife can only be a timedelta convertible argument if "
"times is not None."
)
# Without times, points are equally spaced
self._deltas = np.ones(max(len(self.obj) - 1, 0), dtype=np.float64)
self._com = get_center_of_mass(
# error: Argument 3 to "get_center_of_mass" has incompatible type
# "Union[float, Any, None, timedelta64, signedinteger[_64Bit]]";
# expected "Optional[float]"
self.com,
self.span,
self.halflife, # type: ignore[arg-type]
self.alpha,
)
def _get_window_indexer(self) -> BaseIndexer:
"""
Return an indexer class that will compute the window start and end bounds
"""
return ExponentialMovingWindowIndexer()
def online(self, engine="numba", engine_kwargs=None):
"""
Return an ``OnlineExponentialMovingWindow`` object to calculate
exponentially moving window aggregations in an online method.
.. versionadded:: 1.3.0
Parameters
----------
engine: str, default ``'numba'``
Execution engine to calculate online aggregations.
Applies to all supported aggregation methods.
engine_kwargs : dict, default None
Applies to all supported aggregation methods.
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{{'nopython': True, 'nogil': False, 'parallel': False}}`` and will be
applied to the function
Returns
-------
OnlineExponentialMovingWindow
"""
return OnlineExponentialMovingWindow(
obj=self.obj,
com=self.com,
span=self.span,
halflife=self.halflife,
alpha=self.alpha,
min_periods=self.min_periods,
adjust=self.adjust,
ignore_na=self.ignore_na,
axis=self.axis,
times=self.times,
engine=engine,
engine_kwargs=engine_kwargs,
selection=self._selection,
)
@doc(
_shared_docs["aggregate"],
see_also=dedent(
"""
See Also
--------
pandas.DataFrame.rolling.aggregate
"""
),
examples=dedent(
"""
Examples
--------
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})
>>> df
A B C
0 1 4 7
1 2 5 8
2 3 6 9
>>> df.ewm(alpha=0.5).mean()
A B C
0 1.000000 4.000000 7.000000
1 1.666667 4.666667 7.666667
2 2.428571 5.428571 8.428571
"""
),
klass="Series/Dataframe",
axis="",
)
def aggregate(self, func, *args, **kwargs):
return super().aggregate(func, *args, **kwargs)
agg = aggregate
@doc(
template_header,
create_section_header("Parameters"),
args_compat,
window_agg_numba_parameters,
kwargs_compat,
create_section_header("Returns"),
template_returns,
create_section_header("See Also"),
template_see_also,
create_section_header("Notes"),
numba_notes.replace("\n", "", 1),
window_method="ewm",
aggregation_description="(exponential weighted moment) mean",
agg_method="mean",
)
def mean(self, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
if self.method == "single":
func = generate_numba_ewm_func
numba_cache_key = (lambda x: x, "ewm_mean")
else:
func = generate_numba_ewm_table_func
numba_cache_key = (lambda x: x, "ewm_mean_table")
ewm_func = func(
engine_kwargs=engine_kwargs,
com=self._com,
adjust=self.adjust,
ignore_na=self.ignore_na,
deltas=self._deltas,
normalize=True,
)
return self._apply(
ewm_func,
numba_cache_key=numba_cache_key,
)
elif engine in ("cython", None):
if engine_kwargs is not None:
raise ValueError("cython engine does not accept engine_kwargs")
nv.validate_window_func("mean", args, kwargs)
deltas = None if self.times is None else self._deltas
window_func = partial(
window_aggregations.ewm,
com=self._com,
adjust=self.adjust,
ignore_na=self.ignore_na,
deltas=deltas,
normalize=True,
)
return self._apply(window_func)
else:
raise ValueError("engine must be either 'numba' or 'cython'")
@doc(
template_header,
create_section_header("Parameters"),
args_compat,
window_agg_numba_parameters,
kwargs_compat,
create_section_header("Returns"),
template_returns,
create_section_header("See Also"),
template_see_also,
create_section_header("Notes"),
numba_notes.replace("\n", "", 1),
window_method="ewm",
aggregation_description="(exponential weighted moment) sum",
agg_method="sum",
)
def sum(self, *args, engine=None, engine_kwargs=None, **kwargs):
if not self.adjust:
raise NotImplementedError("sum is not implemented with adjust=False")
if maybe_use_numba(engine):
if self.method == "single":
func = generate_numba_ewm_func
numba_cache_key = (lambda x: x, "ewm_sum")
else:
func = generate_numba_ewm_table_func
numba_cache_key = (lambda x: x, "ewm_sum_table")
ewm_func = func(
engine_kwargs=engine_kwargs,
com=self._com,
adjust=self.adjust,
ignore_na=self.ignore_na,
deltas=self._deltas,
normalize=False,
)
return self._apply(
ewm_func,
numba_cache_key=numba_cache_key,
)
elif engine in ("cython", None):
if engine_kwargs is not None:
raise ValueError("cython engine does not accept engine_kwargs")
nv.validate_window_func("sum", args, kwargs)
deltas = None if self.times is None else self._deltas
window_func = partial(
window_aggregations.ewm,
com=self._com,
adjust=self.adjust,
ignore_na=self.ignore_na,
deltas=deltas,
normalize=False,
)
return self._apply(window_func)
else:
raise ValueError("engine must be either 'numba' or 'cython'")
@doc(
template_header,
create_section_header("Parameters"),
dedent(
"""
bias : bool, default False
Use a standard estimation bias correction.
"""
).replace("\n", "", 1),
args_compat,
kwargs_compat,
create_section_header("Returns"),
template_returns,
create_section_header("See Also"),
template_see_also[:-1],
window_method="ewm",
aggregation_description="(exponential weighted moment) standard deviation",
agg_method="std",
)
def std(self, bias: bool = False, *args, **kwargs):
nv.validate_window_func("std", args, kwargs)
return zsqrt(self.var(bias=bias, **kwargs))
def vol(self, bias: bool = False, *args, **kwargs):
warnings.warn(
(
"vol is deprecated will be removed in a future version. "
"Use std instead."
),
FutureWarning,
stacklevel=2,
)
return self.std(bias, *args, **kwargs)
@doc(
template_header,
create_section_header("Parameters"),
dedent(
"""
bias : bool, default False
Use a standard estimation bias correction.
"""
).replace("\n", "", 1),
args_compat,
kwargs_compat,
create_section_header("Returns"),
template_returns,
create_section_header("See Also"),
template_see_also[:-1],
window_method="ewm",
aggregation_description="(exponential weighted moment) variance",
agg_method="var",
)
def var(self, bias: bool = False, *args, **kwargs):
nv.validate_window_func("var", args, kwargs)
window_func = window_aggregations.ewmcov
wfunc = partial(
window_func,
com=self._com,
adjust=self.adjust,
ignore_na=self.ignore_na,
bias=bias,
)
def var_func(values, begin, end, min_periods):
return wfunc(values, begin, end, min_periods, values)
return self._apply(var_func)
@doc(
template_header,
create_section_header("Parameters"),
dedent(
"""
        other : Series or DataFrame, optional
If not supplied then will default to self and produce pairwise
output.
pairwise : bool, default None
If False then only matching columns between self and other will be
used and the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the
output will be a MultiIndex DataFrame in the case of DataFrame
inputs. In the case of missing elements, only complete pairwise
observations will be used.
bias : bool, default False
Use a standard estimation bias correction.
"""
).replace("\n", "", 1),
kwargs_compat,
create_section_header("Returns"),
template_returns,
create_section_header("See Also"),
template_see_also[:-1],
window_method="ewm",
aggregation_description="(exponential weighted moment) sample covariance",
agg_method="cov",
)
def cov(
self,
other: DataFrame | Series | None = None,
pairwise: bool | None = None,
bias: bool = False,
**kwargs,
):
from pandas import Series
def cov_func(x, y):
x_array = self._prep_values(x)
y_array = self._prep_values(y)
window_indexer = self._get_window_indexer()
min_periods = (
self.min_periods
if self.min_periods is not None
else window_indexer.window_size
)
start, end = window_indexer.get_window_bounds(
num_values=len(x_array),
min_periods=min_periods,
center=self.center,
closed=self.closed,
)
result = window_aggregations.ewmcov(
x_array,
start,
end,
# error: Argument 4 to "ewmcov" has incompatible type
# "Optional[int]"; expected "int"
self.min_periods, # type: ignore[arg-type]
y_array,
self._com,
self.adjust,
self.ignore_na,
bias,
)
return Series(result, index=x.index, name=x.name)
return self._apply_pairwise(self._selected_obj, other, pairwise, cov_func)
@doc(
template_header,
create_section_header("Parameters"),
dedent(
"""
other : Series or DataFrame, optional
If not supplied then will default to self and produce pairwise
output.
pairwise : bool, default None
If False then only matching columns between self and other will be
used and the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the
output will be a MultiIndex DataFrame in the case of DataFrame
inputs. In the case of missing elements, only complete pairwise
observations will be used.
"""
).replace("\n", "", 1),
kwargs_compat,
create_section_header("Returns"),
template_returns,
create_section_header("See Also"),
template_see_also[:-1],
window_method="ewm",
aggregation_description="(exponential weighted moment) sample correlation",
agg_method="corr",
)
def corr(
self,
other: DataFrame | Series | None = None,
pairwise: bool | None = None,
**kwargs,
):
from pandas import Series
def cov_func(x, y):
x_array = self._prep_values(x)
y_array = self._prep_values(y)
window_indexer = self._get_window_indexer()
min_periods = (
self.min_periods
if self.min_periods is not None
else window_indexer.window_size
)
start, end = window_indexer.get_window_bounds(
num_values=len(x_array),
min_periods=min_periods,
center=self.center,
closed=self.closed,
)
def _cov(X, Y):
return window_aggregations.ewmcov(
X,
start,
end,
min_periods,
Y,
self._com,
self.adjust,
self.ignore_na,
True,
)
with np.errstate(all="ignore"):
cov = _cov(x_array, y_array)
x_var = _cov(x_array, x_array)
y_var = _cov(y_array, y_array)
result = cov / zsqrt(x_var * y_var)
return Series(result, index=x.index, name=x.name)
return self._apply_pairwise(self._selected_obj, other, pairwise, cov_func)
class ExponentialMovingWindowGroupby(BaseWindowGroupby, ExponentialMovingWindow):
"""
Provide an exponential moving window groupby implementation.
"""
_attributes = ExponentialMovingWindow._attributes + BaseWindowGroupby._attributes
def __init__(self, obj, *args, _grouper=None, **kwargs):
super().__init__(obj, *args, _grouper=_grouper, **kwargs)
if not obj.empty and self.times is not None:
# sort the times and recalculate the deltas according to the groups
groupby_order = np.concatenate(list(self._grouper.indices.values()))
self._deltas = _calculate_deltas(
self.times.take(groupby_order), # type: ignore[union-attr]
self.halflife,
)
def _get_window_indexer(self) -> GroupbyIndexer:
"""
Return an indexer class that will compute the window start and end bounds
Returns
-------
GroupbyIndexer
"""
window_indexer = GroupbyIndexer(
groupby_indices=self._grouper.indices,
window_indexer=ExponentialMovingWindowIndexer,
)
return window_indexer
class OnlineExponentialMovingWindow(ExponentialMovingWindow):
def __init__(
self,
obj: NDFrame,
com: float | None = None,
span: float | None = None,
halflife: float | TimedeltaConvertibleTypes | None = None,
alpha: float | None = None,
min_periods: int | None = 0,
adjust: bool = True,
ignore_na: bool = False,
axis: Axis = 0,
times: str | np.ndarray | NDFrame | None = None,
engine: str = "numba",
engine_kwargs: dict[str, bool] | None = None,
*,
selection=None,
):
if times is not None:
raise NotImplementedError(
"times is not implemented with online operations."
)
super().__init__(
obj=obj,
com=com,
span=span,
halflife=halflife,
alpha=alpha,
min_periods=min_periods,
adjust=adjust,
ignore_na=ignore_na,
axis=axis,
times=times,
selection=selection,
)
self._mean = EWMMeanState(
self._com, self.adjust, self.ignore_na, self.axis, obj.shape
)
if maybe_use_numba(engine):
self.engine = engine
self.engine_kwargs = engine_kwargs
else:
raise ValueError("'numba' is the only supported engine")
def reset(self):
"""
Reset the state captured by `update` calls.
"""
self._mean.reset()
def aggregate(self, func, *args, **kwargs):
        raise NotImplementedError
def std(self, bias: bool = False, *args, **kwargs):
        raise NotImplementedError
def corr(
self,
other: DataFrame | Series | None = None,
pairwise: bool | None = None,
**kwargs,
):
        raise NotImplementedError
def cov(
self,
other: DataFrame | Series | None = None,
pairwise: bool | None = None,
bias: bool = False,
**kwargs,
):
        raise NotImplementedError
def var(self, bias: bool = False, *args, **kwargs):
        raise NotImplementedError
def mean(self, *args, update=None, update_times=None, **kwargs):
"""
Calculate an online exponentially weighted mean.
Parameters
----------
update: DataFrame or Series, default None
New values to continue calculating the
exponentially weighted mean from the last values and weights.
Values should be float64 dtype.
``update`` needs to be ``None`` the first time the
exponentially weighted mean is calculated.
update_times: Series or 1-D np.ndarray, default None
New times to continue calculating the
exponentially weighted mean from the last values and weights.
If ``None``, values are assumed to be evenly spaced
in time.
This feature is currently unsupported.
Returns
-------
DataFrame or Series
Examples
--------
>>> df = pd.DataFrame({"a": range(5), "b": range(5, 10)})
>>> online_ewm = df.head(2).ewm(0.5).online()
>>> online_ewm.mean()
a b
0 0.00 5.00
1 0.75 5.75
>>> online_ewm.mean(update=df.tail(3))
a b
2 1.615385 6.615385
3 2.550000 7.550000
4 3.520661 8.520661
>>> online_ewm.reset()
>>> online_ewm.mean()
a b
0 0.00 5.00
1 0.75 5.75
"""
result_kwargs = {}
        is_frame = self._selected_obj.ndim == 2
if update_times is not None:
raise NotImplementedError("update_times is not implemented.")
else:
update_deltas = np.ones(
max(self._selected_obj.shape[self.axis - 1] - 1, 0), dtype=np.float64
)
if update is not None:
if self._mean.last_ewm is None:
raise ValueError(
"Must call mean with update=None first before passing update"
)
result_from = 1
result_kwargs["index"] = update.index
if is_frame:
last_value = self._mean.last_ewm[np.newaxis, :]
result_kwargs["columns"] = update.columns
else:
last_value = self._mean.last_ewm
result_kwargs["name"] = update.name
np_array = np.concatenate((last_value, update.to_numpy()))
else:
result_from = 0
result_kwargs["index"] = self._selected_obj.index
if is_frame:
result_kwargs["columns"] = self._selected_obj.columns
else:
result_kwargs["name"] = self._selected_obj.name
np_array = self._selected_obj.astype(np.float64).to_numpy()
ewma_func = generate_online_numba_ewma_func(self.engine_kwargs)
result = self._mean.run_ewm(
np_array if is_frame else np_array[:, np.newaxis],
update_deltas,
self.min_periods,
ewma_func,
)
if not is_frame:
result = result.squeeze()
result = result[result_from:]
result = self._selected_obj._constructor(result, **result_kwargs)
return result
|
jorisvandenbossche/pandas
|
pandas/core/window/ewm.py
|
Python
|
bsd-3-clause
| 33,522
|
import math
import sys
# read FILE with CVs and weights
FILENAME_ = sys.argv[1]
# number of CVs for FES
NCV_ = int(sys.argv[2])
# read minimum, maximum and number of bins for FES grid
gmin = []; gmax = []; nbin = []
for i in range(0, NCV_):
i0 = 3*i + 3
gmin.append(float(sys.argv[i0]))
gmax.append(float(sys.argv[i0+1]))
nbin.append(int(sys.argv[i0+2]))
# read KBT_
KBT_ = float(sys.argv[3*NCV_+3])
# block size
BSIZE_ = int(sys.argv[-1])
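# Example invocation (illustrative file name and values, matching the argument
# layout parsed above: FILE, NCV, then min/max/bins per CV, kBT, block size):
#   python do_block_fes.py COLVAR 2 -3.0 3.0 50 -3.0 3.0 50 2.5 500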
def get_indexes_from_index(index, nbin):
indexes = []
# get first index
indexes.append(index%nbin[0])
# loop
kk = index
for i in range(1, len(nbin)-1):
        kk = ( kk - indexes[i-1] ) // nbin[i-1]  # integer division keeps indexes integral
indexes.append(kk%nbin[i])
if(len(nbin)>=2):
        indexes.append( ( kk - indexes[len(nbin)-2] ) // nbin[len(nbin)-2] )
return indexes
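# Worked example (illustrative): with nbin = [3, 4], flat index 7 maps to grid
# indexes [1, 2], since 7 = 1 + 2*3 (the first CV varies fastest).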
def get_indexes_from_cvs(cvs, gmin, dx):
keys = []
for i in range(0, len(cvs)):
keys.append(int( round( ( cvs[i] - gmin[i] ) / dx[i] ) ))
return tuple(keys)
def get_points(key, gmin, dx):
xs = []
for i in range(0, len(key)):
xs.append(gmin[i] + float(key[i]) * dx[i])
return xs
# define bin size
dx = []
for i in range(0, NCV_):
dx.append( (gmax[i]-gmin[i])/float(nbin[i]-1) )
# total numbers of bins
nbins = 1
for i in range(0, len(nbin)): nbins *= nbin[i]
# read file and store lists
cv_list=[]; w_list=[]
for lines in open(FILENAME_, "r").readlines():
riga = lines.strip().split()
# check format
if(len(riga)!=NCV_ and len(riga)!=NCV_+1):
        print(FILENAME_ + " is in the wrong format!")
exit()
# read CVs
cvs = []
for i in range(0, NCV_): cvs.append(float(riga[i]))
# get indexes
key = get_indexes_from_cvs(cvs, gmin, dx)
# read weight, if present
if(len(riga)==NCV_+1):
w = float(riga[NCV_])
else: w = 1.0
# store into lists
cv_list.append(key)
w_list.append(w)
# total number of data points
ndata = len(cv_list)
# number of blocks
nblock = int(ndata/BSIZE_)
# prepare histo dictionaries
histo_ave = {} ; histo_ave2 = {};
# cycle on blocks
for iblock in range(0, nblock):
# define range in CV
i0 = iblock * BSIZE_
i1 = i0 + BSIZE_
# build histo
histo = {}
for i in range(i0, i1):
if cv_list[i] in histo: histo[cv_list[i]] += w_list[i]
else: histo[cv_list[i]] = w_list[i]
# calculate average histo in block
for key in histo: histo[key] /= float(BSIZE_)
# add to global histo dictionary
for key in histo:
if key in histo_ave:
histo_ave[key] += histo[key]
histo_ave2[key] += histo[key] * histo[key]
else:
histo_ave[key] = histo[key]
histo_ave2[key] = histo[key] * histo[key]
# print out fes and error
log = open("fes."+str(BSIZE_)+".dat", "w")
# this is needed to add a blank line
xs_old = []
for i in range(0, nbins):
# get the indexes in the multi-dimensional grid
key = tuple(get_indexes_from_index(i, nbin))
# get CV values for that grid point
xs = get_points(key, gmin, dx)
# add a blank line for gnuplot
if(i == 0):
xs_old = xs[:]
else:
flag = 0
for j in range(1,len(xs)):
if(xs[j] != xs_old[j]):
flag = 1
xs_old = xs[:]
if (flag == 1): log.write("\n")
# print value of CVs
for x in xs:
log.write("%12.6lf " % x)
# calculate fes
nb = float(nblock)
if key in histo_ave:
# average and variance
aveh = histo_ave[key] / nb
s2h = (histo_ave2[key]/nb-aveh*aveh) * nb / ( nb - 1.0 )
# error
errh = math.sqrt( s2h / nb )
# free energy and error
fes = -KBT_ * math.log(aveh)
errf = KBT_ / aveh * errh
# printout
log.write(" %12.6lf %12.6lf\n" % (fes, errf))
else:
log.write(" Infinity\n")
log.close()
|
JFDama/plumed2
|
user-doc/tutorials/trieste-4/do_block_fes.py
|
Python
|
lgpl-3.0
| 3,916
|
from src.li.visual.ViewStyle import ViewStyle
from src.li.types.VideoVisual import VideoVisual
from src.li.types.AddToCollectionVisual import AddToCollectionVisual
from src.li.types.YoutubePlaylistVisual import YoutubePlaylistVisual
from src.li.visual.FullTextSettings import FullTextSettings, Location
from src.li.visual.TextSettings import TextSettings
from src.tools.addonSettings import string as st
viewStyle = ViewStyle.EPISODES
addToCollectionVisual = AddToCollectionVisual (
st(780), #title
TextSettings(
None, #color
False, #bold?
False #italic?
),
None, #icon
None, #thumb
)
videosVisual = VideoVisual (
FullTextSettings(
TextSettings( #title
'None', #color
False, #bold?
False, #italic?
),
sourceTS=TextSettings (
'red', #color
False, #bold?
False, #italic?
False #show?
),
countTS=TextSettings(
'steelblue', #color
False, #bold?
False, #italic?
),
count2TS=TextSettings(
'yellow', #color
False, #bold?
False, #italic?
),
countLocation = Location.LEFT_ALIGNED
)
)
nextPageVisual = YoutubePlaylistVisual (
TextSettings(
None, #color
False, #bold?
False #italic?
),
customTitle = st(980),
ctHasPageNum = True
)
|
SportySpice/Collections
|
src/paths/visual/browse_youtube_playlist.py
|
Python
|
gpl-2.0
| 1,896
|
"""
Average mean sea level pressure by day, unrotate lat/lon and save
"""
import os, sys
import itertools
import numpy as np
import cPickle as pickle
#import matplotlib.animation as animation
import iris
import iris.coords as coords
import iris.coord_categorisation
from iris.analysis.interpolate import linear
import cartopy.crs as ccrs
import h5py
#def checkpoleposition(cube):
#rot_pole = temperature.coord('grid_latitude').coord_system.as_cartopy_crs()
# ll = ccrs.Geodetic()
#lon, lat = 40, -42
# Transform the lon lat point into unrotated coordinates.
#target_xy = ll.transform_point(rotated_lon, rotated_lat, rot_pole)
#extracted_cube = linear(temperature, [('grid_latitude', target_xy[1]), ('grid_longitude', target_xy[0]
#experiment_ids = ['djzny', 'djznq', 'djznw']
experiment_ids = ['djzny']
data_to_mean = ['temp', 'sp_hum']
dset = ['t_on_p', 'sh_on_p']
for experiment_id in experiment_ids:
expmin1 = experiment_id[:-1]
for a, dm in enumerate(data_to_mean):
#dataset = iris.load_cube(fname)
fname = '/projects/cascade/pwille/temp/%s_pressure_levels_interp_%s' % (dm,experiment_id)
ds = dset[a]
with h5py.File(fname, 'r') as i:
d = i['%s' % ds]
print d
#iris.coord_categorisation.add_day_of_year(p_at_msl, 'time', name='dayyear')
#print fname
#daily_mean = .aggregated_by(['dayyear'], iris.analysis.MEAN)
#daily_mean_rot = checkpoleposition(daily_mean)
#if not os.path.exists(experiment_id): os.makedirs(experiment_id)
#pickle.dump( daily_mean, open( "/home/pwille/python_scripts/%s/pickle_daily_mean_%s.p" % (experiment_id, experiment_id), "wb" ) )
|
peterwilletts24/Monsoon-Python-Scripts
|
pp_load_mean_pickle.py
|
Python
|
mit
| 1,683
|
# -*- coding: utf-8 -*-
"""
gdown.modules.fileshark
~~~~~~~~~~~~~~~~~~~
This module contains handlers for fileshark.
"""
import re
# from datetime import datetime
from dateutil import parser
from requests.exceptions import ConnectionError
from ..module import browser, acc_info_template
from ..exceptions import ModuleError
def accInfo(username, passwd, proxy=False):
"""Returns account info."""
r = browser(proxy)
acc_info = acc_info_template()
# r.headers['X-Requested-With'] = 'XMLHttpRequest'
try:
rc = r.get('http://fileshark.pl/zaloguj')
except ConnectionError as e:
raise ModuleError('ip banned')
open('gdown.log', 'w').write(rc.text)
csrf_token = re.search('name="_csrf_token" value="(.+?)"', rc.text).group(1)
data = {'_username': username,
'_password': passwd,
# '_remember_me': 'on',
'_csrf_token': csrf_token}
# r.headers['x-requested-with'] = 'XMLHttpRequest'
r.headers['origin'] = 'https://fileshark.pl'
rc = r.post('https://fileshark.pl/login_check', data=data).text
open('gdown.log', 'w').write(rc)
if 'Rodzaj konta <strong>Premium' in rc:
date_expire = re.search('Premium <br><span title="([0-9\- \:]+?)"', rc).group(1)
rc = re.search('data-min="0" data-max="([0-9]+)" data-angleOffset="90" data-linecap="round" value="([0-9]+)"', rc)
transfer = int((int(rc.group(1)) - int(rc.group(2))) / 1024 / 1024 / 1024)
# block test
rc = r.get('https://fileshark.pl/pobierz/24866459/8247a/handel-dziecmi-apel-andzi-yuvhkh-xwe0-webm').text
if 'Wykryliśmy, że Twoje konto jest wykorzystywane komercyjnie.' in rc:
print('Wykryliśmy, że Twoje konto jest wykorzystywane komercyjnie.')
acc_info['status'] = 'blocked'
else:
acc_info['status'] = 'premium'
acc_info['expire_date'] = parser.parse(date_expire)
acc_info['transfer'] = transfer
elif 'Rodzaj konta <strong>Standardowe' in rc:
acc_info['status'] = 'free'
elif 'Konto jest wyłączone.' in rc:
acc_info['status'] = 'blocked'
elif 'Nieprawidłowe dane.' in rc:
acc_info['status'] = 'deleted'
else:
        raise ModuleError('unknown account status')
return acc_info
|
oczkers/gdown
|
gdown/modules/fileshark.py
|
Python
|
gpl-3.0
| 2,312
|
#!/usr/bin/env python2
"""Split a fasta file in n files of approximately the same number of sequences
WARNING: This will create 'n' files in your present directory
USAGE:
python fasta_split.py input_file num_files
input_file: fasta file
num_files: number of files to split into
"""
# Importing modules
import gzip
import sys
# Defining classes
class Fasta(object):
"""Fasta object with name and sequence
"""
def __init__(self, name, sequence):
self.name = name
self.sequence = sequence
def write_to_file(self, handle):
handle.write(">" + self.name + "\n")
handle.write(self.sequence + "\n")
def __repr__(self):
return self.name + " " + self.sequence[:31]
# Defining functions
def myopen(_file, mode="rt"):
if _file.endswith(".gz"):
return gzip.open(_file, mode=mode)
else:
return open(_file, mode=mode)
def fasta_iterator(input_file):
"""Takes a fasta file input_file and returns a fasta iterator
"""
with myopen(input_file) as f:
sequence = []
name = ""
begun = False
for line in f:
line = line.strip()
if line.startswith(">"):
if begun:
yield Fasta(name, "".join(sequence))
name = line[1:]
sequence = ""
begun = True
else:
sequence += line
if name != "":
yield Fasta(name, "".join(sequence))
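# Example usage of the iterator above (hypothetical file name):
#   for fasta in fasta_iterator("sequences.fasta"):
#       print(fasta.name)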
# parse user input
try:
input_file = sys.argv[1]
num_files = int(sys.argv[2])
except:
print(__doc__)
sys.exit(1)
# Iterate through sequences and write to files
file_number = 0
for sequence in fasta_iterator(input_file):
n = file_number % num_files + 1
print(n)
current_file = input_file + str(n) + ".fasta"
with open(current_file, "a") as f:
sequence.write_to_file(f)
file_number += 1
|
enormandeau/Scripts
|
fasta_split.py
|
Python
|
gpl-3.0
| 1,930
|
#!/usr/bin/env pythonw
# -*- coding: UTF-8 -*-
#
# Drag&Drop test 1
#
# Created by Giovanni Porcari on 2007-03-24.
# Copyright (c) 2007 Softwell. All rights reserved.
#
""" Drag&Drop test 1 """
from gnr.core.gnrbag import Bag
class GnrCustomWebPage(object):
def main(self, root, **kwargs):
#root.script("dojo.subscribe('/dnd/start',function(foo){console.debug(foo);})")
root.css('.dojoDndItemSelected {border:4px dotted silver;}')
root.css('.dojoDndItemAnchor {border:4px dotted blue;}')
src = dict(width='40px', height='40px', margin='10px')
tgt = dict(width='80px', height='80px', margin='10px')
root = root.div(width='40px')
draggable = root.div(dnd_source=True, dnd_singular=False)
draggable.div(background_color='green', dnd_itemType='green', **src)
draggable.div(background_color='red', dnd_itemType='red', **src)
draggable.div(background_color='pink', dnd_itemType='pink', **src)
draggable.div(background_color='yellow', dnd_itemType='yellow', **src)
targets = root.div(dnd_target=True, isSource=False, dnd_accept='green,red',
height='240px', width='120px',
border='1px solid green')
#targets.div(border='3px solid green',**tgt)
#targets.div(border='3px solid red',**tgt)
#targets.div(border='3px solid pink',**tgt)
#targets.div(border='3px solid yellow',**tgt)
|
poppogbr/genropy
|
packages/showcase/webpages/utilities/dnd/test1.py
|
Python
|
lgpl-2.1
| 1,461
|
"""
Copyright (c) 2017 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import unicode_literals, division
from collections import namedtuple
from copy import deepcopy
from multiprocessing.pool import ThreadPool
import yaml
import json
import os
from operator import attrgetter
import random
from string import ascii_letters
import time
import logging
from datetime import timedelta
import datetime as dt
import copy
from atomic_reactor.build import BuildResult
from atomic_reactor.plugin import BuildStepPlugin
from atomic_reactor.plugins.pre_reactor_config import get_config
from atomic_reactor.plugins.pre_check_and_set_rebuild import is_rebuild
from atomic_reactor.util import get_preferred_label, df_parser, get_build_json
from atomic_reactor.constants import PLUGIN_ADD_FILESYSTEM_KEY, PLUGIN_BUILD_ORCHESTRATE_KEY
from osbs.api import OSBS
from osbs.exceptions import OsbsException
from osbs.conf import Configuration
from osbs.constants import BUILD_FINISHED_STATES
ClusterInfo = namedtuple('ClusterInfo', ('cluster', 'platform', 'osbs', 'load'))
WORKSPACE_KEY_BUILD_INFO = 'build_info'
WORKSPACE_KEY_UPLOAD_DIR = 'koji_upload_dir'
WORKSPACE_KEY_OVERRIDE_KWARGS = 'override_kwargs'
FIND_CLUSTER_RETRY_DELAY = 15.0
FAILURE_RETRY_DELAY = 10.0
MAX_CLUSTER_FAILS = 20
def get_worker_build_info(workflow, platform):
"""
Obtain worker build information for a given platform
"""
workspace = workflow.plugin_workspace[OrchestrateBuildPlugin.key]
return workspace[WORKSPACE_KEY_BUILD_INFO][platform]
def get_koji_upload_dir(workflow):
"""
Obtain koji_upload_dir value used for worker builds
"""
workspace = workflow.plugin_workspace[OrchestrateBuildPlugin.key]
return workspace[WORKSPACE_KEY_UPLOAD_DIR]
def override_build_kwarg(workflow, k, v):
"""
Override a build-kwarg for all worker builds
"""
key = OrchestrateBuildPlugin.key
workspace = workflow.plugin_workspace.setdefault(key, {})
override_kwargs = workspace.setdefault(WORKSPACE_KEY_OVERRIDE_KWARGS, {})
override_kwargs[k] = v
class UnknownPlatformException(Exception):
""" No clusters could be found for a platform """
class AllClustersFailedException(Exception):
""" Each cluster has reached max_cluster_fails """
class ClusterRetryContext(object):
def __init__(self, max_cluster_fails):
# how many times this cluster has failed
self.fails = 0
# datetime at which attempts can resume
self.retry_at = dt.datetime.utcfromtimestamp(0)
# the number of fail counts before this cluster is considered dead
self.max_cluster_fails = max_cluster_fails
@property
def failed(self):
"""Is this cluster considered dead?"""
return self.fails >= self.max_cluster_fails
@property
def in_retry_wait(self):
"""Should we wait before trying this cluster again?"""
return dt.datetime.now() < self.retry_at
def try_again_later(self, seconds):
"""Put this cluster in retry-wait (or consider it dead)"""
if not self.failed:
self.fails += 1
self.retry_at = (dt.datetime.now() + timedelta(seconds=seconds))
def wait_for_any_cluster(contexts):
"""
Wait until any of the clusters are out of retry-wait
    :param contexts: dict, cluster name -> ClusterRetryContext
:raises: AllClustersFailedException if no more retry attempts allowed
"""
try:
earliest_retry_at = min(ctx.retry_at for ctx in contexts.values()
if not ctx.failed)
except ValueError: # can't take min() of empty sequence
raise AllClustersFailedException(
"Could not find appropriate cluster for worker build."
)
time_until_next = earliest_retry_at - dt.datetime.now()
time.sleep(max(timedelta(seconds=0), time_until_next).seconds)
class WorkerBuildInfo(object):
def __init__(self, build, cluster_info, logger):
self.build = build
self.cluster = cluster_info.cluster
self.osbs = cluster_info.osbs
self.platform = cluster_info.platform
self.log = logging.LoggerAdapter(logger, {'arch': self.platform})
self.monitor_exception = None
@property
def name(self):
return self.build.get_build_name() if self.build else 'N/A'
def wait_to_finish(self):
self.build = self.osbs.wait_for_build_to_finish(self.name)
return self.build
def watch_logs(self):
for line in self.osbs.get_build_logs(self.name, follow=True):
self.log.info(line)
def get_annotations(self):
build_annotations = self.build.get_annotations() or {}
annotations = {
'build': {
'cluster-url': self.osbs.os_conf.get_openshift_base_uri(),
'namespace': self.osbs.os_conf.get_namespace(),
'build-name': self.name,
},
'digests': json.loads(
build_annotations.get('digests', '[]')),
'plugins-metadata': json.loads(
build_annotations.get('plugins-metadata', '{}')),
}
if 'metadata_fragment' in build_annotations and \
'metadata_fragment_key' in build_annotations:
annotations['metadata_fragment'] = build_annotations['metadata_fragment']
annotations['metadata_fragment_key'] = build_annotations['metadata_fragment_key']
return annotations
def get_fail_reason(self):
fail_reason = {}
if self.monitor_exception:
fail_reason['general'] = repr(self.monitor_exception)
elif not self.build:
fail_reason['general'] = 'build not started'
if not self.build:
return fail_reason
build_annotations = self.build.get_annotations() or {}
metadata = json.loads(build_annotations.get('plugins-metadata', '{}'))
try:
fail_reason.update(metadata['errors'])
except KeyError:
try:
build_name = self.build.get_build_name()
pod = self.osbs.get_pod_for_build(build_name)
fail_reason['pod'] = pod.get_failure_reason()
except (OsbsException, AttributeError):
# Catch AttributeError here because osbs-client < 0.41
# doesn't have this method
pass
return fail_reason
def cancel_build(self):
if self.build and not self.build.is_finished():
self.osbs.cancel_build(self.name)
class OrchestrateBuildPlugin(BuildStepPlugin):
"""
Start and monitor worker builds for each platform
This plugin will find the best suited worker cluster to
be used for each platform. It does so by calculating the
current load of active builds on each cluster and choosing
the one with smallest load.
The list of available worker clusters is retrieved by fetching
the result provided by reactor_config plugin.
    If any of the worker builds fail, this plugin will return a
    failed BuildResult. It does, however, wait for all worker builds
    to complete in any case.
    If all worker builds succeed, then this plugin returns a
    successful BuildResult, but with a remote image result. The
    image is built by the worker builds, which likely run on a
    different host than the one running this build. This means that
    the local docker daemon has no knowledge of the built image.
    If build_image is defined it is passed to the worker build,
    but build_imagestream may still be set inside osbs.conf in the
    secret, and that would take precedence over build_image from
    kwargs.
    """
CONTAINER_FILENAME = 'container.yaml'
UNREACHABLE_CLUSTER_LOAD = object()
key = PLUGIN_BUILD_ORCHESTRATE_KEY
def __init__(self, tasker, workflow, platforms, build_kwargs,
osbs_client_config=None, worker_build_image=None,
config_kwargs=None,
find_cluster_retry_delay=FIND_CLUSTER_RETRY_DELAY,
failure_retry_delay=FAILURE_RETRY_DELAY,
max_cluster_fails=MAX_CLUSTER_FAILS):
"""
constructor
:param tasker: DockerTasker instance
:param workflow: DockerBuildWorkflow instance
:param platforms: list<str>, platforms to build
:param build_kwargs: dict, keyword arguments for starting worker builds
:param osbs_client_config: str, path to directory containing osbs.conf
:param worker_build_image: str, the builder image to use for worker builds
(not used, image is inherited from the orchestrator)
:param config_kwargs: dict, keyword arguments to override worker configuration
:param find_cluster_retry_delay: the delay in seconds to try again reaching a cluster
:param failure_retry_delay: the delay in seconds to try again starting a build
:param max_cluster_fails: the maximum number of times a cluster can fail before being
ignored
"""
super(OrchestrateBuildPlugin, self).__init__(tasker, workflow)
self.platforms = set(platforms)
self.build_kwargs = build_kwargs
self.osbs_client_config = osbs_client_config
self.config_kwargs = config_kwargs or {}
self.find_cluster_retry_delay = find_cluster_retry_delay
self.failure_retry_delay = failure_retry_delay
self.max_cluster_fails = max_cluster_fails
self.koji_upload_dir = self.get_koji_upload_dir()
self.fs_task_id = self.get_fs_task_id()
self.release = self.get_release()
if worker_build_image:
self.log.warning('worker_build_image is deprecated')
self.worker_builds = []
def make_list(self, value):
if not isinstance(value, list):
value = [value]
return value
def get_platforms(self):
build_file_dir = self.workflow.source.get_build_file_path()[1]
excluded_platforms = set()
container_path = os.path.join(build_file_dir, self.CONTAINER_FILENAME)
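        # container.yaml may restrict the platforms to build for, e.g.:
        #   platforms:
        #     only: [x86_64, ppc64le]
        #     not: [s390x]
        # 'only' is intersected with the requested set, then 'not' is subtracted
        # (platform names above are illustrative)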
if os.path.exists(container_path):
with open(container_path) as f:
                data = yaml.safe_load(f)  # container.yaml is plain data; safe_load avoids arbitrary object construction
if data is None or 'platforms' not in data or data['platforms'] is None:
return self.platforms
excluded_platforms = set(self.make_list(data['platforms'].get('not', [])))
only_platforms = set(self.make_list(data['platforms'].get('only', [])))
if only_platforms:
self.platforms = self.platforms & only_platforms
return self.platforms - excluded_platforms
def get_current_builds(self, osbs):
field_selector = ','.join(['status!={status}'.format(status=status.capitalize())
for status in BUILD_FINISHED_STATES])
with osbs.retries_disabled():
return len(osbs.list_builds(field_selector=field_selector))
def get_cluster_info(self, cluster, platform):
kwargs = deepcopy(self.config_kwargs)
kwargs['conf_section'] = cluster.name
if self.osbs_client_config:
kwargs['conf_file'] = os.path.join(self.osbs_client_config, 'osbs.conf')
conf = Configuration(**kwargs)
osbs = OSBS(conf, conf)
current_builds = self.get_current_builds(osbs)
load = current_builds / cluster.max_concurrent_builds
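        # load is the fraction of the cluster's capacity in use, e.g. 3 active
        # builds with max_concurrent_builds=10 gives 0.3; get_clusters later
        # sorts candidates so the least-loaded cluster is tried first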
self.log.debug('enabled cluster %s for platform %s has load %s and active builds %s/%s',
cluster.name, platform, load, current_builds, cluster.max_concurrent_builds)
return ClusterInfo(cluster, platform, osbs, load)
def get_clusters(self, platform, retry_contexts, all_clusters):
''' return clusters sorted by load. '''
possible_cluster_info = {}
candidates = set(copy.copy(all_clusters))
while candidates and not possible_cluster_info:
wait_for_any_cluster(retry_contexts)
for cluster in sorted(candidates, key=attrgetter('priority')):
ctx = retry_contexts[cluster.name]
if ctx.in_retry_wait:
continue
if ctx.failed:
continue
try:
cluster_info = self.get_cluster_info(cluster, platform)
possible_cluster_info[cluster] = cluster_info
except OsbsException:
ctx.try_again_later(self.find_cluster_retry_delay)
candidates -= set([c for c in candidates if retry_contexts[c.name].failed])
ret = sorted(possible_cluster_info.values(), key=lambda c: c.cluster.priority)
ret = sorted(ret, key=lambda c: c.load)
return ret
def get_release(self):
labels = df_parser(self.workflow.builder.df_path, workflow=self.workflow).labels
return get_preferred_label(labels, 'release')
@staticmethod
def get_koji_upload_dir():
"""
Create a path name for uploading files to
:return: str, path name expected to be unique
"""
dir_prefix = 'koji-upload'
random_chars = ''.join([random.choice(ascii_letters)
for _ in range(8)])
unique_fragment = '%r.%s' % (time.time(), random_chars)
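        # e.g. 'koji-upload/1505909236.46.aBcDeFgH' (illustrative timestamp)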
return os.path.join(dir_prefix, unique_fragment)
def get_worker_build_kwargs(self, release, platform, koji_upload_dir,
task_id):
build_kwargs = deepcopy(self.build_kwargs)
build_kwargs.pop('architecture', None)
build_kwargs['release'] = release
build_kwargs['platform'] = platform
build_kwargs['koji_upload_dir'] = koji_upload_dir
build_kwargs['is_auto'] = is_rebuild(self.workflow)
if task_id:
build_kwargs['filesystem_koji_task_id'] = task_id
return build_kwargs
def _apply_repositories(self, annotations):
unique = set()
primary = set()
for build_info in self.worker_builds:
if not build_info.build:
continue
repositories = build_info.build.get_repositories() or {}
unique.update(repositories.get('unique', []))
primary.update(repositories.get('primary', []))
if unique or primary:
annotations['repositories'] = {
'unique': sorted(list(unique)),
'primary': sorted(list(primary)),
}
def _make_labels(self):
labels = {}
koji_build_id = None
ids = set([build_info.build.get_koji_build_id()
for build_info in self.worker_builds
if build_info.build])
self.log.debug('all koji-build-ids: %s', ids)
if ids:
koji_build_id = ids.pop()
if koji_build_id:
labels['koji-build-id'] = koji_build_id
return labels
def get_fs_task_id(self):
task_id = None
fs_result = self.workflow.prebuild_results.get(PLUGIN_ADD_FILESYSTEM_KEY)
if fs_result is None:
return None
try:
task_id = int(fs_result['filesystem-koji-task-id'])
except KeyError:
self.log.error("%s: expected filesystem-koji-task-id in result",
PLUGIN_ADD_FILESYSTEM_KEY)
raise
except (ValueError, TypeError):
self.log.exception("%s: returned an invalid task ID: %r",
PLUGIN_ADD_FILESYSTEM_KEY, task_id)
raise
self.log.debug("%s: got filesystem_koji_task_id of %d",
PLUGIN_ADD_FILESYSTEM_KEY, task_id)
return task_id
def do_worker_build(self, cluster_info):
workspace = self.workflow.plugin_workspace.get(self.key, {})
override_kwargs = workspace.get(WORKSPACE_KEY_OVERRIDE_KWARGS, {})
build = None
try:
kwargs = self.get_worker_build_kwargs(self.release, cluster_info.platform,
self.koji_upload_dir, self.fs_task_id)
kwargs.update(override_kwargs)
with cluster_info.osbs.retries_disabled():
build = cluster_info.osbs.create_worker_build(**kwargs)
except OsbsException:
self.log.exception('%s - failed to create worker build.',
cluster_info.platform)
raise
except Exception:
self.log.exception('%s - failed to create worker build',
cluster_info.platform)
build_info = WorkerBuildInfo(build=build, cluster_info=cluster_info, logger=self.log)
self.worker_builds.append(build_info)
if build_info.build:
try:
self.log.info('%s - created build %s on cluster %s.', cluster_info.platform,
build_info.name, cluster_info.cluster.name)
build_info.watch_logs()
build_info.wait_to_finish()
except Exception as e:
build_info.monitor_exception = e
self.log.exception('%s - failed to monitor worker build',
cluster_info.platform)
# Attempt to cancel it rather than leave it running
# unmonitored.
try:
build_info.cancel_build()
except OsbsException:
pass
def select_and_start_cluster(self, platform):
''' Choose a cluster and start a build on it '''
config = get_config(self.workflow)
clusters = config.get_enabled_clusters_for_platform(platform)
if not clusters:
raise UnknownPlatformException('No clusters found for platform {}!'
.format(platform))
retry_contexts = {
cluster.name: ClusterRetryContext(self.max_cluster_fails)
for cluster in clusters
}
while True:
try:
possible_cluster_info = self.get_clusters(platform,
retry_contexts,
clusters)
except AllClustersFailedException as ex:
cluster = ClusterInfo(None, platform, None, None)
build_info = WorkerBuildInfo(build=None,
cluster_info=cluster,
logger=self.log)
                build_info.monitor_exception = ex
self.worker_builds.append(build_info)
return
for cluster_info in possible_cluster_info:
ctx = retry_contexts[cluster_info.cluster.name]
try:
self.log.info('Attempting to start build for platform %s on cluster %s',
platform, cluster_info.cluster.name)
self.do_worker_build(cluster_info)
return
except OsbsException:
ctx.try_again_later(self.failure_retry_delay)
# this will put the cluster in retry-wait when get_clusters runs
def set_build_image(self):
"""
Overrides build_image for worker, to be same as in orchestrator build
"""
spec = get_build_json().get("spec")
try:
build_name = spec['strategy']['customStrategy']['from']['name']
build_kind = spec['strategy']['customStrategy']['from']['kind']
except KeyError:
raise RuntimeError("Build object is malformed, failed to fetch buildroot image")
if build_kind == 'DockerImage':
self.config_kwargs['build_image'] = build_name
else:
raise RuntimeError("Build kind isn't 'DockerImage' but %s" % build_kind)
def run(self):
self.set_build_image()
platforms = self.get_platforms()
thread_pool = ThreadPool(len(platforms))
result = thread_pool.map_async(self.select_and_start_cluster, platforms)
try:
result.get()
# Always clean up worker builds on any error to avoid
# runaway worker builds (includes orchestrator build cancellation)
except Exception:
thread_pool.terminate()
self.log.info('build cancelled, cancelling worker builds')
if self.worker_builds:
ThreadPool(len(self.worker_builds)).map(
lambda bi: bi.cancel_build(), self.worker_builds)
while not result.ready():
result.wait(1)
raise
else:
thread_pool.close()
thread_pool.join()
annotations = {'worker-builds': {
build_info.platform: build_info.get_annotations()
for build_info in self.worker_builds if build_info.build
}}
self._apply_repositories(annotations)
labels = self._make_labels()
fail_reasons = {
build_info.platform: build_info.get_fail_reason()
for build_info in self.worker_builds
if not build_info.build or not build_info.build.is_succeeded()
}
workspace = self.workflow.plugin_workspace.setdefault(self.key, {})
workspace[WORKSPACE_KEY_UPLOAD_DIR] = self.koji_upload_dir
workspace[WORKSPACE_KEY_BUILD_INFO] = {build_info.platform: build_info
for build_info in self.worker_builds}
if fail_reasons:
return BuildResult(fail_reason=json.dumps(fail_reasons),
annotations=annotations, labels=labels)
return BuildResult.make_remote_image_result(annotations, labels=labels)
|
vrutkovs/atomic-reactor
|
atomic_reactor/plugins/build_orchestrate_build.py
|
Python
|
bsd-3-clause
| 22,139
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('legislators', '0007_auto_20150311_1607'),
('core', '0005_convenetime_active'),
]
operations = [
migrations.CreateModel(
name='Stream',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('granicus_subdomain', models.CharField(help_text=b'Granicus subdomain for stream', max_length=20)),
('camera_id', models.IntegerField(help_text=b'Identifier for direct camera stream')),
('direct_event_feed', models.BooleanField(default=False, help_text=b'Use an event feed instead of a camera feed')),
('feed_id', models.IntegerField(help_text=b'Feed identifier for direct embed', null=True, blank=True)),
('event_id', models.IntegerField(help_text=b'Event identifier for direct embed', null=True, blank=True)),
('chamber', models.OneToOneField(related_name='stream_for', to='legislators.Chamber', help_text=b'Chamber viewable in stream')),
],
options={
},
bases=(models.Model,),
),
]
|
texastribune/txlege84
|
txlege84/core/migrations/0006_stream.py
|
Python
|
mit
| 1,329
|
import os, sys
'''
BASE_RESOURCE_PATH = os.path.join(os.getcwd())
sys.path.append(os.path.join(BASE_RESOURCE_PATH, "pyparsing"))
sys.path.append(os.path.join(BASE_RESOURCE_PATH, "pyscraper"))
from descriptionparserfactory import DescriptionParserFactory
from descriptionparserfactory import *
descFile = "E:\\XBMC\\RCB\\develop\\scraper\\offline\\mame\\astrowar.xml"
parseInstruction = "E:\\XBMC\\RCB\\develop\\scraper\\offline\\mame\\parserconfig.xml"
descParser = DescriptionParserFactory.getParser(parseInstruction)
results = descParser.parseDescription(str(descFile), 'iso-8859-15')
for result in results:
print result
print len(results)
'''
from emulatorautoconfig.autoconfig import EmulatorAutoconfig
config = EmulatorAutoconfig('C:\\Users\\lom\\AppData\\Roaming\\XBMC\\addons\\script.games.rom.collection.browser.dev\\resources\\emu_autoconfig.xml')
'''
config.readXml()
for op in config.operatingSystems:
print op.name
for platform in op.platforms:
print platform.name
for alias in platform.aliases:
print alias
for emulator in platform.emulators:
print emulator.name
print emulator.emuCmd
print emulator.emuParams
for detection in emulator.detectionMethods:
print detection.name
print detection.command
'''
emulators = config.findEmulators('Android', 'Super Nintendo', True)
if emulators:
for emulator in emulators:
print emulator.name
print emulator.os
print emulator.platform
print emulator.emuCmd
print emulator.emuParams
print emulator.detectionMethods
print emulator.isInstalled
|
skerit/romcollectionbrowser
|
resources/lib/temptests2.py
|
Python
|
gpl-2.0
| 1,763
|
l = []
n = int(raw_input())
for i in xrange(n):
    l.append(raw_input())
l = sorted(l, key=int, reverse=True)
print(" ".join(l))
|
Nebulino/CodingGame-Solutions
|
Reverse - Reverse sort number.py
|
Python
|
mit
| 168
|
import os
import math
from affine import Affine
import numpy as np
from shapely.geometry import shape
import rasterio
import geopandas as gpd
import pytest
from distancerasters.utils import (
get_affine_and_shape,
rasterize,
export_raster,
convert_index_to_coords,
calc_haversine_distance,
)
@pytest.fixture
def example_shape():
# geometry from (1/10 scale):
# https://en.wikipedia.org/wiki/Well-known_text_representation_of_geometry
return shape({"type": "LineString", "coordinates": [(3, 1), (1, 3), (4, 4)]})
@pytest.fixture
def example_feature():
# geometry from (1/10 scale):
# https://en.wikipedia.org/wiki/Well-known_text_representation_of_geometry
return {"type": "Feature", "geometry": shape({"type": "LineString", "coordinates": [(3, 1), (1, 3), (4, 4)]}), "properties": {"id": 1}}
@pytest.fixture
def example_raster():
# returns a raster that represents a rasterized example_shape
# with pixel_size = 0.5
return np.array(
[
[0, 0, 0, 1, 1, 1],
[1, 1, 1, 1, 0, 0],
[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
]
)
@pytest.fixture
def example_affine():
return Affine(1.5, 1.0, 0.0, 1.0, 1.5, 1.0)
@pytest.fixture
def example_path():
path = "tests/testdata/lorem/ipsum/dolor/amet"
# remove any file at example_path that may have been created in previous tests
if os.path.isfile(path):
os.remove(path)
return path
def test_get_affine_and_shape():
output_affine, output_shape = get_affine_and_shape((1, 4, 3, 6), 0.5)
assert output_affine == Affine(0.5, 0.0, 1.0, 0.0, -0.5, 6.0)
assert output_shape == (4, 4)
def test_bad_get_affine_and_shape():
with pytest.raises(TypeError):
get_affine_and_shape((0, 0, 0, 0), "string")
def test_rasterize(example_shape, example_raster, example_path):
# pass shapely shape geometry to rasterize
output_raster = rasterize(
example_shape, pixel_size=0.5, bounds=example_shape.bounds
)[0]
    assert (output_raster == example_raster).all()
def test_rasterize_output(example_shape, example_raster, example_path):
# same as above, with output path
output_raster = rasterize(
example_shape, pixel_size=0.5, bounds=example_shape.bounds, output=example_path
)[0]
# open raster written to example_path and make sure the data matches
with rasterio.open(example_path) as src:
        assert (src.read() == example_raster).all()
def test_rasterize_with_attribute(example_shape, example_raster):
geodataframe = gpd.GeoDataFrame([{"id": 0, "geometry": example_shape, "value": 2}])
# pass geodataframe to rasterize
output_raster = rasterize(
geodataframe, pixel_size=0.5, bounds=geodataframe.total_bounds, attribute="value"
)[0]
assert (output_raster == example_raster * 2).all()
def test_geodataframe_rasterize(example_shape, example_raster):
geodataframe = gpd.GeoDataFrame([{"id": 0, "geometry": example_shape}])
# pass geodataframe to rasterize
output_raster = rasterize(
geodataframe, pixel_size=0.5, bounds=geodataframe.total_bounds
)[0]
assert (output_raster == example_raster).all()
def test_iterable_rasterize(example_feature, example_raster):
bounds = shape(example_feature["geometry"]).bounds
example_iterable = [example_feature]
# pass iterable of geometry to rasterize
output_raster = rasterize(
example_iterable, pixel_size=0.5, bounds=bounds
)[0]
    assert (output_raster == example_raster).all()
def test_bad_rasterize(example_shape, example_affine):
# warns about ignoring pixel_size because valid affine and shape were passed
with pytest.warns(UserWarning):
rasterize(example_shape, pixel_size=0.5, shape=(2, 4), affine=example_affine)[0]
# warns about ignoring bounds because valid affine and shape were passed
with pytest.warns(UserWarning):
rasterize(
example_shape,
pixel_size=1.5,
shape=(2, 4),
bounds=(0, 1, 2, 3),
affine=example_affine,
)[0]
# do not provide pixel_size + bounds OR affine + shape
# perhaps this should be a more specific type of exception?
with pytest.raises(Exception):
rasterize(example_shape)
    # attribute is not a valid keyword
with pytest.raises(KeyError):
rasterize(
example_shape, pixel_size=0.5, bounds=example_shape.bounds, attribute="abc"
)
def test_bad_rasterize_output(example_shape, example_affine):
# fiona throws exception here because file is not found
# perhaps this should be a more specific type of exception?
with pytest.raises(Exception):
rasterize(
"bad/path/to/file",
pixel_size=0.5,
bounds=example_shape.bounds
)
def test_export_raster(example_raster, example_affine, example_path):
export_raster(example_raster, example_affine, example_path)
# test if file was created by export_raster at example_path
assert os.path.exists(example_path)
# open raster written to example_path and make sure the data matches
with rasterio.open(example_path) as src:
        assert (src.read() == example_raster).all()
def test_bad_export_raster(example_raster, example_affine, example_path):
# TODO: pass bad raster to export_raster
# Try passing a bad output datatype
with pytest.raises(ValueError):
export_raster(
example_raster, example_affine, example_path, out_dtype="bad_dt42"
)
# TODO: pass bad nodata to export_raster
def test_convert_index_to_coords(example_affine):
# Simple index with affine identity
assert convert_index_to_coords((1, 2), Affine.identity()) == (2.5, -1.5)
# Random index with example affine
assert convert_index_to_coords((5, 8), example_affine) == (12.75, -7.25)
def test_calc_haversine_distance():
# Test distance that should be zero
assert calc_haversine_distance((0, -180), (180, 0)) == 0
# More complex test calculation that requires every aspect of the function
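    # (12742 is the Earth's diameter in km, i.e. twice the 6371 km mean radius)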
a = math.cos(math.pi / 2) * math.sqrt(2) / 8 + math.sin(math.pi / 8) ** 2
expected_output = math.atan2(math.sqrt(a), math.sqrt(1 - a)) * 12742
assert (
calc_haversine_distance((math.pi, -90), (4 * (math.pi / 3), -45))
== expected_output
)
# Make sure output is float
assert isinstance(calc_haversine_distance((0, 0), (0, 0)), float)
|
sgoodm/python-distance-rasters
|
tests/test_utils.py
|
Python
|
bsd-3-clause
| 6,579
|
import os
import re
import shutil
import sublime
import sublime_plugin
from .git.git_command_base import GitCommandBase
from .command_base import AdvancedNewFileBase
from ..anf_util import *
class AdvancedNewFileMove(AdvancedNewFileBase, sublime_plugin.WindowCommand, GitCommandBase):
def __init__(self, window):
super(AdvancedNewFileMove, self).__init__(window)
def run(self, is_python=False, initial_path=None, rename_file=None):
self.is_python = is_python
self.run_setup()
self.rename_filename = rename_file
path = self.settings.get(RENAME_DEFAULT_SETTING)
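        # the configured default may contain <filepath>/<filename> placeholders,
        # replaced below with the current file's full path and base name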
current_file = self.view.file_name()
current_file_name = os.path.basename(self.view.file_name()) if current_file else ""
path = path.replace("<filepath>", current_file)
path = path.replace("<filename>", current_file_name)
self.show_filename_input(path if len(path) > 0 else self.generate_initial_path())
def input_panel_caption(self):
caption = 'Enter a new path for current file'
view = self.window.active_view()
if view is not None:
self.original_name = os.path.basename(view.file_name()) if view.file_name is not None else ""
else:
self.original_name = ""
if self.is_python:
caption = '%s (creates __init__.py in new dirs)' % caption
return caption
def _git_mv(self, from_filepath, to_filepath):
path, filename = os.path.split(from_filepath)
args = ["mv", filename, to_filepath]
result = self.run_command(args, path)
if result != 0:
sublime.error_message("Git move of %s to %s failed" % (from_filepath, to_filepath))
def entered_file_action(self, path):
attempt_open = True
directory = os.path.dirname(path)
if not os.path.exists(directory):
try:
self.create_folder(directory)
except OSError as e:
attempt_open = False
sublime.error_message("Cannot create '" + path + "'. See console for details")
print("Exception: %s '%s'" % (e.strerror, e.filename))
if attempt_open:
self._rename_file(path)
def _rename_file(self, file_path):
if os.path.isdir(file_path) or re.search(r"(/|\\)$", file_path):
# use original name if a directory path has been passed in.
file_path = os.path.join(file_path, self.original_name)
window = self.window
if self.rename_filename:
file_view = self._find_open_file(self.rename_filename)
if file_view is not None:
self.view.run_command("save")
window.focus_view(file_view)
window.run_command("close")
self._move_action(self.rename_filename, file_path)
if file_view is not None:
self.open_file(file_path)
elif self.view is not None and self.view.file_name() is not None:
filename = self.view.file_name()
if filename:
self.view.run_command("save")
window.focus_view(self.view)
window.run_command("close")
self._move_action(filename, file_path)
else:
content = self.view.substr(sublime.Region(0, self.view.size()))
self.view.set_scratch(True)
window.focus_view(self.view)
window.run_command("close")
with open(file_path, "w") as file_obj:
file_obj.write(content)
self.open_file(file_path)
else:
sublime.error_message("Unable to move file. No file to move.")
def _move_action(self, from_file, to_file):
tracked_by_git = self.file_tracked_by_git(from_file)
if tracked_by_git and self.settings.get(VCS_MANAGEMENT_SETTING):
self._git_mv(from_file, to_file)
else:
shutil.move(from_file, to_file)
def update_status_message(self, creation_path):
        if self.view is not None:
            if os.path.isdir(creation_path) or os.path.basename(creation_path) == "":
creation_path = os.path.join(creation_path, self.original_name)
self.view.set_status("AdvancedNewFile", "Moving file to %s " % \
creation_path)
else:
sublime.status_message("Moving file to %s" % creation_path)
class AdvancedNewFileMoveAtCommand(sublime_plugin.WindowCommand):
def run(self, files):
if len(files) != 1:
return
self.window.run_command("advanced_new_file_move", {"rename_file": files[0]})
def is_visible(self, files):
return len(files) == 1
|
herove/dotfiles
|
sublime/Packages/AdvancedNewFile/advanced_new_file/commands/move_file_command.py
|
Python
|
mit
| 4,725
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import today
from erpnext.accounts.utils import get_fiscal_year
from erpnext.stock.stock_ledger import update_entries_after
def execute():
try:
year_start_date = get_fiscal_year(today())[1]
	except Exception:
return
if year_start_date:
items = frappe.db.sql("""select distinct item_code, warehouse from `tabStock Ledger Entry`
where ifnull(serial_no, '') != '' and actual_qty > 0 and incoming_rate=0""", as_dict=1)
for d in items:
try:
update_entries_after({
"item_code": d.item_code,
"warehouse": d.warehouse,
"posting_date": year_start_date
}, allow_zero_rate=True)
			except Exception:
pass
|
hassanibi/erpnext
|
erpnext/patches/v6_24/repost_valuation_rate_for_serialized_items.py
|
Python
|
gpl-3.0
| 827
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2016, Jianfeng Chen <jchen37@ncsu.edu>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import division
def dist(a, b):
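    # squared Euclidean distance -- no sqrt here; GD below takes the square
    # root of the accumulated sum instead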
return sum((i-j)**2 for i, j in zip(a, b))
def GD(PF0, PFc):
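    # Generational Distance: for every point of the obtained front PFc, take
    # the distance to its closest point on the reference front PF0, then
    # normalise: GD = sqrt(sum of squared minimum distances) / |PFc|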
up = 0
for i in PFc:
up += min([dist(i, j) for j in PF0])
return up**0.5 / (len(PFc))
|
Ginfung/FSSE
|
Metrics/gd.py
|
Python
|
mit
| 1,445
|
# -*- coding: utf-8 -*-
# This file is part of Bika LIMS
#
# Copyright 2011-2017 by it's authors.
# Some rights reserved. See LICENSE.txt, AUTHORS.txt.
import re
import sys
import math
import inspect
import importlib
import transaction
from zope.interface import implements
from AccessControl import ClassSecurityInfo
from Products.Archetypes.atapi import Schema
from Products.Archetypes.atapi import BaseFolder
from Products.Archetypes.atapi import registerType
from Products.CMFPlone.utils import safe_unicode
from Products.Archetypes.references import HoldingReference
from Products.CMFCore.utils import getToolByName
from Products.CMFCore.WorkflowCore import WorkflowException
from Products.ATContentTypes.lib.historyaware import HistoryAwareMixin
# Fields
from Products.Archetypes.atapi import TextField
from Products.ATExtensions.field import RecordsField
from bika.lims.browser.fields import InterimFieldsField
from bika.lims.browser.fields import HistoryAwareReferenceField
# Widgets
from Products.Archetypes.atapi import ReferenceWidget
from Products.Archetypes.atapi import TextAreaWidget
from bika.lims.browser.widgets import RecordsWidget
from bika.lims.browser.widgets import RecordsWidget as BikaRecordsWidget
# bika.lims imports
from bika.lims.config import PROJECTNAME
from bika.lims import bikaMessageFactory as _
from bika.lims.interfaces import ICalculation
from bika.lims.content.bikaschema import BikaSchema
schema = BikaSchema.copy() + Schema((
InterimFieldsField(
'InterimFields',
widget=BikaRecordsWidget(
label=_("Calculation Interim Fields"),
description=_(
"Define interim fields such as vessel mass, dilution factors, "
"should your calculation require them. The field title "
"specified here will be used as column headers and field "
"descriptors where the interim fields are displayed. If "
"'Apply wide' is enabled the field will be shown in a "
"selection box on the top of the worksheet, allowing to apply "
"a specific value to all the corresponding fields on the "
"sheet."),
)
),
HistoryAwareReferenceField(
'DependentServices',
required=1,
multiValued=1,
vocabulary_display_path_bound=sys.maxsize,
allowed_types=('AnalysisService',),
relationship='CalculationAnalysisService',
referenceClass=HoldingReference,
widget=ReferenceWidget(
checkbox_bound=0,
visible=False,
label=_("Dependent Analyses"),
),
),
RecordsField(
'PythonImports',
required=False,
subfields=('module', 'function'),
subfield_labels={'module': _('Module'), 'function': _('Function')},
subfield_readonly={'module': False, 'function': False},
subfield_types={'module': 'string', 'function': 'string'},
default=[
{'module': 'math', 'function': 'ceil'},
{'module': 'math', 'function': 'floor'},
],
subfield_validators={
'module': 'importvalidator',
},
widget=RecordsWidget(
label=_("Additional Python Libraries"),
description=_(
"If your formula needs a special function from an external "
"Python library, you can import it here. E.g. if you want to "
"use the 'floor' function from the Python 'math' module, you "
"add 'math' to the Module field and 'floor' to the function field. "
"The equivalent in Python would be 'from math import floor'. "
"In your calculation you could use then 'floor([Ca] + [Mg])'. "
),
allowDelete=True,
),
),
TextField(
'Formula',
validators=('formulavalidator',),
default_content_type='text/plain',
allowable_content_types=('text/plain',),
widget=TextAreaWidget(
label=_("Calculation Formula"),
description=_(
"<p>The formula you type here will be dynamically calculated "
"when an analysis using this calculation is displayed.</p>"
"<p>To enter a Calculation, use standard maths operators, "
"+ - * / ( ), and all keywords available, both from other "
"Analysis Services and the Interim Fields specified here, "
"as variables. Enclose them in square brackets [ ].</p>"
"<p>E.g, the calculation for Total Hardness, the total of "
"Calcium (ppm) and Magnesium (ppm) ions in water, is entered "
"as [Ca] + [Mg], where Ca and MG are the keywords for those "
"two Analysis Services.</p>"),
)
),
RecordsField(
'TestParameters',
required=False,
subfields=('keyword', 'value'),
subfield_labels={'keyword': _('Keyword'), 'value': _('Value')},
subfield_readonly={'keyword': True, 'value': False},
subfield_types={'keyword': 'string', 'value': 'float'},
default=[{'keyword': '', 'value': 0}],
widget=RecordsWidget(
label=_("Test Parameters"),
description=_("To test the calculation, enter values here for all "
"calculation parameters. This includes Interim "
"fields defined above, as well as any services that "
"this calculation depends on to calculate results."),
allowDelete=False,
),
),
TextField(
'TestResult',
default_content_type='text/plain',
allowable_content_types=('text/plain',),
widget=TextAreaWidget(
label=_('Test Result'),
description=_("The result after the calculation has taken place "
"with test values. You will need to save the "
"calculation before this value will be calculated."),
)
),
))
schema['title'].widget.visible = True
schema['description'].widget.visible = True
class Calculation(BaseFolder, HistoryAwareMixin):
"""Calculation for Analysis Results
"""
implements(ICalculation)
security = ClassSecurityInfo()
displayContentsTab = False
schema = schema
_at_rename_after_creation = True
def _renameAfterCreation(self, check_auto_id=False):
from bika.lims.idserver import renameAfterCreation
renameAfterCreation(self)
def setInterimFields(self, value):
new_value = []
for x in range(len(value)):
row = dict(value[x])
keys = row.keys()
if 'value' not in keys:
row['value'] = 0
new_value.append(row)
self.getField('InterimFields').set(self, new_value)
def setFormula(self, Formula=None):
"""Set the Dependent Services from the text of the calculation Formula
"""
bsc = getToolByName(self, 'bika_setup_catalog')
if Formula is None:
self.setDependentServices(None)
self.getField('Formula').set(self, Formula)
else:
DependentServices = []
keywords = re.compile(r"\[([^\.^\]]+)\]").findall(Formula)
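            # keywords are the bracketed names in the formula, e.g. the formula
            # "[Ca] + [Mg]" yields ['Ca', 'Mg']; dotted names are not matched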
for keyword in keywords:
service = bsc(portal_type="AnalysisService",
getKeyword=keyword)
if service:
DependentServices.append(service[0].getObject())
self.getField('DependentServices').set(self, DependentServices)
self.getField('Formula').set(self, Formula)
def getMinifiedFormula(self):
"""Return the current formula value as text.
The result will have newlines and additional spaces stripped out.
"""
value = " ".join(self.getFormula().splitlines())
return value
def getCalculationDependencies(self, flat=False, deps=None):
""" Recursively calculates all dependencies of this calculation.
The return value is dictionary of dictionaries (of dictionaries....)
{service_UID1:
{service_UID2:
{service_UID3: {},
service_UID4: {},
},
},
}
set flat=True to get a simple list of AnalysisService objects
"""
if deps is None:
deps = [] if flat is True else {}
for service in self.getDependentServices():
calc = service.getCalculation()
if calc:
calc.getCalculationDependencies(flat, deps)
if flat:
deps.append(service)
else:
deps[service.UID()] = {}
return deps
def getCalculationDependants(self):
"""Return a flat list of services who's calculations depend on this."""
deps = []
for service in self.getBackReferences('AnalysisServiceCalculation'):
calc = service.getCalculation()
if calc and calc.UID() != self.UID():
calc.getCalculationDependants(deps)
deps.append(service)
return deps
def setTestParameters(self, form_value):
"""This is called from the objectmodified subscriber, to ensure
correct population of the test-parameters field.
It collects Keywords for all services that are direct dependencies of
        this calculation, and all of this calculation's InterimFields,
        and merges them together.
"""
params = []
# Set default/existing values for InterimField keywords
for interim in self.getInterimFields():
keyword = interim.get('keyword')
ex = [x.get('value') for x in form_value if x.get('keyword') == keyword]
params.append({'keyword': keyword,
'value': ex[0] if ex else interim.get('value')})
# Set existing/blank values for service keywords
for service in self.getDependentServices():
keyword = service.getKeyword()
ex = [x.get('value') for x in form_value if x.get('keyword') == keyword]
params.append({'keyword': keyword,
'value': ex[0] if ex else ''})
self.Schema().getField('TestParameters').set(self, params)
def setTestResult(self, form_value):
"""Calculate formula with TestParameters and enter result into
TestResult field.
"""
# Create mapping from TestParameters
mapping = {x['keyword']: x['value'] for x in self.getTestParameters()}
# Gather up and parse formula
formula = self.getMinifiedFormula()
test_result_field = self.Schema().getField('TestResult')
# Flush the TestResult field and return
if not formula:
return test_result_field.set(self, "")
formula = formula.replace('[', '{').replace(']', '}').replace(' ', '')
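        # "[Ca]+[Mg]" becomes "{Ca}+{Mg}" so that str.format can substitute the
        # test values from the mapping before the expression is evaluated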
result = 'Failure'
try:
# print "pre: {}".format(formula)
formula = formula.format(**mapping)
# print "formatted: {}".format(formula)
result = eval(formula, self._getGlobals())
# print "result: {}".format(result)
except TypeError as e:
# non-numeric arguments in interim mapping?
result = "TypeError: {}".format(str(e.args[0]))
except ZeroDivisionError as e:
result = "Division by 0: {}".format(str(e.args[0]))
except KeyError as e:
result = "Key Error: {}".format(str(e.args[0]))
except ImportError as e:
result = "Import Error: {}".format(str(e.args[0]))
except Exception as e:
result = "Unspecified exception: {}".format(str(e.args[0]))
test_result_field.set(self, str(result))
def _getGlobals(self, **kwargs):
"""Return the globals dictionary for the formula calculation
"""
# Default globals
globs = {"__builtins__": None, 'math': math}
# Update with keyword arguments
globs.update(kwargs)
# Update with additional Python libraries
for imp in self.getPythonImports():
module = imp["module"]
func = imp["function"]
member = self._getModuleMember(module, func)
if member is None:
raise ImportError("Could not find member {} of module {}".format(
func, module))
globs[func] = member
return globs
def _getModuleMember(self, dotted_name, member):
"""Get the member object of a module.
:param dotted_name: The dotted name of the module, e.g. 'scipy.special'
:type dotted_name: string
:param member: The name of the member function, e.g. 'gammaincinv'
:type member: string
:returns: member object or None
"""
try:
module = importlib.import_module(dotted_name)
except ImportError:
return None
members = dict(inspect.getmembers(module))
return members.get(member)
def workflow_script_activate(self):
wf = getToolByName(self, 'portal_workflow')
pu = getToolByName(self, 'plone_utils')
# A calculation cannot be re-activated if services it depends on
# are deactivated.
services = self.getDependentServices()
inactive_services = []
for service in services:
if wf.getInfoFor(service, "inactive_state") == "inactive":
inactive_services.append(service.Title())
if inactive_services:
msg = _("Cannot activate calculation, because the following "
"service dependencies are inactive: ${inactive_services}",
mapping={'inactive_services': safe_unicode(", ".join(inactive_services))})
pu.addPortalMessage(msg, 'error')
transaction.get().abort()
raise WorkflowException
def workflow_script_deactivate(self):
bsc = getToolByName(self, 'bika_setup_catalog')
pu = getToolByName(self, 'plone_utils')
# A calculation cannot be deactivated if active services are using it.
services = bsc(portal_type="AnalysisService", inactive_state="active")
calc_services = []
for service in services:
service = service.getObject()
calc = service.getCalculation()
if calc and calc.UID() == self.UID():
calc_services.append(service.Title())
if calc_services:
msg = _('Cannot deactivate calculation, because it is in use by the '
'following services: ${calc_services}',
mapping={'calc_services': safe_unicode(", ".join(calc_services))})
pu.addPortalMessage(msg, 'error')
transaction.get().abort()
raise WorkflowException
registerType(Calculation, PROJECTNAME)
|
rockfruit/bika.lims
|
bika/lims/content/calculation.py
|
Python
|
agpl-3.0
| 15,072
|
import scrapy
import time
from datetime import datetime
from nhsbot.items import NhsbotItem
class NHSChoices(scrapy.Spider):
"""
Creates an NHSChoices Spider class to scrape the
NHS Choices website.
Inherits from basic spider which provides start_requests()
implementation. This sends requests from the start_urls attribute
and calls the parse method for each resulting responses.
"""
name = "nhsuk" # defines the spider's name, must be unique (required).
allowed_domains = [ # limit domains to crawl.
'nhs.uk'
]
start_urls = [ # define urls to start crawling from.
'http://www.nhs.uk/Conditions/Pages/hub.aspx'
]
custom_settings = { # override wide project configuration settings.
'LOG_LEVEL': 'INFO',
# Avoid getting banned
'CONCURRENT_REQUESTS_PER_DOMAIN' : 2,
'AUTOTHROTTLE_ENABLED' : True,
'AUTOTHROTTLE_START_DELAY' : 1,
'AUTOTHROTTLE_MAX_DELAY' : 3,
'COOKIES_ENABLED': False,
        'DOWNLOAD_DELAY': 2
}
def parse(self, response):
"""
Default callback to process downloaded responses.
Crawls the urls defined in start_urls, in our case the
NHS choices conditions website. Aims to extract url links
from the 'Browse by index' section.
"""
index_urls_xpath_selector = '//div[@id="haz-mod1"]//li/a/@href'
index_urls_to_crawl = (response.xpath(index_urls_xpath_selector)
.extract())
if not index_urls_to_crawl:
self.logger.info('Failed to find URLs to follow at %s, xpath used: %s',
response.url, index_urls_xpath_selector)
return
for index_url in index_urls_to_crawl:
yield response.follow(index_url, callback=self.parse_index)
def parse_index(self, response):
"""
Custom parser that aims to extract url links
from the alphabetical index page.
Limits the links to the ones interested in
(url pattern ../conditions/..)
"""
domain = 'nhs.uk'
condition_urls_pattern = '/conditions/'
condition_urls_xpath_selector = (
'//div[@id="ctl00_PlaceHolderMain_BodyMap_ConditionsByAlphabet"]' \
'//li/a/@href')
condition_urls_to_crawl = (response.xpath(condition_urls_xpath_selector)
.extract())
if not condition_urls_to_crawl:
self.logger.info('Failed to find URLs to follow at %s, xpath used: %s',
response.url, condition_urls_xpath_selector)
return
for condition_url in condition_urls_to_crawl:
if ( condition_url.lower().startswith(condition_urls_pattern) or
(domain+condition_urls_pattern) in condition_url.lower() ):
yield response.follow(condition_url, callback=self.parse_condition_articles)
else:
self.logger.info('Ignoring url %s, to satisfy url pattern: %s',
condition_url, condition_urls_pattern)
def parse_condition_articles(self, response):
"""
Custom parser that aims to extract article
url links from the condition page.
Limits the links to the ones interested in
(url pattern ../conditions/..)
"""
# Make sure to crawl the first active page
# e.g. Introduction
yield self.parse_condition(response)
domain = 'nhs.uk'
article_urls_pattern = '/conditions/'
article_urls_xpath_selector = ('//div[@id="ctl00_PlaceHolderMain_articles"]' \
'//li/span/a/@href')
article_urls_to_crawl = (response.xpath(article_urls_xpath_selector)
.extract())
if not article_urls_to_crawl:
self.logger.info('Failed to find URLs to follow at %s, xpath used: %s',
response.url, article_urls_xpath_selector)
return
for article_url in article_urls_to_crawl:
if ( article_url.lower().startswith(article_urls_pattern) or
(domain+article_urls_pattern) in article_url.lower() ):
yield response.follow(article_url, callback=self.parse_condition)
else:
self.logger.info('Ignoring url %s, to satisfy url pattern: %s',
article_url, article_urls_pattern)
def parse_condition(self, response):
"""
Custom parser that aims to scrape required
information from the condition page.
Information extracted (if found):
title, metadata, content, last reviewed date.
"""
if 'beta.nhs.uk' in response.url:
self.logger.info('Ignoring url %s, as it is a beta page.',
response.url)
return
item = NhsbotItem()
# Add source of our spider.
item['source'] = 'nhsuk-spider'
# Add crawled date.
item['crawled_epoch_date'] = int(time.time())
# Extract url
# Add url as id.
url = response.url
item['id'] = url
item['url'] = url
# Extract title
title_xpath_selector = '//div[contains(@class,"healthaz-header")]/h1/text()'
title = response.xpath(title_xpath_selector).extract_first()
if title:
item['title'] = title
else:
self.logger.info('Could not find a title for %s, xpath used: %s',
url, title_xpath_selector)
# Extract metadata elements on the page.
meta = dict()
meta_elems = response.xpath('//meta')
date_issued_pattern = '%Y-%m-%d'
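        # e.g. a DC.date.issued value of '2017-05-20' is stored below as
        # '2017-05-20T00:00:00Z' (UTC ISO8601)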
for meta_elem in meta_elems:
key = meta_elem.xpath('@name').extract_first()
value = meta_elem.xpath('@content').extract_first()
if not key or not value:
continue
# Make sure that date metadata is extracted as UTC ISO8601 format
if key == 'DC.date.issued':
                self.logger.info('Found date metadata %s, trying to convert to '
                                 'UTC ISO8601 format with pattern %s',
                                 key, date_issued_pattern)
try:
parsed_date = datetime.strptime(value,date_issued_pattern)
if parsed_date:
meta[key] = parsed_date.isoformat() + 'Z'
                except ValueError:
                    # strptime raises ValueError for values that do not match
                    pass
else:
meta[key] = value
if meta:
item['meta'] = meta
else:
self.logger.info('Could not find metadata for %s, xpath used: %s',
url, 'multiple: //meta, @name, @content')
# Extract page content.
content_xpath_selector = '//div[contains(@class,"main-content healthaz-content")]' \
'/div[@id="webZoneLeft"]' \
'/preceding-sibling::node()'
content_text_xpath_selector = 'descendant-or-self::*/text()'
content = ( response.xpath(content_xpath_selector)
.xpath(content_text_xpath_selector)
.extract() )
if content:
item['content'] = ' '.join(content)
else:
self.logger.info('Could not find content for %s, xpath used: %s',
url, 'multiple '+content_xpath_selector+' '+
content_text_xpath_selector)
# Extract last reviewed date.
last_reviewed_pattern = '%d/%m/%Y'
last_reviewed_xpath_selector = '//div[contains(@class,"review-date")]' \
'//span[contains(@class,"review-pad")]/text()'
last_reviewed_date = ( response.xpath(last_reviewed_xpath_selector)
.extract_first() )
if last_reviewed_date:
try:
last_reviewed_epoch_date = int( time.mktime(
time.strptime(
last_reviewed_date, last_reviewed_pattern)) )
if last_reviewed_epoch_date:
item['last_reviewed_epoch_date'] = last_reviewed_epoch_date
else:
                    self.logger.info('Failed to convert extracted last reviewed date (%s) '
                                     'to an epoch representation. Date pattern used %s',
                                     last_reviewed_date, last_reviewed_pattern)
            except (ValueError, OverflowError):
                self.logger.info('Failed to convert extracted last reviewed date (%s) '
                                 'to an epoch representation. Date pattern used %s',
                                 last_reviewed_date, last_reviewed_pattern)
else:
self.logger.info('Could not find last reviewed date for %s, xpath used: %s',
url, last_reviewed_xpath_selector)
return item
|
nichelia/docker-scraper
|
nhsbot/nhsbot/spiders/nhs_uk.py
|
Python
|
mit
| 8,352
|
from distutils.core import setup
setup(name='ftrobopy',
description='Python Interface for Fischertechnik ROBOTICS TXT Controller',
version='1.80',
author='Torsten Stuehn',
author_email='Torsten Stuehn',
url='https://github.com/ftrobopy/ftrobopy',
download_url='https://github.com/ftrobopy/ftrobopy/archive/1.80.tar.gz',
license='MIT',
py_modules=['ftrobopy']
)
|
ftrobopy/ftrobopy
|
setup.py
|
Python
|
mit
| 416
|
"""
ErrorClass Plugins
------------------
ErrorClass plugins provide an easy way to add support for custom
handling of particular classes of exceptions.
An ErrorClass plugin defines one or more ErrorClasses and how each is
handled and reported on. Each error class is stored in a different
attribute on the result, and reported separately. Each error class must
indicate the exceptions that fall under that class, the label to use
for reporting, and whether exceptions of the class should be
considered as failures for the whole test run.
ErrorClasses use a declarative syntax. Assign an ErrorClass to the
attribute you wish to add to the result object, defining the
exceptions, label and isfailure attributes. For example, to declare an
ErrorClassPlugin that defines TodoErrors (and subclasses of TodoError)
as an error class with the label 'TODO' that is considered a failure,
do this:
>>> class Todo(Exception):
... pass
>>> class TodoError(ErrorClassPlugin):
... todo = ErrorClass(Todo, label='TODO', isfailure=True)
The MetaErrorClass metaclass translates the ErrorClass declarations
into the tuples used by the error handling and reporting functions in
the result. This is an internal format and subject to change; you
should always use the declarative syntax for attaching ErrorClasses to
an ErrorClass plugin.
>>> TodoError.errorClasses # doctest: +ELLIPSIS
((<class ...Todo...>, ('todo', 'TODO', True)),)
Let's see the plugin in action. First some boilerplate.
>>> import sys
>>> import unittest
>>> try:
... # 2.7+
... from unittest.runner import _WritelnDecorator
... except ImportError:
... from unittest import _WritelnDecorator
...
>>> buf = _WritelnDecorator(sys.stdout)
Now define a test case that raises a Todo.
>>> class TestTodo(unittest.TestCase):
... def runTest(self):
... raise Todo("I need to test something")
>>> case = TestTodo()
Prepare the result using our plugin. Normally this happens during the
course of test execution within nose -- you won't be doing this
yourself. For the purposes of this testing document, I'm stepping
through the internal process of nose so you can see what happens at
each step.
>>> plugin = TodoError()
>>> from nose.result import _TextTestResult
>>> result = _TextTestResult(stream=buf, descriptions=0, verbosity=2)
>>> plugin.prepareTestResult(result)
Now run the test. TODO is printed.
>>> _ = case(result) # doctest: +ELLIPSIS
runTest (....TestTodo) ... TODO: I need to test something
Errors and failures are empty, but todo has our test:
>>> result.errors
[]
>>> result.failures
[]
>>> result.todo # doctest: +ELLIPSIS
[(<....TestTodo testMethod=runTest>, '...Todo: I need to test something\\n')]
>>> result.printErrors() # doctest: +ELLIPSIS
<BLANKLINE>
======================================================================
TODO: runTest (....TestTodo)
----------------------------------------------------------------------
Traceback (most recent call last):
...
...Todo: I need to test something
<BLANKLINE>
Since we defined a Todo as a failure, the run was not successful.
>>> result.wasSuccessful()
False
"""
from nose.pyversion import make_instancemethod
from nose.plugins.base import Plugin
from nose.result import TextTestResult
from nose.util import isclass
class MetaErrorClass(type):
"""Metaclass for ErrorClassPlugins that allows error classes to be
set up in a declarative manner.
"""
    def __init__(self, name, bases, attr):
        errorClasses = []
        # iterate over a copy so attr can be mutated, and avoid shadowing the
        # 'name' parameter, which is passed on to type.__init__ below
        for attr_name, detail in list(attr.items()):
            if isinstance(detail, ErrorClass):
                attr.pop(attr_name)
                for cls in detail:
                    errorClasses.append(
                        (cls, (attr_name, detail.label, detail.isfailure)))
        super(MetaErrorClass, self).__init__(name, bases, attr)
        self.errorClasses = tuple(errorClasses)
class ErrorClass(object):
def __init__(self, *errorClasses, **kw):
self.errorClasses = errorClasses
try:
for key in ('label', 'isfailure'):
setattr(self, key, kw.pop(key))
except KeyError:
raise TypeError("%r is a required named argument for ErrorClass"
% key)
def __iter__(self):
return iter(self.errorClasses)
class ErrorClassPlugin(Plugin):
"""
Base class for ErrorClass plugins. Subclass this class and declare the
exceptions that you wish to handle as attributes of the subclass.
"""
__metaclass__ = MetaErrorClass
score = 1000
errorClasses = ()
def addError(self, test, err):
err_cls, a, b = err
if not isclass(err_cls):
return
classes = [e[0] for e in self.errorClasses]
        if any(issubclass(err_cls, c) for c in classes):
            return True
def prepareTestResult(self, result):
if not hasattr(result, 'errorClasses'):
self.patchResult(result)
for cls, (storage_attr, label, isfail) in self.errorClasses:
if cls not in result.errorClasses:
storage = getattr(result, storage_attr, [])
setattr(result, storage_attr, storage)
result.errorClasses[cls] = (storage, label, isfail)
def patchResult(self, result):
result.printLabel = print_label_patch(result)
result._orig_addError, result.addError = \
result.addError, add_error_patch(result)
result._orig_wasSuccessful, result.wasSuccessful = \
result.wasSuccessful, wassuccessful_patch(result)
if hasattr(result, 'printErrors'):
result._orig_printErrors, result.printErrors = \
result.printErrors, print_errors_patch(result)
if hasattr(result, 'addSkip'):
result._orig_addSkip, result.addSkip = \
result.addSkip, add_skip_patch(result)
result.errorClasses = {}
def add_error_patch(result):
"""Create a new addError method to patch into a result instance
that recognizes the errorClasses attribute and deals with
errorclasses correctly.
"""
return make_instancemethod(TextTestResult.addError, result)
def print_errors_patch(result):
"""Create a new printErrors method that prints errorClasses items
as well.
"""
return make_instancemethod(TextTestResult.printErrors, result)
def print_label_patch(result):
"""Create a new printLabel method that prints errorClasses items
as well.
"""
return make_instancemethod(TextTestResult.printLabel, result)
def wassuccessful_patch(result):
"""Create a new wasSuccessful method that checks errorClasses for
exceptions that were put into other slots than error or failure
but that still count as not success.
"""
return make_instancemethod(TextTestResult.wasSuccessful, result)
def add_skip_patch(result):
"""Create a new addSkip method to patch into a result instance
that delegates to addError.
"""
return make_instancemethod(TextTestResult.addSkip, result)
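# A minimal usage sketch (mirroring the ``Todo`` example from the docstring
# above; the names are illustrative, not part of this module's API):
#
#     class Todo(Exception):
#         pass
#
#     class TodoError(ErrorClassPlugin):
#         todo = ErrorClass(Todo, label='TODO', isfailure=True)
#
# MetaErrorClass rewrites the ``todo`` attribute into an entry of
# ``TodoError.errorClasses``, so tests raising ``Todo`` are stored on
# ``result.todo`` and reported under the ``TODO`` label instead of being
# counted as ordinary errors.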
if __name__ == '__main__':
import doctest
doctest.testmod()
|
Nexenta/s3-tests
|
virtualenv/lib/python2.7/site-packages/nose/plugins/errorclass.py
|
Python
|
mit
| 7,279
|
"""
library
"""
from __future__ import absolute_import, division, print_function
import logging
import os
from PySide import QtGui, QtCore
from mcedit2.util.directories import getUserSchematicsDirectory
from mcedit2.widgets.layout import Column
log = logging.getLogger(__name__)
class LibraryTreeModel(QtGui.QFileSystemModel):
def columnCount(self, *args, **kwargs):
return 1
class LibraryWidget(QtGui.QWidget):
def __init__(self):
super(LibraryWidget, self).__init__()
self.folderPath = getUserSchematicsDirectory()
if not os.path.exists(self.folderPath):
os.makedirs(self.folderPath)
self.treeView = QtGui.QTreeView()
self.model = LibraryTreeModel()
self.model.setRootPath(self.folderPath)
self.treeView.setModel(self.model)
self.treeView.setRootIndex(self.model.index(self.folderPath))
self.treeView.doubleClicked.connect(self.itemDoubleClicked)
openLibraryButton = QtGui.QPushButton("Open Schematics Folder")
openLibraryButton.clicked.connect(self.openFolder)
self.setLayout(Column(self.treeView, openLibraryButton))
def openFolder(self):
QtGui.QDesktopServices.openUrl(QtCore.QUrl.fromLocalFile(self.folderPath))
def itemDoubleClicked(self, index):
filename = self.model.filePath(index)
self.doubleClicked.emit(filename)
doubleClicked = QtCore.Signal(str)
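# Usage sketch (``host.openSchematic`` is a hypothetical slot, not part of
# this module):
#
#     library = LibraryWidget()
#     library.doubleClicked.connect(host.openSchematic)
#
# Double-clicking an entry in the tree emits the absolute path of the
# selected schematic file as a string.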
|
Rubisk/mcedit2
|
src/mcedit2/library.py
|
Python
|
bsd-3-clause
| 1,436
|
{
'name': 'View Editor',
'category': 'Hidden',
'description': """
OpenERP Web to edit views.
==========================
""",
'version': '2.0',
'depends':['web'],
'data' : [
'views/web_view_editor.xml',
],
'qweb': ['static/src/xml/view_editor.xml'],
'auto_install': True,
}
|
mycodeday/crm-platform
|
web_view_editor/__openerp__.py
|
Python
|
gpl-3.0
| 326
|
import subprocess, threading
from subprocess import PIPE
class TimedSubProc (object):
    """Run a shell command in a subprocess, terminating it if it exceeds a
    timeout (Python 2 code; note the ``print`` statement below)."""
    def __init__(self, cmd):
        self.cmd = cmd.split()
        self.process = None
    def run(self, timeout=5, stdin=None, stdout=PIPE, stderr=PIPE):
        """Run the command, waiting at most ``timeout`` seconds. Returns
        ``(returncode, stdout, stderr)`` on completion, or ``False`` if the
        process had to be terminated."""
        self.output = None
        def target():
            # Runs on a worker thread so the main thread can enforce the
            # timeout via Thread.join(timeout).
            self.process = subprocess.Popen(self.cmd, stdin=stdin, stdout=stdout, stderr=stderr)
            self.output = self.process.communicate()
        thread = threading.Thread(target=target)
        thread.start()
        thread.join(timeout)
        if thread.is_alive():
            # communicate() is still blocked: kill the child and wait for
            # the worker thread to unwind before reporting the timeout.
            print 'Process timeout! Terminating...'
            self.process.terminate()
            thread.join()
            return False
        return (self.process.returncode, self.output[0], self.output[1])
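# Usage sketch (Python 2, matching this module; assumes a ``sleep`` binary
# is available on the host):
#
#     proc = TimedSubProc('sleep 10')
#     result = proc.run(timeout=2)
#     if result is False:
#         pass  # the child was terminated after the 2-second timeout
#     else:
#         returncode, out, err = result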
|
maxspad/MGrader
|
autograder/modules/questions/timedsubproc.py
|
Python
|
bsd-3-clause
| 861
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-08 16:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djangocms_blog', '0014_auto_20160215_1331'),
]
operations = [
migrations.AlterField(
model_name='post',
name='date_published',
field=models.DateTimeField(blank=True, null=True, verbose_name='published since'),
),
]
|
skirsdeda/djangocms-blog
|
djangocms_blog/migrations/0015_auto_20160408_1849.py
|
Python
|
bsd-3-clause
| 507
|
# -*- coding: utf-8 -*-
from allauth.socialaccount.tests import create_oauth_tests
from allauth.tests import MockedResponse
from allauth.socialaccount.providers import registry
from .provider import TumblrProvider
class TumblrTests(create_oauth_tests(registry.by_id(TumblrProvider.id))):
def get_mocked_response(self):
return [MockedResponse(200, u"""
{
"meta": {
"status": 200,
"msg": "OK"
},
"response": {
"user": {
"following": 263,
"default_post_format": "html",
"name": "derekg",
"likes": 606,
"blogs": [
{
"name": "derekg",
"title": "Derek Gottfrid",
"url": "http://derekg.org/",
"tweet": "auto",
"primary": true,
"followers": 33004929
},
{
"name": "ihatehipstrz",
"title": "I Hate Hipstrz"
}
]
}
} }
""")]
|
agconti/njode
|
env/lib/python2.7/site-packages/allauth/socialaccount/providers/tumblr/tests.py
|
Python
|
bsd-3-clause
| 931
|
# -*- coding: utf-8 -*-
"""
python-aop is part of LemonFramework.
python-aop is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
python-aop is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with python-aop. If not, see <http://www.gnu.org/licenses/>.
Copyright (c) 2013 Vicente Ruiz <vruiz2.0@gmail.com>
"""
class AspectType(type):
    """Metaclass for building aspects. Adds a ``pointcut`` method to the
    class, making it possible to bind an advise to a joinpoint."""
    def __new__(mcs, name, bases, classdict):
        # Prepare a function that performs the pointcut for any method or
        # attribute of the class
        def pointcut(cls, joinpoint, advise_class, **kwargs):
            # Set up the point where the aspect will run
            joinpoint_attr = getattr(cls, joinpoint)
            # Collect additional parameters for the advise
            advise_args = () if 'args' not in kwargs else tuple(kwargs['args'])
            advise_kwargs = {} if 'kwargs' not in kwargs else dict(kwargs['kwargs'])
            # Create the advise
            advise = advise_class(joinpoint_attr, *advise_args, **advise_kwargs)
            # Prepare a wrapper that forwards every call to the advise
            def wrapper(self, *args, **kwargs):
                return advise(self, *args, **kwargs)
            setattr(cls, joinpoint, wrapper)
        # Add the ``pointcut`` method to the class
        classdict['pointcut'] = classmethod(pointcut)
        return type.__new__(mcs, name, bases, classdict)
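# Illustrative sketch (``TraceAdvise`` and ``Service`` are hypothetical; an
# advise only needs to accept the joinpoint in its constructor and be
# callable with the instance plus the original arguments):
#
#     class TraceAdvise(object):
#         def __init__(self, joinpoint):
#             self.joinpoint = joinpoint
#         def __call__(self, instance, *args, **kwargs):
#             print('calling %s' % self.joinpoint.__name__)
#             return self.joinpoint(instance, *args, **kwargs)
#
#     class Service(object):
#         __metaclass__ = AspectType  # Python 2 spelling
#         def compute(self, x):
#             return x * 2
#
#     Service.pointcut('compute', TraceAdvise)
#     Service().compute(21)  # prints "calling compute", returns 42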
|
andresriancho/python-aop
|
aop/aspecttype.py
|
Python
|
gpl-3.0
| 1,966
|
import os
import tempfile
import struct
import re
from subprocess import Popen, PIPE
from nose.plugins.skip import Skip, SkipTest
import ubpf.assembler
import testdata
VM = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "vm", "test")
def check_datafile(filename):
"""
Given assembly source code and an expected result, run the eBPF program and
verify that the result matches.
"""
data = testdata.read(filename)
if 'asm' not in data and 'raw' not in data:
raise SkipTest("no asm or raw section in datafile")
if 'result' not in data and 'error' not in data and 'error pattern' not in data:
raise SkipTest("no result or error section in datafile")
if not os.path.exists(VM):
raise SkipTest("VM not found")
if 'raw' in data:
code = b''.join(struct.pack("=Q", x) for x in data['raw'])
else:
code = ubpf.assembler.assemble(data['asm'])
memfile = None
cmd = [VM]
if 'mem' in data:
memfile = tempfile.NamedTemporaryFile()
memfile.write(data['mem'])
memfile.flush()
cmd.extend(['-m', memfile.name])
if 'reload' in data:
cmd.extend(['-R'])
if 'unload' in data:
cmd.extend(['-U'])
cmd.append('-')
vm = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdout, stderr = vm.communicate(code)
stdout = stdout.decode("utf-8")
stderr = stderr.decode("utf-8")
stderr = stderr.strip()
if memfile:
memfile.close()
if 'error' in data:
if data['error'] != stderr:
raise AssertionError("Expected error %r, got %r" % (data['error'], stderr))
elif 'error pattern' in data:
if not re.search(data['error pattern'], stderr):
raise AssertionError("Expected error matching %r, got %r" % (data['error pattern'], stderr))
else:
if stderr:
raise AssertionError("Unexpected error %r" % stderr)
if 'result' in data:
if vm.returncode != 0:
raise AssertionError("VM exited with status %d, stderr=%r" % (vm.returncode, stderr))
expected = int(data['result'], 0)
result = int(stdout, 0)
if expected != result:
raise AssertionError("Expected result 0x%x, got 0x%x, stderr=%r" % (expected, result, stderr))
else:
if vm.returncode == 0:
raise AssertionError("Expected VM to exit with an error code")
def test_datafiles():
# Nose test generator
# Creates a testcase for each datafile
for filename in testdata.list_files():
yield check_datafile, filename
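# A datafile consumed by check_datafile is a sectioned text file; a sketch
# (the section names match the keys read above, though the exact syntax is
# defined by the testdata module):
#
#     -- asm
#     mov %r0, 42
#     exit
#     -- result
#     0x2a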
|
iovisor/ubpf
|
test_framework/test_vm.py
|
Python
|
apache-2.0
| 2,599
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for metric_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
NAN = float('nan')
metrics = metrics_lib
def _enqueue_vector(sess, queue, values, shape=None):
if not shape:
shape = (1, len(values))
dtype = queue.dtypes[0]
sess.run(
queue.enqueue(constant_op.constant(
values, dtype=dtype, shape=shape)))
def _binary_2d_label_to_sparse_value(labels):
"""Convert dense 2D binary indicator tensor to sparse tensor.
Only 1 values in `labels` are included in result.
Args:
labels: Dense 2D binary indicator tensor.
Returns:
`SparseTensorValue` whose values are indices along the last dimension of
`labels`.
"""
indices = []
values = []
batch = 0
for row in labels:
label = 0
xi = 0
for x in row:
if x == 1:
indices.append([batch, xi])
values.append(label)
xi += 1
else:
assert x == 0
label += 1
batch += 1
shape = [len(labels), len(labels[0])]
return sparse_tensor.SparseTensorValue(
np.array(indices, np.int64),
np.array(values, np.int64), np.array(shape, np.int64))
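# Worked example (a sketch):
#     _binary_2d_label_to_sparse_value([[0, 1, 0], [1, 0, 1]])
# yields indices [[0, 0], [1, 0], [1, 1]], values [1, 0, 2], and
# shape [2, 3]: each value is the class id (column) of a 1, and each index
# is (batch, n-th 1 within that row).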
def _binary_2d_label_to_sparse(labels):
"""Convert dense 2D binary indicator tensor to sparse tensor.
Only 1 values in `labels` are included in result.
Args:
labels: Dense 2D binary indicator tensor.
Returns:
`SparseTensor` whose values are indices along the last dimension of
`labels`.
"""
return sparse_tensor.SparseTensor.from_value(
_binary_2d_label_to_sparse_value(labels))
def _binary_3d_label_to_sparse_value(labels):
"""Convert dense 3D binary indicator tensor to sparse tensor.
Only 1 values in `labels` are included in result.
Args:
labels: Dense 2D binary indicator tensor.
Returns:
`SparseTensorValue` whose values are indices along the last dimension of
`labels`.
"""
indices = []
values = []
for d0, labels_d0 in enumerate(labels):
for d1, labels_d1 in enumerate(labels_d0):
d2 = 0
for class_id, label in enumerate(labels_d1):
if label == 1:
values.append(class_id)
indices.append([d0, d1, d2])
d2 += 1
else:
assert label == 0
shape = [len(labels), len(labels[0]), len(labels[0][0])]
return sparse_tensor.SparseTensorValue(
np.array(indices, np.int64),
np.array(values, np.int64), np.array(shape, np.int64))
def _binary_3d_label_to_sparse(labels):
"""Convert dense 3D binary indicator tensor to sparse tensor.
Only 1 values in `labels` are included in result.
Args:
labels: Dense 2D binary indicator tensor.
Returns:
`SparseTensor` whose values are indices along the last dimension of
`labels`.
"""
return sparse_tensor.SparseTensor.from_value(
_binary_3d_label_to_sparse_value(labels))
def _assert_nan(test_case, actual):
test_case.assertTrue(math.isnan(actual), 'Expected NAN, got %s.' % actual)
def _assert_metric_variables(test_case, expected):
test_case.assertEquals(
set(expected), set(v.name for v in variables.local_variables()))
test_case.assertEquals(
set(expected),
set(v.name for v in ops.get_collection(ops.GraphKeys.METRIC_VARIABLES)))
class StreamingMeanTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean(array_ops.ones([4, 3]))
_assert_metric_variables(self, ('mean/count:0', 'mean/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean(
array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean(
array_ops.ones([4, 3]), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testBasic(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean(values)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAlmostEqual(1.65, sess.run(mean), 5)
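      # The value is the mean of all eight enqueued scalars:
      # (0 + 1 - 4.2 + 9.1 + 6.5 + 0 - 3.2 + 4.0) / 8 = 13.2 / 8 = 1.65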
def testUpdateOpsReturnsCurrentValue(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean(values)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op), 5)
self.assertAlmostEqual(1.475, sess.run(update_op), 5)
self.assertAlmostEqual(12.4 / 6.0, sess.run(update_op), 5)
self.assertAlmostEqual(1.65, sess.run(update_op), 5)
self.assertAlmostEqual(1.65, sess.run(mean), 5)
def test1dWeightedValues(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [1])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
def test1dWeightedValues_placeholders(self):
with self.test_session() as sess:
# Create the queue that populates the values.
feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
values = array_ops.placeholder(dtype=dtypes_lib.float32)
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1,))
_enqueue_vector(sess, weights_queue, 1, shape=(1,))
_enqueue_vector(sess, weights_queue, 0, shape=(1,))
_enqueue_vector(sess, weights_queue, 0, shape=(1,))
_enqueue_vector(sess, weights_queue, 1, shape=(1,))
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
def test2dWeightedValues(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [1, 1])
_enqueue_vector(sess, weights_queue, [1, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
self.assertAlmostEqual((0 + 1 - 4.2 + 0) / 4.0, mean.eval(), 5)
def test2dWeightedValues_placeholders(self):
with self.test_session() as sess:
# Create the queue that populates the values.
feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
values = array_ops.placeholder(dtype=dtypes_lib.float32)
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(2,))
_enqueue_vector(sess, weights_queue, [1, 1], shape=(2,))
_enqueue_vector(sess, weights_queue, [1, 0], shape=(2,))
_enqueue_vector(sess, weights_queue, [0, 1], shape=(2,))
_enqueue_vector(sess, weights_queue, [0, 0], shape=(2,))
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual((0 + 1 - 4.2 + 0) / 4.0, mean.eval(), 5)
class StreamingMeanTensorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_tensor(array_ops.ones([4, 3]))
_assert_metric_variables(self,
('mean/total_tensor:0', 'mean/count_tensor:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_tensor(
array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_tensor(
array_ops.ones([4, 3]), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testBasic(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(mean))
def testMultiDimensional(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(2, 2, 2))
_enqueue_vector(
sess,
values_queue, [[[1, 2], [1, 2]], [[1, 2], [1, 2]]],
shape=(2, 2, 2))
_enqueue_vector(
sess,
values_queue, [[[1, 2], [1, 2]], [[3, 4], [9, 10]]],
shape=(2, 2, 2))
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values)
sess.run(variables.local_variables_initializer())
for _ in range(2):
sess.run(update_op)
self.assertAllClose([[[1, 2], [1, 2]], [[2, 3], [5, 6]]], sess.run(mean))
def testUpdateOpsReturnsCurrentValue(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values)
sess.run(variables.local_variables_initializer())
self.assertAllClose([[0, 1]], sess.run(update_op), 5)
self.assertAllClose([[-2.1, 5.05]], sess.run(update_op), 5)
self.assertAllClose([[2.3 / 3., 10.1 / 3.]], sess.run(update_op), 5)
self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(update_op), 5)
self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(mean), 5)
def testWeighted1d(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [[1]])
_enqueue_vector(sess, weights_queue, [[0]])
_enqueue_vector(sess, weights_queue, [[1]])
_enqueue_vector(sess, weights_queue, [[0]])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values, weights)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[3.25, 0.5]], sess.run(mean), 5)
def testWeighted2d_1(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [1, 1])
_enqueue_vector(sess, weights_queue, [1, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values, weights)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[-2.1, 0.5]], sess.run(mean), 5)
def testWeighted2d_2(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values, weights)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[0, 0.5]], sess.run(mean), 5)
class StreamingAccuracyTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
name='my_accuracy')
_assert_metric_variables(self,
('my_accuracy/count:0', 'my_accuracy/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testPredictionsAndLabelsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones((10, 3))
labels = array_ops.ones((10, 4))
with self.assertRaises(ValueError):
metrics.streaming_accuracy(predictions, labels)
def testPredictionsAndWeightsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones((10, 3))
labels = array_ops.ones((10, 3))
weights = array_ops.ones((9, 3))
with self.assertRaises(ValueError):
metrics.streaming_accuracy(predictions, labels, weights)
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=3, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=3, dtype=dtypes_lib.int64, seed=2)
accuracy, update_op = metrics.streaming_accuracy(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_accuracy = accuracy.eval()
for _ in range(10):
self.assertEqual(initial_accuracy, accuracy.eval())
def testMultipleUpdates(self):
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
labels = labels_queue.dequeue()
accuracy, update_op = metrics.streaming_accuracy(predictions, labels)
sess.run(variables.local_variables_initializer())
for _ in xrange(3):
sess.run(update_op)
self.assertEqual(0.5, sess.run(update_op))
self.assertEqual(0.5, accuracy.eval())
def testEffectivelyEquivalentSizes(self):
predictions = array_ops.ones((40, 1))
labels = array_ops.ones((40,))
with self.test_session() as sess:
accuracy, update_op = metrics.streaming_accuracy(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertEqual(1.0, update_op.eval())
self.assertEqual(1.0, accuracy.eval())
  def testEffectivelyEquivalentSizesWithStaticShapedWeight(self):
predictions = ops.convert_to_tensor([1, 1, 1]) # shape 3,
labels = array_ops.expand_dims(ops.convert_to_tensor([1, 0, 0]),
1) # shape 3, 1
weights = array_ops.expand_dims(ops.convert_to_tensor([100, 1, 1]),
1) # shape 3, 1
with self.test_session() as sess:
accuracy, update_op = metrics.streaming_accuracy(predictions, labels,
weights)
sess.run(variables.local_variables_initializer())
# if streaming_accuracy does not flatten the weight, accuracy would be
# 0.33333334 due to an intended broadcast of weight. Due to flattening,
# it will be higher than .95
self.assertGreater(update_op.eval(), .95)
self.assertGreater(accuracy.eval(), .95)
def testEffectivelyEquivalentSizesWithDynamicallyShapedWeight(self):
predictions = ops.convert_to_tensor([1, 1, 1]) # shape 3,
labels = array_ops.expand_dims(ops.convert_to_tensor([1, 0, 0]),
1) # shape 3, 1
weights = [[100], [1], [1]] # shape 3, 1
weights_placeholder = array_ops.placeholder(
dtype=dtypes_lib.int32, name='weights')
feed_dict = {weights_placeholder: weights}
with self.test_session() as sess:
accuracy, update_op = metrics.streaming_accuracy(predictions, labels,
weights_placeholder)
sess.run(variables.local_variables_initializer())
# if streaming_accuracy does not flatten the weight, accuracy would be
# 0.33333334 due to an intended broadcast of weight. Due to flattening,
# it will be higher than .95
self.assertGreater(update_op.eval(feed_dict=feed_dict), .95)
self.assertGreater(accuracy.eval(feed_dict=feed_dict), .95)
def testMultipleUpdatesWithWeightedValues(self):
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
labels = labels_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.int64, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [0])
weights = weights_queue.dequeue()
accuracy, update_op = metrics.streaming_accuracy(predictions, labels,
weights)
sess.run(variables.local_variables_initializer())
for _ in xrange(3):
sess.run(update_op)
self.assertEqual(1.0, sess.run(update_op))
self.assertEqual(1.0, accuracy.eval())
class StreamingTruePositivesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_positives((0, 1, 0), (0, 1, 1))
_assert_metric_variables(self, ('true_positives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
tp, tp_update_op = metrics.streaming_true_positives(predictions,
labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tp.eval())
self.assertEqual(1, tp_update_op.eval())
self.assertEqual(1, tp.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
tp, tp_update_op = metrics.streaming_true_positives(
predictions, labels, weights=37.0)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tp.eval())
self.assertEqual(37.0, tp_update_op.eval())
self.assertEqual(37.0, tp.eval())
class StreamingFalseNegativesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_negatives((0, 1, 0),
(0, 1, 1))
_assert_metric_variables(self, ('false_negatives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
fn, fn_update_op = metrics.streaming_false_negatives(predictions,
labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fn.eval())
self.assertEqual(2, fn_update_op.eval())
self.assertEqual(2, fn.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
fn, fn_update_op = metrics.streaming_false_negatives(
predictions, labels, weights=((3.0,), (5.0,), (7.0,)))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fn.eval())
self.assertEqual(8.0, fn_update_op.eval())
self.assertEqual(8.0, fn.eval())
class StreamingFalsePositivesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_positives((0, 1, 0),
(0, 1, 1))
_assert_metric_variables(self, ('false_positives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
fp, fp_update_op = metrics.streaming_false_positives(predictions,
labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fp.eval())
self.assertEqual(4, fp_update_op.eval())
self.assertEqual(4, fp.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
fp, fp_update_op = metrics.streaming_false_positives(
predictions,
labels,
weights=((1.0, 2.0, 3.0, 5.0),
(7.0, 11.0, 13.0, 17.0),
(19.0, 23.0, 29.0, 31.0)))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fp.eval())
self.assertEqual(42.0, fp_update_op.eval())
self.assertEqual(42.0, fp.eval())
class StreamingTrueNegativesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_negatives((0, 1, 0),
(0, 1, 1))
_assert_metric_variables(self, ('true_negatives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
tn, tn_update_op = metrics.streaming_true_negatives(predictions,
labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tn.eval())
self.assertEqual(5, tn_update_op.eval())
self.assertEqual(5, tn.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
tn, tn_update_op = metrics.streaming_true_negatives(
predictions, labels, weights=((0.0, 2.0, 3.0, 5.0),))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tn.eval())
self.assertEqual(15.0, tn_update_op.eval())
self.assertEqual(15.0, tn.eval())
class StreamingTruePositivesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_positives_at_thresholds(
(0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
_assert_metric_variables(self, ('true_positives:0',))
def testUnweighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
tp, tp_update_op = metrics.streaming_true_positives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), tp.eval())
self.assertAllEqual((3, 1, 0), tp_update_op.eval())
self.assertAllEqual((3, 1, 0), tp.eval())
def testWeighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
tp, tp_update_op = metrics.streaming_true_positives_at_thresholds(
predictions, labels, weights=37.0, thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), tp.eval())
self.assertAllEqual((111.0, 37.0, 0.0), tp_update_op.eval())
self.assertAllEqual((111.0, 37.0, 0.0), tp.eval())
class StreamingFalseNegativesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_negatives_at_thresholds(
(0.0, 1.0, 0.0), (0, 1, 1), thresholds=(
0.15,
0.5,
0.85,))
_assert_metric_variables(self, ('false_negatives:0',))
def testUnweighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
fn, fn_update_op = metrics.streaming_false_negatives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), fn.eval())
self.assertAllEqual((0, 2, 3), fn_update_op.eval())
self.assertAllEqual((0, 2, 3), fn.eval())
def testWeighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
fn, fn_update_op = metrics.streaming_false_negatives_at_thresholds(
predictions,
labels,
weights=((3.0,), (5.0,), (7.0,)),
thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), fn.eval())
self.assertAllEqual((0.0, 8.0, 11.0), fn_update_op.eval())
self.assertAllEqual((0.0, 8.0, 11.0), fn.eval())
class StreamingFalsePositivesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_positives_at_thresholds(
(0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
_assert_metric_variables(self, ('false_positives:0',))
def testUnweighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
fp, fp_update_op = metrics.streaming_false_positives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), fp.eval())
self.assertAllEqual((7, 4, 2), fp_update_op.eval())
self.assertAllEqual((7, 4, 2), fp.eval())
def testWeighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
fp, fp_update_op = metrics.streaming_false_positives_at_thresholds(
predictions,
labels,
weights=((1.0, 2.0, 3.0, 5.0),
(7.0, 11.0, 13.0, 17.0),
(19.0, 23.0, 29.0, 31.0)),
thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), fp.eval())
self.assertAllEqual((125.0, 42.0, 12.0), fp_update_op.eval())
self.assertAllEqual((125.0, 42.0, 12.0), fp.eval())
class StreamingTrueNegativesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_negatives_at_thresholds(
(0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
_assert_metric_variables(self, ('true_negatives:0',))
def testUnweighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
tn, tn_update_op = metrics.streaming_true_negatives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), tn.eval())
self.assertAllEqual((2, 5, 7), tn_update_op.eval())
self.assertAllEqual((2, 5, 7), tn.eval())
def testWeighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
tn, tn_update_op = metrics.streaming_true_negatives_at_thresholds(
predictions,
labels,
weights=((0.0, 2.0, 3.0, 5.0),),
thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), tn.eval())
self.assertAllEqual((5.0, 15.0, 23.0), tn_update_op.eval())
self.assertAllEqual((5.0, 15.0, 23.0), tn.eval())
class StreamingPrecisionTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_precision(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(self, ('precision/false_positives/count:0',
'precision/true_positives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_precision = precision.eval()
for _ in range(10):
self.assertEqual(initial_precision, precision.eval())
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs)
labels = constant_op.constant(inputs)
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op))
self.assertAlmostEqual(1, precision.eval())
def testSomeCorrect(self):
predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, precision.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
precision, update_op = metrics.streaming_precision(
predictions, labels, weights=constant_op.constant([[2], [5]]))
with self.test_session():
variables.local_variables_initializer().run()
weighted_tp = 2.0 + 5.0
weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(expected_precision, update_op.eval())
self.assertAlmostEqual(expected_precision, precision.eval())
def testWeighted1d_placeholders(self):
predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
labels = array_ops.placeholder(dtype=dtypes_lib.float32)
feed_dict = {
predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
labels: ((0, 1, 1, 0), (1, 0, 0, 1))
}
precision, update_op = metrics.streaming_precision(
predictions, labels, weights=constant_op.constant([[2], [5]]))
with self.test_session():
variables.local_variables_initializer().run()
weighted_tp = 2.0 + 5.0
weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(
expected_precision, update_op.eval(feed_dict=feed_dict))
self.assertAlmostEqual(
expected_precision, precision.eval(feed_dict=feed_dict))
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
precision, update_op = metrics.streaming_precision(
predictions,
labels,
weights=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
with self.test_session():
variables.local_variables_initializer().run()
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(expected_precision, update_op.eval())
self.assertAlmostEqual(expected_precision, precision.eval())
def testWeighted2d_placeholders(self):
predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
labels = array_ops.placeholder(dtype=dtypes_lib.float32)
feed_dict = {
predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
labels: ((0, 1, 1, 0), (1, 0, 0, 1))
}
precision, update_op = metrics.streaming_precision(
predictions,
labels,
weights=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
with self.test_session():
variables.local_variables_initializer().run()
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(
expected_precision, update_op.eval(feed_dict=feed_dict))
self.assertAlmostEqual(
expected_precision, precision.eval(feed_dict=feed_dict))
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs)
labels = constant_op.constant(1 - inputs)
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0, precision.eval())
def testZeroTrueAndFalsePositivesGivesZeroPrecision(self):
predictions = constant_op.constant([0, 0, 0, 0])
labels = constant_op.constant([0, 0, 0, 0])
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0.0, precision.eval())
class StreamingRecallTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_recall(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self,
('recall/false_negatives/count:0', 'recall/true_positives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_recall = recall.eval()
for _ in range(10):
self.assertEqual(initial_recall, recall.eval())
def testAllCorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(np_inputs)
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(1, recall.eval())
def testSomeCorrect(self):
predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, recall.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[2], [5]])
recall, update_op = metrics.streaming_recall(
predictions, labels, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_tp = 2.0 + 5.0
weighted_t = (2.0 + 2.0) + (5.0 + 5.0)
expected_precision = weighted_tp / weighted_t
self.assertAlmostEqual(expected_precision, update_op.eval())
self.assertAlmostEqual(expected_precision, recall.eval())
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]])
recall, update_op = metrics.streaming_recall(
predictions, labels, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_tp = 3.0 + 1.0
weighted_t = (2.0 + 3.0) + (4.0 + 1.0)
expected_precision = weighted_tp / weighted_t
self.assertAlmostEqual(expected_precision, update_op.eval())
self.assertAlmostEqual(expected_precision, recall.eval())
def testAllIncorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(1 - np_inputs)
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, recall.eval())
def testZeroTruePositivesAndFalseNegativesGivesZeroRecall(self):
predictions = array_ops.zeros((1, 4))
labels = array_ops.zeros((1, 4))
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, recall.eval())
class StreamingFPRTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_positive_rate(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(self,
('false_positive_rate/false_positives/count:0',
'false_positive_rate/true_negatives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_false_positive_rate(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_false_positive_rate(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
fpr, update_op = metrics.streaming_false_positive_rate(
predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_fpr = fpr.eval()
for _ in range(10):
self.assertEqual(initial_fpr, fpr.eval())
def testAllCorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(np_inputs)
fpr, update_op = metrics.streaming_false_positive_rate(
predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, fpr.eval())
def testSomeCorrect(self):
predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
fpr, update_op = metrics.streaming_false_positive_rate(
predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, fpr.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[2], [5]])
fpr, update_op = metrics.streaming_false_positive_rate(
predictions, labels, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_fp = 2.0 + 5.0
weighted_f = (2.0 + 2.0) + (5.0 + 5.0)
expected_fpr = weighted_fp / weighted_f
self.assertAlmostEqual(expected_fpr, update_op.eval())
self.assertAlmostEqual(expected_fpr, fpr.eval())
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]])
fpr, update_op = metrics.streaming_false_positive_rate(
predictions, labels, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_fp = 1.0 + 3.0
weighted_f = (1.0 + 4.0) + (2.0 + 3.0)
expected_fpr = weighted_fp / weighted_f
self.assertAlmostEqual(expected_fpr, update_op.eval())
self.assertAlmostEqual(expected_fpr, fpr.eval())
def testAllIncorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(1 - np_inputs)
fpr, update_op = metrics.streaming_false_positive_rate(
predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(1, fpr.eval())
def testZeroFalsePositivesAndTrueNegativesGivesZeroFPR(self):
predictions = array_ops.ones((1, 4))
labels = array_ops.ones((1, 4))
fpr, update_op = metrics.streaming_false_positive_rate(
predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, fpr.eval())
class StreamingFNRTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_negative_rate(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(self,
('false_negative_rate/false_negatives/count:0',
'false_negative_rate/true_positives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_false_negative_rate(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_false_negative_rate(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
fnr, update_op = metrics.streaming_false_negative_rate(
predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_fnr = fnr.eval()
for _ in range(10):
self.assertEqual(initial_fnr, fnr.eval())
def testAllCorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(np_inputs)
fnr, update_op = metrics.streaming_false_negative_rate(
predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, fnr.eval())
def testSomeCorrect(self):
predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
fnr, update_op = metrics.streaming_false_negative_rate(
predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, fnr.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[2], [5]])
fnr, update_op = metrics.streaming_false_negative_rate(
predictions, labels, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
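      # Analogous to the weighted FPR case: each row weight applies to one
      # false negative (2 and 5) and to both of the row's positive labels.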
weighted_fn = 2.0 + 5.0
weighted_t = (2.0 + 2.0) + (5.0 + 5.0)
expected_fnr = weighted_fn / weighted_t
self.assertAlmostEqual(expected_fnr, update_op.eval())
self.assertAlmostEqual(expected_fnr, fnr.eval())
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]])
fnr, update_op = metrics.streaming_false_negative_rate(
predictions, labels, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_fn = 2.0 + 4.0
weighted_t = (2.0 + 3.0) + (1.0 + 4.0)
expected_fnr = weighted_fn / weighted_t
self.assertAlmostEqual(expected_fnr, update_op.eval())
self.assertAlmostEqual(expected_fnr, fnr.eval())
def testAllIncorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(1 - np_inputs)
fnr, update_op = metrics.streaming_false_negative_rate(
predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(1, fnr.eval())
def testZeroFalseNegativesAndTruePositivesGivesZeroFNR(self):
predictions = array_ops.zeros((1, 4))
labels = array_ops.zeros((1, 4))
fnr, update_op = metrics.streaming_false_negative_rate(
predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, fnr.eval())
class StreamingCurvePointsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metric_ops.streaming_curve_points(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self,
('curve_points/true_positives:0', 'curve_points/false_negatives:0',
'curve_points/false_positives:0', 'curve_points/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
points, _ = metric_ops.streaming_curve_points(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [points])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metric_ops.streaming_curve_points(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def _testValueTensorIsIdempotent(self, curve):
predictions = constant_op.constant(
np.random.uniform(size=(10, 3)), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np.random.uniform(high=2, size=(10, 3)), dtype=dtypes_lib.float32)
points, update_op = metric_ops.streaming_curve_points(
labels, predictions=predictions, curve=curve)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
initial_points = points.eval()
sess.run(update_op)
self.assertAllClose(initial_points, points.eval())
def testValueTensorIsIdempotentROC(self):
self._testValueTensorIsIdempotent(curve='ROC')
def testValueTensorIsIdempotentPR(self):
self._testValueTensorIsIdempotent(curve='PR')
def _testCase(self, labels, predictions, curve, expected_points):
with self.test_session() as sess:
predictions_tensor = constant_op.constant(
predictions, dtype=dtypes_lib.float32)
labels_tensor = constant_op.constant(labels, dtype=dtypes_lib.float32)
points, update_op = metric_ops.streaming_curve_points(
labels=labels_tensor,
predictions=predictions_tensor,
num_thresholds=3,
curve=curve)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAllClose(expected_points, points.eval())
def testEdgeCasesROC(self):
self._testCase([[1]], [[1]], 'ROC', [[0, 1], [0, 1], [0, 0]])
self._testCase([[0]], [[0]], 'ROC', [[1, 1], [0, 1], [0, 1]])
self._testCase([[0]], [[1]], 'ROC', [[1, 1], [1, 1], [0, 1]])
self._testCase([[1]], [[0]], 'ROC', [[0, 1], [0, 0], [0, 0]])
def testManyValuesROC(self):
self._testCase([[1.0, 0.0, 0.0, 1.0, 1.0, 1.0]],
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]], 'ROC',
[[1.0, 1.0], [0.0, 0.75], [0.0, 0.0]])
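    # Each point is [fpr, tpr] at thresholds of roughly 0, 0.5 and 1:
    # everything, only the top three predictions, and nothing flagged
    # positive, giving (1, 1), (0, 3/4) and (0, 0).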
def testEdgeCasesPR(self):
self._testCase([[1]], [[1]], 'PR', [[1, 1], [1, 1], [0, 1]])
self._testCase([[0]], [[0]], 'PR', [[1, 0], [1, 1], [1, 1]])
self._testCase([[0]], [[1]], 'PR', [[1, 0], [1, 0], [1, 1]])
self._testCase([[1]], [[0]], 'PR', [[1, 1], [0, 1], [0, 1]])
def testManyValuesPR(self):
self._testCase([[1.0, 0.0, 0.0, 1.0, 1.0, 1.0]],
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]], 'PR',
[[1.0, 4.0 / 6.0], [0.75, 1.0], [0.0, 1.0]])
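    # Each point is [recall, precision] at the same thresholds; near
    # threshold 0 all six examples are flagged, giving precision 4/6 at
    # recall 1.0.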
def _np_auc(predictions, labels, weights=None):
"""Computes the AUC explicitly using Numpy.
Args:
predictions: an ndarray with shape [N].
labels: an ndarray with shape [N].
weights: an ndarray with shape [N].
Returns:
the area under the ROC curve.
"""
if weights is None:
weights = np.ones(np.size(predictions))
is_positive = labels > 0
num_positives = np.sum(weights[is_positive])
num_negatives = np.sum(weights[~is_positive])
# Sort descending:
inds = np.argsort(-predictions)
sorted_labels = labels[inds]
sorted_weights = weights[inds]
is_positive = sorted_labels > 0
tp = np.cumsum(sorted_weights * is_positive) / num_positives
return np.sum((sorted_weights * tp)[~is_positive]) / num_negatives
class StreamingAUCTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_auc(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(self,
('auc/true_positives:0', 'auc/false_negatives:0',
'auc/false_positives:0', 'auc/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_auc(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_auc(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
auc, update_op = metrics.streaming_auc(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_auc = auc.eval()
for _ in range(10):
self.assertAlmostEqual(initial_auc, auc.eval(), 5)
def testPredictionsOutOfRange(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, -1, 1, -1], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
_, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertRaises(errors_impl.InvalidArgumentError, update_op.eval)
def testAllCorrect(self):
self.allCorrectAsExpected('ROC')
def allCorrectAsExpected(self, curve):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
auc, update_op = metrics.streaming_auc(predictions, labels, curve=curve)
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, auc.eval())
def testSomeCorrect(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
auc, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op))
self.assertAlmostEqual(0.5, auc.eval())
def testWeighted1d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
weights = constant_op.constant([2], shape=(1, 1))
auc, update_op = metrics.streaming_auc(
predictions, labels, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op), 5)
self.assertAlmostEqual(0.5, auc.eval(), 5)
def testWeighted2d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
weights = constant_op.constant([1, 2, 3, 4], shape=(1, 4))
auc, update_op = metrics.streaming_auc(
predictions, labels, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.7, sess.run(update_op), 5)
self.assertAlmostEqual(0.7, auc.eval(), 5)
def testAUCPRSpecialCase(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 1], shape=(1, 4))
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.79166, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.79166, auc.eval(), delta=1e-3)
def testAnotherAUCPRSpecialCase(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8, 0.1, 0.135, 0.81],
shape=(1, 7),
dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 0, 1, 0, 1], shape=(1, 7))
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.610317, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.610317, auc.eval(), delta=1e-3)
def testThirdAUCPRSpecialCase(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.0, 0.1, 0.2, 0.33, 0.3, 0.4, 0.5],
shape=(1, 7),
dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 0, 0, 1, 1, 1], shape=(1, 7))
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.90277, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.90277, auc.eval(), delta=1e-3)
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
auc, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0, sess.run(update_op))
self.assertAlmostEqual(0, auc.eval())
def testZeroTruePositivesAndFalseNegativesGivesOneAUC(self):
with self.test_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
auc, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 6)
self.assertAlmostEqual(1, auc.eval(), 6)
def testRecallOneAndPrecisionOneGivesOnePRAUC(self):
with self.test_session() as sess:
predictions = array_ops.ones([4], dtype=dtypes_lib.float32)
labels = array_ops.ones([4])
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 6)
self.assertAlmostEqual(1, auc.eval(), 6)
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=num_samples)
noise = np.random.normal(0.0, scale=0.2, size=num_samples)
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
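    # Helper: split x into fixed-size batches, stage them in a FIFO queue,
    # and return a dequeue op that yields one batch per session.run call.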
def _enqueue_as_batches(x, enqueue_ops):
x_batches = x.astype(np.float32).reshape((num_batches, batch_size))
x_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
for i in range(num_batches):
enqueue_ops[i].append(x_queue.enqueue(x_batches[i, :]))
return x_queue.dequeue()
for weights in (None, np.ones(num_samples), np.random.exponential(
scale=1.0, size=num_samples)):
expected_auc = _np_auc(predictions, labels, weights)
with self.test_session() as sess:
        enqueue_ops = [[] for _ in range(num_batches)]
tf_predictions = _enqueue_as_batches(predictions, enqueue_ops)
tf_labels = _enqueue_as_batches(labels, enqueue_ops)
tf_weights = (_enqueue_as_batches(weights, enqueue_ops) if
weights is not None else None)
for i in range(num_batches):
sess.run(enqueue_ops[i])
auc, update_op = metrics.streaming_auc(
tf_predictions,
tf_labels,
curve='ROC',
num_thresholds=500,
weights=tf_weights)
sess.run(variables.local_variables_initializer())
for i in range(num_batches):
sess.run(update_op)
        # Since this is only approximate, we can't expect a 6-digit match.
        # With a higher number of samples/thresholds we should see the
        # accuracy improve.
self.assertAlmostEqual(expected_auc, auc.eval(), 2)
class StreamingDynamicAUCTest(test.TestCase):
def setUp(self):
super(StreamingDynamicAUCTest, self).setUp()
np.random.seed(1)
ops.reset_default_graph()
def testUnknownCurve(self):
with self.assertRaisesRegexp(
ValueError, 'curve must be either ROC or PR, TEST_CURVE unknown'):
metrics.streaming_dynamic_auc(labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
curve='TEST_CURVE')
def testVars(self):
metrics.streaming_dynamic_auc(
labels=array_ops.ones((10, 1)), predictions=array_ops.ones((10, 1)))
_assert_metric_variables(self, ['dynamic_auc/concat_labels/array:0',
'dynamic_auc/concat_labels/size:0',
'dynamic_auc/concat_preds/array:0',
'dynamic_auc/concat_preds/size:0'])
def testMetricsCollection(self):
my_collection_name = '__metrics__'
auc, _ = metrics.streaming_dynamic_auc(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertEqual(ops.get_collection(my_collection_name), [auc])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_dynamic_auc(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in xrange(10):
sess.run(update_op)
# Then verify idempotency.
initial_auc = auc.eval()
for _ in xrange(10):
self.assertAlmostEqual(initial_auc, auc.eval(), 5)
def testAllLabelsOnes(self):
with self.test_session() as sess:
predictions = constant_op.constant([1., 1., 1.])
labels = constant_op.constant([1, 1, 1])
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, auc.eval())
def testAllLabelsZeros(self):
with self.test_session() as sess:
predictions = constant_op.constant([1., 1., 1.])
labels = constant_op.constant([0, 0, 0])
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, auc.eval())
def testNonZeroOnePredictions(self):
with self.test_session() as sess:
predictions = constant_op.constant([2.5, -2.5, 2.5, -2.5],
dtype=dtypes_lib.float32)
labels = constant_op.constant([1, 0, 1, 0])
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(auc.eval(), 1.0)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs)
labels = constant_op.constant(inputs)
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(1, auc.eval())
def testSomeCorrect(self):
with self.test_session() as sess:
predictions = constant_op.constant([1, 0, 1, 0])
labels = constant_op.constant([0, 1, 1, 0])
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0.5, auc.eval())
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0, auc.eval())
def testExceptionOnIncompatibleShapes(self):
with self.test_session() as sess:
predictions = array_ops.ones([5])
labels = array_ops.zeros([6])
with self.assertRaisesRegexp(ValueError, 'Shapes .* are incompatible'):
_, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
def testExceptionOnGreaterThanOneLabel(self):
with self.test_session() as sess:
predictions = constant_op.constant([1, 0.5, 0], dtypes_lib.float32)
labels = constant_op.constant([2, 1, 0])
_, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
'.*labels must be 0 or 1, at least one is >1.*'):
sess.run(update_op)
def testExceptionOnNegativeLabel(self):
with self.test_session() as sess:
predictions = constant_op.constant([1, 0.5, 0], dtypes_lib.float32)
labels = constant_op.constant([1, 0, -1])
_, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
'.*labels must be 0 or 1, at least one is <0.*'):
sess.run(update_op)
def testWithMultipleUpdates(self):
batch_size = 10
num_batches = 100
labels = np.array([])
predictions = np.array([])
tf_labels = variables.Variable(array_ops.ones(batch_size, dtypes_lib.int32),
collections=[ops.GraphKeys.LOCAL_VARIABLES],
dtype=dtypes_lib.int32)
tf_predictions = variables.Variable(
array_ops.ones(batch_size),
collections=[ops.GraphKeys.LOCAL_VARIABLES],
dtype=dtypes_lib.float32)
auc, update_op = metrics.streaming_dynamic_auc(tf_labels, tf_predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
for _ in xrange(num_batches):
new_labels = np.random.randint(0, 2, size=batch_size)
noise = np.random.normal(0.0, scale=0.2, size=batch_size)
new_predictions = 0.4 + 0.2 * new_labels + noise
labels = np.concatenate([labels, new_labels])
predictions = np.concatenate([predictions, new_predictions])
sess.run(tf_labels.assign(new_labels))
sess.run(tf_predictions.assign(new_predictions))
sess.run(update_op)
expected_auc = _np_auc(predictions, labels)
self.assertAlmostEqual(expected_auc, auc.eval())
def testAUCPRReverseIncreasingPredictions(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8], dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 1])
auc, update_op = metrics.streaming_dynamic_auc(
labels, predictions, curve='PR')
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0.79166, auc.eval(), delta=1e-5)
def testAUCPRJumbledPredictions(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8, 0.1, 0.135, 0.81], dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 0, 1, 0, 1])
auc, update_op = metrics.streaming_dynamic_auc(
labels, predictions, curve='PR')
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0.610317, auc.eval(), delta=1e-6)
def testAUCPRPredictionsLessThanHalf(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.0, 0.1, 0.2, 0.33, 0.3, 0.4, 0.5],
shape=(1, 7),
dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 0, 0, 1, 1, 1], shape=(1, 7))
auc, update_op = metrics.streaming_dynamic_auc(
labels, predictions, curve='PR')
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0.90277, auc.eval(), delta=1e-5)
class StreamingPrecisionRecallAtEqualThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def _testResultsEqual(self, expected_dict, gotten_result):
"""Tests that 2 results (dicts) represent the same data.
Args:
expected_dict: A dictionary with keys that are the names of properties
of PrecisionRecallData and whose values are lists of floats.
gotten_result: A PrecisionRecallData object.
"""
gotten_dict = {k: t.eval() for k, t in gotten_result._asdict().items()}
self.assertItemsEqual(
list(expected_dict.keys()), list(gotten_dict.keys()))
for key, expected_values in expected_dict.items():
self.assertAllClose(expected_values, gotten_dict[key])
def _testCase(self, predictions, labels, expected_result, weights=None):
"""Performs a test given a certain scenario of labels, predictions, weights.
Args:
predictions: The predictions tensor. Of type float32.
labels: The labels tensor. Of type bool.
expected_result: The expected result (dict) that maps to tensors.
weights: Optional weights tensor.
"""
with self.test_session() as sess:
predictions_tensor = constant_op.constant(
predictions, dtype=dtypes_lib.float32)
labels_tensor = constant_op.constant(labels, dtype=dtypes_lib.bool)
weights_tensor = None
      if weights is not None:
weights_tensor = constant_op.constant(weights, dtype=dtypes_lib.float32)
gotten_result, update_op = (
metric_ops.precision_recall_at_equal_thresholds(
labels=labels_tensor,
predictions=predictions_tensor,
weights=weights_tensor,
num_thresholds=3))
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self._testResultsEqual(expected_result, gotten_result)
def testVars(self):
metric_ops.precision_recall_at_equal_thresholds(
labels=constant_op.constant([True], dtype=dtypes_lib.bool),
predictions=constant_op.constant([0.42], dtype=dtypes_lib.float32))
_assert_metric_variables(
self, ('precision_recall_at_equal_thresholds/variables/tp_buckets:0',
'precision_recall_at_equal_thresholds/variables/fp_buckets:0'))
def testVarsWithName(self):
metric_ops.precision_recall_at_equal_thresholds(
labels=constant_op.constant([True], dtype=dtypes_lib.bool),
predictions=constant_op.constant([0.42], dtype=dtypes_lib.float32),
name='foo')
_assert_metric_variables(
self, ('foo/variables/tp_buckets:0', 'foo/variables/fp_buckets:0'))
def testValuesAreIdempotent(self):
predictions = constant_op.constant(
np.random.uniform(size=(10, 3)), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np.random.uniform(size=(10, 3)) > 0.5, dtype=dtypes_lib.bool)
result, update_op = metric_ops.precision_recall_at_equal_thresholds(
labels=labels, predictions=predictions)
with self.test_session() as sess:
# Run several updates.
sess.run(variables.local_variables_initializer())
for _ in range(3):
sess.run(update_op)
# Then verify idempotency.
initial_result = {k: value.eval().tolist() for k, value in
result._asdict().items()}
for _ in range(3):
self._testResultsEqual(initial_result, result)
def testAllTruePositives(self):
self._testCase([[1]], [[True]], {
'tp': [1, 1, 1],
'fp': [0, 0, 0],
'tn': [0, 0, 0],
'fn': [0, 0, 0],
'precision': [1.0, 1.0, 1.0],
'recall': [1.0, 1.0, 1.0],
'thresholds': [0.0, 0.5, 1.0],
})
def testAllTrueNegatives(self):
self._testCase([[0]], [[False]], {
'tp': [0, 0, 0],
'fp': [1, 0, 0],
'tn': [0, 1, 1],
'fn': [0, 0, 0],
'precision': [0.0, 0.0, 0.0],
'recall': [0.0, 0.0, 0.0],
'thresholds': [0.0, 0.5, 1.0],
})
def testAllFalsePositives(self):
self._testCase([[1]], [[False]], {
'tp': [0, 0, 0],
'fp': [1, 1, 1],
'tn': [0, 0, 0],
'fn': [0, 0, 0],
'precision': [0.0, 0.0, 0.0],
'recall': [0.0, 0.0, 0.0],
'thresholds': [0.0, 0.5, 1.0],
})
def testAllFalseNegatives(self):
self._testCase([[0]], [[True]], {
'tp': [1, 0, 0],
'fp': [0, 0, 0],
'tn': [0, 0, 0],
'fn': [0, 1, 1],
'precision': [1.0, 0.0, 0.0],
'recall': [1.0, 0.0, 0.0],
'thresholds': [0.0, 0.5, 1.0],
})
def testManyValues(self):
self._testCase(
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]],
[[True, False, False, True, True, True]],
{
'tp': [4, 3, 0],
'fp': [2, 0, 0],
'tn': [0, 2, 2],
'fn': [0, 1, 4],
'precision': [2.0 / 3.0, 1.0, 0.0],
'recall': [1.0, 0.75, 0.0],
'thresholds': [0.0, 0.5, 1.0],
})
def testManyValuesWithWeights(self):
self._testCase(
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]],
[[True, False, False, True, True, True]],
{
'tp': [1.5, 1.5, 0.0],
'fp': [2.5, 0.0, 0.0],
'tn': [0.0, 2.5, 2.5],
'fn': [0.0, 0.0, 1.5],
'precision': [0.375, 1.0, 0.0],
'recall': [1.0, 1.0, 0.0],
'thresholds': [0.0, 0.5, 1.0],
},
weights=[[0.0, 0.5, 2.0, 0.0, 0.5, 1.0]])
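    # At threshold ~0 everything is flagged: tp is the positive weight mass
    # (0.0 + 0.0 + 0.5 + 1.0 = 1.5) and fp the negative mass (0.5 + 2.0 =
    # 2.5), hence precision 1.5 / 4.0 = 0.375 at recall 1.0.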
class StreamingSpecificityAtSensitivityTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7)
_assert_metric_variables(self,
('specificity_at_sensitivity/true_positives:0',
'specificity_at_sensitivity/false_negatives:0',
'specificity_at_sensitivity/false_positives:0',
'specificity_at_sensitivity/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.7)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_specificity = specificity.eval()
for _ in range(10):
self.assertAlmostEqual(initial_specificity, specificity.eval(), 5)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.7)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, specificity.eval())
def testSomeCorrectHighSensitivity(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.45, 0.5, 0.8, 0.9]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.8)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1.0, sess.run(update_op))
self.assertAlmostEqual(1.0, specificity.eval())
def testSomeCorrectLowSensitivity(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.4)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.6, sess.run(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
def testWeighted1d(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [3]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, weights=weights, sensitivity=0.4)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.6, sess.run(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
def testWeighted2d(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, weights=weights, sensitivity=0.4)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(8.0 / 15.0, sess.run(update_op))
self.assertAlmostEqual(8.0 / 15.0, specificity.eval())
class StreamingSensitivityAtSpecificityTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7)
_assert_metric_variables(self,
('sensitivity_at_specificity/true_positives:0',
'sensitivity_at_specificity/false_negatives:0',
'sensitivity_at_specificity/false_positives:0',
'sensitivity_at_specificity/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
sensitivity, update_op = metrics.streaming_sensitivity_at_specificity(
predictions, labels, specificity=0.7)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_sensitivity = sensitivity.eval()
for _ in range(10):
self.assertAlmostEqual(initial_sensitivity, sensitivity.eval(), 5)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
specificity, update_op = metrics.streaming_sensitivity_at_specificity(
predictions, labels, specificity=0.7)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, specificity.eval())
def testSomeCorrectHighSpecificity(self):
predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.45, 0.5, 0.8, 0.9]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.streaming_sensitivity_at_specificity(
predictions, labels, specificity=0.8)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.8, sess.run(update_op))
self.assertAlmostEqual(0.8, specificity.eval())
def testSomeCorrectLowSpecificity(self):
predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.streaming_sensitivity_at_specificity(
predictions, labels, specificity=0.4)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.6, sess.run(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
def testWeighted(self):
predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
specificity, update_op = metrics.streaming_sensitivity_at_specificity(
predictions, labels, weights=weights, specificity=0.4)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.675, sess.run(update_op))
self.assertAlmostEqual(0.675, specificity.eval())
# TODO(nsilberman): Break this up into two sets of tests.
class StreamingPrecisionRecallThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0])
_assert_metric_variables(self, (
'precision_at_thresholds/true_positives:0',
'precision_at_thresholds/false_positives:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
prec, _ = metrics.streaming_precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
rec, _ = metrics.streaming_recall_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [prec, rec])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, precision_op = metrics.streaming_precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
_, recall_op = metrics.streaming_recall_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
self.assertListEqual(
ops.get_collection(my_collection_name), [precision_op, recall_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
thresholds = [0, 0.5, 1.0]
prec, prec_op = metrics.streaming_precision_at_thresholds(predictions,
labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(predictions, labels,
thresholds)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run([prec_op, rec_op])
# Then verify idempotency.
initial_prec = prec.eval()
initial_rec = rec.eval()
for _ in range(10):
self.assertAllClose(initial_prec, prec.eval())
self.assertAllClose(initial_rec, rec.eval())
# TODO(nsilberman): fix tests (passing but incorrect).
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(predictions,
labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(predictions, labels,
thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertEqual(1, prec.eval())
self.assertEqual(1, rec.eval())
def testSomeCorrect(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(predictions,
labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(predictions, labels,
thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0.5, prec.eval())
self.assertAlmostEqual(0.5, rec.eval())
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(predictions,
labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(predictions, labels,
thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0, prec.eval())
self.assertAlmostEqual(0, rec.eval())
def testWeights1d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0], [1]], shape=(2, 1), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds, weights=weights)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds, weights=weights)
prec_low = prec[0]
prec_high = prec[1]
rec_low = rec[0]
rec_high = rec[1]
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(1.0, prec_low.eval(), places=5)
self.assertAlmostEqual(0.0, prec_high.eval(), places=5)
self.assertAlmostEqual(1.0, rec_low.eval(), places=5)
self.assertAlmostEqual(0.0, rec_high.eval(), places=5)
def testWeights2d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds, weights=weights)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds, weights=weights)
prec_low = prec[0]
prec_high = prec[1]
rec_low = rec[0]
rec_high = rec[1]
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(1.0, prec_low.eval(), places=5)
self.assertAlmostEqual(0.0, prec_high.eval(), places=5)
self.assertAlmostEqual(1.0, rec_low.eval(), places=5)
self.assertAlmostEqual(0.0, rec_high.eval(), places=5)
def testExtremeThresholds(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 1], shape=(1, 4))
      thresholds = [-1.0, 2.0]  # lower/higher than any prediction value
prec, prec_op = metrics.streaming_precision_at_thresholds(predictions,
labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(predictions, labels,
thresholds)
prec_low = prec[0]
prec_high = prec[1]
rec_low = rec[0]
rec_high = rec[1]
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0.75, prec_low.eval())
self.assertAlmostEqual(0.0, prec_high.eval())
self.assertAlmostEqual(1.0, rec_low.eval())
self.assertAlmostEqual(0.0, rec_high.eval())
def testZeroLabelsPredictions(self):
with self.test_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(predictions,
labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(predictions, labels,
thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0, prec.eval(), 6)
self.assertAlmostEqual(0, rec.eval(), 6)
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=(num_samples, 1))
noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
thresholds = [0.3]
tp = 0
fp = 0
fn = 0
tn = 0
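    # Compute the reference counts by brute force at the single threshold.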
for i in range(num_samples):
if predictions[i] > thresholds[0]:
if labels[i] == 1:
tp += 1
else:
fp += 1
else:
if labels[i] == 1:
fn += 1
else:
tn += 1
epsilon = 1e-7
expected_prec = tp / (epsilon + tp + fp)
expected_rec = tp / (epsilon + tp + fn)
labels = labels.astype(np.float32)
predictions = predictions.astype(np.float32)
with self.test_session() as sess:
      # Reshape the data so it's easy to queue up:
predictions_batches = predictions.reshape((batch_size, num_batches))
labels_batches = labels.reshape((batch_size, num_batches))
# Enqueue the data:
predictions_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
labels_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
      for i in range(num_batches):
tf_prediction = constant_op.constant(predictions_batches[:, i])
tf_label = constant_op.constant(labels_batches[:, i])
sess.run([
predictions_queue.enqueue(tf_prediction),
labels_queue.enqueue(tf_label)
])
tf_predictions = predictions_queue.dequeue()
tf_labels = labels_queue.dequeue()
prec, prec_op = metrics.streaming_precision_at_thresholds(tf_predictions,
tf_labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(tf_predictions,
tf_labels,
thresholds)
sess.run(variables.local_variables_initializer())
      for _ in range(num_batches):
sess.run([prec_op, rec_op])
      # Since this is only approximate, we can't expect a 6-digit match.
      # With a higher number of samples/thresholds we should see the
      # accuracy improve.
self.assertAlmostEqual(expected_prec, prec.eval(), 2)
self.assertAlmostEqual(expected_rec, rec.eval(), 2)
class StreamingFPRThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_positive_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0])
_assert_metric_variables(self, (
'false_positive_rate_at_thresholds/false_positives:0',
'false_positive_rate_at_thresholds/true_negatives:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
fpr, _ = metrics.streaming_false_positive_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [fpr])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
self.assertListEqual(
ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
thresholds = [0, 0.5, 1.0]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(fpr_op)
# Then verify idempotency.
initial_fpr = fpr.eval()
for _ in range(10):
self.assertAllClose(initial_fpr, fpr.eval())
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
thresholds = [0.5]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertEqual(0, fpr.eval())
def testSomeCorrect(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
thresholds = [0.5]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(0.5, fpr.eval())
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
thresholds = [0.5]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(1, fpr.eval())
def testWeights1d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0], [1]], shape=(2, 1), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds, weights=weights)
fpr_low = fpr[0]
fpr_high = fpr[1]
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(0.0, fpr_low.eval(), places=5)
self.assertAlmostEqual(0.0, fpr_high.eval(), places=5)
def testWeights2d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds, weights=weights)
fpr_low = fpr[0]
fpr_high = fpr[1]
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(0.0, fpr_low.eval(), places=5)
self.assertAlmostEqual(0.0, fpr_high.eval(), places=5)
def testExtremeThresholds(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 1], shape=(1, 4))
      thresholds = [-1.0, 2.0]  # lower/higher than any prediction value
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
fpr_low = fpr[0]
fpr_high = fpr[1]
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(1.0, fpr_low.eval(), places=5)
self.assertAlmostEqual(0.0, fpr_high.eval(), places=5)
def testZeroLabelsPredictions(self):
with self.test_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
thresholds = [0.5]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(0, fpr.eval(), 6)
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=(num_samples, 1))
noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
thresholds = [0.3]
fp = 0
tn = 0
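    # Brute-force reference counts for the FPR at the single threshold.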
for i in range(num_samples):
if predictions[i] > thresholds[0]:
if labels[i] == 0:
fp += 1
else:
if labels[i] == 0:
tn += 1
epsilon = 1e-7
expected_fpr = fp / (epsilon + fp + tn)
labels = labels.astype(np.float32)
predictions = predictions.astype(np.float32)
with self.test_session() as sess:
      # Reshape the data so it's easy to queue up:
predictions_batches = predictions.reshape((batch_size, num_batches))
labels_batches = labels.reshape((batch_size, num_batches))
# Enqueue the data:
predictions_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
labels_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
      for i in range(num_batches):
tf_prediction = constant_op.constant(predictions_batches[:, i])
tf_label = constant_op.constant(labels_batches[:, i])
sess.run([
predictions_queue.enqueue(tf_prediction),
labels_queue.enqueue(tf_label)
])
tf_predictions = predictions_queue.dequeue()
tf_labels = labels_queue.dequeue()
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
tf_predictions, tf_labels, thresholds)
sess.run(variables.local_variables_initializer())
      for _ in range(num_batches):
sess.run(fpr_op)
      # Since this is only approximate, we can't expect a 6-digit match.
      # With a higher number of samples/thresholds we should see the
      # accuracy improve.
self.assertAlmostEqual(expected_fpr, fpr.eval(), 2)
class RecallAtPrecisionTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.recall_at_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
precision=0.7)
_assert_metric_variables(self, ('recall_at_precision/true_positives:0',
'recall_at_precision/false_negatives:0',
'recall_at_precision/false_positives:0',
'recall_at_precision/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.recall_at_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
precision=0.7,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.recall_at_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
precision=0.7,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
recall, update_op = metrics.recall_at_precision(
labels, predictions, precision=0.7)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_recall = recall.eval()
for _ in range(10):
self.assertAlmostEqual(initial_recall, recall.eval(), 5)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
recall, update_op = metrics.recall_at_precision(
labels, predictions, precision=1.0)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, recall.eval())
def testSomeCorrectHighPrecision(self):
predictions_values = [1, .9, .8, .7, .6, .5, .4, .3]
labels_values = [1, 1, 1, 1, 0, 0, 0, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
recall, update_op = metrics.recall_at_precision(
labels, predictions, precision=0.8)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.8, sess.run(update_op))
self.assertAlmostEqual(0.8, recall.eval())
def testSomeCorrectLowPrecision(self):
predictions_values = [1, .9, .8, .7, .6, .5, .4, .3, .2, .1]
labels_values = [1, 1, 0, 0, 0, 0, 0, 0, 0, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
recall, update_op = metrics.recall_at_precision(
labels, predictions, precision=0.4)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
target_recall = 2.0 / 3.0
self.assertAlmostEqual(target_recall, sess.run(update_op))
self.assertAlmostEqual(target_recall, recall.eval())
def testWeighted(self):
predictions_values = [1, .9, .8, .7, .6]
labels_values = [1, 1, 0, 0, 1]
weights_values = [1, 1, 3, 4, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
recall, update_op = metrics.recall_at_precision(
labels, predictions, weights=weights, precision=0.4)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
target_recall = 2.0 / 3.0
self.assertAlmostEqual(target_recall, sess.run(update_op))
self.assertAlmostEqual(target_recall, recall.eval())
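# Illustrative reference for the semantics exercised by RecallAtPrecisionTest
# above: sweep candidate thresholds over the scores and report the recall at
# a threshold whose precision meets the target. This is a simplified sketch;
# the real op uses a bucketed streaming implementation. The helper name
# `_reference_recall_at_precision` is hypothetical.
def _reference_recall_at_precision(predictions, labels, target_precision):
  predictions = np.asarray(predictions, dtype=float)
  labels = np.asarray(labels)
  best_recall = 0.0
  for threshold in np.unique(predictions):
    predicted_positive = predictions >= threshold
    tp = np.sum(predicted_positive & (labels == 1))
    fp = np.sum(predicted_positive & (labels == 0))
    fn = np.sum(~predicted_positive & (labels == 1))
    precision = float(tp) / max(tp + fp, 1)
    if precision >= target_precision:
      best_recall = max(best_recall, float(tp) / max(tp + fn, 1))
  return best_recall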
class StreamingFNRThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_negative_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0])
_assert_metric_variables(self, (
'false_negative_rate_at_thresholds/false_negatives:0',
'false_negative_rate_at_thresholds/true_positives:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
fnr, _ = metrics.streaming_false_negative_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [fnr])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
self.assertListEqual(
ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
thresholds = [0, 0.5, 1.0]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(fnr_op)
# Then verify idempotency.
initial_fnr = fnr.eval()
for _ in range(10):
self.assertAllClose(initial_fnr, fnr.eval())
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
thresholds = [0.5]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertEqual(0, fnr.eval())
def testSomeCorrect(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
thresholds = [0.5]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(0.5, fnr.eval())
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
thresholds = [0.5]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(1, fnr.eval())
def testWeights1d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0], [1]], shape=(2, 1), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds, weights=weights)
fnr_low = fnr[0]
fnr_high = fnr[1]
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(0.0, fnr_low.eval(), places=5)
self.assertAlmostEqual(1.0, fnr_high.eval(), places=5)
def testWeights2d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds, weights=weights)
fnr_low = fnr[0]
fnr_high = fnr[1]
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(0.0, fnr_low.eval(), places=5)
self.assertAlmostEqual(1.0, fnr_high.eval(), places=5)
def testExtremeThresholds(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 1], shape=(1, 4))
thresholds = [-1.0, 2.0] # lower/higher than any values
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
fnr_low = fnr[0]
fnr_high = fnr[1]
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(0.0, fnr_low.eval())
self.assertAlmostEqual(1.0, fnr_high.eval())
def testZeroLabelsPredictions(self):
with self.test_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
thresholds = [0.5]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(0, fnr.eval(), 6)
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=(num_samples, 1))
noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
thresholds = [0.3]
fn = 0
tp = 0
for i in range(num_samples):
if predictions[i] > thresholds[0]:
if labels[i] == 1:
tp += 1
else:
if labels[i] == 1:
fn += 1
epsilon = 1e-7
expected_fnr = fn / (epsilon + fn + tp)
labels = labels.astype(np.float32)
predictions = predictions.astype(np.float32)
with self.test_session() as sess:
      # Reshape the data so it's easy to queue up:
predictions_batches = predictions.reshape((batch_size, num_batches))
labels_batches = labels.reshape((batch_size, num_batches))
# Enqueue the data:
predictions_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
labels_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
for i in range(int(num_batches)):
tf_prediction = constant_op.constant(predictions_batches[:, i])
tf_label = constant_op.constant(labels_batches[:, i])
sess.run([
predictions_queue.enqueue(tf_prediction),
labels_queue.enqueue(tf_label)
])
tf_predictions = predictions_queue.dequeue()
tf_labels = labels_queue.dequeue()
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
tf_predictions, tf_labels, thresholds)
sess.run(variables.local_variables_initializer())
for _ in range(int(num_samples / batch_size)):
sess.run(fnr_op)
      # Since this is only approximate, we can't expect a 6-digit match.
      # With a higher number of samples/thresholds, the accuracy should
      # improve.
self.assertAlmostEqual(expected_fnr, fnr.eval(), 2)
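# Illustrative reference for the recall@k quantity checked by
# StreamingRecallAtKTest below: the fraction of true labels that appear among
# each row's top-k scored classes. A minimal sketch; the helper name
# `_reference_recall_at_k` is hypothetical.
def _reference_recall_at_k(predictions, labels, k):
  hits, total = 0, 0
  for scores, row_labels in zip(predictions, labels):
    top_k = set(np.argsort(scores)[::-1][:k].tolist())
    for label in np.atleast_1d(row_labels):
      total += 1
      hits += int(label in top_k)
  return float(hits) / total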
# TODO(ptucker): Remove when we remove `streaming_recall_at_k`.
# This op will be deprecated soon in favor of `streaming_sparse_recall_at_k`.
# Until then, this test validates that both ops yield the same results.
class StreamingRecallAtKTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
self._batch_size = 4
self._num_classes = 3
self._np_predictions = np.matrix(('0.1 0.2 0.7;'
'0.6 0.2 0.2;'
'0.0 0.9 0.1;'
'0.2 0.0 0.8'))
self._np_labels = [0, 0, 0, 0]
def testVars(self):
metrics.streaming_recall_at_k(
predictions=array_ops.ones((self._batch_size, self._num_classes)),
labels=array_ops.ones(
(self._batch_size,), dtype=dtypes_lib.int32),
k=1)
_assert_metric_variables(self,
('recall_at_1/count:0', 'recall_at_1/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_recall_at_k(
predictions=array_ops.ones((self._batch_size, self._num_classes)),
labels=array_ops.ones(
(self._batch_size,), dtype=dtypes_lib.int32),
k=1,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_recall_at_k(
predictions=array_ops.ones((self._batch_size, self._num_classes)),
labels=array_ops.ones(
(self._batch_size,), dtype=dtypes_lib.int32),
k=1,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testSingleUpdateKIs1(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
recall, update_op = metrics.streaming_recall_at_k(predictions, labels, k=1)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions, array_ops.reshape(labels, (self._batch_size, 1)), k=1)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0.25, sess.run(update_op))
self.assertEqual(0.25, recall.eval())
self.assertEqual(0.25, sess.run(sp_update_op))
self.assertEqual(0.25, sp_recall.eval())
def testSingleUpdateKIs2(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
recall, update_op = metrics.streaming_recall_at_k(predictions, labels, k=2)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions, array_ops.reshape(labels, (self._batch_size, 1)), k=2)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0.5, sess.run(update_op))
self.assertEqual(0.5, recall.eval())
self.assertEqual(0.5, sess.run(sp_update_op))
self.assertEqual(0.5, sp_recall.eval())
def testSingleUpdateKIs3(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
recall, update_op = metrics.streaming_recall_at_k(predictions, labels, k=3)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions, array_ops.reshape(labels, (self._batch_size, 1)), k=3)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1.0, sess.run(update_op))
self.assertEqual(1.0, recall.eval())
self.assertEqual(1.0, sess.run(sp_update_op))
self.assertEqual(1.0, sp_recall.eval())
def testSingleUpdateSomeMissingKIs2(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
weights = constant_op.constant(
[0, 1, 0, 1], shape=(self._batch_size,), dtype=dtypes_lib.float32)
recall, update_op = metrics.streaming_recall_at_k(
predictions, labels, k=2, weights=weights)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions,
array_ops.reshape(labels, (self._batch_size, 1)),
k=2,
weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1.0, sess.run(update_op))
self.assertEqual(1.0, recall.eval())
self.assertEqual(1.0, sess.run(sp_update_op))
self.assertEqual(1.0, sp_recall.eval())
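# Illustrative counterpart for the precision@k quantity covered by
# StreamingSparsePrecisionTest below: the fraction of top-k predicted classes
# that are true labels. A minimal sketch; the helper name
# `_reference_precision_at_k` is hypothetical.
def _reference_precision_at_k(predictions, labels, k):
  correct, total = 0, 0
  for scores, row_labels in zip(predictions, labels):
    top_k = np.argsort(scores)[::-1][:k]
    label_set = set(np.atleast_1d(row_labels).tolist())
    total += len(top_k)
    correct += sum(1 for c in top_k if c in label_set)
  return float(correct) / total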
class StreamingSparsePrecisionTest(test.TestCase):
def _test_streaming_sparse_precision_at_k(self,
predictions,
labels,
k,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_precision_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=labels,
k=k,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def _test_streaming_sparse_precision_at_top_k(self,
top_k_predictions,
labels,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_precision_at_top_k(
top_k_predictions=constant_op.constant(top_k_predictions,
dtypes_lib.int32),
labels=labels,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
self.assertTrue(math.isnan(update.eval()))
self.assertTrue(math.isnan(metric.eval()))
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def _test_streaming_sparse_average_precision_at_k(self,
predictions,
labels,
k,
expected,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
predictions = constant_op.constant(predictions, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_average_precision_at_k(
predictions, labels, k, weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
local_variables = variables.local_variables()
variables.variables_initializer(local_variables).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertAlmostEqual(expected, update.eval())
self.assertAlmostEqual(expected, metric.eval())
def _test_streaming_sparse_average_precision_at_top_k(self,
top_k_predictions,
labels,
expected,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_average_precision_at_top_k(
top_k_predictions, labels, weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
local_variables = variables.local_variables()
variables.variables_initializer(local_variables).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertAlmostEqual(expected, update.eval())
self.assertAlmostEqual(expected, metric.eval())
def test_top_k_rank_invalid(self):
with self.test_session():
# top_k_predictions has rank < 2.
top_k_predictions = [9, 4, 6, 2, 0]
sp_labels = sparse_tensor.SparseTensorValue(
indices=np.array([[0,], [1,], [2,]], np.int64),
values=np.array([2, 7, 8], np.int64),
dense_shape=np.array([10,], np.int64))
with self.assertRaises(ValueError):
precision, _ = metrics.streaming_sparse_precision_at_top_k(
top_k_predictions=constant_op.constant(top_k_predictions,
dtypes_lib.int64),
labels=sp_labels)
variables.variables_initializer(variables.local_variables()).run()
precision.eval()
def test_average_precision(self):
# Example 1.
# Matches example here:
# fastml.com/what-you-wanted-to-know-about-mean-average-precision
labels_ex1 = (0, 1, 2, 3, 4)
labels = np.array([labels_ex1], dtype=np.int64)
predictions_ex1 = (0.2, 0.1, 0.0, 0.4, 0.0, 0.5, 0.3)
predictions = (predictions_ex1,)
predictions_top_k_ex1 = (5, 3, 6, 0, 1, 2)
precision_ex1 = (0.0 / 1, 1.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex1 = (0.0 / 1, precision_ex1[1] / 2, precision_ex1[1] / 3,
(precision_ex1[1] + precision_ex1[3]) / 4)
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=precision_ex1[i])
self._test_streaming_sparse_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=precision_ex1[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex1[i])
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=avg_precision_ex1[i])
# Example 2.
labels_ex2 = (0, 2, 4, 5, 6)
labels = np.array([labels_ex2], dtype=np.int64)
predictions_ex2 = (0.3, 0.5, 0.0, 0.4, 0.0, 0.1, 0.2)
predictions = (predictions_ex2,)
predictions_top_k_ex2 = (1, 3, 0, 6, 5)
precision_ex2 = (0.0 / 1, 0.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex2 = (0.0 / 1, 0.0 / 2, precision_ex2[2] / 3,
(precision_ex2[2] + precision_ex2[3]) / 4)
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=precision_ex2[i])
self._test_streaming_sparse_precision_at_top_k(
(predictions_top_k_ex2[:k],), labels, expected=precision_ex2[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex2[i])
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex2[:k],), labels, expected=avg_precision_ex2[i])
    # With both examples together, we expect both precision and average
    # precision to be the average of the two examples.
labels = np.array([labels_ex1, labels_ex2], dtype=np.int64)
predictions = (predictions_ex1, predictions_ex2)
streaming_precision = [(ex1 + ex2) / 2
for ex1, ex2 in zip(precision_ex1, precision_ex2)]
streaming_average_precision = [
(ex1 + ex2) / 2
for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)
]
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=streaming_precision[i])
predictions_top_k = (predictions_top_k_ex1[:k], predictions_top_k_ex2[:k])
self._test_streaming_sparse_precision_at_top_k(
predictions_top_k, labels, expected=streaming_precision[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=streaming_average_precision[i])
self._test_streaming_sparse_average_precision_at_top_k(
predictions_top_k, labels, expected=streaming_average_precision[i])
    # With weighted examples, we expect streaming average precision to be the
    # weighted average of the two examples.
weights = (0.3, 0.6)
streaming_average_precision = [
(weights[0] * ex1 + weights[1] * ex2) / (weights[0] + weights[1])
for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)
]
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_average_precision_at_k(
predictions,
labels,
k,
expected=streaming_average_precision[i],
weights=weights)
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex1[:k], predictions_top_k_ex2[:k]),
labels,
expected=streaming_average_precision[i],
weights=weights)
def test_average_precision_some_labels_out_of_range(self):
"""Tests that labels outside the [0, n_classes) range are ignored."""
labels_ex1 = (-1, 0, 1, 2, 3, 4, 7)
labels = np.array([labels_ex1], dtype=np.int64)
predictions_ex1 = (0.2, 0.1, 0.0, 0.4, 0.0, 0.5, 0.3)
predictions = (predictions_ex1,)
predictions_top_k_ex1 = (5, 3, 6, 0, 1, 2)
precision_ex1 = (0.0 / 1, 1.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex1 = (0.0 / 1, precision_ex1[1] / 2, precision_ex1[1] / 3,
(precision_ex1[1] + precision_ex1[3]) / 4)
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=precision_ex1[i])
self._test_streaming_sparse_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=precision_ex1[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex1[i])
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=avg_precision_ex1[i])
def test_average_precision_at_top_k_static_shape_check(self):
predictions_top_k = array_ops.placeholder(shape=(2, None),
dtype=dtypes_lib.int64)
labels = np.array(((1,), (2,)), dtype=np.int64)
# Fails due to non-static predictions_idx shape.
with self.assertRaises(ValueError):
metric_ops.streaming_sparse_average_precision_at_top_k(predictions_top_k,
labels)
predictions_top_k = (2, 1)
    # Fails since the rank of predictions_idx is less than two.
with self.assertRaises(ValueError):
metric_ops.streaming_sparse_average_precision_at_top_k(predictions_top_k,
labels)
predictions_top_k = ((2,), (1,))
# Valid static shape.
metric_ops.streaming_sparse_average_precision_at_top_k(predictions_top_k,
labels)
def test_one_label_at_k1_nan(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 0, 1], [0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,1,2 have 0 predictions, classes -1 and 4 are out of range.
for class_id in (-1, 0, 1, 2, 4):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=1, expected=NAN, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, class_id=class_id)
def test_one_label_at_k1(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 0, 1], [0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=1, expected=1.0 / 2, class_id=3)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 2, class_id=3)
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=1, expected=1.0 / 2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 2)
def test_three_labels_at_k5_no_predictions(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 1,3,8 have 0 predictions, classes -1 and 10 are out of range.
for class_id in (-1, 1, 3, 8, 10):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, class_id=class_id)
def test_three_labels_at_k5_no_labels(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,4,6,9: 0 labels, >=1 prediction.
for class_id in (0, 4, 6, 9):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=0.0, class_id=class_id)
def test_three_labels_at_k5(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 2: 2 labels, 2 correct predictions.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=2.0 / 2, class_id=2)
# Class 5: 1 label, 1 correct prediction.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=1.0 / 1, class_id=5)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, 1 incorrect prediction.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=0.0 / 1, class_id=7)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=0.0 / 1, class_id=7)
# All classes: 10 predictions, 3 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=3.0 / 10)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=3.0 / 10)
def test_three_labels_at_k5_some_out_of_range(self):
"""Tests that labels outside the [0, n_classes) range are ignored."""
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sp_labels = sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2],
[1, 3]],
# values -1 and 10 are outside the [0, n_classes) range and are ignored.
values=np.array([2, 7, -1, 8, 1, 2, 5, 10], np.int64),
dense_shape=[2, 4])
# Class 2: 2 labels, 2 correct predictions.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=2.0 / 2, class_id=2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=2.0 / 2, class_id=2)
# Class 5: 1 label, 1 correct prediction.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=1.0 / 1, class_id=5)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, 1 incorrect prediction.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=0.0 / 1, class_id=7)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=0.0 / 1, class_id=7)
# All classes: 10 predictions, 3 correct.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=3.0 / 10)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=3.0 / 10)
def test_3d_nan(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Classes 1,3,8 have 0 predictions, classes -1 and 10 are out of range.
for class_id in (-1, 1, 3, 8, 10):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, class_id=class_id)
def test_3d_no_labels(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Classes 0,4,6,9: 0 labels, >=1 prediction.
for class_id in (0, 4, 6, 9):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=0.0, class_id=class_id)
def test_3d(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 4 predictions, all correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=4.0 / 4, class_id=2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=4.0 / 4, class_id=2)
# Class 5: 2 predictions, both correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=5)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=2.0 / 2, class_id=5)
# Class 7: 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=1.0 / 2, class_id=7)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 2, class_id=7)
# All classes: 20 predictions, 7 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=7.0 / 20)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=7.0 / 20)
def test_3d_ignore_all(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
for class_id in xrange(10):
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, weights=[[0], [0]])
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, weights=[[0, 0], [0, 0]])
def test_3d_ignore_some(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 2 predictions, both correct.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
# Class 2: 2 predictions, both correct.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
# Class 7: 1 incorrect prediction.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
# Class 7: 1 correct prediction.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
# Class 7: no predictions.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=7,
weights=[[1, 0], [0, 1]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=NAN,
class_id=7,
weights=[[1, 0], [0, 1]])
# Class 7: 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=1.0 / 2.0,
class_id=7,
weights=[[0, 1], [1, 0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=1.0 / 2.0,
class_id=7,
weights=[[0, 1], [1, 0]])
def test_sparse_tensor_value(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
labels = [[0, 0, 0, 1], [0, 0, 1, 0]]
expected_precision = 0.5
with self.test_session():
_, precision = metrics.streaming_sparse_precision_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=_binary_2d_label_to_sparse_value(labels),
k=1)
variables.variables_initializer(variables.local_variables()).run()
self.assertEqual(expected_precision, precision.eval())
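# Illustrative reference for the average-precision values asserted in
# test_average_precision above: sum precision@i over the ranks i at which a
# true label appears, then divide by k. Dividing by k is valid here because
# each example in those tests has at least k labels. A minimal sketch; the
# helper name `_reference_average_precision_at_k` is hypothetical.
def _reference_average_precision_at_k(ranked_classes, label_set, k):
  hits, precision_sum = 0, 0.0
  for i, cls in enumerate(ranked_classes[:k]):
    if cls in label_set:
      hits += 1
      precision_sum += float(hits) / (i + 1)
  return precision_sum / k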
class StreamingSparseRecallTest(test.TestCase):
def _test_streaming_sparse_recall_at_k(self,
predictions,
labels,
k,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_recall_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=labels,
k=k,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def _test_sparse_recall_at_top_k(self,
labels,
top_k_predictions,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metric_ops.sparse_recall_at_top_k(
labels=labels,
top_k_predictions=constant_op.constant(top_k_predictions,
dtypes_lib.int32),
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
self.assertTrue(math.isnan(update.eval()))
self.assertTrue(math.isnan(metric.eval()))
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def test_one_label_at_k1_nan(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 0, 1], [0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
# Classes 0,1 have 0 labels, 0 predictions, classes -1 and 4 are out of
# range.
for labels in (sparse_labels, dense_labels):
for class_id in (-1, 0, 1, 4):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=NAN, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=class_id)
def test_one_label_at_k1_no_predictions(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 0, 1], [0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 2: 0 predictions.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.0, class_id=2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0, class_id=2)
def test_one_label_at_k1(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 0, 1], [0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 1, class_id=3)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 1, class_id=3)
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2)
def test_one_label_at_k1_weighted(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 0, 1], [0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=NAN, class_id=3, weights=(0.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=3, weights=(0.0,))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(1.0,))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(1.0,))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(2.0,))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(2.0,))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=NAN,
class_id=3,
weights=(0.0, 0.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=3,
weights=(0.0, 0.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=NAN,
class_id=3,
weights=(0.0, 1.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=3,
weights=(0.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 0.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 0.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 1.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=2.0 / 2,
class_id=3,
weights=(2.0, 3.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=2.0 / 2,
class_id=3,
weights=(2.0, 3.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=3.0 / 3,
class_id=3,
weights=(3.0, 2.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=3.0 / 3,
class_id=3,
weights=(3.0, 2.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=0.3 / 0.3,
class_id=3,
weights=(0.3, 0.6))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=0.3 / 0.3,
class_id=3,
weights=(0.3, 0.6))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=0.6 / 0.6,
class_id=3,
weights=(0.6, 0.3))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=0.6 / 0.6,
class_id=3,
weights=(0.6, 0.3))
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=NAN, weights=(0.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, weights=(0.0,))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2, weights=(1.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, weights=(1.0,))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2, weights=(2.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, weights=(2.0,))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 1, weights=(1.0, 0.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 1, weights=(1.0, 0.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.0 / 1, weights=(0.0, 1.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0 / 1, weights=(0.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2, weights=(1.0, 1.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, weights=(1.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=2.0 / 5, weights=(2.0, 3.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=2.0 / 5, weights=(2.0, 3.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=3.0 / 5, weights=(3.0, 2.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=3.0 / 5, weights=(3.0, 2.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.3 / 0.9, weights=(0.3, 0.6))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.3 / 0.9, weights=(0.3, 0.6))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.6 / 0.9, weights=(0.6, 0.3))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.6 / 0.9, weights=(0.6, 0.3))
def test_three_labels_at_k5_nan(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,3,4,6,9 have 0 labels, class 10 is out of range.
for class_id in (0, 3, 4, 6, 9, 10):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=class_id)
def test_three_labels_at_k5_no_predictions(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 8: 1 label, no predictions.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=0.0 / 1, class_id=8)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0 / 1, class_id=8)
def test_three_labels_at_k5(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=2.0 / 2, class_id=2)
      # Class 5: 1 label, correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=1.0 / 1, class_id=5)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, incorrect.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=0.0 / 1, class_id=7)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0 / 1, class_id=7)
# All classes: 6 labels, 3 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=3.0 / 6)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=3.0 / 6)
def test_three_labels_at_k5_some_out_of_range(self):
"""Tests that labels outside the [0, n_classes) count in denominator."""
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sp_labels = sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2],
[1, 3]],
# values -1 and 10 are outside the [0, n_classes) range.
values=np.array([2, 7, -1, 8, 1, 2, 5, 10], np.int64),
dense_shape=[2, 4])
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions=predictions,
labels=sp_labels,
k=5,
expected=2.0 / 2,
class_id=2)
self._test_sparse_recall_at_top_k(
sp_labels,
top_k_predictions,
expected=2.0 / 2,
class_id=2)
    # Class 5: 1 label, correct.
self._test_streaming_sparse_recall_at_k(
predictions=predictions,
labels=sp_labels,
k=5,
expected=1.0 / 1,
class_id=5)
self._test_sparse_recall_at_top_k(
sp_labels,
top_k_predictions,
expected=1.0 / 1,
class_id=5)
# Class 7: 1 label, incorrect.
self._test_streaming_sparse_recall_at_k(
predictions=predictions,
labels=sp_labels,
k=5,
expected=0.0 / 1,
class_id=7)
self._test_sparse_recall_at_top_k(
sp_labels,
top_k_predictions,
expected=0.0 / 1,
class_id=7)
# All classes: 8 labels, 3 correct.
self._test_streaming_sparse_recall_at_k(
predictions=predictions, labels=sp_labels, k=5, expected=3.0 / 8)
self._test_sparse_recall_at_top_k(
sp_labels, top_k_predictions, expected=3.0 / 8)
def test_3d_nan(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
sparse_labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 1, 1, 0]]])
dense_labels = np.array(
[[[2, 7, 8], [1, 2, 5]], [
[1, 2, 5],
[2, 7, 8],
]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,3,4,6,9 have 0 labels, class 10 is out of range.
for class_id in (0, 3, 4, 6, 9, 10):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=class_id)
def test_3d_no_predictions(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
sparse_labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 1, 1, 0]]])
dense_labels = np.array(
[[[2, 7, 8], [1, 2, 5]], [
[1, 2, 5],
[2, 7, 8],
]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 1,8 have 0 predictions, >=1 label.
for class_id in (1, 8):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0, class_id=class_id)
def test_3d(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 4 labels, all correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=4.0 / 4, class_id=2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=4.0 / 4, class_id=2)
# Class 5: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=5)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=2.0 / 2, class_id=5)
# Class 7: 2 labels, 1 incorrect.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=1.0 / 2, class_id=7)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, class_id=7)
# All classes: 12 labels, 7 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=7.0 / 12)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=7.0 / 12)
def test_3d_ignore_all(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
for class_id in xrange(10):
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0], [0]])
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, weights=[[0], [0]])
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0, 0], [0, 0]])
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, weights=[[0, 0], [0, 0]])
def test_3d_ignore_some(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
# Class 7: 1 label, correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
# Class 7: 1 label, incorrect.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
# Class 7: 2 labels, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=1.0 / 2.0,
class_id=7,
weights=[[1, 0], [1, 0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 2.0,
class_id=7,
weights=[[1, 0], [1, 0]])
# Class 7: No labels.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=7,
weights=[[0, 1], [0, 1]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=7,
weights=[[0, 1], [0, 1]])
def test_sparse_tensor_value(self):
predictions = [[0.1, 0.3, 0.2, 0.4],
[0.1, 0.2, 0.3, 0.4]]
labels = [[0, 0, 1, 0],
[0, 0, 0, 1]]
expected_recall = 0.5
with self.test_session():
_, recall = metrics.streaming_sparse_recall_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=_binary_2d_label_to_sparse_value(labels),
k=1)
variables.variables_initializer(variables.local_variables()).run()
self.assertEqual(expected_recall, recall.eval())
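# Illustrative reference for the weighted mean absolute error checked by
# StreamingMeanAbsoluteErrorTest below: the weighted mean of
# |prediction - label|. A minimal sketch; the helper name
# `_reference_mean_absolute_error` is hypothetical.
def _reference_mean_absolute_error(predictions, labels, weights=None):
  errors = np.abs(np.asarray(predictions, dtype=float) -
                  np.asarray(labels, dtype=float))
  if weights is None:
    return errors.mean()
  weights = np.asarray(weights, dtype=float)
  return (errors * weights).sum() / weights.sum()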
class StreamingMeanAbsoluteErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_absolute_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self, ('mean_absolute_error/count:0', 'mean_absolute_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_absolute_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_absolute_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_mean_absolute_error(predictions,
labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateWithErrorAndWeights(self):
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
error, update_op = metrics.streaming_mean_absolute_error(predictions,
labels, weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(3, sess.run(update_op))
self.assertEqual(3, error.eval())
class StreamingMeanRelativeErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)))
_assert_metric_variables(
self, ('mean_relative_error/count:0', 'mean_relative_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
normalizer = random_ops.random_normal((10, 3), seed=3)
error, update_op = metrics.streaming_mean_relative_error(predictions,
labels, normalizer)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateNormalizedByLabels(self):
np_predictions = np.asarray([2, 4, 6, 8], dtype=np.float32)
np_labels = np.asarray([1, 3, 2, 3], dtype=np.float32)
expected_error = np.mean(
np.divide(np.absolute(np_predictions - np_labels), np_labels))
predictions = constant_op.constant(
np_predictions, shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(np_labels, shape=(1, 4))
error, update_op = metrics.streaming_mean_relative_error(
predictions, labels, normalizer=labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(expected_error, sess.run(update_op))
self.assertEqual(expected_error, error.eval())
def testSingleUpdateNormalizedByZeros(self):
np_predictions = np.asarray([2, 4, 6, 8], dtype=np.float32)
predictions = constant_op.constant(
np_predictions, shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_relative_error(
predictions, labels, normalizer=array_ops.zeros_like(labels))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0.0, sess.run(update_op))
self.assertEqual(0.0, error.eval())
class StreamingMeanSquaredErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_squared_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self, ('mean_squared_error/count:0', 'mean_squared_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_mean_squared_error(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateZeroError(self):
predictions = array_ops.zeros((1, 3), dtype=dtypes_lib.float32)
labels = array_ops.zeros((1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_squared_error(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithError(self):
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_squared_error(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(6, sess.run(update_op))
self.assertEqual(6, error.eval())
def testSingleUpdateWithErrorAndWeights(self):
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
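    # Weighted squared errors are (4 - 3)**2 = 1 and (8 - 3)**2 = 25,
    # so the weighted mean squared error is (1 + 25) / 2 = 13.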
error, update_op = metrics.streaming_mean_squared_error(predictions, labels,
weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(13, sess.run(update_op))
self.assertEqual(13, error.eval())
def testMultipleBatchesOfSizeOne(self):
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue, [10, 8, 6])
_enqueue_vector(sess, preds_queue, [-4, 3, -1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue, [1, 3, 2])
_enqueue_vector(sess, labels_queue, [2, 4, 6])
labels = labels_queue.dequeue()
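      # Squared errors total 122 for the first batch and 86 for the second,
      # giving a mean of 208 / 6 over all six values.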
error, update_op = metrics.streaming_mean_squared_error(predictions,
labels)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(208.0 / 6, sess.run(update_op), 5)
self.assertAlmostEqual(208.0 / 6, error.eval(), 5)
def testMetricsComputedConcurrently(self):
with self.test_session() as sess:
# Create the queue that populates one set of predictions.
preds_queue0 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue0, [10, 8, 6])
_enqueue_vector(sess, preds_queue0, [-4, 3, -1])
predictions0 = preds_queue0.dequeue()
      # Create the queue that populates another set of predictions.
preds_queue1 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue1, [0, 1, 1])
_enqueue_vector(sess, preds_queue1, [1, 1, 0])
predictions1 = preds_queue1.dequeue()
# Create the queue that populates one set of labels.
labels_queue0 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue0, [1, 3, 2])
_enqueue_vector(sess, labels_queue0, [2, 4, 6])
labels0 = labels_queue0.dequeue()
# Create the queue that populates another set of labels.
labels_queue1 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue1, [-5, -3, -1])
_enqueue_vector(sess, labels_queue1, [5, 4, 3])
labels1 = labels_queue1.dequeue()
mse0, update_op0 = metrics.streaming_mean_squared_error(
predictions0, labels0, name='msd0')
mse1, update_op1 = metrics.streaming_mean_squared_error(
predictions1, labels1, name='msd1')
sess.run(variables.local_variables_initializer())
sess.run([update_op0, update_op1])
sess.run([update_op0, update_op1])
mse0, mse1 = sess.run([mse0, mse1])
self.assertAlmostEqual(208.0 / 6, mse0, 5)
self.assertAlmostEqual(79.0 / 6, mse1, 5)
def testMultipleMetricsOnMultipleBatchesOfSizeOne(self):
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue, [10, 8, 6])
_enqueue_vector(sess, preds_queue, [-4, 3, -1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue, [1, 3, 2])
_enqueue_vector(sess, labels_queue, [2, 4, 6])
labels = labels_queue.dequeue()
mae, ma_update_op = metrics.streaming_mean_absolute_error(predictions,
labels)
mse, ms_update_op = metrics.streaming_mean_squared_error(predictions,
labels)
sess.run(variables.local_variables_initializer())
sess.run([ma_update_op, ms_update_op])
sess.run([ma_update_op, ms_update_op])
self.assertAlmostEqual(32.0 / 6, mae.eval(), 5)
self.assertAlmostEqual(208.0 / 6, mse.eval(), 5)
class StreamingRootMeanSquaredErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_root_mean_squared_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self,
('root_mean_squared_error/count:0', 'root_mean_squared_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_root_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_root_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_root_mean_squared_error(predictions,
labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateZeroError(self):
with self.test_session() as sess:
predictions = constant_op.constant(
0.0, shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(0.0, shape=(1, 3), dtype=dtypes_lib.float32)
rmse, update_op = metrics.streaming_root_mean_squared_error(predictions,
labels)
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, rmse.eval())
def testSingleUpdateWithError(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
rmse, update_op = metrics.streaming_root_mean_squared_error(predictions,
labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(math.sqrt(6), update_op.eval(), 5)
self.assertAlmostEqual(math.sqrt(6), rmse.eval(), 5)
def testSingleUpdateWithErrorAndWeights(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
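      # The weighted MSE is (1 + 25) / 2 = 13, so the RMSE is sqrt(13).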
rmse, update_op = metrics.streaming_root_mean_squared_error(predictions,
labels,
weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(math.sqrt(13), sess.run(update_op))
self.assertAlmostEqual(math.sqrt(13), rmse.eval(), 5)
class StreamingCovarianceTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_covariance(
predictions=math_ops.to_float(math_ops.range(10)) + array_ops.ones(
[10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]))
_assert_metric_variables(self, (
'covariance/comoment:0',
'covariance/count:0',
'covariance/mean_label:0',
'covariance/mean_prediction:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
cov, _ = metrics.streaming_covariance(
predictions=math_ops.to_float(math_ops.range(10)) + array_ops.ones(
[10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [cov])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_covariance(
predictions=math_ops.to_float(math_ops.range(10)) + array_ops.ones(
[10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
labels = random_ops.random_normal((10, 3), seed=2)
predictions = labels * 0.5 + random_ops.random_normal((10, 3), seed=1) * 0.5
cov, update_op = metrics.streaming_covariance(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_cov = cov.eval()
for _ in range(10):
self.assertEqual(initial_cov, cov.eval())
def testSingleUpdateIdentical(self):
with self.test_session() as sess:
predictions = math_ops.to_float(math_ops.range(10))
labels = math_ops.to_float(math_ops.range(10))
cov, update_op = metrics.streaming_covariance(predictions, labels)
expected_cov = np.cov(np.arange(10), np.arange(10))[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_cov, sess.run(update_op), 5)
self.assertAlmostEqual(expected_cov, cov.eval(), 5)
def testSingleUpdateNonIdentical(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
cov, update_op = metrics.streaming_covariance(predictions, labels)
expected_cov = np.cov([2, 4, 6], [1, 3, 2])[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_cov, update_op.eval())
self.assertAlmostEqual(expected_cov, cov.eval())
def testSingleUpdateWithErrorAndWeights(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 7], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[0, 1, 3, 1], shape=(1, 4), dtype=dtypes_lib.float32)
cov, update_op = metrics.streaming_covariance(
predictions, labels, weights=weights)
expected_cov = np.cov([2, 4, 6, 8],
[1, 3, 2, 7],
fweights=[0, 1, 3, 1])[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_cov, sess.run(update_op))
self.assertAlmostEqual(expected_cov, cov.eval())
def testMultiUpdateWithErrorNoWeights(self):
with self.test_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
cov, update_op = metrics.streaming_covariance(predictions_t, labels_t)
sess.run(variables.local_variables_initializer())
prev_expected_cov = NAN
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)]
}
self.assertEqual(np.isnan(prev_expected_cov),
np.isnan(sess.run(cov, feed_dict=feed_dict)))
if not np.isnan(prev_expected_cov):
self.assertAlmostEqual(
prev_expected_cov, sess.run(cov, feed_dict=feed_dict), 5)
expected_cov = np.cov(predictions[:stride * (i + 1)],
labels[:stride * (i + 1)])[0, 1]
self.assertAlmostEqual(
expected_cov, sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(
expected_cov, sess.run(cov, feed_dict=feed_dict), 5)
prev_expected_cov = expected_cov
def testMultiUpdateWithErrorAndWeights(self):
with self.test_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
weights = np.tile(np.arange(n // 10), n // 10)
np.random.shuffle(weights)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
weights_t = array_ops.placeholder(dtypes_lib.float32, [stride])
cov, update_op = metrics.streaming_covariance(
predictions_t, labels_t, weights=weights_t)
sess.run(variables.local_variables_initializer())
prev_expected_cov = NAN
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)],
weights_t: weights[stride * i:stride * (i + 1)]
}
self.assertEqual(np.isnan(prev_expected_cov),
np.isnan(sess.run(cov, feed_dict=feed_dict)))
if not np.isnan(prev_expected_cov):
self.assertAlmostEqual(
prev_expected_cov, sess.run(cov, feed_dict=feed_dict), 5)
expected_cov = np.cov(predictions[:stride * (i + 1)],
labels[:stride * (i + 1)],
fweights=weights[:stride * (i + 1)])[0, 1]
self.assertAlmostEqual(
expected_cov, sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(
expected_cov, sess.run(cov, feed_dict=feed_dict), 5)
prev_expected_cov = expected_cov
class StreamingPearsonRTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_pearson_correlation(
predictions=math_ops.to_float(math_ops.range(10)) + array_ops.ones(
[10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]))
_assert_metric_variables(self, (
'pearson_r/covariance/comoment:0',
'pearson_r/covariance/count:0',
'pearson_r/covariance/mean_label:0',
'pearson_r/covariance/mean_prediction:0',
'pearson_r/variance_labels/count:0',
'pearson_r/variance_labels/comoment:0',
'pearson_r/variance_labels/mean_label:0',
'pearson_r/variance_labels/mean_prediction:0',
'pearson_r/variance_predictions/comoment:0',
'pearson_r/variance_predictions/count:0',
'pearson_r/variance_predictions/mean_label:0',
'pearson_r/variance_predictions/mean_prediction:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
pearson_r, _ = metrics.streaming_pearson_correlation(
predictions=math_ops.to_float(math_ops.range(10)) + array_ops.ones(
[10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [pearson_r])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_pearson_correlation(
predictions=math_ops.to_float(math_ops.range(10)) + array_ops.ones(
[10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
labels = random_ops.random_normal((10, 3), seed=2)
predictions = labels * 0.5 + random_ops.random_normal((10, 3), seed=1) * 0.5
pearson_r, update_op = metrics.streaming_pearson_correlation(predictions,
labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_r = pearson_r.eval()
for _ in range(10):
self.assertEqual(initial_r, pearson_r.eval())
def testSingleUpdateIdentical(self):
with self.test_session() as sess:
predictions = math_ops.to_float(math_ops.range(10))
labels = math_ops.to_float(math_ops.range(10))
pearson_r, update_op = metrics.streaming_pearson_correlation(predictions,
labels)
expected_r = np.corrcoef(np.arange(10), np.arange(10))[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_r, sess.run(update_op), 5)
self.assertAlmostEqual(expected_r, pearson_r.eval(), 5)
def testSingleUpdateNonIdentical(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
pearson_r, update_op = metrics.streaming_pearson_correlation(predictions,
labels)
expected_r = np.corrcoef([2, 4, 6], [1, 3, 2])[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_r, update_op.eval())
self.assertAlmostEqual(expected_r, pearson_r.eval())
def testSingleUpdateWithErrorAndWeights(self):
with self.test_session() as sess:
predictions = np.array([2, 4, 6, 8])
labels = np.array([1, 3, 2, 7])
weights = np.array([0, 1, 3, 1])
predictions_t = constant_op.constant(
predictions, shape=(1, 4), dtype=dtypes_lib.float32)
labels_t = constant_op.constant(
labels, shape=(1, 4), dtype=dtypes_lib.float32)
weights_t = constant_op.constant(
weights, shape=(1, 4), dtype=dtypes_lib.float32)
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t, weights=weights_t)
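      # Compute the expected r from the weighted covariance matrix:
      # r = cov(x, y) / sqrt(var(x) * var(y)).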
cmat = np.cov(predictions, labels, fweights=weights)
expected_r = cmat[0, 1] / np.sqrt(cmat[0, 0] * cmat[1, 1])
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_r, sess.run(update_op))
self.assertAlmostEqual(expected_r, pearson_r.eval())
def testMultiUpdateWithErrorNoWeights(self):
with self.test_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t)
sess.run(variables.local_variables_initializer())
prev_expected_r = NAN
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)]
}
self.assertEqual(np.isnan(prev_expected_r),
np.isnan(sess.run(pearson_r, feed_dict=feed_dict)))
if not np.isnan(prev_expected_r):
self.assertAlmostEqual(
prev_expected_r, sess.run(pearson_r, feed_dict=feed_dict), 5)
expected_r = np.corrcoef(predictions[:stride * (i + 1)],
labels[:stride * (i + 1)])[0, 1]
self.assertAlmostEqual(
expected_r, sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(
expected_r, sess.run(pearson_r, feed_dict=feed_dict), 5)
prev_expected_r = expected_r
def testMultiUpdateWithErrorAndWeights(self):
with self.test_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
weights = np.tile(np.arange(n // 10), n // 10)
np.random.shuffle(weights)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
weights_t = array_ops.placeholder(dtypes_lib.float32, [stride])
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t, weights=weights_t)
sess.run(variables.local_variables_initializer())
prev_expected_r = NAN
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)],
weights_t: weights[stride * i:stride * (i + 1)]
}
self.assertEqual(np.isnan(prev_expected_r),
np.isnan(sess.run(pearson_r, feed_dict=feed_dict)))
if not np.isnan(prev_expected_r):
self.assertAlmostEqual(
prev_expected_r, sess.run(pearson_r, feed_dict=feed_dict), 5)
cmat = np.cov(predictions[:stride * (i + 1)],
labels[:stride * (i + 1)],
fweights=weights[:stride * (i + 1)])
expected_r = cmat[0, 1] / np.sqrt(cmat[0, 0] * cmat[1, 1])
self.assertAlmostEqual(
expected_r, sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(
expected_r, sess.run(pearson_r, feed_dict=feed_dict), 5)
prev_expected_r = expected_r
def testMultiUpdateWithErrorAndSingletonBatches(self):
with self.test_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
stride = 10
weights = (np.arange(n).reshape(n//stride, stride) % stride == 0)
for row in weights:
np.random.shuffle(row)
# Now, weights is one-hot by row - one item per batch has non-zero weight.
weights = weights.reshape((n,))
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
weights_t = array_ops.placeholder(dtypes_lib.float32, [stride])
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t, weights=weights_t)
sess.run(variables.local_variables_initializer())
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)],
weights_t: weights[stride * i:stride * (i + 1)]
}
cmat = np.cov(predictions[:stride * (i + 1)],
labels[:stride * (i + 1)],
fweights=weights[:stride * (i + 1)])
expected_r = cmat[0, 1] / np.sqrt(cmat[0, 0] * cmat[1, 1])
actual_r = sess.run(update_op, feed_dict=feed_dict)
self.assertEqual(np.isnan(expected_r), np.isnan(actual_r))
self.assertEqual(np.isnan(expected_r),
np.isnan(sess.run(pearson_r, feed_dict=feed_dict)))
if not np.isnan(expected_r):
self.assertAlmostEqual(
expected_r, actual_r, 5)
self.assertAlmostEqual(
expected_r, sess.run(pearson_r, feed_dict=feed_dict), 5)
class StreamingMeanCosineDistanceTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1)
_assert_metric_variables(self, (
'mean_cosine_distance/count:0',
'mean_cosine_distance/total:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=1)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateZeroError(self):
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
predictions = constant_op.constant(
np_labels, shape=(1, 3, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(1, 3, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithError1(self):
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
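    # Row-wise cosine distances: 0 (identical), 2 (opposite), 1 (orthogonal);
    # their mean is 1.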
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 5)
self.assertAlmostEqual(1, error.eval(), 5)
def testSingleUpdateWithError2(self):
np_predictions = np.matrix(
('0.819031913261206 0.567041924552012 0.087465312324590;'
'-0.665139432070255 -0.739487441769973 -0.103671883216994;'
'0.707106781186548 -0.707106781186548 0'))
np_labels = np.matrix(
('0.819031913261206 0.567041924552012 0.087465312324590;'
'0.665139432070255 0.739487441769973 0.103671883216994;'
'0.707106781186548 0.707106781186548 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1.0, sess.run(update_op), 5)
self.assertAlmostEqual(1.0, error.eval(), 5)
def testSingleUpdateWithErrorAndWeights1(self):
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[1, 0, 0], shape=(3, 1, 1), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithErrorAndWeights2(self):
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[0, 1, 1], shape=(3, 1, 1), dtype=dtypes_lib.float32)
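    # Only the opposite (distance 2) and orthogonal (distance 1) rows are
    # weighted in, so the mean distance is 3 / 2 = 1.5.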
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1.5, update_op.eval())
self.assertEqual(1.5, error.eval())
class PcntBelowThreshTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_percentage_less(values=array_ops.ones((10,)), threshold=2)
_assert_metric_variables(self, (
'percentage_below_threshold/count:0',
'percentage_below_threshold/total:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_percentage_less(
values=array_ops.ones((10,)),
threshold=2,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_percentage_less(
values=array_ops.ones((10,)),
threshold=2,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testOneUpdate(self):
with self.test_session() as sess:
values = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
pcnt0, update_op0 = metrics.streaming_percentage_less(
values, 100, name='high')
pcnt1, update_op1 = metrics.streaming_percentage_less(
values, 7, name='medium')
pcnt2, update_op2 = metrics.streaming_percentage_less(
values, 1, name='low')
sess.run(variables.local_variables_initializer())
sess.run([update_op0, update_op1, update_op2])
pcnt0, pcnt1, pcnt2 = sess.run([pcnt0, pcnt1, pcnt2])
self.assertAlmostEqual(1.0, pcnt0, 5)
self.assertAlmostEqual(0.75, pcnt1, 5)
self.assertAlmostEqual(0.0, pcnt2, 5)
def testSomePresentOneUpdate(self):
with self.test_session() as sess:
values = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[1, 0, 0, 1], shape=(1, 4), dtype=dtypes_lib.float32)
pcnt0, update_op0 = metrics.streaming_percentage_less(
values, 100, weights=weights, name='high')
pcnt1, update_op1 = metrics.streaming_percentage_less(
values, 7, weights=weights, name='medium')
pcnt2, update_op2 = metrics.streaming_percentage_less(
values, 1, weights=weights, name='low')
sess.run(variables.local_variables_initializer())
self.assertListEqual([1.0, 0.5, 0.0],
sess.run([update_op0, update_op1, update_op2]))
pcnt0, pcnt1, pcnt2 = sess.run([pcnt0, pcnt1, pcnt2])
self.assertAlmostEqual(1.0, pcnt0, 5)
self.assertAlmostEqual(0.5, pcnt1, 5)
self.assertAlmostEqual(0.0, pcnt2, 5)
class StreamingMeanIOUTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2)
_assert_metric_variables(self, ('mean_iou/total_confusion_matrix:0',))
def testMetricsCollections(self):
my_collection_name = '__metrics__'
mean_iou, _ = metrics.streaming_mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean_iou])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testPredictionsAndLabelsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones([10, 3])
labels = array_ops.ones([10, 4])
with self.assertRaises(ValueError):
metrics.streaming_mean_iou(predictions, labels, num_classes=2)
def testLabelsAndWeightsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones([10])
labels = array_ops.ones([10])
weights = array_ops.zeros([9])
with self.assertRaises(ValueError):
metrics.streaming_mean_iou(
predictions, labels, num_classes=2, weights=weights)
def testValueTensorIsIdempotent(self):
num_classes = 3
predictions = random_ops.random_uniform(
[10], maxval=num_classes, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
[10], maxval=num_classes, dtype=dtypes_lib.int64, seed=2)
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes=num_classes)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_miou = miou.eval()
for _ in range(10):
self.assertEqual(initial_miou, miou.eval())
def testMultipleUpdates(self):
num_classes = 3
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
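      # Per-class IOU from the accumulated confusion matrix is [1/2, 1/4, 0],
      # whose mean is the expected result below.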
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
for _ in range(5):
sess.run(update_op)
desired_output = np.mean([1.0 / 2.0, 1.0 / 4.0, 0.])
self.assertEqual(desired_output, miou.eval())
def testMultipleUpdatesWithWeights(self):
num_classes = 2
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [0.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [0.0])
weights = weights_queue.dequeue()
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes, weights=weights)
sess.run(variables.local_variables_initializer())
for _ in range(6):
sess.run(update_op)
desired_output = np.mean([2.0 / 3.0, 1.0 / 2.0])
self.assertAlmostEqual(desired_output, miou.eval())
def testMultipleUpdatesWithMissingClass(self):
    # Test the case where there are no predictions or labels for
    # one class, and thus there is one row and one column with
    # zero entries in the confusion matrix.
num_classes = 3
with self.test_session() as sess:
# Create the queue that populates the predictions.
# There is no prediction for class 2.
preds_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
      # There is a label for class 2.
labels_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
for _ in range(5):
sess.run(update_op)
desired_output = np.mean([1.0 / 3.0, 2.0 / 4.0])
self.assertAlmostEqual(desired_output, miou.eval())
def testUpdateOpEvalIsAccumulatedConfusionMatrix(self):
    predictions = array_ops.concat(
        [constant_op.constant(0, shape=[5]),
         constant_op.constant(1, shape=[5])], 0)
    labels = array_ops.concat(
        [constant_op.constant(0, shape=[3]),
         constant_op.constant(1, shape=[7])], 0)
num_classes = 2
with self.test_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
confusion_matrix = update_op.eval()
self.assertAllEqual([[3, 0], [2, 5]], confusion_matrix)
desired_miou = np.mean([3. / 5., 5. / 7.])
self.assertAlmostEqual(desired_miou, miou.eval())
def testAllCorrect(self):
predictions = array_ops.zeros([40])
labels = array_ops.zeros([40])
num_classes = 1
with self.test_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
self.assertEqual(40, update_op.eval()[0])
self.assertEqual(1.0, miou.eval())
def testAllWrong(self):
predictions = array_ops.zeros([40])
labels = array_ops.ones([40])
num_classes = 2
with self.test_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[0, 0], [40, 0]], update_op.eval())
self.assertEqual(0., miou.eval())
def testResultsWithSomeMissing(self):
    predictions = array_ops.concat(
        [constant_op.constant(0, shape=[5]),
         constant_op.constant(1, shape=[5])], 0)
    labels = array_ops.concat(
        [constant_op.constant(0, shape=[3]),
         constant_op.constant(1, shape=[7])], 0)
num_classes = 2
    weights = array_ops.concat(
        [constant_op.constant(0, shape=[1]),
         constant_op.constant(1, shape=[8]),
         constant_op.constant(0, shape=[1])], 0)
with self.test_session() as sess:
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[2, 0], [2, 4]], update_op.eval())
desired_miou = np.mean([2. / 4., 4. / 6.])
self.assertAlmostEqual(desired_miou, miou.eval())
def testMissingClassInLabels(self):
labels = constant_op.constant([
[[0, 0, 1, 1, 0, 0],
[1, 0, 0, 0, 0, 1]],
[[1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0]]])
predictions = constant_op.constant([
[[0, 0, 2, 1, 1, 0],
[0, 1, 2, 2, 0, 1]],
[[0, 0, 2, 1, 1, 1],
[1, 1, 2, 0, 0, 0]]])
num_classes = 3
with self.test_session() as sess:
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[7, 4, 3], [3, 5, 2], [0, 0, 0]], update_op.eval())
self.assertAlmostEqual(
1 / 3 * (7 / (7 + 3 + 7) + 5 / (5 + 4 + 5) + 0 / (0 + 5 + 0)),
miou.eval())
def testMissingClassOverallSmall(self):
labels = constant_op.constant([0])
predictions = constant_op.constant([0])
num_classes = 2
with self.test_session() as sess:
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[1, 0], [0, 0]], update_op.eval())
self.assertAlmostEqual(1, miou.eval())
def testMissingClassOverallLarge(self):
labels = constant_op.constant([
[[0, 0, 1, 1, 0, 0],
[1, 0, 0, 0, 0, 1]],
[[1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0]]])
predictions = constant_op.constant([
[[0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1]],
[[0, 0, 0, 1, 1, 1],
[1, 1, 1, 0, 0, 0]]])
num_classes = 3
with self.test_session() as sess:
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[9, 5, 0], [3, 7, 0], [0, 0, 0]], update_op.eval())
self.assertAlmostEqual(
1 / 2 * (9 / (9 + 3 + 5) + 7 / (7 + 5 + 3)), miou.eval())
class StreamingConcatTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_concat(values=array_ops.ones((10,)))
_assert_metric_variables(self, (
'streaming_concat/array:0',
'streaming_concat/size:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
value, _ = metrics.streaming_concat(
values=array_ops.ones((10,)), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [value])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_concat(
values=array_ops.ones((10,)), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testNextArraySize(self):
next_array_size = metric_ops._next_array_size # pylint: disable=protected-access
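    # _next_array_size should round up to the next power of growth_factor.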
with self.test_session():
self.assertEqual(next_array_size(2, growth_factor=2).eval(), 2)
self.assertEqual(next_array_size(3, growth_factor=2).eval(), 4)
self.assertEqual(next_array_size(4, growth_factor=2).eval(), 4)
self.assertEqual(next_array_size(5, growth_factor=2).eval(), 8)
self.assertEqual(next_array_size(6, growth_factor=2).eval(), 8)
def testStreamingConcat(self):
with self.test_session() as sess:
values = array_ops.placeholder(dtypes_lib.int32, [None])
concatenated, update_op = metrics.streaming_concat(values)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([], concatenated.eval())
sess.run([update_op], feed_dict={values: [0, 1, 2]})
self.assertAllEqual([0, 1, 2], concatenated.eval())
sess.run([update_op], feed_dict={values: [3, 4]})
self.assertAllEqual([0, 1, 2, 3, 4], concatenated.eval())
sess.run([update_op], feed_dict={values: [5, 6, 7, 8, 9]})
self.assertAllEqual(np.arange(10), concatenated.eval())
def testStreamingConcatStringValues(self):
with self.test_session() as sess:
values = array_ops.placeholder(dtypes_lib.string, [None])
concatenated, update_op = metrics.streaming_concat(values)
sess.run(variables.local_variables_initializer())
self.assertItemsEqual([], concatenated.eval())
sess.run([update_op], feed_dict={values: ['a', 'b', 'c']})
self.assertItemsEqual([b'a', b'b', b'c'], concatenated.eval())
sess.run([update_op], feed_dict={values: ['d', 'e']})
self.assertItemsEqual([b'a', b'b', b'c', b'd', b'e'], concatenated.eval())
sess.run([update_op], feed_dict={values: ['f', 'g', 'h', 'i', 'j']})
self.assertItemsEqual(
[b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h', b'i', b'j'],
concatenated.eval())
def testStreamingConcatMaxSize(self):
with self.test_session() as sess:
values = math_ops.range(3)
concatenated, update_op = metrics.streaming_concat(values, max_size=5)
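      # With max_size=5 the buffer grows to [0, 1, 2, 0, 1] and then stops.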
sess.run(variables.local_variables_initializer())
self.assertAllEqual([], concatenated.eval())
sess.run([update_op])
self.assertAllEqual([0, 1, 2], concatenated.eval())
sess.run([update_op])
self.assertAllEqual([0, 1, 2, 0, 1], concatenated.eval())
sess.run([update_op])
self.assertAllEqual([0, 1, 2, 0, 1], concatenated.eval())
def testStreamingConcat2D(self):
with self.test_session() as sess:
values = array_ops.reshape(math_ops.range(3), (3, 1))
concatenated, update_op = metrics.streaming_concat(values, axis=-1)
sess.run(variables.local_variables_initializer())
for _ in range(10):
sess.run([update_op])
self.assertAllEqual([[0] * 10, [1] * 10, [2] * 10], concatenated.eval())
def testStreamingConcatErrors(self):
with self.assertRaises(ValueError):
metrics.streaming_concat(array_ops.placeholder(dtypes_lib.float32))
values = array_ops.zeros((2, 3))
with self.assertRaises(ValueError):
metrics.streaming_concat(values, axis=-3, max_size=3)
with self.assertRaises(ValueError):
metrics.streaming_concat(values, axis=2, max_size=3)
with self.assertRaises(ValueError):
metrics.streaming_concat(
array_ops.placeholder(dtypes_lib.float32, [None, None]))
def testStreamingConcatReset(self):
with self.test_session() as sess:
values = array_ops.placeholder(dtypes_lib.int32, [None])
concatenated, update_op = metrics.streaming_concat(values)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([], concatenated.eval())
sess.run([update_op], feed_dict={values: [0, 1, 2]})
self.assertAllEqual([0, 1, 2], concatenated.eval())
sess.run(variables.local_variables_initializer())
sess.run([update_op], feed_dict={values: [3, 4]})
self.assertAllEqual([3, 4], concatenated.eval())
class AggregateMetricsTest(test.TestCase):
def testAggregateNoMetricsRaisesValueError(self):
with self.assertRaises(ValueError):
metrics.aggregate_metrics()
def testAggregateSingleMetricReturnsOneItemLists(self):
values = array_ops.ones((10, 4))
value_tensors, update_ops = metrics.aggregate_metrics(
metrics.streaming_mean(values))
self.assertEqual(len(value_tensors), 1)
self.assertEqual(len(update_ops), 1)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, update_ops[0].eval())
self.assertEqual(1, value_tensors[0].eval())
def testAggregateMultipleMetricsReturnsListsInOrder(self):
predictions = array_ops.ones((10, 4))
labels = array_ops.ones((10, 4)) * 3
value_tensors, update_ops = metrics.aggregate_metrics(
metrics.streaming_mean_absolute_error(predictions, labels),
metrics.streaming_mean_squared_error(predictions, labels))
self.assertEqual(len(value_tensors), 2)
self.assertEqual(len(update_ops), 2)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(2, update_ops[0].eval())
self.assertEqual(4, update_ops[1].eval())
self.assertEqual(2, value_tensors[0].eval())
self.assertEqual(4, value_tensors[1].eval())
class AggregateMetricMapTest(test.TestCase):
def testAggregateMultipleMetricsReturnsListsInOrder(self):
predictions = array_ops.ones((10, 4))
labels = array_ops.ones((10, 4)) * 3
names_to_values, names_to_updates = metrics.aggregate_metric_map({
'm1': metrics.streaming_mean_absolute_error(predictions, labels),
'm2': metrics.streaming_mean_squared_error(predictions, labels),
})
self.assertEqual(2, len(names_to_values))
self.assertEqual(2, len(names_to_updates))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(2, names_to_updates['m1'].eval())
self.assertEqual(4, names_to_updates['m2'].eval())
self.assertEqual(2, names_to_values['m1'].eval())
self.assertEqual(4, names_to_values['m2'].eval())
class CountTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.count(array_ops.ones([4, 3]))
_assert_metric_variables(self, ['count/count:0'])
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.count(
array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.count(
array_ops.ones([4, 3]), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testBasic(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
result, update_op = metrics.count(values)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAlmostEqual(8.0, sess.run(result), 5)
def testUpdateOpsReturnsCurrentValue(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
result, update_op = metrics.count(values)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(2.0, sess.run(update_op), 5)
self.assertAlmostEqual(4.0, sess.run(update_op), 5)
self.assertAlmostEqual(6.0, sess.run(update_op), 5)
self.assertAlmostEqual(8.0, sess.run(update_op), 5)
self.assertAlmostEqual(8.0, sess.run(result), 5)
def test1dWeightedValues(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
      # Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [0.5])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [1.2])
weights = weights_queue.dequeue()
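      # Each scalar weight covers a row of two values: (0.5 + 1.2) * 2 = 3.4.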
result, update_op = metrics.count(values, weights)
variables.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
self.assertAlmostEqual(3.4, result.eval(), 5)
def test1dWeightedValues_placeholders(self):
with self.test_session() as sess:
      # Feed the values via a placeholder rather than a queue.
feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
values = array_ops.placeholder(dtype=dtypes_lib.float32)
      # Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1,))
_enqueue_vector(sess, weights_queue, 0.5, shape=(1,))
_enqueue_vector(sess, weights_queue, 0, shape=(1,))
_enqueue_vector(sess, weights_queue, 0, shape=(1,))
_enqueue_vector(sess, weights_queue, 1.2, shape=(1,))
weights = weights_queue.dequeue()
result, update_op = metrics.count(values, weights)
variables.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual(3.4, result.eval(), 5)
def test2dWeightedValues(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
      # Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [1.1, 1])
_enqueue_vector(sess, weights_queue, [1, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
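      # The element-wise weights sum to 1.1 + 1 + 1 + 1 = 4.1.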
result, update_op = metrics.count(values, weights)
variables.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
self.assertAlmostEqual(4.1, result.eval(), 5)
def test2dWeightedValues_placeholders(self):
with self.test_session() as sess:
      # Feed the values via a placeholder rather than a queue.
feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
values = array_ops.placeholder(dtype=dtypes_lib.float32)
      # Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(2,))
_enqueue_vector(sess, weights_queue, [1.1, 1], shape=(2,))
_enqueue_vector(sess, weights_queue, [1, 0], shape=(2,))
_enqueue_vector(sess, weights_queue, [0, 1], shape=(2,))
_enqueue_vector(sess, weights_queue, [0, 0], shape=(2,))
weights = weights_queue.dequeue()
result, update_op = metrics.count(values, weights)
variables.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual(4.1, result.eval(), 5)
if __name__ == '__main__':
test.main()
|
Kongsea/tensorflow
|
tensorflow/contrib/metrics/python/ops/metric_ops_test.py
|
Python
|
apache-2.0
| 258,475
|
# Copyright (c) 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hmac
import unittest
from hashlib import sha1
from StringIO import StringIO
from time import time
from swift.common.swob import Request, Response
from swift.common.middleware import tempauth, formpost
from swift.common.utils import split_path
class FakeApp(object):
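    # Minimal WSGI app stub: records each request it receives and replays
    # canned (status, headers, body) tuples from status_headers_body_iter.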
def __init__(self, status_headers_body_iter=None,
check_no_query_string=True):
self.status_headers_body_iter = status_headers_body_iter
if not self.status_headers_body_iter:
self.status_headers_body_iter = iter([('404 Not Found', {
'x-test-header-one-a': 'value1',
'x-test-header-two-a': 'value2',
'x-test-header-two-b': 'value3'}, '')])
self.requests = []
self.check_no_query_string = check_no_query_string
def __call__(self, env, start_response):
try:
if self.check_no_query_string and env.get('QUERY_STRING'):
raise Exception('Query string %s should have been discarded!' %
env['QUERY_STRING'])
body = ''
while True:
chunk = env['wsgi.input'].read()
if not chunk:
break
body += chunk
env['wsgi.input'] = StringIO(body)
self.requests.append(Request.blank('', environ=env))
if env.get('swift.authorize_override') and \
env.get('REMOTE_USER') != '.wsgi.pre_authed':
raise Exception(
'Invalid REMOTE_USER %r with swift.authorize_override' % (
env.get('REMOTE_USER'),))
if 'swift.authorize' in env:
resp = env['swift.authorize'](self.requests[-1])
if resp:
return resp(env, start_response)
status, headers, body = self.status_headers_body_iter.next()
return Response(status=status, headers=headers,
body=body)(env, start_response)
except EOFError:
start_response('499 Client Disconnect',
[('Content-Type', 'text/plain')])
return ['Client Disconnect\n']
class TestCappedFileLikeObject(unittest.TestCase):
def test_whole(self):
self.assertEquals(
formpost._CappedFileLikeObject(StringIO('abc'), 10).read(), 'abc')
def test_exceeded(self):
exc = None
try:
formpost._CappedFileLikeObject(StringIO('abc'), 2).read()
except EOFError as err:
exc = err
self.assertEquals(str(exc), 'max_file_size exceeded')
def test_whole_readline(self):
fp = formpost._CappedFileLikeObject(StringIO('abc\ndef'), 10)
self.assertEquals(fp.readline(), 'abc\n')
self.assertEquals(fp.readline(), 'def')
self.assertEquals(fp.readline(), '')
def test_exceeded_readline(self):
fp = formpost._CappedFileLikeObject(StringIO('abc\ndef'), 5)
self.assertEquals(fp.readline(), 'abc\n')
exc = None
try:
self.assertEquals(fp.readline(), 'def')
except EOFError as err:
exc = err
self.assertEquals(str(exc), 'max_file_size exceeded')
def test_read_sized(self):
fp = formpost._CappedFileLikeObject(StringIO('abcdefg'), 10)
self.assertEquals(fp.read(2), 'ab')
self.assertEquals(fp.read(2), 'cd')
self.assertEquals(fp.read(2), 'ef')
self.assertEquals(fp.read(2), 'g')
self.assertEquals(fp.read(2), '')
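# For orientation only -- the tests above pin down _CappedFileLikeObject's
# contract, so a minimal sketch consistent with them looks like the
# following (the real implementation lives in
# swift.common.middleware.formpost and may differ in detail):
#
#     class _CappedFileLikeObject(object):
#         def __init__(self, fp, max_file_size):
#             self.fp = fp
#             self.max_file_size = max_file_size
#             self.read_so_far = 0
#
#         def _check(self, chunk):
#             self.read_so_far += len(chunk)
#             if self.read_so_far > self.max_file_size:
#                 raise EOFError('max_file_size exceeded')
#             return chunk
#
#         def read(self, size=None):
#             return self._check(
#                 self.fp.read(size) if size is not None else self.fp.read())
#
#         def readline(self):
#             return self._check(self.fp.readline())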
class TestFormPost(unittest.TestCase):
def setUp(self):
self.app = FakeApp()
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
    def _make_request(self, path, tempurl_keys=(), **kwargs):
        req = Request.blank(path, **kwargs)
        _junk, account, _junk, _junk = split_path(path, 2, 4)
        req.environ['swift.account/' + account] = self._fake_cache_env(
            account, tempurl_keys)
        return req
def _fake_cache_env(self, account, tempurl_keys=()):
# Fake out the caching layer so that get_account_info() finds its
# data. Include something that isn't tempurl keys to prove we skip it.
meta = {'user-job-title': 'Personal Trainer',
'user-real-name': 'Jim Shortz'}
for idx, key in enumerate(tempurl_keys):
meta_name = 'temp-url-key' + ("-%d" % (idx + 1) if idx else "")
if key:
meta[meta_name] = key
return {'status': 204,
'container_count': '0',
'total_object_count': '0',
'bytes': '0',
'meta': meta}
def _make_sig_env_body(self, path, redirect, max_file_size, max_file_count,
expires, key, user_agent=True):
sig = hmac.new(
key,
'%s\n%s\n%s\n%s\n%s' % (
path, redirect, max_file_size, max_file_count, expires),
sha1).hexdigest()
body = [
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="redirect"',
'',
redirect,
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="max_file_size"',
'',
str(max_file_size),
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="max_file_count"',
'',
str(max_file_count),
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="expires"',
'',
str(expires),
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="signature"',
'',
sig,
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="file1"; '
'filename="testfile1.txt"',
'Content-Type: text/plain',
'',
'Test File\nOne\n',
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="file2"; '
'filename="testfile2.txt"',
'Content-Type: text/plain',
'',
'Test\nFile\nTwo\n',
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="file3"; filename=""',
'Content-Type: application/octet-stream',
'',
'',
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR--',
'',
]
wsgi_errors = StringIO()
env = {
'CONTENT_TYPE': 'multipart/form-data; '
'boundary=----WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'HTTP_ACCEPT_ENCODING': 'gzip, deflate',
'HTTP_ACCEPT_LANGUAGE': 'en-us',
'HTTP_ACCEPT': 'text/html,application/xhtml+xml,application/xml;'
'q=0.9,*/*;q=0.8',
'HTTP_CONNECTION': 'keep-alive',
'HTTP_HOST': 'ubuntu:8080',
'HTTP_ORIGIN': 'file://',
'HTTP_USER_AGENT': 'Mozilla/5.0 (Macintosh; Intel Mac OS X '
'10_7_2) AppleWebKit/534.52.7 (KHTML, like Gecko) '
'Version/5.1.2 Safari/534.52.7',
'PATH_INFO': path,
'REMOTE_ADDR': '172.16.83.1',
'REQUEST_METHOD': 'POST',
'SCRIPT_NAME': '',
'SERVER_NAME': '172.16.83.128',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'wsgi.errors': wsgi_errors,
'wsgi.multiprocess': False,
'wsgi.multithread': True,
'wsgi.run_once': False,
'wsgi.url_scheme': 'http',
'wsgi.version': (1, 0),
}
if user_agent is False:
del env['HTTP_USER_AGENT']
return sig, env, body
def test_passthrough(self):
for method in ('HEAD', 'GET', 'PUT', 'POST', 'DELETE'):
resp = self._make_request(
'/v1/a/c/o',
environ={'REQUEST_METHOD': method}).get_response(self.formpost)
self.assertEquals(resp.status_int, 401)
self.assertTrue('FormPost' not in resp.body)
def test_auth_scheme(self):
# FormPost rejects
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', '', 1024, 10, int(time() - 10), key)
env['wsgi.input'] = StringIO('\r\n'.join(body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '401 Unauthorized')
authenticate_v = None
for h, v in headers:
if h.lower() == 'www-authenticate':
authenticate_v = v
self.assertTrue('FormPost: Form Expired' in body)
self.assertEquals('Swift realm="unknown"', authenticate_v)
def test_safari(self):
key = 'abc'
path = '/v1/AUTH_test/container'
redirect = 'http://brim.net'
max_file_size = 1024
max_file_count = 10
expires = int(time() + 86400)
sig = hmac.new(
key,
'%s\n%s\n%s\n%s\n%s' % (
path, redirect, max_file_size, max_file_count, expires),
sha1).hexdigest()
wsgi_input = StringIO('\r\n'.join([
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="redirect"',
'',
redirect,
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="max_file_size"',
'',
str(max_file_size),
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="max_file_count"',
'',
str(max_file_count),
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="expires"',
'',
str(expires),
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="signature"',
'',
sig,
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="file1"; '
'filename="testfile1.txt"',
'Content-Type: text/plain',
'',
'Test File\nOne\n',
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="file2"; '
'filename="testfile2.txt"',
'Content-Type: text/plain',
'',
'Test\nFile\nTwo\n',
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="file3"; filename=""',
'Content-Type: application/octet-stream',
'',
'',
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR--',
'',
]))
wsgi_errors = StringIO()
env = {
'CONTENT_TYPE': 'multipart/form-data; '
'boundary=----WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'HTTP_ACCEPT_ENCODING': 'gzip, deflate',
'HTTP_ACCEPT_LANGUAGE': 'en-us',
'HTTP_ACCEPT': 'text/html,application/xhtml+xml,application/xml;'
'q=0.9,*/*;q=0.8',
'HTTP_CONNECTION': 'keep-alive',
'HTTP_HOST': 'ubuntu:8080',
'HTTP_ORIGIN': 'file://',
'HTTP_USER_AGENT': 'Mozilla/5.0 (Macintosh; Intel Mac OS X '
'10_7_2) AppleWebKit/534.52.7 (KHTML, like Gecko) '
'Version/5.1.2 Safari/534.52.7',
'PATH_INFO': path,
'REMOTE_ADDR': '172.16.83.1',
'REQUEST_METHOD': 'POST',
'SCRIPT_NAME': '',
'SERVER_NAME': '172.16.83.128',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'swift.account/AUTH_test': self._fake_cache_env(
'AUTH_test', [key]),
'swift.container/AUTH_test/container': {'meta': {}},
'wsgi.errors': wsgi_errors,
'wsgi.input': wsgi_input,
'wsgi.multiprocess': False,
'wsgi.multithread': True,
'wsgi.run_once': False,
'wsgi.url_scheme': 'http',
'wsgi.version': (1, 0),
}
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '303 See Other')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEquals(location, 'http://brim.net?status=201&message=')
self.assertEquals(exc_info, None)
self.assertTrue('http://brim.net?status=201&message=' in body)
self.assertEquals(len(self.app.requests), 2)
self.assertEquals(self.app.requests[0].body, 'Test File\nOne\n')
self.assertEquals(self.app.requests[1].body, 'Test\nFile\nTwo\n')
def test_firefox(self):
key = 'abc'
path = '/v1/AUTH_test/container'
redirect = 'http://brim.net'
max_file_size = 1024
max_file_count = 10
expires = int(time() + 86400)
sig = hmac.new(
key,
'%s\n%s\n%s\n%s\n%s' % (
path, redirect, max_file_size, max_file_count, expires),
sha1).hexdigest()
wsgi_input = StringIO('\r\n'.join([
'-----------------------------168072824752491622650073',
'Content-Disposition: form-data; name="redirect"',
'',
redirect,
'-----------------------------168072824752491622650073',
'Content-Disposition: form-data; name="max_file_size"',
'',
str(max_file_size),
'-----------------------------168072824752491622650073',
'Content-Disposition: form-data; name="max_file_count"',
'',
str(max_file_count),
'-----------------------------168072824752491622650073',
'Content-Disposition: form-data; name="expires"',
'',
str(expires),
'-----------------------------168072824752491622650073',
'Content-Disposition: form-data; name="signature"',
'',
sig,
'-----------------------------168072824752491622650073',
'Content-Disposition: form-data; name="file1"; '
'filename="testfile1.txt"',
'Content-Type: text/plain',
'',
'Test File\nOne\n',
'-----------------------------168072824752491622650073',
'Content-Disposition: form-data; name="file2"; '
'filename="testfile2.txt"',
'Content-Type: text/plain',
'',
'Test\nFile\nTwo\n',
'-----------------------------168072824752491622650073',
'Content-Disposition: form-data; name="file3"; filename=""',
'Content-Type: application/octet-stream',
'',
'',
'-----------------------------168072824752491622650073--',
''
]))
wsgi_errors = StringIO()
env = {
'CONTENT_TYPE': 'multipart/form-data; '
'boundary=---------------------------168072824752491622650073',
'HTTP_ACCEPT_CHARSET': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
'HTTP_ACCEPT_ENCODING': 'gzip, deflate',
'HTTP_ACCEPT_LANGUAGE': 'en-us,en;q=0.5',
'HTTP_ACCEPT': 'text/html,application/xhtml+xml,application/xml;'
'q=0.9,*/*;q=0.8',
'HTTP_CONNECTION': 'keep-alive',
'HTTP_HOST': 'ubuntu:8080',
'HTTP_USER_AGENT': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; '
'rv:8.0.1) Gecko/20100101 Firefox/8.0.1',
'PATH_INFO': '/v1/AUTH_test/container',
'REMOTE_ADDR': '172.16.83.1',
'REQUEST_METHOD': 'POST',
'SCRIPT_NAME': '',
'SERVER_NAME': '172.16.83.128',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'swift.account/AUTH_test': self._fake_cache_env(
'AUTH_test', [key]),
'swift.container/AUTH_test/container': {'meta': {}},
'wsgi.errors': wsgi_errors,
'wsgi.input': wsgi_input,
'wsgi.multiprocess': False,
'wsgi.multithread': True,
'wsgi.run_once': False,
'wsgi.url_scheme': 'http',
'wsgi.version': (1, 0),
}
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '303 See Other')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEquals(location, 'http://brim.net?status=201&message=')
self.assertEquals(exc_info, None)
self.assertTrue('http://brim.net?status=201&message=' in body)
self.assertEquals(len(self.app.requests), 2)
self.assertEquals(self.app.requests[0].body, 'Test File\nOne\n')
self.assertEquals(self.app.requests[1].body, 'Test\nFile\nTwo\n')
def test_chrome(self):
key = 'abc'
path = '/v1/AUTH_test/container'
redirect = 'http://brim.net'
max_file_size = 1024
max_file_count = 10
expires = int(time() + 86400)
sig = hmac.new(
key,
'%s\n%s\n%s\n%s\n%s' % (
path, redirect, max_file_size, max_file_count, expires),
sha1).hexdigest()
wsgi_input = StringIO('\r\n'.join([
'------WebKitFormBoundaryq3CFxUjfsDMu8XsA',
'Content-Disposition: form-data; name="redirect"',
'',
redirect,
'------WebKitFormBoundaryq3CFxUjfsDMu8XsA',
'Content-Disposition: form-data; name="max_file_size"',
'',
str(max_file_size),
'------WebKitFormBoundaryq3CFxUjfsDMu8XsA',
'Content-Disposition: form-data; name="max_file_count"',
'',
str(max_file_count),
'------WebKitFormBoundaryq3CFxUjfsDMu8XsA',
'Content-Disposition: form-data; name="expires"',
'',
str(expires),
'------WebKitFormBoundaryq3CFxUjfsDMu8XsA',
'Content-Disposition: form-data; name="signature"',
'',
sig,
'------WebKitFormBoundaryq3CFxUjfsDMu8XsA',
'Content-Disposition: form-data; name="file1"; '
'filename="testfile1.txt"',
'Content-Type: text/plain',
'',
'Test File\nOne\n',
'------WebKitFormBoundaryq3CFxUjfsDMu8XsA',
'Content-Disposition: form-data; name="file2"; '
'filename="testfile2.txt"',
'Content-Type: text/plain',
'',
'Test\nFile\nTwo\n',
'------WebKitFormBoundaryq3CFxUjfsDMu8XsA',
'Content-Disposition: form-data; name="file3"; filename=""',
'Content-Type: application/octet-stream',
'',
'',
'------WebKitFormBoundaryq3CFxUjfsDMu8XsA--',
''
]))
wsgi_errors = StringIO()
env = {
'CONTENT_TYPE': 'multipart/form-data; '
'boundary=----WebKitFormBoundaryq3CFxUjfsDMu8XsA',
'HTTP_ACCEPT_CHARSET': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'HTTP_ACCEPT_ENCODING': 'gzip,deflate,sdch',
'HTTP_ACCEPT_LANGUAGE': 'en-US,en;q=0.8',
'HTTP_ACCEPT': 'text/html,application/xhtml+xml,application/xml;'
'q=0.9,*/*;q=0.8',
'HTTP_CACHE_CONTROL': 'max-age=0',
'HTTP_CONNECTION': 'keep-alive',
'HTTP_HOST': 'ubuntu:8080',
'HTTP_ORIGIN': 'null',
'HTTP_USER_AGENT': 'Mozilla/5.0 (Macintosh; Intel Mac OS X '
'10_7_2) AppleWebKit/535.7 (KHTML, like Gecko) '
'Chrome/16.0.912.63 Safari/535.7',
'PATH_INFO': '/v1/AUTH_test/container',
'REMOTE_ADDR': '172.16.83.1',
'REQUEST_METHOD': 'POST',
'SCRIPT_NAME': '',
'SERVER_NAME': '172.16.83.128',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'swift.account/AUTH_test': self._fake_cache_env(
'AUTH_test', [key]),
'swift.container/AUTH_test/container': {'meta': {}},
'wsgi.errors': wsgi_errors,
'wsgi.input': wsgi_input,
'wsgi.multiprocess': False,
'wsgi.multithread': True,
'wsgi.run_once': False,
'wsgi.url_scheme': 'http',
'wsgi.version': (1, 0),
}
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '303 See Other')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEquals(location, 'http://brim.net?status=201&message=')
self.assertEquals(exc_info, None)
self.assertTrue('http://brim.net?status=201&message=' in body)
self.assertEquals(len(self.app.requests), 2)
self.assertEquals(self.app.requests[0].body, 'Test File\nOne\n')
self.assertEquals(self.app.requests[1].body, 'Test\nFile\nTwo\n')
def test_explorer(self):
key = 'abc'
path = '/v1/AUTH_test/container'
redirect = 'http://brim.net'
max_file_size = 1024
max_file_count = 10
expires = int(time() + 86400)
sig = hmac.new(
key,
'%s\n%s\n%s\n%s\n%s' % (
path, redirect, max_file_size, max_file_count, expires),
sha1).hexdigest()
wsgi_input = StringIO('\r\n'.join([
'-----------------------------7db20d93017c',
'Content-Disposition: form-data; name="redirect"',
'',
redirect,
'-----------------------------7db20d93017c',
'Content-Disposition: form-data; name="max_file_size"',
'',
str(max_file_size),
'-----------------------------7db20d93017c',
'Content-Disposition: form-data; name="max_file_count"',
'',
str(max_file_count),
'-----------------------------7db20d93017c',
'Content-Disposition: form-data; name="expires"',
'',
str(expires),
'-----------------------------7db20d93017c',
'Content-Disposition: form-data; name="signature"',
'',
sig,
'-----------------------------7db20d93017c',
'Content-Disposition: form-data; name="file1"; '
'filename="C:\\testfile1.txt"',
'Content-Type: text/plain',
'',
'Test File\nOne\n',
'-----------------------------7db20d93017c',
'Content-Disposition: form-data; name="file2"; '
'filename="C:\\testfile2.txt"',
'Content-Type: text/plain',
'',
'Test\nFile\nTwo\n',
'-----------------------------7db20d93017c',
'Content-Disposition: form-data; name="file3"; filename=""',
'Content-Type: application/octet-stream',
'',
'',
'-----------------------------7db20d93017c--',
''
]))
wsgi_errors = StringIO()
env = {
'CONTENT_TYPE': 'multipart/form-data; '
'boundary=---------------------------7db20d93017c',
'HTTP_ACCEPT_ENCODING': 'gzip, deflate',
'HTTP_ACCEPT_LANGUAGE': 'en-US',
'HTTP_ACCEPT': 'text/html, application/xhtml+xml, */*',
'HTTP_CACHE_CONTROL': 'no-cache',
'HTTP_CONNECTION': 'Keep-Alive',
'HTTP_HOST': '172.16.83.128:8080',
'HTTP_USER_AGENT': 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT '
'6.1; WOW64; Trident/5.0)',
'PATH_INFO': '/v1/AUTH_test/container',
'REMOTE_ADDR': '172.16.83.129',
'REQUEST_METHOD': 'POST',
'SCRIPT_NAME': '',
'SERVER_NAME': '172.16.83.128',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'swift.account/AUTH_test': self._fake_cache_env(
'AUTH_test', [key]),
'swift.container/AUTH_test/container': {'meta': {}},
'wsgi.errors': wsgi_errors,
'wsgi.input': wsgi_input,
'wsgi.multiprocess': False,
'wsgi.multithread': True,
'wsgi.run_once': False,
'wsgi.url_scheme': 'http',
'wsgi.version': (1, 0),
}
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '303 See Other')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEquals(location, 'http://brim.net?status=201&message=')
self.assertEquals(exc_info, None)
self.assertTrue('http://brim.net?status=201&message=' in body)
self.assertEquals(len(self.app.requests), 2)
self.assertEquals(self.app.requests[0].body, 'Test File\nOne\n')
self.assertEquals(self.app.requests[1].body, 'Test\nFile\nTwo\n')
def test_messed_up_start(self):
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', 'http://brim.net', 5, 10,
int(time() + 86400), key)
env['wsgi.input'] = StringIO('XX' + '\r\n'.join(body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
env['swift.container/AUTH_test/container'] = {'meta': {}}
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
def log_assert_int_status(env, response_status_int):
self.assertTrue(isinstance(response_status_int, int))
self.formpost._log_request = log_assert_int_status
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '400 Bad Request')
self.assertEquals(exc_info, None)
self.assertTrue('FormPost: invalid starting boundary' in body)
self.assertEquals(len(self.app.requests), 0)
def test_max_file_size_exceeded(self):
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', 'http://brim.net', 5, 10,
int(time() + 86400), key)
env['wsgi.input'] = StringIO('\r\n'.join(body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
env['swift.container/AUTH_test/container'] = {'meta': {}}
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '400 Bad Request')
self.assertEquals(exc_info, None)
self.assertTrue('FormPost: max_file_size exceeded' in body)
self.assertEquals(len(self.app.requests), 0)
def test_max_file_count_exceeded(self):
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', 'http://brim.net', 1024, 1,
int(time() + 86400), key)
env['wsgi.input'] = StringIO('\r\n'.join(body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
env['swift.container/AUTH_test/container'] = {'meta': {}}
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '303 See Other')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEquals(
location,
'http://brim.net?status=400&message=max%20file%20count%20exceeded')
self.assertEquals(exc_info, None)
self.assertTrue(
'http://brim.net?status=400&message=max%20file%20count%20exceeded'
in body)
self.assertEquals(len(self.app.requests), 1)
self.assertEquals(self.app.requests[0].body, 'Test File\nOne\n')
def test_subrequest_does_not_pass_query(self):
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key)
        env['QUERY_STRING'] = 'this=should&not=get&passed'
env['wsgi.input'] = StringIO('\r\n'.join(body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
env['swift.container/AUTH_test/container'] = {'meta': {}}
self.app = FakeApp(
iter([('201 Created', {}, ''),
('201 Created', {}, '')]),
check_no_query_string=True)
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
# Make sure we 201 Created, which means we made the final subrequest
# (and FakeApp verifies that no QUERY_STRING got passed).
self.assertEquals(status, '201 Created')
self.assertEquals(exc_info, None)
self.assertTrue('201 Created' in body)
self.assertEquals(len(self.app.requests), 2)
def test_subrequest_fails(self):
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', 'http://brim.net', 1024, 10,
int(time() + 86400), key)
env['wsgi.input'] = StringIO('\r\n'.join(body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
env['swift.container/AUTH_test/container'] = {'meta': {}}
self.app = FakeApp(iter([('404 Not Found', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '303 See Other')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEquals(location, 'http://brim.net?status=404&message=')
self.assertEquals(exc_info, None)
self.assertTrue('http://brim.net?status=404&message=' in body)
self.assertEquals(len(self.app.requests), 1)
def test_truncated_attr_value(self):
key = 'abc'
redirect = 'a' * formpost.MAX_VALUE_LENGTH
max_file_size = 1024
max_file_count = 10
expires = int(time() + 86400)
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', redirect, max_file_size, max_file_count,
expires, key)
# Tack on an extra char to redirect, but shouldn't matter since it
# should get truncated off on read.
redirect += 'b'
env['wsgi.input'] = StringIO('\r\n'.join([
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="redirect"',
'',
redirect,
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="max_file_size"',
'',
str(max_file_size),
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="max_file_count"',
'',
str(max_file_count),
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="expires"',
'',
str(expires),
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="signature"',
'',
sig,
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="file1"; '
'filename="testfile1.txt"',
'Content-Type: text/plain',
'',
'Test File\nOne\n',
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="file2"; '
'filename="testfile2.txt"',
'Content-Type: text/plain',
'',
'Test\nFile\nTwo\n',
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="file3"; filename=""',
'Content-Type: application/octet-stream',
'',
'',
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR--',
'',
]))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
env['swift.container/AUTH_test/container'] = {'meta': {}}
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '303 See Other')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEquals(
location,
('a' * formpost.MAX_VALUE_LENGTH) + '?status=201&message=')
self.assertEquals(exc_info, None)
self.assertTrue(
('a' * formpost.MAX_VALUE_LENGTH) + '?status=201&message=' in body)
self.assertEquals(len(self.app.requests), 2)
self.assertEquals(self.app.requests[0].body, 'Test File\nOne\n')
self.assertEquals(self.app.requests[1].body, 'Test\nFile\nTwo\n')
def test_no_file_to_process(self):
key = 'abc'
redirect = 'http://brim.net'
max_file_size = 1024
max_file_count = 10
expires = int(time() + 86400)
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', redirect, max_file_size, max_file_count,
expires, key)
env['wsgi.input'] = StringIO('\r\n'.join([
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="redirect"',
'',
redirect,
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="max_file_size"',
'',
str(max_file_size),
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="max_file_count"',
'',
str(max_file_count),
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="expires"',
'',
str(expires),
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="signature"',
'',
sig,
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR--',
'',
]))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
env['swift.container/AUTH_test/container'] = {'meta': {}}
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '303 See Other')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEquals(
location,
'http://brim.net?status=400&message=no%20files%20to%20process')
self.assertEquals(exc_info, None)
self.assertTrue(
'http://brim.net?status=400&message=no%20files%20to%20process'
in body)
self.assertEquals(len(self.app.requests), 0)
def test_formpost_without_useragent(self):
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', 'http://redirect', 1024, 10,
int(time() + 86400), key, user_agent=False)
env['wsgi.input'] = StringIO('\r\n'.join(body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
env['swift.container/AUTH_test/container'] = {'meta': {}}
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
def start_response(s, h, e=None):
pass
body = ''.join(self.formpost(env, start_response))
self.assertTrue('User-Agent' in self.app.requests[0].headers)
self.assertEquals(self.app.requests[0].headers['User-Agent'],
'FormPost')
def test_formpost_with_origin(self):
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', 'http://redirect', 1024, 10,
int(time() + 86400), key, user_agent=False)
env['wsgi.input'] = StringIO('\r\n'.join(body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
env['swift.container/AUTH_test/container'] = {'meta': {}}
env['HTTP_ORIGIN'] = 'http://localhost:5000'
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created',
{'Access-Control-Allow-Origin':
'http://localhost:5000'}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
headers = {}
        def start_response(s, h, e=None):
            for k, v in h:
                headers[k] = v
body = ''.join(self.formpost(env, start_response))
self.assertEquals(headers['Access-Control-Allow-Origin'],
'http://localhost:5000')
def test_formpost_with_multiple_keys(self):
key = 'ernie'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', 'http://redirect', 1024, 10,
int(time() + 86400), key)
env['wsgi.input'] = StringIO('\r\n'.join(body))
# Stick it in X-Account-Meta-Temp-URL-Key-2 and make sure we get it
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', ['bert', key])
env['swift.container/AUTH_test/container'] = {'meta': {}}
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
body = ''.join(self.formpost(env, start_response))
self.assertEqual('303 See Other', status[0])
self.assertEqual(
'http://redirect?status=201&message=',
dict(headers[0]).get('Location'))
def test_formpost_with_multiple_container_keys(self):
first_key = 'ernie'
second_key = 'bert'
keys = [first_key, second_key]
meta = {}
for idx, key in enumerate(keys):
meta_name = 'temp-url-key' + ("-%d" % (idx + 1) if idx else "")
if key:
meta[meta_name] = key
for key in keys:
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', 'http://redirect', 1024, 10,
int(time() + 86400), key)
env['wsgi.input'] = StringIO('\r\n'.join(body))
env['swift.account/AUTH_test'] = self._fake_cache_env('AUTH_test')
# Stick it in X-Container-Meta-Temp-URL-Key-2 and ensure we get it
env['swift.container/AUTH_test/container'] = {'meta': meta}
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
body = ''.join(self.formpost(env, start_response))
self.assertEqual('303 See Other', status[0])
self.assertEqual(
'http://redirect?status=201&message=',
dict(headers[0]).get('Location'))
def test_redirect(self):
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', 'http://redirect', 1024, 10,
int(time() + 86400), key)
env['wsgi.input'] = StringIO('\r\n'.join(body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
env['swift.container/AUTH_test/container'] = {'meta': {}}
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '303 See Other')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEquals(location, 'http://redirect?status=201&message=')
self.assertEquals(exc_info, None)
self.assertTrue(location in body)
self.assertEquals(len(self.app.requests), 2)
self.assertEquals(self.app.requests[0].body, 'Test File\nOne\n')
self.assertEquals(self.app.requests[1].body, 'Test\nFile\nTwo\n')
def test_redirect_with_query(self):
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', 'http://redirect?one=two', 1024, 10,
int(time() + 86400), key)
env['wsgi.input'] = StringIO('\r\n'.join(body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
env['swift.container/AUTH_test/container'] = {'meta': {}}
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '303 See Other')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEquals(location,
'http://redirect?one=two&status=201&message=')
self.assertEquals(exc_info, None)
self.assertTrue(location in body)
self.assertEquals(len(self.app.requests), 2)
self.assertEquals(self.app.requests[0].body, 'Test File\nOne\n')
self.assertEquals(self.app.requests[1].body, 'Test\nFile\nTwo\n')
def test_no_redirect(self):
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key)
env['wsgi.input'] = StringIO('\r\n'.join(body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
env['swift.container/AUTH_test/container'] = {'meta': {}}
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '201 Created')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEquals(location, None)
self.assertEquals(exc_info, None)
self.assertTrue('201 Created' in body)
self.assertEquals(len(self.app.requests), 2)
self.assertEquals(self.app.requests[0].body, 'Test File\nOne\n')
self.assertEquals(self.app.requests[1].body, 'Test\nFile\nTwo\n')
def test_no_redirect_expired(self):
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', '', 1024, 10, int(time() - 10), key)
env['wsgi.input'] = StringIO('\r\n'.join(body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '401 Unauthorized')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEquals(location, None)
self.assertEquals(exc_info, None)
self.assertTrue('FormPost: Form Expired' in body)
def test_no_redirect_invalid_sig(self):
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key)
env['wsgi.input'] = StringIO('\r\n'.join(body))
# Change key to invalidate sig
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key + ' is bogus now'])
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '401 Unauthorized')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEquals(location, None)
self.assertEquals(exc_info, None)
self.assertTrue('FormPost: Invalid Signature' in body)
def test_no_redirect_with_error(self):
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key)
env['wsgi.input'] = StringIO('XX' + '\r\n'.join(body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '400 Bad Request')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEquals(location, None)
self.assertEquals(exc_info, None)
self.assertTrue('FormPost: invalid starting boundary' in body)
def test_no_v1(self):
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v2/AUTH_test/container', '', 1024, 10, int(time() + 86400), key)
env['wsgi.input'] = StringIO('\r\n'.join(body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '401 Unauthorized')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEquals(location, None)
self.assertEquals(exc_info, None)
self.assertTrue('FormPost: Invalid Signature' in body)
def test_empty_v1(self):
key = 'abc'
sig, env, body = self._make_sig_env_body(
'//AUTH_test/container', '', 1024, 10, int(time() + 86400), key)
env['wsgi.input'] = StringIO('\r\n'.join(body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '401 Unauthorized')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEquals(location, None)
self.assertEquals(exc_info, None)
self.assertTrue('FormPost: Invalid Signature' in body)
def test_empty_account(self):
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1//container', '', 1024, 10, int(time() + 86400), key)
env['wsgi.input'] = StringIO('\r\n'.join(body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '401 Unauthorized')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEquals(location, None)
self.assertEquals(exc_info, None)
self.assertTrue('FormPost: Invalid Signature' in body)
def test_wrong_account(self):
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_tst/container', '', 1024, 10, int(time() + 86400), key)
env['wsgi.input'] = StringIO('\r\n'.join(body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
self.app = FakeApp(iter([
('200 Ok', {'x-account-meta-temp-url-key': 'def'}, ''),
('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '401 Unauthorized')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEquals(location, None)
self.assertEquals(exc_info, None)
self.assertTrue('FormPost: Invalid Signature' in body)
def test_no_container(self):
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test', '', 1024, 10, int(time() + 86400), key)
env['wsgi.input'] = StringIO('\r\n'.join(body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '401 Unauthorized')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEquals(location, None)
self.assertEquals(exc_info, None)
self.assertTrue('FormPost: Invalid Signature' in body)
def test_completely_non_int_expires(self):
key = 'abc'
expires = int(time() + 86400)
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', '', 1024, 10, expires, key)
for i, v in enumerate(body):
if v == str(expires):
body[i] = 'badvalue'
break
env['wsgi.input'] = StringIO('\r\n'.join(body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '400 Bad Request')
location = None
for h, v in headers:
if h.lower() == 'location':
location = v
self.assertEquals(location, None)
self.assertEquals(exc_info, None)
self.assertTrue('FormPost: expired not an integer' in body)
def test_x_delete_at(self):
delete_at = int(time() + 100)
x_delete_body_part = [
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="x_delete_at"',
'',
str(delete_at),
]
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key)
env['wsgi.input'] = StringIO('\r\n'.join(x_delete_body_part + body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
env['swift.container/AUTH_test/container'] = {'meta': {}}
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '201 Created')
self.assertTrue('201 Created' in body)
self.assertEquals(len(self.app.requests), 2)
self.assertTrue("X-Delete-At" in self.app.requests[0].headers)
self.assertTrue("X-Delete-At" in self.app.requests[1].headers)
self.assertEquals(delete_at,
self.app.requests[0].headers["X-Delete-At"])
self.assertEquals(delete_at,
self.app.requests[1].headers["X-Delete-At"])
def test_x_delete_at_not_int(self):
delete_at = "2014-07-16"
x_delete_body_part = [
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="x_delete_at"',
'',
str(delete_at),
]
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key)
env['wsgi.input'] = StringIO('\r\n'.join(x_delete_body_part + body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '400 Bad Request')
self.assertTrue('FormPost: x_delete_at not an integer' in body)
def test_x_delete_after(self):
delete_after = 100
x_delete_body_part = [
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="x_delete_after"',
'',
str(delete_after),
]
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key)
env['wsgi.input'] = StringIO('\r\n'.join(x_delete_body_part + body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
env['swift.container/AUTH_test/container'] = {'meta': {}}
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '201 Created')
self.assertTrue('201 Created' in body)
self.assertEquals(len(self.app.requests), 2)
self.assertTrue("X-Delete-After" in self.app.requests[0].headers)
self.assertTrue("X-Delete-After" in self.app.requests[1].headers)
self.assertEqual(delete_after,
self.app.requests[0].headers["X-Delete-After"])
self.assertEqual(delete_after,
self.app.requests[1].headers["X-Delete-After"])
def test_x_delete_after_not_int(self):
delete_after = "2 days"
x_delete_body_part = [
'------WebKitFormBoundaryNcxTqxSlX7t4TDkR',
'Content-Disposition: form-data; name="x_delete_after"',
'',
str(delete_after),
]
key = 'abc'
sig, env, body = self._make_sig_env_body(
'/v1/AUTH_test/container', '', 1024, 10, int(time() + 86400), key)
env['wsgi.input'] = StringIO('\r\n'.join(x_delete_body_part + body))
env['swift.account/AUTH_test'] = self._fake_cache_env(
'AUTH_test', [key])
self.app = FakeApp(iter([('201 Created', {}, ''),
('201 Created', {}, '')]))
self.auth = tempauth.filter_factory({})(self.app)
self.formpost = formpost.filter_factory({})(self.auth)
status = [None]
headers = [None]
exc_info = [None]
def start_response(s, h, e=None):
status[0] = s
headers[0] = h
exc_info[0] = e
body = ''.join(self.formpost(env, start_response))
status = status[0]
headers = headers[0]
exc_info = exc_info[0]
self.assertEquals(status, '400 Bad Request')
self.assertTrue('FormPost: x_delete_after not an integer' in body)
if __name__ == '__main__':
unittest.main()
|
wbhuber/local_swift_branch
|
test/unit/common/middleware/test_formpost.py
|
Python
|
apache-2.0
| 69,565
|
# This was taken from http://python.org/sf/1541697
# It's not technically a crasher, and it may not even truly be infinite;
# however, I haven't waited long enough to see the result. It takes
# 100% of CPU while running and should be fixed.
import re
starttag = re.compile(r'<[a-zA-Z][-_.:a-zA-Z0-9]*\s*('
r'\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)(\s*=\s*'
r'(\'[^\']*\'|"[^"]*"|[-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~@]'
r'[][\-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*(?=[\s>/<])))?'
r')*\s*/?\s*(?=[<>])')
if __name__ == '__main__':
foo = '<table cellspacing="0" cellpadding="0" style="border-collapse'
starttag.match(foo)
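# A minimal sketch of the same failure mode (illustrative addition, not part
# of the original report): the hang comes from catastrophic backtracking in
# the nested quantifiers, and each extra 'a' below roughly doubles the time
# the engine spends proving the forced failure.
def demo_backtracking(max_n=23):
    import time
    pat = re.compile(r'(a+)+$')  # nested quantifiers, like the attribute group
    for n in range(16, max_n):
        start = time.time()
        pat.match('a' * n + 'b')  # guaranteed failure => exponential retries
        print(n, round(time.time() - start, 3))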
|
nmercier/linux-cross-gcc
|
win32/bin/Lib/test/crashers/infinite_loop_re.py
|
Python
|
bsd-3-clause
| 661
|
from collections import defaultdict
from compare_mt import corpus_utils
def _count_ngram(sent, order):
  # Index every n-gram of `sent` up to length `order`: gram_pos[n] maps a
  # space-joined n-gram to the list of positions where it starts.
  gram_pos = dict()
  for i in range(order):
    gram_pos[i+1] = defaultdict(lambda: [])
  for i, word in enumerate(sent):
    # Grow the n-gram backwards from position i, recording each length once.
    for j in range(min(i+1, order)):
      gram_pos[j+1][word].append(i-j)
      word = sent[i-j-1] + ' ' + word
  return gram_pos
def ngram_context_align(ref, out, order=-1, case_insensitive=False):
"""
Calculate the word alignment between a reference sentence and an output sentence.
Proposed in the following paper:
Automatic Evaluation of Translation Quality for Distant Language Pairs
Hideki Isozaki, Tsutomu Hirao, Kevin Duh, Katsuhito Sudoh, Hajime Tsukada
http://www.anthology.aclweb.org/D/D10/D10-1092.pdf
Args:
ref: A reference sentence
out: An output sentence
order: The highest order of grams we want to consider (-1=inf)
case_insensitive: A boolean specifying whether to turn on the case insensitive option
Returns:
The word alignment, represented as a list of integers.
"""
if case_insensitive:
ref = corpus_utils.lower(ref)
out = corpus_utils.lower(out)
order = len(ref) if order == -1 else order
ref_gram_pos = _count_ngram(ref, order)
out_gram_pos = _count_ngram(out, order)
worder = []
for i, word in enumerate(out):
if len(ref_gram_pos[1][word]) == 0:
continue
if len(ref_gram_pos[1][word]) == len(out_gram_pos[1][word]) == 1:
worder.append(ref_gram_pos[1][word][0])
else:
word_forward = word
word_backward = word
for j in range(1, order):
if i - j >= 0:
word_backward = out[i-j] + ' ' + word_backward
if len(ref_gram_pos[j+1][word_backward]) == len(out_gram_pos[j+1][word_backward]) == 1:
worder.append(ref_gram_pos[j+1][word_backward][0]+j)
break
if i + j < len(out):
word_forward = word_forward + ' ' + out[i+j]
if len(ref_gram_pos[j+1][word_forward]) == len(out_gram_pos[j+1][word_forward]) == 1:
worder.append(ref_gram_pos[j+1][word_forward][0])
break
return worder
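# A minimal usage sketch (illustrative addition, not part of the library):
# unique unigrams align directly, while the repeated word 'the' is
# disambiguated by growing the n-gram context around it until it is unique
# in both sentences.
if __name__ == '__main__':
  _ref = 'the cat sat on the mat'.split()
  _out = 'on the mat the cat sat'.split()
  print(ngram_context_align(_ref, _out))  # -> [3, 4, 5, 0, 1, 2]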
|
neulab/compare-mt
|
compare_mt/align_utils.py
|
Python
|
bsd-3-clause
| 2,139
|
SECRET_KEY = 'c&2sr12q0^g^+epf5g#-lm6+3a(trr5+&v_47jwv4!87oj4k+l'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'app_name.db',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.admindocs',
'face_off',
)
ROOT_URLCONF = 'face_off.urls'
SITE_ID = 1
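# Illustrative usage (assuming this module is importable as 'tests.settings'):
#   DJANGO_SETTINGS_MODULE=tests.settings django-admin.py test face_off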
|
hkage/django-face-off
|
tests/settings.py
|
Python
|
mit
| 608
|
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import testtools
import webob
from neutron.agent.linux import utils as agent_utils
from neutron.agent.metadata import agent
from neutron.agent.metadata import config
from neutron.agent import metadata_agent
from neutron.common import constants as n_const
from neutron.common import utils
from neutron.tests import base
class FakeConf(object):
auth_ca_cert = None
nova_metadata_ip = '9.9.9.9'
nova_metadata_port = 8775
metadata_proxy_shared_secret = 'secret'
nova_metadata_protocol = 'http'
nova_metadata_insecure = True
nova_client_cert = 'nova_cert'
nova_client_priv_key = 'nova_priv_key'
cache_url = ''
class FakeConfCache(FakeConf):
cache_url = 'memory://?default_ttl=5'
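# With cache_url set, MetadataProxyHandler memoizes its RPC lookups for the
# TTL; the cache test class below relies on this to assert that get_ports is
# invoked only once across repeated calls.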
class TestMetadataProxyHandlerBase(base.BaseTestCase):
fake_conf = FakeConf
def setUp(self):
super(TestMetadataProxyHandlerBase, self).setUp()
self.log_p = mock.patch.object(agent, 'LOG')
self.log = self.log_p.start()
self.handler = agent.MetadataProxyHandler(self.fake_conf)
self.handler.plugin_rpc = mock.Mock()
self.handler.context = mock.Mock()
class TestMetadataProxyHandlerRpc(TestMetadataProxyHandlerBase):
def test_get_port_filters(self):
router_id = 'test_router_id'
ip = '1.2.3.4'
networks = ('net_id1', 'net_id2')
expected = {'device_id': [router_id],
'device_owner': n_const.ROUTER_INTERFACE_OWNERS,
'network_id': networks,
'fixed_ips': {'ip_address': [ip]}}
actual = self.handler._get_port_filters(router_id, ip, networks)
self.assertEqual(expected, actual)
def test_get_router_networks(self):
router_id = 'router-id'
expected = ('network_id1', 'network_id2')
ports = [{'network_id': 'network_id1', 'something': 42},
{'network_id': 'network_id2', 'something_else': 32}]
self.handler.plugin_rpc.get_ports.return_value = ports
networks = self.handler._get_router_networks(router_id)
self.assertEqual(expected, networks)
def test_get_ports_for_remote_address(self):
ip = '1.1.1.1'
networks = ('network_id1', 'network_id2')
expected = [{'port_id': 'port_id1'},
{'port_id': 'port_id2'}]
self.handler.plugin_rpc.get_ports.return_value = expected
ports = self.handler._get_ports_for_remote_address(ip, networks)
self.assertEqual(expected, ports)
class TestMetadataProxyHandlerCache(TestMetadataProxyHandlerBase):
fake_conf = FakeConfCache
def test_call(self):
req = mock.Mock()
with mock.patch.object(self.handler,
'_get_instance_and_tenant_id') as get_ids:
get_ids.return_value = ('instance_id', 'tenant_id')
with mock.patch.object(self.handler, '_proxy_request') as proxy:
proxy.return_value = 'value'
retval = self.handler(req)
self.assertEqual(retval, 'value')
def test_call_no_instance_match(self):
req = mock.Mock()
with mock.patch.object(self.handler,
'_get_instance_and_tenant_id') as get_ids:
get_ids.return_value = None, None
retval = self.handler(req)
self.assertIsInstance(retval, webob.exc.HTTPNotFound)
def test_call_internal_server_error(self):
req = mock.Mock()
with mock.patch.object(self.handler,
'_get_instance_and_tenant_id') as get_ids:
get_ids.side_effect = Exception
retval = self.handler(req)
self.assertIsInstance(retval, webob.exc.HTTPInternalServerError)
self.assertEqual(len(self.log.mock_calls), 2)
def test_get_router_networks(self):
router_id = 'router-id'
expected = ('network_id1', 'network_id2')
ports = [{'network_id': 'network_id1', 'something': 42},
{'network_id': 'network_id2', 'something_else': 32}]
mock_get_ports = self.handler.plugin_rpc.get_ports
mock_get_ports.return_value = ports
networks = self.handler._get_router_networks(router_id)
mock_get_ports.assert_called_once_with(
mock.ANY,
{'device_id': [router_id],
'device_owner': n_const.ROUTER_INTERFACE_OWNERS})
self.assertEqual(expected, networks)
def _test_get_router_networks_twice_helper(self):
router_id = 'router-id'
ports = [{'network_id': 'network_id1', 'something': 42}]
expected_networks = ('network_id1',)
with mock.patch(
'oslo_utils.timeutils.utcnow_ts', return_value=0):
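            # Freezing utcnow_ts pins the cache clock at 0 so an entry cached
            # with default_ttl=5 (from FakeConfCache.cache_url) cannot expire
            # between the two _get_router_networks() calls below.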
mock_get_ports = self.handler.plugin_rpc.get_ports
mock_get_ports.return_value = ports
networks = self.handler._get_router_networks(router_id)
mock_get_ports.assert_called_once_with(
mock.ANY,
{'device_id': [router_id],
'device_owner': n_const.ROUTER_INTERFACE_OWNERS})
self.assertEqual(expected_networks, networks)
networks = self.handler._get_router_networks(router_id)
def test_get_router_networks_twice(self):
self._test_get_router_networks_twice_helper()
self.assertEqual(
1, self.handler.plugin_rpc.get_ports.call_count)
def _get_ports_for_remote_address_cache_hit_helper(self):
remote_address = 'remote_address'
networks = ('net1', 'net2')
mock_get_ports = self.handler.plugin_rpc.get_ports
mock_get_ports.return_value = [{'network_id': 'net1', 'something': 42}]
self.handler._get_ports_for_remote_address(remote_address, networks)
mock_get_ports.assert_called_once_with(
mock.ANY,
{'network_id': networks,
'fixed_ips': {'ip_address': [remote_address]}}
)
self.assertEqual(1, mock_get_ports.call_count)
self.handler._get_ports_for_remote_address(remote_address,
networks)
def test_get_ports_for_remote_address_cache_hit(self):
self._get_ports_for_remote_address_cache_hit_helper()
self.assertEqual(
1, self.handler.plugin_rpc.get_ports.call_count)
def test_get_ports_network_id(self):
network_id = 'network-id'
router_id = 'router-id'
remote_address = 'remote-address'
expected = ['port1']
networks = (network_id,)
with mock.patch.object(self.handler,
'_get_ports_for_remote_address'
) as mock_get_ip_addr,\
mock.patch.object(self.handler,
'_get_router_networks'
) as mock_get_router_networks:
mock_get_ip_addr.return_value = expected
ports = self.handler._get_ports(remote_address, network_id,
router_id)
mock_get_ip_addr.assert_called_once_with(remote_address,
networks)
self.assertFalse(mock_get_router_networks.called)
self.assertEqual(expected, ports)
def test_get_ports_router_id(self):
router_id = 'router-id'
remote_address = 'remote-address'
expected = ['port1']
networks = ('network1', 'network2')
with mock.patch.object(self.handler,
'_get_ports_for_remote_address',
return_value=expected
) as mock_get_ip_addr,\
mock.patch.object(self.handler,
'_get_router_networks',
return_value=networks
) as mock_get_router_networks:
ports = self.handler._get_ports(remote_address,
router_id=router_id)
            mock_get_router_networks.assert_called_once_with(router_id)
mock_get_ip_addr.assert_called_once_with(remote_address, networks)
self.assertEqual(expected, ports)
def test_get_ports_no_id(self):
self.assertRaises(TypeError, self.handler._get_ports, 'remote_address')
def _get_instance_and_tenant_id_helper(self, headers, list_ports_retval,
networks=None, router_id=None):
remote_address = '192.168.1.1'
headers['X-Forwarded-For'] = remote_address
req = mock.Mock(headers=headers)
def mock_get_ports(*args, **kwargs):
return list_ports_retval.pop(0)
self.handler.plugin_rpc.get_ports.side_effect = mock_get_ports
instance_id, tenant_id = self.handler._get_instance_and_tenant_id(req)
expected = []
if router_id:
expected.append(
mock.call(
mock.ANY,
{'device_id': [router_id],
'device_owner': n_const.ROUTER_INTERFACE_OWNERS}
)
)
expected.append(
mock.call(
mock.ANY,
{'network_id': networks,
'fixed_ips': {'ip_address': ['192.168.1.1']}}
)
)
self.handler.plugin_rpc.get_ports.assert_has_calls(expected)
return (instance_id, tenant_id)
def test_get_instance_id_router_id(self):
router_id = 'the_id'
headers = {
'X-Neutron-Router-ID': router_id
}
networks = ('net1', 'net2')
ports = [
[{'network_id': 'net1'}, {'network_id': 'net2'}],
[{'device_id': 'device_id', 'tenant_id': 'tenant_id',
'network_id': 'net1'}]
]
self.assertEqual(
self._get_instance_and_tenant_id_helper(headers, ports,
networks=networks,
router_id=router_id),
('device_id', 'tenant_id')
)
def test_get_instance_id_router_id_no_match(self):
router_id = 'the_id'
headers = {
'X-Neutron-Router-ID': router_id
}
networks = ('net1', 'net2')
ports = [
[{'network_id': 'net1'}, {'network_id': 'net2'}],
[]
]
self.assertEqual(
self._get_instance_and_tenant_id_helper(headers, ports,
networks=networks,
router_id=router_id),
(None, None)
)
def test_get_instance_id_network_id(self):
network_id = 'the_id'
headers = {
'X-Neutron-Network-ID': network_id
}
ports = [
[{'device_id': 'device_id',
'tenant_id': 'tenant_id',
'network_id': 'the_id'}]
]
self.assertEqual(
self._get_instance_and_tenant_id_helper(headers, ports,
networks=('the_id',)),
('device_id', 'tenant_id')
)
def test_get_instance_id_network_id_no_match(self):
network_id = 'the_id'
headers = {
'X-Neutron-Network-ID': network_id
}
ports = [[]]
self.assertEqual(
self._get_instance_and_tenant_id_helper(headers, ports,
networks=('the_id',)),
(None, None)
)
def _proxy_request_test_helper(self, response_code=200, method='GET'):
hdrs = {'X-Forwarded-For': '8.8.8.8'}
body = 'body'
req = mock.Mock(path_info='/the_path', query_string='', headers=hdrs,
method=method, body=body)
resp = mock.MagicMock(status=response_code)
req.response = resp
with mock.patch.object(self.handler, '_sign_instance_id') as sign:
sign.return_value = 'signed'
with mock.patch('httplib2.Http') as mock_http:
resp.__getitem__.return_value = "text/plain"
mock_http.return_value.request.return_value = (resp, 'content')
retval = self.handler._proxy_request('the_id', 'tenant_id',
req)
mock_http.assert_called_once_with(
ca_certs=None, disable_ssl_certificate_validation=True)
mock_http.assert_has_calls([
mock.call().add_certificate(
FakeConf.nova_client_priv_key,
FakeConf.nova_client_cert,
"%s:%s" % (FakeConf.nova_metadata_ip,
FakeConf.nova_metadata_port)
),
mock.call().request(
'http://9.9.9.9:8775/the_path',
method=method,
headers={
'X-Forwarded-For': '8.8.8.8',
'X-Instance-ID-Signature': 'signed',
'X-Instance-ID': 'the_id',
'X-Tenant-ID': 'tenant_id'
},
body=body
)]
)
return retval
def test_proxy_request_post(self):
response = self._proxy_request_test_helper(method='POST')
self.assertEqual(response.content_type, "text/plain")
self.assertEqual(response.body, 'content')
def test_proxy_request_200(self):
response = self._proxy_request_test_helper(200)
self.assertEqual(response.content_type, "text/plain")
self.assertEqual(response.body, 'content')
def test_proxy_request_400(self):
self.assertIsInstance(self._proxy_request_test_helper(400),
webob.exc.HTTPBadRequest)
def test_proxy_request_403(self):
self.assertIsInstance(self._proxy_request_test_helper(403),
webob.exc.HTTPForbidden)
def test_proxy_request_404(self):
self.assertIsInstance(self._proxy_request_test_helper(404),
webob.exc.HTTPNotFound)
def test_proxy_request_409(self):
self.assertIsInstance(self._proxy_request_test_helper(409),
webob.exc.HTTPConflict)
def test_proxy_request_500(self):
self.assertIsInstance(self._proxy_request_test_helper(500),
webob.exc.HTTPInternalServerError)
def test_proxy_request_other_code(self):
with testtools.ExpectedException(Exception):
self._proxy_request_test_helper(302)
def test_sign_instance_id(self):
self.assertEqual(
self.handler._sign_instance_id('foo'),
'773ba44693c7553d6ee20f61ea5d2757a9a4f4a44d2841ae4e95b52e4cd62db4'
)
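    # The expected digest above is presumably hmac.new(b'secret', b'foo',
    # hashlib.sha256).hexdigest() -- the instance id signed with FakeConf's
    # metadata_proxy_shared_secret.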
class TestMetadataProxyHandlerNoCache(TestMetadataProxyHandlerCache):
fake_conf = FakeConf
def test_get_router_networks_twice(self):
self._test_get_router_networks_twice_helper()
self.assertEqual(
2, self.handler.plugin_rpc.get_ports.call_count)
def test_get_ports_for_remote_address_cache_hit(self):
self._get_ports_for_remote_address_cache_hit_helper()
self.assertEqual(
2, self.handler.plugin_rpc.get_ports.call_count)
class TestUnixDomainMetadataProxy(base.BaseTestCase):
def setUp(self):
super(TestUnixDomainMetadataProxy, self).setUp()
self.cfg_p = mock.patch.object(agent, 'cfg')
self.cfg = self.cfg_p.start()
looping_call_p = mock.patch(
'oslo_service.loopingcall.FixedIntervalLoopingCall')
self.looping_mock = looping_call_p.start()
self.cfg.CONF.metadata_proxy_socket = '/the/path'
self.cfg.CONF.metadata_workers = 0
self.cfg.CONF.metadata_backlog = 128
self.cfg.CONF.metadata_proxy_socket_mode = config.USER_MODE
@mock.patch.object(utils, 'ensure_dir')
def test_init_doesnot_exists(self, ensure_dir):
agent.UnixDomainMetadataProxy(mock.Mock())
ensure_dir.assert_called_once_with('/the')
def test_init_exists(self):
with mock.patch('os.path.isdir') as isdir:
with mock.patch('os.unlink') as unlink:
isdir.return_value = True
agent.UnixDomainMetadataProxy(mock.Mock())
unlink.assert_called_once_with('/the/path')
def test_init_exists_unlink_no_file(self):
with mock.patch('os.path.isdir') as isdir:
with mock.patch('os.unlink') as unlink:
with mock.patch('os.path.exists') as exists:
isdir.return_value = True
exists.return_value = False
unlink.side_effect = OSError
agent.UnixDomainMetadataProxy(mock.Mock())
unlink.assert_called_once_with('/the/path')
def test_init_exists_unlink_fails_file_still_exists(self):
with mock.patch('os.path.isdir') as isdir:
with mock.patch('os.unlink') as unlink:
with mock.patch('os.path.exists') as exists:
isdir.return_value = True
exists.return_value = True
unlink.side_effect = OSError
with testtools.ExpectedException(OSError):
agent.UnixDomainMetadataProxy(mock.Mock())
unlink.assert_called_once_with('/the/path')
@mock.patch.object(agent, 'MetadataProxyHandler')
@mock.patch.object(agent_utils, 'UnixDomainWSGIServer')
@mock.patch.object(utils, 'ensure_dir')
def test_run(self, ensure_dir, server, handler):
p = agent.UnixDomainMetadataProxy(self.cfg.CONF)
p.run()
ensure_dir.assert_called_once_with('/the')
server.assert_has_calls([
mock.call('neutron-metadata-agent'),
mock.call().start(handler.return_value,
'/the/path', workers=0,
backlog=128, mode=0o644),
mock.call().wait()]
)
def test_main(self):
with mock.patch.object(agent, 'UnixDomainMetadataProxy') as proxy:
with mock.patch.object(metadata_agent, 'config') as config:
with mock.patch.object(metadata_agent, 'cfg') as cfg:
with mock.patch.object(utils, 'cfg'):
metadata_agent.main()
self.assertTrue(config.setup_logging.called)
proxy.assert_has_calls([
mock.call(cfg.CONF),
mock.call().run()]
)
def test_init_state_reporting(self):
with mock.patch('os.makedirs'):
proxy = agent.UnixDomainMetadataProxy(mock.Mock())
self.looping_mock.assert_called_once_with(proxy._report_state)
self.looping_mock.return_value.start.assert_called_once_with(
interval=mock.ANY)
def test_report_state(self):
with mock.patch('neutron.agent.rpc.PluginReportStateAPI') as state_api:
with mock.patch('os.makedirs'):
proxy = agent.UnixDomainMetadataProxy(mock.Mock())
self.assertTrue(proxy.agent_state['start_flag'])
proxy._report_state()
self.assertNotIn('start_flag', proxy.agent_state)
state_api_inst = state_api.return_value
state_api_inst.report_state.assert_called_once_with(
proxy.context, proxy.agent_state, use_call=True)
|
yanheven/neutron
|
neutron/tests/unit/agent/metadata/test_agent.py
|
Python
|
apache-2.0
| 20,431
|
import time
import threading
import serial
from actuators.interface import SerialActuator
# TODO: rather than sending a request and waiting for a reply, have the
# Arduino stream its state continuously, store it, and just read the stored
# variable whenever the value is needed
# => This is where the separate thread is needed!
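# A minimal sketch of that TODO (illustrative only, not part of this project):
# a daemon thread keeps polling the motor state and caches the result so that
# property reads never block on a request/response round-trip. A real
# implementation would also need a lock around the shared serial port.
class _MotorStatePoller:
    """Continuously refresh a motor's cached state in the background"""
    def __init__(self, motor, period=0.05):
        self._motor = motor
        self._period = period
        self._thread = threading.Thread(target=self._run, daemon=True)
    def start(self):
        self._thread.start()
    def _run(self):
        while True:
            # _update_motor_state() parses the "[pos speed]" frames and stores
            # them in motor._state, so polling it is enough here
            self._motor._update_motor_state()
            time.sleep(self._period)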
class Motor(SerialActuator):
pwm = 0
rotation = 0 # 0: ANTICLOCKWISE, 1: CLOCKWISE
pos_digit_number = 6 # number of digits in the pos order
_read_thread = None
_prev_speed_measure = (0, 0)
_state = {
'pos' : 0,
'speed': 0
}
def __init__(self, serial_console, pos_digit_number=6, debug=False):
"""
params:
- serial_console: the serial.Serial object used to communicate
- pos_digit_number : number of digits (not including the minus sign) in the position value
- debug: if true, the object becomes verbose
"""
self._pwm = 0
self._rotation = 0
        self._prev_speed_measure = (0, time.monotonic())  # time.clock() was removed in Python 3.8
self.pos_digit_number = pos_digit_number
# Initiate a separate thread to read value from serial console
#self._read_thread = threading.Thread(target=self._update_motor_state, daemon=True)
super().__init__(serial_console, debug)
#self._read_thread.start()
def _update_motor_state(self):
"""
        Reads serial input and stores the values in the state dict
"""
success = False
self.exec(b'7') # Code to get arduino info
value = self.get_value()
if self._debug:
print('[SERIAL MOTOR] VALUE ', value)
if value.startswith('[') and value.endswith(']'):
values = value.replace('[', '').replace(']', '').split()
if len(values) == 2:
pos, speed = values
if pos.replace('.', '').replace('-', '').isdigit() \
and speed.replace('.', '').replace('-', '').isdigit():
self._state['pos'] = int(float(pos))
self._state['speed'] = float(speed)
success = True
if self._debug:
print('[SERIAL MOTOR] STATE UPDATE : ', self._state)
if self._debug and not success:
print('[SERIAL MOTOR] COULD NOT UPDATE STATE')
@property
def speed(self):
"""
Computes and returns the current speed of the motor in rad/s
"""
self._update_motor_state()
return self._state['speed']
@speed.setter
def speed(self, value):
"""
        Set the requested speed, clamping its magnitude to the motor's
        maximum PWM value (255)
params:
- value : speed in pwm [-255, 255]
"""
if not type(value) in [int, float]:
raise ValueError('The speed must be an int or a float, not a ' + str(type(value)))
pwm = min(abs(value), 255)
        # Zero-pad so that the pwm field is always exactly 3 characters
        pwm = str(pwm).zfill(3)
rotation = (0, 1)[value >= 0]
order = bytes('9' + str(rotation) + pwm, 'ascii')
if self._debug:
print('[SERIAL MOTOR] SENT ORDER : ', order)
result = self.exec(order)
@property
def position(self):
self._update_motor_state()
return self._state['pos']
@position.setter
def position(self, value):
"""
Send a position order to the serial port
"""
if type(value) != int:
raise TypeError('Position must be an int')
nb_digits = len(str(abs(value)))
if nb_digits > self.pos_digit_number:
            raise ValueError('Position value too big: ' + str(nb_digits) + ' greater than ' + str(self.pos_digit_number))
order = '8'
order += ('+', '-')[value < 0]
order += '0'*(self.pos_digit_number - nb_digits)
order += str(abs(value))
order = bytes(order, 'ascii')
if self._debug:
print('[SERIAL MOTOR] SENT ORDER : ', order)
self.exec(order)
# arduino_console = serial.Serial('COM3', 9600, timeout=1, write_timeout=2)
# belt_motor = Motor(arduino_console)
# # avoids some bugs
# time.sleep(0.5)
# belt_motor.speed = 200
# time.sleep(1)
# belt_motor.speed = 0
|
ingegus/tipe-corbeillator
|
actuators/motor.py
|
Python
|
mit
| 3,785
|
# -*- coding: utf-8 -*-
import logging
import datetime
from django.contrib.contenttypes.generic import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
import pytz
import sys
logger = logging.getLogger(__name__)
class QueueManager(models.Manager):
timezone = pytz.timezone('Europe/Berlin')
def enqueue(self, *instances, **kwargs ):
"""
Put the given instances into the queue.
If an object is already in the queue, delete the old one and put a new
object into the queue.
"""
deleted = kwargs.pop('deleted', False)
due = kwargs.pop('due', datetime.datetime.now(self.timezone))
function = kwargs.pop('function')
instance_by_ct = {}
# Group instances by content type.
for instance in instances:
cls = instance.__class__
content_type = ContentType.objects.get_for_model(cls)
if content_type not in instance_by_ct:
instance_by_ct[content_type] = []
instance_by_ct[content_type].append(instance)
for content_type, instances in instance_by_ct.iteritems():
# Let's see what objects are already enqueued.
instance_pks = [instance.pk for instance in instances]
existing_queue = self.filter(
content_type=content_type,
object_id__in=instance_pks)
            # Delete the ones that are already in the queue. This prevents an
            # object that was updated multiple times recently from being
            # indexed more than once.
existing_queue.delete()
# We delete the old ones instead of not creating the new one for
# one particular reason:
            # It can happen that an object is already being processed just
# now. If we now skip the creation, the new version won't get
# indexed since the ``process_search_queue`` command only took care
# of the OLD version.
to_be_created = [
self.model(
content_object=instance,
deleted=deleted,
due=due,
function=function
)
for instance in instances]
self.bulk_create(to_be_created)
def process(self, queryset=None, max_execution_time=datetime.timedelta(seconds=300)):
current_time = datetime.datetime.now(self.timezone)
if queryset is None:
queryset = self.filter(
due__lte=current_time,
executed__isnull=True,
).order_by('due')
for instance in queryset:
instance.process()
if max_execution_time is not None and \
datetime.datetime.now(self.timezone) >= current_time + max_execution_time:
break
class Queue(models.Model):
"""
Save your objects and the required function into this model and
    let the queue call it at the required due date
"""
content_type = models.ForeignKey(ContentType)
object_id = models.IntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
function = models.CharField(max_length=255)
deleted = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True)
due = models.DateTimeField()
executed = models.DateTimeField(null=True, blank=True)
objects = QueueManager()
def process(self):
model = self.content_type.model_class()
if not self.deleted and self.content_object is not None:
obj = self.content_object
else:
obj = model(pk=self.object_id)
try:
getattr(obj, self.function)()
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
logger.error(
u'(Queue #%d) Error processing (%s / pk=%d): %s (in %s / Line: %s)' % (
self.pk,
obj.__class__.__name__,
obj.pk,
e,
exc_traceback.tb_frame.f_code.co_filename,
exc_traceback.tb_lineno
)
)
finally:
self.executed = datetime.datetime.now(pytz.timezone('Europe/Berlin'))
self.save()
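# Example usage (a sketch; the ``article`` instances and their ``reindex``
# method are hypothetical):
#     Queue.objects.enqueue(article_a, article_b, function='reindex')
#     Queue.objects.process()  # calls article.reindex() for every due entry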
|
unicornfox/django_queue
|
queue/models.py
|
Python
|
mit
| 4,418
|
'''
Created on Jun 6, 2014
@author: rtermondt
'''
from django.conf import settings
def global_settings(request):
invitation_system_setting = getattr(settings, 'INVITATION_SYSTEM', None)
if invitation_system_setting == True:
invite_system = True
else:
invite_system = False
return {
'INVITATION_SYSTEM': invite_system
}
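# Wiring it up (a sketch; the dotted path is inferred from this file's
# location in the project):
#     TEMPLATE_CONTEXT_PROCESSORS += (
#         'inithub.context_processors.global_settings',
#     )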
|
richtermondt/inithub-web
|
inithub/inithub/context_processors.py
|
Python
|
mit
| 376
|
import cgi
import os
import logging
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp import template
from google.appengine.api import urlfetch
import gmemsess
import ckeynsecret
from myspace.config.MySpaceError import MySpaceError
from myspace.myspaceapi import MySpace
from oauthlib import oauth
import urllib
"""
webapp handlers
1. / Index page, not logged in
- Intro text plus link to auth with MySpace
2. /StartAuth Auth with MySpace
- Creates an unauthed token, stashes in session, redirects user to MySpace
3. /OauthCallback Callback URL
- Checks that the user's unauthed token matches the one in the session; if
not, shows an error; otherwise exchanges it for an access token, stashes
that in the session and redirects the user to the /displayprofile page
4. /DisplayProfile
- Looks up the user's profile and friends using their access token
"""
class IndexPage(webapp.RequestHandler):
def get(self):
session=gmemsess.Session(self)
if 'access_token' in session:
self.redirect('/displayprofile')
else:
path = os.path.join(os.path.dirname(__file__), 'templates/index.html')
self.response.out.write(template.render(path, {}))
class StartAuth(webapp.RequestHandler):
def get(self):
session=gmemsess.Session(self)
callback_url = self.request.host_url + '/callback'
ms = MySpace(ckeynsecret.CONSUMER_KEY, ckeynsecret.CONSUMER_SECRET)
request_token = ms.get_request_token()
auth_url = ms.get_authorization_url(request_token, callback_url)
session['unauthed_token'] = request_token.to_string()
session.save()
self.redirect(auth_url)
class OauthCallback(webapp.RequestHandler):
def get(self):
session=gmemsess.Session(self)
unauthed_token = session['unauthed_token'] if 'unauthed_token' in session else None
if not unauthed_token:
self.response.out.write("No un-authed token found in session")
return
token = oauth.OAuthToken.from_string(unauthed_token)
if token.key != urllib.unquote( self.request.get('oauth_token', 'no-token') ):
self.response.out.write("Something went wrong! Tokens do not match")
return
ms = MySpace(ckeynsecret.CONSUMER_KEY, ckeynsecret.CONSUMER_SECRET)
access_token = ms.get_access_token(token)
session['access_token'] = access_token.to_string()
session.save()
self.redirect('/displayprofile')
class DisplayProfile(webapp.RequestHandler):
def get(self):
session=gmemsess.Session(self)
str_access_token = session['access_token'] if 'access_token' in session else None
if not str_access_token:
self.response.out.write("You need an access token in the session!")
return
access_token = oauth.OAuthToken.from_string(str_access_token)
ms = MySpace(ckeynsecret.CONSUMER_KEY, ckeynsecret.CONSUMER_SECRET, access_token.key, access_token.secret)
user_id = ms.get_userid()
profile_data = ms.get_person("@me",fields="@all")
friends_data = ms.get_friends(person_id="@me")
albums_data = ms.get_albums(person_id="@me")
template_values = {
'profile_data': profile_data,
'friends_data': friends_data,
'albums_data': albums_data,
}
path = os.path.join(os.path.dirname(__file__), 'templates/profile.html')
self.response.out.write(template.render(path, template_values))
session.invalidate()
application = webapp.WSGIApplication(
[('/', IndexPage),
('/startauth', StartAuth),
('/callback', OauthCallback),
('/displayprofile', DisplayProfile)],
debug=True)
def main():
#logging.getLogger().setLevel(logging.DEBUG)
run_wsgi_app(application)
if __name__ == "__main__":
main()
|
dgouldin/myspaceid-python-sdk
|
samples/google-app-engine/oauth/consumer.py
|
Python
|
apache-2.0
| 4,219
|
import re
from thefuck.utils import sudo_support
@sudo_support
def match(command, settings):
return (command.script.startswith('cd ')
and ('no such file or directory' in command.stderr.lower()
or 'cd: can\'t cd to' in command.stderr.lower()))
@sudo_support
def get_new_command(command, settings):
return re.sub(r'^cd (.*)', 'mkdir -p \\1 && cd \\1', command.script)
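# Illustrative example: a failed ``cd foo/bar/baz`` whose stderr contains
# "no such file or directory" is rewritten by get_new_command() to
# ``mkdir -p foo/bar/baz && cd foo/bar/baz``.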
|
JianfengYao/thefuck
|
thefuck/rules/cd_mkdir.py
|
Python
|
mit
| 398
|
from setuptools import setup
setup(
name='dhtapi',
packages=['dhtapi'],
include_package_data=True,
install_requires=[
'flask',
],
)
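# Installing (illustrative): running ``pip install .`` in this directory
# installs the ``dhtapi`` package together with its Flask dependency.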
|
BenSimonds/DHTSite
|
api/setup.py
|
Python
|
mit
| 160
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from system_test import TestCase, Qdrouterd, main_module, TIMEOUT, Logger, TestTimeout
from proton import Message
from proton.handlers import MessagingHandler
from proton.reactor import Container
# How many worker threads?
W_THREADS = 2
# Define oversize denial condition
OVERSIZE_CONDITION_NAME = "amqp:connection:forced"
OVERSIZE_CONDITION_DESC = "Message size exceeded"
#
# DISPATCH-975 Detect that an oversize message is blocked.
# These tests check basic blocking where the sender is blocked by
# the ingress routers. They do not check compound blocking where
# oversize is allowed or denied by an ingress edge router but also
# denied by the uplink interior router.
class OversizeMessageTransferTest(MessagingHandler):
"""
This test connects a sender and a receiver. Then it tries to send _count_
number of messages of the given size through the router or router network.
With expect_block=True the ingress router should detect the sender's oversize
message and close the sender connection. The receiver may receive
aborted message indications but that is not guaranteed. If any aborted
messages are received then the count must be at most one.
The test is a success when the sender receives a connection error with
oversize indication and the receiver has not received too many aborts.
With expect_block=False sender messages should be received normally.
The test is a success when n_accepted == count.
"""
def __init__(self, sender_host, receiver_host, test_address,
message_size=100000, count=10, expect_block=True, print_to_console=False):
super(OversizeMessageTransferTest, self).__init__()
self.sender_host = sender_host
self.receiver_host = receiver_host
self.test_address = test_address
self.msg_size = message_size
self.count = count
self.expect_block = expect_block
self.sender_conn = None
self.receiver_conn = None
self.error = None
self.sender = None
self.receiver = None
self.proxy = None
self.n_sent = 0
self.n_rcvd = 0
self.n_accepted = 0
self.n_rejected = 0
self.n_aborted = 0
self.n_connection_error = 0
self.shut_down = False
self.logger = Logger(title=("OversizeMessageTransferTest - %s" % (self.test_address)), print_to_console=print_to_console)
self.log_unhandled = False
def timeout(self):
self.error = "Timeout Expired: n_sent=%d n_rcvd=%d n_rejected=%d n_aborted=%d" % \
(self.n_sent, self.n_rcvd, self.n_rejected, self.n_aborted)
self.logger.log("self.timeout " + self.error)
self._shut_down_test()
def on_start(self, event):
self.logger.log("on_start")
self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
self.logger.log("on_start: opening receiver connection to %s" % (self.receiver_host.addresses[0]))
self.receiver_conn = event.container.connect(self.receiver_host.addresses[0])
self.logger.log("on_start: opening sender connection to %s" % (self.sender_host.addresses[0]))
self.sender_conn = event.container.connect(self.sender_host.addresses[0])
self.logger.log("on_start: Creating receiver")
self.receiver = event.container.create_receiver(self.receiver_conn, self.test_address)
self.logger.log("on_start: Creating sender")
self.sender = event.container.create_sender(self.sender_conn, self.test_address)
self.logger.log("on_start: done")
def send(self):
while self.sender.credit > 0 and self.n_sent < self.count:
            # construct the message body in identifiable chunks
body_msg = ""
padchar = "abcdefghijklmnopqrstuvwxyz@#$%"[self.n_sent % 30]
while len(body_msg) < self.msg_size:
chunk = "[%s:%d:%d" % (self.test_address, self.n_sent, len(body_msg))
padlen = 50 - len(chunk)
chunk += padchar * padlen
body_msg += chunk
if len(body_msg) > self.msg_size:
body_msg = body_msg[:self.msg_size]
self.logger.log("send. address:%s message:%d of %s length=%d" %
(self.test_address, self.n_sent, self.count, self.msg_size))
m = Message(body=body_msg)
self.sender.send(m)
self.n_sent += 1
def on_sendable(self, event):
if event.sender == self.sender:
self.logger.log("on_sendable")
self.send()
def on_message(self, event):
if self.expect_block:
# All messages should violate maxMessageSize.
# Receiving any is an error.
self.error = "Received a message. Expected to receive no messages."
self.logger.log(self.error)
self._shut_down_test()
else:
self.n_rcvd += 1
self.accept(event.delivery)
self._check_done()
def on_connection_remote_close(self, event):
if self.shut_down:
return
if event.connection == self.sender_conn:
if event.connection.remote_condition is not None:
if event.connection.remote_condition.name == OVERSIZE_CONDITION_NAME and \
event.connection.remote_condition.description == OVERSIZE_CONDITION_DESC:
self.logger.log("on_connection_remote_close: sender closed with correct condition")
self.n_connection_error += 1
self.sender_conn.close()
self.sender_conn = None
else:
# sender closed but for wrong reason
self.error = "sender close error: Expected name: %s, description: %s, but received name: %s, description: %s" % (
OVERSIZE_CONDITION_NAME, OVERSIZE_CONDITION_DESC,
event.connection.remote_condition.name, event.connection.remote_condition.description)
self.logger.log(self.error)
else:
self.error = "sender close error: Expected a remote_condition but there was none."
self.logger.log(self.error)
else:
# connection error but not for sender
self.error = "unexpected connection close error: wrong connection closed."
self.logger.log(self.error)
self._check_done()
def _shut_down_test(self):
self.shut_down = True
if self.timer:
self.timer.cancel()
self.timer = None
if self.sender:
self.sender.close()
self.sender = None
if self.receiver:
self.receiver.close()
self.receiver = None
if self.sender_conn:
self.sender_conn.close()
self.sender_conn = None
if self.receiver_conn:
self.receiver_conn.close()
self.receiver_conn = None
def _check_done(self):
current = ("check_done: sent=%d rcvd=%d rejected=%d aborted=%d connection_error:%d" %
(self.n_sent, self.n_rcvd, self.n_rejected, self.n_aborted, self.n_connection_error))
self.logger.log(current)
if self.error is not None:
self.logger.log("TEST FAIL")
self._shut_down_test()
else:
done = (self.n_connection_error == 1) \
if self.expect_block else \
(self.n_sent == self.count and self.n_rcvd == self.count)
if done:
self.logger.log("TEST DONE!!!")
# self.log_unhandled = True # verbose debugging
self._shut_down_test()
def on_rejected(self, event):
self.n_rejected += 1
if self.expect_block:
self.logger.log("on_rejected: entry")
self._check_done()
else:
self.error = "Unexpected on_reject"
self.logger.log(self.error)
self._check_done()
def on_aborted(self, event):
self.logger.log("on_aborted")
self.n_aborted += 1
self._check_done()
def on_error(self, event):
self.error = "Container error"
self.logger.log(self.error)
self._shut_down_test()
def on_unhandled(self, method, *args):
if self.log_unhandled:
self.logger.log("on_unhandled: method: %s, args: %s" % (method, args))
def run(self):
try:
Container(self).run()
except Exception as e:
self.error = "Container run exception: %s" % (e)
self.logger.log(self.error)
self.logger.dump()
# For the next test case define max sizes for each router.
# These are the configured maxMessageSize values
EA1_MAX_SIZE = 50000
INTA_MAX_SIZE = 100000
INTB_MAX_SIZE = 150000
EB1_MAX_SIZE = 200000
# DISPATCH-1645 S32 max size is chosen to expose signed 32-bit
# wraparound bug. Sizes with bit 31 set look negative when used as
# C 'int' and prevent any message from passing policy checks.
S32_MAX_SIZE = 2**31
# Interior routers enforce max size directly.
# Edge routers are also checked by the attached interior router.
# Block tests that use edge routers that send messages to the network must
# account for the fact that the attached interior router will apply
# another max size. These tests do not check against EB1 max for the
# sender if the receiver is on EA1, INTA, or INTB since INTB's max
# would kick in and cause a false positive.
# Tests that check for allowing near-max sizes use the minimum of
# the edge router's max and the attached interior router's max.
# The bytes-over and bytes-under max that should trigger allow or deny.
# Messages with content this much over should be blocked while
# messages with content this much under should be allowed.
# * client overhead is typically 16 bytes or so
# * interrouter overhead is much larger with annotations
OVER_UNDER = 200
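# Worked example: a sender attached to EA1 is limited by min(EA1_MAX_SIZE,
# INTA_MAX_SIZE) = 50,000 bytes once its message crosses the uplink, so the
# allow tests send 50,000 - OVER_UNDER bytes while the matching block tests
# send EA1_MAX_SIZE + OVER_UNDER bytes.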
class MaxMessageSizeBlockOversize(TestCase):
"""
verify that maxMessageSize blocks oversize messages
"""
@classmethod
def setUpClass(cls):
"""Start the router"""
super(MaxMessageSizeBlockOversize, cls).setUpClass()
def router(name, mode, max_size, extra):
config = [
('router', {'mode': mode,
'id': name,
'allowUnsettledMulticast': 'yes',
'workerThreads': W_THREADS}),
('listener', {'role': 'normal',
'port': cls.tester.get_port()}),
('address', {'prefix': 'multicast', 'distribution': 'multicast'}),
('policy', {'maxConnections': 100, 'enableVhostPolicy': 'true', 'maxMessageSize': max_size, 'defaultVhost': '$default'}),
('vhost', {'hostname': '$default',
'allowUnknownUser': 'true',
'groups': {
'$default': {
'users': '*',
'maxConnections': 100,
'remoteHosts': '*',
'sources': '*',
'targets': '*',
'allowAnonymousSender': 'true',
'allowWaypointLinks': 'true',
'allowDynamicSource': 'true'
}
}
})
]
if extra:
config.extend(extra)
config = Qdrouterd.Config(config)
cls.routers.append(cls.tester.qdrouterd(name, config, wait=True))
return cls.routers[-1]
# configuration:
# two edge routers connected via 2 interior routers with max sizes
#
# +-------+ +---------+ +---------+ +-------+
# | EA1 |<==>| INT.A |<==>| INT.B |<==>| EB1 |
# | 50,000| | 100,000 | | 150,000 | |200,000|
# +-------+ +---------+ +---------+ +-------+
#
# Note:
# * Messages whose senders connect to INT.A or INT.B are subject to max message size
# defined for the ingress router only.
        # * Messages whose senders connect to EA1 or EB1 are subject to max message size
# defined for the ingress router. If the message is forwarded through the
# connected interior router then the message is subject to another max message size
# defined by the interior router.
cls.routers = []
interrouter_port = cls.tester.get_port()
cls.INTA_edge_port = cls.tester.get_port()
cls.INTB_edge_port = cls.tester.get_port()
router('INT.A', 'interior', INTA_MAX_SIZE,
[('listener', {'role': 'inter-router',
'port': interrouter_port}),
('listener', {'role': 'edge', 'port': cls.INTA_edge_port})])
cls.INT_A = cls.routers[0]
cls.INT_A.listener = cls.INT_A.addresses[0]
router('INT.B', 'interior', INTB_MAX_SIZE,
[('connector', {'name': 'connectorToA',
'role': 'inter-router',
'port': interrouter_port}),
('listener', {'role': 'edge',
'port': cls.INTB_edge_port})])
cls.INT_B = cls.routers[1]
cls.INT_B.listener = cls.INT_B.addresses[0]
router('EA1', 'edge', EA1_MAX_SIZE,
[('listener', {'name': 'rc', 'role': 'route-container',
'port': cls.tester.get_port()}),
('connector', {'name': 'uplink', 'role': 'edge',
'port': cls.INTA_edge_port})])
cls.EA1 = cls.routers[2]
cls.EA1.listener = cls.EA1.addresses[0]
router('EB1', 'edge', EB1_MAX_SIZE,
[('connector', {'name': 'uplink',
'role': 'edge',
'port': cls.INTB_edge_port,
'maxFrameSize': 1024}),
('listener', {'name': 'rc', 'role': 'route-container',
'port': cls.tester.get_port()})])
cls.EB1 = cls.routers[3]
cls.EB1.listener = cls.EB1.addresses[0]
router('S32', 'standalone', S32_MAX_SIZE, [])
cls.S32 = cls.routers[4]
cls.S32.listener = cls.S32.addresses[0]
cls.INT_A.wait_router_connected('INT.B')
cls.INT_B.wait_router_connected('INT.A')
cls.EA1.wait_connectors()
cls.EB1.wait_connectors()
def test_40_block_oversize_INTA_INTA(self):
test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.INT_A,
MaxMessageSizeBlockOversize.INT_A,
"e40",
message_size=INTA_MAX_SIZE + OVER_UNDER,
expect_block=True,
print_to_console=False)
test.run()
if test.error is not None:
test.logger.log("test_40 test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
def test_41_block_oversize_INTA_INTB(self):
test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.INT_A,
MaxMessageSizeBlockOversize.INT_B,
"e41",
message_size=INTA_MAX_SIZE + OVER_UNDER,
expect_block=True)
test.run()
if test.error is not None:
test.logger.log("test_41 test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
def test_42_block_oversize_INTA_EA1(self):
test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.INT_A,
MaxMessageSizeBlockOversize.EA1,
"e42",
message_size=INTA_MAX_SIZE + OVER_UNDER,
expect_block=True)
test.run()
if test.error is not None:
test.logger.log("test_42 test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
def test_43_block_oversize_INTA_EB1(self):
test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.INT_A,
MaxMessageSizeBlockOversize.EB1,
"e43",
message_size=INTA_MAX_SIZE + OVER_UNDER,
expect_block=True)
test.run()
if test.error is not None:
test.logger.log("test_43 test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
def test_44_block_oversize_INTB_INTA(self):
test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.INT_B,
MaxMessageSizeBlockOversize.INT_A,
"e44",
message_size=INTB_MAX_SIZE + OVER_UNDER,
expect_block=True)
test.run()
if test.error is not None:
test.logger.log("test_44 test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
def test_45_block_oversize_INTB_INTB(self):
test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.INT_B,
MaxMessageSizeBlockOversize.INT_B,
"e45",
message_size=INTB_MAX_SIZE + OVER_UNDER,
expect_block=True)
test.run()
if test.error is not None:
test.logger.log("test_45 test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
def test_46_block_oversize_INTB_EA1(self):
test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.INT_B,
MaxMessageSizeBlockOversize.EA1,
"e46",
message_size=INTB_MAX_SIZE + OVER_UNDER,
expect_block=True)
test.run()
if test.error is not None:
test.logger.log("test_46 test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
def test_47_block_oversize_INTB_EB1(self):
test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.INT_B,
MaxMessageSizeBlockOversize.EB1,
"e47",
message_size=INTB_MAX_SIZE + OVER_UNDER,
expect_block=True)
test.run()
if test.error is not None:
test.logger.log("test_47 test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
def test_48_block_oversize_EA1_INTA(self):
if EA1_MAX_SIZE >= INTA_MAX_SIZE:
self.skipTest("EA1 sending to INT.A may be blocked by EA1 limit and also by INT.A limit. That condition is tested in compound test.")
test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.EA1,
MaxMessageSizeBlockOversize.INT_A,
"e48",
message_size=EA1_MAX_SIZE + OVER_UNDER,
expect_block=True)
test.run()
if test.error is not None:
test.logger.log("test_48 test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
def test_49_block_oversize_EA1_INTB(self):
if EA1_MAX_SIZE >= INTA_MAX_SIZE:
self.skipTest("EA1 sending to INT.B may be blocked by EA1 limit and also by INT.A limit. That condition is tested in compound test.")
test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.EA1,
MaxMessageSizeBlockOversize.INT_B,
"e49",
message_size=EA1_MAX_SIZE + OVER_UNDER,
expect_block=True)
test.run()
if test.error is not None:
test.logger.log("test_49 test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
def test_4a_block_oversize_EA1_EA1(self):
test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.EA1,
MaxMessageSizeBlockOversize.EA1,
"e4a",
message_size=EA1_MAX_SIZE + OVER_UNDER,
expect_block=True)
test.run()
if test.error is not None:
test.logger.log("test_4a test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
def test_4b_block_oversize_EA1_EB1(self):
if EA1_MAX_SIZE >= INTA_MAX_SIZE:
self.skipTest("EA1 sending to EB1 may be blocked by EA1 limit and also by INT.A limit. That condition is tested in compound test.")
test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.EA1,
MaxMessageSizeBlockOversize.EB1,
"e4b",
message_size=EA1_MAX_SIZE + OVER_UNDER,
expect_block=True)
test.run()
if test.error is not None:
test.logger.log("test_4b test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
def test_4c_block_oversize_EB1_INTA(self):
if EB1_MAX_SIZE > INTB_MAX_SIZE:
self.skipTest("EB1 sending to INT.A may be blocked by EB1 limit and also by INT.B limit. That condition is tested in compound test.")
test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.EB1,
MaxMessageSizeBlockOversize.INT_A,
"e4c",
message_size=EB1_MAX_SIZE + OVER_UNDER,
expect_block=True)
test.run()
if test.error is not None:
test.logger.log("test_4c test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
def test_4d_block_oversize_EB1_INTB(self):
if EB1_MAX_SIZE > INTB_MAX_SIZE:
self.skipTest("EB1 sending to INT.B may be blocked by EB1 limit and also by INT.B limit. That condition is tested in compound test.")
test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.EB1,
MaxMessageSizeBlockOversize.INT_B,
"e4d",
message_size=EB1_MAX_SIZE + OVER_UNDER,
expect_block=True)
test.run()
if test.error is not None:
test.logger.log("test_4d test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
def test_4e_block_oversize_EB1_EA1(self):
if EB1_MAX_SIZE > INTB_MAX_SIZE:
self.skipTest("EB1 sending to EA1 may be blocked by EB1 limit and also by INT.B limit. That condition is tested in compound test.")
test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.EB1,
MaxMessageSizeBlockOversize.EA1,
"e4e",
message_size=EB1_MAX_SIZE + OVER_UNDER,
expect_block=True)
test.run()
if test.error is not None:
test.logger.log("test_4e test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
def test_4f_block_oversize_EB1_EB1(self):
test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.EB1,
MaxMessageSizeBlockOversize.EB1,
"e4f",
message_size=EB1_MAX_SIZE + OVER_UNDER,
expect_block=True)
test.run()
if test.error is not None:
test.logger.log("test_4f test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
#
# tests under maxMessageSize should not block
#
def test_50_allow_undersize_INTA_INTA(self):
test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.INT_A,
MaxMessageSizeBlockOversize.INT_A,
"e50",
message_size=INTA_MAX_SIZE - OVER_UNDER,
expect_block=False)
test.run()
if test.error is not None:
test.logger.log("test_50 test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
def test_51_allow_undersize_INTA_INTB(self):
test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.INT_A,
MaxMessageSizeBlockOversize.INT_B,
"e51",
message_size=INTA_MAX_SIZE - OVER_UNDER,
expect_block=False)
test.run()
if test.error is not None:
test.logger.log("test_51 test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
def test_52_allow_undersize_INTA_EA1(self):
test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.INT_A,
MaxMessageSizeBlockOversize.EA1,
"e52",
message_size=INTA_MAX_SIZE - OVER_UNDER,
expect_block=False)
test.run()
if test.error is not None:
test.logger.log("test_52 test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
def test_53_allow_undersize_INTA_EB1(self):
test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.INT_A,
MaxMessageSizeBlockOversize.EB1,
"e53",
message_size=INTA_MAX_SIZE - OVER_UNDER,
expect_block=False)
test.run()
if test.error is not None:
test.logger.log("test_53 test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
def test_54_allow_undersize_INTB_INTA(self):
test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.INT_B,
MaxMessageSizeBlockOversize.INT_A,
"e54",
message_size=INTB_MAX_SIZE - OVER_UNDER,
expect_block=False)
test.run()
if test.error is not None:
test.logger.log("test_54 test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
def test_55_allow_undersize_INTB_INTB(self):
test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.INT_B,
MaxMessageSizeBlockOversize.INT_B,
"e55",
message_size=INTB_MAX_SIZE - OVER_UNDER,
expect_block=False)
test.run()
if test.error is not None:
test.logger.log("test_55 test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
def test_56_allow_undersize_INTB_EA1(self):
test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.INT_B,
MaxMessageSizeBlockOversize.EA1,
"e56",
message_size=INTB_MAX_SIZE - OVER_UNDER,
expect_block=False)
test.run()
if test.error is not None:
test.logger.log("test_56 test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
def test_57_allow_undersize_INTB_EB1(self):
test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.INT_B,
MaxMessageSizeBlockOversize.EB1,
"e57",
message_size=INTB_MAX_SIZE - OVER_UNDER,
expect_block=False)
test.run()
if test.error is not None:
test.logger.log("test_57 test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
def test_58_allow_undersize_EA1_INTA(self):
test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.EA1,
MaxMessageSizeBlockOversize.INT_A,
"e58",
message_size=min(EA1_MAX_SIZE, INTA_MAX_SIZE) - OVER_UNDER,
expect_block=False)
test.run()
if test.error is not None:
test.logger.log("test_58 test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
def test_59_allow_undersize_EA1_INTB(self):
test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.EA1,
MaxMessageSizeBlockOversize.INT_B,
"e59",
message_size=min(EA1_MAX_SIZE, INTA_MAX_SIZE) - OVER_UNDER,
expect_block=False)
test.run()
if test.error is not None:
test.logger.log("test_59 test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
def test_5a_allow_undersize_EA1_EA1(self):
test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.EA1,
MaxMessageSizeBlockOversize.EA1,
"e5a",
message_size=min(EA1_MAX_SIZE, INTA_MAX_SIZE) - OVER_UNDER,
expect_block=False)
test.run()
if test.error is not None:
test.logger.log("test_5a test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
def test_5b_allow_undersize_EA1_EB1(self):
test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.EA1,
MaxMessageSizeBlockOversize.EB1,
"e5b",
message_size=min(EA1_MAX_SIZE, INTA_MAX_SIZE) - OVER_UNDER,
expect_block=False)
test.run()
if test.error is not None:
test.logger.log("test_5b test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
def test_5c_allow_undersize_EB1_INTA(self):
test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.EB1,
MaxMessageSizeBlockOversize.INT_A,
"e5c",
message_size=min(EB1_MAX_SIZE, INTB_MAX_SIZE) - OVER_UNDER,
expect_block=False)
test.run()
if test.error is not None:
test.logger.log("test_5c test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
def test_5d_allow_undersize_EB1_INTB(self):
test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.EB1,
MaxMessageSizeBlockOversize.INT_B,
"e5d",
message_size=min(EB1_MAX_SIZE, INTB_MAX_SIZE) - OVER_UNDER,
expect_block=False)
test.run()
if test.error is not None:
test.logger.log("test_5d test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
def test_5e_allow_undersize_EB1_EA1(self):
test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.EB1,
MaxMessageSizeBlockOversize.EA1,
"e5e",
message_size=min(EB1_MAX_SIZE, INTB_MAX_SIZE) - OVER_UNDER,
expect_block=False)
test.run()
if test.error is not None:
test.logger.log("test_5e test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
def test_5f_allow_undersize_EB1_EB1(self):
test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.EB1,
MaxMessageSizeBlockOversize.EB1,
"e5f",
message_size=min(EB1_MAX_SIZE, INTB_MAX_SIZE) - OVER_UNDER,
expect_block=False)
test.run()
if test.error is not None:
test.logger.log("test_5f test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
def test_s32_allow_gt_signed_32bit_max(self):
test = OversizeMessageTransferTest(MaxMessageSizeBlockOversize.S32,
MaxMessageSizeBlockOversize.S32,
"s32",
message_size=200,
expect_block=False)
test.run()
if test.error is not None:
test.logger.log("test_s32 test error: %s" % (test.error))
test.logger.dump()
self.assertTrue(test.error is None)
if __name__ == '__main__':
unittest.main(main_module())
|
ErnieAllen/qpid-dispatch
|
tests/system_tests_policy_oversize_basic.py
|
Python
|
apache-2.0
| 36,740
|
def gameStart():
pokers = gameIn()
if pokers:
sum = getPokersSum(pokers)
judge(pokers, sum)
# print(pokers)
# print(sum)
def gameIn():
print('what pokers ?')
print('("0" for 10, joker not allowed)')
gameIn = input()
if not gameIn:
print('empty pokers')
return False
# change to upper case
gameIn = gameIn.upper()
# delete wrong characters
valid = '1234567890ajqkAJQK'
for item in gameIn:
if not item in valid:
gameIn = gameIn.replace(item, '')
    # fewer than 5 pokers is not enough
if len(gameIn) < 5:
print('not enough pokers')
return False
# take the first 5
gameIn = gameIn[:5]
# change to array
gameIn = list(gameIn)
# change 1 to A
while '1' in gameIn:
del gameIn[gameIn.index('1')]
gameIn.append('A')
# change 0 to 10
while '0' in gameIn:
del gameIn[gameIn.index('0')]
gameIn.append('10')
return gameIn
def judge(pokers, sum):
if not judgeBigBonus(pokers, sum):
if not judgeOtherBonus(pokers, sum):
print('LOSE')
def judgeBigBonus(pokers, sum):
# bomb
count = []
for poker in set(pokers):
count.append([poker, pokers.count(poker)])
for item in count:
if item[1] == 4:
print('BOMB :', item[0])
return True
# five bulls
if sum == 50:
print('FIVE BULLS')
return True
# five small bulls
if sum == 10:
print('FIVE SMALL BULLS')
return True
return False
def judgeOtherBonus(pokers, sum):
    value = sum % 10
    if value == 0:
        value = 10
    # try every combination of three pokers whose values sum to a multiple of 10
    for a in range(3):
        for b in range(a + 1, 4):
            for c in range(b + 1, 5):
                if judge10(pokers, a, b, c):
                    showBull(pokers, value, a, b, c)
                    return True
    return False
def getPokersSum(pokers):
sum = 0
for item in pokers:
sum += getPokerVal(item)
return sum
def getPokerVal(poker):
if poker == 'J' or poker == 'Q' or poker == 'K':
return 10
elif poker == 'A':
return 1
else:
return int(poker)
def judge10(pokers, a, b, c):
return (getPokerVal(pokers[a]) + getPokerVal(pokers[b]) + getPokerVal(pokers[c])) % 10 == 0
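# Worked example: pokers = ['9', 'A', 'K', '5', '5'] sum to 30, so value = 10;
# indices (0, 1, 2) give 9 + 1 + 10 = 20, a multiple of 10, and showBull()
# prints "BULLS 10 :" followed by "[ 9 A K ] + ( 5 5 )".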
def showBull(pokers, value, a, b, c):
d, e = -1, -1
for i in range(5):
if not (i == a or i == b or i == c):
if d == -1:
d = i
else:
e = i
print('BULLS', value, ':')
print('[', pokers[a], pokers[b], pokers[c], '] + (', pokers[d], pokers[e], ')')
if __name__ == '__main__':
print("Welcome to the game:")
while 1:
gameStart()
|
BlueSky-07/Poker
|
Python/Poker.py
|
Python
|
mit
| 3,505
|
"""List diff preferences associated with one's account"""
# pylint: disable=invalid-name
import argparse
import logging
from libpycr.exceptions import PyCRError
from libpycr.gerrit.client import Gerrit
from libpycr.meta import GerritAccountBuiltin
from libpycr.pager import Pager
from libpycr.utils.commandline import expect_account_as_positional
from libpycr.utils.output import checkmark
from libpycr.utils.system import fail
from prettytable import PrettyTable
class LsDiffPrefs(GerritAccountBuiltin):
"""Implement the LS-DIFF-PREFS command"""
# Logger for this command
log = logging.getLogger(__name__)
@property
def name(self):
return 'ls-diff-prefs'
@property
def description(self):
return 'list diff preferences'
@staticmethod
def parse_command_line(arguments):
"""Parse the LS-DIFF-PREFS command command-line arguments
Returns the account id that is provided on the command line. If no
account is provided, returns None.
:param arguments: a list of command-line arguments to parse
:type arguments: list[str]
:rtype: str
"""
parser = argparse.ArgumentParser(
description='List account diff preferences')
expect_account_as_positional(parser)
cmdline = parser.parse_args(arguments)
        # return the account given on the command line, if any
        return cmdline.account
def run(self, arguments, *args, **kwargs):
account_id = self.parse_command_line(arguments)
try:
account = Gerrit.get_account(account_id or 'self')
prefs = Gerrit.get_diff_prefs(account_id or 'self')
except PyCRError as why:
fail('cannot list account diff preferences', why)
table = PrettyTable(['Preference', 'Value'])
table.align['Preference'] = 'l'
table.align['Value'] = 'c'
table.add_row(['Context', prefs.context])
table.add_row(['Expand all comments',
checkmark(prefs.expand_all_comments)])
table.add_row(['Ignore whitespace', prefs.ignore_whitespace])
table.add_row(['Intraline difference',
checkmark(prefs.intraline_difference)])
table.add_row(['Line length', prefs.line_length])
table.add_row(['Manual review', checkmark(prefs.manual_review)])
table.add_row(['Retain header', checkmark(prefs.retain_header)])
table.add_row(['Show line endings',
checkmark(prefs.show_line_endings)])
table.add_row(['Show tabs', checkmark(prefs.show_tabs)])
table.add_row(['Show whitespace errors',
checkmark(prefs.show_whitespace_errors)])
table.add_row(['Skip deleted', checkmark(prefs.skip_deleted)])
table.add_row(['Skip uncommented', checkmark(prefs.skip_uncommented)])
table.add_row(['Syntax highlighting',
checkmark(prefs.syntax_highlighting)])
table.add_row(['Tab size', prefs.tab_size])
with Pager(command=self.name):
print 'Account: {}'.format(account.username)
print table
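# Invocation is presumably ``pycr ls-diff-prefs [username]``; without a
# positional account the command falls back to the caller's own ('self')
# account.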
|
JcDelay/pycr
|
libpycr/builtin/accounts/ls-diff-prefs.py
|
Python
|
apache-2.0
| 3,136
|
#!/usr/bin/env python
#
# Copyright 2005-2007,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gru, filter
from gnuradio import eng_notation
from gnuradio import digital
from gnuradio import analog
import copy
import sys
# /////////////////////////////////////////////////////////////////////////////
# receive path
# /////////////////////////////////////////////////////////////////////////////
class receive_path(gr.hier_block2):
def __init__(self, demod_class, rx_callback, options):
gr.hier_block2.__init__(self, "receive_path",
gr.io_signature(1, 1, gr.sizeof_gr_complex),
gr.io_signature(0, 0, 0))
options = copy.copy(options) # make a copy so we can destructively modify
self._verbose = options.verbose
self._bitrate = options.bitrate # desired bit rate
self._rx_callback = rx_callback # this callback is fired when a packet arrives
self._demod_class = demod_class # the demodulator_class we're using
self._chbw_factor = options.chbw_factor # channel filter bandwidth factor
# Get demod_kwargs
demod_kwargs = self._demod_class.extract_kwargs_from_options(options)
# Build the demodulator
self.demodulator = self._demod_class(**demod_kwargs)
# Make sure the channel BW factor is between 1 and sps/2
# or the filter won't work.
if(self._chbw_factor < 1.0 or self._chbw_factor > self.samples_per_symbol()/2):
sys.stderr.write("Channel bandwidth factor ({0}) must be within the range [1.0, {1}].\n".format(self._chbw_factor, self.samples_per_symbol()/2))
sys.exit(1)
# Design filter to get actual channel we want
sw_decim = 1
chan_coeffs = filter.firdes.low_pass(1.0, # gain
sw_decim * self.samples_per_symbol(), # sampling rate
self._chbw_factor, # midpoint of trans. band
0.5, # width of trans. band
filter.firdes.WIN_HANN) # filter type
self.channel_filter = filter.fft_filter_ccc(sw_decim, chan_coeffs)
# receiver
self.packet_receiver = \
digital.demod_pkts(self.demodulator,
access_code=None,
callback=self._rx_callback,
threshold=-1)
# Carrier Sensing Blocks
alpha = 0.001
thresh = 30 # in dB, will have to adjust
self.probe = analog.probe_avg_mag_sqrd_c(thresh,alpha)
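        # the probe reports "unmuted" once the averaged input power exceeds
        # the threshold; carrier_sensed() below reads that flag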
# Display some information about the setup
if self._verbose:
self._print_verbage()
# connect block input to channel filter
self.connect(self, self.channel_filter)
# connect the channel input filter to the carrier power detector
self.connect(self.channel_filter, self.probe)
# connect channel filter to the packet receiver
self.connect(self.channel_filter, self.packet_receiver)
def bitrate(self):
return self._bitrate
def samples_per_symbol(self):
return self.demodulator._samples_per_symbol
def differential(self):
return self.demodulator._differential
def carrier_sensed(self):
"""
Return True if we think carrier is present.
"""
#return self.probe.level() > X
return self.probe.unmuted()
def carrier_threshold(self):
"""
Return current setting in dB.
"""
return self.probe.threshold()
def set_carrier_threshold(self, threshold_in_db):
"""
Set carrier threshold.
Args:
threshold_in_db: set detection threshold (float (dB))
"""
self.probe.set_threshold(threshold_in_db)
@staticmethod
def add_options(normal, expert):
"""
Adds receiver-specific options to the Options Parser
"""
if not normal.has_option("--bitrate"):
normal.add_option("-r", "--bitrate", type="eng_float", default=100e3,
help="specify bitrate [default=%default].")
normal.add_option("-v", "--verbose", action="store_true", default=False)
expert.add_option("-S", "--samples-per-symbol", type="float", default=2,
help="set samples/symbol [default=%default]")
expert.add_option("", "--log", action="store_true", default=False,
help="Log all parts of flow graph to files (CAUTION: lots of data)")
expert.add_option("", "--chbw-factor", type="float", default=1.0,
help="Channel bandwidth = chbw_factor x signal bandwidth [default=%default]")
def _print_verbage(self):
"""
Prints information about the receive path
"""
print "\nReceive Path:"
print "modulation: %s" % (self._demod_class.__name__)
print "bitrate: %sb/s" % (eng_notation.num_to_str(self._bitrate))
print "samples/symbol: %.4f" % (self.samples_per_symbol())
print "Differential: %s" % (self.differential())
|
SpectreJan/gnuradio
|
gr-digital/examples/narrowband/receive_path.py
|
Python
|
gpl-3.0
| 6,020
|
"""
Adapted from
https://www.tensorflow.org/tutorials/distribute/multi_worker_with_keras
"""
import argparse
import tensorflow as tf
import numpy as np
import ray
from ray import tune
from ray.tune.schedulers import AsyncHyperBandScheduler
from ray.tune.integration.keras import TuneReportCheckpointCallback
from ray.tune.integration.tensorflow import DistributedTrainableCreator, get_num_workers
def mnist_dataset(batch_size, mini=False):
(x_train, y_train), _ = tf.keras.datasets.mnist.load_data()
# The `x` arrays are in uint8 and have values in the range [0, 255].
# You need to convert them to float32 with values in the range [0, 1]
x_train = x_train / np.float32(255)
y_train = y_train.astype(np.int64)
if mini:
x_train = x_train[:512]
y_train = y_train[:512]
train_dataset = (
tf.data.Dataset.from_tensor_slices((x_train, y_train))
.shuffle(60000)
.repeat()
.batch(batch_size)
)
return train_dataset
def build_and_compile_cnn_model(config):
model = tf.keras.Sequential(
[
tf.keras.Input(shape=(28, 28)),
tf.keras.layers.Reshape(target_shape=(28, 28, 1)),
tf.keras.layers.Conv2D(32, 3, activation="relu"),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.get("hidden", 16), activation="relu"),
tf.keras.layers.Dense(10),
]
)
model.compile(
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=tf.keras.optimizers.SGD(
learning_rate=config.get("lr", 0.05), momentum=config.get("momentum", 0.5)
),
metrics=["accuracy"],
)
return model
def train_mnist(config, checkpoint_dir=None):
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
per_worker_batch_size = 64
num_workers = get_num_workers()
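    # MultiWorkerMirroredStrategy shards each global batch across workers,
    # so the global batch size scales with the number of workers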
global_batch_size = per_worker_batch_size * num_workers
multi_worker_dataset = mnist_dataset(global_batch_size, mini=config.get("use_mini"))
steps_per_epoch = 5 if config.get("use_mini") else 70
with strategy.scope():
multi_worker_model = build_and_compile_cnn_model(config)
multi_worker_model.fit(
multi_worker_dataset,
epochs=2,
steps_per_epoch=steps_per_epoch,
callbacks=[TuneReportCheckpointCallback({"mean_accuracy": "accuracy"})],
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--num-workers",
"-n",
type=int,
default=2,
help="Sets number of workers for training.",
)
parser.add_argument(
"--num-workers-per-host",
"-w",
type=int,
help="Sets number of workers for training.",
)
parser.add_argument(
"--num-cpus-per-worker",
"-c",
type=int,
default=2,
help="number of CPUs for this worker",
)
parser.add_argument(
"--num-gpus-per-worker",
"-g",
type=int,
default=0,
help="number of GPUs for this worker",
)
parser.add_argument(
"--cluster",
action="store_true",
default=False,
help="enables multi-node tuning",
)
parser.add_argument(
"--smoke-test",
action="store_true",
default=False,
help="enables small scale testing",
)
args = parser.parse_args()
if args.cluster:
options = dict(address="auto")
else:
options = dict(num_cpus=4)
ray.init(**options)
tf_trainable = DistributedTrainableCreator(
train_mnist,
num_workers=args.num_workers,
num_workers_per_host=args.num_workers_per_host,
num_cpus_per_worker=args.num_cpus_per_worker,
num_gpus_per_worker=args.num_gpus_per_worker,
)
sched = AsyncHyperBandScheduler(max_t=400, grace_period=20)
analysis = tune.run(
tf_trainable,
name="exp",
scheduler=sched,
metric="mean_accuracy",
mode="max",
stop={"mean_accuracy": 0.99, "training_iteration": 10},
num_samples=1,
config={
"use_mini": args.smoke_test,
"lr": tune.uniform(0.001, 0.1),
"momentum": tune.uniform(0.1, 0.9),
"hidden": tune.randint(32, 512),
},
)
print("Best hyperparameters found were: ", analysis.best_config)
|
ray-project/ray
|
python/ray/tune/examples/tf_distributed_keras_example.py
|
Python
|
apache-2.0
| 4,418
|
from tornado import httpserver,ioloop,web,gen,httpclient
from datetime import datetime
from base_handler import BaseHandler
from tools import *
import conf
import tornado_mysql
class RankHandler(BaseHandler):
@web.authenticated
@gen.coroutine
def get(self):
msg = self.get_argument('msg',None)
page_now = int(self.get_argument('page','1'))
page_now=norm_page(page_now)
conn = yield tornado_mysql.connect(host=conf.DBHOST,\
port=conf.DBPORT,user=conf.DBUSER,passwd=conf.DBPW,db=conf.DBNAME,charset='utf8')
cur = conn.cursor()
#visible
sql = "SELECT user,school,motto,ac_num,submit_num,email FROM user ORDER BY ac_num DESC LIMIT %s,%s"
yield cur.execute(sql,((page_now-1)*conf.USERS_PER_PAGE,conf.USERS_PER_PAGE))
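        # +1 smoothing keeps the AC rate defined for users with no submissions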
users = [[row[0],row[1],row[2],row[3],row[4],row[5],int((row[3]+1)/(row[4]+1)*100)] for row in cur]
cur.close()
conn.close()
url='/rank'
pages=gen_pages(url,page_now)
        self.render('rank.html', msg=msg, users=users, pages=pages,
                    page_type='user', page_title='用户排名 -XOJ',  # "User ranking - XOJ"
                    num_from=(page_now-1)*conf.USERS_PER_PAGE)
|
zrt/XOJ
|
web/rank_handler.py
|
Python
|
gpl-3.0
| 1,181
|
import IMP
import IMP.core
import IMP.algebra
import IMP.test
import IMP.pmi.restraints.em
import IMP.pmi.representation
import math
class Tests(IMP.test.TestCase):
def setUp(self):
IMP.test.TestCase.setUp(self)
self.m = IMP.Model()
self.simo1 = IMP.pmi.representation.Representation(
self.m, upperharmonic=True, disorderedlength=False)
def test_GaussianEMRestraint_rigid_body(self):
"""Test rigid body movement of target EM map"""
fname = self.get_input_file_name('2A73.pdb50.txt')
target_ps = []
IMP.isd.gmm_tools.decorate_gmm_from_text(
fname,
target_ps,
self.m,
radius_scale=3.0,
mass_scale=1.0)
gemh = IMP.pmi.restraints.em.GaussianEMRestraint(target_ps, fname,
target_mass_scale=1.0,
slope=0.000001,
target_radii_scale=3.0,
target_is_rigid_body=True)
gemh.set_label("Mobile")
gemh.add_target_density_to_hierarchy(self.simo1.prot)
gemh.add_to_model()
gemh.set_weight(100.0)
before=gemh.rs.evaluate(False)
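        # randomly moving the rigid-body target map away from the model
        # densities should increase (worsen) the restraint score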
rb = gemh.rb
rbxyz = (rb.get_x(), rb.get_y(), rb.get_z())
transformation = IMP.algebra.get_random_local_transformation(
rbxyz,
100,
math.pi)
IMP.core.transform(rb, transformation)
after=gemh.rs.evaluate(False)
self.assertTrue(after>before)
def test_add_em_gmms_to_state(self):
"""Test adding EM Restraint GMMs to PMI2 Hierarchies and RMF"""
mdl = IMP.Model()
s = IMP.pmi.topology.System(mdl)
st1 = s.create_state()
seqs = IMP.pmi.topology.Sequences(self.get_input_file_name('seqs.fasta'))
m1 = st1.create_molecule("Prot1",sequence=seqs["Protein_1"])
m1.add_representation(m1.get_residues(),resolutions=[1], setup_particles_as_densities=True)
hier = m1.build()
densities = IMP.atom.Selection(hier,representation_type=IMP.atom.DENSITIES).get_selected_particles()
gem = IMP.pmi.restraints.em.GaussianEMRestraint(densities,
target_fn=self.get_input_file_name('prot_gmm.txt'),
target_is_rigid_body=True)
gem.set_label("em_1")
gem.add_to_model()
gem.add_target_density_to_hierarchy(st1)
# Add a second gmm, which should become a second chain, B
gem2 = IMP.pmi.restraints.em.GaussianEMRestraint(densities,
target_fn=self.get_input_file_name('prot_gmm.txt'),
target_is_rigid_body=True)
gem2.set_label("em_2")
gem2.add_to_model()
gem2.add_target_density_to_hierarchy(st1)
        # Test that the two density hierarchies were added to the State
        # (plus the original molecule, giving three children)
self.assertEqual(len(st1.get_hierarchy().get_children()), 3)
if __name__ == '__main__':
IMP.test.main()
|
shanot/imp
|
modules/pmi/test/test_GaussianEMRestraint_rigidbody.py
|
Python
|
gpl-3.0
| 3,233
|
# Natural Language Toolkit: Confusion Matrices
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Edward Loper <edloper@gmail.com>
# Steven Bird <stevenbird1@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import print_function, unicode_literals
from nltk.probability import FreqDist
from nltk.compat import python_2_unicode_compatible
@python_2_unicode_compatible
class ConfusionMatrix(object):
"""
The confusion matrix between a list of reference values and a
corresponding list of test values. Entry *[r,t]* of this
matrix is a count of the number of times that the reference value
*r* corresponds to the test value *t*. E.g.:
>>> from nltk.metrics import ConfusionMatrix
>>> ref = 'DET NN VB DET JJ NN NN IN DET NN'.split()
>>> test = 'DET VB VB DET NN NN NN IN DET NN'.split()
>>> cm = ConfusionMatrix(ref, test)
>>> print(cm['NN', 'NN'])
3
    Note that the diagonal entries *Ri=Tj* of this matrix
    correspond to correct values, and the off-diagonal entries
    correspond to incorrect values.
"""
def __init__(self, reference, test, sort_by_count=False):
"""
Construct a new confusion matrix from a list of reference
values and a corresponding list of test values.
:type reference: list
:param reference: An ordered list of reference values.
:type test: list
:param test: A list of values to compare against the
corresponding reference values.
        :raise ValueError: If ``reference`` and ``test`` do not have
the same length.
"""
if len(reference) != len(test):
raise ValueError('Lists must have the same length.')
# Get a list of all values.
if sort_by_count:
ref_fdist = FreqDist(reference)
test_fdist = FreqDist(test)
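            # sort so that the most frequent values (reference + test
            # combined) come first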
def key(v): return -(ref_fdist[v]+test_fdist[v])
values = sorted(set(reference+test), key=key)
else:
values = sorted(set(reference+test))
# Construct a value->index dictionary
indices = dict((val,i) for (i,val) in enumerate(values))
# Make a confusion matrix table.
confusion = [[0 for val in values] for val in values]
max_conf = 0 # Maximum confusion
for w,g in zip(reference, test):
confusion[indices[w]][indices[g]] += 1
max_conf = max(max_conf, confusion[indices[w]][indices[g]])
#: A list of all values in ``reference`` or ``test``.
self._values = values
#: A dictionary mapping values in ``self._values`` to their indices.
self._indices = indices
#: The confusion matrix itself (as a list of lists of counts).
self._confusion = confusion
#: The greatest count in ``self._confusion`` (used for printing).
self._max_conf = max_conf
#: The total number of values in the confusion matrix.
self._total = len(reference)
#: The number of correct (on-diagonal) values in the matrix.
self._correct = sum(confusion[i][i] for i in range(len(values)))
def __getitem__(self, li_lj_tuple):
"""
:return: The number of times that value ``li`` was expected and
value ``lj`` was given.
:rtype: int
"""
(li, lj) = li_lj_tuple
i = self._indices[li]
j = self._indices[lj]
return self._confusion[i][j]
def __repr__(self):
return '<ConfusionMatrix: %s/%s correct>' % (self._correct,
self._total)
def __str__(self):
return self.pretty_format()
def pretty_format(self, show_percents=False, values_in_chart=True,
truncate=None, sort_by_count=False):
"""
:return: A multi-line string representation of this confusion matrix.
:type truncate: int
:param truncate: If specified, then only show the specified
number of values. Any sorting (e.g., sort_by_count)
will be performed before truncation.
:param sort_by_count: If true, then sort by the count of each
label in the reference data. I.e., labels that occur more
            frequently in the reference data will be towards the left
edge of the matrix, and labels that occur less frequently
will be towards the right edge.
@todo: add marginals?
"""
confusion = self._confusion
values = self._values
if sort_by_count:
values = sorted(values, key=lambda v:
-sum(self._confusion[self._indices[v]]))
if truncate:
values = values[:truncate]
if values_in_chart:
value_strings = ["%s" % val for val in values]
else:
value_strings = [str(n+1) for n in range(len(values))]
# Construct a format string for row values
valuelen = max(len(val) for val in value_strings)
value_format = '%' + repr(valuelen) + 's | '
# Construct a format string for matrix entries
if show_percents:
entrylen = 6
entry_format = '%5.1f%%'
zerostr = ' .'
else:
entrylen = len(repr(self._max_conf))
entry_format = '%' + repr(entrylen) + 'd'
zerostr = ' '*(entrylen-1) + '.'
# Write the column values.
s = ''
for i in range(valuelen):
s += (' '*valuelen)+' |'
for val in value_strings:
if i >= valuelen-len(val):
s += val[i-valuelen+len(val)].rjust(entrylen+1)
else:
s += ' '*(entrylen+1)
s += ' |\n'
# Write a dividing line
s += '%s-+-%s+\n' % ('-'*valuelen, '-'*((entrylen+1)*len(values)))
# Write the entries.
for val, li in zip(value_strings, values):
i = self._indices[li]
s += value_format % val
for lj in values:
j = self._indices[lj]
if confusion[i][j] == 0:
s += zerostr
elif show_percents:
s += entry_format % (100.0*confusion[i][j]/self._total)
else:
s += entry_format % confusion[i][j]
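                # mark correct (diagonal) entries with angle brackets, e.g. <3>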
if i == j:
prevspace = s.rfind(' ')
s = s[:prevspace] + '<' + s[prevspace+1:] + '>'
else: s += ' '
s += '|\n'
# Write a dividing line
s += '%s-+-%s+\n' % ('-'*valuelen, '-'*((entrylen+1)*len(values)))
# Write a key
s += '(row = reference; col = test)\n'
if not values_in_chart:
s += 'Value key:\n'
for i, value in enumerate(values):
s += '%6d: %s\n' % (i+1, value)
return s
def key(self):
values = self._values
        s = 'Value key:\n'
indexlen = len(repr(len(values)-1))
key_format = ' %'+repr(indexlen)+'d: %s\n'
for i in range(len(values)):
            s += key_format % (i, values[i])
        return s
def demo():
reference = 'DET NN VB DET JJ NN NN IN DET NN'.split()
test = 'DET VB VB DET NN NN NN IN DET NN'.split()
print('Reference =', reference)
print('Test =', test)
print('Confusion matrix:')
print(ConfusionMatrix(reference, test))
print(ConfusionMatrix(reference, test).pretty_format(sort_by_count=True))
if __name__ == '__main__':
demo()
|
MyRookie/SentimentAnalyse
|
venv/lib/python2.7/site-packages/nltk/metrics/confusionmatrix.py
|
Python
|
mit
| 7,825
|
__all__ = ["Service"]
from kokki.base import Resource, ResourceArgument, BooleanArgument
class Service(Resource):
service_name = ResourceArgument(default=lambda obj:obj.name)
enabled = ResourceArgument()
running = ResourceArgument()
pattern = ResourceArgument()
start_command = ResourceArgument()
stop_command = ResourceArgument()
restart_command = ResourceArgument()
reload_command = ResourceArgument()
status_command = ResourceArgument()
supports_restart = BooleanArgument(default=lambda obj:bool(obj.restart_command))
supports_reload = BooleanArgument(default=lambda obj:bool(obj.reload_command))
supports_status = BooleanArgument(default=lambda obj:bool(obj.status_command))
actions = ["nothing", "start", "stop", "restart", "reload"]
|
samuel/kokki
|
kokki/resources/service.py
|
Python
|
bsd-3-clause
| 795
|
from p2pool.bitcoin import networks
PARENT = networks.nets['coin42']
SHARE_PERIOD = 5 # seconds
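# 12 hours of share history at one share every SHARE_PERIOD (5) seconds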
CHAIN_LENGTH = 12*60*60//5 # shares
REAL_CHAIN_LENGTH = 12*60*60//5 # shares
TARGET_LOOKBEHIND = 20 # shares
SPREAD = 50 # blocks
IDENTIFIER = 'ff42c01442c0c0ff'.decode('hex')
PREFIX = 'ee42c014aa42c014'.decode('hex')
P2P_PORT = 8042
MIN_TARGET = 0
MAX_TARGET = 2**256//2**20 - 1
PERSIST = False
WORKER_PORT = 9042
BOOTSTRAP_ADDRS = 'p2pool.e-pool.net p2pool-eu.gotgeeks.com p2pool-us.gotgeeks.com rav3n.dtdns.net p2pool.gotgeeks.com p2pool.dtdns.net solidpool.org'.split(' ')
ANNOUNCE_CHANNEL = '#p2pool-alt'
VERSION_CHECK = lambda v: True
|
ptcrypto/p2pool-adaptive
|
p2pool/networks/coin42.py
|
Python
|
gpl-3.0
| 638
|
"""
Utility functions for matrices and designs transformation.
"""
import warnings
import numpy as np
from scipy import linalg
def normalize_matrix_on_axis(m, axis=0):
""" Normalize a 2D matrix on an axis.
Parameters
----------
m : numpy 2D array,
The matrix to normalize.
axis : integer in {0, 1}, optional
A valid axis to normalize across.
Returns
-------
ret : numpy array, shape = m.shape
The normalized matrix
Examples
--------
>>> import numpy as np
>>> from nilearn.mass_univariate.utils import normalize_matrix_on_axis
>>> X = np.array([[0, 4], [1, 0]])
>>> normalize_matrix_on_axis(X)
array([[ 0., 1.],
[ 1., 0.]])
>>> normalize_matrix_on_axis(X, axis=1)
array([[ 0., 1.],
[ 1., 0.]])
"""
if m.ndim != 2:
raise ValueError('This function only accepts 2D arrays. '
'An array of shape %r was passed.' % (m.shape,))
if axis == 0:
column_squared_norms = np.sum(m ** 2, axis=0)
# check for null columns
n_zeros_column = np.sum(column_squared_norms == 0)
if n_zeros_column > 0:
# A matrix with null column(s) cannot be normalized, but this
# is supposed to have been handled at a higher level.
            raise ValueError('The matrix cannot be normalized because it has'
                             ' %d zero column%s.'
% (n_zeros_column,
"s" if n_zeros_column > 1 else ""))
# array transposition preserves the contiguity flag of that array
ret = (m.T / np.sqrt(column_squared_norms)[:, np.newaxis]).T
elif axis == 1:
# array transposition preserves the contiguity flag of that array
ret = normalize_matrix_on_axis(m.T).T
else:
raise ValueError('axis(=%d) out of bounds' % axis)
return ret
def orthonormalize_matrix(m, tol=1.e-12):
""" Orthonormalize a matrix.
Uses a Singular Value Decomposition.
If the input matrix is rank-deficient, then its shape is cropped.
Parameters
----------
m : array-like,
The matrix to orthonormalize.
Returns
-------
ret : np.ndarray, shape = m.shape
The orthonormalized matrix.
Examples
--------
>>> import numpy as np
>>> from nilearn.mass_univariate.utils import orthonormalize_matrix
>>> X = np.array([[1, 2], [0, 1], [1, 1]])
>>> orthonormalize_matrix(X)
array([[-0.81049889, -0.0987837 ],
[-0.31970025, -0.75130448],
[-0.49079864, 0.65252078]])
>>> X = np.array([[0, 1], [4, 0]])
>>> orthonormalize_matrix(X)
array([[ 0., -1.],
[-1., 0.]])
"""
U, s, _ = linalg.svd(m, full_matrices=False)
n_eig = np.count_nonzero(s > tol)
return np.ascontiguousarray(U[:, :n_eig])
def orthogonalize_design(tested_vars, target_vars, confounding_vars=None):
"""Orthogonalize a design (tested, target, confounding variates)
- tested_vars and target_vars are normalized
- covars_orthonormalized are orthonormalized
- tested_vars and covars_orthonormalized are made orthogonal
(np.dot(tested_vars.T, covars) == 0)
Parameters
----------
tested_vars: array-like, shape=(n_samples, n_tested_vars)
        Explanatory variates, fitted and tested independently from each other.
target_vars: array-like, shape=(n_samples, n_target_vars)
Target variates to be explained by explanatory and confounding variates.
confounding_vars: array-like, shape=(n_samples, n_covars)
Confounding variates (covariates), fitted but not tested.
If None (default), no confounding variate is added to the model.
Returns
-------
tested_vars_resid_covars: np.ndarray, shape=(n_samples, n_tested_vars)
Normalized tested variates, from which the effect of the covariates
has been removed.
target_vars_resid_covars: np.ndarray, shape=(n_samples, n_target_vars)
Normalized target variates, from which the effect of the covariates
has been removed.
covars_orthonormalized: np.ndarray, shape=(n_samples, n_covars)
Confounding variates (covariates), orthonormalized.
lost_dof: int,
        Degrees of freedom that are lost during the model estimation.
Note that the tested variates are to be fitted independently so
their number does not impact the value of `lost_dof`.
"""
if confounding_vars is not None:
# step 1: extract effect of covars from target vars
covars_orthonormalized = orthonormalize_matrix(confounding_vars)
if not covars_orthonormalized.flags['C_CONTIGUOUS']:
# useful to developer
warnings.warn('Confounding variates not C_CONTIGUOUS.')
covars_orthonormalized = np.ascontiguousarray(covars_orthonormalized)
target_vars_normalized = normalize_matrix_on_axis(
target_vars).T # faster with F-ordered target_vars_chunk
if not target_vars_normalized.flags['C_CONTIGUOUS']:
# useful to developer
warnings.warn('Target variates not C_CONTIGUOUS.')
target_vars_normalized = np.ascontiguousarray(
target_vars_normalized)
beta_target_vars_covars = np.dot(target_vars_normalized,
covars_orthonormalized)
target_vars_resid_covars = target_vars_normalized - np.dot(
beta_target_vars_covars, covars_orthonormalized.T)
target_vars_resid_covars = normalize_matrix_on_axis(
target_vars_resid_covars, axis=1)
lost_dof = covars_orthonormalized.shape[1]
# step 2: extract effect of covars from tested vars
tested_vars_normalized = normalize_matrix_on_axis(tested_vars.T,
axis=1)
beta_tested_vars_covars = np.dot(tested_vars_normalized,
covars_orthonormalized)
tested_vars_resid_covars = tested_vars_normalized - np.dot(
beta_tested_vars_covars, covars_orthonormalized.T)
tested_vars_resid_covars = normalize_matrix_on_axis(
tested_vars_resid_covars, axis=1).T.copy()
else:
target_vars_resid_covars = normalize_matrix_on_axis(target_vars).T
tested_vars_resid_covars = normalize_matrix_on_axis(tested_vars).copy()
covars_orthonormalized = None
lost_dof = 0
# check arrays contiguousity (for the sake of code efficiency)
if not target_vars_resid_covars.flags['C_CONTIGUOUS']:
# useful to developer
warnings.warn('Target variates not C_CONTIGUOUS.')
target_vars_resid_covars = np.ascontiguousarray(
target_vars_resid_covars)
if not tested_vars_resid_covars.flags['C_CONTIGUOUS']:
# useful to developer
warnings.warn('Tested variates not C_CONTIGUOUS.')
tested_vars_resid_covars = np.ascontiguousarray(
tested_vars_resid_covars)
orthogonalized_design = (tested_vars_resid_covars,
target_vars_resid_covars.T,
covars_orthonormalized, lost_dof)
return orthogonalized_design
def t_score_with_covars_and_normalized_design(tested_vars, target_vars,
covars_orthonormalized=None):
"""t-score in the regression of tested variates against target variates
Covariates are taken into account (if not None).
The normalized_design case corresponds to the following assumptions:
- tested_vars and target_vars are normalized
- covars_orthonormalized are orthonormalized
- tested_vars and covars_orthonormalized are orthogonal
(np.dot(tested_vars.T, covars) == 0)
Parameters
----------
tested_vars : array-like, shape=(n_samples, n_tested_vars)
Explanatory variates.
target_vars : array-like, shape=(n_samples, n_target_vars)
        Target variates. F-ordered is better for efficient computation.
covars_orthonormalized : array-like, shape=(n_samples, n_covars) or None
Confounding variates.
Returns
-------
score : numpy.ndarray, shape=(n_target_vars, n_tested_vars)
t-scores associated with the tests of each explanatory variate against
each target variate (in the presence of covars).
"""
if covars_orthonormalized is None:
lost_dof = 0
else:
lost_dof = covars_orthonormalized.shape[1]
# Tested variates are fitted independently,
# so lost_dof is unrelated to n_tested_vars.
dof = target_vars.shape[0] - lost_dof
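    # with unit-norm variates and an orthonormal design, the regression
    # coefficients reduce to dot products, and the residual sum of squares
    # is one minus the variance explained by covariates and tested variate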
beta_target_vars_tested_vars = np.dot(target_vars.T, tested_vars)
if covars_orthonormalized is None:
rss = (1 - beta_target_vars_tested_vars ** 2)
else:
beta_target_vars_covars = np.dot(target_vars.T, covars_orthonormalized)
a2 = np.sum(beta_target_vars_covars ** 2, 1)
rss = (1 - a2[:, np.newaxis] - beta_target_vars_tested_vars ** 2)
return beta_target_vars_tested_vars * np.sqrt((dof - 1.) / rss)
|
nilearn/nilearn_sandbox
|
nilearn_sandbox/mass_univariate/utils.py
|
Python
|
bsd-3-clause
| 9,113
|
import re
from Tkinter import *
class ScrolledList:
default = "(None)"
def __init__(self, master, **options):
# Create top frame, with scrollbar and listbox
self.master = master
self.frame = frame = Frame(master)
self.frame.pack(fill="both", expand=1)
self.vbar = vbar = Scrollbar(frame, name="vbar")
self.vbar.pack(side="right", fill="y")
self.listbox = listbox = Listbox(frame, exportselection=0,
background="white")
if options:
listbox.configure(options)
listbox.pack(expand=1, fill="both")
# Tie listbox and scrollbar together
vbar["command"] = listbox.yview
listbox["yscrollcommand"] = vbar.set
# Bind events to the list box
listbox.bind("<ButtonRelease-1>", self.click_event)
listbox.bind("<Double-ButtonRelease-1>", self.double_click_event)
listbox.bind("<ButtonPress-3>", self.popup_event)
listbox.bind("<Key-Up>", self.up_event)
listbox.bind("<Key-Down>", self.down_event)
# Mark as empty
self.clear()
def close(self):
self.frame.destroy()
def clear(self):
self.listbox.delete(0, "end")
self.empty = 1
self.listbox.insert("end", self.default)
def append(self, item):
if self.empty:
self.listbox.delete(0, "end")
self.empty = 0
self.listbox.insert("end", str(item))
def get(self, index):
return self.listbox.get(index)
def click_event(self, event):
self.listbox.activate("@%d,%d" % (event.x, event.y))
index = self.listbox.index("active")
self.select(index)
self.on_select(index)
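        # returning "break" keeps Tkinter's default Listbox bindings from
        # also handling this event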
return "break"
def double_click_event(self, event):
index = self.listbox.index("active")
self.select(index)
self.on_double(index)
return "break"
menu = None
def popup_event(self, event):
if not self.menu:
self.make_menu()
menu = self.menu
self.listbox.activate("@%d,%d" % (event.x, event.y))
index = self.listbox.index("active")
self.select(index)
menu.tk_popup(event.x_root, event.y_root)
def make_menu(self):
menu = Menu(self.listbox, tearoff=0)
self.menu = menu
self.fill_menu()
def up_event(self, event):
index = self.listbox.index("active")
if self.listbox.selection_includes(index):
index = index - 1
else:
index = self.listbox.size() - 1
if index < 0:
self.listbox.bell()
else:
self.select(index)
self.on_select(index)
return "break"
def down_event(self, event):
index = self.listbox.index("active")
if self.listbox.selection_includes(index):
index = index + 1
else:
index = 0
if index >= self.listbox.size():
self.listbox.bell()
else:
self.select(index)
self.on_select(index)
return "break"
def select(self, index):
self.listbox.focus_set()
self.listbox.activate(index)
self.listbox.selection_clear(0, "end")
self.listbox.selection_set(index)
self.listbox.see(index)
# Methods to override for specific actions
def fill_menu(self):
pass
def on_select(self, index):
pass
def on_double(self, index):
pass
def _scrolled_list(parent):
root = Tk()
root.title("Test ScrolledList")
width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
root.geometry("+%d+%d"%(x, y + 150))
class MyScrolledList(ScrolledList):
def fill_menu(self): self.menu.add_command(label="right click")
def on_select(self, index): print "select", self.get(index)
def on_double(self, index): print "double", self.get(index)
scrolled_list = MyScrolledList(root)
for i in range(30):
scrolled_list.append("Item %02d" % i)
root.mainloop()
if __name__ == '__main__':
from idlelib.idle_test.htest import run
run(_scrolled_list)
|
svanschalkwyk/datafari
|
windows/python/Lib/idlelib/ScrolledList.py
|
Python
|
apache-2.0
| 4,157
|
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import math
import numpy
from . import _ni_support
from . import _nd_image
from scipy.misc import doccer
from scipy._lib._version import NumpyVersion
__all__ = ['correlate1d', 'convolve1d', 'gaussian_filter1d', 'gaussian_filter',
'prewitt', 'sobel', 'generic_laplace', 'laplace',
'gaussian_laplace', 'generic_gradient_magnitude',
'gaussian_gradient_magnitude', 'correlate', 'convolve',
'uniform_filter1d', 'uniform_filter', 'minimum_filter1d',
'maximum_filter1d', 'minimum_filter', 'maximum_filter',
'rank_filter', 'median_filter', 'percentile_filter',
'generic_filter1d', 'generic_filter']
_input_doc = \
"""input : array_like
Input array to filter."""
_axis_doc = \
"""axis : int, optional
The axis of `input` along which to calculate. Default is -1."""
_output_doc = \
"""output : array, optional
The `output` parameter passes an array in which to store the
    filter output. The output array should have a different name from
    the input array, to avoid aliasing errors."""
_size_foot_doc = \
"""size : scalar or tuple, optional
See footprint, below
footprint : array, optional
Either `size` or `footprint` must be defined. `size` gives
the shape that is taken from the input array, at every element
position, to define the input to the filter function.
`footprint` is a boolean array that specifies (implicitly) a
shape, but also which of the elements within this shape will get
passed to the filter function. Thus ``size=(n,m)`` is equivalent
to ``footprint=np.ones((n,m))``. We adjust `size` to the number
of dimensions of the input array, so that, if the input array is
shape (10,10,10), and `size` is 2, then the actual size used is
(2,2,2).
"""
_mode_doc = \
"""mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The `mode` parameter determines how the array borders are
handled, where `cval` is the value when mode is equal to
'constant'. Default is 'reflect'"""
_mode_multiple_doc = \
"""mode : str or sequence, optional
The `mode` parameter determines how the array borders are
handled. Valid modes are {'reflect', 'constant', 'nearest',
'mirror', 'wrap'}. `cval` is the value used when mode is equal to
'constant'. A list of modes with length equal to the number of
axes can be provided to specify different modes for different
axes. Default is 'reflect'"""
_cval_doc = \
"""cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0"""
_origin_doc = \
"""origin : scalar, optional
The `origin` parameter controls the placement of the filter.
    Default is 0."""
_extra_arguments_doc = \
"""extra_arguments : sequence, optional
Sequence of extra positional arguments to pass to passed function"""
_extra_keywords_doc = \
"""extra_keywords : dict, optional
dict of extra keyword arguments to pass to passed function"""
docdict = {
'input': _input_doc,
'axis': _axis_doc,
'output': _output_doc,
'size_foot': _size_foot_doc,
'mode': _mode_doc,
'mode_multiple': _mode_multiple_doc,
'cval': _cval_doc,
'origin': _origin_doc,
'extra_arguments': _extra_arguments_doc,
'extra_keywords': _extra_keywords_doc,
}
docfiller = doccer.filldoc(docdict)
@docfiller
def correlate1d(input, weights, axis=-1, output=None, mode="reflect",
cval=0.0, origin=0):
"""Calculate a one-dimensional correlation along the given axis.
The lines of the array along the given axis are correlated with the
given weights.
Parameters
----------
%(input)s
weights : array
One-dimensional sequence of numbers.
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Examples
--------
>>> from scipy.ndimage import correlate1d
>>> correlate1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3])
array([ 8, 26, 8, 12, 7, 28, 36, 9])
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
weights = numpy.asarray(weights, dtype=numpy.float64)
if weights.ndim != 1 or weights.shape[0] < 1:
raise RuntimeError('no filter weights given')
if not weights.flags.contiguous:
weights = weights.copy()
axis = _ni_support._check_axis(axis, input.ndim)
if (len(weights) // 2 + origin < 0) or (len(weights) // 2 +
origin > len(weights)):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.correlate1d(input, weights, axis, output, mode, cval,
origin)
return return_value
@docfiller
def convolve1d(input, weights, axis=-1, output=None, mode="reflect",
cval=0.0, origin=0):
"""Calculate a one-dimensional convolution along the given axis.
The lines of the array along the given axis are convolved with the
given weights.
Parameters
----------
%(input)s
weights : ndarray
One-dimensional sequence of numbers.
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Returns
-------
convolve1d : ndarray
Convolved array with same shape as input
Examples
--------
>>> from scipy.ndimage import convolve1d
>>> convolve1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3])
array([14, 24, 4, 13, 12, 36, 27, 0])
"""
weights = weights[::-1]
origin = -origin
if not len(weights) & 1:
origin -= 1
return correlate1d(input, weights, axis, output, mode, cval, origin)
def _gaussian_kernel1d(sigma, order, radius):
"""
Computes a 1D Gaussian convolution kernel.
"""
if order < 0:
raise ValueError('order must be non-negative')
p = numpy.polynomial.Polynomial([0, 0, -0.5 / (sigma * sigma)])
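    # p(x) = -x**2 / (2 * sigma**2), so exp(p(x)) is the unnormalized Gaussian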
x = numpy.arange(-radius, radius + 1)
phi_x = numpy.exp(p(x), dtype=numpy.double)
phi_x /= phi_x.sum()
if order > 0:
q = numpy.polynomial.Polynomial([1])
p_deriv = p.deriv()
for _ in range(order):
# f(x) = q(x) * phi(x) = q(x) * exp(p(x))
# f'(x) = (q'(x) + q(x) * p'(x)) * phi(x)
q = q.deriv() + q * p_deriv
phi_x *= q(x)
return phi_x
@docfiller
def gaussian_filter1d(input, sigma, axis=-1, order=0, output=None,
mode="reflect", cval=0.0, truncate=4.0):
"""One-dimensional Gaussian filter.
Parameters
----------
%(input)s
sigma : scalar
standard deviation for Gaussian kernel
%(axis)s
order : int, optional
An order of 0 corresponds to convolution with a Gaussian
kernel. A positive order corresponds to convolution with
that derivative of a Gaussian.
%(output)s
%(mode)s
%(cval)s
truncate : float, optional
Truncate the filter at this many standard deviations.
Default is 4.0.
Returns
-------
gaussian_filter1d : ndarray
Examples
--------
>>> from scipy.ndimage import gaussian_filter1d
>>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 1)
array([ 1.42704095, 2.06782203, 3. , 3.93217797, 4.57295905])
>>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 4)
array([ 2.91948343, 2.95023502, 3. , 3.04976498, 3.08051657])
>>> import matplotlib.pyplot as plt
>>> np.random.seed(280490)
>>> x = np.random.randn(101).cumsum()
>>> y3 = gaussian_filter1d(x, 3)
>>> y6 = gaussian_filter1d(x, 6)
>>> plt.plot(x, 'k', label='original data')
>>> plt.plot(y3, '--', label='filtered, sigma=3')
>>> plt.plot(y6, ':', label='filtered, sigma=6')
>>> plt.legend()
>>> plt.grid()
>>> plt.show()
"""
sd = float(sigma)
# make the radius of the filter equal to truncate standard deviations
lw = int(truncate * sd + 0.5)
# Since we are calling correlate, not convolve, revert the kernel
weights = _gaussian_kernel1d(sigma, order, lw)[::-1]
return correlate1d(input, weights, axis, output, mode, cval, 0)
@docfiller
def gaussian_filter(input, sigma, order=0, output=None,
mode="reflect", cval=0.0, truncate=4.0):
"""Multidimensional Gaussian filter.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
Standard deviation for Gaussian kernel. The standard
deviations of the Gaussian filter are given for each axis as a
sequence, or as a single number, in which case it is equal for
all axes.
order : int or sequence of ints, optional
The order of the filter along each axis is given as a sequence
of integers, or as a single number. An order of 0 corresponds
to convolution with a Gaussian kernel. A positive order
corresponds to convolution with that derivative of a Gaussian.
%(output)s
%(mode_multiple)s
%(cval)s
truncate : float
Truncate the filter at this many standard deviations.
Default is 4.0.
Returns
-------
gaussian_filter : ndarray
Returned array of same shape as `input`.
Notes
-----
The multidimensional filter is implemented as a sequence of
one-dimensional convolution filters. The intermediate arrays are
stored in the same data type as the output. Therefore, for output
types with a limited precision, the results may be imprecise
because intermediate results may be stored with insufficient
precision.
Examples
--------
>>> from scipy.ndimage import gaussian_filter
>>> a = np.arange(50, step=2).reshape((5,5))
>>> a
array([[ 0, 2, 4, 6, 8],
[10, 12, 14, 16, 18],
[20, 22, 24, 26, 28],
[30, 32, 34, 36, 38],
[40, 42, 44, 46, 48]])
>>> gaussian_filter(a, sigma=1)
array([[ 4, 6, 8, 9, 11],
[10, 12, 14, 15, 17],
[20, 22, 24, 25, 27],
[29, 31, 33, 34, 36],
[35, 37, 39, 40, 42]])
>>> from scipy import misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = gaussian_filter(ascent, sigma=5)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
output, return_value = _ni_support._get_output(output, input)
orders = _ni_support._normalize_sequence(order, input.ndim)
sigmas = _ni_support._normalize_sequence(sigma, input.ndim)
modes = _ni_support._normalize_sequence(mode, input.ndim)
axes = list(range(input.ndim))
axes = [(axes[ii], sigmas[ii], orders[ii], modes[ii])
for ii in range(len(axes)) if sigmas[ii] > 1e-15]
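    # filter one axis at a time, skipping axes with (near-)zero sigma;
    # each pass reuses `output` as the input of the next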
if len(axes) > 0:
for axis, sigma, order, mode in axes:
gaussian_filter1d(input, sigma, axis, order, output,
mode, cval, truncate)
input = output
else:
output[...] = input[...]
return return_value
@docfiller
def prewitt(input, axis=-1, output=None, mode="reflect", cval=0.0):
"""Calculate a Prewitt filter.
Parameters
----------
%(input)s
%(axis)s
%(output)s
%(mode_multiple)s
%(cval)s
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.prewitt(ascent)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
axis = _ni_support._check_axis(axis, input.ndim)
output, return_value = _ni_support._get_output(output, input)
modes = _ni_support._normalize_sequence(mode, input.ndim)
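    # derivative kernel [-1, 0, 1] along `axis`, smoothing kernel [1, 1, 1]
    # along every other axis: the separable Prewitt operator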
correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0)
axes = [ii for ii in range(input.ndim) if ii != axis]
for ii in axes:
correlate1d(output, [1, 1, 1], ii, output, modes[ii], cval, 0,)
return return_value
@docfiller
def sobel(input, axis=-1, output=None, mode="reflect", cval=0.0):
"""Calculate a Sobel filter.
Parameters
----------
%(input)s
%(axis)s
%(output)s
%(mode_multiple)s
%(cval)s
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.sobel(ascent)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
axis = _ni_support._check_axis(axis, input.ndim)
output, return_value = _ni_support._get_output(output, input)
modes = _ni_support._normalize_sequence(mode, input.ndim)
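    # same separable construction as `prewitt`, but with [1, 2, 1] smoothing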
correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0)
axes = [ii for ii in range(input.ndim) if ii != axis]
for ii in axes:
correlate1d(output, [1, 2, 1], ii, output, modes[ii], cval, 0)
return return_value
@docfiller
def generic_laplace(input, derivative2, output=None, mode="reflect",
cval=0.0,
extra_arguments=(),
extra_keywords=None):
"""
N-dimensional Laplace filter using a provided second derivative function.
Parameters
----------
%(input)s
derivative2 : callable
Callable with the following signature::
derivative2(input, axis, output, mode, cval,
*extra_arguments, **extra_keywords)
See `extra_arguments`, `extra_keywords` below.
%(output)s
%(mode_multiple)s
%(cval)s
%(extra_keywords)s
%(extra_arguments)s
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
output, return_value = _ni_support._get_output(output, input)
axes = list(range(input.ndim))
if len(axes) > 0:
modes = _ni_support._normalize_sequence(mode, len(axes))
derivative2(input, axes[0], output, modes[0], cval,
*extra_arguments, **extra_keywords)
for ii in range(1, len(axes)):
tmp = derivative2(input, axes[ii], output.dtype, modes[ii], cval,
*extra_arguments, **extra_keywords)
output += tmp
else:
output[...] = input[...]
return return_value
@docfiller
def laplace(input, output=None, mode="reflect", cval=0.0):
"""N-dimensional Laplace filter based on approximate second derivatives.
Parameters
----------
%(input)s
%(output)s
%(mode_multiple)s
%(cval)s
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.laplace(ascent)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
def derivative2(input, axis, output, mode, cval):
return correlate1d(input, [1, -2, 1], axis, output, mode, cval, 0)
return generic_laplace(input, derivative2, output, mode, cval)
@docfiller
def gaussian_laplace(input, sigma, output=None, mode="reflect",
cval=0.0, **kwargs):
"""Multidimensional Laplace filter using gaussian second derivatives.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
The standard deviations of the Gaussian filter are given for
each axis as a sequence, or as a single number, in which case
it is equal for all axes.
%(output)s
%(mode_multiple)s
%(cval)s
Extra keyword arguments will be passed to gaussian_filter().
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> ascent = misc.ascent()
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> result = ndimage.gaussian_laplace(ascent, sigma=1)
>>> ax1.imshow(result)
>>> result = ndimage.gaussian_laplace(ascent, sigma=3)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
def derivative2(input, axis, output, mode, cval, sigma, **kwargs):
order = [0] * input.ndim
order[axis] = 2
return gaussian_filter(input, sigma, order, output, mode, cval,
**kwargs)
return generic_laplace(input, derivative2, output, mode, cval,
extra_arguments=(sigma,),
extra_keywords=kwargs)
@docfiller
def generic_gradient_magnitude(input, derivative, output=None,
mode="reflect", cval=0.0,
extra_arguments=(), extra_keywords=None):
"""Gradient magnitude using a provided gradient function.
Parameters
----------
%(input)s
derivative : callable
Callable with the following signature::
derivative(input, axis, output, mode, cval,
*extra_arguments, **extra_keywords)
See `extra_arguments`, `extra_keywords` below.
`derivative` can assume that `input` and `output` are ndarrays.
Note that the output from `derivative` is modified inplace;
be careful to copy important inputs before returning them.
%(output)s
%(mode_multiple)s
%(cval)s
%(extra_keywords)s
%(extra_arguments)s
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
output, return_value = _ni_support._get_output(output, input)
axes = list(range(input.ndim))
if len(axes) > 0:
modes = _ni_support._normalize_sequence(mode, len(axes))
derivative(input, axes[0], output, modes[0], cval,
*extra_arguments, **extra_keywords)
numpy.multiply(output, output, output)
for ii in range(1, len(axes)):
tmp = derivative(input, axes[ii], output.dtype, modes[ii], cval,
*extra_arguments, **extra_keywords)
numpy.multiply(tmp, tmp, tmp)
output += tmp
# This allows the sqrt to work with a different default casting
numpy.sqrt(output, output, casting='unsafe')
else:
output[...] = input[...]
return return_value
@docfiller
def gaussian_gradient_magnitude(input, sigma, output=None,
mode="reflect", cval=0.0, **kwargs):
"""Multidimensional gradient magnitude using Gaussian derivatives.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
The standard deviations of the Gaussian filter are given for
each axis as a sequence, or as a single number, in which case
        it is equal for all axes.
%(output)s
%(mode_multiple)s
%(cval)s
Extra keyword arguments will be passed to gaussian_filter().
Returns
-------
gaussian_gradient_magnitude : ndarray
Filtered array. Has the same shape as `input`.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.gaussian_gradient_magnitude(ascent, sigma=5)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
def derivative(input, axis, output, mode, cval, sigma, **kwargs):
order = [0] * input.ndim
order[axis] = 1
return gaussian_filter(input, sigma, order, output, mode,
cval, **kwargs)
return generic_gradient_magnitude(input, derivative, output, mode,
cval, extra_arguments=(sigma,),
extra_keywords=kwargs)
def _correlate_or_convolve(input, weights, output, mode, cval, origin,
convolution):
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
origins = _ni_support._normalize_sequence(origin, input.ndim)
weights = numpy.asarray(weights, dtype=numpy.float64)
wshape = [ii for ii in weights.shape if ii > 0]
if len(wshape) != input.ndim:
raise RuntimeError('filter weights array has incorrect shape.')
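    # for convolution, mirror the kernel along every axis and negate the
    # origins (shifting by one on even-sized axes) so that the correlation
    # routine below computes a true convolution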
if convolution:
weights = weights[tuple([slice(None, None, -1)] * weights.ndim)]
for ii in range(len(origins)):
origins[ii] = -origins[ii]
if not weights.shape[ii] & 1:
origins[ii] -= 1
for origin, lenw in zip(origins, wshape):
if (lenw // 2 + origin < 0) or (lenw // 2 + origin > lenw):
raise ValueError('invalid origin')
if not weights.flags.contiguous:
weights = weights.copy()
output, return_value = _ni_support._get_output(output, input)
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.correlate(input, weights, output, mode, cval, origins)
return return_value
@docfiller
def correlate(input, weights, output=None, mode='reflect', cval=0.0,
origin=0):
"""
Multi-dimensional correlation.
The array is correlated with the given kernel.
Parameters
----------
input : array-like
input array to filter
weights : ndarray
array of weights, same number of dimensions as input
output : array, optional
The ``output`` parameter passes an array in which to store the
        filter output. The output array should have a different name from
        the input array, to avoid aliasing errors.
mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
The ``mode`` parameter determines how the array borders are
handled, where ``cval`` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if ``mode`` is 'constant'. Default
is 0.0
origin : scalar, optional
The ``origin`` parameter controls the placement of the filter.
Default 0
See Also
--------
convolve : Convolve an image with a kernel.
"""
return _correlate_or_convolve(input, weights, output, mode, cval,
origin, False)
@docfiller
def convolve(input, weights, output=None, mode='reflect', cval=0.0,
origin=0):
"""
Multidimensional convolution.
The array is convolved with the given kernel.
Parameters
----------
input : array_like
Input array to filter.
weights : array_like
Array of weights, same number of dimensions as input
output : ndarray, optional
The `output` parameter passes an array in which to store the
        filter output. The output array should have a different name from
        the input array, to avoid aliasing errors.
mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
the `mode` parameter determines how the array borders are
handled. For 'constant' mode, values beyond borders are set to be
`cval`. Default is 'reflect'.
cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0
origin : array_like, optional
The `origin` parameter controls the placement of the filter,
relative to the centre of the current element of the input.
Default of 0 is equivalent to ``(0,)*input.ndim``.
Returns
-------
result : ndarray
The result of convolution of `input` with `weights`.
See Also
--------
correlate : Correlate an image with a kernel.
Notes
-----
Each value in result is :math:`C_i = \\sum_j{I_{i+k-j} W_j}`, where
W is the `weights` kernel,
j is the n-D spatial index over :math:`W`,
I is the `input` and k is the coordinate of the center of
W, specified by `origin` in the input parameters.
Examples
--------
    Perhaps the simplest case to understand is ``mode='constant', cval=0.0``,
    because in this case borders (i.e. where the `weights` kernel, centered
    on any one value, extends beyond an edge of `input`) are treated as zeros.
>>> a = np.array([[1, 2, 0, 0],
... [5, 3, 0, 4],
... [0, 0, 0, 7],
... [9, 3, 0, 0]])
>>> k = np.array([[1,1,1],[1,1,0],[1,0,0]])
>>> from scipy import ndimage
>>> ndimage.convolve(a, k, mode='constant', cval=0.0)
array([[11, 10, 7, 4],
[10, 3, 11, 11],
[15, 12, 14, 7],
[12, 3, 7, 0]])
Setting ``cval=1.0`` is equivalent to padding the outer edge of `input`
with 1.0's (and then extracting only the original region of the result).
>>> ndimage.convolve(a, k, mode='constant', cval=1.0)
array([[13, 11, 8, 7],
[11, 3, 11, 14],
[16, 12, 14, 10],
[15, 6, 10, 5]])
With ``mode='reflect'`` (the default), outer values are reflected at the
edge of `input` to fill in missing values.
>>> b = np.array([[2, 0, 0],
... [1, 0, 0],
... [0, 0, 0]])
>>> k = np.array([[0,1,0], [0,1,0], [0,1,0]])
>>> ndimage.convolve(b, k, mode='reflect')
array([[5, 0, 0],
[3, 0, 0],
[1, 0, 0]])
This includes diagonally at the corners.
>>> k = np.array([[1,0,0],[0,1,0],[0,0,1]])
>>> ndimage.convolve(b, k)
array([[4, 2, 0],
[3, 2, 0],
[1, 1, 0]])
    With ``mode='nearest'``, the single value nearest to an edge in
    `input` is repeated as many times as needed to match the overlapping
`weights`.
>>> c = np.array([[2, 0, 1],
... [1, 0, 0],
... [0, 0, 0]])
>>> k = np.array([[0, 1, 0],
... [0, 1, 0],
... [0, 1, 0],
... [0, 1, 0],
... [0, 1, 0]])
>>> ndimage.convolve(c, k, mode='nearest')
array([[7, 0, 3],
[5, 0, 2],
[3, 0, 1]])
"""
return _correlate_or_convolve(input, weights, output, mode, cval,
origin, True)
@docfiller
def uniform_filter1d(input, size, axis=-1, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a one-dimensional uniform filter along the given axis.
The lines of the array along the given axis are filtered with a
uniform filter of given size.
Parameters
----------
%(input)s
size : int
length of uniform filter
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Examples
--------
>>> from scipy.ndimage import uniform_filter1d
>>> uniform_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
array([4, 3, 4, 1, 4, 6, 6, 3])
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
axis = _ni_support._check_axis(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
output, return_value = _ni_support._get_output(output, input)
if (size // 2 + origin < 0) or (size // 2 + origin >= size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.uniform_filter1d(input, size, axis, output, mode, cval,
origin)
return return_value
@docfiller
def uniform_filter(input, size=3, output=None, mode="reflect",
cval=0.0, origin=0):
"""Multi-dimensional uniform filter.
Parameters
----------
%(input)s
size : int or sequence of ints, optional
The sizes of the uniform filter are given for each axis as a
sequence, or as a single number, in which case the size is
equal for all axes.
%(output)s
%(mode_multiple)s
%(cval)s
%(origin)s
Returns
-------
uniform_filter : ndarray
Filtered array. Has the same shape as `input`.
Notes
-----
The multi-dimensional filter is implemented as a sequence of
one-dimensional uniform filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.uniform_filter(ascent, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
output, return_value = _ni_support._get_output(output, input)
sizes = _ni_support._normalize_sequence(size, input.ndim)
origins = _ni_support._normalize_sequence(origin, input.ndim)
modes = _ni_support._normalize_sequence(mode, input.ndim)
axes = list(range(input.ndim))
axes = [(axes[ii], sizes[ii], origins[ii], modes[ii])
for ii in range(len(axes)) if sizes[ii] > 1]
if len(axes) > 0:
for axis, size, origin, mode in axes:
uniform_filter1d(input, int(size), axis, output, mode,
cval, origin)
input = output
else:
output[...] = input[...]
return return_value
@docfiller
def minimum_filter1d(input, size, axis=-1, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a one-dimensional minimum filter along the given axis.
The lines of the array along the given axis are filtered with a
minimum filter of given size.
Parameters
----------
%(input)s
size : int
length along which to calculate 1D minimum
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Notes
-----
This function implements the MINLIST algorithm [1]_, as described by
Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
the `input` length, regardless of filter size.
References
----------
.. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
.. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html
Examples
--------
>>> from scipy.ndimage import minimum_filter1d
>>> minimum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
array([2, 0, 0, 0, 1, 1, 0, 0])
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
axis = _ni_support._check_axis(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
output, return_value = _ni_support._get_output(output, input)
if (size // 2 + origin < 0) or (size // 2 + origin >= size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
origin, 1)
return return_value
@docfiller
def maximum_filter1d(input, size, axis=-1, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a one-dimensional maximum filter along the given axis.
The lines of the array along the given axis are filtered with a
maximum filter of given size.
Parameters
----------
%(input)s
size : int
Length along which to calculate the 1-D maximum.
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Returns
-------
maximum1d : ndarray, None
Maximum-filtered array with same shape as input.
None if `output` is not None
Notes
-----
This function implements the MAXLIST algorithm [1]_, as described by
Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
the `input` length, regardless of filter size.
References
----------
.. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
.. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html
Examples
--------
>>> from scipy.ndimage import maximum_filter1d
>>> maximum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
array([8, 8, 8, 4, 9, 9, 9, 9])
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
axis = _ni_support._check_axis(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
output, return_value = _ni_support._get_output(output, input)
if (size // 2 + origin < 0) or (size // 2 + origin >= size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
origin, 0)
return return_value
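# Conceptual sketch of the MINLIST idea cited in the Notes above (our
# illustration; the C implementation differs in border and origin
# handling): a monotonic deque yields every window minimum in O(n) total.
# Only the len(seq) - size + 1 fully interior windows are returned.
def _sliding_min_sketch(seq, size):
    from collections import deque
    window = deque()  # candidate indices; their values increase strictly
    out = []
    for i, value in enumerate(seq):
        while window and seq[window[-1]] >= value:
            window.pop()          # dominated candidates can never win
        window.append(i)
        if window[0] <= i - size:
            window.popleft()      # oldest candidate left the window
        if i >= size - 1:
            out.append(seq[window[0]])
    return out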
def _min_or_max_filter(input, size, footprint, structure, output, mode,
cval, origin, minimum):
if structure is None:
if footprint is None:
if size is None:
raise RuntimeError("no footprint provided")
separable = True
else:
footprint = numpy.asarray(footprint, dtype=bool)
if not footprint.any():
raise ValueError("All-zero footprint is not supported.")
if footprint.all():
size = footprint.shape
footprint = None
separable = True
else:
separable = False
else:
structure = numpy.asarray(structure, dtype=numpy.float64)
separable = False
if footprint is None:
footprint = numpy.ones(structure.shape, bool)
else:
footprint = numpy.asarray(footprint, dtype=bool)
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
origins = _ni_support._normalize_sequence(origin, input.ndim)
if separable:
sizes = _ni_support._normalize_sequence(size, input.ndim)
modes = _ni_support._normalize_sequence(mode, input.ndim)
axes = list(range(input.ndim))
axes = [(axes[ii], sizes[ii], origins[ii], modes[ii])
for ii in range(len(axes)) if sizes[ii] > 1]
if minimum:
filter_ = minimum_filter1d
else:
filter_ = maximum_filter1d
if len(axes) > 0:
for axis, size, origin, mode in axes:
filter_(input, int(size), axis, output, mode, cval, origin)
input = output
else:
output[...] = input[...]
else:
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
if structure is not None:
if len(structure.shape) != input.ndim:
raise RuntimeError('structure array has incorrect shape')
if not structure.flags.contiguous:
structure = structure.copy()
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter(input, footprint, structure, output,
mode, cval, origins, minimum)
return return_value
@docfiller
def minimum_filter(input, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a multi-dimensional minimum filter.
Parameters
----------
%(input)s
%(size_foot)s
%(output)s
%(mode_multiple)s
%(cval)s
%(origin)s
Returns
-------
minimum_filter : ndarray
Filtered array. Has the same shape as `input`.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.minimum_filter(ascent, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
return _min_or_max_filter(input, size, footprint, None, output, mode,
cval, origin, 1)
@docfiller
def maximum_filter(input, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a multi-dimensional maximum filter.
Parameters
----------
%(input)s
%(size_foot)s
%(output)s
%(mode_multiple)s
%(cval)s
%(origin)s
Returns
-------
maximum_filter : ndarray
Filtered array. Has the same shape as `input`.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.maximum_filter(ascent, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
return _min_or_max_filter(input, size, footprint, None, output, mode,
cval, origin, 0)
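# Duality sketch (illustrative helper, not part of the scipy API): taking
# a minimum is the same as negating, taking a maximum, and negating back,
# for any size or footprint, since 'reflect' borders are unaffected.
def _demo_min_max_duality():
    a = numpy.random.RandomState(0).rand(6, 6)
    assert numpy.allclose(minimum_filter(a, size=3),
                          -maximum_filter(-a, size=3))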
@docfiller
def _rank_filter(input, rank, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0, operation='rank'):
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
origins = _ni_support._normalize_sequence(origin, input.ndim)
if footprint is None:
if size is None:
raise RuntimeError("no footprint or filter size provided")
sizes = _ni_support._normalize_sequence(size, input.ndim)
footprint = numpy.ones(sizes, dtype=bool)
else:
footprint = numpy.asarray(footprint, dtype=bool)
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('filter footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
filter_size = numpy.where(footprint, 1, 0).sum()
if operation == 'median':
rank = filter_size // 2
elif operation == 'percentile':
percentile = rank
if percentile < 0.0:
percentile += 100.0
if percentile < 0 or percentile > 100:
raise RuntimeError('invalid percentile')
if percentile == 100.0:
rank = filter_size - 1
else:
rank = int(float(filter_size) * percentile / 100.0)
if rank < 0:
rank += filter_size
if rank < 0 or rank >= filter_size:
raise RuntimeError('rank not within filter footprint size')
if rank == 0:
return minimum_filter(input, None, footprint, output, mode, cval,
origins)
elif rank == filter_size - 1:
return maximum_filter(input, None, footprint, output, mode, cval,
origins)
else:
output, return_value = _ni_support._get_output(output, input)
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.rank_filter(input, rank, footprint, output, mode, cval,
origins)
return return_value
@docfiller
def rank_filter(input, rank, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a multi-dimensional rank filter.
Parameters
----------
%(input)s
rank : int
        The rank parameter may be less than zero, i.e., rank = -1
        indicates the largest element.
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Returns
-------
rank_filter : ndarray
Filtered array. Has the same shape as `input`.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.rank_filter(ascent, rank=42, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
return _rank_filter(input, rank, size, footprint, output, mode, cval,
origin, 'rank')
@docfiller
def median_filter(input, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""
Calculate a multidimensional median filter.
Parameters
----------
%(input)s
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Returns
-------
median_filter : ndarray
Filtered array. Has the same shape as `input`.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.median_filter(ascent, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
return _rank_filter(input, 0, size, footprint, output, mode, cval,
origin, 'median')
@docfiller
def percentile_filter(input, percentile, size=None, footprint=None,
output=None, mode="reflect", cval=0.0, origin=0):
"""Calculate a multi-dimensional percentile filter.
Parameters
----------
%(input)s
percentile : scalar
        The percentile parameter may be less than zero, i.e.,
        percentile = -20 equals percentile = 80
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Returns
-------
percentile_filter : ndarray
Filtered array. Has the same shape as `input`.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.percentile_filter(ascent, percentile=20, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
return _rank_filter(input, percentile, size, footprint, output, mode,
cval, origin, 'percentile')
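# Relation sketch (illustrative helper, not part of the scipy API):
# _rank_filter above maps 'median' to rank filter_size // 2 and percentile
# p to rank int(filter_size * p / 100), so with a 3x3 footprint (9
# samples) the median is rank 4 and the 20th percentile is rank 1.
def _demo_rank_filter_relations():
    a = numpy.arange(49, dtype=numpy.float64).reshape(7, 7)
    assert numpy.array_equal(median_filter(a, size=3),
                             rank_filter(a, 4, size=3))
    assert numpy.array_equal(percentile_filter(a, 20, size=3),
                             rank_filter(a, 1, size=3))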
@docfiller
def generic_filter1d(input, function, filter_size, axis=-1,
output=None, mode="reflect", cval=0.0, origin=0,
extra_arguments=(), extra_keywords=None):
"""Calculate a one-dimensional filter along the given axis.
    `generic_filter1d` iterates over the lines of the array, calling the
    given function at each line. The function receives two arguments: the
    input line and the output line. Both are 1-D double arrays. The
    input line is extended appropriately according to the filter size
    and origin. The output line must be modified in-place with the
    result.
Parameters
----------
%(input)s
function : {callable, scipy.LowLevelCallable}
Function to apply along given axis.
filter_size : scalar
Length of the filter.
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
%(extra_arguments)s
%(extra_keywords)s
Notes
-----
This function also accepts low-level callback functions with one of
the following signatures and wrapped in `scipy.LowLevelCallable`:
.. code:: c
int function(double *input_line, npy_intp input_length,
double *output_line, npy_intp output_length,
void *user_data)
int function(double *input_line, intptr_t input_length,
double *output_line, intptr_t output_length,
void *user_data)
The calling function iterates over the lines of the input and output
arrays, calling the callback function at each line. The current line
is extended according to the border conditions set by the calling
function, and the result is copied into the array that is passed
through ``input_line``. The length of the input line (after extension)
is passed through ``input_length``. The callback function should apply
the filter and store the result in the array passed through
``output_line``. The length of the output line is passed through
``output_length``. ``user_data`` is the data pointer provided
to `scipy.LowLevelCallable` as-is.
The callback function must return an integer error status that is zero
if something went wrong and one otherwise. If an error occurs, you should
normally set the python error status with an informative message
before returning, otherwise a default error message is set by the
calling function.
In addition, some other low-level function pointer specifications
are accepted, but these are for backward compatibility only and should
not be used in new code.
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
if filter_size < 1:
raise RuntimeError('invalid filter size')
axis = _ni_support._check_axis(axis, input.ndim)
if (filter_size // 2 + origin < 0) or (filter_size // 2 + origin >=
filter_size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.generic_filter1d(input, function, filter_size, axis, output,
mode, cval, origin, extra_arguments,
extra_keywords)
return return_value
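# Usage sketch with a plain Python callback (the helper and callback names
# are ours): the function receives the extended input line and must fill
# the output line in place, as described in the docstring above.
def _demo_generic_filter1d_moving_sum():
    def moving_sum(input_line, output_line):
        # input_line already carries filter_size // 2 extra samples per side
        for i in range(output_line.size):
            output_line[i] = input_line[i:i + 3].sum()
    x = numpy.array([0., 1., 2., 3.])
    # the default 'reflect' extension makes the line [0, 0, 1, 2, 3, 3]
    assert numpy.allclose(generic_filter1d(x, moving_sum, 3),
                          [1., 3., 6., 8.])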
@docfiller
def generic_filter(input, function, size=None, footprint=None,
output=None, mode="reflect", cval=0.0, origin=0,
extra_arguments=(), extra_keywords=None):
"""Calculate a multi-dimensional filter using the given function.
At each element the provided function is called. The input values
within the filter footprint at that element are passed to the function
as a 1D array of double values.
Parameters
----------
%(input)s
function : {callable, scipy.LowLevelCallable}
Function to apply at each element.
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
%(extra_arguments)s
%(extra_keywords)s
Notes
-----
This function also accepts low-level callback functions with one of
the following signatures and wrapped in `scipy.LowLevelCallable`:
.. code:: c
int callback(double *buffer, npy_intp filter_size,
double *return_value, void *user_data)
int callback(double *buffer, intptr_t filter_size,
double *return_value, void *user_data)
The calling function iterates over the elements of the input and
output arrays, calling the callback function at each element. The
elements within the footprint of the filter at the current element are
passed through the ``buffer`` parameter, and the number of elements
within the footprint through ``filter_size``. The calculated value is
returned in ``return_value``. ``user_data`` is the data pointer provided
to `scipy.LowLevelCallable` as-is.
The callback function must return an integer error status that is zero
if something went wrong and one otherwise. If an error occurs, you should
normally set the python error status with an informative message
before returning, otherwise a default error message is set by the
calling function.
In addition, some other low-level function pointer specifications
are accepted, but these are for backward compatibility only and should
not be used in new code.
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
origins = _ni_support._normalize_sequence(origin, input.ndim)
if footprint is None:
if size is None:
raise RuntimeError("no footprint or filter size provided")
sizes = _ni_support._normalize_sequence(size, input.ndim)
footprint = numpy.ones(sizes, dtype=bool)
else:
footprint = numpy.asarray(footprint, dtype=bool)
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('filter footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
output, return_value = _ni_support._get_output(output, input)
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.generic_filter(input, function, footprint, output, mode,
cval, origins, extra_arguments, extra_keywords)
return return_value
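# Usage sketch (illustrative, not part of the scipy API): the callback
# gets the footprint values as a flat double array, so numpy reductions
# apply directly; here a local range (max minus min) over a 3x3
# neighbourhood.
def _demo_generic_filter_local_range():
    a = numpy.arange(16, dtype=numpy.float64).reshape(4, 4)
    local_range = generic_filter(a, lambda buf: buf.max() - buf.min(),
                                 size=3)
    assert local_range.shape == a.shape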
|
mbayon/TFG-MachineLearning
|
venv/lib/python3.6/site-packages/scipy/ndimage/filters.py
|
Python
|
mit
| 52,520
|
# -*- coding: utf-8 -*-
from django.core.management.base import NoArgsCommand, BaseCommand
from ffclub.newsletter.utils import generate_newsletter
class Command(BaseCommand):
help = 'Generate Newsletter Static Page'
option_list = NoArgsCommand.option_list
def handle(self, *args, **options):
issue = args[0]
generate_newsletter(issue)
|
elin-moco/ffclub
|
ffclub/newsletter/management/commands/gen_newsletter.py
|
Python
|
bsd-3-clause
| 366
|
#substitution cipher
#The user will supply an alphabet as a key.
import random
#You will need to write the methods to encode and decode given a key.
#-------------------------------------------------------------------
def encode(message, key):
alpha = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
message = message.upper()
secret = ""
# To look at every letter in a message
for letter in message:
#To find the spot of a letter
spot = alpha.find(letter) #this is the numbered spot (0 - 25) of your letter in the alphabet.
#To print the spot letter in the key
if(spot >= 0 ):
secret = secret + key[spot]
else:
secret = secret + letter
return secret
def decode(message, key):
    alpha = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    message = message.upper()
    plaintext = ""
    for letter in message:
        # Reverse of encode: the letter's spot in the key maps back to alpha
        spot = key.find(letter)
        plaintext = plaintext + (alpha[spot] if spot >= 0 else letter)
    return plaintext
#--------------------------------------------------------------------
#Generates a key using a password.
#The first letters of the alphabet come from the password. Duplicate letters are ignored
#The remaining letters of the alphabet are placed in order to generate the key
def generatePasswordKey(password =""):
alpha = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
password = password.upper()
key = ""
for letter in password:
if key.find(letter) == -1: #letter not yet in key
key = key + letter
for letter in alpha:
if key.find(letter) == -1: #letter not yet in key
key = key + letter
return key
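# Worked example (our illustration, not part of the original exercise):
# the first occurrence of each password letter leads, then the unused
# letters follow in alphabetical order.
def demoPasswordKey():
    assert generatePasswordKey("TOPSECRET") == "TOPSECRABDFGHIJKLMNQUVWXYZ"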
#Generates a random permutation of the alphabet and returns the key.
def generateRandomKey():
alpha = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
key = ""
alphaList =[]
for letter in alpha:
alphaList.append(letter)
random.shuffle(alphaList)
for letter in alphaList:
key += letter
return key
def main():
message = input("Enter a message: ")
key = input("Enter a key: ")
secret = encode(message, key)
print ("Encrypted:", secret)
    plaintext = decode(secret, key)
    print ("Decrypted:", plaintext)
if __name__ == "__main__":
    main()
|
DerekBabb/CyberSecurity
|
Classic_Cryptography/code/SubstitutionCipher.py
|
Python
|
gpl-3.0
| 2,066
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2014-2022 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Module exports :class:`PankowPechmann2004`.
"""
import numpy as np
from scipy.constants import g as gravity
from openquake.hazardlib.gsim.base import GMPE, CoeffsTable
from openquake.hazardlib import const
from openquake.hazardlib.imt import PGA, PGV, SA
class PankowPechmann2004(GMPE):
"""
Implements GMPE developed by Kris L. Pankow and James C. Pechmann
and published as "The SEA99 Ground-Motion Predictive Relations for
Extensional Tectonic Regimes: Revisions and a New Peak Ground Velocity
Relation"
Bulletin of the Seismological Society of America,
Vol. 94, No. 1, pp. 341–348, February 2004
"""
#: Supported tectonic region type is active shallow crust,
DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.ACTIVE_SHALLOW_CRUST
#: TO CHECK PSV!
DEFINED_FOR_INTENSITY_MEASURE_TYPES = {PGA, PGV, SA}
#: Supported intensity measure component is VECTORIAL
#: :attr:`~openquake.hazardlib.const.IMC.VECTORIAL`,
#: NOTE: The paper indicates it as Geometric mean (to check)
DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = {
const.IMC.VECTORIAL, const.IMC.RANDOM_HORIZONTAL}
#: Supported standard deviation type is total
DEFINED_FOR_STANDARD_DEVIATION_TYPES = {const.StdDev.TOTAL}
#: Required site parameter is only Vs30
REQUIRES_SITES_PARAMETERS = {'vs30'}
#: Required rupture parameter is magnitude
REQUIRES_RUPTURE_PARAMETERS = {'mag'}
#: Required distance measure is Rjb distance
#: see paragraph 'Predictor Variables', page 6.
REQUIRES_DISTANCES = {'rjb'}
#: No independent tests - verification against paper for PGA and PGV,
#: but not for SA and Standard Deviations
non_verified = True
def compute(self, ctx: np.recarray, imts, mean, sig, tau, phi):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.compute>`
for spec of input and result values.
"""
for m, imt in enumerate(imts):
C = self.COEFFS[imt]
M = ctx.mag - 6
R = np.sqrt(ctx.rjb ** 2 + C['h'] ** 2)
# In the original formulation of the GMPE, distinction is only made
# between rock and soil ctx, which I assumed separated by the Vs30
# value of 910m/s (see equation 5 of the paper)
gamma = np.array([0 if v > 910. else 1 for v in ctx.vs30])
mean[m] = (C['b1'] +
C['b2'] * M +
C['b3'] * M ** 2 +
C['b5'] * np.log10(R) +
C['b6'] * gamma)
# Convert from base 10 to base e
mean[m] /= np.log10(np.e)
# Converting PSV to PSA
if imt != PGA() and imt != PGV():
omega = 2.*np.pi/imt.period
mean[m] += np.log(omega/(gravity*100))
# Computing standard deviation
if (self.DEFINED_FOR_INTENSITY_MEASURE_COMPONENT ==
'Random horizontal'):
# Using equation 8 of the paper,
# corrected as indicated in the erratum
Sr = np.sqrt(C['SlZ']**2 + (C['S3'] / np.sqrt(2))**2)
else:
Sr = C['SlZ']
# Convert from base 10 to base e
sig[m] = Sr / np.log10(np.e)
#: coefficient table provided by GSC (corrected as in the erratum)
COEFFS = CoeffsTable(sa_damping=5, table="""\
imt Bv b1 b2 b3 b5 b6 h SlZ S3
pgv 0 2.252 0.490 0 -1.196 0.195 7.06 0.246 0.075
pga -0.371 0.237 0.229 0 -1.052 0.174 7.27 0.203 0.094
0.100 -0.212 2.109 0.327 -0.098 -1.250 0.099 9.99 0.273 0.110
0.110 -0.211 2.120 0.318 -0.100 -1.207 0.099 9.84 0.265 0.111
0.120 -0.215 2.129 0.313 -0.101 -1.173 0.101 9.69 0.257 0.113
0.130 -0.221 2.138 0.309 -0.101 -1.145 0.103 9.54 0.252 0.114
0.140 -0.228 2.145 0.307 -0.100 -1.122 0.107 9.39 0.247 0.115
0.150 -0.238 2.152 0.305 -0.099 -1.103 0.111 9.25 0.242 0.116
0.160 -0.248 2.158 0.305 -0.098 -1.088 0.116 9.12 0.239 0.117
0.170 -0.258 2.163 0.305 -0.096 -1.075 0.121 8.99 0.237 0.118
0.180 -0.270 2.167 0.306 -0.094 -1.064 0.126 8.86 0.235 0.119
0.190 -0.281 2.172 0.308 -0.092 -1.055 0.131 8.74 0.234 0.119
0.200 -0.292 2.175 0.309 -0.090 -1.047 0.137 8.63 0.233 0.120
0.220 -0.315 2.182 0.313 -0.086 -1.036 0.147 8.41 0.231 0.121
0.240 -0.338 2.186 0.318 -0.082 -1.029 0.158 8.22 0.231 0.122
0.260 -0.360 2.190 0.323 -0.078 -1.024 0.168 8.04 0.231 0.123
0.280 -0.381 2.194 0.329 -0.073 -1.021 0.178 7.87 0.231 0.124
0.300 -0.401 2.196 0.334 -0.070 -1.020 0.188 7.72 0.232 0.125
0.320 -0.420 2.198 0.340 -0.066 -1.019 0.196 7.58 0.232 0.126
0.340 -0.438 2.199 0.345 -0.062 -1.020 0.205 7.45 0.233 0.126
0.360 -0.456 2.200 0.350 -0.059 -1.021 0.213 7.33 0.234 0.127
0.380 -0.472 2.200 0.356 -0.055 -1.023 0.221 7.22 0.236 0.128
0.400 -0.487 2.201 0.361 -0.052 -1.025 0.228 7.11 0.237 0.128
0.420 -0.502 2.201 0.365 -0.049 -1.027 0.235 7.02 0.238 0.129
0.440 -0.516 2.201 0.370 -0.047 -1.030 0.241 6.93 0.239 0.129
0.460 -0.529 2.201 0.375 -0.044 -1.032 0.247 6.85 0.241 0.129
0.480 -0.541 2.201 0.379 -0.042 -1.035 0.253 6.77 0.242 0.130
0.500 -0.553 2.199 0.384 -0.039 -1.038 0.259 6.70 0.243 0.130
0.550 -0.579 2.197 0.394 -0.034 -1.044 0.271 6.55 0.246 0.131
0.600 -0.602 2.195 0.403 -0.030 -1.051 0.281 6.42 0.249 0.132
0.650 -0.622 2.191 0.411 -0.026 -1.057 0.291 6.32 0.252 0.132
0.700 -0.639 2.187 0.418 -0.023 -1.062 0.299 6.23 0.254 0.133
0.750 -0.653 2.184 0.425 -0.020 -1.067 0.305 6.17 0.257 0.133
0.800 -0.666 2.179 0.431 -0.018 -1.071 0.311 6.11 0.260 0.134
0.850 -0.676 2.174 0.437 -0.016 -1.075 0.316 6.07 0.262 0.134
0.900 -0.685 2.170 0.442 -0.015 -1.078 0.320 6.04 0.264 0.134
0.950 -0.692 2.164 0.446 -0.014 -1.081 0.324 6.02 0.267 0.135
1.000 -0.698 2.160 0.450 -0.014 -1.083 0.326 6.01 0.269 0.135
1.100 -0.706 2.150 0.457 -0.013 -1.085 0.330 6.01 0.273 0.135
1.200 -0.710 2.140 0.462 -0.014 -1.086 0.332 6.03 0.278 0.136
1.300 -0.711 2.129 0.466 -0.015 -1.085 0.333 6.07 0.282 0.136
1.400 -0.709 2.119 0.469 -0.017 -1.083 0.331 6.13 0.286 0.136
1.500 -0.704 2.109 0.471 -0.019 -1.079 0.329 6.21 0.291 0.137
1.600 -0.697 2.099 0.472 -0.022 -1.075 0.326 6.29 0.295 0.137
1.700 -0.689 2.088 0.473 -0.025 -1.070 0.322 6.39 0.299 0.137
1.800 -0.679 2.079 0.472 -0.029 -1.063 0.317 6.49 0.303 0.137
1.900 -0.667 2.069 0.472 -0.032 -1.056 0.312 6.60 0.307 0.137
2.000 -0.655 2.059 0.471 -0.037 -1.049 0.306 6.71 0.312 0.137
""")
|
gem/oq-engine
|
openquake/hazardlib/gsim/pankow_pechmann_2004.py
|
Python
|
agpl-3.0
| 8,316
|
import itertools
import numbers
from . import util
from .attractiveness_finder import AttractivenessFinder
class Statistics(object):
def __init__(self, user, message_threads=None, filters=(),
attractiveness_finder=None):
self._user = user
self._message_threads = message_threads or set(itertools.chain(user.inbox,
user.outbox))
self._filters = filters
self._attractiveness_finder = attractiveness_finder or AttractivenessFinder()
def _thread_matches(self, message_thread):
return all(f(message_thread) for f in self._filters)
@util.cached_property
def threads(self):
return set(mt for mt in self._message_threads if self._thread_matches(mt))
@util.cached_property
def has_messages(self):
return self.with_filters(lambda mt: mt.has_messages)
@util.cached_property
def has_response(self):
return self.with_filters(lambda mt: mt.got_response)
@util.cached_property
def no_responses(self):
return self.with_filters(lambda mt: not mt.got_response)
@util.cached_property
def initiated(self):
return self.with_filters(lambda mt: mt.initiator == self._user.profile)
@util.cached_property
def received(self):
return self.with_filters(lambda mt: mt.initiator != self._user.profile)
@util.cached_property
def has_attractiveness(self):
return self.with_filters(lambda mt: self._attractiveness_finder.find_attractiveness(
mt.correspondent) is not None)
def time_filter(self, min_date=None, max_date=None):
def _time_filter(thread):
if min_date and min_date > thread.date:
return False
if max_date and max_date < thread.date:
return False
return True
return self.with_filters(_time_filter)
def attractiveness_filter(self, attractiveness_finder=None,
min_attractiveness=0, max_attractiveness=10000):
attractiveness_finder = attractiveness_finder or self._attractiveness_finder
def _attractiveness_filter(thread):
attractiveness = attractiveness_finder.find_attractiveness(
thread.correspondent
)
return (isinstance(attractiveness, numbers.Number) and
min_attractiveness <= attractiveness <= max_attractiveness)
return self.with_filters(_attractiveness_filter)
def with_filters(self, *filters, **kwargs):
message_threads = self.threads \
if kwargs.get('apply_filters_immediately', True) \
else self._message_threads
return type(self)(self._user, message_threads, self._filters + filters,
attractiveness_finder=self._attractiveness_finder)
@property
def count(self):
return len(self.threads)
@property
def response_rate(self):
return float(self.has_response.count)/self.count
def _average(self, function):
return sum(map(function, self.threads))/self.count
@property
def average_first_message_length(self):
return self._average(lambda thread: len(thread.messages[0].content))
@property
def average_conversation_length(self):
return self._average(lambda thread: thread.message_count)
def _average_attractiveness(self, attractiveness_finder=None):
attractiveness_finder = attractiveness_finder or self._attractiveness_finder
return self.has_attractiveness._average(lambda thread: (
attractiveness_finder.find_attractiveness(
thread.correspondent
))
)
@property
def average_attractiveness(self):
return self._average_attractiveness()
@property
def portion_initiated(self):
return self.initiated.count/self.count
@property
def portion_received(self):
return 1 - self.portion_initiated
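# Usage sketch (`user` is a hypothetical okcupyd user handle): the filter
# properties each return a new Statistics instance, so restrictions chain
# freely before the count and rate properties are read.
def _demo_statistics_chaining(user):
    stats = Statistics(user)
    initiated_and_answered = stats.initiated.has_response.count
    return initiated_and_answered, stats.response_rate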
|
IvanMalison/okcupyd
|
okcupyd/statistics.py
|
Python
|
mit
| 4,061
|
#!/usr/bin/python
#
# Expand the bundled cairo-1.0.gir.in files
# for use in Visual C++ builds of G-I
#
# Author: Fan, Chun-wei
# Date: January 20, 2014
#
# (Adapted from setup.py in
# $(glib_src_root)/build/win32/setup.py written by Shixin Zeng)
import os
import sys
import argparse
import replace
from gi_msvc_build_utils import parent_dir
def main(argv):
parser = argparse.ArgumentParser(description='Generate the complete cairo-1.0.gir')
parser.add_argument('--dllname',
required=True,
help='Full file name of the Cairo-GObject DLL for the Cairo Introspection File')
args = parser.parse_args()
# Get the srcroot and the path where the bundled .gir files reside in the package
srcroot = parent_dir(parent_dir(__file__))
preset_gir_path = os.path.join(srcroot, 'gir')
# Set up variables in cairo-1.0.gir.in to produce cairo-1.0.gir
replace.replace(os.path.join(preset_gir_path, 'cairo-1.0.gir.in'),
'cairo-1.0.gir.tmp',
'%CAIRO_GIR_PACKAGE%',
'cairo-gobject')
replace.replace('cairo-1.0.gir.tmp',
'cairo-1.0.gir',
'%CAIRO_SHARED_LIBRARY%',
args.dllname)
os.unlink('cairo-1.0.gir.tmp')
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
anthrotype/gobject-introspection
|
build/win32/gen-win32-cairo-gir.py
|
Python
|
gpl-2.0
| 1,359
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2013 by Yaco Sistemas <goinnn@gmail.com>
# 2015 by Pablo Martín <goinnn@gmail.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.contrib import admin
from testing.unusual_fields.models import UnusualModel
class UnusualModelAdmin(admin.ModelAdmin):
pass
class ResourceAdmin(admin.ModelAdmin):
pass
admin.site.register(UnusualModel, UnusualModelAdmin)
|
django-inplaceedit/django-inplaceedit
|
testing/testing/unusual_fields/admin.py
|
Python
|
lgpl-3.0
| 1,048
|