| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
from django.conf.urls import url  # django.conf.urls.defaults was removed in Django 1.6
from . import views
urlpatterns = [
url(r'^transaction/(?P<tx_id>[0-9]+)$', views.transaction_view, name='compropago_transaction_state'),
url(r'^webhook/$', views.web_hook_view, name='compropago_webhook'),
]
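# With these patterns, a GET of /transaction/42 is dispatched to
# views.transaction_view with tx_id='42', and the route names let views and
# templates build URLs via reverse('compropago_transaction_state', ...) and
# reverse('compropago_webhook').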
| tzicatl/lfs-compropago | lfs_compropago/urls.py | Python | mit | 260 |
"""Support for Gogogate2 garage Doors."""
import logging
from pygogogate2 import Gogogate2API as pygogogate2
import voluptuous as vol
from homeassistant.components.cover import SUPPORT_CLOSE, SUPPORT_OPEN, CoverDevice
from homeassistant.const import (
CONF_IP_ADDRESS,
CONF_NAME,
CONF_PASSWORD,
CONF_USERNAME,
STATE_CLOSED,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "gogogate2"
NOTIFICATION_ID = "gogogate2_notification"
NOTIFICATION_TITLE = "Gogogate2 Cover Setup"
COVER_SCHEMA = vol.Schema(
{
vol.Required(CONF_IP_ADDRESS): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
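# COVER_SCHEMA above corresponds to a configuration.yaml entry along these
# lines (values are illustrative placeholders):
#
#   cover:
#     - platform: gogogate2
#       ip_address: 192.168.1.10
#       username: admin
#       password: my_gogogate2_password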
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Gogogate2 component."""
ip_address = config.get(CONF_IP_ADDRESS)
name = config.get(CONF_NAME)
password = config.get(CONF_PASSWORD)
username = config.get(CONF_USERNAME)
mygogogate2 = pygogogate2(username, password, ip_address)
try:
devices = mygogogate2.get_devices()
if devices is False:
raise ValueError("Username or Password is incorrect or no devices found")
add_entities(MyGogogate2Device(mygogogate2, door, name) for door in devices)
except (TypeError, KeyError, NameError, ValueError) as ex:
_LOGGER.error("%s", ex)
hass.components.persistent_notification.create(
"Error: {}<br />"
"You will need to restart hass after fixing."
"".format(ex),
title=NOTIFICATION_TITLE,
notification_id=NOTIFICATION_ID,
)
class MyGogogate2Device(CoverDevice):
"""Representation of a Gogogate2 cover."""
def __init__(self, mygogogate2, device, name):
"""Initialize with API object, device id."""
self.mygogogate2 = mygogogate2
self.device_id = device["door"]
self._name = name or device["name"]
self._status = device["status"]
self._available = None
@property
def name(self):
"""Return the name of the garage door if any."""
return self._name if self._name else DEFAULT_NAME
@property
def is_closed(self):
"""Return true if cover is closed, else False."""
return self._status == STATE_CLOSED
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return "garage"
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_OPEN | SUPPORT_CLOSE
@property
def available(self):
"""Could the device be accessed during the last update call."""
return self._available
def close_cover(self, **kwargs):
"""Issue close command to cover."""
self.mygogogate2.close_device(self.device_id)
def open_cover(self, **kwargs):
"""Issue open command to cover."""
self.mygogogate2.open_device(self.device_id)
def update(self):
"""Update status of cover."""
try:
self._status = self.mygogogate2.get_status(self.device_id)
self._available = True
except (TypeError, KeyError, NameError, ValueError) as ex:
_LOGGER.error("%s", ex)
self._status = None
self._available = False
| leppa/home-assistant | homeassistant/components/gogogate2/cover.py | Python | apache-2.0 | 3,481 |
'''
Created by auto_sdk on 2015.04.21
'''
from aliyun.api.base import RestApi
class Rds20140815ModifyDBDescriptionRequest(RestApi):
def __init__(self, domain='rds.aliyuncs.com', port=80):
RestApi.__init__(self, domain, port)
self.DBDescription = None
self.DBInstanceId = None
self.DBName = None
def getapiname(self):
return 'rds.aliyuncs.com.ModifyDBDescription.2014-08-15'
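# Illustrative use of this auto-generated request class (identifiers are
# placeholders; how the request is actually executed depends on the RestApi
# base class in aliyun.api.base):
#
#   req = Rds20140815ModifyDBDescriptionRequest()
#   req.DBInstanceId = 'rm-xxxxxxxx'
#   req.DBName = 'mydb'
#   req.DBDescription = 'orders database'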
| wanghe4096/website | aliyun/api/rest/Rds20140815ModifyDBDescriptionRequest.py | Python | bsd-2-clause | 397 |
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
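# Illustrative use of the class above (credentials are placeholders; 6584 is
# this script's default port, see the settings handling below):
#   rpc = BitcoinRPC('127.0.0.1', 6584, 'rpcuser', 'rpcpass')
#   print rpc.getblockcount()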
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
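# A minimal CONFIG-FILE sketch, using only keys parsed below (values are
# illustrative; rpcuser/rpcpass are required, the rest have defaults):
#
#   host=127.0.0.1
#   port=6584
#   threads=2
#   scantime=30
#   hashmeter=1
#   rpcuser=myuser
#   rpcpass=mypass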
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 6584
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
| VioletCoin/VioletCoin | contrib/pyminer/pyminer.py | Python | mit | 6,434 |
from setuptools import setup, find_packages
import os
version = '0.2'
def read(fname):
# read the contents of a text file
return open(os.path.join(os.path.dirname(__file__), fname)).read()
install_requires = [
'Django>=1.3'
]
setup(name='django-twostepauth',
version=version,
description="Two-step authentication for Django",
long_description=read('README.rst'),
platforms=['OS Independent'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Framework :: Django',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
],
keywords='django,authentication',
author='Nuno Maltez, Pedro Lima',
author_email='nuno@cognitiva.com, pedro@cognitiva.com',
url='https://bitbucket.org/cogni/django-twostepauth',
license='BSD',
packages=find_packages(exclude=['exampleapp']),
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
entry_points="""
# -*- Entry points: -*-
""",
)
| cognitiva/django-twostepauth | setup.py | Python | bsd-3-clause | 1,209 |
# -*- coding: utf-8 -*-
from django.contrib.contenttypes.models import ContentType
def get_model(app_model_str):
app, model = app_model_str.split(".")
model_class = ContentType.objects.get(
app_label=app,
model=model.lower()
).model_class()
return model_class
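# Illustrative call, assuming django.contrib.auth is installed:
#   get_model("auth.User")  # returns the django.contrib.auth.models.User class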
| math-a3k/django-ai | django_ai/base/utils.py | Python | lgpl-3.0 | 296 |
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
# Perf-O-Meters for Check_MK's checks
#
# They are called with:
# 1. row -> a dictionary of the data row with at least the
# keys "service_perf_data", "service_state" and "service_check_command"
# 2. The check command (might be extracted from the performance data
# in a PNP-like manner, e.g. if perfdata is "value=10.5;0;100.0;20;30 [check_disk]"
# 3. The parsed performance data as a list of 7-tuples of
# (varname, value, unit, warn, crit, min, max)
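# Using the example above, the perfdata string "value=10.5;0;100.0;20;30"
# would arrive here roughly as [("value", 10.5, "", 0, 100.0, 20, 30)].
# Note that helpers referenced below (perfometers, perfometer_td,
# perfometer_linear, perfometer_logarithmic, savefloat, saveint, ...) are
# provided by the surrounding Check_MK web GUI, not defined in this file.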
def perfometer_esx_vsphere_datastores(row, check_command, perf_data):
used_mb = perf_data[0][1]
maxx = perf_data[0][-1]
# perf data might be incomplete, if trending perfdata is off...
uncommitted_mb = 0
for entry in perf_data:
if entry[0] == "uncommitted":
uncommitted_mb = entry[1]
break
perc_used = 100 * (float(used_mb) / float(maxx))
perc_uncommitted = 100 * (float(uncommitted_mb) / float(maxx))
perc_totally_free = 100 - perc_used - perc_uncommitted
h = '<table><tr>'
if perc_used + perc_uncommitted <= 100:
# Regular handling, no overcommit
h += perfometer_td(perc_used, "#00ffc6")
h += perfometer_td(perc_uncommitted, "#eeccff")
h += perfometer_td(perc_totally_free, "white")
else:
# Visualize overcommitted space by scaling to the total overcommitment value
# and drawing the capacity as a red line in the perfometer
total = perc_used + perc_uncommitted
perc_used_bar = perc_used * 100 / total
perc_uncommitted_bar = perc_uncommitted * 100 / total
perc_free = (100 - perc_used) * 100 / total
h += perfometer_td(perc_used_bar, "#00ffc6")
h += perfometer_td(perc_free, "#eeccff")
h += perfometer_td(1, "red") # This line visualizes the capacity
h += perfometer_td(perc_uncommitted - perc_free, "#eeccff")
h += "</tr></table>"
legend = "%0.2f%%" % perc_used
if uncommitted_mb:
legend += " (+%0.2f%%)" % perc_uncommitted
return legend, h
perfometers["check_mk-esx_vsphere_datastores"] = perfometer_esx_vsphere_datastores
def perfometer_check_mk_mem_used(row, check_command, perf_data):
ram_used = None
for entry in perf_data:
# Get total and used RAM
if entry[0] == "ramused":
ram_used = float(entry[1]) # mem.include
ram_total = float(entry[6]) # mem.include
elif entry[0] == "mem_used":
ram_used = float(entry[1]) # mem.linux
elif entry[0] == "mem_total":
ram_total = float(entry[1]) # mem.linux
# Get total and used SWAP
elif entry[0] == "swapused":
swap_used = float(entry[1]) # mem.include
swap_total = float(entry[6]) # mem.include
elif entry[0] == "swap_used":
swap_used = float(entry[1]) # mem.linux
elif entry[0] == "swap_total":
swap_total = float(entry[1]) # mem.linux
if not ram_used:
return "",""
virt_total = ram_total + swap_total
virt_used = ram_used + swap_used
# paint used ram and swap
ram_color, swap_color = "#80ff40", "#008030"
h = '<table><tr>'
h += perfometer_td(100 * ram_used / virt_total, ram_color)
h += perfometer_td(100 * swap_used / virt_total, swap_color)
# used virtual memory < ram => show free ram and free total virtual memory
if virt_used < ram_total:
h += perfometer_td(100 * (ram_total - virt_used) / virt_total, "#fff")
h += perfometer_td(100 * (virt_total - ram_total) / virt_total, "#ccc")
# usage exceeds ram => show only free virtual memory
else:
h += perfometer_td(100 * (virt_total - virt_used) / virt_total, "#ccc")
h += "</tr></table>"
return "%d%%" % (100 * (virt_used / ram_total)), h
perfometers["check_mk-mem.used"] = perfometer_check_mk_mem_used
perfometers["check_mk-mem.linux"] = perfometer_check_mk_mem_used
perfometers["check_mk-aix_memory"] = perfometer_check_mk_mem_used
perfometers["check_mk-hr_mem"] = perfometer_check_mk_mem_used
def perfometer_check_mk_mem_win(row, check_command, perf_data):
# only show mem usage, omit the page file
color = "#5090c0"
ram_total = float(perf_data[0][6])
ram_used = float(perf_data[0][1])
perc = ram_used / ram_total * 100.0
return "%d%%" % perc, perfometer_linear(perc, color)
perfometers["check_mk-mem.win"] = perfometer_check_mk_mem_win
def perfometer_check_mk_kernel(row, check_command, perf_data):
rate = float(perf_data[0][1])
return "%.1f/s" % rate, perfometer_logarithmic(rate, 1000, 2, "#da6")
perfometers["check_mk-kernel"] = perfometer_check_mk_kernel
def perfometer_check_mk_ntp(row, check_command, perf_data, unit = "ms"):
offset = float(perf_data[0][1])
absoffset = abs(offset)
warn = float(perf_data[0][3])
crit = float(perf_data[0][4])
max = crit * 2
if absoffset > max:
absoffset = max
rel = 50 * (absoffset / max)
color = { 0: "#0f8", 1: "#ff2", 2: "#f22", 3: "#fa2" }[row["service_state"]]
h = '<table><tr>'
if offset > 0:
h += perfometer_td(50, "#fff")
h += perfometer_td(rel, color)
h += perfometer_td(50 - rel, "#fff")
else:
h += perfometer_td(50 - rel, "#fff")
h += perfometer_td(rel, color)
h += perfometer_td(50, "#fff")
h += '</tr></table>'
return "%.2f %s" % (offset, unit), h
perfometers["check_mk-ntp"] = perfometer_check_mk_ntp
perfometers["check_mk-ntp.time"] = perfometer_check_mk_ntp
perfometers["check_mk-chrony"] = perfometer_check_mk_ntp
perfometers["check_mk-systemtime"] = lambda r, c, p: perfometer_check_mk_ntp(r, c, p, "s")
def perfometer_ipmi_sensors(row, check_command, perf_data):
state = row["service_state"]
color = "#39f"
value = float(perf_data[0][1])
crit = savefloat(perf_data[0][4])
if not crit:
return "%d" % int(value), perfometer_logarithmic(value, 40, 1.2, color)
perc = 100 * value / crit
# some sensors get critical if the value is < crit (fans), some if > crit (temp)
h = '<table><tr>'
if value <= crit:
h += perfometer_td(perc, color)
h += perfometer_td(100 - perc, "#fff")
elif state == 0: # fan, OK
m = max(value, 10000.0)
perc_crit = 100 * crit / m
perc_value = 100 * (value-crit) / m
perc_free = 100 * (m - value) / m
h += perfometer_td(perc_crit, color)
h += perfometer_td(perc_value, color)
h += perfometer_td(perc_free, "#fff")
h += '</tr></table>'
if perf_data[0][0] == "temp":
unit = "°C"
else:
unit = ""
return (u"%d%s" % (int(value), unit)), h
perfometers["check_mk-ipmi_sensors"] = perfometer_ipmi_sensors
def perfometer_temperature(row, check_command, perf_data):
color = "#39f"
value = float(perf_data[0][1])
return u"%d °C" % int(value), perfometer_logarithmic(value, 40, 1.2, color)
perfometers["check_mk-nvidia.temp"] = perfometer_temperature
perfometers["check_mk-cisco_temp_sensor"] = perfometer_temperature
perfometers["check_mk-cisco_temp_perf"] = perfometer_temperature
perfometers["check_mk-cmctc_lcp.temp"] = perfometer_temperature
perfometers["check_mk-cmctc.temp"] = perfometer_temperature
perfometers["check_mk-smart.temp"] = perfometer_temperature
perfometers["check_mk-f5_bigip_chassis_temp"] = perfometer_temperature
perfometers["check_mk-f5_bigip_cpu_temp"] = perfometer_temperature
perfometers["check_mk-hp_proliant_temp"] = perfometer_temperature
perfometers["check_mk-akcp_sensor_temp"] = perfometer_temperature
perfometers["check_mk-akcp_daisy_temp"] = perfometer_temperature
perfometers["check_mk-fsc_temp"] = perfometer_temperature
perfometers["check_mk-viprinet_temp"] = perfometer_temperature
perfometers["check_mk-hwg_temp"] = perfometer_temperature
perfometers["check_mk-sensatronics_temp"] = perfometer_temperature
perfometers["check_mk-apc_inrow_temperature"] = perfometer_temperature
perfometers["check_mk-hitachi_hnas_temp"] = perfometer_temperature
perfometers["check_mk-dell_poweredge_temp"] = perfometer_temperature
perfometers["check_mk-dell_chassis_temp"] = perfometer_temperature
perfometers["check_mk-dell_om_sensors"] = perfometer_temperature
perfometers["check_mk-innovaphone_temp"] = perfometer_temperature
perfometers["check_mk-cmciii.temp"] = perfometer_temperature
perfometers["check_mk-ibm_svc_enclosurestats.temp"] = perfometer_temperature
perfometers["check_mk-wagner_titanus_topsense.temp"] = perfometer_temperature
perfometers["check_mk-enterasys_temp"] = perfometer_temperature
perfometers["check_mk-adva_fsp_temp"] = perfometer_temperature
perfometers["check_mk-allnet_ip_sensoric.temp"] = perfometer_temperature
perfometers["check_mk-qlogic_sanbox.temp"] = perfometer_temperature
perfometers["check_mk-bintec_sensors.temp"] = perfometer_temperature
perfometers["check_mk-knuerr_rms_temp"] = perfometer_temperature
perfometers["check_mk-arris_cmts_temp"] = perfometer_temperature
perfometers["check_mk-casa_cpu_temp"] = perfometer_temperature
perfometers["check_mk-rms200_temp"] = perfometer_temperature
perfometers["check_mk-juniper_screenos_temp"] = perfometer_temperature
perfometers["check_mk-lnx_thermal"] = perfometer_temperature
perfometers["check_mk-climaveneta_temp"] = perfometer_temperature
perfometers["check_mk-carel_sensors"] = perfometer_temperature
perfometers["check_mk-netscaler_health.temp"] = perfometer_temperature
perfometers["check_mk-kentix_temp"] = perfometer_temperature
perfometers["check_mk-ucs_bladecenter_fans.temp"] = perfometer_temperature
perfometers["check_mk-ucs_bladecenter_psu.chassis_temp"] = perfometer_temperature
perfometers["check_mk-cisco_temperature"] = perfometer_temperature
def perfometer_temperature_multi(row, check_command, perf_data):
display_value = -1
display_color = "#60f020"
for sensor, value, uom, warn, crit, min, max in perf_data:
value = saveint(value)
if value > display_value:
display_value = value
if display_value > saveint(warn):
display_color = "#FFC840"
if display_value > saveint(crit):
display_color = "#FF0000"
display_string = "%s °C" % display_value
return display_string, perfometer_linear(display_value, display_color)
perfometers["check_mk-brocade_mlx_temp"] = perfometer_temperature_multi
def perfometer_power(row, check_command, perf_data):
display_color = "#60f020"
value = savefloat(perf_data[0][1])
crit = savefloat(perf_data[0][4])
warn = savefloat(perf_data[0][3])
power_perc = value/crit*90 # critical is at 90% to allow for more than crit
if value > warn:
display_color = "#FFC840"
if value > crit:
display_color = "#FF0000"
display_string = "%.1f Watt" % value
return display_string, perfometer_linear(power_perc, display_color)
perfometers["check_mk-dell_poweredge_amperage.power"] = perfometer_power
perfometers["check_mk-dell_chassis_power"] = perfometer_power
perfometers["check_mk-dell_chassis_powersupplies"] = perfometer_power
perfometers["check_mk-hp-proliant_power"] = perfometer_power
def perfometer_power_simple(row, check_command, perf_data):
watt = int(perf_data[0][1])
text = "%s Watt" % watt
return text, perfometer_logarithmic(watt, 150, 2, "#60f020")
perfometers["check_mk-ibm_svc_enclosurestats.power"] = perfometer_power_simple
perfometers["check_mk-sentry_pdu"] = perfometer_power_simple
def perfometer_users(row, check_command, perf_data):
state = row["service_state"]
color = "#39f"
value = float(perf_data[0][1])
crit = savefloat(perf_data[0][4])
return u"%d users" % int(value), perfometer_logarithmic(value, 50, 2, color)
perfometers["check_mk-hitachi_hnas_cifs"] = perfometer_users
def perfometer_blower(row, check_command, perf_data):
rpm = saveint(perf_data[0][1])
perc = rpm / 10000.0 * 100.0
return "%d RPM" % rpm, perfometer_logarithmic(rpm, 2000, 1.5, "#88c")
perfometers["check_mk-cmctc_lcp.blower"] = perfometer_blower
def perfometer_lcp_regulator(row, check_command, perf_data):
value = saveint(perf_data[0][1])
return "%d%%" % value, perfometer_linear(value, "#8c8")
perfometers["check_mk-cmctc_lcp.regulator"] = perfometer_lcp_regulator
def perfometer_bandwidth(in_traffic, out_traffic, in_bw, out_bw, unit = "B"):
txt = []
have_bw = True
h = '<table><tr>'
traffic_multiplier = unit == "B" and 1 or 8
for name, bytes, bw, color in [
("in", in_traffic, in_bw, "#0e6"),
("out", out_traffic, out_bw, "#2af") ]:
if bw > 0.0:
rrate = bytes / bw
else:
have_bw = False
break
drate = max(0.02, rrate ** 0.5 ** 0.5)
rperc = 100 * rrate
dperc = 100 * drate
a = perfometer_td(dperc / 2, color)
b = perfometer_td(50 - dperc/2, "#fff")
if name == "in":
h += b + a # white left, color right
else:
h += a + b # color left, white right
txt.append("%.1f%%" % rperc)
if have_bw:
h += '</tr></table>'
return " ".join(txt), h
# make logarithmic perf-o-meter
MB = 1000000.0
text = "%s/s %s/s" % (
number_human_readable(in_traffic * traffic_multiplier, 1, unit), number_human_readable(out_traffic * traffic_multiplier, 1, unit))
return text, perfometer_logarithmic_dual(
in_traffic, "#0e6", out_traffic, "#2af", 1000000, 5)
def perfometer_check_mk_if(row, check_command, perf_data):
unit = "Bit/s" in row["service_plugin_output"] and "Bit" or "B"
return perfometer_bandwidth(
in_traffic = savefloat(perf_data[0][1]),
out_traffic = savefloat(perf_data[5][1]),
in_bw = savefloat(perf_data[0][6]),
out_bw = savefloat(perf_data[5][6]),
unit = unit
)
perfometers["check_mk-if"] = perfometer_check_mk_if
perfometers["check_mk-if64"] = perfometer_check_mk_if
perfometers["check_mk-if64adm"] = perfometer_check_mk_if
perfometers["check_mk-if64_tplink"] = perfometer_check_mk_if
perfometers["check_mk-winperf_if"] = perfometer_check_mk_if
perfometers["check_mk-vms_if"] = perfometer_check_mk_if
perfometers["check_mk-if_lancom"] = perfometer_check_mk_if
perfometers["check_mk-lnx_if"] = perfometer_check_mk_if
perfometers["check_mk-hpux_if"] = perfometer_check_mk_if
perfometers["check_mk-mcdata_fcport"] = perfometer_check_mk_if
perfometers["check_mk-esx_vsphere_counters.if"] = perfometer_check_mk_if
perfometers["check_mk-hitachi_hnas_fc_if"] = perfometer_check_mk_if
perfometers["check_mk-statgrab_net"] = perfometer_check_mk_if
perfometers["check_mk-netapp_api_if"] = perfometer_check_mk_if
perfometers["check_mk-if_brocade"] = perfometer_check_mk_if
perfometers["check_mk-ucs_bladecenter_if"] = perfometer_check_mk_if
def perfometer_check_mk_fc_port(row, check_command, perf_data):
unit = "B"
return perfometer_bandwidth(
in_traffic = savefloat(perf_data[0][1]),
out_traffic = savefloat(perf_data[1][1]),
in_bw = savefloat(perf_data[0][6]),
out_bw = savefloat(perf_data[1][6]),
unit = unit
)
perfometers["check_mk-fc_port"] = perfometer_check_mk_fc_port
def perfometer_check_mk_brocade_fcport(row, check_command, perf_data):
return perfometer_bandwidth(
in_traffic = savefloat(perf_data[0][1]),
out_traffic = savefloat(perf_data[1][1]),
in_bw = savefloat(perf_data[0][6]),
out_bw = savefloat(perf_data[1][6]),
)
perfometers["check_mk-brocade_fcport"] = perfometer_check_mk_brocade_fcport
perfometers["check_mk-qlogic_fcport"] = perfometer_check_mk_brocade_fcport
def perfometer_check_mk_cisco_qos(row, check_command, perf_data):
unit = "Bit/s" in row["service_plugin_output"] and "Bit" or "B"
return perfometer_bandwidth(
in_traffic = savefloat(perf_data[0][1]),
out_traffic = savefloat(perf_data[1][1]),
in_bw = savefloat(perf_data[0][5]),
out_bw = savefloat(perf_data[1][5]),
unit = unit
)
perfometers["check_mk-cisco_qos"] = perfometer_check_mk_cisco_qos
def perfometer_oracle_tablespaces(row, check_command, perf_data):
current = float(perf_data[0][1])
used = float(perf_data[1][1])
max = float(perf_data[2][1])
used_perc = used / max * 100
curr_perc = (current / max * 100) - used_perc
h = '<table><tr>'
h += perfometer_td(used_perc, "#f0b000")
h += perfometer_td(curr_perc, "#00ff80")
h += perfometer_td(100 - used_perc - curr_perc, "#80c0ff")
h += '</tr></table>'
return "%.1f%%" % used_perc, h
perfometers["check_mk-oracle_tablespaces"] = perfometer_oracle_tablespaces
def perfometer_check_oracle_dataguard_stats(row, check_command, perf_data):
perfdata_found = False
perfdata1 = 0
for data in perf_data:
if data[0] == "apply_lag":
color = '#80F000'
perfdata_found = True
days, rest = divmod(int(data[1]), 60*60*24)
hours, rest = divmod(rest, 60*60)
minutes, seconds = divmod(rest, 60)
perfdata1 = data[1]
if not perfdata_found:
days = 0
hours = 0
minutes = 0
color = "#008f48"
return "%02dd %02dh %02dm" % (days, hours, minutes), perfometer_logarithmic(perfdata1, 2592000, 2, color)
perfometers["check_mk-oracle_dataguard_stats"] = perfometer_check_oracle_dataguard_stats
def perfometer_oracle_sessions(row, check_command, perf_data):
if check_command != "check_mk-oracle_sessions":
color = "#008f48";
unit = "";
else:
color = "#4800ff";
unit = "/h";
value = int(perf_data[0][1]);
return "%d%s" % (value, unit), perfometer_logarithmic(value, 50, 2, color);
perfometers["check_mk-oracle_sessions"] = perfometer_oracle_sessions
perfometers["check_mk-oracle_logswitches"] = perfometer_oracle_sessions
perfometers["check_mk-oracle_processes"] = perfometer_oracle_sessions
def perfometer_cpu_utilization(row, check_command, perf_data):
util = float(perf_data[0][1]) # is already percentage
color = "#60c080"
return "%.0f %%" % util, perfometer_linear(util, color)
perfometers["check_mk-h3c_lanswitch_cpu"] = perfometer_cpu_utilization
perfometers["check_mk-winperf_processor.util"] = perfometer_cpu_utilization
perfometers["check_mk-netapp_cpu"] = perfometer_cpu_utilization
perfometers["check_mk-cisco_cpu"] = perfometer_cpu_utilization
perfometers["check_mk-juniper_cpu"] = perfometer_cpu_utilization
perfometers["check_mk-brocade_mlx.module_cpu"] = perfometer_cpu_utilization
perfometers["check_mk-hitachi_hnas_cpu"] = perfometer_cpu_utilization
perfometers["check_mk-hitachi_hnas_fpga"] = perfometer_cpu_utilization
perfometers["check_mk-hr_cpu"] = perfometer_cpu_utilization
perfometers["check_mk-innovaphone_cpu"] = perfometer_cpu_utilization
perfometers["check_mk-enterasys_cpu_util"] = perfometer_cpu_utilization
perfometers["check_mk-juniper_trpz_cpu_util"] = perfometer_cpu_utilization
perfometers["check_mk-ibm_svc_nodestats.cpu_util"] = perfometer_cpu_utilization
perfometers["check_mk-ibm_svc_systemstats.cpu_util"] = perfometer_cpu_utilization
perfometers["check_mk-sni_octopuse_cpu"] = perfometer_cpu_utilization
perfometers["check_mk-casa_cpu_util"] = perfometer_cpu_utilization
perfometers["check_mk-juniper_screenos_cpu"] = perfometer_cpu_utilization
def perfometer_ps_perf(row, check_command, perf_data):
perf_dict = dict([(p[0], float(p[1])) for p in perf_data])
try:
perc = perf_dict["pcpu"]
return "%.1f%%" % perc, perfometer_linear(perc, "#30ff80")
except:
return "", ""
perfometers["check_mk-ps"] = perfometer_ps_perf
perfometers["check_mk-ps.perf"] = perfometer_ps_perf
def perfometer_hpux_snmp_cs_cpu(row, check_command, perf_data):
h = '<table><tr>'
h += perfometer_td(float(perf_data[0][1]), "#60f020")
h += perfometer_td(float(perf_data[1][1]), "#ff6000")
h += perfometer_td(float(perf_data[2][1]), "#00d080")
h += perfometer_td(float(perf_data[3][1]), "#ffffff")
h += '</tr></table>'
sum = float(perf_data[0][1]) + float(perf_data[1][1]) + float(perf_data[2][1])
return "%.0f%%" % sum, h
perfometers["check_mk-hpux_snmp_cs.cpu"] = perfometer_hpux_snmp_cs_cpu
def perfometer_check_mk_uptime(row, check_command, perf_data):
seconds = int(float(perf_data[0][1]))
days, rest = divmod(seconds, 60*60*24)
hours, rest = divmod(rest, 60*60)
minutes, seconds = divmod(rest, 60)
return "%02dd %02dh %02dm" % (days, hours, minutes), perfometer_logarithmic(seconds, 2592000.0, 2, '#80F000')
perfometers["check_mk-uptime"] = perfometer_check_mk_uptime
perfometers["check_mk-snmp_uptime"] = perfometer_check_mk_uptime
perfometers["check_mk-esx_vsphere_counters.uptime"] = perfometer_check_mk_uptime
perfometers["check_mk-oracle_instance"] = perfometer_check_mk_uptime
def perfometer_check_mk_diskstat(row, check_command, perf_data):
# No Perf-O-Meter for legacy version of diskstat possible
if len(perf_data) < 2:
return "", ""
read_bytes = float(perf_data[0][1])
write_bytes = float(perf_data[1][1])
text = "%-.2f M/s %-.2f M/s" % \
(read_bytes / (1024*1024.0), write_bytes / (1024*1024.0))
return text, perfometer_logarithmic_dual(
read_bytes, "#60e0a0", write_bytes, "#60a0e0", 5000000, 10)
perfometers["check_mk-winperf_phydisk"] = perfometer_check_mk_diskstat
perfometers["check_mk-hpux_lunstats"] = perfometer_check_mk_diskstat
perfometers["check_mk-aix_diskiod"] = perfometer_check_mk_diskstat
perfometers["check_mk-mysql.innodb_io"] = perfometer_check_mk_diskstat
perfometers["check_mk-esx_vsphere_counters.diskio"] = perfometer_check_mk_diskstat
perfometers["check_mk-emcvnx_disks"] = perfometer_check_mk_diskstat
perfometers["check_mk-ibm_svc_nodestats.diskio"] = perfometer_check_mk_diskstat
perfometers["check_mk-ibm_svc_systemstats.diskio"] = perfometer_check_mk_diskstat
def perfometer_check_mk_iops_r_w(row, check_command, perf_data):
iops_r = float(perf_data[0][1])
iops_w = float(perf_data[1][1])
text = "%.0f IO/s %.0f IO/s" % (iops_r, iops_w)
return text, perfometer_logarithmic_dual(
iops_r, "#60e0a0", iops_w, "#60a0e0", 100000, 10)
perfometers["check_mk-ibm_svc_nodestats.iops"] = perfometer_check_mk_iops_r_w
perfometers["check_mk-ibm_svc_systemstats.iops"] = perfometer_check_mk_iops_r_w
def perfometer_check_mk_disk_latency_r_w(row, check_command, perf_data):
latency_r = float(perf_data[0][1])
latency_w = float(perf_data[1][1])
text = "%.1f ms %.1f ms" % (latency_r, latency_w)
return text, perfometer_logarithmic_dual(
latency_r, "#60e0a0", latency_w, "#60a0e0", 20, 10)
perfometers["check_mk-ibm_svc_nodestats.disk_latency"] = perfometer_check_mk_disk_latency_r_w
perfometers["check_mk-ibm_svc_systemstats.disk_latency"] = perfometer_check_mk_disk_latency_r_w
def perfometer_in_out_mb_per_sec(row, check_command, perf_data):
read_mbit = float(perf_data[0][1]) / 131072
write_mbit = float(perf_data[1][1]) / 131072
text = "%-.2fMb/s %-.2fMb/s" % (read_mbit, write_mbit)
return text, perfometer_logarithmic_dual(
read_mbit, "#30d050", write_mbit, "#0060c0", 100, 10)
perfometers["check_mk-openvpn_clients"] = perfometer_in_out_mb_per_sec
def perfometer_check_mk_hba(row, check_command, perf_data):
if len(perf_data) < 2:
return "", ""
read_blocks = int(perf_data[0][1])
write_blocks = int(perf_data[1][1])
text = "%d/s %d/s" % (read_blocks, write_blocks)
return text, perfometer_logarithmic_dual(
read_blocks, "#30d050", write_blocks, "#0060c0", 100000, 2)
perfometers["check_mk-emcvnx_hba"] = perfometer_check_mk_hba
def perfometer_check_mk_iops(row, check_command, perf_data):
iops = int(perf_data[0][1])
text = "%d/s" % iops
return text, perfometer_logarithmic(iops, 100000, 2, "#30d050")
perfometers["check_mk-emc_isilon_iops"] = perfometer_check_mk_iops
def perfometer_check_mk_printer_supply(row, check_command, perf_data):
left = savefloat(perf_data[0][1])
warn = savefloat(perf_data[0][3])
crit = savefloat(perf_data[0][4])
mini = savefloat(perf_data[0][5])
maxi = savefloat(perf_data[0][6])
if maxi < 0:
return "", "" # Printer does not supply a max value
# If there is no 100% given, calculate the percentage
if maxi != 100.0 and maxi != 0.0:
left = left * 100 / maxi
s = row['service_description'].lower()
fg_color = '#000000'
if 'black' in s or ("ink" not in s and s[-1] == 'k'):
colors = [ '#000000', '#6E6F00', '#6F0000' ]
if left >= 60:
fg_color = '#FFFFFF'
elif 'magenta' in s or s[-1] == 'm':
colors = [ '#FC00FF', '#FC7FFF', '#FEDFFF' ]
elif 'yellow' in s or s[-1] == 'y':
colors = [ '#FFFF00', '#FEFF7F', '#FFFFCF' ]
elif 'cyan' in s or s[-1] == 'c':
colors = [ '#00FFFF', '#7FFFFF', '#DFFFFF' ]
else:
colors = [ '#CCCCCC', '#ffff00', '#ff0000' ]
st = min(2, row['service_state'])
color = colors[st]
return "<font color=\"%s\">%.0f%%</font>" % (fg_color, left), perfometer_linear(left, color)
perfometers["check_mk-printer_supply"] = perfometer_check_mk_printer_supply
perfometers["check_mk-printer_supply_ricoh"] = perfometer_check_mk_printer_supply
def perfometer_printer_pages(row, check_command, perf_data):
color = "#909090"
return "%d" % int(perf_data[0][1]), perfometer_logarithmic(perf_data[0][1], 50000, 6, color)
perfometers["check_mk-printer_pages"] = perfometer_printer_pages
perfometers["check_mk-canon_pages"] = perfometer_printer_pages
def perfometer_msx_queues(row, check_command, perf_data):
length = int(perf_data[0][1])
state = row["service_state"]
if state == 1:
color = "#ffd020"
elif state == 2:
color = "#ff2020"
else:
color = "#6090ff"
return "%d" % length, perfometer_logarithmic(length, 100, 2, color)
perfometers["check_mk-winperf_msx_queues"] = perfometer_msx_queues
def perfometer_fileinfo(row, check_command, perf_data):
h = '<div class="stacked">'
texts = []
for i, color, base, scale, verbfunc in [
( 0, "#ffcc50", 1000000, 10, lambda v: number_human_readable(v, precision=0) ), # size
( 1, "#ccff50", 3600, 10, age_human_readable )]: # age
val = float(perf_data[i][1])
h += perfometer_logarithmic(val, base, scale, color)
texts.append(verbfunc(val))
h += '</div>'
return " / ".join(texts), h # perfometer_logarithmic(100, 200, 2, "#883875")
def perfometer_fileinfo_groups(row, check_command, perf_data):
h = '<div class="stacked">'
texts = []
for i, color, base, scale, verbfunc in [
( 2, "#aabb50", 10000, 10, lambda v: ("%d Tot") % v ), # count
( 1, "#ccff50", 3600, 10, age_human_readable )]: #age_newest
val = float(perf_data[i][1])
h += perfometer_logarithmic(val, base, scale, color)
texts.append(verbfunc(val))
h += '</div>'
return " / ".join(texts), h # perfometer_logarithmic(100, 200, 2, "#883875")
perfometers["check_mk-fileinfo"] = perfometer_fileinfo
perfometers["check_mk-fileinfo.groups"] = perfometer_fileinfo_groups
def perfometer_mssql_tablespaces(row, check_command, perf_data):
size = float(perf_data[0][1])
unallocated = float(perf_data[1][1])
reserved = float(perf_data[2][1])
data = float(perf_data[3][1])
indexes = float(perf_data[4][1])
unused = float(perf_data[5][1])
data_perc = data / reserved * 100
indexes_perc = indexes / reserved * 100
unused_perc = unused / reserved * 100
h = '<table><tr>'
h += perfometer_td(data_perc, "#80c0ff")
h += perfometer_td(indexes_perc, "#00ff80")
h += perfometer_td(unused_perc, "#f0b000")
h += '</tr></table>'
return "%.1f%%" % (data_perc + indexes_perc), h
perfometers["check_mk-mssql_tablespaces"] = perfometer_mssql_tablespaces
def perfometer_mssql_counters_cache_hits(row, check_command, perf_data):
perc = float(perf_data[0][1])
h = '<table><tr>'
h += perfometer_td(perc, "#69EA96");
h += perfometer_td(100 - perc, "#ffffff");
h += '</tr></table>'
return "%.1f%%" % perc, h
perfometers["check_mk-mssql_counters.cache_hits"] = perfometer_mssql_counters_cache_hits
def perfometer_hpux_tunables(row, check_command, perf_data):
varname, value, unit, warn, crit, minival, threshold = perf_data[0]
value = float(value)
threshold = float(threshold)
if warn != "" or crit != "":
warn = saveint(warn)
crit = saveint(crit)
# go red if we're over crit
if value > crit:
color = "#f44"
# yellow
elif value > warn:
color = "#f84"
else:
# all green lights
color = "#2d3"
else:
# use a brown-ish color if we have no levels.
# otherwise it could be "green" all the way to 100%
color = "#f4a460"
used = value / threshold * 100
return "%.0f%%" % (used), perfometer_linear(used, color)
perfometers["check_mk-hpux_tunables.nproc"] = perfometer_hpux_tunables
perfometers["check_mk-hpux_tunables.nkthread"] = perfometer_hpux_tunables
perfometers["check_mk-hpux_tunables.maxfiles_lim"] = perfometer_hpux_tunables
# this one still doesn't load. I need more test data to find out why.
perfometers["check_mk-hpux_tunables.semmni"] = perfometer_hpux_tunables
perfometers["check_mk-hpux_tunables.semmns"] = perfometer_hpux_tunables
perfometers["check_mk-hpux_tunables.shmseg"] = perfometer_hpux_tunables
perfometers["check_mk-hpux_tunables.nkthread"] = perfometer_hpux_tunables
perfometers["check_mk-hpux_tunables.nkthread"] = perfometer_hpux_tunables
# This will probably move to a generic DB one
def perfometer_mysql_capacity(row, check_command, perf_data):
color = { 0: "#68f", 1: "#ff2", 2: "#f22", 3: "#fa2" }[row["service_state"]]
size = float(perf_data[0][1])
# put the vertical middle at 40GB DB size, this makes small databases look small
# and big ones big. raise every 18 months by Moore's law :)
median = 40 * 1024 * 1024 * 1024
return "%s" % number_human_readable(size), perfometer_logarithmic(size, median, 10, color)
perfometers['check_mk-mysql_capacity'] = perfometer_mysql_capacity
def perfometer_vms_system_ios(row, check_command, perf_data):
h = '<div class="stacked">'
direct = float(perf_data[0][1])
buffered = float(perf_data[1][1])
h += perfometer_logarithmic(buffered, 10000, 3, "#38b0cf")
h += perfometer_logarithmic(direct, 10000, 3, "#38808f")
h += '</div>'
return "%.0f / %.0f" % (direct, buffered), h # perfometer_logarithmic(100, 200, 2, "#883875")
perfometers["check_mk-vms_system.ios"] = perfometer_vms_system_ios
def perfometer_check_mk_vms_system_procs(row, check_command, perf_data):
color = { 0: "#a4f", 1: "#ff2", 2: "#f22", 3: "#fa2" }[row["service_state"]]
return "%d" % int(perf_data[0][1]), perfometer_logarithmic(perf_data[0][1], 100, 2, color)
perfometers["check_mk-vms_system.procs"] = perfometer_check_mk_vms_system_procs
def perfometer_cmc_lcp(row, check_command, perf_data):
color = { 0: "#68f", 1: "#ff2", 2: "#f22", 3: "#fa2" }[row["service_state"]]
val = float(perf_data[0][1])
unit = str(perf_data[0][0])
return "%.1f %s" % (val,unit), perfometer_logarithmic(val, 4, 2, color)
perfometers["check_mk-cmc_lcp"] = perfometer_cmc_lcp
def perfometer_humidity(row, check_command, perf_data):
humidity = float(perf_data[0][1])
return "%3.1f% %" % humidity, perfometer_linear(humidity, '#6f2')
perfometers['check_mk-carel_uniflair_cooling'] = perfometer_humidity
perfometers['check_mk-cmciii.humidity'] = perfometer_humidity
perfometers['check_mk-allnet_ip_sensoric.humidity'] = perfometer_humidity
perfometers['check_mk-knuerr_rms_humidity'] = perfometer_humidity
def perfometer_eaton(row, command, perf):
return u"%s°C" % str(perf[0][1]), perfometer_linear(float(perf[0][1]), 'silver')
perfometers['check_mk-ups_eaton_enviroment'] = perfometer_eaton
def perfometer_battery(row, command, perf):
return u"%s%%" % str(perf[0][1]), perfometer_linear(float(perf[0][1]), '#C98D5C')
perfometers['check_mk-emc_datadomain_nvbat'] = perfometer_battery
def perfometer_ups_capacity(row, command, perf):
return "%0.2f%%" % float(perf[1][1]), perfometer_linear(float(perf[1][1]), '#B2FF7F')
perfometers['check_mk-ups_capacity'] = perfometer_ups_capacity
def perfometer_genu_screen(row, command, perf):
value = saveint(perf[0][1])
return "%d Sessions" % value , perfometer_logarithmic(value, 5000 , 2 , "#7109AA")
perfometers['check_mk-genu_pfstate'] = perfometer_genu_screen
def perfometer_simple_mem_usage(row, command, perf):
maxw = float(perf[0][6])
used_level = float(perf[0][1])
used_perc = (100.0 / maxw) * used_level
return "%d%%" % used_perc , perfometer_linear(used_perc, "#20cf80")
perfometers['check_mk-db2_mem'] = perfometer_simple_mem_usage
perfometers['check_mk-esx_vsphere_hostsystem.mem_usage'] = perfometer_simple_mem_usage
perfometers['check_mk-brocade_mlx.module_mem'] = perfometer_simple_mem_usage
perfometers['check_mk-innovaphone_mem'] = perfometer_simple_mem_usage
perfometers['check_mk-juniper_screenos_mem'] = perfometer_simple_mem_usage
perfometers['check_mk-netscaler_mem'] = perfometer_simple_mem_usage
perfometers['check_mk-arris_cmts_mem'] = perfometer_simple_mem_usage
perfometers["check_mk-juniper_trpz_mem"] = perfometer_simple_mem_usage
def perfometer_vmguest_mem_usage(row, command, perf):
used = float(perf[0][1])
return number_human_readable(used), perfometer_logarithmic(used, 1024*1024*2000, 2, "#20cf80")
perfometers['check_mk-esx_vsphere_vm.mem_usage'] = perfometer_vmguest_mem_usage
def perfometer_esx_vsphere_hostsystem_cpu(row, command, perf):
used_perc = float(perf[0][1])
return "%d%%" % used_perc, perfometer_linear(used_perc, "#60f020")
perfometers['check_mk-esx_vsphere_hostsystem.cpu_usage'] = perfometer_esx_vsphere_hostsystem_cpu
def perfometer_mq_queues(row, command, perf):
size = int(perf[0][1])
return "%s Messages" % size, perfometer_logarithmic(size, 1, 2, "#701141")
perfometers['check_mk-mq_queues'] = perfometer_mq_queues
perfometers['check_mk-websphere_mq_channels'] = perfometer_mq_queues
perfometers['check_mk-websphere_mq_queues'] = perfometer_mq_queues
def perfometer_apc_mod_pdu_modules(row, check_command, perf_data):
value = int(savefloat(perf_data[0][1]) * 100)
return "%skw" % perf_data[0][1], perfometer_logarithmic(value, 500, 2, "#3366CC")
perfometers["check_mk-apc_mod_pdu_modules"] = perfometer_apc_mod_pdu_modules
# Airflow in l/s
def perfometer_airflow_ls(row, check_command, perf_data):
value = int(float(perf_data[0][1])*100)
return "%sl/s" % perf_data[0][1], perfometer_logarithmic(value, 1000, 2, '#3366cc')
perfometers["check_mk-apc_inrow_airflow"] = perfometer_airflow_ls
# Airflow deviation in percent
def perfometer_airflow_deviation(row, check_command, perf_data):
value = float(perf_data[0][1])
return "%0.2f%%" % value, perfometer_linear(abs(value), "silver")
perfometers["check_mk-wagner_titanus_topsense.airflow_deviation"] = perfometer_airflow_deviation
def perfometer_fanspeed(row, check_command, perf_data):
value = float(perf_data[0][1])
return "%.2f%%" % value, perfometer_linear(value, "silver")
perfometers["check_mk-apc_inrow_fanspeed"] = perfometer_fanspeed
def perfometer_fanspeed_logarithmic(row, check_command, perf_data):
value = float(perf_data[0][1])
return "%d rpm" % value, perfometer_logarithmic(value, 5000, 2, "silver")
perfometers["check_mk-hitachi_hnas_fan"] = perfometer_fanspeed_logarithmic
perfometers["check_mk-bintec_sensors.fan"] = perfometer_fanspeed_logarithmic
def perfometer_check_mk_arcserve_backup(row, check_command, perf_data):
bytes = int(perf_data[2][1])
text = number_human_readable(bytes)
return text, perfometer_logarithmic(bytes, 1000 * 1024 * 1024 * 1024, 2, "#BDC6DE")
perfometers["check_mk-arcserve_backup"] = perfometer_check_mk_arcserve_backup
def perfometer_check_mk_ibm_svc_host(row, check_command, perf_data):
if len(perf_data) < 5:
return "", ""
h = '<table><tr>'
active = int(perf_data[0][1])
inactive = int(perf_data[1][1])
degraded = int(perf_data[2][1])
offline = int(perf_data[3][1])
other = int(perf_data[4][1])
total = active + inactive + degraded + offline + other
if active > 0:
perc_active = 100 * active / total
h += perfometer_td(perc_active, "#008000")
if inactive > 0:
perc_inactive = 100 * inactive / total
h += perfometer_td(perc_inactive, "#0000FF")
if degraded > 0:
perc_degraded = 100 * degraded / total
h += perfometer_td(perc_degraded, "#F84")
if offline > 0:
perc_offline = 100 * offline / total
h += perfometer_td(perc_offline, "#FF0000")
if other > 0:
perc_other = 100 * other / total
h += perfometer_td(perc_other, "#000000")
if total == 0:
h += perfometer_td(100, "white")
h += "</tr></table>"
return "%d active" % active, h
perfometers["check_mk-ibm_svc_host"] = perfometer_check_mk_ibm_svc_host
def perfometer_check_mk_ibm_svc_license(row, check_command, perf_data):
if len(perf_data) < 2:
return "", ""
licensed = float(perf_data[0][1])
used = float(perf_data[1][1])
if used == 0 and licensed == 0:
return "0 of 0 used", perfometer_linear(100, "white")
elif licensed == 0:
return "completely unlicensed", perfometer_linear(100, "silver")
else:
perc_used = 100 * used / licensed
return "%0.2f %% used" % perc_used, perfometer_linear(perc_used, "silver")
perfometers["check_mk-ibm_svc_license"] = perfometer_check_mk_ibm_svc_license
def perfometer_check_mk_ibm_svc_cache(row, check_command, perf_data):
h = '<table><tr>'
write_cache_pc = int(perf_data[0][1])
total_cache_pc = int(perf_data[1][1])
read_cache_pc = total_cache_pc - write_cache_pc
free_cache_pc = 100 - total_cache_pc
h += perfometer_td(write_cache_pc, "#60e0a0")
h += perfometer_td(read_cache_pc, "#60a0e0")
h += perfometer_td(free_cache_pc, "white")
h += "</tr></table>"
return "%d %% write, %d %% read" % (write_cache_pc, read_cache_pc), h
perfometers["check_mk-ibm_svc_nodestats.cache"] = perfometer_check_mk_ibm_svc_cache
perfometers["check_mk-ibm_svc_systemstats.cache"] = perfometer_check_mk_ibm_svc_cache
def perfometer_licenses_percent(row, check_command, perf_data):
licenses = float(perf_data[0][1])
max_avail = float(perf_data[0][6])
used_perc = 100.0 * licenses / max_avail
return "%.0f%% used" % used_perc, perfometer_linear( used_perc, 'orange' )
perfometers['check_mk-innovaphone_licenses'] = perfometer_licenses_percent
perfometers['check_mk-citrix_licenses'] = perfometer_licenses_percent
def perfometer_smoke_percent(row, command, perf):
used_perc = float(perf[0][1])
return "%0.6f%%" % used_perc, perfometer_linear(used_perc, "#404040")
perfometers['check_mk-wagner_titanus_topsense.smoke'] = perfometer_smoke_percent
def perfometer_chamber_deviation(row, command, perf):
chamber_dev = float(perf[0][1])
return "%0.6f%%" % chamber_dev, perfometer_linear(chamber_dev, "#000080")
perfometers['check_mk-wagner_titanus_topsense.chamber_deviation'] = perfometer_chamber_deviation
def perfometer_cache_hit_ratio(row, check_command, perf_data):
hit_ratio = float(perf_data[0][1]) # is already percentage
color = "#60f020"
return "%.2f %% hits" % hit_ratio, perfometer_linear(hit_ratio, color)
perfometers["check_mk-zfs_arc_cache"] = perfometer_cache_hit_ratio
perfometers["check_mk-zfs_arc_cache.l2"] = perfometer_cache_hit_ratio
def perfometer_current(row, check_command, perf_data):
display_color = "#50f020"
value = savefloat(perf_data[0][1])
crit = savefloat(perf_data[0][4])
warn = savefloat(perf_data[0][3])
current_perc = value/crit*90 # critical is at 90% to allow for more than crit
if value > warn:
display_color = "#FDC840"
if value > crit:
display_color = "#FF0000"
display_string = "%.1f Ampere" % value
return display_string, perfometer_linear(current_perc, display_color)
perfometers["check_mk-adva_fsp_current"] = perfometer_current
def perfometer_raritan_pdu_inlet(row, check_command, perf_data):
display_color = "#50f020"
cap = perf_data[0][0].split('-')[-1]
value = float(perf_data[0][1])
unit = perf_data[0][2]
display_str = perf_data[0][1] + " " + unit
if cap.startswith("rmsCurrent"):
return display_str, perfometer_logarithmic(value, 1, 2, display_color)
elif cap.startswith("unbalancedCurrent"):
return display_str, perfometer_linear(value, display_color)
elif cap.startswith("rmsVoltage"):
return display_str, perfometer_logarithmic(value, 500, 2, display_color)
elif cap.startswith("activePower"):
return display_str, perfometer_logarithmic(value, 20, 2, display_color)
elif cap.startswith("apparentPower"):
return display_str, perfometer_logarithmic(value, 20, 2, display_color)
elif cap.startswith("powerFactor"):
return display_str, perfometer_linear(value * 100, display_color)
elif cap.startswith("activeEnergy"):
return display_str, perfometer_logarithmic(value, 100000, 2, display_color)
elif cap.startswith("apparentEnergy"):
return display_str, perfometer_logarithmic(value, 100000, 2, display_color)
return "unimplemented" , perfometer_linear(0, "#ffffff")
perfometers["check_mk-raritan_pdu_inlet"] = perfometer_raritan_pdu_inlet
perfometers["check_mk-raritan_pdu_inlet_summary"] = perfometer_raritan_pdu_inlet
def perfometer_raritan_pdu_outletcount(row, check_command, perf_data):
outletcount = float(perf_data[0][1])
return "%d" % outletcount, perfometer_logarithmic(outletcount, 20, 2, "#da6")
perfometers["check_mk-raritan_pdu_outletcount"] = perfometer_raritan_pdu_outletcount
def perfometer_allnet_ip_sensoric_tension(row, check_command, perf_data):
display_color = "#50f020"
value = float(perf_data[0][1])
return value, perfometer_linear(value, display_color)
perfometers["check_mk-allnet_ip_sensoric.tension"] = perfometer_allnet_ip_sensoric_tension
def perfometer_pressure(row, check_command, perf_data):
pressure = float(perf_data[0][1])
return "%0.5f bars" % pressure, perfometer_logarithmic(pressure, 1, 2, "#da6")
perfometers['check_mk-allnet_ip_sensoric.pressure'] = perfometer_pressure
def perfometer_voltage(row, check_command, perf_data):
color = "#808000"
value = float(perf_data[0][1])
return "%0.3f V" % value, perfometer_logarithmic(value, 12, 2, color)
perfometers["check_mk-bintec_sensors.voltage"] = perfometer_voltage
def perfometer_dbmv(row, check_command, perf_data):
dbmv = float(perf_data[0][1])
return "%.1f dBmV" % dbmv, perfometer_logarithmic(dbmv, 50, 2, "#da6")
perfometers["check_mk-docsis_channels_downstream"] = perfometer_dbmv
perfometers["check_mk-docsis_cm_status"] = perfometer_dbmv
def perfometer_veeam_client(row, check_command, perf_data):
for graph in perf_data:
if graph[0] == "avgspeed":
avgspeed_bytes = int(graph[1])
if graph[0] == "duration":
duration_secs = int(graph[1])
h = perfometer_logarithmic_dual_independent(avgspeed_bytes, '#54b948', 10000000, 2, duration_secs, '#2098cb', 500, 2)
avgspeed = bytes_human_readable(avgspeed_bytes)
# Return Value always as minutes
duration = age_human_readable(duration_secs, True)
return "%s/s %s" % (avgspeed, duration), h
perfometers["check_mk-veeam_client"] = perfometer_veeam_client
def perfometer_ups_outphase(row, check_command, perf_data):
load = saveint(perf_data[2][1])
return "%d%%" % load, perfometer_linear(load, "#8050ff")
perfometers["check_mk-ups_socomec_outphase"] = perfometer_ups_outphase
def perfometer_el_inphase(row, check_command, perf_data):
for data in perf_data:
if data[0] == "power":
power = savefloat(data[1])
return "%.0f W" % power, perfometer_linear(power, "#8050ff")
perfometers["check_mk-raritan_pdu_inlet"] = perfometer_el_inphase
perfometers["check_mk-raritan_pdu_inlet_summary"] = perfometer_el_inphase
perfometers["check_mk-ucs_bladecenter_psu.switch_power"] = perfometer_el_inphase
def perfometer_f5_bigip_vserver(row, check_command, perf_data):
connections = int(perf_data[0][1])
return str(connections), perfometer_logarithmic(connections, 100, 2, "#46a")
perfometers["check_mk-f5_bigip_vserver"] = perfometer_f5_bigip_vserver
#.
# .--Obsolete------------------------------------------------------------.
# | ___ _ _ _ |
# | / _ \| |__ ___ ___ | | ___| |_ ___ |
# | | | | | '_ \/ __|/ _ \| |/ _ \ __/ _ \ |
# | | |_| | |_) \__ \ (_) | | __/ || __/ |
# | \___/|_.__/|___/\___/|_|\___|\__\___| |
# | |
# +----------------------------------------------------------------------+
# | These Perf-O-Meters are no longer needed since they are being |
# | handled by the new metrics.py module. |
# '----------------------------------------------------------------------'
def perfometer_check_mk(row, check_command, perf_data):
# make maximum value at 90sec.
exectime = float(perf_data[0][1])
perc = min(100.0, exectime / 90.0 * 100)
if exectime < 10:
color = "#2d3"
elif exectime < 30:
color = "#ff4"
elif exectime < 60:
color = "#f84"
else:
color = "#f44"
return "%.1f s" % exectime, perfometer_linear(perc, color)
perfometers["check-mk"] = perfometer_check_mk
def perfometer_check_mk_cpu_loads(row, check_command, perf_data):
color = { 0: "#68f", 1: "#ff2", 2: "#f22", 3: "#fa2" }[row["service_state"]]
load = float(perf_data[0][1])
return "%.1f" % load, perfometer_logarithmic(load, 4, 2, color)
perfometers["check_mk-cpu.loads"] = perfometer_check_mk_cpu_loads
perfometers["check_mk-ucd_cpu_load"] = perfometer_check_mk_cpu_loads
perfometers["check_mk-statgrab_load"] = perfometer_check_mk_cpu_loads
perfometers["check_mk-hpux_cpu"] = perfometer_check_mk_cpu_loads
perfometers["check_mk-blade_bx_load"] = perfometer_check_mk_cpu_loads
def perfometer_check_mk_df(row, check_command, perf_data):
varname, value, unit, warn, crit, minn, maxx = perf_data[0]
hours_left = None
for data in perf_data:
if data[0] == "trend_hoursleft":
hours_left = float(data[1])
break
perc_used = 100 * (float(value) / float(maxx))
perc_free = 100 - float(perc_used)
if hours_left is not None:
h = '<div class="stacked"><table><tr>'
h += perfometer_td(perc_used, "#00ffc6")
h += perfometer_td(perc_free, "white")
h += "</tr></table><table><tr>"
if hours_left == -1.0:
h += perfometer_td(100, "#39c456")
h += '</tr></table></div>'
return "%0.1f%% / not growing" % (perc_used), h
days_left = hours_left / 24
if days_left > 30:
color = "#39c456" # OK
elif days_left < 7:
color = "#d94747" # CRIT
else:
color = "#d7d139" # WARN
half = math.log(30.0, 2) # value to be displayed at 50%
pos = 50 + 10.0 * (math.log(days_left, 2) - half)
if pos < 2:
pos = 2
if pos > 98:
pos = 98
h += perfometer_td(100 - pos, color)
h += perfometer_td(pos, "white")
h += '</tr></table></div>'
if days_left > 365:
days_left = " >365"
else:
days_left = "%0.1f" % days_left
return "%0.1f%%/%s days left" % (perc_used, days_left), h
else:
h = '<table><tr>'
h += perfometer_td(perc_used, "#00ffc6")
h += perfometer_td(perc_free, "white")
h += "</tr></table>"
return "%0.2f %%" % perc_used, h
perfometers["check_mk-df"] = perfometer_check_mk_df
perfometers["check_mk-vms_df"] = perfometer_check_mk_df
perfometers["check_mk-vms_diskstat.df"] = perfometer_check_mk_df
perfometers["check_disk"] = perfometer_check_mk_df
perfometers["check_mk-df_netapp"] = perfometer_check_mk_df
perfometers["check_mk-df_netapp32"] = perfometer_check_mk_df
perfometers["check_mk-zfsget"] = perfometer_check_mk_df
perfometers["check_mk-hr_fs"] = perfometer_check_mk_df
perfometers["check_mk-oracle_asm_diskgroup"] = perfometer_check_mk_df
perfometers["check_mk-mysql_capacity"] = perfometer_check_mk_df
perfometers["check_mk-esx_vsphere_counters.ramdisk"] = perfometer_check_mk_df
perfometers["check_mk-hitachi_hnas_span"] = perfometer_check_mk_df
perfometers["check_mk-hitachi_hnas_volume"] = perfometer_check_mk_df
perfometers["check_mk-emcvnx_raidgroups.capacity"] = perfometer_check_mk_df
perfometers["check_mk-emcvnx_raidgroups.capacity_contiguous"] = perfometer_check_mk_df
perfometers["check_mk-ibm_svc_mdiskgrp"] = perfometer_check_mk_df
perfometers["check_mk-fast_lta_silent_cubes.capacity"] = perfometer_check_mk_df
perfometers["check_mk-fast_lta_volumes"] = perfometer_check_mk_df
perfometers["check_mk-libelle_business_shadow.archive_dir"] = perfometer_check_mk_df
perfometers["check_mk-netapp_api_volumes"] = perfometer_check_mk_df
perfometers["check_mk-df_zos"] = perfometer_check_mk_df
def perfometer_check_mk_kernel_util(row, check_command, perf_data):
h = '<table><tr>'
h += perfometer_td(perf_data[0][1], "#6f2")
h += perfometer_td(perf_data[1][1], "#f60")
h += perfometer_td(perf_data[2][1], "#0bc")
total = sum([float(p[1]) for p in perf_data])
h += perfometer_td(100.0 - total, "white")
h += "</tr></table>"
return "%d%%" % total, h
perfometers["check_mk-kernel.util"] = perfometer_check_mk_kernel_util
perfometers["check_mk-vms_sys.util"] = perfometer_check_mk_kernel_util
perfometers["check_mk-vms_cpu"] = perfometer_check_mk_kernel_util
perfometers["check_mk-ucd_cpu_util"] = perfometer_check_mk_kernel_util
perfometers["check_mk-lparstat_aix.cpu_util"] = perfometer_check_mk_kernel_util
perfometers["check_mk-emc_isilon_cpu"] = perfometer_check_mk_kernel_util
def perfometer_check_mk_cpu_threads(row, check_command, perf_data):
color = { 0: "#a4f", 1: "#ff2", 2: "#f22", 3: "#fa2" }[row["service_state"]]
return "%d" % int(perf_data[0][1]), perfometer_logarithmic(perf_data[0][1], 400, 2, color)
perfometers["check_mk-cpu.threads"] = perfometer_check_mk_cpu_threads
def perfometer_docsis_snr(row, check_command, perf_data):
dbmv = float(perf_data[0][1])
return "%.1f dB" % dbmv, perfometer_logarithmic(dbmv, 50, 2, "#ad6")
perfometers["check_mk-docsis_channels_upstream"] = perfometer_docsis_snr
|
oposs/check_mk_mirror
|
web/plugins/perfometer/check_mk.py
|
Python
|
gpl-2.0
| 52,956
|
#!/usr/bin/python
#
# File: qasm2tex.py
# Date: 22-Mar-04
# Author: I. Chuang <ichuang@mit.edu>
#
# Python program to convert qasm to latex (and optionally generate ps/epsf/pdf)
#
# Usage: qasm2tex in.qasm
#
# Outputs: latex file (to stdout)
#
# Notes:  qasm instructions are as follows.  Lines beginning with '#'
# are comments. All other lines should be of the form <b>op<b>args
# where <b> is whitespace, and op-args pairs are:
#
# qubit name,initval
# cbit name,initval
# measure qubit
# H qubit
# X qubit
# Y qubit
# Z qubit
# S qubit
# T qubit
# nop qubit
# zero qubit
# discard qubit
# slash qubit
# dmeter qubit
# cnot ctrl,target
# c-z ctrl,target
# c-x ctrl,target
# toffoli ctrl1,ctrl2,target
# ZZ b1,b2
# SS b1,b2
# swap b1,b2
# Utwo b1,b2
# space qubit
# def opname,nctrl,texsym
# defbox opname,nbits,nctrl,texsym
#
# Where:
#
# def - define a custom controlled single-qubit operation, with
# opname = name of gate operation
# nctrl = number of control qubits
# texsym = latex symbol for the target qubit operation
#   defbox  - define a custom multi-qubit-controlled multi-qubit operation, with
# opname = name of gate operation
# nbits = number of qubits it acts upon
# nctrl = number of control qubits
# texsym = latex symbol for the target qubit operation
# qubit - define a qubit with a certain name (all qubits must be defined)
# name = name of the qubit, eg q0 or j2 etc
# initval = initial value (optional), eg 0
# cbit - define a cbit with a certain name (all cbits must be defined)
# name = name of the cbit, eg c0
# initval = initial value (optional), eg 0
# H - single qubit operator ("hadamard")
# X - single qubit operator
# Y - single qubit operator
# Z - single qubit operator
# S - single qubit operator
# T - single qubit operator
# nop - single qubit operator, just a wire
# space - single qubit operator, just an empty space
# dmeter - measure qubit, showing "D" style meter instead of rectangular box
# zero - replaces qubit with |0> state
# discard - discard qubit (put "|" vertical bar on qubit wire)
# slash - put slash on qubit wire
# measure - measurement of qubit, gives classical bit (double-wire) output
# cnot - two-qubit CNOT
# c-z - two-qubit controlled-Z gate
# c-x - two-qubit controlled-X gate
# swap - two-qubit swap operation
# Utwo - two-qubit operation U
# ZZ - two-qubit controlled-Z gate, symmetric notation; two filled circles
# SS - two-qubit gate, symmetric; open squares
# toffoli - three-qubit Toffoli gate
#
#-----------------------------------------------------------------------------
#
# Patched 02-Nov-04 by P. Oscar Boykin to allow arbitrarily large circuits
# (old version used to run out when chr() returned a non-alpha character)
#
#-----------------------------------------------------------------------------
#
# $Log: qasm2tex.py,v $
# Revision 1.21 2004/03/25 15:36:59 ike
# special case for bullet target
# switched ZZ to using filled circles
# SS is now the two-qubit op with open squares
#
# Revision 1.20 2004/03/25 05:32:35 ike
# added comments for new gates
#
# Revision 1.19 2004/03/25 05:09:54 ike
# moved qubit labels to def's
# added ZZ, slash, discard, dmeter
#
# Revision 1.18 2004/03/24 20:49:03 ike
# more comments
#
# Revision 1.17 2004/03/24 20:47:08 ike
# comments for S,T
#
# Revision 1.16 2004/03/24 20:40:58 ike
# comments for swap
#
# Revision 1.15 2004/03/24 20:40:30 ike
# added swap gate
#
# Revision 1.14 2004/03/24 20:16:18 ike
# comments
#
# Revision 1.13 2004/03/24 20:15:27 ike
# multi-qubit controlled multi-qubit gates now work
# added space
#
# Revision 1.12 2004/03/24 19:24:30 ike
# multiqubit gate targets can now be in any order
# error checking is done for duplicate targets
#
# Revision 1.11 2004/03/24 18:04:17 ike
# added multi qubit gates
#
# Revision 1.10 2004/03/24 16:38:55 ike
# added zero, S,T,U
#
# Revision 1.9 2004/03/24 04:39:36 ike
# added copyright
#
# Revision 1.8 2004/03/24 03:22:43 ike
# added more comments
#
# Revision 1.7 2004/03/24 03:12:55 ike
# qubits can now have initial values
#
# Revision 1.6 2004/03/24 00:36:06 ike
# multiple controls on qubit now work
#
# Revision 1.5 2004/03/23 23:59:44 ike
# custom gate def's now work; see test4.qasm
#
# Revision 1.4 2004/03/23 23:42:35 ike
# new version with global gate definition table
#
# Revision 1.3 2004/03/23 23:13:36 ike
# working version, switches between single and double wires automatically
#
# Revision 1.2 2004/03/23 21:05:29 ike
# rcs log
#
#-----------------------------------------------------------------------------
#
# Copyright (c) 2004 Isaac L. Chuang <ichuang@mit.edu>
#
# This file, qasm2tex, is part of qasm2circ
#
# qasm2tex is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# qasm2tex is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qasm2tex; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# -*-Python-*-
import re
import sys
import os
import fileinput
from struct import *
from string import *
#-----------------------------------------------------------------------------
def do_error(msg): # global error handler
sys.stderr.write('ERROR: ' + msg + '\n')
sys.exit(-1)
#-----------------------------------------------------------------------------
def num2name(num): # convert a number to a name
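    # spreadsheet-style naming (behaviour read off the code below):
    # 0 -> '', 1 -> 'A', 2 -> 'B', ..., 26 -> 'Z', 27 -> 'AA'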
if( num == 0 ):
return "";
elif( num <= 26 ):
return chr(num+64)
else:
return chr( (num % 26) + 64) + num2name(num/26)
#-----------------------------------------------------------------------------
class qgate: # quantum gate class
def __init__(self,op,args,linenum):
self.name = op # gate name
self.args = args # arguments to gate
self.qubits = args.split(',') # name of qubits we act upon
self.timeseq = 0 # time sequence number
self.id = 0 # gate ID number (unique)
self.endtex = '' # latex to output after xymatrix
self.xy = {} # gate xy ID table
self.yloc = {} # y-location of qubits we act upon
self.wiretype = {} # wire type for this gate/qubit
self.linenum = linenum # line number of input where gate used
# do a quick syntax check to make sure number of operands is correct
# and that the gate exists
if not GateMasterDef.has_key(self.name):
s = (self.linenum, self.name, self.args)
do_error("[qgate] OOPS! line %d unknown gate op %s on %s" % s)
# retrieve information about gate from master table
(self.nbits, self.nctrl, self.texsym) = GateMasterDef[self.name]
# check if the operand has the right number of bits
if (len(self.qubits) != self.nbits): # right # bits?
s = (self.linenum, self.name + " " + self.args)
do_error("[qgate] OOPS! line %d wrong number of qubits in %s" % s)
# check for duplicate operands
x = self.qubits
if ([ x.count(qb) for qb in x ].count(1) < len(x)):
s = (self.linenum, self.name + " " + self.args)
do_error("[qgate] OOPS! line %d duplicate bit operands in %s" % s)
def set_bittype(self,qb,cbit): # set qubit type (cbit/qbit)
self.wiretype[qb] = cbit
def make_id(self,qb2idx): # make gate ID's, eg gAB
for qb in self.qubits:
self.xy[qb] = self.xyid(qb2idx[qb])
self.yloc[qb] = qb2idx[qb] # y (vertical) location of qubit
def xid(self): # return ID string for gate timestep
return('g%s' % (num2name(self.timeseq)))
def xyid(self,qubitnum): # return ID string for gate/qubit
return('%s%s%s' % (self.xid(),'x',num2name(qubitnum)))
def latex(self): # output latex/xypic/xyqcirc for gate
def defid(k,op): # latex def for given gate & qubit
myid = self.xy[self.qubits[k]]
wires = ['\w','\W'] # \w = single, \W = double wire
mywire = wires[self.wiretype[self.qubits[k]]]
return('\def\%s{%s%s\A{%s}}' % (myid,op,mywire,myid))
def get_wiretype(qubits): # figure out wire type for verticals
# if any control is classical (double-wire) then all should be
if(sum([ self.wiretype[x] for x in qubits])>0):
wt = '=' # wire type = cbit
else:
wt = '-' # wire type = qubit
return(wt)
def do_multiqubit(nbits,nctrl,u): # multiple-qubit operation
# first do target qubits (big box)
s = []
targets = self.qubits[nctrl:]
ytab = [ self.yloc[qb] for qb in targets ]
idx = ytab.index(min(ytab)) # find which qubit is first
qb = targets[idx] # handle first qubit specially
ytop = min(ytab) # remember y location & ID of top qubit
xytop = self.xy[qb]
ybot = max(ytab) # and bottom
xybot = self.xy[targets[ytab.index(ybot)]]
myid = self.xy[qb] # top qubit gets \gnqubit{u}{ddd...}
dstr = 'd'*(nbits-nctrl-1)
wires = ['\w','\W'] # \w = single, \W = double wire
w = wires[self.wiretype[qb]]
s.append(r'\def\%s{\gnqubit{%s}{%s}%s\A{%s}}'%(myid,u,dstr,w,myid))
firstqb = qb
for qb in targets: # loop over target bits
if (qb==firstqb): # skip first qubit
continue
myid = self.xy[qb] # non-first bits get \gspace{u}
w = wires[self.wiretype[qb]]
s.append(r'\def\%s{\gspace{%s}%s\A{%s}}' % (myid,u,w,myid))
# now do control qubits
controls = self.qubits[:nctrl]
for k in range(nctrl): # loop over all control qubits
s.append(defid(k,r'\b')) # bullets on controls
# create vertical wires
# if any control is classical (double-wire) then all should be
wt = get_wiretype(controls)
for qb in controls: # loop over all ctrl qubits
# endtex = latex commands which appear after xymatrix body
# such as the vertical wires
if self.yloc[qb] < ytop:
self.endtex += r'\ar@{%c}"%s";"%s"' %(wt,xytop,self.xy[qb])
else:
self.endtex += r'\ar@{%c}"%s";"%s"' %(wt,xybot,self.xy[qb])
# done with multi-qubit op
return(join(s,'\n')) # return with latex def's
def ctrl_op(nctrl,u): # controlled operation
s = []
for k in range(nctrl): # loop over all control qubits
s.append(defid(k,r'\b')) # bullets on controls
s.append(defid(nctrl,u)) # add target op
s = join(s,'\n')
# create vertical wires
qbtarget = self.xy[self.qubits[-1]]
wt = get_wiretype(self.qubits[0:-1])
for qb in self.qubits[0:-1]: # loop over all ctrl-target pairs
# endtex = latex commands which appear after xymatrix body
# such as the vertical wires
self.endtex += r'\ar@{%c}"%s";"%s"' % (wt,qbtarget,self.xy[qb])
return(s)
def check_multi_qubit_gate_targets(nctrl):
# gate targets (not controls) must be consecutive bits
ytab = [self.yloc[qb] for qb in self.qubits[nctrl:]]
ytab.sort()
for k in range(len(ytab)-1):
if (ytab[k+1]-ytab[k]!=1):
s = (self.linenum, self.name + " " + self.args)
do_error('[qgate] OOPS! line %d multi-qubit gate targets not consecutive %s' % s)
def double_sym_gate(texsym):
wt = get_wiretype(self.qubits)
qb0 = self.xy[self.qubits[0]]
qb1 = self.xy[self.qubits[1]]
self.endtex += r'\ar@{%c}"%s";"%s"' % (wt,qb0,qb1)
return(defid(0,texsym) + '\n' + defid(1,texsym))
# main routine to generate latex
(nbits, nctrl, texsym) = GateMasterDef[self.name]
if(self.name=='zero'): # special for zero: no wire
myid = self.xy[self.qubits[0]]
return('\def\%s{%s\A{%s}}' % (myid,texsym,myid))
if(self.name=='space'): # special for space: no wire
myid = self.xy[self.qubits[0]]
return('\def\%s{\A{%s}}' % (myid,myid))
if(self.name=='ZZ'): # special for ZZ gate
return(double_sym_gate(texsym))
if(self.name=='SS'): # special for SS gate
return(double_sym_gate(texsym))
if(self.name=='swap'): # special for swap gate
return(double_sym_gate(texsym))
if(nbits-nctrl>1): # multi-qubit gate
check_multi_qubit_gate_targets(nctrl)
return(do_multiqubit(nbits,nctrl,texsym))
if(nctrl==0):
return(defid(0,texsym)) # single qubit op
else:
return(ctrl_op(nctrl,texsym)) # controlled-single-qubit op
#-----------------------------------------------------------------------------
class qasm_parser: # parser for qasm; inputs lines, returns
# tables of comments, names, and gates
def __init__(self,fp):
self.nametab = [] # table of bit names
self.gatetab = [] # table of gates
self.typetab = [] # table of bit types (0=qubit, 1=cbit)
self.comments = '' # string with comments from original qasm file
linenum = 0 # line number counting, for error messages
for line in fp: # loop over input lines
linenum += 1 # line number counter
if(line[0]=='#'):
self.comments += line
continue
else:
self.comments += "% " + line # optional - include all input
# qubit spec - syntax: qubit name
m = re.compile('\s+qubit\s+(\S+)').search(line)
if(m):
self.nametab.append(m.group(1)) # add name
self.typetab.append(0) # add as qubit
# print "qubit: %s" % m.group(1)
continue
# cbit spec - syntax: cbit name
m = re.compile('\s+cbit\s+(\S+)').search(line)
if(m):
self.nametab.append(m.group(1)) # add name
self.typetab.append(1) # add as cbit
# print "cbit: %s" % m.group(1)
continue
# gate definition spec - syntax: def name,num-ctrl-qubits,texsym
# this is for controlled single-qubit operations only
m = re.compile("\s+def\s+(\S+),'(.*)'").search(line)
if(m):
(name,nctrl) = m.group(1).split(',')
tex = m.group(2)
if(tex=='bullet'): # special for bullet, no \op{}
texsym = r'\b'
elif(tex.find(r'\dmeter')>=0): # if meas, don't put in \op{}
texsym = tex
else:
texsym = '\op{%s}' % tex
nctrl = int(nctrl)
if GateMasterDef.has_key(name):
print "[qasm_parser] oops! duplicate def for op %s" % line
else:
GateMasterDef[name] = (nctrl+1, nctrl, texsym)
# print "definition: %s" % m.group(1)
continue
# box-gate definition spec - syntax: defbox name,nbits,nctrl,texsym
# this is for multi-qubit controlled multi-qubit operations
m = re.compile("\s+defbox\s+(\S+),'(.*)'").search(line)
if(m):
(name,nbits,nctrl) = m.group(1).split(',')
texsym = m.group(2)
nbits = int(nbits)
nctrl = int(nctrl)
if GateMasterDef.has_key(name):
print "[qasm_parser] oops! duplicate def for op %s" % line
else:
GateMasterDef[name] = (nbits, nctrl, texsym)
# print "definition: %s" % m.group(1)
continue
# gate acting on qubits
m = re.compile('\s+(\S+)\s+(\S+)').search(line)
if(m):
op = m.group(1)
args = m.group(2)
self.gatetab.append(qgate(op,args,linenum))
#-----------------------------------------------------------------------------
class qcircuit: # quantum circuit class
def __init__(self,bitnames,typetab):
self.initval = {} # qubit initial values
self.is_cbit = {} # flags to see if a bit is qubit or cbit
self.setnames(bitnames,typetab) # set names & types of qubits
self.qbtab = {} # initialize qubit table (assoc array)
# each element in qbtab holds an array
# of IDs for gates acting on that qubit
self.qb2idx = {} # translate from name to index
k = 1
for name in self.qubitnames: # create index for name->idx translate
self.qbtab[name] = [] # array of gates on this qubit
self.qb2idx[name] = k # index for this qubit
# print "%% [qcircuit] qubit %s (id=%d)" % (name,k)
k += 1
self.optab = [] # initialize table of gates
self.circuit = [] # initialize table of circuit timesteps
self.matrix = [] # initialize null circuit matrix
def setnames(self,names,types): # set bit names and types (+ initval)
def do_name(n,type): # set names & extract initial values
tmp = n.split(',') # check for initial value
self.qubitnames.append(tmp[0]) # add to name list
self.is_cbit[tmp[0]] = type # 0 = qubit, 1 = cbit
if(len(tmp)>1):
self.initval[tmp[0]] = tmp[1] # add initial value for qubit
self.qubitnames = []
for k in range(len(names)): # loop over qubit names
do_name(names[k],types[k]) # process name and type
def add_op(self,gate): # add gate to circuit
self.optab.append(gate) # put gate into table of gates
gate.id = len(self.optab)-1 # give the gate a unique ID number
# print "%% adding op %s(%s) IDs: %s" % (gate.name,gate.args,
# join(gate.xy.values(),','))
for qb in gate.qubits: # put gate on qubits it acts upon
if(self.qbtab.has_key(qb)==0): # check for syntax error
s = (qb,gate.linenum,gate.name + ' ' + gate.args)
do_error('[qcircuit] No qubit %s in line %d: "%s"' % s)
if(len(self.qbtab[qb])==0): # if first gate, timestep = 1
ts = 1
else: # otherwise, timestep = last+1
ts = self.optab[self.qbtab[qb][-1]].timeseq+1
self.qbtab[qb].append(gate.id)
if(ts>gate.timeseq): # set timeseq number for gate
gate.timeseq = ts # to be largest of its qubits
gate.make_id(self.qb2idx) # make gate ID's (do after timestep)
if(gate.timeseq > len(self.circuit)): # add new timestep if necessary
self.circuit.append([])
self.circuit[gate.timeseq-1].append(gate.id) # add gate to circuit
def output_sequence(self): # output time-sequence of gates
k = 1 # timestep counter
for timestep in self.circuit: # loop over timesteps
print "%% Time %02d:" % k
for g in timestep: # loop over events in this timestep
op = self.optab[g]
print "%% Gate %02d %s(%s)" % (op.id,
op.name,op.args)
k += 1
print ""
def output_matrix(self): # output circuit matrix, of qubit vs timestep
if(len(self.matrix)==0): # make circuit matrix if not done
self.make_matrix()
k = 0
print "% Qubit circuit matrix:\n%"
for y in self.matrix: # loop over qubits
print '%% %s: %s' % (self.qubitnames[k],join(y,', '))
k += 1
def make_matrix(self): # make circuit matrix, of qubit vs timestep
self.matrix = []
        ntime = len(self.circuit)+2	# total number of timesteps
wires = ['n','N'] # single or double wire for qubit/cbit
for qb in self.qubitnames: # loop over qubits
self.matrix.append([]) # start with empty row
k = 1 # timestep counter
cbit = self.is_cbit[qb] # cbit=0 means qubit type (single wire)
gidtab = self.qbtab[qb] # table of gate IDs
for gid in gidtab: # loop over IDs for gates on qubit
g = self.optab[gid] # gate with that ID
while(g.timeseq>k): # output null ops until gate acts
self.matrix[-1].append('%s ' % wires[cbit])
k += 1 # increment timestep
g.set_bittype(qb,cbit) # set qubit type (cbit/qubit)
self.matrix[-1].append(g.xy[qb])
k += 1 # increment timestep
if(g.texsym=='\meter'): # if measurement gate then cbit=1
cbit = 1
if(g.texsym.find('\dmeter')>=0): # alternative measurement gate
cbit = 1
if(g.name=='measure'): # if measurement gate then cbit=1
cbit = 1 # switch to double wire
if(g.name=='zero'): # if zero gate then cbit=0
cbit = 0 # switch to single wire
while(k<ntime): # fill in null ops until end of circuit
k += 1 # unless last g was space or discard
if((g.name!='space')&(g.name!='discard')):
self.matrix[-1].append('%s ' % wires[cbit])
def qb2label(self,qb): # make latex format label for qubit name
        m = re.compile('([A-Za-z]+)(\d+)').search(qb)
if(m): # make num subscript if name = alpha+numbers
label = "%s_{%s}" % (m.group(1),m.group(2))
else:
            label = qb                  # otherwise use just what was specified
if(self.is_cbit[qb]):
if(self.initval.has_key(qb)): # qubit has initial value?
label = r' {%s = %s}' % (label,self.initval[qb])
else:
label = r' {%s}' % (label)
else:
if(self.initval.has_key(qb)): # qubit has initial value?
label = r'\qv{%s}{%s}' % (label,self.initval[qb])
else:
label = r' \q{%s}' % (label)
return(label)
def output_latex(self): # output latex with xypic for circuit
if(len(self.matrix)==0): # make circuit matrix if not done
self.make_matrix()
print ''
print r'\documentclass[11pt]{article}' # output latex header
print r'\input{xyqcirc.tex}'
# now go through all gates and output latex definitions
print ""
print "% definitions for the circuit elements\n"
for g in self.optab:
print g.latex() # output \def\gXY{foo} lines
# now output defs for qubit labels and initial states
print ""
print "% definitions for bit labels and initial states\n"
for j in range(len(self.matrix)):
qb = self.qubitnames[j]
print r"\def\b%s{%s}" % (num2name(j+1),self.qb2label(qb))
# now output circuit
print ""
# print r'\xymatrix@R=15pt@C=12pt{'
print "% The quantum circuit as an xymatrix\n"
print r'\xymatrix@R=5pt@C=10pt{'
        ntime = len(self.circuit)+2	# total number of timesteps
j = 0 # counter for timestep
stab = [] # table of strings
for y in self.matrix: # loop over qubits
qb = self.qubitnames[j] # qubit name
ops = join(map(lambda(x):'\\'+x,y),' &')
stab.append(r'\b%s & %s' % (num2name(j+1),ops))
j += 1 # increment timestep
stab[0] = ' ' + stab[0]
print join(stab,'\n\\\\ ')
# now go through all gates and output final latex (eg vertical lines)
print "%"
print "% Vertical lines and other post-xymatrix latex\n%"
for g in self.optab:
if(g.endtex!=""):
print g.endtex # output end latex commands
# now end the xymatrix & latex document
print r'}'
print ''
print r'\end{document}'
#-----------------------------------------------------------------------------
# master gate definition table (global definition)
#
# Format = name : (nbits, nctrl, texsym)
#
# where:
#
# name - text name of the gate op
# nbits - total number of qubits gate acts upon
# nctrl - number of control qubits
# texsym - latex code for the operator target qubit
#
# This model assumes single qubit operations and multiple-qubit controlled
# single qubit operations.
#
# Note that GateMasterDef is modified by qasm_parser
GateMasterDef = {'cnot' : ( 2 , 1 , '\o' ),
'c-z' : ( 2 , 1 , '\op{Z}' ),
'c-x' : ( 2 , 1 , '\op{X}' ),
'measure' : ( 1 , 0 , '\meter' ),
'dmeter' : ( 1 , 0 , '\dmeter{}' ),
'h' : ( 1 , 0 , '\op{H}' ),
'H' : ( 1 , 0 , '\op{H}' ),
'X' : ( 1 , 0 , '\op{X}' ),
'Y' : ( 1 , 0 , '\op{Y}' ),
'Z' : ( 1 , 0 , '\op{Z}' ),
'S' : ( 1 , 0 , '\op{S}' ),
'T' : ( 1 , 0 , '\op{T}' ),
'U' : ( 1 , 0 , '\op{U}' ),
'ZZ' : ( 2 , 0 , r'\b' ),
'SS' : ( 2 , 0 , '\sq' ),
'zero' : ( 1 , 0 , '\z' ),
'nop' : ( 1 , 0 , '*-{}' ),
'discard' : ( 1 , 0 , '\discard' ),
'slash' : ( 1 , 0 , '\slash' ),
'space' : ( 1 , 0 , '' ),
'swap' : ( 2 , 0 , r'\t' ),
'toffoli' : ( 3 , 2 , r'\o' ),
'Utwo' : ( 2 , 0 , 'U' )
}
#-----------------------------------------------------------------------------
# main program
qp = qasm_parser(fileinput.input()) # parse the qasm file
qc = qcircuit(qp.nametab,qp.typetab) # initialize the circuit
for g in qp.gatetab: # add each gate to the circuit
qc.add_op(g)
print qp.comments.replace('#','%') # output comments
qc.output_sequence() # output time sequence of ops
qc.output_matrix() # output matrix of qubit/timesteps
qc.output_latex() # output latex code
|
eschmidgall/qasm2circ
|
original_qasm2circ-v1.4/qasm2tex.py
|
Python
|
gpl-2.0
| 27,389
|
# Copyright (c) 2013 Bull.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from climate.db import api as db_api
from climate import tests
class DBApiTestCase(tests.TestCase):
"""Test case for DB API."""
# TODO(sbauza) : Extend methods to CRUD lease
def setUp(self):
super(DBApiTestCase, self).setUp()
self.db_api = db_api
self.patch(self.db_api.IMPL, "setup_db").return_value = True
self.patch(self.db_api.IMPL, "drop_db").return_value = True
def test_setup_db(self):
self.assertTrue(self.db_api.setup_db())
def test_drop_db(self):
self.assertTrue(self.db_api.drop_db())
|
paramite/blazar
|
climate/tests/db/test_api.py
|
Python
|
apache-2.0
| 1,146
|
# Copyright 2015-present Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sklearn import datasets, metrics, cross_validation
import skflow
# Load dataset.
iris = datasets.load_iris()
X_train, X_test, y_train, y_test = cross_validation.train_test_split(iris.data, iris.target,
test_size=0.2, random_state=42)
# Build 3 layer DNN with 10, 20, 10 units respectively.
classifier = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
n_classes=3, steps=200)
# Fit and predict.
classifier.fit(X_train, y_train)
score = metrics.accuracy_score(y_test, classifier.predict(X_test))
print('Accuracy: {0:f}'.format(score))
|
shareactorIO/pipeline
|
source.ml/jupyterhub.ml/notebooks/zz_old/TensorFlow/SkFlow_DEPRECATED/iris.py
|
Python
|
apache-2.0
| 1,182
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock
from ansible.compat.tests.mock import patch
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
try:
from library.bigip_gtm_facts import Parameters
from library.bigip_gtm_facts import ServerParameters
from library.bigip_gtm_facts import PoolParameters
from library.bigip_gtm_facts import WideIpParameters
from library.bigip_gtm_facts import ModuleManager
from library.bigip_gtm_facts import ServerFactManager
from library.bigip_gtm_facts import PoolFactManager
from library.bigip_gtm_facts import TypedPoolFactManager
from library.bigip_gtm_facts import UntypedPoolFactManager
from library.bigip_gtm_facts import WideIpFactManager
from library.bigip_gtm_facts import TypedWideIpFactManager
from library.bigip_gtm_facts import UntypedWideIpFactManager
from library.bigip_gtm_facts import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from f5.bigip.tm.gtm.pool import A
from f5.utils.responses.handlers import Stats
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_gtm_pool import Parameters
from ansible.modules.network.f5.bigip_gtm_pool import ServerParameters
from ansible.modules.network.f5.bigip_gtm_pool import PoolParameters
from ansible.modules.network.f5.bigip_gtm_pool import WideIpParameters
from ansible.modules.network.f5.bigip_gtm_pool import ModuleManager
from ansible.modules.network.f5.bigip_gtm_pool import ServerFactManager
from ansible.modules.network.f5.bigip_gtm_pool import PoolFactManager
from ansible.modules.network.f5.bigip_gtm_pool import TypedPoolFactManager
from ansible.modules.network.f5.bigip_gtm_pool import UntypedPoolFactManager
from ansible.modules.network.f5.bigip_gtm_pool import WideIpFactManager
from ansible.modules.network.f5.bigip_gtm_pool import TypedWideIpFactManager
from ansible.modules.network.f5.bigip_gtm_pool import UntypedWideIpFactManager
from ansible.modules.network.f5.bigip_gtm_pool import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from f5.bigip.tm.gtm.pool import A
from f5.utils.responses.handlers import Stats
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class FakeStatResource(object):
def __init__(self, obj):
self.entries = obj
class FakeARecord(A):
def __init__(self, *args, **kwargs):
attrs = kwargs.pop('attrs', {})
for key, value in iteritems(attrs):
setattr(self, key, value)
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
include=['pool'],
filter='name.*'
)
p = Parameters(params=args)
assert p.include == ['pool']
assert p.filter == 'name.*'
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_get_typed_pool_facts(self, *args):
set_module_args(dict(
include='pool',
            password='password',
server='localhost',
user='admin'
))
fixture1 = load_fixture('load_gtm_pool_a_collection.json')
fixture2 = load_fixture('load_gtm_pool_a_example_stats.json')
collection = [FakeARecord(attrs=x) for x in fixture1['items']]
stats = Stats(FakeStatResource(fixture2['entries']))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
tfm = TypedPoolFactManager(module=module)
tfm.read_collection_from_device = Mock(return_value=collection)
tfm.read_stats_from_device = Mock(return_value=stats.stat)
tm = PoolFactManager(module=module)
tm.version_is_less_than_12 = Mock(return_value=False)
tm.get_manager = Mock(return_value=tfm)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.get_manager = Mock(return_value=tm)
mm.gtm_provisioned = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert 'pool' in results
assert len(results['pool']) > 0
assert 'load_balancing_mode' in results['pool'][0]
|
ravibhure/ansible
|
test/units/modules/network/f5/test_bigip_gtm_facts.py
|
Python
|
gpl-3.0
| 5,694
|
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kfp.deprecated import dsl, components
# Advanced function
# Demonstrates imports, helper functions and multiple outputs
from typing import NamedTuple
@components.create_component_from_func
def html_visualization(gcsPath: str) -> NamedTuple('VisualizationOutput', [('mlpipeline_ui_metadata', 'UI_metadata')]):
import json
metadata = {
'outputs': [{
'type': 'web-app',
'storage': 'inline',
'source': '<h1>Hello, World!</h1>',
}]
}
    # Temporary hack for empty string scenario: https://github.com/kubeflow/pipelines/issues/5830
if gcsPath and gcsPath != 'BEGIN-KFP-PARAM[]END-KFP-PARAM':
metadata.get('outputs').append({
'type': 'web-app',
'storage': 'gcs',
'source': gcsPath,
})
from collections import namedtuple
visualization_output = namedtuple('VisualizationOutput', [
'mlpipeline_ui_metadata'])
return visualization_output(json.dumps(metadata))
@dsl.pipeline(
name='html-pipeline',
description='A sample pipeline to generate HTML for UI visualization.'
)
def html_pipeline():
html_visualization_task = html_visualization("")
# html_visualization_task = html_visualization_op("gs://jamxl-kfp-bucket/v2-compatible/html/hello-world.html")
# Replace the parameter gcsPath with actual google cloud storage path with html file.
# For example: Upload hello-world.html in the same folder to gs://bucket-name/hello-world.html.
# Then uncomment the following line.
# html_visualization_task = html_visualization_op("gs://bucket-name/hello-world.html")
|
kubeflow/pipelines
|
samples/core/visualization/html.py
|
Python
|
apache-2.0
| 2,218
|
#!/usr/bin/env python
# Copyright (C) 2015 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Manage JJB yaml feature implementation
import copy
import fnmatch
import io
import itertools
import logging
import pkg_resources
import jenkins_jobs.local_yaml as local_yaml
from jenkins_jobs.constants import MAGIC_MANAGE_STRING
from jenkins_jobs.errors import JenkinsJobsException
from jenkins_jobs.registry import ModuleRegistry
from jenkins_jobs.formatter import deep_format
from jenkins_jobs import utils
from jenkins_jobs.xml_config import XmlJob
logger = logging.getLogger(__name__)
def matches(what, glob_patterns):
"""
Checks if the given string, ``what``, matches any of the glob patterns in
the iterable, ``glob_patterns``
:arg str what: String that we want to test if it matches a pattern
:arg iterable glob_patterns: glob patterns to match (list, tuple, set,
etc.)
"""
return any(fnmatch.fnmatch(what, glob_pattern)
for glob_pattern in glob_patterns)
class YamlParser(object):
def __init__(self, config=None, plugins_info=None):
self.data = {}
self.jobs = []
self.xml_jobs = []
self.config = config
self.registry = ModuleRegistry(self.config, plugins_info)
self.path = ["."]
if self.config:
if config.has_section('job_builder') and \
config.has_option('job_builder', 'include_path'):
self.path = config.get('job_builder',
'include_path').split(':')
self.keep_desc = self.get_keep_desc()
def get_keep_desc(self):
keep_desc = False
if self.config and self.config.has_section('job_builder') and \
self.config.has_option('job_builder', 'keep_descriptions'):
keep_desc = self.config.getboolean('job_builder',
'keep_descriptions')
return keep_desc
def parse_fp(self, fp):
# wrap provided file streams to ensure correct encoding used
data = local_yaml.load(utils.wrap_stream(fp), search_path=self.path)
if data:
if not isinstance(data, list):
raise JenkinsJobsException(
"The topmost collection in file '{fname}' must be a list,"
" not a {cls}".format(fname=getattr(fp, 'name', fp),
cls=type(data)))
for item in data:
cls, dfn = next(iter(item.items()))
group = self.data.get(cls, {})
if len(item.items()) > 1:
n = None
for k, v in item.items():
if k == "name":
n = v
break
# Syntax error
raise JenkinsJobsException("Syntax error, for item "
"named '{0}'. Missing indent?"
.format(n))
name = dfn['name']
if name in group:
self._handle_dups("Duplicate entry found in '{0}: '{1}' "
"already defined".format(fp.name, name))
group[name] = dfn
self.data[cls] = group
def parse(self, fn):
with io.open(fn, 'r', encoding='utf-8') as fp:
self.parse_fp(fp)
def _handle_dups(self, message):
if not (self.config and self.config.has_section('job_builder') and
self.config.getboolean('job_builder', 'allow_duplicates')):
logger.error(message)
raise JenkinsJobsException(message)
else:
logger.warn(message)
def getJob(self, name):
job = self.data.get('job', {}).get(name, None)
if not job:
return job
return self.applyDefaults(job)
def getJobGroup(self, name):
return self.data.get('job-group', {}).get(name, None)
def getJobTemplate(self, name):
job = self.data.get('job-template', {}).get(name, None)
if not job:
return job
return self.applyDefaults(job)
def applyDefaults(self, data, override_dict=None):
if override_dict is None:
override_dict = {}
whichdefaults = data.get('defaults', 'global')
defaults = copy.deepcopy(self.data.get('defaults',
{}).get(whichdefaults, {}))
if defaults == {} and whichdefaults != 'global':
raise JenkinsJobsException("Unknown defaults set: '{0}'"
.format(whichdefaults))
for key in override_dict.keys():
if key in defaults.keys():
defaults[key] = override_dict[key]
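        # merge order: the job's own data wins over its defaults set, e.g.
        # defaults {'x': 0, 'y': 2} overlaid with data {'x': 1} gives
        # {'x': 1, 'y': 2}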
newdata = {}
newdata.update(defaults)
newdata.update(data)
return newdata
def formatDescription(self, job):
if self.keep_desc:
description = job.get("description", None)
else:
description = job.get("description", '')
if description is not None:
job["description"] = description + \
self.get_managed_string().lstrip()
def expandYaml(self, jobs_glob=None):
changed = True
while changed:
changed = False
for module in self.registry.modules:
if hasattr(module, 'handle_data'):
if module.handle_data(self):
changed = True
for job in self.data.get('job', {}).values():
if jobs_glob and not matches(job['name'], jobs_glob):
logger.debug("Ignoring job {0}".format(job['name']))
continue
logger.debug("Expanding job '{0}'".format(job['name']))
job = self.applyDefaults(job)
self.formatDescription(job)
self.jobs.append(job)
for project in self.data.get('project', {}).values():
logger.debug("Expanding project '{0}'".format(project['name']))
# use a set to check for duplicate job references in projects
seen = set()
for jobspec in project.get('jobs', []):
if isinstance(jobspec, dict):
# Singleton dict containing dict of job-specific params
jobname, jobparams = next(iter(jobspec.items()))
if not isinstance(jobparams, dict):
jobparams = {}
else:
jobname = jobspec
jobparams = {}
job = self.getJob(jobname)
if job:
# Just naming an existing defined job
if jobname in seen:
self._handle_dups("Duplicate job '{0}' specified "
"for project '{1}'".format(
jobname, project['name']))
seen.add(jobname)
continue
# see if it's a job group
group = self.getJobGroup(jobname)
if group:
for group_jobspec in group['jobs']:
if isinstance(group_jobspec, dict):
group_jobname, group_jobparams = \
next(iter(group_jobspec.items()))
if not isinstance(group_jobparams, dict):
group_jobparams = {}
else:
group_jobname = group_jobspec
group_jobparams = {}
job = self.getJob(group_jobname)
if job:
if group_jobname in seen:
self._handle_dups(
"Duplicate job '{0}' specified for "
"project '{1}'".format(group_jobname,
project['name']))
seen.add(group_jobname)
continue
template = self.getJobTemplate(group_jobname)
# Allow a group to override parameters set by a project
d = {}
d.update(project)
d.update(jobparams)
d.update(group)
d.update(group_jobparams)
# Except name, since the group's name is not useful
d['name'] = project['name']
if template:
self.expandYamlForTemplateJob(d, template,
jobs_glob)
continue
# see if it's a template
template = self.getJobTemplate(jobname)
if template:
d = {}
d.update(project)
d.update(jobparams)
self.expandYamlForTemplateJob(d, template, jobs_glob)
else:
raise JenkinsJobsException("Failed to find suitable "
"template named '{0}'"
.format(jobname))
# check for duplicate generated jobs
seen = set()
# walk the list in reverse so that last definition wins
for job in self.jobs[::-1]:
if job['name'] in seen:
self._handle_dups("Duplicate definitions for job '{0}' "
"specified".format(job['name']))
self.jobs.remove(job)
seen.add(job['name'])
def expandYamlForTemplateJob(self, project, template, jobs_glob=None):
dimensions = []
template_name = template['name']
# reject keys that are not useful during yaml expansion
for k in ['jobs']:
project.pop(k)
for (k, v) in project.items():
tmpk = '{{{0}}}'.format(k)
if tmpk not in template_name:
logger.debug("Variable %s not in name %s, rejecting from job"
" matrix expansion.", tmpk, template_name)
continue
            if isinstance(v, list):
dimensions.append(zip([k] * len(v), v))
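                # each list-valued project variable whose name appears in the
                # template name becomes one axis of the job matrix, e.g.
                # 'branch: [master, stable]' expands to one job per branch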
# XXX somewhat hackish to ensure we actually have a single
# pass through the loop
if len(dimensions) == 0:
dimensions = [(("", ""),)]
for values in itertools.product(*dimensions):
params = copy.deepcopy(project)
params = self.applyDefaults(params, template)
expanded_values = {}
for (k, v) in values:
if isinstance(v, dict):
inner_key = next(iter(v))
expanded_values[k] = inner_key
expanded_values.update(v[inner_key])
else:
expanded_values[k] = v
params.update(expanded_values)
params = deep_format(params, params)
allow_empty_variables = self.config \
and self.config.has_section('job_builder') \
and self.config.has_option(
'job_builder', 'allow_empty_variables') \
and self.config.getboolean(
'job_builder', 'allow_empty_variables')
for key in template.keys():
if key not in params:
params[key] = template[key]
expanded = deep_format(template, params, allow_empty_variables)
job_name = expanded.get('name')
if jobs_glob and not matches(job_name, jobs_glob):
continue
self.formatDescription(expanded)
self.jobs.append(expanded)
def get_managed_string(self):
        # The \n\n is not hard coded into MAGIC_MANAGE_STRING, because the
        # newlines get stripped if the project does not otherwise have a
        # description.
return "\n\n" + MAGIC_MANAGE_STRING
def generateXML(self):
for job in self.jobs:
self.xml_jobs.append(self.getXMLForJob(job))
def getXMLForJob(self, data):
kind = data.get('project-type', 'freestyle')
for ep in pkg_resources.iter_entry_points(
group='jenkins_jobs.projects', name=kind):
Mod = ep.load()
mod = Mod(self.registry)
xml = mod.root_xml(data)
self.gen_xml(xml, data)
job = XmlJob(xml, data['name'])
return job
def gen_xml(self, xml, data):
for module in self.registry.modules:
if hasattr(module, 'gen_xml'):
module.gen_xml(self, xml, data)
|
GoodgameStudios/jenkins-job-builder
|
jenkins_jobs/parser.py
|
Python
|
apache-2.0
| 13,409
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import StringIO
import unittest
from telemetry import page as page_module
from telemetry.page import page_set
from telemetry.results import csv_pivot_table_output_formatter
from telemetry.results import page_test_results
from telemetry.value import scalar
def _MakePageSet():
ps = page_set.PageSet(file_path=os.path.dirname(__file__))
ps.AddUserStory(page_module.Page('http://www.foo.com/', ps, ps.base_dir))
ps.AddUserStory(page_module.Page('http://www.bar.com/', ps, ps.base_dir))
return ps
class CsvPivotTableOutputFormatterTest(unittest.TestCase):
# The line separator used by CSV formatter.
_LINE_SEPARATOR = '\r\n'
def setUp(self):
self._output = StringIO.StringIO()
self._page_set = _MakePageSet()
self._results = page_test_results.PageTestResults()
self._formatter = None
self.MakeFormatter()
def MakeFormatter(self, trace_tag=''):
self._formatter = (
csv_pivot_table_output_formatter.CsvPivotTableOutputFormatter(
self._output, trace_tag))
def SimulateBenchmarkRun(self, dict_of_values):
"""Simulate one run of a benchmark, using the supplied values.
Args:
dict_of_values: dictionary w/ Page instance as key, a list of Values
as value.
"""
for page, values in dict_of_values.iteritems():
self._results.WillRunPage(page)
for v in values:
v.page = page
self._results.AddValue(v)
self._results.DidRunPage(page)
def Format(self):
self._formatter.Format(self._results)
return self._output.getvalue()
def testSimple(self):
# Test a simple benchmark with only one value:
self.SimulateBenchmarkRun({
self._page_set[0]: [scalar.ScalarValue(None, 'foo', 'seconds', 3)]})
expected = self._LINE_SEPARATOR.join([
'page_set,page,name,value,units,run_index',
'page_set,http://www.foo.com/,foo,3,seconds,0',
''])
self.assertEqual(expected, self.Format())
def testMultiplePagesAndValues(self):
self.SimulateBenchmarkRun({
self._page_set[0]: [scalar.ScalarValue(None, 'foo', 'seconds', 4)],
self._page_set[1]: [scalar.ScalarValue(None, 'foo', 'seconds', 3.4),
scalar.ScalarValue(None, 'bar', 'km', 10),
scalar.ScalarValue(None, 'baz', 'count', 5)]})
# Parse CSV output into list of lists.
csv_string = self.Format()
lines = csv_string.split(self._LINE_SEPARATOR)
values = [s.split(',') for s in lines[1:-1]]
    self.assertEquals(len(values), 4)  # We expect 4 values in total.
self.assertEquals(len(set((v[1] for v in values))), 2) # 2 pages.
self.assertEquals(len(set((v[2] for v in values))), 3) # 3 value names.
def testTraceTag(self):
self.MakeFormatter(trace_tag='date,option')
self.SimulateBenchmarkRun({
self._page_set[0]: [scalar.ScalarValue(None, 'foo', 'seconds', 3),
scalar.ScalarValue(None, 'bar', 'tons', 5)]})
output = self.Format().split(self._LINE_SEPARATOR)
self.assertTrue(output[0].endswith(',trace_tag_0,trace_tag_1'))
for line in output[1:-1]:
self.assertTrue(line.endswith(',date,option'))
|
guorendong/iridium-browser-ubuntu
|
tools/telemetry/telemetry/results/csv_pivot_table_output_formatter_unittest.py
|
Python
|
bsd-3-clause
| 3,349
|
from fabric.api import *
import os
import sys
project_dir = os.path.dirname(sys.argv[0])
def first_setup():
# 1. Make a new virtualenv
local("mkvirtualenv xenserver_backup")
# pip install packages
with prefix('workon xenserver_backup'):
local("pip install pytest")
def build_windows_dist():
if os.name == 'nt':
# Call the pyinstaller
local("python ../pyinstaller/pyinstaller.py xenserver_backup_windows.spec --onefile")
def run_tests():
test_dir = "test"
with lcd(test_dir):
# Regenerate the test script
local("py.test --genscript=runtests.py")
t = local("py.test --cov-config .coveragerc --cov=xenserver_backup --cov-report=term --cov-report=html", capture=False)
with open("test/COVERAGE.rst", "w") as f:
f.write(t)
def push_docs():
""" Build the sphinx docs from develop
And push it to gh-pages
"""
githubpages = "/Users/virantha/dev/githubdocs/xenserver_backup"
# Convert markdown readme to rst
#local("pandoc README.md -f markdown -t rst -o README.rst")
with lcd(githubpages):
local("git checkout gh-pages")
local("git pull origin gh-pages")
with lcd("docs"):
print("Running sphinx in docs/ and building to ~/dev/githubpages/xenserver_backup")
local("make clean")
local("make html")
local("cp -R ../test/htmlcov %s/html/testing" % githubpages)
with lcd(githubpages):
local("git add .")
local('git commit -am "doc update"')
local('git push origin gh-pages')
|
virantha/xenserver_backup
|
fabfile.py
|
Python
|
apache-2.0
| 1,582
|
# coding=utf-8
# Module arial_16
# generated from Arial 12pt
name = "Arial 16"
start_char = '!'
end_char = chr(127)
char_height = 16
space_width = 8
gap_width = 2
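# Layout (inferred from the data below): glyphs are stored consecutively from
# start_char to end_char; each glyph occupies char_height rows, one byte per
# row for glyphs up to 8 pixels wide and two bytes per row for wider ones,
# with the most significant bit as the leftmost pixel. The '@N' comments mark
# each glyph's byte offset into the tuple.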
bitmaps = (
# @0 '!' (1 pixels wide)
0x00, #
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x00, #
0x80, # O
0x00, #
0x00, #
0x00, #
# @16 '"' (4 pixels wide)
0x00, #
0x90, # O O
0x90, # O O
0x90, # O O
0x90, # O O
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @32 '#' (9 pixels wide)
0x00, 0x00, #
0x11, 0x00, # O O
0x11, 0x00, # O O
0x11, 0x00, # O O
0x22, 0x00, # O O
0xFF, 0x80, # OOOOOOOOO
0x22, 0x00, # O O
0x22, 0x00, # O O
0x22, 0x00, # O O
0xFF, 0x80, # OOOOOOOOO
0x44, 0x00, # O O
0x44, 0x00, # O O
0x44, 0x00, # O O
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @64 '$' (7 pixels wide)
0x10, # O
0x3C, # OOOO
0x52, # O O O
0x92, # O O O
0x90, # O O
0x90, # O O
0x70, # OOO
0x1C, # OOO
0x12, # O O
0x12, # O O
0x92, # O O O
0x54, # O O O
0x38, # OOO
0x10, # O
0x00, #
0x00, #
# @80 '%' (12 pixels wide)
0x00, 0x00, #
0x70, 0x80, # OOO O
0x89, 0x00, # O O O
0x89, 0x00, # O O O
0x8A, 0x00, # O O O
0x8A, 0x00, # O O O
0x72, 0x00, # OOO O
0x04, 0xE0, # O OOO
0x05, 0x10, # O O O
0x09, 0x10, # O O O
0x09, 0x10, # O O O
0x11, 0x10, # O O O
0x10, 0xE0, # O OOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @112 '&' (9 pixels wide)
0x00, 0x00, #
0x38, 0x00, # OOO
0x44, 0x00, # O O
0x44, 0x00, # O O
0x44, 0x00, # O O
0x28, 0x00, # O O
0x30, 0x00, # OO
0x50, 0x00, # O O
0x88, 0x80, # O O O
0x85, 0x00, # O O O
0x82, 0x00, # O O
0x45, 0x00, # O O O
0x38, 0x80, # OOO O
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @144 ''' (1 pixels wide)
0x00, #
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @160 '(' (3 pixels wide)
0x00, #
0x20, # O
0x40, # O
0x40, # O
0x40, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x40, # O
0x40, # O
0x40, # O
0x20, # O
# @176 ')' (3 pixels wide)
0x00, #
0x80, # O
0x40, # O
0x40, # O
0x40, # O
0x20, # O
0x20, # O
0x20, # O
0x20, # O
0x20, # O
0x20, # O
0x20, # O
0x40, # O
0x40, # O
0x40, # O
0x80, # O
# @192 '*' (5 pixels wide)
0x00, #
0x20, # O
0xF8, # OOOOO
0x20, # O
0x50, # O O
0x50, # O O
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @208 '+' (7 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x10, # O
0x10, # O
0x10, # O
0xFE, # OOOOOOO
0x10, # O
0x10, # O
0x10, # O
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @224 ',' (1 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x80, # O
0x80, # O
0x80, # O
0x00, #
# @240 '-' (4 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0xF0, # OOOO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @256 '.' (1 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x80, # O
0x00, #
0x00, #
0x00, #
# @272 '/' (4 pixels wide)
0x00, #
0x10, # O
0x10, # O
0x20, # O
0x20, # O
0x20, # O
0x20, # O
0x40, # O
0x40, # O
0x40, # O
0x40, # O
0x80, # O
0x80, # O
0x00, #
0x00, #
0x00, #
# @288 '0' (7 pixels wide)
0x00, #
0x38, # OOO
0x44, # O O
0x82, # O O
0x82, # O O
0x82, # O O
0x82, # O O
0x82, # O O
0x82, # O O
0x82, # O O
0x82, # O O
0x44, # O O
0x38, # OOO
0x00, #
0x00, #
0x00, #
# @304 '1' (4 pixels wide)
0x00, #
0x10, # O
0x30, # OO
0x50, # O O
0x90, # O O
0x10, # O
0x10, # O
0x10, # O
0x10, # O
0x10, # O
0x10, # O
0x10, # O
0x10, # O
0x00, #
0x00, #
0x00, #
# @320 '2' (7 pixels wide)
0x00, #
0x3C, # OOOO
0x44, # O O
0x82, # O O
0x02, # O
0x02, # O
0x04, # O
0x04, # O
0x08, # O
0x10, # O
0x20, # O
0x40, # O
0xFE, # OOOOOOO
0x00, #
0x00, #
0x00, #
# @336 '3' (7 pixels wide)
0x00, #
0x38, # OOO
0x44, # O O
0x84, # O O
0x04, # O
0x0C, # OO
0x38, # OOO
0x04, # O
0x02, # O
0x02, # O
0x82, # O O
0xC4, # OO O
0x38, # OOO
0x00, #
0x00, #
0x00, #
# @352 '4' (8 pixels wide)
0x00, #
0x02, # O
0x06, # OO
0x0A, # O O
0x12, # O O
0x12, # O O
0x22, # O O
0x42, # O O
0x82, # O O
0xFF, # OOOOOOOO
0x02, # O
0x02, # O
0x02, # O
0x00, #
0x00, #
0x00, #
# @368 '5' (7 pixels wide)
0x00, #
0x7E, # OOOOOO
0x40, # O
0x40, # O
0x80, # O
0xF8, # OOOOO
0x84, # O O
0x02, # O
0x02, # O
0x02, # O
0x82, # O O
0x44, # O O
0x38, # OOO
0x00, #
0x00, #
0x00, #
# @384 '6' (7 pixels wide)
0x00, #
0x38, # OOO
0x44, # O O
0x82, # O O
0x80, # O
0xB8, # O OOO
0xC4, # OO O
0x82, # O O
0x82, # O O
0x82, # O O
0x82, # O O
0x44, # O O
0x38, # OOO
0x00, #
0x00, #
0x00, #
# @400 '7' (7 pixels wide)
0x00, #
0xFE, # OOOOOOO
0x04, # O
0x04, # O
0x08, # O
0x08, # O
0x10, # O
0x10, # O
0x10, # O
0x10, # O
0x20, # O
0x20, # O
0x20, # O
0x00, #
0x00, #
0x00, #
# @416 '8' (7 pixels wide)
0x00, #
0x38, # OOO
0x44, # O O
0x82, # O O
0x82, # O O
0x44, # O O
0x38, # OOO
0x44, # O O
0x82, # O O
0x82, # O O
0x82, # O O
0x44, # O O
0x38, # OOO
0x00, #
0x00, #
0x00, #
# @432 '9' (7 pixels wide)
0x00, #
0x38, # OOO
0x44, # O O
0x82, # O O
0x82, # O O
0x82, # O O
0x82, # O O
0x46, # O OO
0x3A, # OOO O
0x02, # O
0x82, # O O
0x44, # O O
0x38, # OOO
0x00, #
0x00, #
0x00, #
# @448 ':' (1 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x80, # O
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x80, # O
0x00, #
0x00, #
0x00, #
# @464 ';' (1 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x80, # O
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x80, # O
0x80, # O
0x80, # O
0x00, #
# @480 '<' (7 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x02, # O
0x1C, # OOO
0x60, # OO
0x80, # O
0x60, # OO
0x1C, # OOO
0x02, # O
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @496 '=' (7 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0xFE, # OOOOOOO
0x00, #
0x00, #
0x00, #
0xFE, # OOOOOOO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @512 '>' (7 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x80, # O
0x70, # OOO
0x0C, # OO
0x02, # O
0x0C, # OO
0x70, # OOO
0x80, # O
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @528 '?' (7 pixels wide)
0x00, #
0x38, # OOO
0x44, # O O
0x82, # O O
0x82, # O O
0x02, # O
0x04, # O
0x08, # O
0x10, # O
0x10, # O
0x10, # O
0x00, #
0x10, # O
0x00, #
0x00, #
0x00, #
# @544 '@' (15 pixels wide)
0x00, 0x00, #
0x07, 0xE0, # OOOOOO
0x18, 0x18, # OO OO
0x20, 0x04, # O O
0x43, 0xA4, # O OOO O O
0x44, 0x62, # O O OO O
0x88, 0x22, # O O O O
0x90, 0x22, # O O O O
0x90, 0x42, # O O O O
0x90, 0x42, # O O O O
0x90, 0x44, # O O O O
0x88, 0xC8, # O O OO O
0x47, 0x70, # O OOO OOO
0x20, 0x02, # O O
0x18, 0x0C, # OO OO
0x07, 0xF0, # OOOOOOO
# @576 'A' (9 pixels wide)
0x00, 0x00, #
0x08, 0x00, # O
0x14, 0x00, # O O
0x14, 0x00, # O O
0x14, 0x00, # O O
0x22, 0x00, # O O
0x22, 0x00, # O O
0x22, 0x00, # O O
0x7F, 0x00, # OOOOOOO
0x41, 0x00, # O O
0x41, 0x00, # O O
0x80, 0x80, # O O
0x80, 0x80, # O O
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @608 'B' (9 pixels wide)
0x00, 0x00, #
0xFF, 0x00, # OOOOOOOO
0x81, 0x00, # O O
0x80, 0x80, # O O
0x80, 0x80, # O O
0x81, 0x00, # O O
0xFE, 0x00, # OOOOOOO
0x81, 0x00, # O O
0x80, 0x80, # O O
0x80, 0x80, # O O
0x80, 0x80, # O O
0x81, 0x00, # O O
0xFE, 0x00, # OOOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @640 'C' (10 pixels wide)
0x00, 0x00, #
0x1F, 0x00, # OOOOO
0x20, 0x80, # O O
0x40, 0x40, # O O
0x80, 0x00, # O
0x80, 0x00, # O
0x80, 0x00, # O
0x80, 0x00, # O
0x80, 0x00, # O
0x80, 0x00, # O
0x40, 0x40, # O O
0x20, 0x80, # O O
0x1F, 0x00, # OOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @672 'D' (10 pixels wide)
0x00, 0x00, #
0xFE, 0x00, # OOOOOOO
0x81, 0x00, # O O
0x80, 0x80, # O O
0x80, 0x40, # O O
0x80, 0x40, # O O
0x80, 0x40, # O O
0x80, 0x40, # O O
0x80, 0x40, # O O
0x80, 0x40, # O O
0x80, 0x80, # O O
0x81, 0x00, # O O
0xFE, 0x00, # OOOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @704 'E' (9 pixels wide)
0x00, 0x00, #
0xFF, 0x80, # OOOOOOOOO
0x80, 0x00, # O
0x80, 0x00, # O
0x80, 0x00, # O
0x80, 0x00, # O
0xFF, 0x00, # OOOOOOOO
0x80, 0x00, # O
0x80, 0x00, # O
0x80, 0x00, # O
0x80, 0x00, # O
0x80, 0x00, # O
0xFF, 0x80, # OOOOOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @736 'F' (8 pixels wide)
0x00, #
0xFF, # OOOOOOOO
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0xFE, # OOOOOOO
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x00, #
0x00, #
0x00, #
# @752 'G' (10 pixels wide)
0x00, 0x00, #
0x1E, 0x00, # OOOO
0x21, 0x00, # O O
0x40, 0x80, # O O
0x80, 0x00, # O
0x80, 0x00, # O
0x80, 0x00, # O
0x87, 0xC0, # O OOOOO
0x80, 0x40, # O O
0x80, 0x40, # O O
0x40, 0x80, # O O
0x21, 0x00, # O O
0x1E, 0x00, # OOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @784 'H' (9 pixels wide)
0x00, 0x00, #
0x80, 0x80, # O O
0x80, 0x80, # O O
0x80, 0x80, # O O
0x80, 0x80, # O O
0x80, 0x80, # O O
0xFF, 0x80, # OOOOOOOOO
0x80, 0x80, # O O
0x80, 0x80, # O O
0x80, 0x80, # O O
0x80, 0x80, # O O
0x80, 0x80, # O O
0x80, 0x80, # O O
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @816 'I' (1 pixels wide)
0x00, #
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x00, #
0x00, #
0x00, #
# @832 'J' (6 pixels wide)
0x00, #
0x04, # O
0x04, # O
0x04, # O
0x04, # O
0x04, # O
0x04, # O
0x04, # O
0x04, # O
0x84, # O O
0x84, # O O
0x84, # O O
0x78, # OOOO
0x00, #
0x00, #
0x00, #
# @848 'K' (9 pixels wide)
0x00, 0x00, #
0x80, 0x80, # O O
0x81, 0x00, # O O
0x82, 0x00, # O O
0x84, 0x00, # O O
0x88, 0x00, # O O
0x98, 0x00, # O OO
0xA8, 0x00, # O O O
0xC4, 0x00, # OO O
0x82, 0x00, # O O
0x82, 0x00, # O O
0x81, 0x00, # O O
0x80, 0x80, # O O
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @880 'L' (7 pixels wide)
0x00, #
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0xFE, # OOOOOOO
0x00, #
0x00, #
0x00, #
# @896 'M' (11 pixels wide)
0x00, 0x00, #
0x80, 0x20, # O O
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xA0, 0xA0, # O O O O
0xA0, 0xA0, # O O O O
0x91, 0x20, # O O O O
0x91, 0x20, # O O O O
0x8A, 0x20, # O O O O
0x8A, 0x20, # O O O O
0x8A, 0x20, # O O O O
0x84, 0x20, # O O O
0x84, 0x20, # O O O
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @928 'N' (9 pixels wide)
0x00, 0x00, #
0x80, 0x80, # O O
0xC0, 0x80, # OO O
0xA0, 0x80, # O O O
0xA0, 0x80, # O O O
0x90, 0x80, # O O O
0x88, 0x80, # O O O
0x88, 0x80, # O O O
0x84, 0x80, # O O O
0x82, 0x80, # O O O
0x82, 0x80, # O O O
0x81, 0x80, # O OO
0x80, 0x80, # O O
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @960 'O' (10 pixels wide)
0x00, 0x00, #
0x1E, 0x00, # OOOO
0x21, 0x00, # O O
0x40, 0x80, # O O
0x80, 0x40, # O O
0x80, 0x40, # O O
0x80, 0x40, # O O
0x80, 0x40, # O O
0x80, 0x40, # O O
0x80, 0x40, # O O
0x40, 0x80, # O O
0x21, 0x00, # O O
0x1E, 0x00, # OOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @992 'P' (9 pixels wide)
0x00, 0x00, #
0xFE, 0x00, # OOOOOOO
0x81, 0x00, # O O
0x80, 0x80, # O O
0x80, 0x80, # O O
0x80, 0x80, # O O
0x81, 0x00, # O O
0xFE, 0x00, # OOOOOOO
0x80, 0x00, # O
0x80, 0x00, # O
0x80, 0x00, # O
0x80, 0x00, # O
0x80, 0x00, # O
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1024 'Q' (10 pixels wide)
0x00, 0x00, #
0x1E, 0x00, # OOOO
0x21, 0x00, # O O
0x40, 0x80, # O O
0x80, 0x40, # O O
0x80, 0x40, # O O
0x80, 0x40, # O O
0x80, 0x40, # O O
0x80, 0x40, # O O
0x80, 0x40, # O O
0x46, 0xC0, # O OO OO
0x21, 0x80, # O OO
0x1E, 0xC0, # OOOO OO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1056 'R' (9 pixels wide)
0x00, 0x00, #
0xFE, 0x00, # OOOOOOO
0x81, 0x00, # O O
0x80, 0x80, # O O
0x80, 0x80, # O O
0x80, 0x80, # O O
0x81, 0x00, # O O
0xFE, 0x00, # OOOOOOO
0x84, 0x00, # O O
0x82, 0x00, # O O
0x82, 0x00, # O O
0x81, 0x00, # O O
0x80, 0x80, # O O
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1088 'S' (9 pixels wide)
0x00, 0x00, #
0x3E, 0x00, # OOOOO
0x41, 0x00, # O O
0x80, 0x80, # O O
0x80, 0x00, # O
0x40, 0x00, # O
0x38, 0x00, # OOO
0x07, 0x00, # OOO
0x00, 0x80, # O
0x00, 0x80, # O
0x80, 0x80, # O O
0x41, 0x00, # O O
0x3E, 0x00, # OOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1120 'T' (9 pixels wide)
0x00, 0x00, #
0xFF, 0x80, # OOOOOOOOO
0x08, 0x00, # O
0x08, 0x00, # O
0x08, 0x00, # O
0x08, 0x00, # O
0x08, 0x00, # O
0x08, 0x00, # O
0x08, 0x00, # O
0x08, 0x00, # O
0x08, 0x00, # O
0x08, 0x00, # O
0x08, 0x00, # O
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1152 'U' (9 pixels wide)
0x00, 0x00, #
0x80, 0x80, # O O
0x80, 0x80, # O O
0x80, 0x80, # O O
0x80, 0x80, # O O
0x80, 0x80, # O O
0x80, 0x80, # O O
0x80, 0x80, # O O
0x80, 0x80, # O O
0x80, 0x80, # O O
0x80, 0x80, # O O
0x41, 0x00, # O O
0x3E, 0x00, # OOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1184 'V' (9 pixels wide)
0x00, 0x00, #
0x80, 0x80, # O O
0x80, 0x80, # O O
0x41, 0x00, # O O
0x41, 0x00, # O O
0x41, 0x00, # O O
0x22, 0x00, # O O
0x22, 0x00, # O O
0x22, 0x00, # O O
0x14, 0x00, # O O
0x14, 0x00, # O O
0x08, 0x00, # O
0x08, 0x00, # O
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1216 'W' (15 pixels wide)
0x00, 0x00, #
0x81, 0x02, # O O O
0x82, 0x82, # O O O O
0x42, 0x84, # O O O O
0x42, 0x84, # O O O O
0x44, 0x44, # O O O O
0x44, 0x44, # O O O O
0x24, 0x48, # O O O O
0x28, 0x28, # O O O O
0x28, 0x28, # O O O O
0x28, 0x28, # O O O O
0x10, 0x10, # O O
0x10, 0x10, # O O
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1248 'X' (11 pixels wide)
0x00, 0x00, #
0x40, 0x40, # O O
0x20, 0x80, # O O
0x11, 0x00, # O O
0x11, 0x00, # O O
0x0A, 0x00, # O O
0x04, 0x00, # O
0x0A, 0x00, # O O
0x11, 0x00, # O O
0x11, 0x00, # O O
0x20, 0x80, # O O
0x40, 0x40, # O O
0x80, 0x20, # O O
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1280 'Y' (9 pixels wide)
0x00, 0x00, #
0x80, 0x80, # O O
0x41, 0x00, # O O
0x41, 0x00, # O O
0x22, 0x00, # O O
0x14, 0x00, # O O
0x14, 0x00, # O O
0x08, 0x00, # O
0x08, 0x00, # O
0x08, 0x00, # O
0x08, 0x00, # O
0x08, 0x00, # O
0x08, 0x00, # O
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1312 'Z' (9 pixels wide)
0x00, 0x00, #
0x7F, 0x80, # OOOOOOOO
0x01, 0x00, # O
0x02, 0x00, # O
0x02, 0x00, # O
0x04, 0x00, # O
0x08, 0x00, # O
0x08, 0x00, # O
0x10, 0x00, # O
0x20, 0x00, # O
0x20, 0x00, # O
0x40, 0x00, # O
0xFF, 0x80, # OOOOOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1344 '[' (3 pixels wide)
0x00, #
0xE0, # OOO
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0xE0, # OOO
# @1360 '\' (4 pixels wide)
0x00, #
0x80, # O
0x80, # O
0x40, # O
0x40, # O
0x40, # O
0x40, # O
0x20, # O
0x20, # O
0x20, # O
0x20, # O
0x10, # O
0x10, # O
0x00, #
0x00, #
0x00, #
# @1376 ']' (3 pixels wide)
0x00, #
0xE0, # OOO
0x20, # O
0x20, # O
0x20, # O
0x20, # O
0x20, # O
0x20, # O
0x20, # O
0x20, # O
0x20, # O
0x20, # O
0x20, # O
0x20, # O
0x20, # O
0xE0, # OOO
# @1392 '^' (7 pixels wide)
0x00, #
0x10, # O
0x28, # O O
0x28, # O O
0x44, # O O
0x44, # O O
0x82, # O O
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @1408 '_' (9 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0xFF, 0x80, # OOOOOOOOO
# @1440 '`' (2 pixels wide)
0x00, #
0x80, # O
0x40, # O
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @1456 'a' (7 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x3C, # OOOO
0x42, # O O
0x82, # O O
0x0E, # OOO
0x72, # OOO O
0x82, # O O
0x82, # O O
0x86, # O OO
0x7A, # OOOO O
0x00, #
0x00, #
0x00, #
# @1472 'b' (7 pixels wide)
0x00, #
0x80, # O
0x80, # O
0x80, # O
0xB8, # O OOO
0xC4, # OO O
0x82, # O O
0x82, # O O
0x82, # O O
0x82, # O O
0x82, # O O
0xC4, # OO O
0xB8, # O OOO
0x00, #
0x00, #
0x00, #
# @1488 'c' (6 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x38, # OOO
0x44, # O O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x44, # O O
0x38, # OOO
0x00, #
0x00, #
0x00, #
# @1504 'd' (7 pixels wide)
0x00, #
0x02, # O
0x02, # O
0x02, # O
0x3A, # OOO O
0x46, # O OO
0x82, # O O
0x82, # O O
0x82, # O O
0x82, # O O
0x82, # O O
0x46, # O OO
0x3A, # OOO O
0x00, #
0x00, #
0x00, #
# @1520 'e' (7 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x38, # OOO
0x44, # O O
0x82, # O O
0x82, # O O
0xFE, # OOOOOOO
0x80, # O
0x82, # O O
0x44, # O O
0x38, # OOO
0x00, #
0x00, #
0x00, #
# @1536 'f' (4 pixels wide)
0x00, #
0x30, # OO
0x40, # O
0x40, # O
0xF0, # OOOO
0x40, # O
0x40, # O
0x40, # O
0x40, # O
0x40, # O
0x40, # O
0x40, # O
0x40, # O
0x00, #
0x00, #
0x00, #
# @1552 'g' (7 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x3A, # OOO O
0x46, # O OO
0x82, # O O
0x82, # O O
0x82, # O O
0x82, # O O
0x82, # O O
0x46, # O OO
0x3A, # OOO O
0x02, # O
0x84, # O O
0x78, # OOOO
# @1568 'h' (6 pixels wide)
0x00, #
0x80, # O
0x80, # O
0x80, # O
0xB8, # O OOO
0xC4, # OO O
0x84, # O O
0x84, # O O
0x84, # O O
0x84, # O O
0x84, # O O
0x84, # O O
0x84, # O O
0x00, #
0x00, #
0x00, #
# @1584 'i' (1 pixels wide)
0x00, #
0x80, # O
0x00, #
0x00, #
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x00, #
0x00, #
0x00, #
# @1600 'j' (3 pixels wide)
0x00, #
0x20, # O
0x00, #
0x00, #
0x20, # O
0x20, # O
0x20, # O
0x20, # O
0x20, # O
0x20, # O
0x20, # O
0x20, # O
0x20, # O
0x20, # O
0x20, # O
0xC0, # OO
# @1616 'k' (7 pixels wide)
0x00, #
0x80, # O
0x80, # O
0x80, # O
0x82, # O O
0x84, # O O
0x88, # O O
0x90, # O O
0xB0, # O OO
0xC8, # OO O
0x88, # O O
0x84, # O O
0x82, # O O
0x00, #
0x00, #
0x00, #
# @1632 'l' (1 pixels wide)
0x00, #
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x00, #
0x00, #
0x00, #
# @1648 'm' (11 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0xB9, 0xC0, # O OOO OOO
0xC6, 0x20, # OO OO O
0x84, 0x20, # O O O
0x84, 0x20, # O O O
0x84, 0x20, # O O O
0x84, 0x20, # O O O
0x84, 0x20, # O O O
0x84, 0x20, # O O O
0x84, 0x20, # O O O
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1680 'n' (6 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0xB8, # O OOO
0xC4, # OO O
0x84, # O O
0x84, # O O
0x84, # O O
0x84, # O O
0x84, # O O
0x84, # O O
0x84, # O O
0x00, #
0x00, #
0x00, #
# @1696 'o' (7 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x38, # OOO
0x44, # O O
0x82, # O O
0x82, # O O
0x82, # O O
0x82, # O O
0x82, # O O
0x44, # O O
0x38, # OOO
0x00, #
0x00, #
0x00, #
# @1712 'p' (7 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0xB8, # O OOO
0xC4, # OO O
0x82, # O O
0x82, # O O
0x82, # O O
0x82, # O O
0x82, # O O
0xC4, # OO O
0xB8, # O OOO
0x80, # O
0x80, # O
0x80, # O
# @1728 'q' (7 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x3A, # OOO O
0x46, # O OO
0x82, # O O
0x82, # O O
0x82, # O O
0x82, # O O
0x82, # O O
0x46, # O OO
0x3A, # OOO O
0x02, # O
0x02, # O
0x02, # O
# @1744 'r' (4 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0xB0, # O OO
0xC0, # OO
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x00, #
0x00, #
0x00, #
# @1760 's' (6 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x78, # OOOO
0x84, # O O
0x80, # O
0x80, # O
0x78, # OOOO
0x04, # O
0x04, # O
0x84, # O O
0x78, # OOOO
0x00, #
0x00, #
0x00, #
# @1776 't' (4 pixels wide)
0x00, #
0x00, #
0x40, # O
0x40, # O
0xF0, # OOOO
0x40, # O
0x40, # O
0x40, # O
0x40, # O
0x40, # O
0x40, # O
0x40, # O
0x70, # OOO
0x00, #
0x00, #
0x00, #
# @1792 'u' (6 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x84, # O O
0x84, # O O
0x84, # O O
0x84, # O O
0x84, # O O
0x84, # O O
0x84, # O O
0x8C, # O OO
0x74, # OOO O
0x00, #
0x00, #
0x00, #
# @1808 'v' (7 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x82, # O O
0x82, # O O
0x44, # O O
0x44, # O O
0x28, # O O
0x28, # O O
0x28, # O O
0x10, # O
0x10, # O
0x00, #
0x00, #
0x00, #
# @1824 'w' (11 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x84, 0x20, # O O O
0x84, 0x20, # O O O
0x4A, 0x40, # O O O O
0x4A, 0x40, # O O O O
0x51, 0x40, # O O O O
0x51, 0x40, # O O O O
0x51, 0x40, # O O O O
0x20, 0x80, # O O
0x20, 0x80, # O O
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1856 'x' (7 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x82, # O O
0x44, # O O
0x28, # O O
0x28, # O O
0x10, # O
0x28, # O O
0x28, # O O
0x44, # O O
0x82, # O O
0x00, #
0x00, #
0x00, #
# @1872 'y' (7 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x82, # O O
0x82, # O O
0x84, # O O
0x44, # O O
0x44, # O O
0x28, # O O
0x28, # O O
0x28, # O O
0x10, # O
0x10, # O
0x10, # O
0x60, # OO
# @1888 'z' (7 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0xFE, # OOOOOOO
0x04, # O
0x08, # O
0x08, # O
0x10, # O
0x20, # O
0x20, # O
0x40, # O
0xFE, # OOOOOOO
0x00, #
0x00, #
0x00, #
# @1904 '{' (5 pixels wide)
0x00, #
0x18, # OO
0x20, # O
0x20, # O
0x20, # O
0x20, # O
0x20, # O
0x20, # O
0xC0, # OO
0x20, # O
0x20, # O
0x20, # O
0x20, # O
0x20, # O
0x20, # O
0x18, # OO
# @1920 '|' (1 pixels wide)
0x00, #
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
# @1936 '}' (5 pixels wide)
0x00, #
0xC0, # OO
0x20, # O
0x20, # O
0x20, # O
0x20, # O
0x20, # O
0x20, # O
0x18, # OO
0x20, # O
0x20, # O
0x20, # O
0x20, # O
0x20, # O
0x20, # O
0xC0, # OO
# @1952 '~' (8 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x71, # OOO O
0x8E, # O OOO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @1968 '°' (4 pixels wide)
0x00, #
0x60, # OO
0x90, # O O
0x90, # O O
0x60, # OO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
)
descriptors = (
(1,0),# !
(4,16),# "
(9,32),# #
(7,64),# $
(12,80),# %
(9,112),# &
(1,144),# '
(3,160),# (
(3,176),# )
(5,192),# *
(7,208),# +
(1,224),# ,
(4,240),# -
(1,256),# .
(4,272),# /
(7,288),# 0
(4,304),# 1
(7,320),# 2
(7,336),# 3
(8,352),# 4
(7,368),# 5
(7,384),# 6
(7,400),# 7
(7,416),# 8
(7,432),# 9
(1,448),# :
(1,464),# ;
(7,480),# <
(7,496),# =
(7,512),# >
(7,528),# ?
(15,544),# @
(9,576),# A
(9,608),# B
(10,640),# C
(10,672),# D
(9,704),# E
(8,736),# F
(10,752),# G
(9,784),# H
(1,816),# I
(6,832),# J
(9,848),# K
(7,880),# L
(11,896),# M
(9,928),# N
(10,960),# O
(9,992),# P
(10,1024),# Q
(9,1056),# R
(9,1088),# S
(9,1120),# T
(9,1152),# U
(9,1184),# V
(15,1216),# W
(11,1248),# X
(9,1280),# Y
(9,1312),# Z
(3,1344),# [
(4,1360),# \
(3,1376),# ]
(7,1392),# ^
(9,1408),# _
(2,1440),# `
(7,1456),# a
(7,1472),# b
(6,1488),# c
(7,1504),# d
(7,1520),# e
(4,1536),# f
(7,1552),# g
(6,1568),# h
(1,1584),# i
(3,1600),# j
(7,1616),# k
(1,1632),# l
(11,1648),# m
(6,1680),# n
(7,1696),# o
(7,1712),# p
(7,1728),# q
(4,1744),# r
(6,1760),# s
(4,1776),# t
(6,1792),# u
(7,1808),# v
(11,1824),# w
(7,1856),# x
(7,1872),# y
(7,1888),# z
(5,1904),# {
(1,1920),# |
(5,1936),# }
(8,1952),# ~
(4,1968),# °
)
kerning = (
(1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,),
(4,4,2,4,4,3,4,3,4,4,1,3,0,3,2,4,4,4,4,1,4,4,4,4,4,4,4,0,0,4,4,3,1,4,4,4,4,4,4,4,4,0,4,4,4,4,4,4,4,4,4,4,4,4,4,3,4,3,4,4,4,3,0,4,2,4,2,2,2,4,2,4,4,2,4,4,4,4,2,4,2,4,3,4,4,4,4,4,4,4,2,4,4,0,4,),
(9,8,9,9,9,9,8,9,8,8,7,8,9,8,8,9,7,8,8,9,9,9,8,9,9,8,8,7,9,8,8,9,8,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,8,8,9,8,8,7,8,7,9,8,8,8,0,8,9,9,9,9,9,8,9,9,9,7,9,9,9,9,9,9,9,9,9,8,9,9,9,8,9,7,7,9,8,7,8,),
(7,7,7,7,7,7,7,7,6,7,6,6,7,6,6,7,6,7,7,7,7,7,6,7,7,6,6,6,7,7,7,7,6,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,6,7,7,7,5,6,5,7,7,6,5,0,6,7,7,7,7,7,6,7,7,7,5,7,7,7,7,7,7,7,7,6,6,7,5,6,5,6,5,7,7,6,6,7,),
(12,9,12,12,9,12,11,12,10,8,11,11,12,11,12,12,9,11,12,12,12,12,10,12,12,11,11,11,12,12,9,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,9,12,10,10,11,9,11,12,10,10,7,3,10,12,12,12,12,12,11,12,12,12,10,12,12,12,12,12,12,12,12,12,11,12,10,11,11,11,11,12,12,10,11,8,),
(9,6,8,7,6,9,8,9,8,6,6,9,8,9,9,9,6,9,8,9,7,9,7,9,7,9,9,8,8,7,6,9,9,9,9,9,9,9,9,9,9,8,9,9,9,9,9,9,9,9,7,5,9,7,7,9,5,9,9,7,7,5,0,7,8,9,9,9,9,8,9,9,9,7,9,9,9,9,9,9,9,9,8,8,9,7,8,9,8,9,9,9,7,4,6,),
(1,1,0,1,1,0,1,0,1,1,0,0,0,0,0,1,1,1,1,0,1,1,1,1,1,1,1,0,0,1,1,0,0,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,1,0,0,1,0,1,0,0,0,1,0,1,1,0,1,1,1,1,0,1,0,1,0,1,1,1,1,1,1,1,0,1,1,0,1,),
(3,3,1,2,2,1,3,1,3,2,1,2,1,2,2,2,2,2,2,1,2,2,3,2,2,2,2,1,1,2,2,1,2,3,2,3,3,3,2,3,3,1,3,3,3,3,2,3,2,3,2,3,3,3,3,2,3,2,3,3,3,1,3,3,1,3,1,1,1,2,2,3,3,3,3,3,2,2,1,3,1,2,1,2,2,2,2,2,2,2,1,3,3,1,2,),
(3,2,3,3,3,3,2,3,1,2,3,2,3,2,3,3,2,2,3,3,3,3,1,3,3,2,2,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,2,2,1,2,3,2,1,3,1,1,3,3,3,3,3,2,3,3,3,1,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,1,3,2,),
(5,5,4,4,5,4,5,4,4,5,1,4,1,4,2,4,4,4,4,1,4,4,3,4,4,4,4,1,4,4,4,3,2,5,4,5,5,5,4,5,5,0,5,5,5,5,4,5,4,5,4,3,5,5,5,3,4,2,5,5,3,3,0,4,3,5,3,3,3,4,3,5,4,2,5,5,4,4,3,4,3,4,4,4,4,4,4,4,4,4,3,5,3,0,5,),
(7,4,5,4,4,6,6,7,5,3,7,6,4,6,6,7,4,2,4,6,4,7,4,6,6,6,6,7,4,4,4,7,5,7,7,7,7,7,7,7,7,4,7,7,7,7,7,7,7,7,4,3,7,5,5,3,3,3,7,5,5,4,0,5,4,7,7,7,7,6,7,7,7,5,7,7,7,7,7,7,7,7,7,6,7,6,6,5,6,4,5,7,5,7,3,),
(1,0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,0,1,0,0,0,0,0,0,0,1,1,0,0,0,0,0,1,1,0,1,1,1,0,1,1,0,1,1,1,1,0,1,0,1,0,0,0,0,0,1,0,1,1,0,0,0,0,0,0,1,0,0,0,0,1,1,1,0,1,1,1,1,0,1,0,1,0,0,0,0,0,1,0,1,0,1,0,0,0,),
(4,0,4,1,0,4,3,4,2,0,1,3,4,3,3,4,1,1,0,4,0,4,1,4,0,3,3,1,4,3,1,4,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,0,0,4,1,2,1,0,2,4,2,2,0,0,2,4,4,4,4,4,3,4,4,4,2,4,4,4,4,4,4,4,4,0,3,4,2,3,2,2,2,2,4,2,0,0,),
(1,0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,0,1,0,0,0,0,0,0,0,1,1,0,0,0,0,0,1,1,0,1,1,1,0,1,1,0,1,1,1,1,0,1,0,1,0,0,0,0,0,1,0,1,1,0,0,0,0,0,0,1,0,0,0,0,0,1,1,0,1,1,1,1,0,1,0,1,0,0,0,0,0,1,0,1,0,1,0,0,0,),
(4,4,3,3,4,3,4,3,4,4,2,3,2,3,1,3,3,3,3,2,3,3,4,3,3,3,3,2,3,3,3,3,1,4,3,4,4,4,3,4,4,2,4,4,4,4,3,4,3,4,3,4,4,4,4,3,4,3,4,4,4,3,0,4,3,4,3,3,3,3,3,4,4,2,4,4,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,4,4,2,4,),
(7,7,7,7,7,7,7,7,6,6,7,6,7,6,6,7,7,7,7,7,7,7,5,7,7,7,7,7,7,7,7,7,6,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,5,7,6,6,5,6,5,7,6,5,7,0,5,7,7,7,7,7,7,7,7,7,5,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,5,7,7,),
(4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,0,4,4,4,4,4,4,4,4,4,4,2,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,),
(7,7,7,7,7,6,7,7,6,6,6,7,4,7,7,7,7,7,7,5,7,7,6,7,7,7,7,6,7,7,7,6,7,7,7,7,7,7,7,7,7,6,7,7,7,7,7,7,7,7,7,6,7,6,6,7,6,7,7,6,6,6,0,6,6,7,6,6,6,7,6,7,7,5,7,7,7,7,6,7,6,7,7,7,7,7,7,7,7,7,5,7,6,6,7,),
(7,6,7,7,6,7,6,7,5,6,6,6,7,6,6,7,6,6,7,7,7,7,5,7,7,6,6,6,7,7,6,7,6,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,5,7,6,6,5,5,5,7,6,5,5,0,5,7,7,7,7,7,6,7,7,7,5,7,7,7,7,7,7,7,7,6,6,7,6,6,6,6,6,7,7,5,6,6,),
(8,7,8,7,7,8,7,8,7,7,7,7,8,7,7,8,7,7,7,8,7,8,7,8,7,7,7,7,8,7,7,8,7,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,7,7,8,7,7,7,7,7,8,7,7,7,0,7,8,8,8,8,8,7,8,8,8,6,8,8,8,8,8,8,8,8,7,7,8,7,7,7,7,7,7,8,7,7,7,),
(7,7,7,7,6,7,7,7,7,5,7,6,7,6,6,7,4,5,7,7,7,7,7,7,7,6,6,7,7,7,5,7,6,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,6,7,6,7,7,7,6,0,7,7,7,7,7,7,6,7,7,7,5,7,7,7,7,7,7,7,7,7,6,7,6,6,5,6,5,7,7,7,7,6,),
(7,7,7,7,7,7,7,7,6,6,7,6,7,6,6,7,6,7,7,7,7,7,5,7,7,6,6,7,7,7,7,7,6,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,5,7,6,6,5,6,5,7,6,5,6,0,5,7,7,7,7,7,6,7,7,7,5,7,7,7,7,7,7,7,7,7,6,7,6,6,5,6,5,7,7,5,7,7,),
(7,7,5,6,6,5,7,5,7,6,4,6,4,6,4,6,5,6,6,4,6,6,7,6,6,6,6,4,5,5,6,4,3,7,5,7,7,7,5,7,7,4,7,7,7,7,5,7,5,7,6,7,7,7,7,6,7,6,7,7,7,4,0,7,4,7,4,4,4,5,4,7,7,5,7,7,5,5,4,5,4,5,5,5,5,5,5,5,5,5,4,7,7,4,6,),
(7,7,7,7,7,7,7,7,6,6,6,6,7,6,6,7,7,7,7,7,7,7,5,7,7,7,7,6,7,7,7,7,6,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,5,7,6,6,5,6,5,7,6,5,6,0,5,7,7,7,7,7,7,7,7,7,5,7,7,7,7,7,7,7,7,6,7,7,7,7,7,7,7,7,7,5,6,7,),
(7,7,7,7,7,7,7,7,6,6,7,6,7,6,6,7,7,7,7,7,7,7,5,7,7,7,7,7,7,7,7,7,6,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,5,7,6,6,5,6,5,7,6,5,7,0,5,7,7,7,7,7,7,7,7,7,5,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,5,7,7,),
(1,1,0,1,1,0,1,0,0,0,0,1,0,1,1,1,1,1,0,0,1,1,0,1,1,1,1,0,0,1,1,0,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,0,1,0,0,1,0,1,1,0,0,0,0,0,0,1,0,0,0,1,0,1,1,0,1,1,1,1,0,1,0,1,0,1,1,1,1,1,1,1,0,1,0,0,0,),
(1,1,0,1,1,0,1,0,0,0,0,1,0,1,1,1,1,1,0,0,1,1,0,1,1,1,1,0,0,1,1,0,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,0,1,0,0,1,0,1,1,0,0,0,0,0,0,1,0,0,0,1,1,1,1,0,1,1,1,1,0,1,0,1,0,1,1,1,1,1,1,1,0,1,0,0,0,),
(7,7,6,7,7,7,7,7,6,6,4,6,6,6,6,7,7,5,7,6,7,7,5,7,7,7,7,3,6,7,7,7,6,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,3,7,6,6,5,5,5,7,6,5,6,0,5,7,7,7,7,7,7,7,7,7,5,7,7,7,7,7,7,7,7,6,7,7,7,7,7,7,7,5,7,5,2,6,),
(7,3,7,7,7,7,6,7,5,6,4,6,7,6,6,7,4,4,3,7,7,7,4,7,7,6,6,4,7,6,4,7,6,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,6,3,7,6,6,4,4,5,7,6,5,6,0,5,7,7,7,7,7,6,7,7,7,5,7,7,7,7,7,7,7,7,7,6,7,7,7,6,7,5,5,7,5,0,3,),
(7,3,5,5,5,6,6,7,5,3,7,6,4,6,6,7,4,2,4,6,6,7,4,6,6,6,6,7,4,3,3,7,5,7,7,7,7,7,7,7,7,4,7,7,7,7,7,7,7,7,4,3,7,5,5,3,3,3,7,5,5,6,0,5,6,7,7,7,7,6,7,7,7,5,7,7,7,7,7,7,7,7,7,6,7,6,6,5,6,3,6,7,5,7,3,),
(7,7,7,7,7,6,7,7,6,6,5,6,4,6,5,7,7,7,7,4,7,7,5,7,7,7,7,5,7,7,7,6,5,7,7,7,7,7,7,7,7,4,7,7,7,7,7,7,7,7,7,5,7,6,6,4,6,4,7,6,5,6,0,5,6,7,6,6,6,7,6,7,7,5,7,7,7,7,6,7,6,7,7,7,7,7,7,7,7,7,5,7,5,5,7,),
(15,14,15,15,15,15,14,15,14,14,15,15,15,14,14,15,14,14,14,15,15,15,12,15,15,14,15,15,15,14,14,15,14,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,14,11,15,14,14,12,13,13,15,14,13,15,12,13,15,15,15,15,15,14,15,15,15,13,15,15,15,15,15,15,15,15,15,14,15,15,15,14,15,14,15,15,13,15,14,),
(9,6,8,8,7,8,8,9,8,6,7,9,8,9,9,8,6,9,9,8,8,8,7,8,8,9,9,7,8,8,6,9,9,9,8,9,9,9,8,9,9,9,9,9,9,9,8,9,8,9,8,5,8,6,6,9,5,9,9,6,7,7,0,7,9,9,8,8,8,8,8,9,9,7,9,9,9,9,8,9,8,9,9,8,9,7,7,9,7,9,8,9,7,7,6,),
(9,9,9,9,9,9,9,9,8,8,8,8,9,8,8,9,9,9,9,9,9,9,8,9,9,9,9,8,9,9,9,9,8,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,8,9,8,8,7,8,7,9,8,8,8,0,8,9,9,9,9,9,9,9,9,9,7,9,9,9,9,9,9,9,9,8,9,9,9,9,9,9,9,9,9,8,8,9,),
(10,10,9,10,10,10,10,10,9,9,7,9,6,9,9,10,9,10,10,6,10,10,8,10,10,9,9,4,3,10,10,10,9,10,9,10,10,10,9,10,10,10,10,10,10,10,9,10,9,10,10,8,10,9,9,8,9,8,10,9,8,8,1,8,10,10,10,10,10,9,10,10,10,8,10,10,10,10,10,10,10,10,9,9,10,8,9,8,8,8,8,10,8,2,10,),
(10,10,10,10,10,10,10,10,9,9,10,9,10,9,9,10,10,9,9,10,10,10,7,10,10,10,10,10,10,10,10,10,9,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,7,10,9,9,7,8,8,10,9,8,10,1,8,10,10,10,10,10,10,10,10,10,8,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,8,10,9,),
(9,9,8,7,8,7,9,8,9,7,5,9,5,9,9,8,6,9,7,6,8,8,9,7,8,9,9,7,2,4,7,8,9,9,8,9,9,9,8,9,9,8,9,9,9,9,8,9,8,9,7,9,9,9,9,9,9,9,9,9,9,8,0,9,8,9,8,8,8,8,8,9,9,7,9,9,9,9,8,9,8,9,8,8,8,7,7,9,8,9,7,9,9,7,8,),
(8,8,5,6,7,6,8,7,8,6,7,7,4,7,6,7,5,6,6,6,7,7,8,6,6,7,7,7,1,1,6,7,5,8,7,8,8,8,7,8,8,3,8,8,8,8,7,8,7,8,6,8,8,8,8,7,8,7,8,8,8,5,0,8,3,8,7,7,7,6,7,8,8,6,8,8,7,7,7,7,7,7,7,6,7,6,6,5,6,3,5,8,8,7,7,),
(10,9,10,9,9,10,9,10,8,8,10,9,10,9,9,10,8,9,9,10,9,10,7,10,9,9,9,10,10,9,9,10,9,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,9,7,10,8,8,7,8,8,10,8,8,7,1,8,10,10,10,10,10,9,10,10,10,8,10,10,10,10,10,10,10,10,10,9,10,9,9,8,9,8,10,10,8,10,9,),
(9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,0,9,9,9,9,9,9,9,9,9,9,7,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,),
(1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,),
(6,6,6,6,6,6,6,6,6,6,6,5,6,5,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,5,6,5,6,6,6,6,0,6,6,6,6,6,6,6,6,6,6,4,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,),
(9,9,8,7,8,7,9,8,9,8,5,9,7,9,9,7,6,9,8,7,8,7,9,7,7,9,9,5,7,7,7,8,9,9,7,9,9,9,7,9,9,8,9,9,9,9,7,9,7,9,7,9,9,9,9,9,9,9,9,9,9,6,0,9,8,9,7,7,7,8,7,9,9,7,9,9,9,9,7,9,7,9,8,8,8,6,7,9,6,9,7,9,9,5,8,),
(7,3,6,5,4,5,6,6,6,2,1,7,3,7,7,5,4,7,5,1,5,5,5,5,5,7,7,1,1,1,4,6,7,7,4,7,7,7,4,7,7,6,7,7,7,7,4,7,4,7,5,3,5,3,4,7,3,7,7,4,5,1,0,5,6,7,5,5,5,6,5,7,7,5,7,7,7,7,5,7,5,7,6,6,6,4,5,7,4,7,5,7,5,1,3,),
(11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,2,11,11,11,11,11,11,11,11,11,11,9,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,),
(9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,0,9,9,9,9,9,9,9,9,9,9,7,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,),
(10,10,10,10,10,10,10,10,9,9,10,9,10,9,9,10,10,9,9,10,10,10,7,10,10,10,10,10,10,10,10,10,9,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,7,10,9,9,7,8,8,10,9,8,10,1,8,10,10,10,10,10,10,10,10,10,8,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,8,10,9,),
(9,9,9,9,9,8,9,9,8,8,7,8,5,8,7,9,9,9,9,6,9,9,7,9,9,9,9,7,9,9,9,8,7,9,9,9,9,9,9,9,9,4,9,9,9,9,9,9,9,9,9,7,9,8,8,6,8,6,9,8,7,8,0,7,8,9,8,8,8,9,8,9,9,7,9,9,9,9,8,9,8,9,9,9,9,9,9,9,9,9,7,9,7,7,9,),
(10,10,10,10,10,10,10,10,9,9,10,10,10,10,10,10,10,10,10,10,10,10,8,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,7,10,9,9,10,8,10,10,9,8,10,1,8,10,10,10,10,10,10,10,10,10,8,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,8,10,9,),
(9,9,9,9,9,8,9,9,8,8,7,9,7,9,9,9,9,9,9,7,9,9,7,9,9,9,9,7,9,9,9,8,9,9,9,9,9,9,9,9,9,8,9,9,9,9,9,9,9,9,9,7,9,8,8,9,8,9,9,8,7,8,0,7,8,9,8,8,8,9,8,9,9,7,9,9,9,9,8,9,8,9,9,9,9,9,9,9,9,9,7,9,7,7,9,),
(9,9,9,9,9,9,9,9,8,8,8,8,9,8,8,9,8,9,9,9,9,9,7,9,9,8,8,8,9,9,9,9,8,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,7,9,8,8,7,8,7,9,8,7,7,0,7,9,9,9,9,9,8,9,9,9,7,9,9,9,9,9,9,9,9,8,8,9,7,8,7,8,7,9,9,7,8,9,),
(9,9,6,7,8,7,9,7,9,7,5,8,5,8,6,7,6,7,7,5,8,7,9,7,7,8,8,5,5,5,7,5,5,9,6,9,9,9,6,9,9,5,9,9,9,9,6,9,6,9,7,9,9,9,9,8,9,8,9,9,9,6,0,9,5,9,5,5,5,7,5,9,9,7,9,9,5,5,5,5,5,5,5,5,5,5,5,5,5,5,6,9,9,5,8,),
(9,9,9,9,9,9,9,9,9,9,9,8,9,8,8,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,8,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,8,9,8,9,9,9,9,0,9,9,9,9,9,9,9,9,9,9,7,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,),
(9,9,8,8,9,8,9,8,9,9,7,8,6,8,6,8,8,8,8,7,8,8,9,8,8,8,8,7,8,8,8,7,6,9,8,9,9,9,8,9,9,6,9,9,9,9,8,9,8,9,8,9,9,9,9,8,9,8,9,9,9,7,0,9,7,9,7,7,7,8,7,9,9,7,9,9,8,8,7,8,7,8,8,8,8,8,8,8,8,8,7,9,9,7,9,),
(15,15,14,14,15,14,15,14,15,15,13,14,13,14,12,14,14,14,14,13,14,14,15,14,14,14,14,13,14,14,14,14,12,15,14,15,15,15,14,15,15,13,15,15,15,15,14,15,14,15,14,15,15,15,15,14,15,14,15,15,15,14,6,15,14,15,14,14,14,14,14,15,15,13,15,15,14,14,14,14,14,14,14,14,14,14,14,14,14,14,13,15,15,13,15,),
(11,10,10,9,9,9,10,10,10,9,7,11,8,11,11,9,8,11,10,8,9,9,10,9,9,11,11,7,8,9,8,10,11,11,8,11,11,11,8,11,11,10,11,11,11,11,8,11,8,11,9,10,10,10,10,11,10,11,11,10,10,7,2,10,10,11,9,9,9,10,9,11,11,9,11,11,11,11,9,11,9,11,10,10,10,8,9,11,8,11,9,11,10,7,9,),
(9,9,6,8,8,7,9,7,9,8,5,8,5,8,6,8,7,8,8,5,8,8,9,8,8,8,8,5,6,7,8,6,5,9,7,9,9,9,7,9,9,5,9,9,9,9,7,9,7,9,8,9,9,9,9,8,9,8,9,9,9,6,0,9,6,9,6,6,6,7,6,9,9,7,9,9,7,7,6,7,6,7,6,7,7,7,7,7,7,7,6,9,9,5,8,),
(9,9,8,7,8,7,9,8,9,8,5,9,5,9,9,7,7,9,7,4,8,7,9,7,7,9,9,5,6,7,7,8,9,9,7,9,9,9,7,9,9,8,9,9,9,9,7,9,7,9,7,9,9,9,9,9,9,9,9,9,9,6,0,9,8,9,7,7,7,8,7,9,9,7,9,9,9,9,7,9,7,9,8,8,8,7,7,9,7,9,7,9,9,5,8,),
(3,3,1,1,2,1,3,1,3,1,1,2,1,2,1,1,1,1,1,1,2,1,3,1,1,2,2,1,1,1,1,1,1,3,1,3,3,3,1,3,3,1,3,3,3,3,1,3,1,3,1,3,3,3,3,2,3,2,3,3,3,1,3,3,1,3,1,1,1,1,2,3,3,3,3,3,1,1,1,3,1,1,1,1,1,1,1,1,2,1,1,3,3,1,2,),
(4,2,3,3,2,3,3,4,3,1,3,4,3,4,4,3,2,4,4,3,3,3,2,3,3,4,4,3,3,3,2,4,4,4,3,4,4,4,3,4,4,4,4,4,4,4,3,4,3,4,3,1,3,1,1,4,1,4,4,1,2,2,0,2,4,4,3,3,3,3,3,4,4,2,4,4,4,4,3,4,3,4,4,3,4,2,2,4,2,4,3,4,2,3,2,),
(3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,),
(7,6,6,6,6,5,6,7,5,5,4,6,3,6,5,7,6,5,5,5,7,7,4,6,7,6,6,6,6,6,6,7,5,7,7,7,7,7,7,7,7,2,7,7,7,7,7,7,7,7,6,4,7,5,6,3,4,3,7,6,5,7,0,5,7,7,7,7,7,6,7,7,7,5,7,7,7,7,7,7,7,7,7,6,7,6,6,6,7,6,5,7,5,6,5,),
(8,5,0,2,0,0,8,7,9,4,2,8,5,8,5,2,5,2,2,1,2,2,2,2,2,8,8,2,2,2,2,4,0,0,0,0,0,1,0,0,8,3,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9,5,9,2,9,7,2,2,3,2,2,5,8,3,8,9,2,8,0,3,2,9,3,5,3,5,3,2,0,2,8,2,6,9,9,1,5,),
(2,2,0,1,2,1,2,1,1,2,0,1,0,1,0,1,0,1,1,0,1,1,1,1,1,1,1,0,0,0,1,0,0,2,0,2,2,2,0,2,2,0,2,2,2,2,0,2,0,2,1,1,2,2,2,0,1,0,2,2,1,0,0,1,0,2,0,0,0,1,0,2,1,0,2,2,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,2,1,0,2,),
(7,6,7,7,7,7,6,7,6,6,7,7,7,7,7,7,6,7,7,7,7,7,5,7,7,7,7,7,7,7,6,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,3,7,6,6,7,4,7,7,6,5,7,0,5,7,7,7,7,7,6,7,7,7,5,7,7,7,7,7,7,7,7,7,6,7,7,7,7,7,7,7,7,5,7,5,),
(7,5,7,7,6,7,6,7,5,5,7,6,7,6,6,7,5,5,7,7,7,7,5,7,7,6,6,7,7,7,5,7,6,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,3,7,5,6,5,4,5,7,6,5,7,0,5,7,7,7,7,7,6,7,7,7,5,7,7,7,7,7,7,7,7,7,6,7,6,6,5,7,5,7,7,5,7,4,),
(6,5,6,6,6,5,5,6,4,5,3,5,2,5,6,6,5,5,6,3,6,6,4,5,6,5,5,3,6,5,5,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,5,2,6,5,5,5,3,5,6,5,4,5,0,4,6,6,5,5,5,5,5,6,6,4,6,6,6,6,5,6,5,6,6,5,6,6,6,5,6,5,4,6,4,1,4,),
(7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,0,7,7,7,7,7,7,7,7,7,7,5,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,),
(7,5,6,7,6,7,6,7,5,5,7,6,3,6,6,7,5,5,7,7,7,7,5,7,7,6,6,7,6,7,5,7,6,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,3,7,5,6,5,4,5,7,6,5,7,0,5,7,7,7,7,7,6,7,7,7,5,7,7,7,7,7,7,7,7,7,6,7,6,6,5,7,5,7,7,5,7,4,),
(4,4,2,4,4,3,4,3,4,3,2,3,2,3,2,4,4,2,2,2,4,4,4,4,4,4,4,2,2,4,4,3,2,4,4,4,4,4,4,4,4,2,4,4,4,4,4,4,4,4,4,4,4,4,4,3,4,3,4,4,4,3,0,4,2,4,2,2,2,4,2,4,4,2,4,4,4,4,2,4,2,4,3,4,4,4,4,4,4,4,2,4,4,2,3,),
(7,7,7,7,7,7,7,7,6,6,7,7,7,7,7,7,7,7,7,7,7,7,5,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,3,7,6,6,7,5,7,7,6,5,7,5,5,7,7,7,7,7,7,7,7,7,5,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,5,7,6,),
(6,5,6,6,6,6,5,6,5,5,6,6,6,6,6,6,5,6,6,6,6,6,4,6,6,6,6,6,6,6,5,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,2,6,5,5,6,3,6,6,5,4,6,0,4,6,6,6,6,6,5,6,6,6,4,6,6,6,6,6,6,6,6,6,5,6,6,6,6,6,6,6,6,4,6,4,),
(1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,),
(3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,),
(7,7,6,7,7,6,7,6,6,6,4,7,5,7,7,7,7,7,6,5,7,7,5,7,7,7,7,4,6,7,7,6,7,7,7,7,7,7,7,7,7,6,7,7,7,7,7,7,7,7,7,3,7,6,6,7,5,7,7,6,5,6,0,5,6,7,5,5,5,7,5,7,7,5,7,7,7,7,5,7,5,7,6,7,7,7,7,7,7,7,5,7,5,4,6,),
(1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,),
(11,10,11,11,11,11,10,11,10,10,11,11,11,11,11,11,10,11,11,11,11,11,9,11,11,11,11,11,11,11,10,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,7,11,10,10,11,8,11,11,10,9,11,2,9,11,11,11,11,11,10,11,11,11,9,11,11,11,11,11,11,11,11,11,10,11,11,11,11,11,11,11,11,9,11,9,),
(6,5,6,6,6,6,5,6,5,5,6,6,6,6,6,6,5,6,6,6,6,6,4,6,6,6,6,6,6,6,5,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,2,6,5,5,6,3,6,6,5,4,6,0,4,6,6,6,6,6,5,6,6,6,4,6,6,6,6,6,6,6,6,6,5,6,6,6,6,6,6,6,6,4,6,4,),
(7,5,7,7,6,7,6,7,5,5,7,6,7,6,6,7,5,5,7,7,7,7,5,7,7,6,6,7,7,7,5,7,6,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,3,7,5,6,5,4,5,7,6,5,7,0,5,7,7,7,7,7,6,7,7,7,5,7,7,7,7,7,7,7,7,7,6,7,6,6,5,7,5,7,7,5,7,4,),
(7,5,7,7,6,7,6,7,5,5,7,6,7,6,6,7,5,5,7,7,7,7,5,7,7,6,6,7,7,7,5,7,6,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,3,7,5,6,5,4,5,7,6,5,7,1,5,7,7,7,7,7,6,7,7,7,5,7,7,7,7,7,7,7,7,7,6,7,6,6,5,7,5,7,7,5,7,4,),
(7,7,7,7,7,7,7,7,7,6,7,7,7,7,7,7,7,7,7,7,7,7,5,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,3,7,6,6,7,5,7,7,6,7,7,7,5,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,6,),
(4,4,2,4,4,3,4,3,3,3,1,3,1,3,2,4,4,1,1,1,4,4,0,4,4,4,4,1,2,4,4,3,1,4,4,4,4,4,4,4,4,1,4,4,4,4,4,4,4,4,4,0,4,3,3,1,2,1,4,3,2,3,0,2,2,4,2,2,2,4,2,4,4,2,4,4,4,4,2,4,2,4,3,4,4,4,4,4,4,4,2,4,2,1,3,),
(6,5,6,6,6,6,5,6,4,5,3,5,6,5,6,6,5,5,6,6,6,6,4,6,6,5,5,4,6,6,5,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,2,6,5,5,5,3,5,6,5,4,5,0,4,6,6,6,6,6,5,6,6,6,4,6,6,6,6,6,6,6,6,6,5,6,6,6,5,6,5,5,6,4,1,4,),
(4,4,3,4,4,3,4,3,3,3,2,4,2,4,4,4,4,4,2,2,4,4,2,4,4,4,4,2,2,4,4,3,4,4,4,4,4,4,4,4,4,3,4,4,4,4,4,4,4,4,4,0,4,3,3,4,2,4,4,3,2,3,0,2,3,4,2,2,2,4,2,4,4,2,4,4,4,4,2,4,2,4,3,4,4,4,4,4,4,4,2,4,2,2,3,),
(6,6,6,6,6,6,6,6,5,5,6,6,6,6,6,6,6,6,6,6,6,6,4,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,2,6,5,5,6,4,6,6,5,4,6,0,4,6,6,6,6,6,6,6,6,6,4,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,4,6,5,),
(7,7,7,7,7,6,7,7,6,6,6,6,5,6,5,7,7,4,5,5,7,7,3,7,7,7,7,6,7,7,7,6,5,7,7,7,7,7,7,7,7,5,7,7,7,7,7,7,7,7,7,3,7,6,6,4,5,4,7,6,5,6,0,5,6,7,6,6,6,7,6,7,7,5,7,7,7,7,6,7,6,7,7,7,7,7,7,7,7,7,5,7,5,6,6,),
(11,11,11,11,11,10,11,11,10,10,10,10,10,10,9,11,11,9,10,10,11,11,8,11,11,11,11,10,11,11,11,10,9,11,11,11,11,11,11,11,11,10,11,11,11,11,11,11,11,11,11,7,11,10,10,9,9,9,11,10,9,10,2,9,10,11,10,10,10,11,10,11,11,9,11,11,11,11,10,11,10,11,11,11,11,11,11,11,11,11,10,11,9,10,10,),
(7,7,6,7,7,6,7,6,6,6,5,7,5,7,7,7,7,7,6,5,7,7,5,7,7,7,7,5,6,7,7,6,7,7,7,7,7,7,7,7,7,6,7,7,7,7,7,7,7,7,7,3,7,6,6,7,5,7,7,6,5,6,0,5,6,7,5,5,5,7,5,7,7,5,7,7,7,7,5,7,5,7,6,7,7,7,7,7,7,7,5,7,5,5,6,),
(7,7,7,7,7,6,7,7,6,6,6,6,5,6,5,7,7,4,5,6,7,7,3,7,7,7,7,6,7,7,7,6,5,7,7,7,7,7,7,7,7,5,7,7,7,7,7,7,7,7,7,3,7,6,6,4,5,4,7,6,5,6,3,5,6,7,6,6,6,7,6,7,7,5,7,7,7,7,6,7,6,7,7,7,7,7,7,7,7,7,6,7,5,6,6,),
(7,7,6,7,7,6,7,6,6,6,5,7,3,7,7,7,7,7,5,4,7,7,5,7,7,7,7,5,6,7,7,6,7,7,7,7,7,7,7,7,7,6,7,7,7,7,7,7,7,7,7,3,7,6,6,7,5,7,7,6,5,6,0,5,6,7,5,5,5,7,5,7,7,5,7,7,7,7,5,7,5,7,6,7,7,7,7,7,7,7,5,7,5,5,6,),
(5,5,3,3,4,3,5,3,5,3,3,4,3,4,3,3,3,3,3,3,4,3,5,3,3,4,4,3,3,3,3,3,3,5,3,5,5,5,3,5,5,3,5,5,5,5,3,5,3,5,3,5,5,5,5,4,5,4,5,5,5,3,5,5,3,5,3,3,3,3,4,5,5,5,5,5,3,3,3,5,3,3,3,3,3,3,3,3,4,3,2,5,5,3,4,),
(1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,),
(5,3,3,3,3,5,4,5,3,3,3,4,3,4,4,5,3,3,3,5,3,5,2,5,3,4,4,4,3,3,3,5,4,5,5,5,5,5,5,5,5,3,5,5,5,5,5,5,5,5,3,2,5,3,3,3,2,3,5,3,3,3,2,3,4,5,5,5,5,4,5,5,5,3,5,5,5,5,5,5,5,5,4,4,5,3,4,3,4,3,5,5,2,3,3,),
(8,4,6,7,7,6,7,8,6,3,7,7,4,7,6,8,5,3,6,6,8,8,5,6,8,7,7,7,1,4,3,8,6,8,8,8,8,8,8,8,8,3,8,8,8,8,8,8,8,8,6,4,8,6,7,3,5,4,8,7,6,8,0,6,8,8,8,8,8,7,8,8,8,6,8,8,8,8,8,8,8,8,8,7,8,7,7,6,8,4,6,8,6,7,4,),
(4,4,1,4,4,3,4,3,3,4,0,3,0,3,2,4,3,4,4,0,3,4,3,4,4,3,3,0,0,3,4,2,1,4,3,4,4,4,3,4,4,0,4,4,4,4,3,4,3,4,4,3,4,4,4,2,3,2,4,4,3,2,0,3,1,4,1,1,1,3,1,4,3,1,4,4,3,3,1,3,1,3,2,3,3,3,3,3,3,3,2,4,3,0,4,),
)
# End of font
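# --- Illustrative helper (not part of the generated font tables) ---
# Each glyph above occupies 16 rows. `descriptors` maps a character, starting
# at '!' per the trailing comments, to a (width_px, byte_offset) pair into the
# bitmap tuple; each row is (width_px + 7) // 8 bytes wide (1 byte up to 8 px,
# 2 bytes up to 16 px). The `bitmap` parameter stands in for the font tuple
# defined at the top of this module. (The `kerning` table appears to hold
# per-character-pair spacing hints and is not needed for raw glyph lookup.)
def _glyph_rows(bitmap, ch, height=16, first_char=ord('!')):
    width, offset = descriptors[ord(ch) - first_char]
    stride = (width + 7) // 8  # bytes per row
    return [bitmap[offset + r * stride:offset + (r + 1) * stride]
            for r in range(height)]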
|
HudsonWerks/OLED-SSD1306
|
ssd1306/fonts/arial_16.py
|
Python
|
lgpl-3.0
| 57,268
|
from asyncio import get_event_loop
from sif import Context, Sif
from sif_nats import Nats, SifNatsRpc
from .deps import GreeterStub
nats = Nats()
sif = Sif('greeter.server')
sif.add_rpc_transport('nats', SifNatsRpc(sif, nats))
greeter = GreeterStub(sif)
@greeter.greet.listen
async def greet(payload: str, ctx: Context) -> str:
return 'Hello ' + payload
def main():
loop = get_event_loop()
sif.create_server()
loop.run_forever()
if __name__ == '__main__':
main()
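# Note (illustrative): running this example presumes a NATS broker reachable
# with the Nats() defaults above; requests sent by a client-side GreeterStub
# over the 'nats' transport registered above are dispatched to the greet()
# handler.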
|
rudineirk/sif
|
examples/greeter/server.py
|
Python
|
mit
| 492
|
import argparse
import pyjsonrpc
from gensim.models.word2vec import Word2Vec
W2V_MODEL = None
class Word2VecRequestHandler(pyjsonrpc.HttpRequestHandler):
""" a simple handler class that implements the 'expand' method """
@pyjsonrpc.rpcmethod
def expand(self, word):
""" expand the word using the word2vec model """
try:
result = W2V_MODEL.most_similar(positive=[word], negative=[], topn=10)
        except KeyError:  # gensim raises KeyError for out-of-vocabulary words
result = []
return [pair[0] for pair in result]
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='A word2vec-based term-expansion JSON/RPC server')
    parser.add_argument('model', metavar='MODEL',
                        help='word2vec model in binary format')
parser.add_argument('port', metavar='PORT', type=int,
help='port number')
args = parser.parse_args()
# Set up word2vec model
W2V_MODEL = Word2Vec.load_word2vec_format(args.model, binary=True)
W2V_MODEL.init_sims(replace=True)
http_server = pyjsonrpc.ThreadingHttpServer(
server_address=('localhost', args.port),
RequestHandlerClass=Word2VecRequestHandler)
print "Starting word2vec server ..."
print "URL: http://localhost:{}".format(args.port)
http_server.serve_forever()
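# Hypothetical client session against this server (pyjsonrpc's HttpClient
# speaks the same JSON-RPC dialect; the URL and port are placeholders):
#
#   client = pyjsonrpc.HttpClient(url="http://localhost:8080")
#   print client.call("expand", "dog")  # -> up to 10 similar terms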
|
rueycheng/word2vec-expansion-jsonrpc-server
|
server.py
|
Python
|
mit
| 1,323
|
# -*- coding: utf-8 -*-
"""
WSGI config for Darky project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/{{ docs_version }}/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
application = get_wsgi_application()
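# Illustrative deployment command (module path assumed from this file living
# at the project root):
#
#   gunicorn wsgi:application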
|
nijel/darky
|
wsgi.py
|
Python
|
gpl-3.0
| 421
|
from MaKaC.common.general import *
from MaKaC.webinterface.rh import sessionModif
if DEVELOPEMENT:
sessionModif = reload( sessionModif )
def index(req, **params):
return sessionModif.RHSessionModifTools( req ).process( params )
def delete( req, **params ):
return sessionModif.RHSessionDeletion( req ).process( params )
def writeMinutes( req, **params ):
return sessionModif.RHSessionWriteMinutes( req ).process( params )
|
belokop-an/agenda-tools
|
code/htdocs/sessionModifTools.py
|
Python
|
gpl-2.0
| 444
|
# -*- coding: utf-8 -*-
#
# Copyright 2015 MarkLogic Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0#
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# File History
# ------------
#
# Paul Hoehne 03/26/2015 Initial development
#
"""
Host related classes for manipulating MarkLogic hosts
"""
from __future__ import unicode_literals, print_function, absolute_import
import json, logging, time
from marklogic.connection import Connection
from marklogic.exceptions import *
class Host:
"""
The Host class encapsulates a MarkLogic host.
"""
def __init__(self,name=None,connection=None,save_connection=True):
"""
Create a host.
"""
if name is None:
self._config = {}
else:
self._config = {'host-name': name}
self.name = name
self.etag = None
if save_connection:
self.connection = connection
else:
self.connection = None
self.logger = logging.getLogger("marklogic.host")
self._just_initialized = False
def host_name(self):
"""
Returns the host name of the cluster member
:return: The member host name
"""
return self._config['host-name']
def set_host_name(self, name):
self._config['host-name'] = name
def group_name(self):
"""
The cluster member's group
:return: Host's Group
"""
return self._config['group']
def set_group_name(self, name):
self._config['group'] = name
def bind_port(self):
"""
The bind port of the cluster member
:return: The host's bind port
"""
return self._config['bind-port']
def set_bind_port(self, port):
self._config['bind-port'] = port
def foreign_bind_port(self):
"""
The foreign bind port.
:return: The Host's foreign bind port
"""
return self._config['foreign-bind-port']
def set_foreign_bind_port(self, port):
self._config['foreign-bind-port'] = port
def zone(self):
"""
The zone
:return: The zone
"""
return self._config['zone']
def set_zone(self, zone):
self._config['zone'] = zone
def bootstrap_host(self):
"""
        Indicates if this is the bootstrap host.
        :return: Bootstrap host indicator
        """
        return self._config['bootstrap-host']
def just_initialized(self):
"""
Indicates if this host was just initialized. This method will
        only return True if the host was just initialized (i.e., returned
by MarkLogic.instance_init()).
:return:True or False
"""
return self._just_initialized
def _set_just_initialized(self):
"""
Internal method used to specify that the host was just initialized.
:return: The host object
"""
self._just_initialized = True
return self
def read(self, connection=None):
"""
Loads the host from the MarkLogic server. This will refresh
the properties of the object.
:param connection: The connection to a MarkLogic server
:return: The host object
"""
if connection is None:
connection = self.connection
host = Host.lookup(connection, self.name)
if host is not None:
self._config = host._config
self.etag = host.etag
return self
def update(self, connection=None):
"""
Save the configuration changes with the given connection.
:param connection:The server connection
:return: The host object
"""
if connection is None:
connection = self.connection
uri = connection.uri("hosts", self.name)
struct = self.marshal()
response = connection.put(uri, payload=struct, etag=self.etag)
# In case we renamed it
self.name = self._config['host-name']
if 'etag' in response.headers:
self.etag = response.headers['etag']
return self
def restart(self, connection=None):
"""
Restart the host.
:param connection:The server connection
:return: The host object
"""
if connection is None:
connection = self.connection
uri = connection.uri("hosts", self.name, properties=None)
struct = {'operation':'restart'}
response = connection.post(uri, payload=struct)
return self
def shutdown(self, connection=None):
"""
Shutdown the host.
:param connection:The server connection
:return: None
"""
if connection is None:
connection = self.connection
uri = connection.uri("hosts", self.name, properties=None)
struct = {'operation':'shutdown'}
response = connection.post(uri, payload=struct)
return None
@classmethod
def lookup(cls, connection, name):
"""
Look up an individual host within the cluster.
:param name: The name of the host
:param connection: A connection to a MarkLogic server
:return: The host information
"""
uri = connection.uri("hosts", name)
response = connection.get(uri)
if response.status_code == 200:
result = Host.unmarshal(json.loads(response.text))
if 'etag' in response.headers:
result.etag = response.headers['etag']
return result
else:
return None
@classmethod
def list(cls, connection):
"""
Lists the names of hosts available on this cluster.
:param connection: A connection to a MarkLogic server
:return: A list of host names
"""
uri = connection.uri("hosts")
response = connection.get(uri)
if response.status_code == 200:
response_json = json.loads(response.text)
host_count = response_json['host-default-list']['list-items']['list-count']['value']
result = []
if host_count > 0:
for item in response_json['host-default-list']['list-items']['list-item']:
result.append(item['nameref'])
else:
raise UnexpectedManagementAPIResponse(response.text)
return result
@classmethod
def unmarshal(cls, config):
result = Host()
result._config = config
result.name = result._config['host-name']
return result
def marshal(self):
struct = { }
for key in self._config:
struct[key] = self._config[key]
return struct
def join_cluster(self, cluster, cluster_connection=None):
if cluster_connection is None:
cluster_connection = cluster.connection
xml = self._get_server_config()
cfgzip = cluster._post_server_config(xml,cluster_connection)
connection = Connection(self.host_name(), cluster_connection.auth)
self._post_cluster_config(cfgzip,connection)
def _get_server_config(self):
"""
Obtain the server configuration. This is the data necessary for
the first part of the handshake necessary to join a host to a
cluster. The returned data is not intended for introspection.
:return: The config. This is always XML.
"""
connection = Connection(self.host_name(), None)
uri = "http://{0}:8001/admin/v1/server-config".format(connection.host)
response = connection.get(uri, accept="application/xml")
if response.status_code != 200:
raise UnexpectedManagementAPIResponse(response.text)
return response.text # this is always XML
def _post_cluster_config(self,cfgzip,connection):
"""
        Send the cluster configuration to the server that's joining
the cluster. This is the second half of
the handshake necessary to join a host to a cluster.
:param connection: The connection credentials to use
:param cfgzip: The ZIP payload from post_server_config()
"""
uri = "{0}://{1}:8001/admin/v1/cluster-config" \
.format(connection.protocol, connection.host)
response = connection.post(uri, payload=cfgzip,
content_type="application/zip")
if response.status_code != 202:
raise UnexpectedManagementAPIResponse(response.text)
data = json.loads(response.text)
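# --- Illustrative usage (not part of the module) ---
# The auth object passed to Connection is deployment-specific; requests'
# HTTPDigestAuth is a common choice for the MarkLogic management API.
#
#   from requests.auth import HTTPDigestAuth
#   conn = Connection("localhost", HTTPDigestAuth("admin", "admin"))
#   for name in Host.list(conn):
#       print(Host.lookup(conn, name).host_name())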
|
supriyantomaftuh/python_api
|
python_api/marklogic/models/host.py
|
Python
|
apache-2.0
| 9,046
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import content_index
import std_index
import document_storage
import document_directory
import directory_content
import directory_report
import document
import report
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Johnzero/erp
|
openerp/addons/document/__init__.py
|
Python
|
agpl-3.0
| 1,227
|
# -*- coding: utf-8 -*-
"""Test cases for Russian"""
NAME = "TestRussian"
TEST_CASES = {
0: 'ноль',
1: 'один', 2: 'два', 3: 'три', 4: 'четыре', 5: 'пять', 6: 'шесть', 7: 'семь',
8: 'восемь', 9: 'девять', 10: 'десять', 11: 'одиннадцать', 12: 'двенадцать',
13: 'тринадцать', 14: 'четырнадцать', 15: 'пятнадцать', 16: 'шестнадцать',
17: 'семнадцать', 18: 'восемнадцать', 19: 'девятнадцать', 20: 'двадцать',
21: 'двадцать один', 22: 'двадцать два', 23: 'двадцать три',
24: 'двадцать четыре', 25: 'двадцать пять', 26: 'двадцать шесть',
27: 'двадцать семь', 28: 'двадцать восемь', 29: 'двадцать девять',
30: 'тридцать', 31: 'тридцать один', 32: 'тридцать два', 33: 'тридцать три',
34: 'тридцать четыре', 35: 'тридцать пять', 36: 'тридцать шесть',
37: 'тридцать семь', 38: 'тридцать восемь', 39: 'тридцать девять',
40: 'сорок', 41: 'сорок один', 42: 'сорок два', 43: 'сорок три',
44: 'сорок четыре', 45: 'сорок пять', 46: 'сорок шесть', 47: 'сорок семь',
48: 'сорок восемь', 49: 'сорок девять', 50: 'пятьдесят',
51: 'пятьдесят один', 52: 'пятьдесят два', 53: 'пятьдесят три',
54: 'пятьдесят четыре', 55: 'пятьдесят пять', 56: 'пятьдесят шесть',
57: 'пятьдесят семь', 58: 'пятьдесят восемь', 59: 'пятьдесят девять',
60: 'шестьдесят', 61: 'шестьдесят один', 62: 'шестьдесят два',
63: 'шестьдесят три', 64: 'шестьдесят четыре', 65: 'шестьдесят пять',
66: 'шестьдесят шесть', 67: 'шестьдесят семь', 68: 'шестьдесят восемь',
69: 'шестьдесят девять', 70: 'семьдесят', 71: 'семьдесят один',
72: 'семьдесят два', 73: 'семьдесят три', 74: 'семьдесят четыре',
75: 'семьдесят пять', 76: 'семьдесят шесть', 77: 'семьдесят семь',
78: 'семьдесят восемь', 79: 'семьдесят девять', 80: 'восемьдесят',
81: 'восемьдесят один', 82: 'восемьдесят два', 83: 'восемьдесят три',
84: 'восемьдесят четыре', 85: 'восемьдесят пять',
86: 'восемьдесят шесть', 87: 'восемьдесят семь', 88: 'восемьдесят восемь',
89: 'восемьдесят девять', 90: 'девяносто', 91: 'девяносто один',
92: 'девяносто два', 93: 'девяносто три', 94: 'девяносто четыре',
95: 'девяносто пять', 96: 'девяносто шесть', 97: 'девяносто семь',
98: 'девяносто восемь', 99: 'девяносто девять',
100: 'сто',
101: 'сто один',
102: 'сто два',
109: 'сто девять',
110: 'сто десять',
120: 'сто двадцать',
121: 'сто двадцать один',
130: 'сто тридцать',
198: 'сто девяносто восемь',
199: 'сто девяносто девять',
200: 'двести',
203: 'двести три',
204: 'двести четыре',
211: 'двести одиннадцать',
212: 'двести двенадцать',
232: 'двести тридцать два',
250: 'двести пятьдесят',
299: 'двести девяносто девять',
300: 'триста', 376: 'триста семьдесят шесть',
400: 'четыреста', 402: 'четыреста два',
500: 'пятьсот', 515: 'пятьсот пятнадцать',
600: 'шестьсот', 611: 'шестьсот одиннадцать',
700: 'семьсот', 713: 'семьсот тринадцать',
800: 'восемьсот', 817: 'восемьсот семнадцать',
900: 'девятьсот', 919: 'девятьсот девятнадцать',
999: 'девятьсот девяносто девять',
1000: 'тысяча',
1001: 'тысяча один',
1010: 'тысяча десять',
1130: 'тысяча сто тридцать',
1134: 'тысяча сто тридцать четыре',
1345: 'тысяча триста сорок пять',
1989: 'тысяча девятьсот восемьдесят девять',
2000: 'две тысячи',
3000: 'три тысячи',
7456: 'семь тысяч четыреста пятьдесят шесть',
10000: 'десять тысяч',
10567: 'десять тысяч пятьсот шестьдесят семь',
13000: 'тринадцать тысяч',
20933: 'двадцать тысяч девятьсот тридцать три',
21000: 'двадцать одна тысяча',
30000: 'тридцать тысяч',
31000: 'тридцать одна тысяча',
90000: 'девяносто тысяч',
100000: 'сто тысяч',
101000: 'сто одна тысяча',
201000: 'двести одна тысяча',
301000: 'триста одна тысяча',
200100: 'двести тысяч сто',
250000: 'двести пятьдесят тысяч',
934756: 'девятьсот тридцать четыре тысячи семьсот пятьдесят шесть',
1000000: 'миллион',
1100234: 'миллион сто тысяч двести тридцать четыре',
2000000: 'два миллиона',
6000000: 'шесть миллионов',
21000000: 'двадцать один миллион',
31000000: 'тридцать один миллион',
10000000: 'десять миллионов',
10678456: 'десять миллионов шестьсот семьдесят восемь тысяч четыреста пятьдесят шесть',
100000000: 'сто миллионов',
100100000: 'сто миллионов сто тысяч',
101000000: 'сто один миллион',
300873678: 'триста миллионов восемьсот семьдесят три тысячи шестьсот семьдесят восемь',
301000000: 'триста один миллион',
15789513: 'пятнадцать миллионов семьсот восемьдесят девять тысяч пятьсот тринадцать',
143000000: 'сто сорок три миллиона',
143007000: 'сто сорок три миллиона семь тысяч',
1000000000: 'миллиард',
2000000000: 'два миллиарда',
10000000000: 'десять миллиардов',
13000000000: 'тринадцать миллиардов',
21000000000: 'двадцать один миллиард',
100000000000: 'сто миллиардов',
101000000000: 'сто один миллиард',
110000000000: 'сто десять миллиардов',
201000000000: 'двести один миллиард',
1000000000000: 'триллион',
}
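# Hypothetical harness (the speller itself lives elsewhere in the package;
# `spell` below is an assumed callable taking a number and a language code):
#
#   for number, expected in sorted(TEST_CASES.items()):
#       assert spell(number, 'ru') == expected, number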
|
alco/numspell
|
test/cases_ru.py
|
Python
|
mit
| 7,765
|
# -*- coding: utf-8 -*-
from django.http import HttpResponse, Http404
from django.shortcuts import render, get_object_or_404, redirect
from mailinglist.models import MailingList
from mailinglist.forms import SubscribeForm
def index(request):
    return HttpResponse('Hello World.\
        You are now in the mailing list index.')
def mlist(request, list_id):
try:
ml = MailingList.objects.get(pk=list_id)
except MailingList.DoesNotExist:
        raise Http404('Mailing list does not exist!!!')
return render(request, 'mailinglist/mlist.html', {'ml': ml})
def subscribe(request, list_id):
ml = get_object_or_404(MailingList, pk=list_id)
if request.method == "POST":
form = SubscribeForm(request.POST)
if form.is_valid():
subscriber = form.save()
ml.subscriber.add(subscriber)
return redirect('mlist', list_id=list_id)
else:
form = SubscribeForm()
return render(request, 'mailinglist/subscribe.html',
{'ml': ml, 'form': form})
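# A minimal URLconf wiring these views (illustrative; the 'mlist' route name
# is assumed from the redirect() call above):
#
#   from django.conf.urls import url
#   urlpatterns = [
#       url(r'^$', views.index, name='index'),
#       url(r'^(?P<list_id>[0-9]+)/$', views.mlist, name='mlist'),
#       url(r'^(?P<list_id>[0-9]+)/subscribe/$', views.subscribe,
#           name='subscribe'),
#   ]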
|
ricco386/zaciname-s-djangom
|
konferencia/mailinglist/views.py
|
Python
|
bsd-3-clause
| 1,058
|
'''
Created on 2014-5-4
@author: xiajie
'''
import numpy as np
import prostate
def ls(X, Y):
#print X
#print Y
XT = np.transpose(X)
M = XT.dot(X)
if np.linalg.matrix_rank(M) < len(M):
return None
beta = np.linalg.inv(M).dot(XT).dot(Y)
return beta
def augment(data):
dlist = data.tolist()
for i in range(len(dlist)):
dlist[i].insert(0, 1)
return np.array(dlist)
def predict(X, beta):
yhat = X.dot(beta)
return yhat
def RSS(X, Y, beta):
yhat = predict(X, beta)
return np.sum((yhat-Y)**2)
def thegama2(yhat, output, p):
delta = yhat-output
return np.sum(delta*delta)/(len(yhat)-p-1)
def z_score(beta, theg, data):
z = np.zeros(len(beta))
DT = np.transpose(data)
M = np.linalg.inv(DT.dot(data))
for i in range(len(beta)):
z[i] = beta[i]/(theg*np.sqrt(M[i,i]))
return z
if __name__ == '__main__':
inputs, output, Ttype = prostate.loaddata()
train_data, train_out, test_data, test_out = prostate.cookdata2(inputs, output, Ttype)
beta = ls(augment(train_data), train_out)
print beta
#yhat = predict(augment(train_data), beta)
#theg2 = thegama2(yhat, train_out, len(train_data[0]))
#print theg2
#z = z_score(beta, np.sqrt(theg2), augment(train_data))
#print z
for i in range(len(test_out)):
lst = test_data[i].tolist()
lst.insert(0,1.)
new_lst = np.array(lst)
print predict(new_lst, beta), test_out[i]
print RSS(augment(test_data), test_out, beta)/float(len(test_out))
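# Sanity check (illustrative, not in the original script): ls() implements
# the normal-equations solution beta = (X^T X)^{-1} X^T y, so it should agree
# with numpy's lstsq solver on any full-rank design matrix:
#
#   X = augment(np.random.randn(20, 3))
#   Y = np.random.randn(20)
#   assert np.allclose(ls(X, Y), np.linalg.lstsq(X, Y)[0])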
|
jayshonzs/ESL
|
LinearRegression/leastsquare.py
|
Python
|
mit
| 1,567
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class AzureFileShareReference(Model):
"""Details of the Azure File Share to mount on the cluster.
:param account_name: Name of the storage account.
:type account_name: str
:param azure_file_url: URL to access the Azure File.
:type azure_file_url: str
:param credentials: Information of the Azure File credentials.
:type credentials: :class:`AzureStorageCredentialsInfo
<azure.mgmt.batchai.models.AzureStorageCredentialsInfo>`
:param relative_mount_path: Specifies the relative path on the compute
node where the Azure file share will be mounted. Note that all file shares
will be mounted under $AZ_BATCHAI_MOUNT_ROOT location.
:type relative_mount_path: str
:param file_mode: Specifies the file mode. Default value is 0777. Valid
only if OS is linux. Default value: "0777" .
:type file_mode: str
:param directory_mode: Specifies the directory Mode. Default value is
0777. Valid only if OS is linux. Default value: "0777" .
:type directory_mode: str
"""
_validation = {
'account_name': {'required': True},
'azure_file_url': {'required': True},
'credentials': {'required': True},
'relative_mount_path': {'required': True},
}
_attribute_map = {
'account_name': {'key': 'accountName', 'type': 'str'},
'azure_file_url': {'key': 'azureFileUrl', 'type': 'str'},
'credentials': {'key': 'credentials', 'type': 'AzureStorageCredentialsInfo'},
'relative_mount_path': {'key': 'relativeMountPath', 'type': 'str'},
'file_mode': {'key': 'fileMode', 'type': 'str'},
'directory_mode': {'key': 'directoryMode', 'type': 'str'},
}
def __init__(self, account_name, azure_file_url, credentials, relative_mount_path, file_mode="0777", directory_mode="0777"):
self.account_name = account_name
self.azure_file_url = azure_file_url
self.credentials = credentials
self.relative_mount_path = relative_mount_path
self.file_mode = file_mode
self.directory_mode = directory_mode
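# Illustrative construction (placeholder values; the account_key keyword on
# AzureStorageCredentialsInfo is an assumption, see that model's definition):
#
#   share = AzureFileShareReference(
#       account_name="mystorageacct",
#       azure_file_url="https://mystorageacct.file.core.windows.net/myshare",
#       credentials=AzureStorageCredentialsInfo(account_key="<storage key>"),
#       relative_mount_path="afs")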
|
AutorestCI/azure-sdk-for-python
|
azure-mgmt-batchai/azure/mgmt/batchai/models/azure_file_share_reference.py
|
Python
|
mit
| 2,596
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPygit2(PythonPackage):
"""Pygit2 is a set of Python bindings to the libgit2 shared library,
libgit2 implements the core of Git.
"""
homepage = "https://www.pygit2.org/"
pypi = "pygit2/pygit2-0.24.1.tar.gz"
version('1.6.0', sha256='7aacea4e57011777f4774421228e5d0ddb9a6ddb87ac4b542346d17ab12a4d62')
version('1.4.0', sha256='cbeb38ab1df9b5d8896548a11e63aae8a064763ab5f1eabe4475e6b8a78ee1c8')
version('1.3.0', sha256='0be93f6a8d7cbf0cc79ae2f0afb1993fc055fc0018c27e2bd01ba143e51d4452')
version('0.28.2', sha256='4d8c3fbbf2e5793a9984681a94e6ac2f1bc91a92cbac762dbdfbea296b917f86')
version('0.24.1', sha256='4d1d0196b38d6012faf0a7c45e235c208315672b6035da504566c605ba494064')
depends_on('py-setuptools', type='build')
# Version must match with libgit2
# See: https://www.pygit2.org/install.html
depends_on('libgit2@1.1.0:1.1', when='@1.4:')
depends_on('libgit2@1.0.0:1.0', when='@1.2:1.3')
depends_on('libgit2@0:1.0', when='@1.1.0:1.1')
depends_on('libgit2@0.28.0:0.28', when='@0.28:1.0')
depends_on('libgit2@0.24:0.27', when='@0.24:0.27')
depends_on('python@3.6:', when='@1.4.0:')
depends_on('python@3.6:3.8', when='@1.2:1.3')
depends_on('python@3.5:3.8', when='@1.0:1.1')
depends_on('python@2.7:3.7', when='@0.28:0')
depends_on('py-six', type=('build', 'run'), when='@:0.28.2')
depends_on('py-cffi@1.4.0:', type=('build', 'run'))
depends_on('py-cached-property', when='@1.1.0:1.5', type=('build', 'run'))
depends_on('py-cached-property', when='@1.6.0: ^python@:3.7', type=('build', 'run'))
def setup_build_environment(self, env):
spec = self.spec
# https://www.pygit2.org/install.html
env.set('LIBGIT2', spec['libgit2'].prefix)
env.set('LIBGIT2_LIB', spec['libgit2'].prefix.lib)
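# Typical command-line usage (illustrative):
#
#   spack install py-pygit2@1.6.0
#
# which pulls in a libgit2 matching the version constraints declared above.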
|
LLNL/spack
|
var/spack/repos/builtin/packages/py-pygit2/package.py
|
Python
|
lgpl-2.1
| 2,045
|
# coding=UTF-8
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import, unicode_literals
import os
import glyphsLib
from fontTools.designspaceLib import DesignSpaceDocument
from glyphsLib.builder.instances import apply_instance_data
import defcon
import pytest
import py.path
from ..test_helpers import write_designspace_and_UFOs
DATA = os.path.join(os.path.dirname(os.path.dirname(__file__)), "data")
@pytest.mark.parametrize(
"instance_names",
[None, ["Extra Light"], ["Regular", "Bold"]],
ids=["default", "include_1", "include_2"],
)
def test_apply_instance_data(tmpdir, instance_names):
font = glyphsLib.GSFont(os.path.join(DATA, "GlyphsUnitTestSans.glyphs"))
instance_dir = "instances"
designspace = glyphsLib.to_designspace(font, instance_dir=instance_dir)
path = str(tmpdir / (font.familyName + ".designspace"))
write_designspace_and_UFOs(designspace, path)
test_designspace = DesignSpaceDocument()
test_designspace.read(designspace.path)
if instance_names is None:
# Collect all instances.
test_instances = [instance.filename for instance in test_designspace.instances]
else:
# Collect only selected instances.
test_instances = [
instance.filename
for instance in test_designspace.instances
if instance.styleName in instance_names
]
# Generate dummy UFOs for collected instances so we don't actually need to
# interpolate.
tmpdir.mkdir(instance_dir)
for instance in test_instances:
ufo = defcon.Font()
ufo.save(str(tmpdir / instance))
ufos = apply_instance_data(designspace.path, include_filenames=test_instances)
for filename in test_instances:
assert os.path.isdir(str(tmpdir / filename))
assert len(ufos) == len(test_instances)
for ufo in ufos:
assert ufo.info.openTypeOS2WeightClass is not None
assert ufo.info.openTypeOS2WidthClass is not None
def test_reexport_apply_instance_data():
# this is for compatibility with fontmake
# https://github.com/googlei18n/fontmake/issues/451
from glyphsLib.interpolation import apply_instance_data as reexported
assert reexported is apply_instance_data
def test_reencode_glyphs(tmpdir):
data_dir = py.path.local(DATA)
designspace_path = data_dir / "TestReencode.designspace"
designspace_path.copy(tmpdir)
ufo_path = data_dir / "TestReencode-Regular.ufo"
ufo_path.copy(tmpdir.ensure_dir("TestReencode-Regular.ufo"))
instance_dir = tmpdir.ensure_dir("instance_ufo")
ufo_path.copy(instance_dir.ensure_dir("TestReencode-Regular.ufo"))
ufo_path.copy(instance_dir.ensure_dir("TestReencodeUI-Regular.ufo"))
ufos = apply_instance_data(str(tmpdir / "TestReencode.designspace"))
assert len(ufos) == 2
assert ufos[0]["A"].unicode == 0x0041
assert ufos[0]["A.alt"].unicode is None
assert ufos[0]["C"].unicode == 0x0043
# Reencode Glyphs: A.alt=0041, C=
assert ufos[1]["A"].unicode is None
assert ufos[1]["A.alt"].unicode == 0x0041
assert ufos[1]["C"].unicode is None
|
googlei18n/glyphsLib
|
tests/builder/instances_test.py
|
Python
|
apache-2.0
| 3,713
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('info_transportation', '0007_auto_20150226_0932'),
]
operations = [
migrations.AlterModelOptions(
name='line',
options={'verbose_name': 'Linja', 'verbose_name_plural': 'Linjat'},
),
migrations.AlterModelOptions(
name='stop',
options={'verbose_name': 'Pys\xe4kki',
'verbose_name_plural': 'Pys\xe4kit'},
),
migrations.AddField(
model_name='line',
name='destination',
field=models.CharField(default=' ', max_length=50),
preserve_default=False,
),
migrations.AddField(
model_name='line',
name='raw',
field=models.CharField(
default=' ', max_length=50, verbose_name=b'Sis\xc3\xa4inen numero'),
preserve_default=False,
),
migrations.AlterField(
model_name='line',
name='line_number',
field=models.CharField(max_length=20, verbose_name=b'Numero'),
preserve_default=True,
),
migrations.AlterField(
model_name='line',
name='line_number_raw',
field=models.CharField(max_length=20),
preserve_default=True,
),
migrations.AlterField(
model_name='line',
name='show_line',
field=models.BooleanField(
default=False, verbose_name=b'N\xc3\xa4yt\xc3\xa4 l\xc3\xa4hd\xc3\xb6t'),
preserve_default=True,
),
migrations.AlterField(
model_name='stop',
name='description',
field=models.CharField(max_length=50, verbose_name=b'Kuvaus'),
preserve_default=True,
),
migrations.AlterField(
model_name='stop',
name='stop_number',
field=models.CharField(
unique=True, max_length=20, verbose_name=b'Numero'),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='line',
unique_together=set([('stop', 'raw')]),
),
]
|
ojarva/home-info-display
|
homedisplay/info_transportation/migrations/0008_auto_20150226_1015.py
|
Python
|
bsd-3-clause
| 2,319
|
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example creates new line item creative associations (LICAs) for an
existing line item and a set of creative ids.
To determine which LICAs exist, run get_all_licas.py."""
# Import appropriate modules from the client library.
from googleads import dfp
# Set the line item ID and creative IDs to associate.
LINE_ITEM_ID = 'INSERT_LINE_ITEM_ID_HERE'
CREATIVE_IDS = ['INSERT_CREATIVE_IDS_HERE']
def main(client, line_item_id, creative_ids):
# Initialize appropriate service.
lica_service = client.GetService(
'LineItemCreativeAssociationService', version='v201508')
licas = []
for creative_id in creative_ids:
licas.append({'creativeId': creative_id,
'lineItemId': line_item_id})
# Create the LICAs remotely.
licas = lica_service.createLineItemCreativeAssociations(licas)
# Display results.
if licas:
for lica in licas:
print ('LICA with line item id \'%s\', creative id \'%s\', and '
'status \'%s\' was created.' %
(lica['lineItemId'], lica['creativeId'], lica['status']))
else:
    print ('No LICAs created.')
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client, LINE_ITEM_ID, CREATIVE_IDS)
|
wubr2000/googleads-python-lib
|
examples/dfp/v201508/line_item_creative_association_service/create_licas.py
|
Python
|
apache-2.0
| 1,879
|
import webbrowser
import tornado.httpserver
import tornado.ioloop
from mitmproxy import addons
from mitmproxy import log
from mitmproxy import master
from mitmproxy import optmanager
from mitmproxy.addons import eventstore
from mitmproxy.addons import intercept
from mitmproxy.addons import readfile
from mitmproxy.addons import termlog
from mitmproxy.addons import view
from mitmproxy.addons import termstatus
from mitmproxy.tools.web import app, webaddons, static_viewer
class WebMaster(master.Master):
def __init__(self, options, with_termlog=True):
super().__init__(options)
self.view = view.View()
self.view.sig_view_add.connect(self._sig_view_add)
self.view.sig_view_remove.connect(self._sig_view_remove)
self.view.sig_view_update.connect(self._sig_view_update)
self.view.sig_view_refresh.connect(self._sig_view_refresh)
self.events = eventstore.EventStore()
self.events.sig_add.connect(self._sig_events_add)
self.events.sig_refresh.connect(self._sig_events_refresh)
self.options.changed.connect(self._sig_options_update)
self.options.changed.connect(self._sig_settings_update)
self.addons.add(*addons.default_addons())
self.addons.add(
webaddons.WebAddon(),
intercept.Intercept(),
readfile.ReadFile(),
static_viewer.StaticViewer(),
self.view,
self.events,
)
if with_termlog:
self.addons.add(termlog.TermLog(), termstatus.TermStatus())
self.app = app.Application(
self, self.options.web_debug
)
def _sig_view_add(self, view, flow):
app.ClientConnection.broadcast(
resource="flows",
cmd="add",
data=app.flow_to_json(flow)
)
def _sig_view_update(self, view, flow):
app.ClientConnection.broadcast(
resource="flows",
cmd="update",
data=app.flow_to_json(flow)
)
def _sig_view_remove(self, view, flow, index):
app.ClientConnection.broadcast(
resource="flows",
cmd="remove",
data=flow.id
)
def _sig_view_refresh(self, view):
app.ClientConnection.broadcast(
resource="flows",
cmd="reset"
)
def _sig_events_add(self, event_store, entry: log.LogEntry):
app.ClientConnection.broadcast(
resource="events",
cmd="add",
data=app.logentry_to_json(entry)
)
def _sig_events_refresh(self, event_store):
app.ClientConnection.broadcast(
resource="events",
cmd="reset"
)
def _sig_options_update(self, options, updated):
options_dict = optmanager.dump_dicts(options, updated)
app.ClientConnection.broadcast(
resource="options",
cmd="update",
data=options_dict
)
def _sig_settings_update(self, options, updated):
app.ClientConnection.broadcast(
resource="settings",
cmd="update",
data={k: getattr(options, k) for k in updated}
)
def run(self): # pragma: no cover
iol = tornado.ioloop.IOLoop.instance()
http_server = tornado.httpserver.HTTPServer(self.app)
http_server.listen(self.options.web_port, self.options.web_iface)
iol.add_callback(self.start)
tornado.ioloop.PeriodicCallback(lambda: self.tick(timeout=0), 5).start()
web_url = "http://{}:{}/".format(self.options.web_iface, self.options.web_port)
self.add_log(
"Web server listening at {}".format(web_url),
"info"
)
if self.options.web_open_browser:
success = open_browser(web_url)
if not success:
self.add_log(
"No web browser found. Please open a browser and point it to {}".format(web_url),
"info"
)
try:
iol.start()
except KeyboardInterrupt:
self.shutdown()
def shutdown(self):
tornado.ioloop.IOLoop.instance().stop()
super().shutdown()
def open_browser(url: str) -> bool:
"""
Open a URL in a browser window.
In contrast to webbrowser.open, we limit the list of suitable browsers.
This gracefully degrades to a no-op on headless servers, where webbrowser.open
would otherwise open lynx.
Returns:
True, if a browser has been opened
False, if no suitable browser has been found.
"""
browsers = (
"windows-default", "macosx",
"google-chrome", "chrome", "chromium", "chromium-browser",
"firefox", "opera", "safari",
)
for browser in browsers:
try:
b = webbrowser.get(browser)
except webbrowser.Error:
pass
else:
b.open(url)
return True
return False
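# Usage sketch (illustrative):
#
#     if not open_browser("http://127.0.0.1:8081/"):
#         print("No suitable browser found; open the URL manually.")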
|
MatthewShao/mitmproxy
|
mitmproxy/tools/web/master.py
|
Python
|
mit
| 4,995
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from os import path, getenv
from glob import glob
import codecs
from LatXmlParser import LatXmlParser
from NindIdentifiantsClef import NindIdentifiantsClef
def usage():
if getenv("PY") != None: script = sys.argv[0].replace(getenv("PY"), '$PY')
else: script = sys.argv[0]
print ("""© l'ATEJCON.
Convertit un corpus XML CLEF analysé par LIMA en un fichier texte.
Le nom du fichier d'entrée doit être */xml/<lang>/<fichier xml>
Le nom du fichier de sortie sera */txt/<lang>/<fichier xml>.txt
Le fichier texte constituera le corpus analysé prêt pour l'indexation.
Il y a une ligne par document: noDoc { terme localisation,longueur }
usage : %s <fichiers xml>
exemple : %s "clef/xml/fre/*.xml.mult.xml"
"""%(script, script))
def main():
try:
if len(sys.argv) < 2 : raise Exception()
fichiersXml = path.realpath(sys.argv[1])
convertitXmlClef(fichiersXml)
except Exception as exc:
if len(exc.args) == 0: usage()
else:
print ("******************************")
print (exc.args[0])
print ("******************************")
raise
def convertitXmlClef(fichiersXml):
    # find the output path
repertoire = path.dirname(fichiersXml).replace('/xml/','/txt/')
    # initialise the conversion
langue = repertoire[-3:]
nindIdentifiantsClef = NindIdentifiantsClef(langue)
docs = ids = termes = 0
listeFichiers = glob(fichiersXml)
    if len(listeFichiers) == 0: raise Exception('ERROR: unknown file %s'%(fichiersXml))
noFichier = 0
for inFileName in listeFichiers:
noFichier +=1
#print("%03d/%03d : %s"%(noFichier, len(listeFichiers), inFileName))
        # output file name
outFileName = repertoire + '/' + path.basename(inFileName) + '.txt'
outFile = codecs.open(outFileName, 'w', 'utf-8')
dumpParser = DumpParser(inFileName, outFile, nindIdentifiantsClef)
outFile.close()
        # display the result
(comptDocs, comptIds, comptTermes) = dumpParser.compteurs
docs += comptDocs
ids += comptIds
termes += comptTermes
        #print (' %d documents found'%(comptDocs))
        #print (' %d document identifiers found'%(comptIds))
        #print (' %d term occurrences found'%(comptTermes))
print ("TOTAL")
    print ('%d documents found'%(docs))
    print ('%d document identifiers found'%(ids))
    print ('%d term occurrences found'%(termes))
sys.exit()
class DumpParser():
def __init__(self, inFileName, outFile, nindIdentifiantsClef):
self.outFile = outFile
self.nindIdentifiantsClef = nindIdentifiantsClef
self.compteurs = [0, 0, 0]
        # this class parses an XML file, using an xpath-like addressing scheme.
pathsArray = ['/MultimediaDocuments/node/node', '/MultimediaDocuments/node/node/properties/property', 'bowToken', 'bowTerm', 'bowNamedEntity']
        # the parser
self.xmlParser = LatXmlParser()
self.xmlParser.setPathsAndCallback(pathsArray, self.nodeCallback, self.textCallback, self.endNodeCallback)
self.xmlParser.startParse(inFileName)
def nodeCallback(self, path, attr):
if path == '/MultimediaDocuments/node/node':
if attr.getValue('elementName') != 'DOC':
            raise Exception('/MultimediaDocuments/node/node at line %d'%(self.xmlParser.getLineNumber()))
            # new document: initialise the term container
self.termSet = set()
self.compteurs[0] +=1
elif path == '/MultimediaDocuments/node/node/properties/property':
if attr.getValue('name') == 'identPrpty':
self.docId = int(self.nindIdentifiantsClef.tradVersNind(attr.getValue('value')))
self.compteurs[1] +=1
elif path.endswith('bowToken') or path.endswith('bowTerm'):
lemma = attr.getValue('lemma').strip()
position = int(attr.getValue('position'))
length = int(attr.getValue('length'))
if len(lemma) != 0: self.termSet.add((position, length, lemma))
#self.termSet.add((position, length, lemma))
elif path.endswith('bowNamedEntity'):
lemma = attr.getValue('lemma').strip()
position = int(attr.getValue('position'))
length = int(attr.getValue('length'))
typeNe = attr.getValue('type')
#if len(lemma) != 0: self.termSet.add((position, length, typeNe + ':' + lemma))
self.termSet.add((position, length, typeNe + ':' + lemma))
def endNodeCallback(self, path):
if path == '/MultimediaDocuments/node/node':
self.compteurs[2] += len(self.termSet)
termList = list(self.termSet)
termList.sort()
self.outFile.write('%d'%(self.docId))
for (position, length, lemma) in termList:
self.outFile.write(' %s %d,%d'%(lemma, position, length))
self.outFile.write('\n')
def textCallback(self, path, text):
        # SAX may deliver text in several chunks!
        # it is therefore IMPERATIVE to code it this way
        #self.text += text
return
if __name__ == '__main__':
main()
|
jys/nind
|
src/py/amose/Nind_convertitXmlClef.py
|
Python
|
lgpl-3.0
| 5,417
|
import errno
import io
import os
import shutil
from django.core.cache import cache as default_cache, caches
from django.core.cache.backends.base import InvalidCacheBackendError
from django.core.files import File
from django.core.urlresolvers import reverse
from django.db import models
from django.dispatch import receiver
from PIL import (Image as PILImage,
ImageFile,
JpegImagePlugin)
from betty.conf.app import settings
from betty.cropper.flush import get_cache_flusher
from betty.cropper.tasks import search_image_quality
from jsonfield import JSONField
# Make best effort to load corrupt images
ImageFile.LOAD_TRUNCATED_IMAGES = True
logger = __import__('logging').getLogger(__name__)
ANIMATED_EXTENSIONS = ['gif', 'jpg']
CROP_EXTENSIONS = ["png", "jpg"]
def source_upload_to(instance, filename):
return os.path.join(instance.path(), filename)
def optimized_upload_to(instance, filename):
_path, ext = os.path.splitext(filename)
return os.path.join(instance.path(), "optimized{}".format(ext))
def optimize_image(image_model, image_buffer, filename):
im = PILImage.open(image_buffer)
# Let's cache some important stuff
format = im.format
icc_profile = im.info.get("icc_profile")
quantization = getattr(im, "quantization", None)
subsampling = None
if format == "JPEG":
try:
subsampling = JpegImagePlugin.get_sampling(im)
except IndexError:
# Ignore if sampling fails
logger.debug('JPEG sampling failed, ignoring')
except:
# mparent(2016-03-25): Eventually eliminate "catch all", but need to log errors to see
# if we're missing any other exception types in the wild
logger.exception('JPEG sampling error')
if im.size[0] > settings.BETTY_MAX_WIDTH:
# If the image is really large, we'll save a more reasonable version as the "original"
height = settings.BETTY_MAX_WIDTH * float(im.size[1]) / float(im.size[0])
im = im.resize((settings.BETTY_MAX_WIDTH, int(round(height))), PILImage.ANTIALIAS)
out_buffer = io.BytesIO()
if format == "JPEG" and im.mode == "RGB":
# For JPEG files, we need to make sure that we keep the quantization profile
try:
im.save(
out_buffer,
icc_profile=icc_profile,
qtables=quantization,
subsampling=subsampling,
format="JPEG")
except ValueError as e:
# Maybe the image already had an invalid quant table?
if e.args[:1] == ('Invalid quantization table',):
out_buffer = io.BytesIO() # Make sure it's empty after failed save attempt
im.save(
out_buffer,
icc_profile=icc_profile,
format=format,
)
else:
raise
else:
im.save(out_buffer,
icc_profile=icc_profile,
format=format)
image_model.optimized.save(filename, File(out_buffer))
else:
# No modifications, just save original as optimized
image_buffer.seek(0)
image_model.optimized.save(filename, File(image_buffer))
image_model.save()
class Ratio(object):
def __init__(self, ratio):
self.string = ratio
self.height = 0
self.width = 0
if ratio != "original":
if len(ratio.split("x")) != 2:
raise ValueError("Improper ratio!")
self.width = int(ratio.split("x")[0])
self.height = int(ratio.split("x")[1])
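# In short: Ratio("16x9") yields width=16/height=9, Ratio("original") keeps
# both dimensions at 0, and a malformed string such as Ratio("16:9") raises
# ValueError("Improper ratio!").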
class ImageManager(models.Manager):
def create_from_path(self, path, filename=None, name=None, credit=None):
"""Creates an image object from a TemporaryUploadedFile insance"""
image_buffer = io.BytesIO(open(path, 'rb').read())
im = PILImage.open(image_buffer)
if filename is None:
filename = os.path.split(path)[1]
if name is None:
name = filename
image = self.create(
name=name,
credit=credit,
width=im.size[0],
height=im.size[1]
)
# Copy temp image file to S3
image_buffer.seek(0)
image.source.save(filename, File(image_buffer))
# If the image is a GIF, we need to do some special stuff
if im.format == "GIF":
image.animated = True
image.save()
# Use temp image path (instead of pulling from S3)
image_buffer.seek(0)
optimize_image(image_model=image, image_buffer=image_buffer, filename=filename)
if settings.BETTY_JPEG_QUALITY_RANGE:
search_image_quality.apply_async(args=(image.id,))
return image
def save_crop_to_disk(image_data, path):
try:
os.makedirs(os.path.dirname(path))
except OSError as e:
if e.errno != errno.EEXIST:
raise e
with open(path, 'wb+') as out:
out.write(image_data)
def _read_from_storage(file_field):
"""Convenience wrapper to cache strorage backend and ensure entire file is read and properly
closed.
Currently source images are never deleted, so there is no need for a cache.delete() call
anywhere. To be safe, cache expriation is set via BETTY_CACHE_STORAGE_SEC.
"""
if file_field:
try:
cache = caches['storage']
except InvalidCacheBackendError:
cache = default_cache
cache_key = ':'.join(['storage', file_field.name])
raw_image = cache.get(cache_key)
if not raw_image:
with file_field as f:
raw_image = f.read()
cache.set(cache_key, raw_image, settings.BETTY_CACHE_STORAGE_SEC)
return io.BytesIO(raw_image)
class Image(models.Model):
name = models.CharField(max_length=255)
credit = models.CharField(max_length=120, null=True, blank=True)
source = models.FileField(upload_to=source_upload_to,
max_length=255, null=True, blank=True)
optimized = models.FileField(upload_to=optimized_upload_to,
max_length=255, null=True, blank=True)
height = models.IntegerField(null=True, blank=True)
width = models.IntegerField(null=True, blank=True)
selections = JSONField(null=True, blank=True)
jpeg_quality = models.IntegerField(null=True, blank=True)
jpeg_quality_settings = JSONField(null=True, blank=True)
animated = models.BooleanField(default=False)
# Used for "If-Modified-Since/304" handling
last_modified = models.DateTimeField(auto_now=True)
objects = ImageManager()
class Meta:
permissions = (
("read", "Can search images, and see the detail data"),
("crop", "Can crop images")
)
@property
def id_string(self):
id_string = ""
for index, char in enumerate(str(self.id)):
if index % 4 == 0 and index != 0:
id_string += "/"
id_string += char
return id_string
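    # For example, an image with id 123456 has id_string "1234/56", the same
    # path fragment used by get_absolute_url() and get_animated_url() below.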
@property
def best(self):
"""Convenience method to prefer optimzied over source image, if available."""
if self.optimized:
return self.optimized
else:
return self.source
def read_best_bytes(self):
return _read_from_storage(self.best)
def read_source_bytes(self):
return _read_from_storage(self.source)
def read_optimized_bytes(self):
return _read_from_storage(self.optimized)
def get_height(self):
"""Lazily returns the height of the image
If the width exists in the database, that value will be returned,
otherwise the width will be read source image."""
if not self.height:
self._refresh_dimensions()
return self.height
def get_width(self):
"""Lazily returns the width of the image
If the width exists in the database, that value will be returned,
otherwise the width will be read source image."""
if not self.width:
self._refresh_dimensions()
return self.width
def _refresh_dimensions(self):
img = PILImage.open(self.read_source_bytes())
self.height = img.size[1]
self.width = img.size[0]
def get_selection(self, ratio):
"""Returns the image selection for a given ratio
If the selection for this ratio has been set manually, that value
is returned exactly, otherwise the selection is auto-generated."""
# This is kiiiiinda a hack. If we have an optimized image, hack up the height and width.
if self.width > settings.BETTY_MAX_WIDTH and self.optimized:
height = settings.BETTY_MAX_WIDTH * float(self.height) / float(self.width)
self.height = int(round(height))
self.width = settings.BETTY_MAX_WIDTH
selection = None
if self.selections is not None:
if ratio.string in self.selections:
selection = self.selections.get(ratio.string)
# Here I need to check for all kinds of bad data.
if selection['y1'] > self.get_height() or selection['x1'] > self.get_width():
selection = None
elif selection['y1'] < selection['y0'] or selection['x1'] < selection['x0']:
selection = None
else:
for key in ('x0', 'x1', 'y0', 'y1'):
if selection[key] < 0:
selection = None
break
if selection is None:
source_aspect = self.get_width() / float(self.get_height())
selection_aspect = ratio.width / float(ratio.height)
min_x = 0
min_y = 0
max_x = self.get_width()
max_y = self.get_height()
if source_aspect > selection_aspect:
offset = (max_x - (max_y * ratio.width / ratio.height)) / 2.0
min_x = offset
max_x -= offset
if source_aspect < selection_aspect:
offset = (max_y - (max_x * ratio.height / ratio.width)) / 2.0
min_y = offset
max_y -= offset
selection = {
'x0': int(min_x),
'y0': int(min_y),
'x1': int(max_x),
'y1': int(max_y)
}
if selection['y1'] > self.get_height():
selection['y1'] = int(self.get_height())
if selection['x1'] > self.get_width():
selection['x1'] = int(self.get_width())
if selection['x0'] < 0:
selection['x0'] = 0
if selection['y0'] < 0:
selection['y0'] = 0
return selection
def clear_crops(self, ratios=None):
if ratios is None:
ratios = list(settings.BETTY_RATIOS)
ratios.append("original")
# Optional cache flush support
flusher = get_cache_flusher()
if flusher:
paths = []
for ratio_slug in ratios:
                # Since we might not know which formats to flush (crops may not be
                # saved to disk), we need to flush all possible crops.
paths += [self.get_absolute_url(ratio=ratio_slug, width=width, extension=extension)
for extension in CROP_EXTENSIONS
for width in sorted(set(settings.BETTY_WIDTHS +
settings.BETTY_CLIENT_ONLY_WIDTHS))]
if self.animated:
for extension in ANIMATED_EXTENSIONS:
paths.append(self.get_animated_url(extension=extension))
flusher(paths)
# Optional disk crops support
if settings.BETTY_SAVE_CROPS_TO_DISK:
for ratio_slug in (ratios + ['animated']):
ratio_path = os.path.join(self.path(settings.BETTY_SAVE_CROPS_TO_DISK_ROOT),
ratio_slug)
if os.path.exists(ratio_path):
shutil.rmtree(ratio_path)
def get_jpeg_quality(self, width):
quality = None
if self.jpeg_quality_settings:
closest = 0
for w, q in self.jpeg_quality_settings.items():
if abs(width - int(w)) < abs(width - closest):
closest = int(w)
quality = self.jpeg_quality_settings[w]
return quality
def path(self, root=None):
id_string = ""
for index, char in enumerate(str(self.id)):
if index % 4 == 0:
id_string += "/"
id_string += char
if root is None:
root = settings.BETTY_IMAGE_ROOT
return os.path.join(root, id_string[1:])
def get_source(self):
image_bytes = self.read_source_bytes()
# Detect format
img = PILImage.open(image_bytes)
return image_bytes.getvalue(), img.format.lower()
def get_animated(self, extension):
"""Legacy (Pre-v2.0) animated behavior.
Originally betty just wrote these to disk on image creation and let NGINX try-files
automatically serve these animated GIF + JPG.
"""
assert self.animated
img_bytes = self.read_best_bytes()
if extension == "jpg":
# Thumbnail
img = PILImage.open(img_bytes)
if img.mode != "RGB":
img = img.convert("RGB")
img_bytes = io.BytesIO()
img.save(img_bytes, "JPEG")
elif extension != "gif":
raise Exception('Unsupported extension')
if settings.BETTY_SAVE_CROPS_TO_DISK:
save_crop_to_disk(img_bytes.getvalue(),
os.path.join(self.path(settings.BETTY_SAVE_CROPS_TO_DISK_ROOT),
'animated',
'original.{}'.format(extension)))
return img_bytes.getvalue()
def crop(self, ratio, width, extension):
img = PILImage.open(self.read_best_bytes())
icc_profile = img.info.get("icc_profile")
if ratio.string == 'original':
ratio.width = img.size[0]
ratio.height = img.size[1]
selection = self.get_selection(ratio)
try:
img = img.crop((selection['x0'], selection['y0'], selection['x1'], selection['y1']))
except ValueError:
# Looks like we have bad height and width data. Let's reload that and try again.
self.width = img.size[0]
self.height = img.size[1]
self.save()
selection = self.get_selection(ratio)
img = img.crop((selection['x0'], selection['y0'], selection['x1'], selection['y1']))
height = int(round(width * float(ratio.height) / float(ratio.width)))
img = img.resize((width, height), PILImage.ANTIALIAS)
if extension == "jpg":
if img.mode != "RGB":
img = img.convert("RGB")
pillow_kwargs = {"format": "jpeg"}
if self.get_jpeg_quality(width):
pillow_kwargs["quality"] = self.get_jpeg_quality(width)
elif img.format == "JPEG":
pillow_kwargs["quality"] = "keep"
else:
pillow_kwargs["quality"] = settings.BETTY_DEFAULT_JPEG_QUALITY
if extension == "png":
# Fix "cannot write mode CMYK as PNG" errors
# https://github.com/python-pillow/Pillow/issues/1380
if img.mode == 'CMYK':
img = img.convert('RGB')
pillow_kwargs = {"format": "png"}
if icc_profile:
pillow_kwargs["icc_profile"] = icc_profile
tmp = io.BytesIO()
img.save(tmp, **pillow_kwargs)
if settings.BETTY_SAVE_CROPS_TO_DISK:
# We only want to save this to the filesystem if it's one of our usual widths.
if width in settings.BETTY_WIDTHS or not settings.BETTY_WIDTHS:
ratio_dir = os.path.join(self.path(settings.BETTY_SAVE_CROPS_TO_DISK_ROOT),
ratio.string)
save_crop_to_disk(tmp.getvalue(),
os.path.join(ratio_dir, "%d.%s" % (width, extension)))
return tmp.getvalue()
def get_absolute_url(self, ratio="original", width=600, extension="jpg"):
return reverse("betty.cropper.views.crop", kwargs={
"id": self.id_string,
"ratio_slug": ratio,
"width": width,
"extension": extension
})
def get_animated_url(self, extension="gif"):
return reverse("betty.cropper.views.animated", kwargs={
"id": self.id_string,
"extension": extension
})
def to_native(self):
"""Returns a Python dictionary, sutiable for Serialization"""
# This is kiiiiinda a hack. If we have an optimized image, hack up the height and width.
if self.width > settings.BETTY_MAX_WIDTH and self.optimized:
height = settings.BETTY_MAX_WIDTH * float(self.height) / float(self.width)
self.height = int(round(height))
self.width = settings.BETTY_MAX_WIDTH
data = {
'id': self.id,
'name': self.name,
'width': self.get_width(),
'height': self.get_height(),
'credit': self.credit,
'selections': {}
}
for ratio in settings.BETTY_RATIOS:
data['selections'][ratio] = self.get_selection(Ratio(ratio))
data['selections'][ratio]["source"] = "auto"
if self.selections and data['selections'][ratio] == self.selections.get(ratio):
data['selections'][ratio]["source"] = "user"
return data
def cache_key(self):
"""
Returns string unique to cache instance
"""
return "image-{}".format(self.id)
@receiver(models.signals.post_delete, sender=Image)
def auto_flush_and_delete_files_on_delete(sender, instance, **kwargs):
instance.clear_crops()
for file_field in [instance.source, instance.optimized]:
if file_field:
file_field.delete(save=False)
|
theonion/betty-cropper
|
betty/cropper/models.py
|
Python
|
mit
| 18,486
|
#!/usr/bin/env python
import sys
from collections import defaultdict
VICTORY = [17, 61]
bots = defaultdict(list)
instr = [x.strip() for x in sys.stdin]
while instr:
command = instr.pop(0)
ops = command.split()
if ops[0] == 'value':
val, bot = int(ops[1]), ops[-1]
bots[bot].append(val)
bots[bot] = sorted(bots[bot])
if bots[bot] == VICTORY:
break
continue
if ops[0] != 'bot':
raise Exception('bad command')
# resolution
bot = ops[1]
if len(bots[bot]) != 2:
instr.append(command)
# print 'Deferring', command
continue
# low
if ops[5] == 'bot':
target = ops[6]
bots[target].append(bots[bot][0])
bots[target] = sorted(bots[target])
if bots[bot] == VICTORY:
break
# high
if ops[-2] == 'bot':
target = ops[-1]
bots[target].append(bots[bot][1])
bots[target] = sorted(bots[target])
if bots[bot] == VICTORY:
break
# print ops
for i, bot in bots.iteritems():
if bot == VICTORY:
print i, bot
|
bildzeitung/2016adventofcode
|
10/1.py
|
Python
|
gpl-3.0
| 1,124
|
from __future__ import absolute_import, print_function
import random
import socket
import string
import sys
import time
import unittest2 as unittest
import warnings
import weakref
from nose import SkipTest
from kombu import Connection
from kombu import Exchange, Queue
from kombu.five import range
if sys.version_info >= (2, 5):
from hashlib import sha256 as _digest
else:
from sha import new as _digest # noqa
def _nobuf(x):
return [str(i) if isinstance(i, buffer) else i for i in x]
def consumeN(conn, consumer, n=1, timeout=30):
messages = []
def callback(message_data, message):
messages.append(message_data)
message.ack()
prev, consumer.callbacks = consumer.callbacks, [callback]
consumer.consume()
seconds = 0
while True:
try:
conn.drain_events(timeout=1)
except socket.timeout:
seconds += 1
msg = 'Received %s/%s messages. %s seconds passed.' % (
len(messages), n, seconds)
if seconds >= timeout:
raise socket.timeout(msg)
if seconds > 1:
print(msg)
if len(messages) >= n:
break
consumer.cancel()
consumer.callback = prev
return messages
class TransportCase(unittest.TestCase):
transport = None
prefix = None
sep = '.'
userid = None
password = None
event_loop_max = 100
connection_options = {}
suppress_disorder_warning = False
reliable_purge = True
connected = False
skip_test_reason = None
message_size_limit = None
def before_connect(self):
pass
def after_connect(self, connection):
pass
def setUp(self):
if self.transport:
try:
self.before_connect()
except SkipTest as exc:
self.skip_test_reason = str(exc)
else:
self.do_connect()
self.exchange = Exchange(self.prefix, 'direct')
self.queue = Queue(self.prefix, self.exchange, self.prefix)
def purge(self, names):
chan = self.connection.channel()
total = 0
for queue in names:
while 1:
                # ensure the queue is completely empty
purged = chan.queue_purge(queue=queue)
if not purged:
break
total += purged
chan.close()
return total
def get_connection(self, **options):
if self.userid:
options.setdefault('userid', self.userid)
if self.password:
options.setdefault('password', self.password)
return Connection(transport=self.transport, **options)
def do_connect(self):
self.connection = self.get_connection(**self.connection_options)
try:
self.connection.connect()
self.after_connect(self.connection)
except self.connection.connection_errors:
self.skip_test_reason = '{0} transport cannot connect'.format(
self.transport,
)
else:
self.connected = True
def verify_alive(self):
if self.transport:
if not self.connected:
raise SkipTest(self.skip_test_reason)
return True
def purge_consumer(self, consumer):
return self.purge([queue.name for queue in consumer.queues])
def test_produce__consume(self):
if not self.verify_alive():
return
chan1 = self.connection.channel()
consumer = chan1.Consumer(self.queue)
self.purge_consumer(consumer)
producer = chan1.Producer(self.exchange)
producer.publish({'foo': 'bar'}, routing_key=self.prefix)
message = consumeN(self.connection, consumer)
self.assertDictEqual(message[0], {'foo': 'bar'})
chan1.close()
self.purge([self.queue.name])
def test_purge(self):
if not self.verify_alive():
return
chan1 = self.connection.channel()
consumer = chan1.Consumer(self.queue)
self.purge_consumer(consumer)
producer = chan1.Producer(self.exchange)
for i in range(10):
producer.publish({'foo': 'bar'}, routing_key=self.prefix)
if self.reliable_purge:
self.assertEqual(consumer.purge(), 10)
self.assertEqual(consumer.purge(), 0)
else:
purged = 0
while purged < 9:
purged += self.purge_consumer(consumer)
def _digest(self, data):
return _digest(data).hexdigest()
def test_produce__consume_large_messages(
self, bytes=1048576, n=10,
charset=string.punctuation + string.letters + string.digits):
if not self.verify_alive():
return
bytes = min(x for x in [bytes, self.message_size_limit] if x)
messages = [''.join(random.choice(charset)
for j in range(bytes)) + '--%s' % n
for i in range(n)]
digests = []
chan1 = self.connection.channel()
consumer = chan1.Consumer(self.queue)
self.purge_consumer(consumer)
producer = chan1.Producer(self.exchange)
for i, message in enumerate(messages):
producer.publish({'text': message,
'i': i}, routing_key=self.prefix)
digests.append(self._digest(message))
received = [(msg['i'], msg['text'])
for msg in consumeN(self.connection, consumer, n)]
self.assertEqual(len(received), n)
ordering = [i for i, _ in received]
if ordering != list(range(n)) and not self.suppress_disorder_warning:
warnings.warn(
'%s did not deliver messages in FIFO order: %r' % (
self.transport, ordering))
for i, text in received:
if text != messages[i]:
raise AssertionError('%i: %r is not %r' % (
i, text[-100:], messages[i][-100:]))
self.assertEqual(self._digest(text), digests[i])
chan1.close()
self.purge([self.queue.name])
def P(self, rest):
return '%s%s%s' % (self.prefix, self.sep, rest)
def test_produce__consume_multiple(self):
if not self.verify_alive():
return
chan1 = self.connection.channel()
producer = chan1.Producer(self.exchange)
b1 = Queue(self.P('b1'), self.exchange, 'b1')(chan1)
b2 = Queue(self.P('b2'), self.exchange, 'b2')(chan1)
b3 = Queue(self.P('b3'), self.exchange, 'b3')(chan1)
[q.declare() for q in (b1, b2, b3)]
self.purge([b1.name, b2.name, b3.name])
producer.publish('b1', routing_key='b1')
producer.publish('b2', routing_key='b2')
producer.publish('b3', routing_key='b3')
chan1.close()
chan2 = self.connection.channel()
consumer = chan2.Consumer([b1, b2, b3])
messages = consumeN(self.connection, consumer, 3)
self.assertItemsEqual(_nobuf(messages), ['b1', 'b2', 'b3'])
chan2.close()
self.purge([self.P('b1'), self.P('b2'), self.P('b3')])
def test_timeout(self):
if not self.verify_alive():
return
chan = self.connection.channel()
self.purge([self.queue.name])
consumer = chan.Consumer(self.queue)
self.assertRaises(
socket.timeout, self.connection.drain_events, timeout=0.3,
)
consumer.cancel()
chan.close()
def test_basic_get(self):
if not self.verify_alive():
return
chan1 = self.connection.channel()
producer = chan1.Producer(self.exchange)
chan2 = self.connection.channel()
queue = Queue(self.P('basic_get'), self.exchange, 'basic_get')
queue = queue(chan2)
queue.declare()
producer.publish({'basic.get': 'this'}, routing_key='basic_get')
chan1.close()
for i in range(self.event_loop_max):
m = queue.get()
if m:
break
time.sleep(0.1)
self.assertEqual(m.payload, {'basic.get': 'this'})
self.purge([queue.name])
chan2.close()
def test_cyclic_reference_transport(self):
if not self.verify_alive():
return
def _createref():
conn = self.get_connection()
conn.transport
conn.close()
return weakref.ref(conn)
self.assertIsNone(_createref()())
def test_cyclic_reference_connection(self):
if not self.verify_alive():
return
def _createref():
conn = self.get_connection()
conn.connect()
conn.close()
return weakref.ref(conn)
self.assertIsNone(_createref()())
def test_cyclic_reference_channel(self):
if not self.verify_alive():
return
def _createref():
conn = self.get_connection()
conn.connect()
chanrefs = []
try:
for i in range(100):
channel = conn.channel()
chanrefs.append(weakref.ref(channel))
channel.close()
finally:
conn.close()
return chanrefs
for chanref in _createref():
self.assertIsNone(chanref())
def tearDown(self):
if self.transport and self.connected:
self.connection.close()
|
mverrilli/kombu
|
funtests/transport.py
|
Python
|
bsd-3-clause
| 9,529
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
import contextlib
import mock
from stoqlib.api import api
from stoqlib.domain.payment.payment import Payment
from stoqlib.domain.purchase import PurchaseOrder
from stoqlib.lib.dateutils import localdate
from stoqlib.gui.dialogs.paymentcommentsdialog import PaymentCommentsDialog
from stoqlib.gui.dialogs.paymentchangedialog import PaymentDueDateChangeDialog
from stoqlib.gui.editors.paymenteditor import OutPaymentEditor
from stoqlib.gui.editors.paymentseditor import PurchasePaymentsEditor
from stoqlib.gui.search.paymentsearch import OutPaymentBillCheckSearch
from stoqlib.gui.slaves.paymentconfirmslave import PurchasePaymentConfirmSlave
from stoqlib.reporting.paymentsreceipt import OutPaymentReceipt
from stoq.gui.payable import PayableApp
from stoq.gui.test.baseguitest import BaseGUITest
class TestPayable(BaseGUITest):
def _check_run_dialog(self, app, action, dialog, other_args):
with contextlib.nested(
mock.patch('stoq.gui.payable.run_dialog'),
mock.patch('stoq.gui.payable.api.new_store'),
mock.patch.object(self.store, 'commit'),
mock.patch.object(self.store, 'close')) as ctx:
new_store = ctx[1]
new_store.return_value = self.store
self.activate(action)
run_dialog = ctx[0]
self.assertEqual(run_dialog.call_count, 1)
args, kwargs = run_dialog.call_args
self.assertEquals(args[0], dialog)
self.assertEquals(args[1], app)
self.assertEquals(args[2], self.store)
if not other_args or len(other_args) != len(args[2:]):
return
for arg in args[2:]:
for other_arg in other_args:
self.assertEquals(arg, other_arg)
def setUp(self):
BaseGUITest.setUp(self)
def create_purchase_payment(self):
branch = api.get_current_branch(self.store)
order = self.create_purchase_order(branch=branch)
order.identifier = 12345
order.status = PurchaseOrder.ORDER_PENDING
order.add_item(self.create_sellable(), 1)
payment = self.add_payments(order, method_type=u'money')[0]
payment.open_date = payment.due_date = localdate(2012, 1, 1)
order.confirm()
payment.identifier = 67890
order.close()
return order, payment
def test_initial(self):
app = self.create_app(PayableApp, u'payable')
self.check_app(app, u'payable')
def test_select(self):
purchase, payment = self.create_purchase_payment()
app = self.create_app(PayableApp, u'payable')
olist = app.results
olist.select(olist[1])
self.check_app(app, u'payable-selected')
@mock.patch('stoq.gui.payable.run_dialog')
def test_pay(self, run_dialog):
purchase, payment = self.create_purchase_payment()
app = self.create_app(PayableApp, u'payable')
olist = app.results
olist.select(olist[1])
with mock.patch('stoq.gui.payable.api', new=self.fake.api):
self.activate(app.Pay)
run_dialog.assert_called_once_with(
PurchasePaymentConfirmSlave, app,
self.store.readonly, payments=[payment])
@mock.patch('stoq.gui.payable.run_dialog')
def test_edit(self, run_dialog):
purchase, payment = self.create_purchase_payment()
app = self.create_app(PayableApp, u'payable')
olist = app.results
olist.select(olist[1])
with mock.patch('stoq.gui.payable.api', new=self.fake.api):
self.activate(app.Edit)
run_dialog.assert_called_once_with(
PurchasePaymentsEditor, app,
self.store.readonly, purchase)
@mock.patch('stoq.gui.accounts.run_dialog')
def test_change_due_date_dialog(self, run_dialog):
purchase, payment = self.create_purchase_payment()
app = self.create_app(PayableApp, u'payable')
olist = app.results
olist.select(olist[1])
with mock.patch('stoq.gui.accounts.api', new=self.fake.api):
self.activate(app.ChangeDueDate)
run_dialog.assert_called_once_with(
PaymentDueDateChangeDialog, app,
self.store.readonly, payment, purchase)
@mock.patch('stoq.gui.accounts.run_dialog')
def test_details(self, run_dialog):
purchase, payment = self.create_purchase_payment()
app = self.create_app(PayableApp, u'payable')
olist = app.results
olist.select(olist[1])
with mock.patch('stoq.gui.accounts.api', new=self.fake.api):
self.activate(app.Details)
run_dialog.assert_called_once_with(
OutPaymentEditor, app,
self.store.readonly, payment)
@mock.patch('stoq.gui.accounts.run_dialog')
def test_comments(self, run_dialog):
purchase, payment = self.create_purchase_payment()
app = self.create_app(PayableApp, u'payable')
olist = app.results
olist.select(olist[1])
with mock.patch('stoq.gui.accounts.api', new=self.fake.api):
self.activate(app.Comments)
run_dialog.assert_called_once_with(
PaymentCommentsDialog, app,
self.store.readonly, payment)
def test_can_edit(self):
purchase, payment = self.create_purchase_payment()
purchase.status = PurchaseOrder.ORDER_CANCELLED
app = self.create_app(PayableApp, u'payable')
olist = app.results
self.assertFalse(app._can_edit([olist[-1]]))
def test_can_pay(self):
sale, payment1 = self.create_purchase_payment()
payment2 = self.add_payments(sale, method_type=u'bill')[0]
payment2.identifier = 67891
app = self.create_app(PayableApp, u'payable')
olist = app.results
payments = list(olist)[-2:]
for payment in payments:
payment.status = Payment.STATUS_PENDING
self.assertTrue(app._can_pay(payments))
@mock.patch('stoq.gui.payable.print_report')
@mock.patch('stoq.gui.payable.localtoday')
def test_print_receipt(self, localtoday_, print_report):
today_ = localdate(2012, 1, 1)
localtoday_.return_value = today_
purchase, payment = self.create_purchase_payment()
payment.pay()
app = self.create_app(PayableApp, u'payable')
olist = app.results
olist.select(olist[-1])
self.activate(app.PrintReceipt)
print_report.assert_called_once_with(OutPaymentReceipt, payment=payment,
order=purchase, date=today_.date())
@mock.patch('stoq.gui.payable.PayableApp.change_status')
def test_cancel_payment(self, change_status):
payment = self.create_payment()
payment.status = Payment.STATUS_PENDING
payment.payment_type = Payment.TYPE_OUT
app = self.create_app(PayableApp, u'payable')
olist = app.results
olist.select(olist[-1])
self.activate(app.CancelPayment)
change_status.assert_called_once_with(olist[-1], None,
Payment.STATUS_CANCELLED)
@mock.patch('stoq.gui.payable.PayableApp.change_status')
def test_set_not_paid(self, change_status):
purchase, payment = self.create_purchase_payment()
payment.pay()
app = self.create_app(PayableApp, u'payable')
olist = app.results
olist.select(olist[-1])
self.activate(app.SetNotPaid)
change_status.assert_called_once_with(olist[-1], purchase,
Payment.STATUS_PENDING)
@mock.patch('stoq.gui.payable.PayableApp.change_due_date')
def test_change_due_date(self, change_due_date):
purchase, payment = self.create_purchase_payment()
app = self.create_app(PayableApp, u'payable')
olist = app.results
olist.select(olist[-1])
self.activate(app.ChangeDueDate)
change_due_date.assert_called_once_with(olist[-1], purchase)
def test_run_search(self):
app = self.create_app(PayableApp, u'payable')
self._check_run_dialog(app, app.BillCheckSearch,
OutPaymentBillCheckSearch, [])
|
andrebellafronte/stoq
|
stoq/gui/test/test_payable.py
|
Python
|
gpl-2.0
| 9,151
|
class DiffusiveConductanceTest:
def test_bulk_diffusion(self):
pass
|
amdouglas/OpenPNM
|
test/unit/Physics/models/DiffusiveConductanceTest.py
|
Python
|
mit
| 80
|
from importlib import _bootstrap
from . import util
import collections
import imp
import sys
import unittest
class PathHookTests(unittest.TestCase):
"""Test the path hook for extension modules."""
# XXX Should it only succeed for pre-existing directories?
# XXX Should it only work for directories containing an extension module?
def hook(self, entry):
return _bootstrap._ExtensionFileFinder(entry)
def test_success(self):
# Path hook should handle a directory where a known extension module
# exists.
        self.assertTrue(hasattr(self.hook(util.PATH), 'find_module'))
def test_main():
from test.support import run_unittest
run_unittest(PathHookTests)
if __name__ == '__main__':
test_main()
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-3.1/Lib/importlib/test/extension/test_path_hook.py
|
Python
|
mit
| 757
|
#
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
from django.conf import settings
from django.forms import SelectMultiple
import django_filters
from pdc.apps.common.filters import MultiValueFilter, NullableCharFilter
from . import models
class RPMFilter(django_filters.FilterSet):
name = MultiValueFilter()
version = MultiValueFilter()
epoch = MultiValueFilter()
release = MultiValueFilter()
arch = MultiValueFilter()
srpm_name = MultiValueFilter()
srpm_nevra = NullableCharFilter()
filename = MultiValueFilter()
compose = MultiValueFilter(name='composerpm__variant_arch__variant__compose__compose_id',
distinct=True)
linked_release = MultiValueFilter(name='linked_releases__release_id', distinct=True)
class Meta:
model = models.RPM
fields = ('name', 'version', 'epoch', 'release', 'arch', 'srpm_name',
'srpm_nevra', 'compose', 'filename', 'linked_release')
class ImageFilter(django_filters.FilterSet):
file_name = MultiValueFilter()
image_format = MultiValueFilter(name='image_format__name')
image_type = MultiValueFilter(name='image_type__name')
disc_number = MultiValueFilter()
disc_count = MultiValueFilter()
arch = MultiValueFilter()
mtime = MultiValueFilter()
size = MultiValueFilter()
implant_md5 = MultiValueFilter()
volume_id = MultiValueFilter()
md5 = MultiValueFilter()
sha1 = MultiValueFilter()
sha256 = MultiValueFilter()
compose = MultiValueFilter(name='composeimage__variant_arch__variant__compose__compose_id',
distinct=True)
class Meta:
model = models.Image
fields = ('file_name', 'image_format', 'image_type', 'disc_number',
'disc_count', 'arch', 'mtime', 'size', 'bootable',
'implant_md5', 'volume_id', 'md5', 'sha1', 'sha256')
class BuildImageFilter(django_filters.FilterSet):
if settings.WITH_BINDINGS:
component_name = django_filters.MethodFilter(action='filter_by_component_name',
widget=SelectMultiple)
else:
component_name = MultiValueFilter(name='rpms__srpm_name', distinct=True)
rpm_version = MultiValueFilter(name='rpms__version', distinct=True)
rpm_release = MultiValueFilter(name='rpms__release', distinct=True)
image_id = MultiValueFilter()
image_format = MultiValueFilter(name='image_format__name')
md5 = MultiValueFilter()
archive_build_nvr = MultiValueFilter(name='archives__build_nvr', distinct=True)
archive_name = MultiValueFilter(name='archives__name', distinct=True)
archive_size = MultiValueFilter(name='archives__size', distinct=True)
archive_md5 = MultiValueFilter(name='archives__md5', distinct=True)
release_id = MultiValueFilter(name='releases__release_id', distinct=True)
def filter_by_component_name(self, queryset, value):
from pdc.apps.bindings import models as binding_models
srpm_names = binding_models.ReleaseComponentSRPMNameMapping.objects.filter(
release_component__name__in=value).distinct().values_list('srpm_name')
if value:
if srpm_names:
return queryset.filter(rpms__srpm_name__in=srpm_names).distinct()
else:
return queryset.filter(rpms__srpm_name__in=value).distinct()
else:
return queryset
class Meta:
model = models.BuildImage
fields = ('component_name', 'rpm_version', 'rpm_release', 'image_id', 'image_format', 'md5',
'archive_build_nvr', 'archive_name', 'archive_size', 'archive_md5', 'release_id')
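# Usage sketch, assuming these FilterSets are wired into DRF list endpoints:
# repeating a MultiValueFilter parameter matches any of the given values, e.g.
#
#     GET /rest_api/v1/rpms/?name=bash&name=coreutils&arch=x86_64
#
# (the endpoint path is hypothetical; it depends on the project's URL conf).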
|
maxamillion/product-definition-center
|
pdc/apps/package/filters.py
|
Python
|
mit
| 4,125
|
import pytest
from tests.setup_transaction_tests import chain as s, tester as t, ethereum_utils as u, check_gas, \
get_contract_with_gas_estimation, get_contract
def test_basic_repeater():
basic_repeater = """
def repeat(z: num) -> num:
x = 0
for i in range(6):
x = x + z
return(x)
"""
c = get_contract_with_gas_estimation(basic_repeater)
assert c.repeat(9) == 54
print('Passed basic repeater test')
def test_digit_reverser():
digit_reverser = """
def reverse_digits(x: num) -> num:
dig: num[6]
z = x
for i in range(6):
dig[i] = z % 10
z = z / 10
o = 0
for i in range(6):
o = o * 10 + dig[i]
return o
"""
c = get_contract_with_gas_estimation(digit_reverser)
assert c.reverse_digits(123456) == 654321
print('Passed digit reverser test')
def test_more_complex_repeater():
more_complex_repeater = """
def repeat() -> num:
out = 0
for i in range(6):
out = out * 10
for j in range(4):
out = out + j
return(out)
"""
c = get_contract_with_gas_estimation(more_complex_repeater)
assert c.repeat() == 666666
print('Passed complex repeater test')
def test_offset_repeater():
offset_repeater = """
def sum() -> num:
out = 0
for i in range(80, 121):
out = out + i
return(out)
"""
c = get_contract_with_gas_estimation(offset_repeater)
assert c.sum() == 4100
print('Passed repeater with offset test')
def test_offset_repeater_2():
offset_repeater_2 = """
def sum(frm: num, to: num) -> num:
out = 0
for i in range(frm, frm + 101):
if i == to:
break
out = out + i
return(out)
"""
c = get_contract_with_gas_estimation(offset_repeater_2)
assert c.sum(100, 99999) == 15150
assert c.sum(70, 131) == 6100
print('Passed more complex repeater with offset test')
|
NedYork/viper
|
tests/parser/features/iteration/test_repeater.py
|
Python
|
mit
| 1,931
|
# http://blog.csdn.net/alex_chen_16/article/details/50900416
|
sniperyen/MyDjango
|
ace/__init__.py
|
Python
|
apache-2.0
| 60
|
# -*- coding: ascii -*-
r"""
:Copyright:
Copyright 2013 - 2015
Andr\xe9 Malo or his licensors, as applicable
:License:
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================
Template Tools
================
Template Tools.
"""
if __doc__:
# pylint: disable = redefined-builtin
__doc__ = __doc__.encode('ascii').decode('unicode_escape')
__author__ = r"Andr\xe9 Malo".encode('ascii').decode('unicode_escape')
__docformat__ = "restructuredtext en"
import pprint as _pprint
from .._exceptions import ( # noqa # pylint: disable = unused-import
DependencyError, DependencyCycle
)
from .. import _graph
class TemplateUndecided(DependencyError):
"""
Template could not be determined, because the decision is ambiguous
The exception argument is a dict mapping undecidable overlay names to
the respective set of template names
"""
class TemplateListProxy(object):
"""
Proxy `TemplateList` instances for lazy creation
:IVariables:
`_list` : ``callable``
List creator
`__list` : ``list``
List cache (if possible)
"""
def __init__(self, creator, autoreload):
"""
Initialization
:Parameters:
`creator` : ``callable``
List creator function
`autoreload` : ``bool``
Autoreload possible?
"""
if autoreload:
create = creator
else:
self.__list = None
def create():
""" self-destroying creator """
if self.__list is None:
self.__list = creator()
self._list = lambda: self.__list
return self.__list
self._list = create
def __getattr__(self, name):
return getattr(self._list(), name)
def __len__(self):
return len(self._list())
def __repr__(self):
return repr(self._list())
def __str__(self):
return str(self._list())
def __add__(self, other):
return self._list() + other
def __mul__(self, other):
return self._list() * other
def __getitem__(self, index):
return self._list()[index]
def __contains__(self, item):
return item in self._list()
def __hash__(self):
return hash(self._list())
def __cmp__(self, other):
return cmp(self._list(), other)
def __iter__(self):
return iter(self._list())
class TemplateList(list):
"""
Container for template names
This class contains the resulting template list, generated by the
layout code.
:IVariables:
`missing` : ``list`` or ``None``
Missing overlays
"""
missing = None
def __init__(*args, **kwargs): # pylint: disable = no-method-argument
"""
Initialization
:Keywords:
`MISSING` : ``iterable``
Missing overlay list
"""
self, args = args[0], args[1:]
missing = kwargs.pop('MISSING', None)
if kwargs:
raise TypeError("Unrecognized keywords")
super(TemplateList, self).__init__(*args)
self.missing = list(missing or ()) or None
def __repr__(self):
"""
Debug representation
:Return: The debug string
:Rtype: ``str``
"""
return "%s(%s%s,%sMISSING=%s%s)" % (
self.__class__.__name__,
self and '\n' or '',
_pprint.pformat(list(self)),
self and '\n\n ' or ' ',
self.missing and '\n' or ' ',
_pprint.pformat(self.missing)
)
@classmethod
def discover(cls, loader, names, use=None, ignore=None):
"""
        Discover templates and create a new template list
:Parameters:
`loader` : `Loader`
Template loader
`names` : ``iterable``
Base names. These templates are always added first, in order and
define the initial list of overlays to discover.
`use` : ``dict``
Extra target mapping (overlay name -> template name). This is
used, before the global overlay mapping is asked. Pass ambiguous
overlay decisions here, or disable certain overlays by passing
``None`` as name.
`ignore` : ``iterable``
List of template names to ignore completely.
:Return: Template list
:Rtype: `TemplateList`
:Exceptions:
- `TemplateUndecided` : Ambiguous template decisions
          - `DependencyCycle` : A dependency cycle occurred
"""
result, missing, undecided = discover(
loader, names, use=use, ignore=ignore
)
if undecided:
raise TemplateUndecided(undecided)
return cls(result, MISSING=missing)
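# A minimal discovery sketch, assuming ``loader`` is a configured `Loader`:
#
#     tlist = TemplateList.discover(loader, ['layout.html', 'page.html'],
#                                   use={'nav': 'nav_plain.html'})
#     if tlist.missing:
#         pass  # overlays that no available template provided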
class Layout(object):
"""
Create template lists based on a start configuration
:IVariables:
`_base` : ``tuple``
Base template list
`_use` : ``dict``
extra overlay -> filename mapping
`_ignore` : ``frozenset``
Template names to ignore
"""
def __init__(self, loader, *base, **kwargs):
"""
Initialization
:Parameters:
`loader` : `Loader`
Template loader
`base` : ``tuple``
Base template list
`kwargs` : ``dict``
Keywords
:Keywords:
`use` : ``dict``
extra overlay -> filename mapping
`ignore` : ``iterable``
template names to ignore
`cls` : ``callable``
template list factory. If omitted or ``None``, `TemplateList` is
used.
`lazy` : ``bool``
Lazy loading?
"""
use = kwargs.pop('use', None)
ignore = kwargs.pop('ignore', None)
cls = kwargs.pop('cls', None)
lazy = kwargs.pop('lazy', None)
if kwargs:
raise TypeError("Unrecognized keywords")
self._base = base
self._use = dict(use or ())
self._ignore = frozenset(ignore or ())
self._loader = loader
self._cls = cls is None and TemplateList or cls
self._lazy = lazy is None and True or bool(lazy)
def extend(self, *base, **kwargs):
"""
Extend the layout and create a new one.
:Parameters:
`base` : ``tuple``
Base template list
`kwargs` : ``dict``
Keywords
:Keywords:
`use` : ``dict``
extra overlay -> filename mapping
`ignore` : ``iterable``
template names to ignore
`consider` : ``iterable``
Template names to "unignore"
:Return: New layout
:Rtype: `Layout`
"""
use = kwargs.pop('use', None)
ignore = kwargs.pop('ignore', None)
consider = kwargs.pop('consider', None)
if kwargs:
raise TypeError("Unrecognized keywords")
newbase = tuple(self._base) + base
newuse = self._use
if use:
newuse = dict(newuse)
newuse.update(use)
newignore = self._ignore
if ignore or consider:
newignore = set(newignore)
if ignore:
newignore.update(set(ignore))
if consider:
newignore -= set(consider)
return self.__class__(self._loader, *newbase, **dict(
use=newuse, ignore=newignore, cls=self._cls, lazy=self._lazy
))
def __call__(self, *names, **kwargs):
"""
Create a template list from this layout
:Parameters:
`names` : ``tuple``
Base template list
`kwargs` : ``dict``
Keywords
:Keywords:
`use` : ``dict``
extra overlay -> filename mapping
`ignore` : ``iterable``
template names to ignore
`consider` : ``iterable``
Template names to "unignore"
:Return: template list
:Rtype: `TemplateList`
"""
use_ = kwargs.pop('use', None)
ignore_ = kwargs.pop('ignore', None)
consider = kwargs.pop('consider', None)
if kwargs:
raise TypeError("Unrecognized keywords")
base = tuple(self._base) + names
use = dict(self._use)
if use_:
use.update(use_)
ignore = set(self._ignore)
if ignore_:
ignore.update(set(ignore_))
if consider:
ignore -= set(consider)
lazy, autoreload = self._lazy, self._loader.autoreload()
def make_creator(base, use, ignore):
""" Make a new template list creator """
cls, loader = self._cls, self._loader
def creator():
""" Create """
return cls.discover(loader, base, use=use, ignore=ignore)
return creator
creator = make_creator(base, use, ignore)
if not lazy and not autoreload:
return creator()
result = TemplateListProxy(creator, autoreload)
if not lazy:
iter(result) # trigger list creation
return result
def distinct_overlays(tpl):
"""
Extract distinct overlay names of a template
Overlay names available both as target and source within the template
are discarded.
:Parameters:
`tpl` : `tdi.template.Template`
Template to inspect
:Return: set(targets), set(sources)
:Rtype: ``tuple``
"""
targets = set(tpl.target_overlay_names)
sources = set(tpl.source_overlay_names)
return targets - sources, sources - targets
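# Illustrative sketch (not part of the original module; the fake template
# below is an assumption standing in for a real tdi.template.Template).
# It shows the set arithmetic above: 'b' is provided and consumed by the
# same template, so it drops out of both result sets.
#
#     class _FakeTemplate(object):
#         target_overlay_names = ('a', 'b')
#         source_overlay_names = ('b', 'c')
#
#     distinct_overlays(_FakeTemplate())  # -> (set(['a']), set(['c']))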
def discover(loader, names, use=None, ignore=None):
"""
Find templates to use and order them topologically correct
:Parameters:
`loader` : `Loader`
Template loader
`names` : ``iterable``
Base names. These templates are always added first, in order and
define the initial list of overlays to discover.
`use` : ``dict``
Extra target mapping (overlay name -> template name). This is
used, before the global overlay mapping is asked. Pass ambiguous
overlay decisions here, or disable certain overlays by passing
``None`` as name.
`ignore` : ``iterable``
List of template names to ignore completely.
:Return: list(template names), set(missing overlays),
dict(undecidable overlays -> possible template names)
:Rtype: ``tuple``
:Exceptions:
        - `DependencyCycle` : A dependency cycle occurred
"""
# pylint: disable = too-many-branches
names, missing, undecided = list(names), set(), {}
if not names:
return names, missing, undecided
overlays = lambda x: distinct_overlays(loader.load(x))
available = loader.available()
names.reverse()
dep = names.pop()
use, ignore = dict(use or ()), set(ignore or ())
targets, graph = set(overlays(dep)[0]), _graph.DependencyGraph()
# initial templates
while names:
tname = names.pop()
ttargets, tsources = overlays(tname)
targets -= tsources
targets |= ttargets
graph.add(dep, tname)
dep = tname
# automatic templates
targets = dict((target, set([dep])) for target in targets)
while targets:
target, deps = targets.popitem()
ttargets = None
if target in use:
tname = use[target]
if tname is None:
missing.add(target)
continue
else:
ttargets, tsources = overlays(tname)
if target not in tsources:
raise AssertionError('"use" source %r not in %r' % (
target, tname
))
else:
tnames = [
tname
for tname in available.get(target) or ()
if tname not in ignore
]
if not tnames:
missing.add(target)
continue
elif len(tnames) > 1:
undecided[target] = tuple(sorted(tnames))
continue
tname = tnames[0]
if ttargets is None:
ttargets = overlays(tname)[0]
for dep in deps:
graph.add(dep, tname)
for target in ttargets:
if target not in targets:
targets[target] = set()
targets[target].add(tname)
return graph.resolve(), missing, undecided
class Loader(object):
"""
Find, load and select templates
:IVariables:
`_available` : ``dict`` or ``None``
The mapping cache. This dict contains the overlay -> template mapping.
If ``None``, the dict is created during the next call of the
``available`` method.
`_registered` : ``set``
List of template names registered for autoreload
`_load` : callable
Loader
`_list` : callable
Lister
`_select` : callable
Selector
"""
def __init__(self, list, load, select):
"""
Initialization
:Parameters:
`list` : callable
Template lister. This function is called without parameters and
expected to return a list of all template names available.
Template names are hashable tokens (such as strings) identifying
the templates. They are passed to the `load` function if the
template needs to be loaded.
`load` : callable
Template loader. This function is called with a template name as
parameter and expected to return the actual template object.
`select` : callable
Template selector. This function is called with two parameters:
The loader instance (self) and the template name. It is expected
to return a bool value, which decides whether this template is
in the pool for automatic templates or not.
"""
# pylint: disable = redefined-builtin
self._available = None
self._registered = set()
self._load = load
self._list = list
self._select = select
def autoreload(self):
"""
Autoreload templates?
:Return: Autoreloading available?
:Rtype: ``bool``
"""
return bool(self._registered)
def _callback(self, _):
""" Autoupdate callback - reset the source mapping """
self._available = None
def load(self, name):
"""
Load a single template and register the autoupdate callback
:Parameters:
`name` : hashable
Template name
:Return: The template
:Rtype: `tdi.template.Template`
"""
tpl = self._load(name)
if name not in self._registered:
register = getattr(tpl, 'autoupdate_register_callback', None)
if register is not None:
register(self._callback)
self._registered.add(name)
return tpl
def available(self):
"""
Determine automatic overlay -> template name mapping
This method should only list the automatic overlay mappings.
:Return: source overlay name -> set(template name)
:Rtype: ``dict``
"""
result = self._available
if result is None:
result = {}
for name in self._list():
if self._select(self, name):
tpl = self.load(name)
for source in distinct_overlays(tpl)[1]:
if source not in result:
result[source] = set()
result[source].add(name)
self._available = result
return result
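# Hedged usage sketch, not part of the original module. `load_template`
# and the template names are illustrative assumptions; a real setup would
# supply an actual lister/loader pair for its template storage.
#
#     loader = Loader(
#         list=lambda: ['page.html', 'menu.html', 'footer.html'],
#         load=load_template,            # name -> tdi.template.Template
#         select=lambda loader, name: True,
#     )
#     layout = Layout(loader, 'page.html', ignore=('footer.html',))
#     templates = layout('index.html')   # TemplateList (lazy by default)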
|
ndparker/tdi
|
tdi/tools/template.py
|
Python
|
apache-2.0
| 16,325
|
# -*- test-case-name: twisted.names.test.test_names -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Authoritative resolvers.
"""
from __future__ import absolute_import, division
import os
import time
from twisted.names import dns, error, common
from twisted.internet import defer
from twisted.python import failure
from twisted.python.compat import execfile
def getSerial(filename = '/tmp/twisted-names.serial'):
"""Return a monotonically increasing (across program runs) integer.
State is stored in the given file. If it does not exist, it is
created with rw-/---/--- permissions.
"""
serial = time.strftime('%Y%m%d')
o = os.umask(0o177)
try:
if not os.path.exists(filename):
with open(filename, 'w') as f:
f.write(serial + ' 0')
finally:
os.umask(o)
with open(filename, 'r') as serialFile:
lastSerial, ID = serialFile.readline().split()
ID = (lastSerial == serial) and (int(ID) + 1) or 0
with open(filename, 'w') as serialFile:
serialFile.write('%s %d' % (serial, ID))
    serial = serial + ('%02d' % (ID,))
    return serial
#class LookupCacherMixin(object):
#    _cache = None
#
#    def _lookup(self, name, cls, type, timeout = 10):
#        if not self._cache:
#            self._cache = {}
#            self._meth = super(LookupCacherMixin, self)._lookup
#
#        if self._cache.has_key((name, cls, type)):
#            return self._cache[(name, cls, type)]
#        else:
#            r = self._meth(name, cls, type, timeout)
#            self._cache[(name, cls, type)] = r
#            return r
class FileAuthority(common.ResolverBase):
"""
An Authority that is loaded from a file.
@ivar _ADDITIONAL_PROCESSING_TYPES: Record types for which additional
processing will be done.
@ivar _ADDRESS_TYPES: Record types which are useful for inclusion in the
additional section generated during additional processing.
@ivar soa: A 2-tuple containing the SOA domain name as a L{bytes} and a
L{dns.Record_SOA}.
"""
# See https://twistedmatrix.com/trac/ticket/6650
_ADDITIONAL_PROCESSING_TYPES = (dns.CNAME, dns.MX, dns.NS)
_ADDRESS_TYPES = (dns.A, dns.AAAA)
soa = None
records = None
def __init__(self, filename):
common.ResolverBase.__init__(self)
self.loadFile(filename)
self._cache = {}
def __setstate__(self, state):
self.__dict__ = state
# print 'setstate ', self.soa
def _additionalRecords(self, answer, authority, ttl):
"""
Find locally known information that could be useful to the consumer of
the response and construct appropriate records to include in the
I{additional} section of that response.
Essentially, implement RFC 1034 section 4.3.2 step 6.
@param answer: A L{list} of the records which will be included in the
I{answer} section of the response.
@param authority: A L{list} of the records which will be included in
the I{authority} section of the response.
@param ttl: The default TTL for records for which this is not otherwise
specified.
@return: A generator of L{dns.RRHeader} instances for inclusion in the
I{additional} section. These instances represent extra information
about the records in C{answer} and C{authority}.
"""
for record in answer + authority:
if record.type in self._ADDITIONAL_PROCESSING_TYPES:
name = record.payload.name.name
for rec in self.records.get(name.lower(), ()):
if rec.TYPE in self._ADDRESS_TYPES:
yield dns.RRHeader(
name, rec.TYPE, dns.IN,
rec.ttl or ttl, rec, auth=True)
def _lookup(self, name, cls, type, timeout = None):
"""
Determine a response to a particular DNS query.
        @param name: The name which is being queried and for which to look up a
response.
@type name: L{bytes}
@param cls: The class which is being queried. Only I{IN} is
implemented here and this value is presently disregarded.
@type cls: L{int}
@param type: The type of records being queried. See the types defined
in L{twisted.names.dns}.
@type type: L{int}
@param timeout: All processing is done locally and a result is
available immediately, so the timeout value is ignored.
@return: A L{Deferred} that fires with a L{tuple} of three sets of
response records (to comprise the I{answer}, I{authority}, and
I{additional} sections of a DNS response) or with a L{Failure} if
there is a problem processing the query.
"""
cnames = []
results = []
authority = []
additional = []
default_ttl = max(self.soa[1].minimum, self.soa[1].expire)
domain_records = self.records.get(name.lower())
if domain_records:
for record in domain_records:
if record.ttl is not None:
ttl = record.ttl
else:
ttl = default_ttl
if record.TYPE == dns.NS and name.lower() != self.soa[0].lower():
                    # NS records belong to a child zone: this is a referral. As
# NS records are authoritative in the child zone, ours here
# are not. RFC 2181, section 6.1.
authority.append(
dns.RRHeader(name, record.TYPE, dns.IN, ttl, record, auth=False)
)
elif record.TYPE == type or type == dns.ALL_RECORDS:
results.append(
dns.RRHeader(name, record.TYPE, dns.IN, ttl, record, auth=True)
)
if record.TYPE == dns.CNAME:
cnames.append(
dns.RRHeader(name, record.TYPE, dns.IN, ttl, record, auth=True)
)
if not results:
results = cnames
# https://tools.ietf.org/html/rfc1034#section-4.3.2 - sort of.
# See https://twistedmatrix.com/trac/ticket/6732
additionalInformation = self._additionalRecords(
results, authority, default_ttl)
if cnames:
results.extend(additionalInformation)
else:
additional.extend(additionalInformation)
if not results and not authority:
# Empty response. Include SOA record to allow clients to cache
# this response. RFC 1034, sections 3.7 and 4.3.4, and RFC 2181
# section 7.1.
authority.append(
dns.RRHeader(self.soa[0], dns.SOA, dns.IN, ttl, self.soa[1], auth=True)
)
return defer.succeed((results, authority, additional))
else:
if dns._isSubdomainOf(name, self.soa[0]):
# We may be the authority and we didn't find it.
# XXX: The QNAME may also be a in a delegated child zone. See
# #6581 and #6580
return defer.fail(failure.Failure(dns.AuthoritativeDomainError(name)))
else:
# The QNAME is not a descendant of this zone. Fail with
# DomainError so that the next chained authority or
# resolver will be queried.
return defer.fail(failure.Failure(error.DomainError(name)))
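    # Hedged usage sketch (added for illustration; the zone file name is an
    # assumption). lookupAddress() comes from common.ResolverBase and ends
    # up calling _lookup() above with (name, dns.IN, dns.A):
    #
    #     authority = BindAuthority('example.com.zone')
    #     d = authority.lookupAddress(b'www.example.com')
    #     d.addCallback(lambda r: r[0])  # (answers, authority, additional)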
def lookupZone(self, name, timeout = 10):
if self.soa[0].lower() == name.lower():
# Wee hee hee hooo yea
default_ttl = max(self.soa[1].minimum, self.soa[1].expire)
if self.soa[1].ttl is not None:
soa_ttl = self.soa[1].ttl
else:
soa_ttl = default_ttl
results = [dns.RRHeader(self.soa[0], dns.SOA, dns.IN, soa_ttl, self.soa[1], auth=True)]
for (k, r) in self.records.items():
for rec in r:
if rec.ttl is not None:
ttl = rec.ttl
else:
ttl = default_ttl
if rec.TYPE != dns.SOA:
results.append(dns.RRHeader(k, rec.TYPE, dns.IN, ttl, rec, auth=True))
results.append(results[0])
return defer.succeed((results, (), ()))
return defer.fail(failure.Failure(dns.DomainError(name)))
def _cbAllRecords(self, results):
ans, auth, add = [], [], []
for res in results:
if res[0]:
ans.extend(res[1][0])
auth.extend(res[1][1])
add.extend(res[1][2])
return ans, auth, add
class PySourceAuthority(FileAuthority):
"""A FileAuthority that is built up from Python source code."""
def loadFile(self, filename):
g, l = self.setupConfigNamespace(), {}
execfile(filename, g, l)
if not l.has_key('zone'):
raise ValueError("No zone defined in " + filename)
self.records = {}
for rr in l['zone']:
if isinstance(rr[1], dns.Record_SOA):
self.soa = rr
self.records.setdefault(rr[0].lower(), []).append(rr[1])
def wrapRecord(self, type):
return lambda name, *arg, **kw: (name, type(*arg, **kw))
def setupConfigNamespace(self):
r = {}
items = dns.__dict__.iterkeys()
for record in [x for x in items if x.startswith('Record_')]:
type = getattr(dns, record)
f = self.wrapRecord(type)
r[record[len('Record_'):]] = f
return r
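# Hedged illustration, not part of the original module: the kind of Python
# zone file PySourceAuthority consumes. setupConfigNamespace() exposes each
# dns.Record_* class under its short name, and wrapRecord() turns a call
# into a (name, record) tuple, so such a file might look like:
#
#     zone = [
#         SOA('example.com', mname='ns1.example.com',
#             rname='hostmaster.example.com', serial=2015010101),
#         A('example.com', '192.0.2.1'),
#     ]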
class BindAuthority(FileAuthority):
"""An Authority that loads BIND configuration files"""
def loadFile(self, filename):
self.origin = os.path.basename(filename) + '.' # XXX - this might suck
with open(filename, 'rb') as f:
lines = f.readlines()
lines = self.stripComments(lines)
lines = self.collapseContinuations(lines)
self.parseLines(lines)
def stripComments(self, lines):
return [
a.find(';') == -1 and a or a[:a.find(';')] for a in [
b.strip() for b in lines
]
]
def collapseContinuations(self, lines):
L = []
state = 0
for line in lines:
if state == 0:
if line.find('(') == -1:
L.append(line)
else:
L.append(line[:line.find('(')])
state = 1
else:
if line.find(')') != -1:
L[-1] += ' ' + line[:line.find(')')]
state = 0
else:
L[-1] += ' ' + line
lines = L
L = []
for line in lines:
L.append(line.split())
return filter(None, L)
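    # Worked example (added for illustration) of collapseContinuations():
    # BIND lets a record span lines inside parentheses, so stripped input
    #
    #     ['@ IN SOA ns1 hostmaster (', '2015010101 3600', '600 )',
    #      'www IN A 192.0.2.1']
    #
    # is first rejoined into one logical line per record and then split
    # into token lists, roughly:
    #
    #     [['@', 'IN', 'SOA', 'ns1', 'hostmaster', '2015010101', '3600', '600'],
    #      ['www', 'IN', 'A', '192.0.2.1']]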
def parseLines(self, lines):
TTL = 60 * 60 * 3
ORIGIN = self.origin
self.records = {}
for (line, index) in zip(lines, range(len(lines))):
if line[0] == '$TTL':
TTL = dns.str2time(line[1])
elif line[0] == '$ORIGIN':
ORIGIN = line[1]
elif line[0] == '$INCLUDE': # XXX - oh, fuck me
raise NotImplementedError('$INCLUDE directive not implemented')
elif line[0] == '$GENERATE':
raise NotImplementedError('$GENERATE directive not implemented')
else:
self.parseRecordLine(ORIGIN, TTL, line)
def addRecord(self, owner, ttl, type, domain, cls, rdata):
if not domain.endswith('.'):
domain = domain + '.' + owner
else:
domain = domain[:-1]
f = getattr(self, 'class_%s' % cls, None)
if f:
f(ttl, type, domain, rdata)
else:
raise NotImplementedError("Record class %r not supported" % cls)
def class_IN(self, ttl, type, domain, rdata):
record = getattr(dns, 'Record_%s' % type, None)
if record:
r = record(*rdata)
r.ttl = ttl
self.records.setdefault(domain.lower(), []).append(r)
print('Adding IN Record', domain, ttl, r)
if type == 'SOA':
self.soa = (domain, r)
else:
raise NotImplementedError("Record type %r not supported" % type)
def parseRecordLine(self, origin, ttl, line):
MARKERS = dns.QUERY_CLASSES.values() + dns.QUERY_TYPES.values()
cls = 'IN'
owner = origin
if line[0] == '@':
line = line[1:]
owner = origin
# print 'default owner'
elif not line[0].isdigit() and line[0] not in MARKERS:
owner = line[0]
line = line[1:]
# print 'owner is ', owner
if line[0].isdigit() or line[0] in MARKERS:
domain = owner
owner = origin
# print 'woops, owner is ', owner, ' domain is ', domain
else:
domain = line[0]
line = line[1:]
# print 'domain is ', domain
if line[0] in dns.QUERY_CLASSES.values():
cls = line[0]
line = line[1:]
# print 'cls is ', cls
if line[0].isdigit():
ttl = int(line[0])
line = line[1:]
# print 'ttl is ', ttl
elif line[0].isdigit():
ttl = int(line[0])
line = line[1:]
# print 'ttl is ', ttl
if line[0] in dns.QUERY_CLASSES.values():
cls = line[0]
line = line[1:]
# print 'cls is ', cls
type = line[0]
# print 'type is ', type
rdata = line[1:]
# print 'rdata is ', rdata
self.addRecord(owner, ttl, type, domain, cls, rdata)
|
Architektor/PySnip
|
venv/lib/python2.7/site-packages/twisted/names/authority.py
|
Python
|
gpl-3.0
| 14,143
|
import os
import glob
import time
import json
class Sensor(dict):
def __init__(self, name):
self.readings = {}
dict.__init__(self, name=name, readings=self.readings)
self.name = name
def add_reading(self, loopcount):
        # Read via a context manager so the sysfs file handle is closed
        # even if parsing fails.
        with open(self.name, 'r') as f:
            lines = f.readlines()
        equals_pos = lines[1].find('t=')
        if equals_pos != -1:
            raw_temp = float(lines[1][equals_pos+2:])
            self.readings[loopcount] = raw_temp
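# Illustration (added; sample values are typical, not from this repo): a
# DS18B20 w1_slave file, the format add_reading() parses. The second line
# carries the raw temperature in thousandths of a degree Celsius after 't=':
#
#     73 01 4b 46 7f ff 0d 10 41 : crc=41 YES
#     73 01 4b 46 7f ff 0d 10 41 t=23187
#
# add_reading() would record 23187.0 for the current loop count.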
os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')
base_dir = '/sys/bus/w1/devices/'
device_folders = glob.glob(base_dir + '28*')
sensors = []
for device_folder in device_folders:
sensors.append(Sensor(device_folder + '/w1_slave'))
try:
loopcount = 0
while True:
for s in sensors:
s.add_reading(loopcount)
time.sleep(1)
loopcount = loopcount + 1
except KeyboardInterrupt:
print(json.dumps({'sensors': sensors}))
|
jjulik/keezer-pi
|
client/testing/sensortest.py
|
Python
|
mit
| 857
|
"""Component to allow selecting an option from a list as platforms."""
from __future__ import annotations
from dataclasses import dataclass
from datetime import timedelta
import logging
from typing import Any, final
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, ServiceCall
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.config_validation import ( # noqa: F401
PLATFORM_SCHEMA,
PLATFORM_SCHEMA_BASE,
)
from homeassistant.helpers.entity import Entity, EntityDescription
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.typing import ConfigType
from .const import ATTR_OPTION, ATTR_OPTIONS, DOMAIN, SERVICE_SELECT_OPTION
SCAN_INTERVAL = timedelta(seconds=30)
ENTITY_ID_FORMAT = DOMAIN + ".{}"
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up Select entities."""
component = hass.data[DOMAIN] = EntityComponent(
_LOGGER, DOMAIN, hass, SCAN_INTERVAL
)
await component.async_setup(config)
component.async_register_entity_service(
SERVICE_SELECT_OPTION,
{vol.Required(ATTR_OPTION): cv.string},
async_select_option,
)
return True
async def async_select_option(entity: SelectEntity, service_call: ServiceCall) -> None:
"""Service call wrapper to set a new value."""
option = service_call.data[ATTR_OPTION]
if option not in entity.options:
raise ValueError(f"Option {option} not valid for {entity.name}")
await entity.async_select_option(option)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up a config entry."""
component: EntityComponent = hass.data[DOMAIN]
return await component.async_setup_entry(entry)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
component: EntityComponent = hass.data[DOMAIN]
return await component.async_unload_entry(entry)
@dataclass
class SelectEntityDescription(EntityDescription):
"""A class that describes select entities."""
class SelectEntity(Entity):
"""Representation of a Select entity."""
entity_description: SelectEntityDescription
_attr_current_option: str | None
_attr_options: list[str]
_attr_state: None = None
@property
def capability_attributes(self) -> dict[str, Any]:
"""Return capability attributes."""
return {
ATTR_OPTIONS: self.options,
}
@property
@final
def state(self) -> str | None:
"""Return the entity state."""
if self.current_option is None or self.current_option not in self.options:
return None
return self.current_option
@property
def options(self) -> list[str]:
"""Return a set of selectable options."""
return self._attr_options
@property
def current_option(self) -> str | None:
"""Return the selected entity option to represent the entity state."""
return self._attr_current_option
def select_option(self, option: str) -> None:
"""Change the selected option."""
raise NotImplementedError()
async def async_select_option(self, option: str) -> None:
"""Change the selected option."""
await self.hass.async_add_executor_job(self.select_option, option)
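# Hedged sketch, not part of the component: a minimal SelectEntity
# implementation using the _attr_* shorthands defined above. All names are
# illustrative.
#
#     class DemoSelect(SelectEntity):
#         _attr_options = ["low", "medium", "high"]
#         _attr_current_option = "low"
#
#         def select_option(self, option: str) -> None:
#             self._attr_current_option = option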
|
lukas-hetzenecker/home-assistant
|
homeassistant/components/select/__init__.py
|
Python
|
apache-2.0
| 3,535
|
from ...util.linear_functions import (
hellinger_distance,
normalized_hellinger_distance,
hellinger_distance_prebin)
import unittest
from unittest import TestCase
class HellingerDistanceTest(TestCase):
def test_max(self):
a = [2, 2, 2, 2, 2, 2, 2, 2, 9, 9]
b = [9, 9, 9, 9, 9, 9, 9, 9, 2, 2]
self.assertAlmostEqual(.707, hellinger_distance(a, b, 2), 3)
def test_min(self):
a = [9, 9, 9, 9, 9, 9, 9, 9, 2, 2]
b = [9, 9, 9, 9, 9, 9, 9, 9, 2, 2]
self.assertEqual(0, hellinger_distance(a, b, 2))
class HellingerDistancePrebinTest(TestCase):
def test(self):
        a = range(1, 21)
        b = range(20, 0, -1)
self.assertAlmostEqual(0.3111, hellinger_distance_prebin(a, b), 3)
class NormalizedHellingerDistanceTest(TestCase):
def test_max(self):
a = [2, 2, 2, 2, 2, 2, 2, 2, 9, 9]
b = [9, 9, 9, 9, 9, 9, 9, 9, 2, 2]
self.assertAlmostEqual(.5, normalized_hellinger_distance(a, b, 2), 1)
def test_min(self):
a = [9, 9, 9, 9, 9, 9, 9, 9, 2, 2]
b = [9, 9, 9, 9, 9, 9, 9, 9, 2, 2]
self.assertEqual(0, normalized_hellinger_distance(a, b, 2))
if __name__ == '__main__':
unittest.main()
|
arider/riderml
|
riderml/tests/util/test_linear_functions.py
|
Python
|
mit
| 1,304
|
# -*- coding: utf-8 -*-
#
# Ninja documentation build configuration file, created by
# sphinx-quickstart on Sun Aug 16 11:40:35 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.mathjax',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
source_suffix = ['.rst']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Ninja'
copyright = u'2015, Basis Technology'
author = u'Basis Technology'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Ninjadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Ninja.tex', u'Ninja Documentation',
u'Basis Technology', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'ninja', u'Ninja Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Ninja', u'Ninja Documentation',
author, 'Ninja', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
yuval/ninja
|
doc/conf.py
|
Python
|
apache-2.0
| 9,097
|
"""
You don't really want to use this module. Try helper.py instead.
"""
CLEAR = 0
BOLD = 1
DIM = 2
ITALIC = 3
UNDERSCORE = 4
BLINK_SLOW = 5
BLINK_FAST = 6
REVERSE = 7
CONCEALED = 8
FG_BLACK = 30
FG_RED = 31
FG_GREEN = 32
FG_YELLOW = 33
FG_BLUE = 34
FG_MAGENTA = 35
FG_CYAN = 36
FG_WHITE = 37
BG_BLACK = 40
BG_RED = 41
BG_GREEN = 42
BG_YELLOW = 43
BG_BLUE = 44
BG_MAGENTA = 45
BG_CYAN = 46
BG_WHITE = 47
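# Hedged usage sketch (added; the module's own docstring points real users
# at helper.py instead). These integers are ANSI SGR codes and take effect
# when wrapped in an escape sequence:
#
#     def _sgr(*codes):
#         return '\x1b[%sm' % ';'.join(str(c) for c in codes)
#
#     print(_sgr(BOLD, FG_RED) + 'error' + _sgr(CLEAR))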
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/ThirdParty/Twisted/twisted/conch/insults/colors.py
|
Python
|
bsd-3-clause
| 434
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
File: config.py
Author: Rob Phoenix
Email: rob@robphoenix.com
Github: https://github.com/robphoenix
Description: Prepare Cisco IOS configs for comparison
"""
import os
import re
from collections import namedtuple
import diffios
class Config(object):
"""Config prepares a Cisco IOS Config to diff.
Config takes a Cisco IOS config, as a file or a
list, removes any invalid lines, such as comments,
breaks the config into a hierarchical block structure
and partitions the config according to a list of
lines to ignore.
Attributes:
config (list): List of valid config lines
ignore_lines (list): List of lines to ignore
Args:
config (str|list): Path to config file, or list
containing lines of config
Kwargs:
ignore_lines (str|list): Path to ignores file, or list
containing lines to ignore. Defaults to empty list.
>>> config = [
... '!',
... 'hostname ROUTER',
... '!',
... 'interface FastEthernet0/1',
... ' description *** Link to Core ***',
... ' ip address 192.168.0.1 255.255.255.0']
>>> ignore = [
... 'hostname',
... '^ description']
>>> conf = Config(config, ignore)
>>> conf.config
['!', 'hostname ROUTER', '!', 'interface FastEthernet0/1', ' description \
*** Link to Core ***', ' ip address 192.168.0.1 255.255.255.0']
>>> conf.ignore_lines
['hostname', '^ description']
>>> conf.ignored()
[['hostname ROUTER'], [' description *** Link to Core ***']]
>>> conf.included()
[['interface FastEthernet0/1', ' ip address 192.168.0.1 255.255.255.0']]
"""
def __init__(self, config, ignore_lines=None):
self.config = self._check_data('config', config)
if ignore_lines is None:
ignore_lines = []
self.ignore_lines = self._ignore(
self._check_data('ignore_lines', ignore_lines))
def _valid_config(self):
return [l.rstrip() for l in self.config if self._valid_line(l)]
def _group_config(self):
current_group, groups = [], []
for line in self._valid_config():
if not line.startswith(' ') and current_group:
groups.append(current_group)
current_group = [line]
else:
current_group.append(line)
if current_group:
groups.append(current_group)
return sorted(groups)
def _partition_group(self, group):
Partition = namedtuple("Partition", "ignored included")
ignored, included = [], []
for i, line in enumerate(group):
if self._ignore_line(line) and i == 0:
return Partition(group, included)
elif self._ignore_line(line):
ignored.append(line)
else:
included.append(line)
return Partition(ignored, included)
def _partition_config(self):
Partition = namedtuple("Partition", "ignored included")
included, ignored = [], []
for group in self._group_config():
partition = self._partition_group(group)
if partition.included:
included.append(partition.included)
if partition.ignored:
ignored.append(partition.ignored)
return Partition(ignored, included)
def included(self):
"""Lines from the original config that are not ignored. """
return self._partition_config().included
def ignored(self):
"""Lines from the original config that are ignored. """
return self._partition_config().ignored
@staticmethod
def _ignore(ignore):
return [line.strip().lower() for line in ignore]
@staticmethod
def _check_data(name, data):
invalid_arg = "diffios.Config() received an invalid argument: {}={}\n"
unable_to_open = "diffios.Config() could not open '{}'"
if isinstance(data, list):
return data
try:
with open(data) as fin:
return fin.read().splitlines() # remove '\n' from lines
except IOError:
raise RuntimeError((unable_to_open.format(data)))
        except Exception:
raise RuntimeError(invalid_arg.format(name, data))
@staticmethod
def _valid_line(line):
line = line.strip()
return len(line) > 0 and not line.startswith(
"!") and line != '^' and line != '^C'
def _ignore_line(self, line):
for line_to_ignore in self.ignore_lines:
for metacharacter in diffios.REGEX_METACHARACTERS:
if metacharacter in line_to_ignore:
line_to_ignore = line_to_ignore.replace(
metacharacter, '\{}'.format(metacharacter))
if re.search(line_to_ignore, line.lower()):
return True
return False
|
bordeltabernacle/diffios
|
diffios/config.py
|
Python
|
mit
| 4,912
|
from __future__ import absolute_import
import hmac
from django.core.urlresolvers import reverse
from exam import fixture
from hashlib import sha256
from sentry.utils.compat.mock import patch
from sentry.models import ProjectOption
from sentry.testutils import TestCase
from sentry.utils import json
class ReleaseWebhookTestBase(TestCase):
def setUp(self):
super(ReleaseWebhookTestBase, self).setUp()
self.organization = self.create_organization()
self.team = self.create_team(organization=self.organization)
self.project = self.create_project(teams=[self.team])
self.token = "a2587e3af83411e4a28634363b8514c2"
ProjectOption.objects.set_value(self.project, "sentry:release-token", self.token)
@fixture
def signature(self):
return hmac.new(
key=self.token.encode("utf-8"),
msg=("{}-{}".format(self.plugin_id, self.project.id)).encode("utf-8"),
digestmod=sha256,
).hexdigest()
@fixture
def path(self):
return reverse(
"sentry-release-hook",
kwargs={
"project_id": self.project.id,
"plugin_id": self.plugin_id,
"signature": self.signature,
},
)
class ReleaseWebhookTest(ReleaseWebhookTestBase):
def setUp(self):
super(ReleaseWebhookTest, self).setUp()
self.plugin_id = "dummy"
def test_no_token(self):
project = self.create_project(teams=[self.team])
path = reverse(
"sentry-release-hook",
kwargs={"project_id": project.id, "plugin_id": "dummy", "signature": self.signature},
)
resp = self.client.post(path)
assert resp.status_code == 403
def test_invalid_signature(self):
path = reverse(
"sentry-release-hook",
kwargs={"project_id": self.project.id, "plugin_id": "dummy", "signature": "wrong"},
)
resp = self.client.post(path)
assert resp.status_code == 403
def test_invalid_project(self):
path = reverse(
"sentry-release-hook",
kwargs={"project_id": 1000000, "plugin_id": "dummy", "signature": self.signature},
)
resp = self.client.post(path)
assert resp.status_code == 404
@patch("sentry.plugins.base.plugins.get")
def test_valid_signature(self, mock_plugin_get):
MockPlugin = mock_plugin_get.return_value
MockPlugin.is_enabled.return_value = True
MockReleaseHook = MockPlugin.get_release_hook.return_value
resp = self.client.post(self.path)
assert resp.status_code == 204
mock_plugin_get.assert_called_once_with("dummy")
MockPlugin.get_release_hook.assert_called_once_with()
MockReleaseHook.assert_called_once_with(self.project)
assert MockReleaseHook.return_value.handle.call_count == 1
@patch("sentry.plugins.base.plugins.get")
def test_disabled_plugin(self, mock_plugin_get):
MockPlugin = mock_plugin_get.return_value
MockPlugin.is_enabled.return_value = False
resp = self.client.post(self.path)
assert resp.status_code == 403
mock_plugin_get.assert_called_once_with("dummy")
assert not MockPlugin.get_release_hook.called
class BuiltinReleaseWebhookTest(ReleaseWebhookTestBase):
def setUp(self):
super(BuiltinReleaseWebhookTest, self).setUp()
self.plugin_id = "builtin"
def test_invalid_params(self):
resp = self.client.post(self.path, content_type="application/json")
assert resp.status_code == 400
def test_valid_params(self):
resp = self.client.post(
self.path, data=json.dumps({"version": "a"}), content_type="application/json"
)
assert resp.status_code == 201, resp.content
data = json.loads(resp.content)
assert data["version"] == "a"
def test_no_teams_and_no_user(self):
self.project.remove_team(self.team)
resp = self.client.post(self.path, user=None, content_type="application/json")
assert resp.status_code == 403
|
beeftornado/sentry
|
tests/sentry/web/frontend/test_release_webhook.py
|
Python
|
bsd-3-clause
| 4,134
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic Node base class for all workers that run on hosts."""
import os
import random
import sys
from oslo_concurrency import processutils
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_service import service
from oslo_utils import importutils
from nova import baserpc
from nova import conductor
import nova.conf
from nova import context
from nova import debugger
from nova import exception
from nova.i18n import _, _LE, _LI, _LW
from nova import objects
from nova.objects import base as objects_base
from nova.objects import service as service_obj
from nova import rpc
from nova import servicegroup
from nova import utils
from nova import version
from nova import wsgi
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
CONF.import_opt('host', 'nova.netconf')
def _create_service_ref(this_service, context):
service = objects.Service(context)
service.host = this_service.host
service.binary = this_service.binary
service.topic = this_service.topic
service.report_count = 0
service.create()
return service
def _update_service_ref(this_service, context):
service = objects.Service.get_by_host_and_binary(context,
this_service.host,
this_service.binary)
if not service:
LOG.error(_LE('Unable to find a service record to update for '
'%(binary)s on %(host)s'),
{'binary': this_service.binary,
'host': this_service.host})
return
if service.version != service_obj.SERVICE_VERSION:
LOG.info(_LI('Updating service version for %(binary)s on '
'%(host)s from %(old)i to %(new)i'),
{'binary': this_service.binary,
'host': this_service.host,
'old': service.version,
'new': service_obj.SERVICE_VERSION})
service.version = service_obj.SERVICE_VERSION
service.save()
class Service(service.Service):
"""Service object for binaries running on hosts.
A service takes a manager and enables rpc by listening to queues based
on topic. It also periodically runs tasks on the manager and reports
its state to the database services table.
"""
def __init__(self, host, binary, topic, manager, report_interval=None,
periodic_enable=None, periodic_fuzzy_delay=None,
periodic_interval_max=None, db_allowed=True,
*args, **kwargs):
super(Service, self).__init__()
self.host = host
self.binary = binary
self.topic = topic
self.manager_class_name = manager
self.servicegroup_api = servicegroup.API()
manager_class = importutils.import_class(self.manager_class_name)
self.manager = manager_class(host=self.host, *args, **kwargs)
self.rpcserver = None
self.report_interval = report_interval
self.periodic_enable = periodic_enable
self.periodic_fuzzy_delay = periodic_fuzzy_delay
self.periodic_interval_max = periodic_interval_max
self.saved_args, self.saved_kwargs = args, kwargs
self.backdoor_port = None
self.conductor_api = conductor.API(use_local=db_allowed)
self.conductor_api.wait_until_ready(context.get_admin_context())
def start(self):
verstr = version.version_string_with_package()
LOG.info(_LI('Starting %(topic)s node (version %(version)s)'),
{'topic': self.topic, 'version': verstr})
self.basic_config_check()
self.manager.init_host()
self.model_disconnected = False
ctxt = context.get_admin_context()
self.service_ref = objects.Service.get_by_host_and_binary(
ctxt, self.host, self.binary)
if not self.service_ref:
try:
self.service_ref = _create_service_ref(self, ctxt)
except (exception.ServiceTopicExists,
exception.ServiceBinaryExists):
# NOTE(danms): If we race to create a record with a sibling
# worker, don't fail here.
self.service_ref = objects.Service.get_by_host_and_binary(
ctxt, self.host, self.binary)
self.manager.pre_start_hook()
if self.backdoor_port is not None:
self.manager.backdoor_port = self.backdoor_port
LOG.debug("Creating RPC server for service %s", self.topic)
target = messaging.Target(topic=self.topic, server=self.host)
endpoints = [
self.manager,
baserpc.BaseRPCAPI(self.manager.service_name, self.backdoor_port)
]
endpoints.extend(self.manager.additional_endpoints)
serializer = objects_base.NovaObjectSerializer()
self.rpcserver = rpc.get_server(target, endpoints, serializer)
self.rpcserver.start()
self.manager.post_start_hook()
LOG.debug("Join ServiceGroup membership for this service %s",
self.topic)
# Add service to the ServiceGroup membership group.
self.servicegroup_api.join(self.host, self.topic, self)
if self.periodic_enable:
if self.periodic_fuzzy_delay:
initial_delay = random.randint(0, self.periodic_fuzzy_delay)
else:
initial_delay = None
self.tg.add_dynamic_timer(self.periodic_tasks,
initial_delay=initial_delay,
periodic_interval_max=
self.periodic_interval_max)
def __getattr__(self, key):
manager = self.__dict__.get('manager', None)
return getattr(manager, key)
@classmethod
def create(cls, host=None, binary=None, topic=None, manager=None,
report_interval=None, periodic_enable=None,
periodic_fuzzy_delay=None, periodic_interval_max=None,
db_allowed=True):
"""Instantiates class and passes back application object.
:param host: defaults to CONF.host
:param binary: defaults to basename of executable
:param topic: defaults to bin_name - 'nova-' part
:param manager: defaults to CONF.<topic>_manager
:param report_interval: defaults to CONF.report_interval
:param periodic_enable: defaults to CONF.periodic_enable
:param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay
:param periodic_interval_max: if set, the max time to wait between runs
"""
if not host:
host = CONF.host
if not binary:
binary = os.path.basename(sys.argv[0])
if not topic:
topic = binary.rpartition('nova-')[2]
if not manager:
manager_cls = ('%s_manager' %
binary.rpartition('nova-')[2])
manager = CONF.get(manager_cls, None)
if report_interval is None:
report_interval = CONF.report_interval
if periodic_enable is None:
periodic_enable = CONF.periodic_enable
if periodic_fuzzy_delay is None:
periodic_fuzzy_delay = CONF.periodic_fuzzy_delay
debugger.init()
service_obj = cls(host, binary, topic, manager,
report_interval=report_interval,
periodic_enable=periodic_enable,
periodic_fuzzy_delay=periodic_fuzzy_delay,
periodic_interval_max=periodic_interval_max,
db_allowed=db_allowed)
return service_obj
def kill(self):
"""Destroy the service object in the datastore.
        NOTE: Although this method is only used by tests, it is convenient
        to have it here, so the tests can easily and cleanly stop and
        remove the service_ref.
"""
self.stop()
try:
self.service_ref.destroy()
except exception.NotFound:
LOG.warning(_LW('Service killed that has no database entry'))
def stop(self):
try:
self.rpcserver.stop()
self.rpcserver.wait()
except Exception:
pass
try:
self.manager.cleanup_host()
except Exception:
LOG.exception(_LE('Service error occurred during cleanup_host'))
super(Service, self).stop()
def periodic_tasks(self, raise_on_error=False):
"""Tasks to be run at a periodic interval."""
ctxt = context.get_admin_context()
return self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error)
def basic_config_check(self):
"""Perform basic config checks before starting processing."""
# Make sure the tempdir exists and is writable
try:
with utils.tempdir():
pass
except Exception as e:
LOG.error(_LE('Temporary directory is invalid: %s'), e)
sys.exit(1)
def reset(self):
self.manager.reset()
class WSGIService(service.Service):
"""Provides ability to launch API from a 'paste' configuration."""
def __init__(self, name, loader=None, use_ssl=False, max_url_len=None):
"""Initialize, but do not start the WSGI server.
:param name: The name of the WSGI server given to the loader.
:param loader: Loads the WSGI application using the given name.
:returns: None
"""
self.name = name
# NOTE(danms): Name can be metadata, os_compute, or ec2, per
# nova.service's enabled_apis
self.binary = 'nova-%s' % name
self.topic = None
self.manager = self._get_manager()
self.loader = loader or wsgi.Loader()
self.app = self.loader.load_app(name)
# inherit all compute_api worker counts from osapi_compute
if name.startswith('openstack_compute_api'):
wname = 'osapi_compute'
else:
wname = name
self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0")
self.port = getattr(CONF, '%s_listen_port' % name, 0)
self.workers = (getattr(CONF, '%s_workers' % wname, None) or
processutils.get_worker_count())
if self.workers and self.workers < 1:
worker_name = '%s_workers' % name
msg = (_("%(worker_name)s value of %(workers)s is invalid, "
"must be greater than 0") %
{'worker_name': worker_name,
'workers': str(self.workers)})
raise exception.InvalidInput(msg)
self.use_ssl = use_ssl
self.server = wsgi.Server(name,
self.app,
host=self.host,
port=self.port,
use_ssl=self.use_ssl,
max_url_len=max_url_len)
# Pull back actual port used
self.port = self.server.port
self.backdoor_port = None
def reset(self):
"""Reset server greenpool size to default.
:returns: None
"""
self.server.reset()
def _get_manager(self):
"""Initialize a Manager object appropriate for this service.
Use the service name to look up a Manager subclass from the
configuration and initialize an instance. If no class name
is configured, just return None.
:returns: a Manager instance, or None.
"""
fl = '%s_manager' % self.name
if fl not in CONF:
return None
manager_class_name = CONF.get(fl, None)
if not manager_class_name:
return None
manager_class = importutils.import_class(manager_class_name)
return manager_class()
def start(self):
"""Start serving this service using loaded configuration.
Also, retrieve updated port number in case '0' was passed in, which
indicates a random port should be used.
:returns: None
"""
ctxt = context.get_admin_context()
service_ref = objects.Service.get_by_host_and_binary(ctxt, self.host,
self.binary)
if not service_ref:
try:
service_ref = _create_service_ref(self, ctxt)
except (exception.ServiceTopicExists,
exception.ServiceBinaryExists):
                # NOTE(danms): If we race to create a record with a sibling,
# don't fail here.
service_ref = objects.Service.get_by_host_and_binary(
ctxt, self.host, self.binary)
_update_service_ref(service_ref, ctxt)
if self.manager:
self.manager.init_host()
self.manager.pre_start_hook()
if self.backdoor_port is not None:
self.manager.backdoor_port = self.backdoor_port
self.server.start()
if self.manager:
self.manager.post_start_hook()
def stop(self):
"""Stop serving this API.
:returns: None
"""
self.server.stop()
def wait(self):
"""Wait for the service to stop serving this API.
:returns: None
"""
self.server.wait()
def process_launcher():
return service.ProcessLauncher(CONF)
# NOTE(vish): the global launcher is to maintain the existing
# functionality of calling service.serve +
# service.wait
_launcher = None
def serve(server, workers=None):
global _launcher
if _launcher:
raise RuntimeError(_('serve() can only be called once'))
_launcher = service.launch(CONF, server, workers=workers)
def wait():
_launcher.wait()
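# Hedged usage sketch, not part of the original module: how a nova console
# script typically drives the helpers above (the binary/topic values are
# illustrative).
#
#     server = Service.create(binary='nova-compute',
#                             topic=CONF.compute_topic)
#     serve(server)
#     wait()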
|
cernops/nova
|
nova/service.py
|
Python
|
apache-2.0
| 14,686
|
# -*- coding: utf-8; -*-
from __future__ import absolute_import, unicode_literals
from tests import unittest, mock
from tests.factories.docker_client_factory import DockerClientFactory
from freight_forwarder.container.config import Config
from freight_forwarder.image import Image
from freight_forwarder.registry.registry import V2
import docker
import requests
class ImageTest(unittest.TestCase):
def setUp(self):
self.docker_client = DockerClientFactory()
def tearDown(self):
del self.docker_client
@mock.patch.object(Image, '_inspect_and_map')
def test_create_image(self, mock_image_inspect):
image = Image(client=self.docker_client, identifier='123')
self.assertEqual(image.identifier, '123')
self.assertIsInstance(image, Image)
def test_create_image_failure(self):
with self.assertRaises(Exception):
Image(client=False, identifier='123')
with self.assertRaises(TypeError):
Image(client=self.docker_client, identifier=False)
@mock.patch.object(docker.api.ImageApiMixin, 'push')
@mock.patch.object(V2, 'ping')
@mock.patch.object(Image, 'tag')
@mock.patch.object(Image, '_inspect_and_map')
def test_push(self, mock_image_inspect, mock_image_tag, mock_v2_registry_ping, mock_docker_image_push):
mock_v2_registry = mock.Mock(spec=V2(address='https://v2.com'))
mock_v2_registry_ping.return_value = True
mock_docker_image_push.return_value = {}
image = Image(client=self.docker_client, identifier='123')
image.push(registry=mock_v2_registry, repository_tag='foo', tag='bar')
self.assertTrue(True) # TODO: ...
@mock.patch.object(Image, '_inspect_and_map')
def test_push_failure_invalid_registry(self, mock_image_inspect):
with self.assertRaises(Exception):
image = Image(client=self.docker_client, identifier='123')
image.push(registry=False, repository_tag='foo')
@mock.patch.object(Image, '_inspect_and_map')
def test_push_failure_invalid_tag(self, mock_image_inspect):
mock_v2_registry = mock.Mock(spec=V2(address='https://v2.com'))
mock_v2_registry.return_value.ping.return_value = True
with self.assertRaises(TypeError):
image = Image(client=self.docker_client, identifier='123')
image.push(registry=mock_v2_registry, repository_tag=False)
@mock.patch.object(docker.api.ImageApiMixin, 'tag')
@mock.patch.object(Image, '_inspect_and_map')
def test_tag(self, mock_image_inspect, mock_docker_image_tag):
image = Image(client=self.docker_client, identifier='123')
image.id = '123'
image.tag(repository_tag='foo:bar', tags=['abc'])
self.assertEqual(image.repo_tags, ('foo:abc', 'foo:bar'))
@mock.patch.object(docker.api.ImageApiMixin, 'tag')
@mock.patch.object(Image, '_inspect_and_map')
def test_tag_no_tags(self, mock_image_inspect, mock_docker_image_tag):
image = Image(client=self.docker_client, identifier='123')
image.id = '123'
image.tag(repository_tag='foo')
self.assertEqual(image.repo_tags, ('foo:latest',))
@mock.patch.object(docker.api.ImageApiMixin, 'tag')
@mock.patch.object(Image, '_inspect_and_map')
def test_tag_failure(self, mock_image_inspect, mock_docker_image_tag):
with self.assertRaises(TypeError):
image = Image(client=self.docker_client, identifier='123')
image.tag(repository_tag=False, tags=[])
with self.assertRaises(TypeError):
image = Image(client=self.docker_client, identifier='123')
image.tag(repository_tag='foo', tags=False)
mock_docker_image_tag.side_effect = Exception
with self.assertRaises(Exception):
image = Image(client=self.docker_client, identifier='123')
image.id = '123'
image.tag(repository_tag='foo:bar', tags=['abc'])
self.assertEqual(image.repo_tags, ('foo:abc', 'foo:bar'))
@mock.patch.object(docker.api.ImageApiMixin, 'remove_image')
@mock.patch.object(Image, '_inspect_and_map')
def test_delete(self, mock_image_inspect, mock_docker_image_remove):
mock_docker_image_remove.return_value = None
image = Image(client=self.docker_client, identifier='123')
image.id = '123'
self.assertTrue(image.delete())
@mock.patch.object(docker.api.ImageApiMixin, 'remove_image')
@mock.patch.object(Image, '_inspect_and_map')
def test_delete_failure(self, mock_image_inspect, mock_docker_image_remove):
mock_docker_image_remove.return_value = True
image = Image(client=self.docker_client, identifier='123')
image.id = '123'
self.assertFalse(image.delete())
mock_docker_image_remove.side_effect = Exception
mock_docker_image_remove.return_value = True
image = Image(client=self.docker_client, identifier='123')
image.id = '123'
self.assertFalse(image.delete())
@mock.patch.object(docker.api.ImageApiMixin, 'inspect_image')
@mock.patch('freight_forwarder.image.ContainerConfig')
def test_inspect_and_map(self, mock_container_config, mock_docker_image_inspect):
mock_container_config.return_value = mock.Mock(spec=Config())
mock_docker_image_inspect.return_value = {
'Comment': '',
'Id': '123',
'VirtualSize': 491552314,
'Container': '123',
'Os': 'linux',
'Parent': '456',
'Created': '2015-11-23T23:11:31.282086214Z',
'Architecture': 'amd64',
'DockerVersion': '1.8.2',
'Size': 0,
}
image = Image(client=self.docker_client, identifier='123')
self.assertEqual(image.parent, '456')
@mock.patch.object(docker.api.ImageApiMixin, 'inspect_image')
def test_inspect_and_map_failure(self, mock_docker_image_inspect):
mock_docker_image_inspect.side_effect = Exception
with self.assertRaises(Exception):
Image(client=self.docker_client, identifier='123')
@mock.patch.object(docker.api.ImageApiMixin, 'images')
@mock.patch.object(Image, '_inspect_and_map')
def test_all(self, mock_image_inspect, mock_docker_image_images):
mock_docker_image_images.return_value = [{
'VirtualSize': 817117650,
'RepoTags': [],
'Labels': {},
'Size': 0,
'Created': 1453314552,
'Id': '123',
'ParentId': '456',
'RepoDigests': []
}]
images = Image.all(client=self.docker_client)
self.assertIsInstance(images['123'], Image)
def test_all_failure(self):
with self.assertRaises(TypeError):
Image.all(client=False)
@mock.patch.object(docker.api.ImageApiMixin, 'images')
@mock.patch.object(Image, '_inspect_and_map')
def test_find_by_name(self, mock_image_inspect, mock_docker_image_images):
mock_docker_image_images.return_value = [{
'VirtualSize': 817117650,
'RepoTags': ['foo'],
'Labels': {},
'Size': 0,
'Created': 1453314552,
'Id': '123',
'ParentId': '456',
'RepoDigests': []
}]
image = Image.find_by_name(client=self.docker_client, name='foo')
self.assertIsInstance(image, Image)
self.assertEqual(image.identifier, 'foo')
def test_find_by_name_failure(self):
with self.assertRaises(TypeError):
Image.find_by_name(client=False, name='foo')
@mock.patch.object(docker.api.ImageApiMixin, 'images')
@mock.patch.object(Image, '_inspect_and_map')
def test_find_all_by_name(self, mock_image_inspect, mock_docker_image_images):
mock_docker_image_images.return_value = [{
'VirtualSize': 817117650,
'RepoTags': ['foo'],
'Labels': {},
'Size': 0,
'Created': 1453314552,
'Id': '123',
'ParentId': '456',
'RepoDigests': []
}]
images = Image.find_all_by_name(client=self.docker_client, name='foo')
self.assertIsInstance(images['123'], Image)
self.assertEqual(images['123'].identifier, 'foo')
@mock.patch.object(docker.api.ImageApiMixin, 'pull')
@mock.patch.object(requests.Session, 'close')
@mock.patch.object(Image, '_inspect_and_map')
def test_pull(self, mock_image_inspect, mock_request_session_close, mock_docker_image_pull):
mock_registry = mock.Mock(spec=V2(address='https://v2.com'))
mock_registry.ping.return_value = True
mock_registry.location = ''
mock_docker_image_pull.return_value = []
image = Image.pull(client=self.docker_client, registry=mock_registry, repository_tag='foo:bar')
self.assertIsInstance(image, Image)
self.assertEqual(image.identifier, '/foo:bar')
def test_pull_failure(self):
mock_registry = mock.Mock(spec=V2(address='https://v2.com'))
mock_registry.ping.return_value = False
with self.assertRaises(TypeError):
Image.pull(client=False, registry=mock_registry, repository_tag='foo')
with self.assertRaises(Exception):
Image.pull(client=self.docker_client, registry=False, repository_tag='foo')
with self.assertRaises(TypeError):
Image.pull(client=self.docker_client, registry=mock_registry, repository_tag=False)
with self.assertRaises(AttributeError):
Image.pull(client=self.docker_client, registry=mock_registry, repository_tag='foo:bar', tag='abc')
with self.assertRaises(ValueError):
Image.pull(client=self.docker_client, registry=mock_registry, repository_tag='foo:bar:abc')
with self.assertRaises(Exception):
Image.pull(client=self.docker_client, registry=mock_registry, repository_tag='foo:bar')
@mock.patch.object(docker.api.BuildApiMixin, 'build')
@mock.patch.object(requests.Session, 'close')
@mock.patch.object(Image, '_inspect_and_map')
@mock.patch('freight_forwarder.image.os')
def test_build(self, mock_os, mock_image_inspect, mock_request_session_close, mock_docker_build):
mock_os.path.exists.return_value = True
mock_os.path.isfile.return_value = True
mock_os.getcwd.return_value = '/'
mock_os.path.realpath.return_value = '/'
mock_docker_build.return_value = []
image = Image.build(client=self.docker_client, repository_tag='foo', docker_file='abc')
self.assertIsInstance(image, Image)
self.assertEqual(image.identifier, 'foo:latest')
@mock.patch('freight_forwarder.image.os')
def test_build_failure(self, mock_os):
mock_os.path.exists.return_value = True
with self.assertRaises(TypeError):
Image.build(client=False, repository_tag='foo', docker_file='abc')
with self.assertRaises(Exception):
Image.build(client=self.docker_client, repository_tag='foo', docker_file=False)
with self.assertRaises(TypeError):
Image.build(client=self.docker_client, repository_tag=False, docker_file='abc')
with self.assertRaises(TypeError):
Image.build(client=self.docker_client, repository_tag='foo', docker_file='abc', use_cache='yes')
|
fin09pcap/freight_forwarder
|
tests/unit/image_test.py
|
Python
|
mit
| 11,378
|
import ddapp
import os
import sys
import argparse
import json
class DRCArgParser(object):
def __init__(self):
self._args = None
self._parser = None
self.strict = False
def getArgs(self):
if self._args is None:
self.parseArgs()
return self._args
def getParser(self):
if self._parser is None:
self._parser = argparse.ArgumentParser()
self.addDefaultArgs(self._parser)
return self._parser
def parseArgs(self):
parser = self.getParser()
sys.argv = [str(v) for v in sys.argv]
if not self.strict:
self._args, unknown = parser.parse_known_args()
else:
self._args = parser.parse_args()
def getDefaultBotConfigFile(self):
return os.path.join(ddapp.getDRCBaseDir(), 'software/config/drc_robot.cfg')
def getDefaultDirectorConfigFile(self):
        return self.getDefaultAtlasV5DirectorConfigFile()
def getDefaultAtlasV3DirectorConfigFile(self):
return os.path.join(ddapp.getDRCBaseDir(),
'software/models/atlas_v3/director_config.json')
def getDefaultAtlasV4DirectorConfigFile(self):
return os.path.join(ddapp.getDRCBaseDir(),
'software/models/atlas_v4/director_config.json')
def getDefaultAtlasV5DirectorConfigFile(self):
return os.path.join(ddapp.getDRCBaseDir(),
'software/models/atlas_v5/director_config.json')
def getDefaultValkyrieV1DirectorConfigFile(self):
return os.path.join(ddapp.getDRCBaseDir(),
'software/models/valkyrie/director_config.json')
def getDefaultValkyrieV2DirectorConfigFile(self):
return os.path.join(ddapp.getDRCBaseDir(),
'software/models/val_description/director_config.json')
def getDefaultKukaLWRConfigFile(self):
return os.path.join(ddapp.getDRCBaseDir(),
'software/models/lwr_defs/director_config.json')
def addDefaultArgs(self, parser):
parser.add_argument('-c', '--config_file', type=str, help='Robot cfg file',
default=self.getDefaultBotConfigFile())
parser.add_argument('--matlab-host', type=str, help='Hostname to use external matlab server')
        parser.add_argument('-exo', '--exo', action='store_true', dest='exo', help='Publish planning requests to external planner instead of Drake')
directorConfig = parser.add_mutually_exclusive_group(required=False)
directorConfig.add_argument('-v3', '--atlas_v3', dest='directorConfigFile',
action='store_const',
const=self.getDefaultAtlasV3DirectorConfigFile(),
help='Use Atlas V3')
directorConfig.add_argument('-v4', '--atlas_v4', dest='directorConfigFile',
action='store_const',
const=self.getDefaultAtlasV4DirectorConfigFile(),
help='Use Atlas V4')
directorConfig.add_argument('-v5', '--atlas_v5', dest='directorConfigFile',
action='store_const',
const=self.getDefaultAtlasV5DirectorConfigFile(),
help='Use Atlas V5')
directorConfig.add_argument('-val1', '--valkyrie_v1', dest='directorConfigFile',
action='store_const',
const=self.getDefaultValkyrieV1DirectorConfigFile(),
help='Use Valkyrie')
directorConfig.add_argument('-val2', '--valkyrie_v2', dest='directorConfigFile',
action='store_const',
const=self.getDefaultValkyrieV2DirectorConfigFile(),
help='Use Valkyrie')
directorConfig.add_argument('-lwr', '--lwr', dest='directorConfigFile',
action='store_const',
const=self.getDefaultKukaLWRConfigFile(),
help='Use Kuka LWR')
directorConfig.add_argument('--director_config', dest='directorConfigFile',
type=str,
help='JSON file specifying which urdfs to use')
parser.set_defaults(directorConfigFile=self.getDefaultDirectorConfigFile())
parser.add_argument('data_files', type=str, nargs='*',
help='data files to load at startup')
_argParser = None
def getGlobalArgParser():
global _argParser
if not _argParser:
_argParser = DRCArgParser()
return _argParser
def requireStrict():
global _argParser
_argParser = None
getGlobalArgParser().strict = True
def args():
return getGlobalArgParser().getArgs()
_directorConfig = None
def getDirectorConfig():
global _directorConfig
if not _directorConfig:
with open(args().directorConfigFile) as directorConfigFile:
_directorConfig = json.load(directorConfigFile)
return _directorConfig
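# Illustrative usage from application code (not part of this module):
#   from ddapp import drcargs
#   directorConfig = drcargs.getDirectorConfig()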
|
edowson/director
|
src/python/ddapp/drcargs.py
|
Python
|
bsd-3-clause
| 5,142
|
from treebeard._ref import ref
from treebeard._rel import rel, up
from mock import sentinel
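# These tests rely on the pytest fixture `example_path` (presumably provided by a
# conftest.py) in which the leaf at path foo.bar.baz holds sentinel.foobarbaz,
# so each ref(...) below is expected to resolve to that sentinel value.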
def test_ref_converts_to_leaf_value_specified_by_relative_path(example_path):
example_path.monkey.here_is_a_reference = "foo.bar.baz"
example_path.monkey.lover = sentinel.monkeylover
r = ref(rel.here_is_a_reference)
assert r._convert(example_path.monkey.lover._match()[0]) == sentinel.foobarbaz
def test_ref_converts_to_leaf_value_specified_by_relative_wildcarded_path(example_path):
example_path.monkey.monkey.here_is_a_reference = "foo.bar.baz"
example_path.monkey.lover = sentinel.monkeylover
r = ref(rel['%i'].here_is_a_reference)
assert r._convert(example_path['%i'].lover._match()[0]) == sentinel.foobarbaz
def test_ref_converts_to_leaf_value_specified_by_relative_up_path(example_path):
example_path.badger.here_is_a_reference = "foo.bar.baz"
example_path.monkey.lover = sentinel.monkeylover
r = ref(up(1).badger.here_is_a_reference)
assert r._convert(example_path.monkey.lover._match()[0]) == sentinel.foobarbaz
def test_ref_converts_to_leaf_value_specified_by_relative_wildcarded_up_path(example_path):
example_path.refs.monkey.here_is_a_reference = "foo.bar.baz"
example_path.monkey.lover = sentinel.monkeylover
r = ref(up(1).refs['%i'].here_is_a_reference)
assert r._convert(example_path['%i'].lover._match()[0]) == sentinel.foobarbaz
|
dbew/treebeard
|
tests/unit/test_ref_and_rel.py
|
Python
|
bsd-2-clause
| 1,450
|
"""Qsignature: detection of sample mixups.
https://sourceforge.net/p/adamajava/wiki/qSignature/
"""
import os
import shutil
import subprocess
import lxml
import pysam
import toolz as tz
from bcbio import bam, utils
from bcbio.distributed.transaction import file_transaction
from bcbio.log import logger
from bcbio.provenance import do
from bcbio.pipeline import datadict as dd
from bcbio.pipeline import config_utils
def run(bam_file, data, out_dir):
""" Run SignatureGenerator to create normalize vcf that later will be input of qsignature_summary
:param bam_file: (str) path of the bam_file
:param data: (list) list containing the all the dictionary
for this sample
:param out_dir: (str) path of the output
:returns: (string) output normalized vcf file
"""
qsig = config_utils.get_program("qsignature", data["config"])
res_qsig = config_utils.get_resources("qsignature", data["config"])
jvm_opts = " ".join(res_qsig.get("jvm_opts", ["-Xms750m", "-Xmx8g"]))
if not qsig:
logger.info("There is no qsignature tool. Skipping...")
return None
position = dd.get_qsig_file(data)
mixup_check = dd.get_mixup_check(data)
if mixup_check and mixup_check.startswith("qsignature"):
utils.safe_makedir(out_dir)
if not position:
logger.info("There is no qsignature for this species: %s"
% tz.get_in(['genome_build'], data))
return None
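        # "qsignature_full" analyzes the whole BAM; otherwise both the BAM and
        # the qsignature SNPs are sliced down to chr21 to reduce computation time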
if mixup_check == "qsignature_full":
down_bam = bam_file
else:
down_bam = _slice_bam_chr21(bam_file, data)
position = _slice_vcf_chr21(position, out_dir)
out_name = os.path.basename(down_bam).replace("bam", "qsig.vcf")
out_file = os.path.join(out_dir, out_name)
log_file = os.path.join(out_dir, "qsig.log")
cores = dd.get_cores(data)
base_cmd = ("{qsig} {jvm_opts} "
"org.qcmg.sig.SignatureGenerator "
"--noOfThreads {cores} "
"-log {log_file} -i {position} "
"-i {down_bam} ")
if not os.path.exists(out_file):
file_qsign_out = "{0}.qsig.vcf".format(down_bam)
do.run(base_cmd.format(**locals()), "qsignature vcf generation: %s" % dd.get_sample_name(data))
if os.path.exists(file_qsign_out):
with file_transaction(data, out_file) as file_txt_out:
shutil.move(file_qsign_out, file_txt_out)
else:
raise IOError("File doesn't exist %s" % file_qsign_out)
return out_file
return None
def summary(*samples):
"""Run SignatureCompareRelatedSimple module from qsignature tool.
Creates a matrix of pairwise comparison among samples. The
function will not run if the output exists
:param samples: list with only one element containing all samples information
:returns: (dict) with the path of the output to be joined to summary
"""
warnings, similar = [], []
qsig = config_utils.get_program("qsignature", samples[0][0]["config"])
if not qsig:
return [[]]
res_qsig = config_utils.get_resources("qsignature", samples[0][0]["config"])
jvm_opts = " ".join(res_qsig.get("jvm_opts", ["-Xms750m", "-Xmx8g"]))
work_dir = samples[0][0]["dirs"]["work"]
count = 0
for data in samples:
data = data[0]
vcf = tz.get_in(["summary", "qc", "qsignature", "base"], data)
if vcf:
count += 1
vcf_name = dd.get_sample_name(data) + ".qsig.vcf"
out_dir = utils.safe_makedir(os.path.join(work_dir, "qsignature"))
if not os.path.lexists(os.path.join(out_dir, vcf_name)):
os.symlink(vcf, os.path.join(out_dir, vcf_name))
if count > 0:
qc_out_dir = utils.safe_makedir(os.path.join(work_dir, "qc", "qsignature"))
out_file = os.path.join(qc_out_dir, "qsignature.xml")
out_ma_file = os.path.join(qc_out_dir, "qsignature.ma")
out_warn_file = os.path.join(qc_out_dir, "qsignature.warnings")
log = os.path.join(work_dir, "qsignature", "qsig-summary.log")
if not os.path.exists(out_file):
with file_transaction(samples[0][0], out_file) as file_txt_out:
base_cmd = ("{qsig} {jvm_opts} "
"org.qcmg.sig.SignatureCompareRelatedSimple "
"-log {log} -dir {out_dir} "
"-o {file_txt_out} ")
do.run(base_cmd.format(**locals()), "qsignature score calculation")
error, warnings, similar = _parse_qsignature_output(out_file, out_ma_file,
out_warn_file, samples[0][0])
return [{'total samples': count,
'similar samples pairs': len(similar),
'warnings samples pairs': len(warnings),
'error samples': list(error),
'out_dir': qc_out_dir}]
else:
return []
def _parse_qsignature_output(in_file, out_file, warning_file, data):
""" Parse xml file produced by qsignature
:param in_file: (str) with the path to the xml file
:param out_file: (str) with the path to output file
:param warning_file: (str) with the path to warning file
    :returns: (tuple) of sets (error, warnings, similar) with sample pairs that may be duplicated or related
"""
name = {}
error, warnings, similar = set(), set(), set()
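    # score thresholds: == same means identical reads (error), < replicate means
    # likely replicates (warning), < related means possibly related samples (note)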
same, replicate, related = 0, 0.1, 0.18
mixup_check = dd.get_mixup_check(data)
if mixup_check == "qsignature_full":
same, replicate, related = 0, 0.01, 0.061
with open(in_file, 'r') as in_handle:
with file_transaction(data, out_file) as out_tx_file:
with file_transaction(data, warning_file) as warn_tx_file:
with open(out_tx_file, 'w') as out_handle:
with open(warn_tx_file, 'w') as warn_handle:
et = lxml.etree.parse(in_handle)
for i in list(et.iter('file')):
name[i.attrib['id']] = os.path.basename(i.attrib['name']).replace(".qsig.vcf", "")
for i in list(et.iter('comparison')):
msg = None
pair = "-".join([name[i.attrib['file1']], name[i.attrib['file2']]])
out_handle.write("%s\t%s\t%s\n" %
(name[i.attrib['file1']], name[i.attrib['file2']], i.attrib['score']))
if float(i.attrib['score']) == same:
msg = 'qsignature ERROR: read same samples:%s\n'
error.add(pair)
elif float(i.attrib['score']) < replicate:
msg = 'qsignature WARNING: read similar/replicate samples:%s\n'
warnings.add(pair)
elif float(i.attrib['score']) < related:
msg = 'qsignature NOTE: read relative samples:%s\n'
similar.add(pair)
if msg:
logger.info(msg % pair)
warn_handle.write(msg % pair)
return error, warnings, similar
def _slice_bam_chr21(in_bam, data):
"""
    Return a BAM file containing only chromosome 21
"""
sambamba = config_utils.get_program("sambamba", data["config"])
out_file = "%s-chr%s" % os.path.splitext(in_bam)
if not utils.file_exists(out_file):
bam.index(in_bam, data['config'])
with pysam.Samfile(in_bam, "rb") as bamfile:
bam_contigs = [c["SN"] for c in bamfile.header["SQ"]]
chromosome = "21"
if "chr21" in bam_contigs:
chromosome = "chr21"
with file_transaction(data, out_file) as tx_out_file:
cmd = ("{sambamba} slice -o {tx_out_file} {in_bam} {chromosome}").format(**locals())
out = subprocess.check_output(cmd, shell=True)
return out_file
def _slice_vcf_chr21(vcf_file, out_dir):
"""
Slice chr21 of qsignature SNPs to reduce computation time
"""
tmp_file = os.path.join(out_dir, "chr21_qsignature.vcf")
if not utils.file_exists(tmp_file):
cmd = ("grep chr21 {vcf_file} > {tmp_file}").format(**locals())
out = subprocess.check_output(cmd, shell=True)
return tmp_file
|
brainstorm/bcbio-nextgen
|
bcbio/qc/qsignature.py
|
Python
|
mit
| 8,491
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Red Hat, inc
# Written by Seth Vidal
# based on the mount modules from salt and puppet
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: mount
short_description: Control active and configured mount points
description:
- This module controls active and configured mount points in C(/etc/fstab).
version_added: "0.6"
options:
name:
description:
- "path to the mount point, eg: C(/mnt/files)"
required: true
default: null
aliases: []
src:
description:
- device to be mounted on I(name).
required: true
default: null
fstype:
description:
- file-system type
required: true
default: null
opts:
description:
- mount options (see fstab(8))
required: false
default: null
dump:
description:
- dump (see fstab(8))
required: false
default: null
passno:
description:
- passno (see fstab(8))
required: false
default: null
state:
description:
- If C(mounted) or C(unmounted), the device will be actively mounted or unmounted
as needed and appropriately configured in I(fstab).
C(absent) and C(present) only deal with
I(fstab) but will not affect current mounting. If specifying C(mounted) and the mount
point is not present, the mount point will be created. Similarly, specifying C(absent) will remove the mount point directory.
required: true
choices: [ "present", "absent", "mounted", "unmounted" ]
default: null
fstab:
description:
- file to use instead of C(/etc/fstab). You shouldn't use that option
unless you really know what you are doing. This might be useful if
you need to configure mountpoints in a chroot environment.
required: false
default: /etc/fstab
notes: []
requirements: []
author: Seth Vidal
'''
EXAMPLES = '''
# Mount DVD read-only
- mount: name=/mnt/dvd src=/dev/sr0 fstype=iso9660 opts=ro state=present
# Mount up device by label
- mount: name=/srv/disk src='LABEL=SOME_LABEL' fstype=ext4 state=present
# Mount up device by UUID
- mount: name=/home src='UUID=b3e48f45-f933-4c8e-a700-22a159ec9077' fstype=xfs opts=noatime state=present
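# Mount and configure a filesystem at boot (device path is illustrative)
- mount: name=/mnt/data src=/dev/sdb1 fstype=ext4 state=mounted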
'''
def write_fstab(lines, dest):
fs_w = open(dest, 'w')
for l in lines:
fs_w.write(l)
fs_w.flush()
fs_w.close()
def set_mount(module, **kwargs):
""" set/change a mount point location in fstab """
# kwargs: name, src, fstype, opts, dump, passno, state, fstab=/etc/fstab
args = dict(
opts = 'defaults',
dump = '0',
passno = '0',
fstab = '/etc/fstab'
)
args.update(kwargs)
new_line = '%(src)s %(name)s %(fstype)s %(opts)s %(dump)s %(passno)s\n'
to_write = []
exists = False
changed = False
for line in open(args['fstab'], 'r').readlines():
if not line.strip():
to_write.append(line)
continue
if line.strip().startswith('#'):
to_write.append(line)
continue
if len(line.split()) != 6:
# not sure what this is or why it is here
# but it is not our fault so leave it be
to_write.append(line)
continue
ld = {}
ld['src'], ld['name'], ld['fstype'], ld['opts'], ld['dump'], ld['passno'] = line.split()
if ld['name'] != args['name']:
to_write.append(line)
continue
# it exists - now see if what we have is different
exists = True
for t in ('src', 'fstype','opts', 'dump', 'passno'):
if ld[t] != args[t]:
changed = True
ld[t] = args[t]
if changed:
to_write.append(new_line % ld)
else:
to_write.append(line)
if not exists:
to_write.append(new_line % args)
changed = True
if changed and not module.check_mode:
write_fstab(to_write, args['fstab'])
return (args['name'], changed)
def unset_mount(module, **kwargs):
""" remove a mount point from fstab """
# kwargs: name, src, fstype, opts, dump, passno, state, fstab=/etc/fstab
args = dict(
opts = 'default',
dump = '0',
passno = '0',
fstab = '/etc/fstab'
)
args.update(kwargs)
to_write = []
changed = False
for line in open(args['fstab'], 'r').readlines():
if not line.strip():
to_write.append(line)
continue
if line.strip().startswith('#'):
to_write.append(line)
continue
if len(line.split()) != 6:
# not sure what this is or why it is here
# but it is not our fault so leave it be
to_write.append(line)
continue
ld = {}
ld['src'], ld['name'], ld['fstype'], ld['opts'], ld['dump'], ld['passno'] = line.split()
if ld['name'] != args['name']:
to_write.append(line)
continue
# if we got here we found a match - continue and mark changed
changed = True
if changed and not module.check_mode:
write_fstab(to_write, args['fstab'])
return (args['name'], changed)
def mount(module, **kwargs):
""" mount up a path or remount if needed """
mount_bin = module.get_bin_path('mount')
name = kwargs['name']
if os.path.ismount(name):
cmd = [ mount_bin , '-o', 'remount', name ]
else:
cmd = [ mount_bin, name ]
rc, out, err = module.run_command(cmd)
if rc == 0:
return 0, ''
else:
return rc, out+err
def umount(module, **kwargs):
""" unmount a path """
umount_bin = module.get_bin_path('umount')
name = kwargs['name']
cmd = [umount_bin, name]
rc, out, err = module.run_command(cmd)
if rc == 0:
return 0, ''
else:
return rc, out+err
def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(required=True, choices=['present', 'absent', 'mounted', 'unmounted']),
name = dict(required=True),
opts = dict(default=None),
passno = dict(default=None),
dump = dict(default=None),
src = dict(required=True),
fstype = dict(required=True),
fstab = dict(default='/etc/fstab')
),
supports_check_mode=True
)
changed = False
rc = 0
args = {
'name': module.params['name'],
'src': module.params['src'],
'fstype': module.params['fstype']
}
if module.params['passno'] is not None:
args['passno'] = module.params['passno']
if module.params['opts'] is not None:
args['opts'] = module.params['opts']
if ' ' in args['opts']:
module.fail_json(msg="unexpected space in 'opts' parameter")
if module.params['dump'] is not None:
args['dump'] = module.params['dump']
if module.params['fstab'] is not None:
args['fstab'] = module.params['fstab']
# if fstab file does not exist, we first need to create it. This mainly
    # happens when the fstab option is passed to the module.
if not os.path.exists(args['fstab']):
if not os.path.exists(os.path.dirname(args['fstab'])):
os.makedirs(os.path.dirname(args['fstab']))
open(args['fstab'],'a').close()
# absent == remove from fstab and unmounted
# unmounted == do not change fstab state, but unmount
# present == add to fstab, do not change mount state
# mounted == add to fstab if not there and make sure it is mounted, if it has changed in fstab then remount it
state = module.params['state']
name = module.params['name']
if state == 'absent':
name, changed = unset_mount(module, **args)
if changed and not module.check_mode:
if os.path.ismount(name):
res,msg = umount(module, **args)
if res:
module.fail_json(msg="Error unmounting %s: %s" % (name, msg))
if os.path.exists(name):
try:
os.rmdir(name)
except (OSError, IOError), e:
module.fail_json(msg="Error rmdir %s: %s" % (name, str(e)))
module.exit_json(changed=changed, **args)
if state == 'unmounted':
if os.path.ismount(name):
if not module.check_mode:
res,msg = umount(module, **args)
if res:
module.fail_json(msg="Error unmounting %s: %s" % (name, msg))
changed = True
module.exit_json(changed=changed, **args)
if state in ['mounted', 'present']:
if state == 'mounted':
if not os.path.exists(name) and not module.check_mode:
try:
os.makedirs(name)
except (OSError, IOError), e:
module.fail_json(msg="Error making dir %s: %s" % (name, str(e)))
name, changed = set_mount(module, **args)
if state == 'mounted':
res = 0
if os.path.ismount(name):
if changed and not module.check_mode:
res,msg = mount(module, **args)
else:
changed = True
if not module.check_mode:
res,msg = mount(module, **args)
if res:
module.fail_json(msg="Error mounting %s: %s" % (name, msg))
module.exit_json(changed=changed, **args)
module.fail_json(msg='Unexpected position reached')
sys.exit(0)
# import module snippets
from ansible.module_utils.basic import *
main()
|
fti7/ansible-modules-core
|
system/mount.py
|
Python
|
gpl-3.0
| 10,314
|
import datetime
from django.conf import settings
from django.contrib.auth.models import User
from LabtrackerCore.Email import EmailSection
from Email import NewIssueEmail
import models as im
import forms
import utils
from django.core.exceptions import ObjectDoesNotExist
class IssueUpdater(object):
def __init__(self, request, issue):
self.request = request
self.issue_pk = issue.pk
self.valid = True
self.data = request.POST.copy()
self.issue = issue
self.updateForm = forms.UpdateIssueForm(self.data, instance=issue)
try:
if self.issue.assignee:
assigner = self.issue.assignee
else:
assigner = None
except:
assigner = None
if self.issue.resolved_state:
            resolver = self.issue.resolved_state
else:
resolver = None
if self.issue.cc.all():
ccers = self.issue.cc.all()
else:
ccers = None
self.old = {
'cc': ccers,
'ptypes': self.issue.problem_type.all(),
'assignee': assigner,
'resolved': resolver,
}
# validate and bind form
if not self.updateForm.is_valid():
self.valid = False
else:
self.valid = True
self.commentForm = None
if self.data.has_key('comment'):
self.data.__setitem__('user', request.user.id)
self.data.__setitem__('issue', issue.pk)
self.commentForm = forms.AddCommentForm(self.data)
if not self.commentForm.is_valid():
self.valid = False
#else:
#self.commentForm = forms.AddCommentForm()
# deal with hooks
self.extraForm = None
if issue.it:
hook = utils.issueHooks.getHook("updateForm", issue.it.name)
#hook is valid for testChangeAssignee, empty for devsite
if hook:
self.extraForm = hook(issue, request)
if self.extraForm:
if not self.extraForm.is_valid():
#when called by test change Assignee, fails
self.valid = False
def getEmail(self):
"""
Create the email, and return it
"""
if not self.is_valid():
raise ValueError("Invalid update, cannot get email")
issue = im.Issue.objects.get(pk=self.issue_pk)
issue_email = NewIssueEmail(issue)
update_data = self.updateForm.cleaned_data
# need to add the CC users as well
if self.data.has_key('cc'):
issue_email.addCCSection(self.old['cc'],
update_data['cc'])
        # record the assignee change as well
if self.data.has_key('assignee') and \
(self.old['assignee'] !=update_data['assignee']):
issue_email.addAssigneeSection(str(self.old['assignee']),
str(update_data['assignee']))
if self.data.has_key('problem_type'):
issue_email.addProblemTypeSection(self.old['ptypes'],
self.data.getlist('problem_type'))
if self.data.has_key('resolved_state') and \
self.old['resolved'] != update_data['resolved_state']:
issue_email.addResolveStateSection(update_data['resolved_state'])
if self.data.has_key('comment'):
issue_email.addCommentSection(self.request.user,
self.commentForm.cleaned_data['comment'])
title = self.issue.title
try:
title = title.replace('@', '[at]')
except:
pass
issue_email.subject = "[" + settings.EMAIL_SUBJECT_PREFIX + "]" + ' Change to the Issue: %s' % (title)
if self.data.has_key('cc'):
for user in update_data['cc']:
issue_email.addCC(user.email)
return issue_email
def getUpdateActionString(self):
"""
Caller is in charge of calling updateHistory on the actionStrings
"""
if not self.is_valid():
raise ValueError("Invalid update, cannot get action string")
update_data = self.updateForm.cleaned_data
actionStrings = []
if not update_data.has_key('cc'):
data = None
else:
data = update_data['cc']
if str(self.old['cc']) != str(data):
old_cc_list = self.old['cc']
cc_list = data
cc_list1 = cc_list
if cc_list:
for cc in cc_list:
                    pkval = cc.pk
                    if not old_cc_list:
                        actionStrings.append("Added %s to the CC list" % (str(cc)))
                    else:
                        try:
                            old_cc_list.get(pk=pkval)
except ObjectDoesNotExist:
actionStrings.append("Added %s to the CC list" % (str(cc)))
if old_cc_list:
for old_cc in old_cc_list:
                    pkval = old_cc.pk
                    if not cc_list1:
                        actionStrings.append("Removed %s from the CC list" % (str(old_cc)))
                    else:
                        try:
                            cc_list1.get(pk=pkval)
except ObjectDoesNotExist:
actionStrings.append("Removed %s from the CC list" % (str(old_cc)))
        if self.data.has_key('assignee') and \
(self.old['assignee'] != update_data['assignee']):
actionStrings.append("Assigned to %s" % (update_data['assignee']))
if self.data.has_key('problem_type') and (str(self.old['ptypes']) != str(update_data['problem_type'])):
old_problems = self.old['ptypes']
problems = update_data['problem_type']
problems1 = problems
if problems:
for problem in problems:
if not old_problems:
actionStrings.append("Added the problem type %s" % (str(problem)))
else:
                        pkval = problem.pk
                        try:
                            old_problems.get(pk=pkval)
except ObjectDoesNotExist:
actionStrings.append("Added the problem type %s" % (str(problem)))
if old_problems:
for old_problem in old_problems:
if not problems1:
actionStrings.append("Removed the problem type %s" % (str(old_problem)))
else:
                        pkval = old_problem.pk
                        try:
                            problems1.get(pk=pkval)
except ObjectDoesNotExist:
actionStrings.append("Removed the problem type %s" % (str(old_problem)))
if self.data.has_key('resolved_state') and \
self.old['resolved']!= update_data['resolved_state']:
actionStrings.append("Changed state to %s" % \
(update_data['resolved_state']))
return actionStrings
def save(self):
if self.commentForm:
if self.commentForm.is_valid():
self.commentForm.save()
self.updateForm.save()
if self.extraForm:
if self.extraForm.is_valid():
self.extraForm.save()
def is_valid(self):
return self.valid
|
abztrakt/labtracker
|
IssueTracker/issue.py
|
Python
|
apache-2.0
| 7,685
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''Script that raises ValueError'''
raise ValueError
|
apache/allura
|
Allura/allura/tests/tscript_error.py
|
Python
|
apache-2.0
| 923
|
# https://www.khronos.org/collada/
import os
import io
import re
import traceback
import copy
from collections import OrderedDict
import numpy as np
from PyEngine3D.Common import logger
from PyEngine3D.Utilities import *
def convert_float(data, default=0.0):
try:
return float(data)
except:
pass
return default
def convert_int(data, default=0):
try:
return int(data)
except:
pass
return default
def convert_list(data, data_type=float, stride=1):
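    # e.g. convert_list("1 2 3 4", float, stride=2) -> [[1.0, 2.0], [3.0, 4.0]]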
if data:
data_list = [data_type(x) for x in data.strip().split()]
else:
return []
if stride < 2:
return data_list
else:
return [data_list[i * stride:i * stride + stride] for i in range(int(len(data_list) / stride))]
def parsing_source_data(xml_element):
"""
:param xml_element:
:return: {'source_id':source_data}
"""
sources = {}
for xml_source in xml_element.findall('source'):
source_id = get_xml_attrib(xml_source, 'id')
stride = get_xml_attrib(xml_source.find('technique_common/accessor'), 'stride')
stride = convert_int(stride, 0)
source_data = None
for tag, data_type in [('float_array', float), ('Name_array', str)]:
xml_array = xml_source.find(tag)
if xml_array is not None:
source_text = get_xml_text(xml_array)
if source_text:
source_data = convert_list(source_text, data_type, stride)
break
sources[source_id] = source_data
return sources
def parsing_sematic(xml_element):
"""
:param xml_element:
    :return: {'semantic': {'source', 'offset', 'set'}}
"""
semantics = {}
for xml_semantic in xml_element.findall('input'):
set_number = get_xml_attrib(xml_semantic, 'set', '0')
semantic = get_xml_attrib(xml_semantic, 'semantic')
if set_number != '' and set_number != '0':
semantic += set_number # ex) VERTEX0, TEXCOORD0
source = get_xml_attrib(xml_semantic, 'source')
if source.startswith("#"):
source = source[1:]
offset = convert_int(get_xml_attrib(xml_semantic, 'offset'), 0)
semantics[semantic] = dict(source=source, offset=offset, set=set_number)
return semantics
class ColladaNode:
"""
Parsing Visual Scene Node
"""
def __init__(self, xml_node, parent=None, depth=0):
self.valid = False
self.name = get_xml_attrib(xml_node, 'name').replace('.', '_')
self.id = get_xml_attrib(xml_node, 'id').replace('.', '_')
self.type = get_xml_attrib(xml_node, 'type')
self.matrix = Matrix4()
self.parent = parent
self.children = []
self.instance_controller = get_xml_attrib(xml_node.find('instance_controller'), 'url')
if self.instance_controller.startswith('#'):
self.instance_controller = self.instance_controller[1:]
self.instance_geometry = get_xml_attrib(xml_node.find('instance_geometry'), 'url')
if self.instance_geometry.startswith('#'):
self.instance_geometry = self.instance_geometry[1:]
self.parsing_matrix(xml_node)
for xml_child_node in xml_node.findall('node'):
child = ColladaNode(xml_child_node, self, depth + 1)
self.children.append(child)
def parsing_matrix(self, xml_node):
xml_matrix = xml_node.find('matrix')
if xml_matrix is not None:
# transform matrix
matrix = get_xml_text(xml_matrix)
            matrix = [float(x) for x in matrix.split()]
if len(matrix) == 16:
self.matrix = np.array(matrix, dtype=np.float32).reshape(4, 4)
else:
# location, rotation, scale
xml_translate = xml_node.find('translate')
if xml_translate is not None:
                translation = [float(x) for x in get_xml_text(xml_translate).split()]
if len(translation) == 3:
matrix_translate(self.matrix, *translation)
else:
                    logger.error('%s node has an invalid translate.' % self.name)
xml_rotates = xml_node.findall('rotate')
for xml_rotate in xml_rotates:
                rotation = [float(x) for x in get_xml_text(xml_rotate).split()]
if len(rotation) == 4:
axis = get_xml_attrib(xml_rotate, 'sid')
if axis == 'rotationX':
matrix_rotate_x(self.matrix, rotation[3])
elif axis == 'rotationY':
matrix_rotate_y(self.matrix, rotation[3])
elif axis == 'rotationZ':
matrix_rotate_z(self.matrix, rotation[3])
else:
                    logger.error('%s node has an invalid rotate.' % self.name)
xml_scale = xml_node.find('scale')
if xml_scale is not None:
                scale = [float(x) for x in get_xml_text(xml_scale).split()]
if len(scale) == 3:
matrix_scale(self.matrix, *scale)
else:
                    logger.error('%s node has an invalid scale.' % self.name)
class ColladaContoller:
def __init__(self, xml_controller):
self.valid = False
self.name = get_xml_attrib(xml_controller, 'name').replace('.', '_')
self.id = get_xml_attrib(xml_controller, 'id').replace('.', '_')
self.skin_source = ""
self.bind_shape_matrix = Matrix4()
self.bone_names = []
self.bone_indicies = []
self.bone_weights = []
self.inv_bind_matrices = []
self.parsing(xml_controller)
def parsing(self, xml_controller):
xml_skin = xml_controller.find('skin')
if xml_skin is not None:
self.skin_source = get_xml_attrib(xml_skin, 'source', "")
if self.skin_source and self.skin_source.startswith('#'):
self.skin_source = self.skin_source[1:]
# parsing bind_shape_matrix
bind_shape_matrix = get_xml_text(xml_skin.find('bind_shape_matrix'), None)
if bind_shape_matrix:
self.bind_shape_matrix = np.array(convert_list(bind_shape_matrix), dtype=np.float32).reshape(4, 4)
else:
self.bind_shape_matrix = Matrix4()
# parse sources
sources = parsing_source_data(xml_skin)
# get vertex position source id
xml_joints = xml_skin.find('joints')
joins_semantics = {}
if xml_joints is not None:
joins_semantics = parsing_sematic(xml_joints)
# parse vertex weights
xml_vertex_weights = xml_skin.find('vertex_weights')
if xml_vertex_weights is not None:
# parse semantic
weights_semantics = parsing_sematic(xml_vertex_weights)
# parse vertex weights
vcount_text = get_xml_text(xml_vertex_weights.find('vcount'))
v_text = get_xml_text(xml_vertex_weights.find('v'))
vcount_list = convert_list(vcount_text, int)
v_list = convert_list(v_text, int)
                # make geometry data
self.build(sources, joins_semantics, weights_semantics, vcount_list, v_list)
return # done
def build(self, sources, joins_semantics, weights_semantics, vcount_list, v_list):
semantic_stride = len(weights_semantics)
# build weights and indicies
max_bone = 4 # max influence bone count per vertex
weight_source_id = weights_semantics['WEIGHT']['source']
weight_sources = sources[weight_source_id]
index = 0
for vcount in vcount_list:
bone_indicies = []
bone_weights = []
indicies = v_list[index: index + vcount * semantic_stride]
index += vcount * semantic_stride
for v in range(max_bone):
if 'JOINT' in weights_semantics:
offset = weights_semantics['JOINT']['offset']
if v < vcount:
bone_indicies.append(indicies[offset + v * semantic_stride])
else:
bone_indicies.append(0)
if 'WEIGHT' in weights_semantics:
offset = weights_semantics['WEIGHT']['offset']
if v < vcount:
bone_weights.append(weight_sources[indicies[offset + v * semantic_stride]])
else:
bone_weights.append(0.0)
self.bone_indicies.append(bone_indicies)
self.bone_weights.append(bone_weights)
# joints
if 'JOINT' in joins_semantics:
joints_source = joins_semantics['JOINT'].get('source', '')
self.bone_names = sources.get(joints_source, [])
# INV_BIND_MATRIX
if 'INV_BIND_MATRIX' in joins_semantics:
inv_bind_matrix_source = joins_semantics['INV_BIND_MATRIX'].get('source', '')
self.inv_bind_matrices = sources.get(inv_bind_matrix_source, [])
self.inv_bind_matrices = [np.array(inv_bind_matrix, dtype=np.float32).reshape(4, 4) for inv_bind_matrix in self.inv_bind_matrices]
self.valid = True
class ColladaAnimation:
def __init__(self, xml_animation, node_name_map):
self.valid = False
self.id = get_xml_attrib(xml_animation, 'id').replace('.', '_')
self.target = "" # target bone name
self.type = "" # transform(Matrix), location.X ... scale.z
self.inputs = []
self.outputs = []
self.interpolations = []
self.in_tangents = []
self.out_tangents = []
self.parsing(xml_animation, node_name_map)
def parsing(self, xml_animation, node_name_map):
sources = parsing_source_data(xml_animation)
joins_semantics = {}
xml_sampler = xml_animation.find('sampler')
if xml_sampler is not None:
joins_semantics = parsing_sematic(xml_sampler)
xml_channel = xml_animation.find('channel')
target = get_xml_attrib(xml_channel, 'target')
if '/' in target:
self.target, self.type = target.split('/', 1)
self.target = node_name_map.get(self.target, self.target)
if 'INPUT' in joins_semantics:
source_name = joins_semantics['INPUT'].get('source', '')
self.inputs = sources.get(source_name, [])
if 'OUTPUT' in joins_semantics:
source_name = joins_semantics['OUTPUT'].get('source', '')
self.outputs = sources.get(source_name, [])
if 'INTERPOLATION' in joins_semantics:
source_name = joins_semantics['INTERPOLATION'].get('source', '')
self.interpolations = sources.get(source_name, [])
if 'IN_TANGENT' in joins_semantics:
source_name = joins_semantics['IN_TANGENT'].get('source', '')
self.in_tangents = sources.get(source_name, [])
if 'OUT_TANGENT' in joins_semantics:
source_name = joins_semantics['OUT_TANGENT'].get('source', '')
self.out_tangents = sources.get(source_name, [])
if self.type == "" or self.target == "" or self.target is None or 0 == len(self.inputs):
self.valid = False
            logger.error('%s has an invalid animation.\n%s' % (self.target, sources))
else:
self.valid = True
# print()
# for key in self.__dict__:
# print(key, self.__dict__[key])
class ColladaGeometry:
def __init__(self, xml_geometry, controllers, nodes):
self.valid = False
self.name = get_xml_attrib(xml_geometry, 'name').replace('.', '_')
self.id = get_xml_attrib(xml_geometry, 'id').replace('.', '_')
self.positions = []
self.bone_indicies = []
self.bone_weights = []
self.normals = []
self.colors = []
self.texcoords = []
self.indices = []
# find matched controller
self.controller = None
for controller in controllers:
if self.id == controller.skin_source:
self.controller = controller
break
# find matrix
self.bind_shape_matrix = Matrix4()
for node in nodes:
if self.name == node.name:
self.bind_shape_matrix = node.matrix
break
if self.controller:
            # precompute bind_shape_matrix as a column-major matrix calculation.
self.bind_shape_matrix = np.dot(controller.bind_shape_matrix, self.bind_shape_matrix)
self.parsing(xml_geometry)
def parsing(self, xml_geometry):
xml_mesh = xml_geometry.find('mesh')
if xml_mesh is not None:
# parse sources
sources = parsing_source_data(xml_mesh)
# get vertex position source id
position_source_id = ""
for xml_position in xml_mesh.findall('vertices/input'):
if get_xml_attrib(xml_position, 'semantic') == 'POSITION':
position_source_id = get_xml_attrib(xml_position, 'source')
if position_source_id.startswith("#"):
position_source_id = position_source_id[1:]
break
# parse polygons
for tag in ('polygons', 'polylist', 'triangles'):
xml_polygons = xml_mesh.find(tag)
if xml_polygons is not None:
# parse semantic
semantics = parsing_sematic(xml_polygons)
semantic_stride = len(semantics)
# parse polygon indices
vertex_index_list = [] # flatten vertex list as triangle
if tag == 'triangles':
vertex_index_list = get_xml_text(xml_polygons.find('p'))
vertex_index_list = convert_list(vertex_index_list, int)
elif tag == 'polylist' or tag == 'polygons':
vcount_list = []
polygon_index_list = []
if tag == 'polylist':
vcount_list = convert_list(get_xml_text(xml_polygons.find('vcount')), int)
# flatten list
polygon_index_list = convert_list(get_xml_text(xml_polygons.find('p')), int)
elif tag == 'polygons':
for xml_p in xml_polygons.findall('p'):
polygon_indices = convert_list(get_xml_text(xml_p), int)
# flatten list
polygon_index_list += polygon_indices
vcount_list.append(int(len(polygon_indices) / semantic_stride))
# triangulate
elapsed_vindex = 0
for vcount in vcount_list:
if vcount == 3:
vertex_index_list += polygon_index_list[
elapsed_vindex: elapsed_vindex + vcount * semantic_stride]
else:
polygon_indices = polygon_index_list[
elapsed_vindex: elapsed_vindex + vcount * semantic_stride]
vertex_index_list += convert_triangulate(polygon_indices, vcount, semantic_stride)
elapsed_vindex += vcount * semantic_stride
                    # make geometry data
self.build(sources, position_source_id, semantics, semantic_stride, vertex_index_list)
return # done
def build(self, sources, position_source_id, semantics, semantic_stride, vertex_index_list):
# check vertex count with bone weight count
if self.controller:
vertex_count = len(sources[position_source_id]) if position_source_id else 0
bone_weight_count = len(self.controller.bone_indicies)
if vertex_count != bone_weight_count:
logger.error(
"Different count. vertex_count : %d, bone_weight_count : %d" % (vertex_count, bone_weight_count))
return
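        # deduplicate vertices: each unique tuple of attribute indices becomes
        # one output vertex, and repeats are reused through indexMap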
indexMap = {}
for i in range(int(len(vertex_index_list) / semantic_stride)):
vertIndices = tuple(vertex_index_list[i * semantic_stride: i * semantic_stride + semantic_stride])
if vertIndices in indexMap:
self.indices.append(indexMap[vertIndices])
else:
self.indices.append(len(indexMap))
indexMap[vertIndices] = len(indexMap)
if 'VERTEX' in semantics:
source_id = position_source_id
offset = semantics['VERTEX']['offset']
                    position = sources[source_id][vertIndices[offset]]
                    self.positions.append(position)
if self.controller:
self.bone_indicies.append(self.controller.bone_indicies[vertIndices[offset]])
self.bone_weights.append(self.controller.bone_weights[vertIndices[offset]])
if 'NORMAL' in semantics:
source_id = semantics['NORMAL']['source']
offset = semantics['NORMAL']['offset']
normal = sources[source_id][vertIndices[offset]]
self.normals.append(normal)
if 'COLOR' in semantics:
source_id = semantics['COLOR']['source']
offset = semantics['COLOR']['offset']
self.colors.append(sources[source_id][vertIndices[offset]])
if 'TEXCOORD' in semantics:
source_id = semantics['TEXCOORD']['source']
offset = semantics['TEXCOORD']['offset']
self.texcoords.append(sources[source_id][vertIndices[offset]])
self.valid = True
class Collada:
def __init__(self, filepath):
try:
xml_root = load_xml(filepath)
except:
logger.error(traceback.format_exc())
return
self.name = os.path.splitext(os.path.split(filepath)[1])[0]
self.collada_version = get_xml_attrib(xml_root, 'version')
self.author = get_xml_text(xml_root.find("asset/contributor/author"))
self.authoring_tool = get_xml_text(xml_root.find("asset/contributor/authoring_tool"))
self.created = get_xml_text(xml_root.find("asset/created"))
self.modified = get_xml_text(xml_root.find("asset/modified"))
self.unit_name = get_xml_attrib(xml_root.find("asset/unit"), 'name', 'meter')
self.unit_meter = convert_float(get_xml_attrib(xml_root.find("asset/unit"), 'meter'))
self.up_axis = get_xml_text(xml_root.find("asset/up_axis"))
self.nodes = []
self.node_name_map = {} # { target: name }
self.geometries = []
self.controllers = []
self.animations = []
for xml_node in xml_root.findall('library_visual_scenes/visual_scene/node'):
            # recursively parse the hierarchy of nodes
node = ColladaNode(xml_node)
self.nodes.append(node)
def gather_node_name_map(nodes, node_name_map):
for node in nodes:
node_name_map[node.id] = node.name
gather_node_name_map(node.children, node_name_map)
gather_node_name_map(self.nodes, self.node_name_map)
for xml_controller in xml_root.findall('library_controllers/controller'):
controller = ColladaContoller(xml_controller)
self.controllers.append(controller)
xml_animations = xml_root.findall('library_animations/animation')
if 0 < len(xml_animations):
temp = xml_animations[0].findall('animation')
if 0 < len(temp):
xml_animations = temp
for xml_animation in xml_animations:
animation = ColladaAnimation(xml_animation, self.node_name_map)
if animation.valid:
self.animations.append(animation)
for xml_geometry in xml_root.findall('library_geometries/geometry'):
geometry = ColladaGeometry(xml_geometry, self.controllers, self.nodes)
self.geometries.append(geometry)
def get_mesh_data(self):
geometry_datas = self.get_geometry_data()
skeleton_datas = self.get_skeleton_data()
animation_datas = self.get_animation_data(skeleton_datas)
mesh_data = dict(
geometry_datas=geometry_datas,
skeleton_datas=skeleton_datas,
animation_datas=animation_datas
)
return mesh_data
def get_skeleton_data(self):
skeleton_datas = []
check_duplicated = []
for controller in self.controllers:
if controller.name not in check_duplicated:
check_duplicated.append(controller.name)
hierachy = {}
root_node = None
                # find root armature
for node in self.nodes:
if node.name == controller.name:
root_node = node
break
def build_hierachy(parent_node, hierachy_tree):
for child in parent_node.children:
if child.name in controller.bone_names:
hierachy_tree[child.name] = dict()
build_hierachy(child, hierachy_tree[child.name])
if root_node:
                    # recursively build the hierarchy of bones
build_hierachy(root_node, hierachy)
inv_bind_matrices = [swap_up_axis_matrix(matrix, True, True, self.up_axis) for matrix in controller.inv_bind_matrices]
skeleton_data = dict(
name=controller.name,
hierachy=hierachy, # bone names map as hierachy
bone_names=controller.bone_names, # bone name list ordered by index
inv_bind_matrices=inv_bind_matrices # inverse matrix of bone
)
skeleton_datas.append(skeleton_data)
return skeleton_datas
def get_animation_data(self, skeleton_datas):
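        # these flags bake parent transforms / inverse bind matrices into each
        # keyframe (see precompute_animation below)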
precompute_parent_matrix = True
precompute_inv_bind_matrix = True
def get_empty_animation_node_data(animation_node_name, bone_name):
return dict(
name=animation_node_name,
target=bone_name
)
def get_animation_node_data(animation_node_name, animation_node):
return dict(
name=animation_node_name,
precompute_parent_matrix=precompute_parent_matrix,
precompute_inv_bind_matrix=precompute_inv_bind_matrix,
target=animation_node.target,
times=animation_node.inputs,
# transforms=[matrix for matrix in transforms],
locations=[extract_location(np.array(matrix, dtype=np.float32).reshape(4, 4)) for matrix in animation_node.outputs],
rotations=[extract_quaternion(np.array(matrix, dtype=np.float32).reshape(4, 4)) for matrix in animation_node.outputs],
scales=[np.array([1.0, 1.0, 1.0], dtype=np.float32) for matrix in animation_node.outputs],
interpoations=animation_node.interpolations,
in_tangents=animation_node.in_tangents,
out_tangents=animation_node.out_tangents
)
def precompute_animation(children_hierachy, bone_names, inv_bind_matrices, parent_matrix, frame=0):
for child in children_hierachy:
for child_anim in self.animations:
if child_anim.target == child:
# just Transpose child bones, no swap y-z.
child_transform = np.array(child_anim.outputs[frame], dtype=np.float32).reshape(4, 4).T
if precompute_parent_matrix:
child_transform = np.dot(child_transform, parent_matrix)
if precompute_inv_bind_matrix:
child_bone_index = bone_names.index(child_anim.target)
child_inv_bind_matrix = inv_bind_matrices[child_bone_index]
child_anim.outputs[frame] = np.dot(child_inv_bind_matrix, child_transform)
else:
child_anim.outputs[frame] = child_transform
                        # recursively precompute child animations
precompute_animation(children_hierachy[child_anim.target], bone_names, inv_bind_matrices, child_transform, frame)
break
# precompute_animation
animation_datas = []
for skeleton_data in skeleton_datas:
hierachy = skeleton_data['hierachy'] # tree data
bone_names = skeleton_data['bone_names']
inv_bind_matrices = skeleton_data['inv_bind_matrices']
for animation in self.animations:
                # Currently parses only the transform matrix. In the future, parse location, rotation and scale separately.
if animation.type != 'transform':
continue
# Find root bone and skeleton data
if animation.target in hierachy:
# precompute all animation frames
for frame, transform in enumerate(animation.outputs):
                        # only the root bone applies the conversion matrix to swap the Y-Z axes
transform = swap_up_axis_matrix(np.array(transform, dtype=np.float32).reshape(4, 4), True, False, self.up_axis)
if precompute_inv_bind_matrix:
bone_index = bone_names.index(animation.target)
inv_bind_matrix = inv_bind_matrices[bone_index]
animation.outputs[frame] = np.dot(inv_bind_matrix, transform)
else:
animation.outputs[frame] = transform
                        # recursively precompute child animations
precompute_animation(hierachy[animation.target], bone_names, inv_bind_matrices, transform, frame)
# generate animation data
animation_data = [] # bone animation data list order by bone index
animation_datas.append(animation_data)
for bone_name in bone_names:
for animation in self.animations:
if animation.target == bone_name:
animation_node_name = "%s_%s_%s" % (self.name, skeleton_data['name'], bone_name)
animation_data.append(get_animation_node_data(animation_node_name, animation))
break
else:
                    logger.warn('animation data not found for %s' % bone_name)
animation_node_name = "%s_%s_%s" % (self.name, skeleton_data['name'], bone_name)
animation_data.append(get_empty_animation_node_data(animation_node_name, bone_name))
return animation_datas
def get_geometry_data(self):
geometry_datas = []
for geometry in self.geometries:
skeleton_name = ""
bone_indicies = []
bone_weights = []
if geometry.controller:
skeleton_name = geometry.controller.name
bone_indicies = copy.deepcopy(geometry.bone_indicies)
bone_weights = copy.deepcopy(geometry.bone_weights)
# swap y and z
geometry.bind_shape_matrix = swap_up_axis_matrix(geometry.bind_shape_matrix, True, False, self.up_axis)
# precompute bind_shape_matrix
bound_min = Float3(FLOAT32_MAX, FLOAT32_MAX, FLOAT32_MAX)
bound_max = Float3(FLOAT32_MIN, FLOAT32_MIN, FLOAT32_MIN)
for i, position in enumerate(geometry.positions):
geometry.positions[i] = np.dot([position[0], position[1], position[2], 1.0], geometry.bind_shape_matrix)[:3]
position = geometry.positions[i]
for j in range(3):
if bound_min[j] > position[j]:
bound_min[j] = position[j]
if bound_max[j] < position[j]:
bound_max[j] = position[j]
for i, normal in enumerate(geometry.normals):
geometry.normals[i] = np.dot([normal[0], normal[1], normal[2], 0.0], geometry.bind_shape_matrix)[:3]
geometry.normals[i] = normalize(geometry.normals[i])
geometry_data = dict(
name=geometry.name,
positions=copy.deepcopy(geometry.positions),
normals=copy.deepcopy(geometry.normals),
colors=copy.deepcopy(geometry.colors),
texcoords=copy.deepcopy(geometry.texcoords),
indices=copy.deepcopy(geometry.indices),
skeleton_name=skeleton_name,
bone_indicies=copy.deepcopy(bone_indicies),
bone_weights=copy.deepcopy(bone_weights),
bound_min=copy.deepcopy(bound_min),
bound_max=copy.deepcopy(bound_max),
radius=length(bound_max - bound_min)
)
geometry_datas.append(geometry_data)
return geometry_datas
if __name__ == '__main__':
mesh = Collada(os.path.join('..', 'Resource', 'Externals', 'Meshes', 'skeleton1.dae'))
mesh.get_mesh_data()
|
ubuntunux/PyEngine3D
|
PyEngine3D/ResourceManager/ColladaLoader.py
|
Python
|
bsd-2-clause
| 29,857
|
#!/usr/bin/env python
from distutils.core import setup
setup(name='dyneav',
version='1.0',
description='A little EAV package that lets you create dynamic classes using EAV to store attributes.',
author='Jason Goodell',
author_email='jgoodell77@gmail.com',
packages=['dyneav'],
)
|
jgoodell/dyneav
|
setup.py
|
Python
|
mit
| 317
|
from flask.ext.wtf import Form
from wtforms.fields import StringField, PasswordField, SubmitField
from wtforms.fields.html5 import EmailField
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from wtforms.validators import InputRequired, DataRequired, Length, Email, EqualTo
from wtforms import ValidationError
from ..models import User, Role, Tag
from .. import db
class ChangeUserEmailForm(Form):
email = EmailField('New email', validators=[
InputRequired(),
Length(1, 64),
Email()
])
submit = SubmitField('Update email')
def validate_email(self, field):
if User.query.filter_by(email=field.data).first():
raise ValidationError('Email already registered.')
class InviteUserForm(Form):
role = QuerySelectField('Account type',
validators=[InputRequired()],
get_label='name',
query_factory=lambda: db.session.query(Role).
order_by('permissions'))
email = EmailField('Email', validators=[InputRequired(), Length(1, 64),
Email()])
submit = SubmitField('Invite')
def validate_email(self, field):
if User.query.filter_by(email=field.data).first():
raise ValidationError('Email already registered.')
class NewUserForm(InviteUserForm):
first_name = StringField('First name', validators=[InputRequired(),
Length(1, 64)])
last_name = StringField('Last name', validators=[InputRequired(),
Length(1, 64)])
company_name = StringField('Company name', validators=[InputRequired(),
Length(1, 64)])
password = PasswordField('Password', validators=[
InputRequired(), EqualTo('password2',
'Passwords must match.')
])
password2 = PasswordField('Confirm password', validators=[InputRequired()])
submit = SubmitField('Create')
"""
class NewCategoryForm(Form):
category_name = StringField('Category', validators=[InputRequired(),
Length(1, 64)])
unit = StringField('Unit', validators=[InputRequired(),
Length(1, 32)])
submit = SubmitField('Add category')
"""
class AdminCreateTagForm(Form):
tag_name = StringField('Tag Name',
validators=[InputRequired(), Length(1, 1000)])
submit = SubmitField('Create New Tag')
class AdminAddTagToVendorForm(Form):
tag_name = QuerySelectField('Tag',
validators=[DataRequired()],
get_label='tag_name',
query_factory=lambda: db.session.query(Tag).order_by('id'))
submit = SubmitField('Assign this Tag to Vendor')
class AdminCreateItemTagForm(Form):
item_tag_name = StringField('Tag Name',
validators=[InputRequired(), Length(1, 1000)])
tag_color = StringField('Tag Color',
                            validators=[InputRequired(), Length(1, 1000)])
submit = SubmitField('Create New Item Tag')
|
hack4impact/reading-terminal-market
|
app/admin/forms.py
|
Python
|
mit
| 3,316
|
#cnn file for tf tutorial
#Lukas Odrzywolski
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
#imports
import numpy as np
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
#application of logic added here
def cnn_model_fn(features, labels, mode):
"""Model function for CNN."""
#input layer
input_layer = tf.reshape(features["x"], [-1, 28, 28, 1])
    #convolutional layer #1
    conv1 = tf.layers.conv2d(
inputs=input_layer,
filters=32,
kernel_size=[5, 5],
padding="same",
activation=tf.nn.relu)
#pooling layer #1
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
#convolutional layer #2 and pooling layer #2
    conv2 = tf.layers.conv2d(
inputs=pool1,
filters=64,
kernel_size=[5, 5],
padding="same",
activation=tf.nn.relu)
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
#dense layer
pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
dropout = tf.layers.dropout(
        inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
#logits layer
logits = tf.layers.dense(inputs=dropout, units=10)
predictions = {
#generate prediction (for PREDICT and EVAL mode)
"classes": tf.argmax(input=logits, axis=1),
#add 'softmax_tensor' to the graph. it is used for PREDICT and by the 'logging_hook'.
"probabilities": tf.nn.softmax(logits, name="softmax_tensor")
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
#calculate loss (for both TRAIN and EVAL modes)
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
#configure the training Op (for TRAIN mode)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
train_op = optimizer.minimize(
loss=loss,
global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
#add evaluation metric (for EVAL mode)
eval_metric_ops = {
"accuracy": tf.metrics.accuracy(
labels=labels, predictions=predictions["classes"])}
    return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
#load training and eval data
mnist = tf.contrib.learn.datasets.load_dataset("mnist")
train_data = mnist.train.images
train_labels = np.asarray(mnist.train.labels, dtype=np.int32)
eval_data = mnist.test.images
eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)
mnist_classifier = tf.estimator.Estimator(
        model_fn=cnn_model_fn, model_dir="/tmp/mnist_convnet_model")
#set up logging for predictions
tensors_to_log = {"probabilities": "softmax_tensor"}
logging_hook = tf.train.LoggingTensorHook(
        tensors=tensors_to_log, every_n_iter=50)
#train the model
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={"x": train_data},
y=train_labels,
batch_size=100,
num_epochs=None,
shuffle=True)
mnist_classifier.train(
input_fn=train_input_fn,
steps=20000,
hooks=[logging_hook])
#evaluate the model and print results
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
x={"x": eval_data},
y=eval_labels,
num_epochs=1,
shuffle=False)
eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)
print(eval_results)
if __name__ == "__main__":
tf.app.run()
|
odrzywolski-lukas/odrzywolskiSprintScripts
|
python/cnn_mnist.py
|
Python
|
mit
| 3,457
|
# -*- encoding: utf-8 -*-
'''
This module tests the SCM data import module: iris/etl/scm.py
'''
import unittest
from django.contrib.auth.models import User
from iris.core.models import Domain
from iris.core.models import DomainRole
from iris.etl.scm import from_string, ROLES
#pylint: skip-file
class DomainTest(unittest.TestCase):
def tearDown(self):
Domain.objects.all().delete()
def test_add_one_domain(self):
from_string("D: System")
assert Domain.objects.get(name='System')
def test_domain_name_include_colon(self):
from_string("D: System:Test")
assert Domain.objects.get(name='System:Test')
def test_add_domain_dont_delete_others(self):
from_string("D: Another")
from_string('''
D: Another
D: System
''')
assert Domain.objects.get(name="Another")
def test_add_two_domains(self):
from_string('''
D: Another
D: System
''')
self.assertEqual(
[('Another',), ('System',), ('Uncategorized',)],
list(Domain.objects.all().order_by('name').values_list('name'))
)
def test_delete_two_domains(self):
from_string('''
D: Another
D: System
D: App Framework
''')
from_string('D: App Framework')
self.assertEqual(
[('App Framework',)],
list(Domain.objects.exclude(name='Uncategorized').values_list('name'))
)
def test_delete_all_domains(self):
from_string('''
D: Another
D: System
D: App Framework
''')
from_string('')
self.assertEqual(
[('Uncategorized',)],
list(Domain.objects.all().values_list('name'))
)
class TestDomainRole(unittest.TestCase):
def tearDown(self):
DomainRole.objects.all().delete()
Domain.objects.all().delete()
User.objects.all().delete()
def test_adding_domain_maintainer(self):
from_string('''
D: System
M: Mike <mike@i.com>
''')
        self.assertEqual(
[('mike@i.com',)],
list(DomainRole.objects.get(
domain__name='System',
role='MAINTAINER').user_set.all(
).values_list('email'))
)
def test_adding_two_domain_reviewers(self):
from_string('''
D: System
R: Mike <mike@i.com>
R: Lucy David <lucy.david@inher.com>
''')
self.assertEqual(
[(u'Lucy',), (u'Mike',)],
list(DomainRole.objects.get(
domain__name='System',
role='REVIEWER').user_set.all(
).order_by('first_name').values_list('first_name'))
)
def test_delete_integrators(self):
from_string('''
D: System
I: Mike <mike@i.com>
I: Lucy David <lucy.david@inher.com>
I: <lily.edurd@inher.com>
''')
from_string('''
D: System
I: Lucy David <lucy.david@inher.com>
I: <lily.edurd@inher.com>
''')
self.assertEqual(
[('lily.edurd@inher.com',), ('lucy.david@inher.com',)],
list(DomainRole.objects.get(
domain__name='System', role="INTEGRATOR").user_set.all(
).order_by('email').values_list('email'))
)
def test_delete_all_roles(self):
from_string('''
D: System
R: Mike <mike@i.com>
A: Lucy David <lucy.david@inher.com>
I: <lily.edurd@inher.com>
M: <tom.edurd@inher.com>
''')
from_string('''
D: System
''')
for role in ROLES:
self.assertEqual(
[],
[r.role for r in DomainRole.objects.filter(
domain__name='System', role=role)])
def test_update_architectures(self):
from_string('''
D: System
A: Mike <mike@i.com>
''')
self.assertEqual(
['mike@i.com'],
[u.email for u in User.objects.all()])
from_string('''
D: System
A: Mike Chung <mike@i.com>
''')
self.assertEqual(
[u'Chung'],
[i.last_name for i in DomainRole.objects.get(
domain__name='System', role="ARCHITECT").user_set.all()])
self.assertEqual(
['mike@i.com'],
[u.email for u in User.objects.all()])
def test_add_same_user_in_different_domain(self):
from_string('''
D: System
A: Mike <mike@i.com>
D: Appframework
M: Mike <mike@i.com>
''')
self.assertEqual(
['mike@i.com'],
[i.email for i in DomainRole.objects.get(
domain__name='System', role="ARCHITECT").user_set.all()])
self.assertEqual(
['mike@i.com'],
[i.email for i in DomainRole.objects.get(
domain__name='Appframework', role="MAINTAINER").user_set.all()])
self.assertEqual(
['mike@i.com'],
[u.email for u in User.objects.all()])
def test_roles_transform(self):
from_string('''
D: System
A: Mike <mike@i.com>
M: Lily David <lily.david@hello.com>
R: Tom Frédéric <tom.adwel@hello.com>
I: <lucy.chung@wel.com>
''')
from_string('''
D: System
M: Mike <mike@i.com>
R: Lily David <lily.david@hello.com>
A: <lucy.chung@wel.com>
I: Tom Frédéric <tom.adwel@hello.com>
''')
self.assertEqual(
['lucy.chung@wel.com'],
[i.email for i in DomainRole.objects.get(
domain__name='System', role="ARCHITECT").user_set.all()])
self.assertEqual(
['lily.david@hello.com'],
[i.email for i in DomainRole.objects.get(
domain__name='System', role="REVIEWER").user_set.all()])
self.assertEqual(
['mike@i.com'],
[i.email for i in DomainRole.objects.get(
domain__name='System', role="MAINTAINER").user_set.all()])
self.assertEqual(
[u'Frédéric'],
[i.last_name for i in DomainRole.objects.get(
domain__name='System', role="INTEGRATOR").user_set.all()])
self.assertEqual(
['lily.david@hello.com',
'lucy.chung@wel.com',
'mike@i.com',
'tom.adwel@hello.com'],
[u.email for u in User.objects.all().order_by('email')])
|
117111302/iris
|
iris/etl/tests/test_import_scm_domain.py
|
Python
|
gpl-2.0
| 6,724
|
from collections import Counter
import numpy as np
def run(word_gen, index, window_size, out_file):
context = []
pair_counts = Counter()
for word in word_gen:
context.append(index[word])
if len(context) > window_size * 2 + 1:
context.pop(0)
pair_counts = _process_context(context, pair_counts, window_size)
import pyximport
pyximport.install(setup_args={"include_dirs": np.get_include()})
from representations import sparse_io
sparse_io.export_mat_from_dict(pair_counts, out_file)
def _process_context(context, pair_counts, window_size):
if len(context) < window_size + 1:
return pair_counts
target = context[window_size]
    indices = list(range(0, window_size))
    indices.extend(range(window_size + 1, 2 * window_size + 1))
for i in indices:
if i >= len(context):
break
pair_counts[(target, context[i])] += 1
return pair_counts
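if __name__ == '__main__':
    # Minimal usage sketch (assumption: the Cython-backed representations.sparse_io
    # module from this repo is importable, and 'pairs.bin' is a hypothetical output
    # path). `toy_index` maps each word to an integer id, `word_gen` is any iterable
    # of tokens; co-occurrence counts for a symmetric window of size 2 are written out.
    toy_index = {'the': 0, 'cat': 1, 'sat': 2, 'mat': 3}
    toy_words = ['the', 'cat', 'sat', 'the', 'mat']
    run(iter(toy_words), toy_index, 2, 'pairs.bin')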
|
williamleif/histwords
|
representations/cooccurgen.py
|
Python
|
apache-2.0
| 944
|
#! /usr/bin/env python
import numpy as np
import scipy as cp
import pandas as pd
import sklearn as sk
import tensorflow as tf
print('Hello World!')
print('start Python and MachineLearning!')
# edit in home
|
jiangzhiwen/pytest
|
pytest.py
|
Python
|
apache-2.0
| 211
|
# -*- coding: utf-8 -*-
class AnalyticBeastObject(object):
"""
main beast analytics class
"""
def __init__(self, name, beastObject):
self.name = name
self.beastObject = beastObject
self.type = beastObject.beast.__class__.__name__
#OUTPUT variables
#general Data
self.energyAtGameEnd = 0
self.diedAtRound = 0 #done
self.rainedFoodOnBeast = 0 #done
#fight Data
self.fightsWon = 0 #done
self.energyGainedThroughFights = 0 #done
self.energyBeforeDeath = 0 #done
self.avgEnergyGainedPerFight = 0
#food Data
self.foodConsumed = 0 #done
        self.avgEnergyPerMoveWithFights = 0 # with moving costs
        self.avgEnergyPerMoveWithoutFights = 0 # with moving costs
self.roundsWithoutEnergyGain = 0 #done
self.energyGainedThisRound = False #done
self.madeMoveThisRound = False #done
#MOVING Data
self.moves = 0 #done
self.moveCosts = 0 #done
self.hides = 0 #done
self.stays = 0 #done
self.sprintsHorizontal = 0 #done
self.sprintsVertical = 0 #done
self.sprintsDiagonal = 0 #done
self.horizontal = 0 #done
self.vertical = 0 #done
self.diagonal = 0 #done
self.avgMovingCost = 0
self.sprints = 0 #done
self.notAllowedMoves = 0 #done
|
eyeswideopen/beastArena
|
AnalyticBeastObject.py
|
Python
|
gpl-3.0
| 1,438
|
"""Lower level API, configuration, and HTTP stuff."""
import six
import time
from ubersmith.compat import total_ordering, file_type
import requests
from ubersmith.exceptions import (
RequestError,
ResponseError,
UpdatingTokenResponse,
MaintenanceResponse,
)
from ubersmith.utils import (
append_qs,
to_nested_php_args,
get_filename,
)
__all__ = [
'METHODS',
'RequestHandler',
'get_default_request_handler',
'set_default_request_handler',
]
_DEFAULT_REQUEST_HANDLER = None
"""A dict of all methods returned by uber.method_list()"""
METHODS = {
u'client.ach_add': u'Add a New Bank Account',
u'client.ach_delete': u'Delete a Bank Account',
u'client.ach_update': u'Update a Bank Account',
u'client.add': u'Add a New Client',
u'client.avatar_get': u"Retrieve a Client Avatar",
u'client.avatar_set': u"Set a Client Avatar",
u'client.cc_add': u'Add a New Credit Card',
u'client.cc_delete': u'Delete a Credit Card',
u'client.cc_info': u"List a Client's Credit Card Details",
u'client.cc_update': u'Update a Credit Card',
u'client.comment_list': u"List a Client's Comments",
u'client.contact_add': u'Add a New Contact',
u'client.contact_delete': u'Delete a Contact',
u'client.contact_get': u'Get Contact Details',
u'client.contact_list': u"List a Client's Contacts",
u'client.contact_metadata_get': u"Get a Contact's Metadata",
u'client.contact_metadata_single': u"Get a Contact's Metadata Value",
u'client.contact_update': u'Update a Contact',
u'client.count': u'Count Active Clients',
u'client.credit_add': u'Add an Account Credit',
u'client.credit_apply': u"Apply a Credit to an Invoice",
u'client.credit_comment_list': u"List a Credit's Comments",
u'client.credit_deactivate': u'Deactivate an Account Credit',
u'client.credit_list': u"List a Client's Credits",
u'client.deactivate': u'Deactivate a Client',
u'client.domain_add': u'Add a Domain',
u'client.domain_list': u"List a Client's Domains",
u'client.domain_lookup': u'Look Up a Domain',
u'client.domain_register': u'Register a Domain',
u'client.domain_transfer': u'Transfer a Domain',
u'client.get': u'Get Client Details',
u'client.invoice_charge': u'Charge an Invoice',
u'client.invoice_count': u'Count Invoices',
u'client.invoice_disregard': u'Disregard an Invoice',
u'client.invoice_generate': u'Generate an Invoice',
u'client.invoice_get': u'Get an Invoice',
u'client.invoice_list': u"List a Client's Invoices",
u'client.invoice_payments': u"List an Invoice's Payments",
u'client.invoice_post_gw_payment': u'Record a Payment',
u'client.latest_client': u'Get the Latest Client',
u'client.list': u'List Clients',
u'client.lookup': u'Look Up a Client',
u'client.metadata_get': u"Get a Client's Metadata",
u'client.metadata_single': u"Get a Client's Metadata Value",
u'client.payment_method_list': u"List a Client's Payment Methods",
u'client.payment_refund': u'Refund a payment.',
u'client.reactivate': u'Reactivate a Client',
u'client.renewal_list': u'List Services for Renewal',
u'client.send_welcome': u'Send a Welcome Letter',
u'client.service_add': u'Add a New Service',
u'client.service_comment_list': u"List a Service's Comments",
u'client.service_deactivate': u'Deactivate a Service',
u'client.service_get': u'Get a Service',
u'client.service_list': u"List a Client's Services",
u'client.service_metadata_get': u"Get a Service's Metadata",
u'client.service_metadata_single': u"Get a Service's Metadata Value",
u'client.service_module_call': u'Call a Service Module Function',
u'client.service_prorate': u'Prorate a Service',
u'client.service_update': u'Update a Service',
u'client.set_login': u"Set a Client's Login",
u'client.tax_exemption_add': u"Add a new Tax Exemption",
u'client.tax_exemption_get': u"Get a Client's Tax Exemption",
u'client.tax_exemption_list': u"List a Client's Tax Exemptions",
u'client.tax_exemption_update': u"Update a Client's Tax Exemption",
u'client.update': u'Update a Client',
u'device.add': u'Add a New Device',
u'device.comment_list': u"List a Device's Comments",
u'device.connection_list': u"List a Device's Connections",
u'device.cpanel_add': u'Add a cPanel Account',
u'device.delete': u'Delete a Device',
u'device.event_list': u'List Device Events',
u'device.facility_list': u'List Device Facilities',
u'device.get': u'Get a Device',
u'device.hostname_get': u'Get a Device Hostname',
u'device.ip_assign': u'Assign an IP to a Device',
u'device.ip_assignment_add': u'Create a New IP Assignment',
u'device.ip_assignment_delete': u'Delete a Device IP Assignment',
u'device.ip_assignment_list': u'List Device IP Assignments',
u'device.ip_assignment_update': u'Update a Device IP Assignment',
u'device.ip_block_list': u"List IP Blocks",
u'device.ip_get_available': u'List Available IP Addresses',
u'device.ip_get_unassigned': u'Get Unassigned IP Addresses',
u'device.ip_group_add': u'Add a Device IP Group',
u'device.ip_group_delete': u'Delete a Device IP Group',
u'device.ip_group_list': u'List a Device IP Group',
u'device.ip_group_update': u'Update a Device IP Group',
u'device.ip_lookup': u'Look Up a Device IP',
u'device.ip_pool_list': u"List IP Pools",
u'device.ip_unassign': u'Unassign a Device IP',
u'device.list': u'List Devices',
u'device.module_call': u'Call a Device Module Function',
u'device.module_call_aggregate': u'Call an Aggregate Device Module Function',
u'device.module_graph': u'Generate Device Module Graph',
u'device.monitor_add': u'Add a New Device Monitor',
u'device.monitor_delete': u'Delete a Device Monitor',
u'device.monitor_disable': u'Disable a Device Monitor',
u'device.monitor_enable': u'Enable a Device Monitor',
u'device.monitor_list': u'List Device Monitors',
u'device.monitor_update': u'Update a Device Monitor',
u'device.reboot': u"Set a Device's Power State",
u'device.reboot_graph': u'Get a Reboot Graph',
u'device.tag': u'Tag a Device',
u'device.type_list': u'List Device Types',
u'device.untag': u'Untag a Device',
u'device.update': u'Update a Device',
u'device.vlan_get_available': u'List Available VLANs',
u'order.cancel': u'Cancel an Order',
u'order.client_respond': u'Post a Client/Lead Order Response',
u'order.coupon_get': u'Get Order Coupon Details',
u'order.create': u'Create a New Order',
u'order.get': u'Get Order Details',
u'order.list': u'List Orders',
u'order.process': u'Process an Order',
u'order.queue_list': u'List Order Queues',
u'order.respond': u'Post an Order Response',
u'order.submit': u'Submit An Order',
u'order.update': u'Update an Order',
u'sales.opportunity_add': u'Add an Opportunity',
u'sales.opportunity_list': u'List Opportunities',
u'sales.opportunity_stage_list': u'List Opportunity Stages',
u'sales.opportunity_status_list': u'List Opportunity Statuses',
u'sales.opportunity_type_list': u'List Opportunity Types',
u'sales.opportunity_update': u'Update an Opportunity',
u'support.department_get': u'Get Ticket Departments',
u'support.department_list': u'List Ticket Departments',
u'support.ticket_count': u'Count Support Tickets',
u'support.ticket_get': u'Get Support Ticket Details',
u'support.ticket_list': u'Get a List of Tickets',
u'support.ticket_merge': u'Merge Tickets',
u'support.ticket_post_client_response': u'Post a Client Response to a Ticket',
u'support.ticket_post_list': u'Get all Posts for a Ticket',
u'support.ticket_post_staff_response': u'Post a Staff Response to a Ticket',
u'support.ticket_submit': u'Submit a New Ticket',
u'support.ticket_submit_outgoing': u'Create a New Outgoing Ticket',
u'support.ticket_type_list': u"Get a List of Ticket Types",
u'support.ticket_update': u'Update a Ticket',
u'uber.admin_avatar_get': u"Retrieve an Admin Avatar",
u'uber.admin_avatar_set': u"Set an Admin Avatar",
u'uber.admin_get': u"User Information",
u'uber.admin_list': u"List User Logins",
u'uber.api_export': u'Export Data',
u'uber.attachment_get': u'Get an attachment',
u'uber.attachment_list': u'List Attachments',
u'uber.check_login': u'Verify a login and password',
u'uber.client_permission_list': u"List available permissions",
u'uber.client_welcome_stats': u'Display Client Statistics',
u'uber.comment_add': u'Add Comment',
u'uber.comment_delete': u'Delete Comment',
u'uber.comment_get': u'Get Comments',
u'uber.comment_list': u'List Comments',
u'uber.comment_update': u'Update Comment',
u'uber.documentation': u'Download API Documentation',
u'uber.event_list': u'Access the Event Log',
u'uber.file_add': u"Add a file",
u'uber.file_delete': u"Delete a file",
u'uber.file_get': u"Get a File",
u'uber.file_list': u"Get a List of Files",
u'uber.file_update': u"Update a file",
u'uber.forgot_pass': u'Send a Password Reminder',
u'uber.login_list': u'List User Logins',
u'uber.mail_get': u'Get an Email From the Log',
u'uber.mail_list': u'Access the Mail Log',
u'uber.message_list': u'List Message Board Messages',
u'uber.metadata_bulk_get': u'Bulk Get Metadata Values',
u'uber.metadata_get': u'Get Metadata Values',
u'uber.method_get': u'Get API Method Details',
u'uber.method_list': u'List Available API Methods',
u'uber.quick_stats': u'Get Quick System Stats',
u'uber.quick_stats_detail': u'Get Detailed System Stats',
u'uber.service_plan_get': u'Get Service Plan Details',
u'uber.service_plan_list': u'List Service Plans',
u'uber.tax_exemption_type_get': u"Get a Tax Exemption Type",
u'uber.tax_exemption_type_list': u"List Tax Exemption Types",
u'uber.user_exists': u'Check whether a Client Exists',
u'uber.username_exists': u'Check Whether a Username Exists',
}
class _ProxyModule(object):
def __init__(self, handler, module):
self.handler = handler
self.module = module
def __getattr__(self, name):
"""Return the call with request_handler prefilled."""
call_func = getattr(self.module, name)
if callable(call_func):
call_p = call_func.handler(self.handler)
# store partial on proxy so it doesn't have to be created again
setattr(self, name, call_p)
return call_p
raise AttributeError("'{0}' object has no attribute '{1}'".format(
type(self).__name__, name))
class RequestHandler(object):
"""Handles HTTP requests and authentication."""
def __init__(self, base_url, username=None, password=None, verify=True,
session=None):
"""Initialize HTTP request handler with optional authentication.
base_url: URL to send API requests
username: Username for API access
password: Password for API access
verify: Verify HTTPS certificate
session: requests.Session to send requests with
"""
self.base_url = base_url
self.username = username
self.password = password
self.verify = verify
if session is None:
session = requests.session()
self._session = session
@property
def session(self):
return self._session
def process_request(self, method, data=None):
"""Process request over HTTP to ubersmith instance.
method: Ubersmith API method string
data: dict of method arguments
"""
# make sure requested method is valid
self._validate_request_method(method)
# attempt the request multiple times
attempts = 3
for i in range(attempts):
response = self._send_request(method, data)
# handle case where ubersmith is 'updating token'
# see: https://github.com/jasonkeene/python-ubersmith/issues/1
if self._is_token_response(response):
if i < attempts - 1:
# wait 2 secs before retrying request
time.sleep(2)
continue
else:
raise UpdatingTokenResponse
break
resp = BaseResponse(response)
# test for error in json response
if response.headers.get('content-type') == 'application/json':
if not resp.json.get('status'):
if all([
resp.json.get('error_code') == 1,
resp.json.get('error_message') == u"We are currently "
"undergoing maintenance, please check back shortly.",
]):
raise MaintenanceResponse(response=resp.json)
else:
raise ResponseError(response=resp.json)
return resp
@staticmethod
def _is_token_response(response):
return ('text/html' in response.headers.get('content-type', '') and
'Updating Token' in response.content)
def _send_request(self, method, data):
url = append_qs(self.base_url, {'method': method})
data, files, headers = self._encode_data(data)
return self.session.post(url, data=data, files=files, headers=headers,
auth=(self.username, self.password),
verify=self.verify)
@staticmethod
def _validate_request_method(method):
"""Make sure requested method is valid."""
if method not in METHODS:
raise RequestError("Requested method is not valid.")
@staticmethod
def _encode_data(data):
"""URL encode data."""
data = data if data is not None else {}
data = to_nested_php_args(data)
files = dict([
(key, value) for key, value in
data.items() if isinstance(value, file_type)])
for fname in files:
del data[fname]
return data, files or None, None
def __getattr__(self, name):
"""If attribute accessed is a call module, return a proxy."""
if name in set(m.split('.')[0] for m in METHODS):
module_name = 'ubersmith.{0}'.format(name)
module = __import__(module_name, fromlist=[''])
proxy = _ProxyModule(self, module)
# store proxy on handler so it doesn't have to be created again
setattr(self, name, proxy)
return proxy
raise AttributeError("'{0}' object has no attribute '{1}'".format(
type(self).__name__, name))
class BaseResponse(object):
"""Wraps response object and emulates different types."""
def __init__(self, response):
self.response = response # requests' response object
@classmethod
def from_cleaned(cls, response, cleaned):
resp = cls(response.response)
resp.cleaned = cleaned
return resp
@property
def json(self):
return self.response.json()
@property
def data(self):
if hasattr(self, "cleaned"):
return self.cleaned
else:
return self.json['data']
@property
def type(self):
return self.response.headers.get('content-type')
def __str__(self):
return str(self.data)
def __repr__(self):
return repr(self.data)
def __nonzero__(self):
return bool(self.data)
def __json__(self):
"""This method returns the JSON-serializable representation of the
Response. To utilize this, create a JSONEncoder which calls the
__json__ methods of supporting objects. e.g.::
import json
class MyJSONEncoder(json.JSONEncoder):
def default(self, o):
                if hasattr(o, '__json__') and callable(o.__json__):
                    return o.__json__()
else:
return super(MyJSONEncoder, self).default(o)
json.dumps(my_response, cls=MyJSONEncoder)
"""
return self.data
@total_ordering
class DictResponse(BaseResponse):
__marker = object()
def keys(self):
return self.data.keys()
def iterkeys(self):
return six.iterkeys(self.data)
def values(self):
return self.data.values()
def itervalues(self):
return six.itervalues(self.data)
def items(self):
return self.data.items()
def iteritems(self):
return six.iteritems(self.data)
def get(self, key, default=None):
return self.data.get(key, default)
def update(self, d):
self.data.update(d)
def setdefault(self, key, value):
self.data.setdefault(key, value)
def pop(self, key, default=__marker):
if default is self.__marker:
return self.data.pop(key)
else:
return self.data.pop(key, default)
def popitem(self):
return self.data.popitem()
def clear(self):
self.data.clear()
def __setitem__(self, key, value):
self.data[key] = value
def __iter__(self):
return iter(self.data)
def __getitem__(self, key):
return self.data[key]
def __len__(self):
return len(self.data)
def __eq__(self, other):
return self.data == other
def __lt__(self, other):
return self.data < other
def __contains__(self, item):
return item in self.data
@total_ordering
class IntResponse(BaseResponse):
@property
def numerator(self):
return self.data
@property
def denominator(self):
return 1
@property
def real(self):
return self.data
@property
def imag(self):
return 0
def bit_length(self):
if hasattr(self.data, 'bit_length'):
return self.data.bit_length()
else:
return len(bin(abs(self.data))) - 2
def conjugate(self):
return self.data
def __int__(self):
return self.data
__index__ = __long__ = __trunc__ = __int__
def __float__(self):
return float(self.data)
def __oct__(self):
return oct(self.data)
def __hex__(self):
return hex(self.data)
def __eq__(self, other):
return self.data == other
def __lt__(self, other):
return self.data < other
def __add__(self, other):
return int(self) + other
__radd__ = __add__
def __sub__(self, other):
return int(self) - other
def __rsub__(self, other):
return other - int(self)
def __mul__(self, other):
return int(self) * other
__rmul__ = __mul__
def __div__(self, other):
return int(self) / other
def __rdiv__(self, other):
return other / int(self)
def __floordiv__(self, other):
return int(self) // other
def __rfloordiv__(self, other):
return other // int(self)
def __truediv__(self, other):
return float(self) / other
def __rtruediv__(self, other):
return other / float(self)
def __mod__(self, other):
return int(self) % other
def __rmod__(self, other):
return other % int(self)
def __pow__(self, other):
return int(self) ** other
def __rpow__(self, other):
return other ** int(self)
def __abs__(self):
return abs(self.data)
def __neg__(self):
return -self.data
def __pos__(self):
return self.data
def __divmod__(self, other):
return self // other, self % other
def __rdivmod__(self, other):
return other // self, other % self
def __and__(self, other):
return self.data & other
__rand__ = __and__
def __or__(self, other):
return self.data | other
__ror__ = __or__
def __xor__(self, other):
return self.data ^ other
__rxor__ = __xor__
def __lshift__(self, other):
return self.data << other
def __rlshift__(self, other):
return other << self.data
def __rshift__(self, other):
return self.data >> other
def __rrshift__(self, other):
return other >> self.data
def __invert__(self):
return ~self.data
def __nonzero__(self):
return bool(self.data)
class FileResponse(BaseResponse):
@property
def json(self):
raise NotImplementedError
@property
def data(self):
return self.response.content
@property
def filename(self):
disposition = self.response.headers.get('content-disposition')
return get_filename(disposition)
def get_default_request_handler():
"""Return the default request handler."""
if not _DEFAULT_REQUEST_HANDLER:
raise Exception("Request handler required but no default was found.")
return _DEFAULT_REQUEST_HANDLER
def set_default_request_handler(request_handler):
"""Set the default request handler."""
if not isinstance(request_handler, RequestHandler):
raise TypeError(
"Attempted to set an invalid request handler as default.")
global _DEFAULT_REQUEST_HANDLER
_DEFAULT_REQUEST_HANDLER = request_handler
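if __name__ == '__main__':
    # Usage sketch (the URL and credentials below are placeholders, not real
    # endpoints): construct a handler, register it as the module default, and
    # issue a raw API call through process_request().
    handler = RequestHandler('http://ubersmith.example.com/api/2.0/',
                             username='admin', password='secret')
    set_default_request_handler(handler)
    response = handler.process_request('uber.method_list')
    print(response.data)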
|
jasonkeene/python-ubersmith
|
ubersmith/api.py
|
Python
|
mit
| 21,356
|
from django import template
from django.utils.text import capfirst
register = template.Library()
@register.filter
def verbose_name(obj, arg):
return capfirst(obj._meta.get_field(arg).verbose_name)
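# Template usage sketch (assumes this tag library is loaded in the template and
# `document` is a model instance with a 'title' field):
#   {% load verbose_name %}
#   {{ document|verbose_name:"title" }}  -> capitalized verbose name of the field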
|
sbsdev/daisyproducer
|
daisyproducer/documents/templatetags/verbose_name.py
|
Python
|
agpl-3.0
| 214
|
from __future__ import print_function
import csv
import os
import copy
import numpy as np
import GPy
import scipy.io
import zipfile
import tarfile
import datetime
import json
import re
import sys
from io import open
from .config import *
ipython_available=True
try:
import IPython
except ImportError:
ipython_available=False
try:
#In Python 2, cPickle is faster. It does not exist in Python 3 but the underlying code is always used
#if available
import cPickle as pickle
except ImportError:
import pickle
#A Python2/3 import handler - urllib2 changed its name in Py3 and was also reorganised
try:
    from urllib2 import urlopen
    from urllib2 import URLError
    from urllib2 import quote
except ImportError:
    from urllib.request import urlopen
    from urllib.error import URLError
    from urllib.parse import quote
def reporthook(a,b,c):
# ',' at the end of the line is important!
#print "% 3.1f%% of %d bytes\r" % (min(100, float(a * b) / c * 100), c),
#you can also use sys.stdout.write
sys.stdout.write("\r% 3.1f%% of %d bytes" % (min(100, float(a * b) / c * 100), c))
sys.stdout.flush()
# Global variables
data_path = os.path.expandvars(config.get('datasets', 'dir'))
#data_path = os.path.join(os.path.dirname(__file__), 'datasets')
default_seed = 10000
overide_manual_authorize=False
neil_url = 'http://staffwww.dcs.shef.ac.uk/people/N.Lawrence/dataset_mirror/'
# Read data resources from json file.
# Don't do this when ReadTheDocs is scanning as it breaks things
on_rtd = os.environ.get('READTHEDOCS', None) == 'True' #Checks if RTD is scanning
if not (on_rtd):
path = os.path.join(os.path.dirname(__file__), 'data_resources.json')
json_data = open(path, encoding='utf-8').read()
data_resources = json.loads(json_data)
if not (on_rtd):
path = os.path.join(os.path.dirname(__file__), 'football_teams.json')
json_data = open(path, encoding='utf-8').read()
football_dict = json.loads(json_data)
def prompt_user(prompt):
"""Ask user for agreeing to data set licenses."""
# raw_input returns the empty string for "enter"
yes = set(['yes', 'y'])
no = set(['no','n'])
try:
print(prompt)
choice = input().lower()
# would like to test for exception here, but not sure if we can do that without importing IPython
except:
print('Stdin is not implemented.')
print('You need to set')
print('overide_manual_authorize=True')
print('to proceed with the download. Please set that variable and continue.')
raise
if choice in yes:
return True
elif choice in no:
return False
else:
print(("Your response was a " + choice))
print("Please respond with 'yes', 'y' or 'no', 'n'")
        return prompt_user(prompt)
def data_available(dataset_name=None):
"""Check if the data set is available on the local machine already."""
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
dr = data_resources[dataset_name]
zip_urls = (dr['files'], )
if 'save_names' in dr: zip_urls += (dr['save_names'], )
else: zip_urls += ([],)
for file_list, save_list in zip_longest(*zip_urls, fillvalue=[]):
for f, s in zip_longest(file_list, save_list, fillvalue=None):
if s is not None: f=s # If there is a save_name given, use that one
if not os.path.exists(os.path.join(data_path, dataset_name, f)):
return False
return True
def download_url(url, store_directory, save_name=None, messages=True, suffix=''):
"""Download a file from a url and save it to disk."""
i = url.rfind('/')
file = url[i+1:]
print(file)
dir_name = os.path.join(data_path, store_directory)
if save_name is None: save_name = os.path.join(dir_name, file)
else: save_name = os.path.join(dir_name, save_name)
if suffix is None: suffix=''
print("Downloading ", url, "->", save_name)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
try:
response = urlopen(url+suffix)
except URLError as e:
if not hasattr(e, "code"):
raise
response = e
if response.code > 399 and response.code<500:
raise ValueError('Tried url ' + url + suffix + ' and received client error ' + str(response.code))
elif response.code > 499:
raise ValueError('Tried url ' + url + suffix + ' and received server error ' + str(response.code))
with open(save_name, 'wb') as f:
meta = response.info()
content_length_str = meta.get("Content-Length")
if content_length_str:
file_size = int(content_length_str)
else:
file_size = None
status = ""
file_size_dl = 0
block_sz = 8192
line_length=30
while True:
buff = response.read(block_sz)
if not buff:
break
file_size_dl += len(buff)
f.write(buff)
sys.stdout.write(" "*(len(status)) + "\r")
if file_size:
status = r"[{perc: <{ll}}] {dl:7.3f}/{full:.3f}MB".format(dl=file_size_dl/(1048576.),
full=file_size/(1048576.), ll=line_length,
perc="="*int(line_length*float(file_size_dl)/file_size))
else:
status = r"[{perc: <{ll}}] {dl:7.3f}MB".format(dl=file_size_dl/(1048576.),
ll=line_length,
perc="."*int(line_length*float(file_size_dl/(10*1048576.))))
sys.stdout.write(status)
sys.stdout.flush()
sys.stdout.write(" "*(len(status)) + "\r")
print(status)
# if we wanted to get more sophisticated maybe we should check the response code here again even for successes.
#with open(save_name, 'wb') as f:
# f.write(response.read())
#urllib.urlretrieve(url+suffix, save_name, reporthook)
def authorize_download(dataset_name=None):
"""Check with the user that the are happy with terms and conditions for the data set."""
print(('Acquiring resource: ' + dataset_name))
# TODO, check resource is in dictionary!
print('')
dr = data_resources[dataset_name]
print('Details of data: ')
print((dr['details']))
print('')
if dr['citation']:
print('Please cite:')
print((dr['citation']))
print('')
if dr['size']:
print(('After downloading the data will take up ' + str(dr['size']) + ' bytes of space.'))
print('')
print(('Data will be stored in ' + os.path.join(data_path, dataset_name) + '.'))
print('')
if overide_manual_authorize:
if dr['license']:
print('You have agreed to the following license:')
print((dr['license']))
print('')
return True
else:
if dr['license']:
print('You must also agree to the following license:')
print((dr['license']))
print('')
return prompt_user('Do you wish to proceed with the download? [yes/no]')
def download_data(dataset_name=None):
"""Check with the user that the are happy with terms and conditions for the data set, then download it."""
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
dr = data_resources[dataset_name]
if not authorize_download(dataset_name):
raise Exception("Permission to download data set denied.")
zip_urls = (dr['urls'], dr['files'])
if 'save_names' in dr: zip_urls += (dr['save_names'], )
else: zip_urls += ([],)
if 'suffices' in dr: zip_urls += (dr['suffices'], )
else: zip_urls += ([],)
for url, files, save_names, suffices in zip_longest(*zip_urls, fillvalue=[]):
for f, save_name, suffix in zip_longest(files, save_names, suffices, fillvalue=None):
download_url(os.path.join(url,f), dataset_name, save_name, suffix=suffix)
return True
def data_details_return(data, data_set):
"""Update the data component of the data dictionary with details drawn from the data_resources."""
data.update(data_resources[data_set])
return data
def cmu_urls_files(subj_motions, messages = True):
'''
Find which resources are missing on the local disk for the requested CMU motion capture motions.
'''
dr = data_resources['cmu_mocap_full']
cmu_url = dr['urls'][0]
subjects_num = subj_motions[0]
motions_num = subj_motions[1]
resource = {'urls' : [], 'files' : []}
# Convert numbers to strings
subjects = []
motions = [list() for _ in range(len(subjects_num))]
for i in range(len(subjects_num)):
curSubj = str(int(subjects_num[i]))
if int(subjects_num[i]) < 10:
curSubj = '0' + curSubj
subjects.append(curSubj)
for j in range(len(motions_num[i])):
curMot = str(int(motions_num[i][j]))
if int(motions_num[i][j]) < 10:
curMot = '0' + curMot
motions[i].append(curMot)
all_skels = []
assert len(subjects) == len(motions)
all_motions = []
for i in range(len(subjects)):
skel_dir = os.path.join(data_path, 'cmu_mocap')
cur_skel_file = os.path.join(skel_dir, subjects[i] + '.asf')
url_required = False
file_download = []
if not os.path.exists(cur_skel_file):
# Current skel file doesn't exist.
if not os.path.isdir(skel_dir):
os.makedirs(skel_dir)
# Add skel file to list.
url_required = True
file_download.append(subjects[i] + '.asf')
for j in range(len(motions[i])):
file_name = subjects[i] + '_' + motions[i][j] + '.amc'
cur_motion_file = os.path.join(skel_dir, file_name)
if not os.path.exists(cur_motion_file):
url_required = True
file_download.append(subjects[i] + '_' + motions[i][j] + '.amc')
if url_required:
resource['urls'].append(cmu_url + '/' + subjects[i] + '/')
resource['files'].append(file_download)
return resource
try:
import gpxpy
import gpxpy.gpx
gpxpy_available = True
except ImportError:
gpxpy_available = False
if gpxpy_available:
def epomeo_gpx(data_set='epomeo_gpx', sample_every=4):
if not data_available(data_set):
download_data(data_set)
files = ['endomondo_1', 'endomondo_2', 'garmin_watch_via_endomondo','viewranger_phone', 'viewranger_tablet']
X = []
for file in files:
gpx_file = open(os.path.join(data_path, 'epomeo_gpx', file + '.gpx'), 'r')
gpx = gpxpy.parse(gpx_file)
segment = gpx.tracks[0].segments[0]
points = [point for track in gpx.tracks for segment in track.segments for point in segment.points]
data = [[(point.time-datetime.datetime(2013,8,21)).total_seconds(), point.latitude, point.longitude, point.elevation] for point in points]
X.append(np.asarray(data)[::sample_every, :])
gpx_file.close()
return data_details_return({'X' : X, 'info' : 'Data is an array containing time in seconds, latitude, longitude and elevation in that order.'}, data_set)
#del gpxpy_available
# Some general utilities.
def sample_class(f):
p = 1. / (1. + np.exp(-f))
c = np.random.binomial(1, p)
c = np.where(c, 1, -1)
return c
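# Example: sample_class(np.array([-4., 0., 4.])) draws labels in {-1, 1};
# p = sigmoid(f), so a strongly negative f almost surely yields -1 and a
# strongly positive f almost surely yields +1.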
def boston_housing(data_set='boston_housing'):
if not data_available(data_set):
download_data(data_set)
all_data = np.genfromtxt(os.path.join(data_path, data_set, 'housing.data'))
X = all_data[:, 0:13]
Y = all_data[:, 13:14]
return data_details_return({'X' : X, 'Y': Y}, data_set)
def brendan_faces(data_set='brendan_faces'):
if not data_available(data_set):
download_data(data_set)
mat_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'frey_rawface.mat'))
Y = mat_data['ff'].T
return data_details_return({'Y': Y}, data_set)
def della_gatta_TRP63_gene_expression(data_set='della_gatta', gene_number=None):
if not data_available(data_set):
download_data(data_set)
mat_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'DellaGattadata.mat'))
X = np.double(mat_data['timepoints'])
    if gene_number is None:
Y = mat_data['exprs_tp53_RMA']
else:
Y = mat_data['exprs_tp53_RMA'][:, gene_number]
if len(Y.shape) == 1:
Y = Y[:, None]
return data_details_return({'X': X, 'Y': Y, 'gene_number' : gene_number}, data_set)
def football_data(season='1314', data_set='football_data'):
"""Football data from English games since 1993. This downloads data from football-data.co.uk for the given season. """
def league2num(string):
league_dict = {'E0':0, 'E1':1, 'E2': 2, 'E3': 3, 'EC':4}
return league_dict[string]
def football2num(string):
if string in football_dict:
return football_dict[string]
else:
football_dict[string] = len(football_dict)+1
            return football_dict[string]
data_set_season = data_set + '_' + season
data_resources[data_set_season] = copy.deepcopy(data_resources[data_set])
data_resources[data_set_season]['urls'][0]+=season + '/'
start_year = int(season[0:2])
end_year = int(season[2:4])
files = ['E0.csv', 'E1.csv', 'E2.csv', 'E3.csv']
if start_year>4 and start_year < 93:
files += ['EC.csv']
data_resources[data_set_season]['files'] = [files]
if not data_available(data_set_season):
download_data(data_set_season)
    from matplotlib.dates import datestr2num
for file in reversed(files):
filename = os.path.join(data_path, data_set_season, file)
# rewrite files removing blank rows.
writename = os.path.join(data_path, data_set_season, 'temp.csv')
input = open(filename, 'rb')
output = open(writename, 'wb')
writer = csv.writer(output)
for row in csv.reader(input):
if any(field.strip() for field in row):
writer.writerow(row)
input.close()
output.close()
    table = np.loadtxt(writename, skiprows=1, usecols=(0, 1, 2, 3, 4, 5), converters={0: league2num, 1: datestr2num, 2: football2num, 3: football2num}, delimiter=',')
X = table[:, :4]
Y = table[:, 4:]
return data_details_return({'X': X, 'Y': Y}, data_set)
def sod1_mouse(data_set='sod1_mouse'):
if not data_available(data_set):
download_data(data_set)
from pandas import read_csv
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'sod1_C57_129_exprs.csv')
Y = read_csv(filename, header=0, index_col=0)
num_repeats=4
num_time=4
num_cond=4
X = 1
return data_details_return({'X': X, 'Y': Y}, data_set)
def spellman_yeast(data_set='spellman_yeast'):
if not data_available(data_set):
download_data(data_set)
from pandas import read_csv
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'combined.txt')
Y = read_csv(filename, header=0, index_col=0, sep='\t')
return data_details_return({'Y': Y}, data_set)
def spellman_yeast_cdc15(data_set='spellman_yeast'):
if not data_available(data_set):
download_data(data_set)
from pandas import read_csv
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'combined.txt')
Y = read_csv(filename, header=0, index_col=0, sep='\t')
t = np.asarray([10, 30, 50, 70, 80, 90, 100, 110, 120, 130, 140, 150, 170, 180, 190, 200, 210, 220, 230, 240, 250, 270, 290])
times = ['cdc15_'+str(time) for time in t]
Y = Y[times].T
t = t[:, None]
return data_details_return({'Y' : Y, 't': t, 'info': 'Time series of synchronized yeast cells from the CDC-15 experiment of Spellman et al (1998).'}, data_set)
def lee_yeast_ChIP(data_set='lee_yeast_ChIP'):
if not data_available(data_set):
download_data(data_set)
from pandas import read_csv
import zipfile
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'binding_by_gene.tsv')
S = read_csv(filename, header=1, index_col=0, sep='\t')
transcription_factors = [col for col in S.columns if col[:7] != 'Unnamed']
annotations = S[['Unnamed: 1', 'Unnamed: 2', 'Unnamed: 3']]
S = S[transcription_factors]
return data_details_return({'annotations' : annotations, 'Y' : S, 'transcription_factors': transcription_factors}, data_set)
def fruitfly_tomancak(data_set='fruitfly_tomancak', gene_number=None):
if not data_available(data_set):
download_data(data_set)
from pandas import read_csv
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'tomancak_exprs.csv')
Y = read_csv(filename, header=0, index_col=0).T
num_repeats = 3
num_time = 12
xt = np.linspace(0, num_time-1, num_time)
xr = np.linspace(0, num_repeats-1, num_repeats)
xtime, xrepeat = np.meshgrid(xt, xr)
X = np.vstack((xtime.flatten(), xrepeat.flatten())).T
return data_details_return({'X': X, 'Y': Y, 'gene_number' : gene_number}, data_set)
def drosophila_protein(data_set='drosophila_protein'):
if not data_available(data_set):
download_data(data_set)
from pandas import read_csv
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'becker_et_al.csv')
Y = read_csv(filename, header=0)
return data_details_return({'Y': Y}, data_set)
def drosophila_knirps(data_set='drosophila_protein'):
if not data_available(data_set):
download_data(data_set)
from pandas import read_csv
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'becker_et_al.csv')
# in the csv file we have facts_kni and ext_kni. We treat facts_kni as protein and ext_kni as mRNA
df = read_csv(filename, header=0)
t = df['t'][:,None]
x = df['x'][:,None]
g = df['expression1'][:,None]
p = df['expression2'][:,None]
leng = x.shape[0]
T = np.vstack([t,t])
S = np.vstack([x,x])
inx = np.zeros(leng*2)[:,None]
    inx[leng:leng*2] = 1
X = np.hstack([T,S,inx])
Y = np.vstack([g,p])
return data_details_return({'Y': Y, 'X': X}, data_set)
# This will be for downloading google trends data.
def google_trends(query_terms=['big data', 'machine learning', 'data science'], data_set='google_trends', refresh_data=False):
"""Data downloaded from Google trends for given query terms. Warning, if you use this function multiple times in a row you get blocked due to terms of service violations. The function will cache the result of your query, if you wish to refresh an old query set refresh_data to True. The function is inspired by this notebook: http://nbviewer.ipython.org/github/sahuguet/notebooks/blob/master/GoogleTrends%20meet%20Notebook.ipynb"""
query_terms.sort()
import pandas
# Create directory name for data
dir_path = os.path.join(data_path,'google_trends')
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
dir_name = '-'.join(query_terms)
dir_name = dir_name.replace(' ', '_')
dir_path = os.path.join(dir_path,dir_name)
file = 'data.csv'
file_name = os.path.join(dir_path,file)
if not os.path.exists(file_name) or refresh_data:
print("Accessing Google trends to acquire the data. Note that repeated accesses will result in a block due to a google terms of service violation. Failure at this point may be due to such blocks.")
# quote the query terms.
quoted_terms = []
for term in query_terms:
            quoted_terms.append(quote(term))
print("Query terms: ", ', '.join(query_terms))
print("Fetching query:")
query = 'http://www.google.com/trends/fetchComponent?q=%s&cid=TIMESERIES_GRAPH_0&export=3' % ",".join(quoted_terms)
data = urlopen(query).read()
print("Done.")
# In the notebook they did some data cleaning: remove Javascript header+footer, and translate new Date(....,..,..) into YYYY-MM-DD.
header = """// Data table response\ngoogle.visualization.Query.setResponse("""
data = data[len(header):-2]
data = re.sub('new Date\((\d+),(\d+),(\d+)\)', (lambda m: '"%s-%02d-%02d"' % (m.group(1).strip(), 1+int(m.group(2)), int(m.group(3)))), data)
timeseries = json.loads(data)
columns = [k['label'] for k in timeseries['table']['cols']]
rows = map(lambda x: [k['v'] for k in x['c']], timeseries['table']['rows'])
df = pandas.DataFrame(rows, columns=columns)
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
df.to_csv(file_name)
else:
print("Reading cached data for google trends. To refresh the cache set 'refresh_data=True' when calling this function.")
print("Query terms: ", ', '.join(query_terms))
df = pandas.read_csv(file_name, parse_dates=[0])
columns = df.columns
terms = len(query_terms)
import datetime
X = np.asarray([(row, i) for i in range(terms) for row in df.index])
Y = np.asarray([[df.ix[row][query_terms[i]]] for i in range(terms) for row in df.index ])
output_info = columns[1:]
return data_details_return({'data frame' : df, 'X': X, 'Y': Y, 'query_terms': output_info, 'info': "Data downloaded from google trends with query terms: " + ', '.join(output_info) + '.'}, data_set)
# The data sets
def oil(data_set='three_phase_oil_flow'):
"""The three phase oil data from Bishop and James (1993)."""
if not data_available(data_set):
download_data(data_set)
oil_train_file = os.path.join(data_path, data_set, 'DataTrn.txt')
oil_trainlbls_file = os.path.join(data_path, data_set, 'DataTrnLbls.txt')
oil_test_file = os.path.join(data_path, data_set, 'DataTst.txt')
oil_testlbls_file = os.path.join(data_path, data_set, 'DataTstLbls.txt')
oil_valid_file = os.path.join(data_path, data_set, 'DataVdn.txt')
oil_validlbls_file = os.path.join(data_path, data_set, 'DataVdnLbls.txt')
fid = open(oil_train_file)
X = np.fromfile(fid, sep='\t').reshape((-1, 12))
fid.close()
fid = open(oil_test_file)
Xtest = np.fromfile(fid, sep='\t').reshape((-1, 12))
fid.close()
fid = open(oil_valid_file)
Xvalid = np.fromfile(fid, sep='\t').reshape((-1, 12))
fid.close()
fid = open(oil_trainlbls_file)
Y = np.fromfile(fid, sep='\t').reshape((-1, 3)) * 2. - 1.
fid.close()
fid = open(oil_testlbls_file)
Ytest = np.fromfile(fid, sep='\t').reshape((-1, 3)) * 2. - 1.
fid.close()
fid = open(oil_validlbls_file)
Yvalid = np.fromfile(fid, sep='\t').reshape((-1, 3)) * 2. - 1.
fid.close()
    return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'Xvalid': Xvalid, 'Yvalid': Yvalid}, data_set)
#else:
# throw an error
def oil_100(seed=default_seed, data_set = 'three_phase_oil_flow'):
np.random.seed(seed=seed)
data = oil()
indices = np.random.permutation(1000)
indices = indices[0:100]
X = data['X'][indices, :]
Y = data['Y'][indices, :]
return data_details_return({'X': X, 'Y': Y, 'info': "Subsample of the full oil data extracting 100 values randomly without replacement, here seed was " + str(seed)}, data_set)
def pumadyn(seed=default_seed, data_set='pumadyn-32nm'):
if not data_available(data_set):
download_data(data_set)
path = os.path.join(data_path, data_set)
tar = tarfile.open(os.path.join(path, 'pumadyn-32nm.tar.gz'))
print('Extracting file.')
tar.extractall(path=path)
tar.close()
# Data is variance 1, no need to normalize.
data = np.loadtxt(os.path.join(data_path, data_set, 'pumadyn-32nm', 'Dataset.data.gz'))
indices = np.random.permutation(data.shape[0])
indicesTrain = indices[0:7168]
indicesTest = indices[7168:-1]
indicesTrain.sort(axis=0)
indicesTest.sort(axis=0)
X = data[indicesTrain, 0:-2]
Y = data[indicesTrain, -1][:, None]
Xtest = data[indicesTest, 0:-2]
Ytest = data[indicesTest, -1][:, None]
return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'seed': seed}, data_set)
def robot_wireless(data_set='robot_wireless'):
# WiFi access point strengths on a tour around UW Paul Allen building.
if not data_available(data_set):
download_data(data_set)
file_name = os.path.join(data_path, data_set, 'uw-floor.txt')
all_time = np.genfromtxt(file_name, usecols=(0))
macaddress = np.genfromtxt(file_name, usecols=(1), dtype=str)
x = np.genfromtxt(file_name, usecols=(2))
y = np.genfromtxt(file_name, usecols=(3))
strength = np.genfromtxt(file_name, usecols=(4))
addresses = np.unique(macaddress)
times = np.unique(all_time)
addresses.sort()
times.sort()
allY = np.zeros((len(times), len(addresses)))
allX = np.zeros((len(times), 2))
allY[:]=-92.
strengths={}
for address, j in zip(addresses, range(len(addresses))):
ind = np.nonzero(address==macaddress)
temp_strengths=strength[ind]
temp_x=x[ind]
temp_y=y[ind]
temp_times = all_time[ind]
for time in temp_times:
vals = time==temp_times
if any(vals):
ind2 = np.nonzero(vals)
i = np.nonzero(time==times)
allY[i, j] = temp_strengths[ind2]
allX[i, 0] = temp_x[ind2]
allX[i, 1] = temp_y[ind2]
allY = (allY + 85.)/15.
X = allX[0:215, :]
Y = allY[0:215, :]
Xtest = allX[215:, :]
Ytest = allY[215:, :]
return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'addresses' : addresses, 'times' : times}, data_set)
def silhouette(data_set='ankur_pose_data'):
    # Ankur Agarwal and Bill Triggs' silhouette data.
if not data_available(data_set):
download_data(data_set)
mat_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'ankurDataPoseSilhouette.mat'))
inMean = np.mean(mat_data['Y'])
inScales = np.sqrt(np.var(mat_data['Y']))
X = mat_data['Y'] - inMean
X = X / inScales
Xtest = mat_data['Y_test'] - inMean
Xtest = Xtest / inScales
Y = mat_data['Z']
Ytest = mat_data['Z_test']
return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest}, data_set)
def decampos_digits(data_set='decampos_characters', which_digits=[0,1,2,3,4,5,6,7,8,9]):
if not data_available(data_set):
download_data(data_set)
path = os.path.join(data_path, data_set)
digits = np.load(os.path.join(path, 'digits.npy'))
digits = digits[which_digits,:,:,:]
num_classes, num_samples, height, width = digits.shape
Y = digits.reshape((digits.shape[0]*digits.shape[1],digits.shape[2]*digits.shape[3]))
lbls = np.array([[l]*num_samples for l in which_digits]).reshape(Y.shape[0], 1)
str_lbls = np.array([[str(l)]*num_samples for l in which_digits])
return data_details_return({'Y': Y, 'lbls': lbls, 'str_lbls' : str_lbls, 'info': 'Digits data set from the de Campos characters data'}, data_set)
def ripley_synth(data_set='ripley_prnn_data'):
if not data_available(data_set):
download_data(data_set)
train = np.genfromtxt(os.path.join(data_path, data_set, 'synth.tr'), skip_header=1)
X = train[:, 0:2]
y = train[:, 2:3]
test = np.genfromtxt(os.path.join(data_path, data_set, 'synth.te'), skip_header=1)
Xtest = test[:, 0:2]
ytest = test[:, 2:3]
return data_details_return({'X': X, 'Y': y, 'Xtest': Xtest, 'Ytest': ytest, 'info': 'Synthetic data generated by Ripley for a two class classification problem.'}, data_set)
def global_average_temperature(data_set='global_temperature', num_train=1000, refresh_data=False):
path = os.path.join(data_path, data_set)
if data_available(data_set) and not refresh_data:
        print('Using cached version of the data set; to use the latest version set refresh_data to True')
else:
download_data(data_set)
data = np.loadtxt(os.path.join(data_path, data_set, 'GLBTS.long.data'))
print('Most recent data observation from month ', data[-1, 1], ' in year ', data[-1, 0])
allX = data[data[:, 3]!=-99.99, 2:3]
allY = data[data[:, 3]!=-99.99, 3:4]
X = allX[:num_train, 0:1]
Xtest = allX[num_train:, 0:1]
Y = allY[:num_train, 0:1]
Ytest = allY[num_train:, 0:1]
return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'info': "Mauna Loa data with " + str(num_train) + " values used as training points."}, data_set)
def mauna_loa(data_set='mauna_loa', num_train=545, refresh_data=False):
path = os.path.join(data_path, data_set)
if data_available(data_set) and not refresh_data:
        print('Using cached version of the data set; to use the latest version set refresh_data to True')
else:
download_data(data_set)
data = np.loadtxt(os.path.join(data_path, data_set, 'co2_mm_mlo.txt'))
print('Most recent data observation from month ', data[-1, 1], ' in year ', data[-1, 0])
allX = data[data[:, 3]!=-99.99, 2:3]
allY = data[data[:, 3]!=-99.99, 3:4]
X = allX[:num_train, 0:1]
Xtest = allX[num_train:, 0:1]
Y = allY[:num_train, 0:1]
Ytest = allY[num_train:, 0:1]
return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'info': "Mauna Loa data with " + str(num_train) + " values used as training points."}, data_set)
def boxjenkins_airline(data_set='boxjenkins_airline', num_train=96):
path = os.path.join(data_path, data_set)
if not data_available(data_set):
download_data(data_set)
data = np.loadtxt(os.path.join(data_path, data_set, 'boxjenkins_airline.csv'), delimiter=',')
Y = data[:num_train, 1:2]
X = data[:num_train, 0:1]
Xtest = data[num_train:, 0:1]
Ytest = data[num_train:, 1:2]
return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'info': "Montly airline passenger data from Box & Jenkins 1976."}, data_set)
def osu_run1(data_set='osu_run1', sample_every=4):
path = os.path.join(data_path, data_set)
if not data_available(data_set):
download_data(data_set)
zip = zipfile.ZipFile(os.path.join(data_path, data_set, 'run1TXT.ZIP'), 'r')
for name in zip.namelist():
zip.extract(name, path)
Y, connect = GPy.util.mocap.load_text_data('Aug210106', path)
Y = Y[0:-1:sample_every, :]
return data_details_return({'Y': Y, 'connect' : connect}, data_set)
def swiss_roll_generated(num_samples=1000, sigma=0.0):
    with open(os.path.join(os.path.dirname(__file__), 'datasets', 'swiss_roll.pickle'), 'rb') as f:
data = pickle.load(f)
Na = data['Y'].shape[0]
perm = np.random.permutation(np.r_[:Na])[:num_samples]
Y = data['Y'][perm, :]
t = data['t'][perm]
c = data['colors'][perm, :]
so = np.argsort(t)
Y = Y[so, :]
t = t[so]
c = c[so, :]
return {'Y':Y, 't':t, 'colors':c}
def hapmap3(data_set='hapmap3'):
"""
The HapMap phase three SNP dataset - 1184 samples out of 11 populations.
SNP_matrix (A) encoding [see Paschou et all. 2007 (PCA-Correlated SNPs...)]:
Let (B1,B2) be the alphabetically sorted bases, which occur in the j-th SNP, then
/ 1, iff SNPij==(B1,B1)
Aij = | 0, iff SNPij==(B1,B2)
\ -1, iff SNPij==(B2,B2)
The SNP data and the meta information (such as iid, sex and phenotype) are
stored in the dataframe datadf, index is the Individual ID,
with following columns for metainfo:
* family_id -> Family ID
* paternal_id -> Paternal ID
* maternal_id -> Maternal ID
* sex -> Sex (1=male; 2=female; other=unknown)
* phenotype -> Phenotype (-9, or 0 for unknown)
* population -> Population string (e.g. 'ASW' - 'YRI')
* rest are SNP rs (ids)
More information is given in infodf:
* Chromosome:
        - autosomal chromosomes                -> 1-22
- X X chromosome -> 23
- Y Y chromosome -> 24
- XY Pseudo-autosomal region of X -> 25
- MT Mitochondrial -> 26
    * Relative Position (to Chromosome) [base pairs]
"""
try:
from pandas import read_pickle, DataFrame
from sys import stdout
import bz2
    except ImportError:
        raise ImportError("Need pandas for the hapmap dataset; make sure to install pandas (http://pandas.pydata.org/) before loading the hapmap dataset")
dir_path = os.path.join(data_path,'hapmap3')
hapmap_file_name = 'hapmap3_r2_b36_fwd.consensus.qc.poly'
unpacked_files = [os.path.join(dir_path, hapmap_file_name+ending) for ending in ['.ped', '.map']]
unpacked_files_exist = reduce(lambda a, b:a and b, map(os.path.exists, unpacked_files))
if not unpacked_files_exist and not data_available(data_set):
download_data(data_set)
preprocessed_data_paths = [os.path.join(dir_path,hapmap_file_name + file_name) for file_name in \
['.snps.pickle',
'.info.pickle',
'.nan.pickle']]
if not reduce(lambda a,b: a and b, map(os.path.exists, preprocessed_data_paths)):
if not overide_manual_authorize and not prompt_user("Preprocessing requires ~25GB "
"of memory and can take a (very) long time, continue? [Y/n]"):
print("Preprocessing required for further usage.")
return
status = "Preprocessing data, please be patient..."
print(status)
def write_status(message, progress, status):
stdout.write(" "*len(status)); stdout.write("\r"); stdout.flush()
status = r"[{perc: <{ll}}] {message: <13s}".format(message=message, ll=20,
perc="="*int(20.*progress/100.))
stdout.write(status); stdout.flush()
return status
if not unpacked_files_exist:
status=write_status('unpacking...', 0, '')
curr = 0
for newfilepath in unpacked_files:
if not os.path.exists(newfilepath):
filepath = newfilepath + '.bz2'
file_size = os.path.getsize(filepath)
with open(newfilepath, 'wb') as new_file, open(filepath, 'rb') as f:
decomp = bz2.BZ2Decompressor()
file_processed = 0
buffsize = 100 * 1024
for data in iter(lambda : f.read(buffsize), b''):
new_file.write(decomp.decompress(data))
file_processed += len(data)
status=write_status('unpacking...', curr+12.*file_processed/(file_size), status)
curr += 12
status=write_status('unpacking...', curr, status)
os.remove(filepath)
status=write_status('reading .ped...', 25, status)
# Preprocess data:
snpstrnp = np.loadtxt(unpacked_files[0], dtype=str)
status=write_status('reading .map...', 33, status)
mapnp = np.loadtxt(unpacked_files[1], dtype=str)
status=write_status('reading relationships.txt...', 42, status)
# and metainfo:
infodf = DataFrame.from_csv(os.path.join(dir_path,'./relationships_w_pops_121708.txt'), header=0, sep='\t')
infodf.set_index('IID', inplace=1)
status=write_status('filtering nan...', 45, status)
snpstr = snpstrnp[:,6:].astype('S1').reshape(snpstrnp.shape[0], -1, 2)
inan = snpstr[:,:,0] == '0'
status=write_status('filtering reference alleles...', 55, status)
ref = np.array(map(lambda x: np.unique(x)[-2:], snpstr.swapaxes(0,1)[:,:,:]))
status=write_status('encoding snps...', 70, status)
# Encode the information for each gene in {-1,0,1}:
status=write_status('encoding snps...', 73, status)
snps = (snpstr==ref[None,:,:])
status=write_status('encoding snps...', 76, status)
snps = (snps*np.array([1,-1])[None,None,:])
status=write_status('encoding snps...', 78, status)
snps = snps.sum(-1)
status=write_status('encoding snps...', 81, status)
snps = snps.astype('i8')
status=write_status('marking nan values...', 88, status)
# put in nan values (masked as -128):
snps[inan] = -128
status=write_status('setting up meta...', 94, status)
# get meta information:
metaheader = np.r_[['family_id', 'iid', 'paternal_id', 'maternal_id', 'sex', 'phenotype']]
metadf = DataFrame(columns=metaheader, data=snpstrnp[:,:6])
metadf.set_index('iid', inplace=1)
metadf = metadf.join(infodf.population)
metadf.to_pickle(preprocessed_data_paths[1])
# put everything together:
status=write_status('setting up snps...', 96, status)
snpsdf = DataFrame(index=metadf.index, data=snps, columns=mapnp[:,1])
with open(preprocessed_data_paths[0], 'wb') as f:
            pickle.dump(snpsdf, f, protocol=-1)
status=write_status('setting up snps...', 98, status)
inandf = DataFrame(index=metadf.index, data=inan, columns=mapnp[:,1])
inandf.to_pickle(preprocessed_data_paths[2])
status=write_status('done :)', 100, status)
print('')
else:
print("loading snps...")
snpsdf = read_pickle(preprocessed_data_paths[0])
print("loading metainfo...")
metadf = read_pickle(preprocessed_data_paths[1])
print("loading nan entries...")
inandf = read_pickle(preprocessed_data_paths[2])
snps = snpsdf.values
populations = metadf.population.values.astype('S3')
hapmap = dict(name=data_set,
description='The HapMap phase three SNP dataset - '
'1184 samples out of 11 populations. inan is a '
                  'boolean array, indicating whether or not the '
'given entry is nan (nans are masked as '
'-128 in snps).',
snpsdf=snpsdf,
metadf=metadf,
snps=snps,
inan=inandf.values,
inandf=inandf,
populations=populations)
return hapmap
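# A minimal sketch (added for illustration, not part of the original file)
# of the {-1, 0, 1} SNP encoding used above, on made-up genotypes. For each
# SNP, ref holds the two alphabetically sorted bases (B1, B2); (B1,B1) -> 1,
# (B1,B2) -> 0, (B2,B2) -> -1, exactly as in the hapmap3 docstring.
def _hapmap3_encoding_sketch():
    import numpy as np
    # three individuals x one SNP, two bases per genotype
    snpstr = np.array([[['A', 'A']], [['A', 'G']], [['G', 'G']]])
    ref = np.array([['A', 'G']])  # (B1, B2) for the single SNP
    snps = ((snpstr == ref[None, :, :]) * np.array([1, -1])[None, None, :]).sum(-1)
    return snps  # -> [[1], [0], [-1]]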
def singlecell(data_set='singlecell'):
if not data_available(data_set):
download_data(data_set)
from pandas import read_csv
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'singlecell.csv')
Y = read_csv(filename, header=0, index_col=0)
genes = Y.columns
labels = Y.index
# data = np.loadtxt(os.path.join(dir_path, 'singlecell.csv'), delimiter=",", dtype=str)
return data_details_return({'Y': Y, 'info' : "qPCR singlecell experiment in Mouse, measuring 48 gene expressions in 1-64 cell states. The labels have been created as in Guo et al. [2010]",
'genes': genes, 'labels':labels,
}, data_set)
def singlecell_rna_seq_islam(dataset='singlecell_islam'):
if not data_available(dataset):
download_data(dataset)
from pandas import read_csv, DataFrame, concat
dir_path = os.path.join(data_path, dataset)
filename = os.path.join(dir_path, 'GSE29087_L139_expression_tab.txt.gz')
data = read_csv(filename, sep='\t', skiprows=6, compression='gzip', header=None)
header1 = read_csv(filename, sep='\t', header=None, skiprows=5, nrows=1, compression='gzip')
header2 = read_csv(filename, sep='\t', header=None, skiprows=3, nrows=1, compression='gzip')
data.columns = np.concatenate((header1.ix[0, :], header2.ix[0, 7:]))
Y = data.set_index("Feature").ix[8:, 6:-4].T.astype(float)
# read the info .soft
filename = os.path.join(dir_path, 'GSE29087_family.soft.gz')
info = read_csv(filename, sep='\t', skiprows=0, compression='gzip', header=None)
# split at ' = '
info = DataFrame(info.ix[:,0].str.split(' = ').tolist())
# only take samples:
info = info[info[0].str.contains("!Sample")]
info[0] = info[0].apply(lambda row: row[len("!Sample_"):])
groups = info.groupby(0).groups
# remove 'GGG' from barcodes
barcode = info[1][groups['barcode']].apply(lambda row: row[:-3])
title = info[1][groups['title']]
title.index = barcode
title.name = 'title'
geo_accession = info[1][groups['geo_accession']]
geo_accession.index = barcode
geo_accession.name = 'geo_accession'
case_id = info[1][groups['source_name_ch1']]
case_id.index = barcode
case_id.name = 'source_name_ch1'
info = concat([title, geo_accession, case_id], axis=1)
labels = info.join(Y).source_name_ch1[:-4]
labels[labels=='Embryonic stem cell'] = "ES"
labels[labels=='Embryonic fibroblast'] = "MEF"
return data_details_return({'Y': Y,
'info': '92 single cells (48 mouse ES cells, 44 mouse embryonic fibroblasts and 4 negative controls) were analyzed by single-cell tagged reverse transcription (STRT)',
'genes': Y.columns,
'labels': labels,
'datadf': data,
'infodf': info}, dataset)
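# A minimal sketch (added for illustration, not part of the original file)
# of the ".soft" metadata parsing pattern used above: split each
# "!Sample_<field> = <value>" line at ' = ', strip the "!Sample_" prefix,
# then group rows by field name. The sample lines below are invented.
def _soft_parsing_sketch():
    from pandas import DataFrame
    lines = ['!Sample_title = cell_01',
             '!Sample_title = cell_02',
             '!Sample_geo_accession = GSM000001']
    info = DataFrame([line.split(' = ') for line in lines])
    info[0] = info[0].apply(lambda row: row[len('!Sample_'):])
    return info.groupby(0).groups  # e.g. {'title': [0, 1], 'geo_accession': [2]}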
def singlecell_rna_seq_deng(dataset='singlecell_deng'):
if not data_available(dataset):
download_data(dataset)
from pandas import read_csv, isnull
dir_path = os.path.join(data_path, dataset)
# read the info .soft
filename = os.path.join(dir_path, 'GSE45719_series_matrix.txt.gz')
info = read_csv(filename, sep='\t', skiprows=0, compression='gzip', header=None, nrows=29, index_col=0)
summary = info.loc['!Series_summary'][1]
design = info.loc['!Series_overall_design']
# only take samples:
sample_info = read_csv(filename, sep='\t', skiprows=30, compression='gzip', header=0, index_col=0).T
sample_info.columns = sample_info.columns.to_series().apply(lambda row: row[len("!Sample_"):])
sample_info.columns.name = sample_info.columns.name[len("!Sample_"):]
sample_info = sample_info[['geo_accession', 'characteristics_ch1', 'description']]
sample_info = sample_info.iloc[:, np.r_[0:4, 5:sample_info.shape[1]]]
c = sample_info.columns.to_series()
c[1:4] = ['strain', 'cross', 'developmental_stage']
sample_info.columns = c
# get the labels right:
rep = re.compile('\(.*\)')
def filter_dev_stage(row):
if isnull(row):
row = "2-cell stage embryo"
if row.startswith("developmental stage: "):
row = row[len("developmental stage: "):]
if row == 'adult':
row += " liver"
row = row.replace(' stage ', ' ')
row = rep.sub(' ', row)
row = row.strip(' ')
return row
labels = sample_info.developmental_stage.apply(filter_dev_stage)
# Extract the tar file
filename = os.path.join(dir_path, 'GSE45719_Raw.tar')
with tarfile.open(filename, 'r') as files:
print("Extracting Archive {}...".format(files.name))
data = None
gene_info = None
message = ''
members = files.getmembers()
overall = len(members)
for i, file_info in enumerate(members):
f = files.extractfile(file_info)
inner = read_csv(f, sep='\t', header=0, compression='gzip', index_col=0)
print(' '*(len(message)+1) + '\r', end=' ')
message = "{: >7.2%}: Extracting: {}".format(float(i+1)/overall, file_info.name[:20]+"...txt.gz")
print(message, end=' ')
if data is None:
data = inner.RPKM.to_frame()
data.columns = [file_info.name[:-18]]
gene_info = inner.Refseq_IDs.to_frame()
gene_info.columns = ['NCBI Reference Sequence']
else:
data[file_info.name[:-18]] = inner.RPKM
#gene_info[file_info.name[:-18]] = inner.Refseq_IDs
# Strip GSM number off data index
rep = re.compile('GSM\d+_')
from pandas import MultiIndex
columns = MultiIndex.from_tuples([row.split('_', 1) for row in data.columns])
columns.names = ['GEO Accession', 'index']
data.columns = columns
data = data.T
# make sure the same index gets used
sample_info.index = data.index
# get the labels from the description
#rep = re.compile('fibroblast|\d+-cell|embryo|liver|early blastocyst|mid blastocyst|late blastocyst|blastomere|zygote', re.IGNORECASE)
sys.stdout.write(' '*len(message) + '\r')
sys.stdout.flush()
print()
print("Read Archive {}".format(files.name))
return data_details_return({'Y': data,
'series_info': info,
'sample_info': sample_info,
'gene_info': gene_info,
'summary': summary,
'design': design,
'genes': data.columns,
'labels': labels,
}, dataset)
def swiss_roll_1000():
return swiss_roll(num_samples=1000)
def swiss_roll(num_samples=3000, data_set='swiss_roll'):
if not data_available(data_set):
download_data(data_set)
mat_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'swiss_roll_data.mat'))
Y = mat_data['X_data'][:, 0:num_samples].transpose()
    return data_details_return({'Y': Y, 'X': mat_data['X_data'], 'info': "The first " + str(num_samples) + " points from the swiss roll data of Tenenbaum, de Silva and Langford (2001)."}, data_set)
def isomap_faces(num_samples=698, data_set='isomap_face_data'):
if not data_available(data_set):
download_data(data_set)
mat_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'face_data.mat'))
Y = mat_data['images'][:, 0:num_samples].transpose()
    return data_details_return({'Y': Y, 'poses' : mat_data['poses'], 'lights': mat_data['lights'], 'info': "The first " + str(num_samples) + " points from the face data of Tenenbaum, de Silva and Langford (2001)."}, data_set)
def simulation_BGPLVM():
mat_data = scipy.io.loadmat(os.path.join(data_path, 'BGPLVMSimulation.mat'))
Y = np.array(mat_data['Y'], dtype=float)
S = np.array(mat_data['initS'], dtype=float)
mu = np.array(mat_data['initMu'], dtype=float)
#return data_details_return({'S': S, 'Y': Y, 'mu': mu}, data_set)
return {'Y': Y, 'S': S,
'mu' : mu,
'info': "Simulated test dataset generated in MATLAB to compare BGPLVM between python and MATLAB"}
def toy_rbf_1d(seed=default_seed, num_samples=500):
"""
Samples values of a function from an RBF covariance with very small noise for inputs uniformly distributed between -1 and 1.
:param seed: seed to use for random sampling.
:type seed: int
:param num_samples: number of samples to sample in the function (default 500).
:type num_samples: int
"""
np.random.seed(seed=seed)
num_in = 1
X = np.random.uniform(low= -1.0, high=1.0, size=(num_samples, num_in))
X.sort(axis=0)
rbf = GPy.kern.RBF(num_in, variance=1., lengthscale=np.array((0.25,)))
white = GPy.kern.White(num_in, variance=1e-2)
kernel = rbf + white
K = kernel.K(X)
y = np.reshape(np.random.multivariate_normal(np.zeros(num_samples), K), (num_samples, 1))
return {'X':X, 'Y':y, 'info': "Sampled " + str(num_samples) + " values of a function from an RBF covariance with very small noise for inputs uniformly distributed between -1 and 1."}
def toy_rbf_1d_50(seed=default_seed):
np.random.seed(seed=seed)
data = toy_rbf_1d()
indices = np.random.permutation(data['X'].shape[0])
indices = indices[0:50]
indices.sort(axis=0)
X = data['X'][indices, :]
Y = data['Y'][indices, :]
    return {'X': X, 'Y': Y, 'info': "Subsamples toy_rbf_1d with 50 values randomly taken from the original sample.", 'seed' : seed}
def toy_linear_1d_classification(seed=default_seed):
np.random.seed(seed=seed)
x1 = np.random.normal(-3, 5, 20)
x2 = np.random.normal(3, 5, 20)
X = (np.r_[x1, x2])[:, None]
return {'X': X, 'Y': sample_class(2.*X), 'F': 2.*X, 'seed' : seed}
def olivetti_glasses(data_set='olivetti_glasses', num_training=200, seed=default_seed):
path = os.path.join(data_path, data_set)
if not data_available(data_set):
download_data(data_set)
y = np.load(os.path.join(path, 'has_glasses.np'))
y = np.where(y=='y',1,0).reshape(-1,1)
faces = scipy.io.loadmat(os.path.join(path, 'olivettifaces.mat'))['faces'].T
np.random.seed(seed=seed)
index = np.random.permutation(faces.shape[0])
X = faces[index[:num_training],:]
Xtest = faces[index[num_training:],:]
Y = y[index[:num_training],:]
Ytest = y[index[num_training:]]
    return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'seed' : seed, 'info': "ORL Faces with labels identifying who is wearing glasses and who isn't. Data is randomly partitioned according to given seed. Presence or absence of glasses was labelled by James Hensman."}, 'olivetti_faces')
def olivetti_faces(data_set='olivetti_faces'):
path = os.path.join(data_path, data_set)
if not data_available(data_set):
download_data(data_set)
zip = zipfile.ZipFile(os.path.join(path, 'att_faces.zip'), 'r')
for name in zip.namelist():
zip.extract(name, path)
Y = []
lbls = []
for subject in range(40):
for image in range(10):
image_path = os.path.join(path, 'orl_faces', 's'+str(subject+1), str(image+1) + '.pgm')
from GPy.util import netpbmfile
Y.append(netpbmfile.imread(image_path).flatten())
lbls.append(subject)
Y = np.asarray(Y)
lbls = np.asarray(lbls)[:, None]
return data_details_return({'Y': Y, 'lbls' : lbls, 'info': "ORL Faces processed to 64x64 images."}, data_set)
def xw_pen(data_set='xw_pen'):
if not data_available(data_set):
download_data(data_set)
Y = np.loadtxt(os.path.join(data_path, data_set, 'xw_pen_15.csv'), delimiter=',')
X = np.arange(485)[:, None]
return data_details_return({'Y': Y, 'X': X, 'info': "Tilt data from a personalized digital assistant pen. Plot in original paper showed regression between time steps 175 and 275."}, data_set)
def download_rogers_girolami_data(data_set='rogers_girolami_data'):
if not data_available('rogers_girolami_data'):
download_data(data_set)
path = os.path.join(data_path, data_set)
tar_file = os.path.join(path, 'firstcoursemldata.tar.gz')
tar = tarfile.open(tar_file)
print('Extracting file.')
tar.extractall(path=path)
tar.close()
def olympic_100m_men(data_set='rogers_girolami_data'):
download_rogers_girolami_data()
olympic_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'data', 'olympics.mat'))['male100']
X = olympic_data[:, 0][:, None]
Y = olympic_data[:, 1][:, None]
return data_details_return({'X': X, 'Y': Y, 'info': "Olympic sprint times for 100 m men from 1896 until 2008. Example is from Rogers and Girolami's First Course in Machine Learning."}, data_set)
def olympic_100m_women(data_set='rogers_girolami_data'):
download_rogers_girolami_data()
olympic_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'data', 'olympics.mat'))['female100']
X = olympic_data[:, 0][:, None]
Y = olympic_data[:, 1][:, None]
return data_details_return({'X': X, 'Y': Y, 'info': "Olympic sprint times for 100 m women from 1896 until 2008. Example is from Rogers and Girolami's First Course in Machine Learning."}, data_set)
def olympic_200m_women(data_set='rogers_girolami_data'):
download_rogers_girolami_data()
olympic_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'data', 'olympics.mat'))['female200']
X = olympic_data[:, 0][:, None]
Y = olympic_data[:, 1][:, None]
return data_details_return({'X': X, 'Y': Y, 'info': "Olympic 200 m winning times for women from 1896 until 2008. Data is from Rogers and Girolami's First Course in Machine Learning."}, data_set)
def olympic_200m_men(data_set='rogers_girolami_data'):
download_rogers_girolami_data()
olympic_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'data', 'olympics.mat'))['male200']
X = olympic_data[:, 0][:, None]
Y = olympic_data[:, 1][:, None]
    return data_details_return({'X': X, 'Y': Y, 'info': "Olympic 200 m winning times for men from 1896 until 2008. Data is from Rogers and Girolami's First Course in Machine Learning."}, data_set)
def olympic_400m_women(data_set='rogers_girolami_data'):
download_rogers_girolami_data()
olympic_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'data', 'olympics.mat'))['female400']
X = olympic_data[:, 0][:, None]
Y = olympic_data[:, 1][:, None]
return data_details_return({'X': X, 'Y': Y, 'info': "Olympic 400 m winning times for women until 2008. Data is from Rogers and Girolami's First Course in Machine Learning."}, data_set)
def olympic_400m_men(data_set='rogers_girolami_data'):
download_rogers_girolami_data()
olympic_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'data', 'olympics.mat'))['male400']
X = olympic_data[:, 0][:, None]
Y = olympic_data[:, 1][:, None]
    return data_details_return({'X': X, 'Y': Y, 'info': "Olympic 400 m winning times for men until 2008. Data is from Rogers and Girolami's First Course in Machine Learning."}, data_set)
def olympic_marathon_men(data_set='olympic_marathon_men'):
if not data_available(data_set):
download_data(data_set)
olympics = np.genfromtxt(os.path.join(data_path, data_set, 'olympicMarathonTimes.csv'), delimiter=',')
X = olympics[:, 0:1]
Y = olympics[:, 1:2]
return data_details_return({'X': X, 'Y': Y}, data_set)
def olympic_sprints(data_set='rogers_girolami_data'):
"""All olympics sprint winning times for multiple output prediction."""
X = np.zeros((0, 2))
Y = np.zeros((0, 1))
for i, dataset in enumerate([olympic_100m_men,
olympic_100m_women,
olympic_200m_men,
olympic_200m_women,
olympic_400m_men,
olympic_400m_women]):
data = dataset()
year = data['X']
time = data['Y']
X = np.vstack((X, np.hstack((year, np.ones_like(year)*i))))
Y = np.vstack((Y, time))
data['X'] = X
data['Y'] = Y
    data['info'] = "Olympic sprint event winning times for men and women to 2008. Data is from Rogers and Girolami's First Course in Machine Learning."
return data_details_return({
'X': X,
'Y': Y,
        'info': "Olympic sprint event winning times for men and women to 2008. Data is from Rogers and Girolami's First Course in Machine Learning.",
'output_info': {
0:'100m Men',
1:'100m Women',
2:'200m Men',
3:'200m Women',
4:'400m Men',
5:'400m Women'}
}, data_set)
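# A minimal usage sketch (added for illustration, not part of the original
# file): column 1 of the stacked X returned by olympic_sprints() is the
# integer output index listed under 'output_info', so a single event can be
# recovered with a boolean mask.
def _olympic_sprints_event_sketch(event_index=0):
    data = olympic_sprints()
    X, Y = data['X'], data['Y']
    mask = X[:, 1] == event_index  # 0 -> '100m Men'
    return X[mask, 0:1], Y[mask]   # years and winning times for one event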
# def movielens_small(partNo=1,seed=default_seed):
# np.random.seed(seed=seed)
# fileName = os.path.join(data_path, 'movielens', 'small', 'u' + str(partNo) + '.base')
# fid = open(fileName)
# uTrain = np.fromfile(fid, sep='\t', dtype=np.int16).reshape((-1, 4))
# fid.close()
# maxVals = np.amax(uTrain, axis=0)
# numUsers = maxVals[0]
# numFilms = maxVals[1]
# numRatings = uTrain.shape[0]
# Y = scipy.sparse.lil_matrix((numFilms, numUsers), dtype=np.int8)
# for i in range(numUsers):
# ind = pb.mlab.find(uTrain[:, 0]==i+1)
# Y[uTrain[ind, 1]-1, i] = uTrain[ind, 2]
# fileName = os.path.join(data_path, 'movielens', 'small', 'u' + str(partNo) + '.test')
# fid = open(fileName)
# uTest = np.fromfile(fid, sep='\t', dtype=np.int16).reshape((-1, 4))
# fid.close()
# numTestRatings = uTest.shape[0]
# Ytest = scipy.sparse.lil_matrix((numFilms, numUsers), dtype=np.int8)
# for i in range(numUsers):
# ind = pb.mlab.find(uTest[:, 0]==i+1)
# Ytest[uTest[ind, 1]-1, i] = uTest[ind, 2]
# lbls = np.empty((1,1))
# lblstest = np.empty((1,1))
# return {'Y':Y, 'lbls':lbls, 'Ytest':Ytest, 'lblstest':lblstest}
def crescent_data(num_data=200, seed=default_seed):
"""
Data set formed from a mixture of four Gaussians. In each class two of the Gaussians are elongated at right angles to each other and offset to form an approximation to the crescent data that is popular in semi-supervised learning as a toy problem.
    :param num_data: number of data points to be sampled (default is 200).
:type num_data: int
:param seed: random seed to be used for data generation.
:type seed: int
"""
np.random.seed(seed=seed)
sqrt2 = np.sqrt(2)
# Rotation matrix
R = np.array([[sqrt2 / 2, -sqrt2 / 2], [sqrt2 / 2, sqrt2 / 2]])
# Scaling matrices
scales = []
scales.append(np.array([[3, 0], [0, 1]]))
scales.append(np.array([[3, 0], [0, 1]]))
scales.append([[1, 0], [0, 3]])
scales.append([[1, 0], [0, 3]])
means = []
means.append(np.array([4, 4]))
means.append(np.array([0, 4]))
means.append(np.array([-4, -4]))
means.append(np.array([0, -4]))
Xparts = []
num_data_part = []
num_data_total = 0
for i in range(0, 4):
        num_data_part.append(int(round(((i + 1) * num_data) / 4.)))
num_data_part[i] -= num_data_total
part = np.random.normal(size=(num_data_part[i], 2))
part = np.dot(np.dot(part, scales[i]), R) + means[i]
Xparts.append(part)
num_data_total += num_data_part[i]
X = np.vstack((Xparts[0], Xparts[1], Xparts[2], Xparts[3]))
Y = np.vstack((np.ones((num_data_part[0] + num_data_part[1], 1)), -np.ones((num_data_part[2] + num_data_part[3], 1))))
return {'X':X, 'Y':Y, 'info': "Two separate classes of data formed approximately in the shape of two crescents."}
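# A minimal usage sketch (added for illustration, not part of the original
# file): sample the crescent data and confirm the labels split into the two
# classes described in the docstring.
def _crescent_data_sketch():
    data = crescent_data(num_data=200, seed=default_seed)
    X, Y = data['X'], data['Y']
    return X.shape, int((Y == 1).sum()), int((Y == -1).sum())  # (200, 2), 100, 100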
def creep_data(data_set='creep_rupture'):
"""Brun and Yoshida's metal creep rupture data."""
if not data_available(data_set):
download_data(data_set)
path = os.path.join(data_path, data_set)
tar_file = os.path.join(path, 'creeprupt.tar')
tar = tarfile.open(tar_file)
print('Extracting file.')
tar.extractall(path=path)
tar.close()
all_data = np.loadtxt(os.path.join(data_path, data_set, 'taka'))
y = all_data[:, 1:2].copy()
features = [0]
features.extend(range(2, 31))
X = all_data[:, features].copy()
return data_details_return({'X': X, 'y': y}, data_set)
def cifar10_patches(data_set='cifar-10'):
"""The Candian Institute for Advanced Research 10 image data set. Code for loading in this data is taken from this Boris Babenko's blog post, original code available here: http://bbabenko.tumblr.com/post/86756017649/learning-low-level-vision-feautres-in-10-lines-of-code"""
dir_path = os.path.join(data_path, data_set)
filename = os.path.join(dir_path, 'cifar-10-python.tar.gz')
if not data_available(data_set):
download_data(data_set)
import tarfile
# This code is from Boris Babenko's blog post.
# http://bbabenko.tumblr.com/post/86756017649/learning-low-level-vision-feautres-in-10-lines-of-code
tfile = tarfile.open(filename, 'r:gz')
tfile.extractall(dir_path)
with open(os.path.join(dir_path, 'cifar-10-batches-py','data_batch_1'),'rb') as f:
data = pickle.load(f)
images = data['data'].reshape((-1,3,32,32)).astype('float32')/255
images = np.rollaxis(images, 1, 4)
patches = np.zeros((0,5,5,3))
for x in range(0,32-5,5):
for y in range(0,32-5,5):
patches = np.concatenate((patches, images[:,x:x+5,y:y+5,:]), axis=0)
patches = patches.reshape((patches.shape[0],-1))
    return data_details_return({'Y': patches, "info" : "5x5 pixel patches extracted from the 32x32 CIFAR-10 images by Boris Babenko to demonstrate k-means features."}, data_set)
def cmu_mocap_49_balance(data_set='cmu_mocap'):
"""Load CMU subject 49's one legged balancing motion that was used by Alvarez, Luengo and Lawrence at AISTATS 2009."""
train_motions = ['18', '19']
test_motions = ['20']
data = cmu_mocap('49', train_motions, test_motions, sample_every=4, data_set=data_set)
    data['info'] = "One legged balancing motions from the CMU database, subject 49. As used in Alvarez, Luengo and Lawrence at AISTATS 2009. It consists of " + data['info']
return data
def cmu_mocap_35_walk_jog(data_set='cmu_mocap'):
"""Load CMU subject 35's walking and jogging motions, the same data that was used by Taylor, Roweis and Hinton at NIPS 2007. but without their preprocessing. Also used by Lawrence at AISTATS 2007."""
train_motions = ['01', '02', '03', '04', '05', '06',
'07', '08', '09', '10', '11', '12',
'13', '14', '15', '16', '17', '19',
'20', '21', '22', '23', '24', '25',
'26', '28', '30', '31', '32', '33', '34']
test_motions = ['18', '29']
data = cmu_mocap('35', train_motions, test_motions, sample_every=4, data_set=data_set)
    data['info'] = "Walk and jog data from the CMU database, subject 35. As used in Taylor, Roweis and Hinton at NIPS 2007, but without their pre-processing (i.e. as used by Lawrence at AISTATS 2007). It consists of " + data['info']
return data
def cmu_mocap(subject, train_motions, test_motions=[], sample_every=4, data_set='cmu_mocap'):
"""Load a given subject's training and test motions from the CMU motion capture data."""
# Load in subject skeleton.
subject_dir = os.path.join(data_path, data_set)
# Make sure the data is downloaded.
all_motions = train_motions + test_motions
resource = cmu_urls_files(([subject], [all_motions]))
data_resources[data_set] = data_resources['cmu_mocap_full'].copy()
data_resources[data_set]['files'] = resource['files']
data_resources[data_set]['urls'] = resource['urls']
if resource['urls']:
download_data(data_set)
skel = GPy.util.mocap.acclaim_skeleton(os.path.join(subject_dir, subject + '.asf'))
# Set up labels for each sequence
exlbls = np.eye(len(train_motions))
# Load sequences
tot_length = 0
temp_Y = []
temp_lbls = []
for i in range(len(train_motions)):
temp_chan = skel.load_channels(os.path.join(subject_dir, subject + '_' + train_motions[i] + '.amc'))
temp_Y.append(temp_chan[::sample_every, :])
temp_lbls.append(np.tile(exlbls[i, :], (temp_Y[i].shape[0], 1)))
tot_length += temp_Y[i].shape[0]
Y = np.zeros((tot_length, temp_Y[0].shape[1]))
lbls = np.zeros((tot_length, temp_lbls[0].shape[1]))
end_ind = 0
for i in range(len(temp_Y)):
start_ind = end_ind
end_ind += temp_Y[i].shape[0]
Y[start_ind:end_ind, :] = temp_Y[i]
lbls[start_ind:end_ind, :] = temp_lbls[i]
if len(test_motions) > 0:
temp_Ytest = []
temp_lblstest = []
testexlbls = np.eye(len(test_motions))
tot_test_length = 0
for i in range(len(test_motions)):
temp_chan = skel.load_channels(os.path.join(subject_dir, subject + '_' + test_motions[i] + '.amc'))
temp_Ytest.append(temp_chan[::sample_every, :])
temp_lblstest.append(np.tile(testexlbls[i, :], (temp_Ytest[i].shape[0], 1)))
tot_test_length += temp_Ytest[i].shape[0]
# Load test data
Ytest = np.zeros((tot_test_length, temp_Ytest[0].shape[1]))
lblstest = np.zeros((tot_test_length, temp_lblstest[0].shape[1]))
end_ind = 0
for i in range(len(temp_Ytest)):
start_ind = end_ind
end_ind += temp_Ytest[i].shape[0]
Ytest[start_ind:end_ind, :] = temp_Ytest[i]
lblstest[start_ind:end_ind, :] = temp_lblstest[i]
else:
Ytest = None
lblstest = None
info = 'Subject: ' + subject + '. Training motions: '
for motion in train_motions:
info += motion + ', '
info = info[:-2]
if len(test_motions) > 0:
info += '. Test motions: '
for motion in test_motions:
info += motion + ', '
info = info[:-2] + '.'
else:
info += '.'
if sample_every != 1:
info += ' Data is sub-sampled to every ' + str(sample_every) + ' frames.'
return data_details_return({'Y': Y, 'lbls' : lbls, 'Ytest': Ytest, 'lblstest' : lblstest, 'info': info, 'skel': skel}, data_set)
|
befelix/GPy
|
GPy/util/datasets.py
|
Python
|
bsd-3-clause
| 65,186
|
import urllib2
url = 'http://spark-public.s3.amazonaws.com/algo2/datasets/jobs.txt'
response = urllib2.urlopen(url)
data = response.read()
data = data.split('\n')
data = data[1:-1]
data = [x.split(' ') for x in data]
data = [ [ float(x[0]),float(x[1]) ] for x in data]
# group the jobs by their weight/length ratio
temp = [[] for x in data]
ratio = [x[0]/x[1] for x in data]
dic = dict(zip(ratio,temp))
for ind,item in enumerate(data):
dic[ratio[ind]].append(item)
ratio_keys = sorted(dic.keys(),reverse=True)
# schedule in descending ratio order, accumulating completion times
total_len = 0
weighted_sum = 0
for ratio_item in ratio_keys:
    for item in dic[ratio_item]:
        total_len += item[1]                # completion time of this job
        weighted_sum += item[0] * total_len # weight * completion time
print total_len, weighted_sum
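# A tiny self-contained example (added for illustration, not part of the
# original script) of the same ratio-greedy rule: with jobs (weight, length)
# = (3, 1) and (1, 2), scheduling by descending weight/length gives a
# weighted completion time of 3*1 + 1*3 = 6; the reverse order gives 11.
def _ratio_greedy_example():
    jobs = [(1.0, 2.0), (3.0, 1.0)]
    jobs.sort(key=lambda j: j[0] / j[1], reverse=True)
    t = 0.0
    total = 0.0
    for w, l in jobs:
        t += l           # completion time of this job
        total += w * t   # weighted completion time
    return total         # -> 6.0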
|
ayst123/mooc
|
algorithem2/programming1/b.py
|
Python
|
bsd-2-clause
| 630
|
from django.db import models
from django.core.validators import MinValueValidator, \
MaxValueValidator
class Coupon(models.Model):
code = models.CharField(max_length=50, unique=True)
valid_from = models.DateTimeField()
valid_to = models.DateTimeField()
discount = models.IntegerField(
validators=[MinValueValidator(0),
MaxValueValidator(100)])
active = models.BooleanField()
def __str__(self):
return self.code
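# A minimal usage sketch (added for illustration, not part of the original
# file): look up a coupon that is currently valid. Assumes Django's timezone
# utilities are available; the field lookups match the model above.
def get_active_coupon(code):
    from django.utils import timezone
    now = timezone.now()
    try:
        return Coupon.objects.get(code__iexact=code,
                                  valid_from__lte=now,
                                  valid_to__gte=now,
                                  active=True)
    except Coupon.DoesNotExist:
        return None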
|
PikaDm/clave-online-shop-template
|
coupons/models.py
|
Python
|
mit
| 525
|
'''
Created by auto_sdk on 2015.06.23
'''
from aliyun.api.base import RestApi
class Ecs20140526ModifyInstanceAttributeRequest(RestApi):
def __init__(self,domain='ecs.aliyuncs.com',port=80):
RestApi.__init__(self,domain, port)
self.Description = None
self.HostName = None
self.InstanceId = None
self.InstanceName = None
self.Password = None
def getapiname(self):
return 'ecs.aliyuncs.com.ModifyInstanceAttribute.2014-05-26'
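# A minimal usage sketch (added for illustration, not part of the original
# file): populate the request fields before dispatching it through whatever
# request method the RestApi base class exposes (not shown here). The values
# below are placeholders.
def _modify_instance_attribute_sketch():
	req = Ecs20140526ModifyInstanceAttributeRequest()
	req.InstanceId = 'i-example'           # placeholder instance id
	req.InstanceName = 'renamed-instance'  # placeholder new name
	return req.getapiname()  # 'ecs.aliyuncs.com.ModifyInstanceAttribute.2014-05-26'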
|
francisar/rds_manager
|
aliyun/api/rest/Ecs20140526ModifyInstanceAttributeRequest.py
|
Python
|
mit
| 440
|
# -*- coding: utf-8 -*-
#
# pandas-gbq documentation build configuration file, created by
# sphinx-quickstart on Wed Feb 8 10:52:12 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.extlinks',
'sphinx.ext.todo',
'numpydoc', # used to parse numpy-style docstrings for autodoc
'IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pandas-gbq'
copyright = u'2017, PyData Development Team'
author = u'PyData Development Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1.0'
# The full version, including alpha/beta/rc tags.
release = u'0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# Taken from docs.readthedocs.io:
# on_rtd is whether we are on readthedocs.io
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'pandas-gbq v0.1.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pandas-gbqdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pandas-gbq.tex', u'pandas-gbq Documentation',
u'PyData Development Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pandas-gbq', u'pandas-gbq Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pandas-gbq', u'pandas-gbq Documentation',
author, 'pandas-gbq', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
extlinks = {'issue': ('https://github.com/pydata/pandas-gbq/issues/%s',
'GH#'),
'pr': ('https://github.com/pydata/pandas-gbq/pull/%s', 'GH#')}
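# Example (added for illustration): with the extlinks mapping above,
# reStructuredText such as :issue:`123` renders as a link titled "GH#123"
# pointing at the pandas-gbq issue tracker, and :pr:`45` links to pull
# request 45.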
|
RTBHOUSE/pandas-gbq
|
docs/source/conf.py
|
Python
|
bsd-3-clause
| 10,984
|
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Needed to avoid ambiguity in imports
from __future__ import absolute_import
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.api.labs import taskqueue
import logging
import os.path
import yaml
import time
import random
import re
import oauth
import buzz
import web.helper
import models.tokens
import models.board
OAUTH_CONFIG = yaml.load(open('oauth.yaml').read())
OAUTH_CONSUMER_KEY = OAUTH_CONFIG['oauth_consumer_key']
OAUTH_CONSUMER_SECRET = OAUTH_CONFIG['oauth_consumer_secret']
OAUTH_TOKEN_KEY = OAUTH_CONFIG['oauth_token_key']
OAUTH_TOKEN_SECRET = OAUTH_CONFIG['oauth_token_secret']
PRIORITY_PROFILES = yaml.load(open('polling.yaml').read())
BUZZ_BINGO_ID = '103831860836738334913'
class PollHandler(webapp.RequestHandler):
@property
def client(self):
if not hasattr(self, '_client') or not self._client:
access_token = oauth.OAuthToken(OAUTH_TOKEN_KEY, OAUTH_TOKEN_SECRET)
self._client = buzz.Client()
self._client.build_oauth_consumer(
OAUTH_CONSUMER_KEY, OAUTH_CONSUMER_SECRET
)
self._client.oauth_access_token = access_token
self._client.oauth_scopes.append(buzz.FULL_ACCESS_SCOPE)
return self._client
@property
def combined_results(self):
if not hasattr(self, '_combined_results') or not self._combined_results:
self._combined_results = []
try:
# Ignore the Buzz Bingo game itself
for post in self.client.posts(type_id='@consumption'):
if post.actor.id != BUZZ_BINGO_ID:
self._combined_results.append(post)
for post in self.client.search(query="buzzbingo"):
if post.actor.id != BUZZ_BINGO_ID:
self._combined_results.append(post)
except buzz.RetrieveError, e:
logging.warning(str(e))
logging.info('%d posts will be scored.' % len(self._combined_results))
return self._combined_results
def get(self):
cron = False
if self.request.headers.get('X-AppEngine-Cron') == 'true':
cron = True
elif self.request.headers.get('Referer') and \
self.request.headers.get('Referer').find('/_ah/admin/cron') != -1:
cron = True
if cron:
try:
result_task = taskqueue.Task(url='/worker/poll/')
result_task.add()
logging.info('Polling task enqueued...')
except (taskqueue.TaskAlreadyExistsError, taskqueue.TombstonedTaskError), e:
logging.error(str(e))
result_task = None
template_values = {
'http_get': True,
'message': None
}
path = os.path.join(
os.path.dirname(__file__), '..', 'templates', 'poll.html'
)
self.response.out.write(template.render(path, template_values))
def scan_post(self, post_id):
logging.info('Scanning post: %s' % post_id)
topics_found = set([])
players = set([])
nonexistent_players = set([])
ignored_players = set([])
scoring_players = set([])
post = self.client.post(post_id).data
if post.actor.id == BUZZ_BINGO_ID:
return None
post_uri = post.uri
comments = post.comments()
retrieved_comments = []
post_content = post.content.lower()
post_content = re.sub('<br />|\\r|\\n', ' ', post_content)
# Avoid false positive
post_content = re.sub('buzz ?bingo', 'BUZZBINGO', post_content)
if post_content.find('BUZZBINGO') != -1:
players.add(post.actor.id)
for topic in models.board.TOPIC_LIST:
if post_content.find(topic.lower()) != -1:
topics_found.add(topic)
if post_content.find('taco'.lower()) != -1:
topics_found.add('taco')
for comment in comments:
# Need to avoid making unnecessary HTTP requests
retrieved_comments.append(comment)
comment_content = comment.content.lower()
comment_content = re.sub('<br />|\\r|\\n', ' ', comment_content)
# Avoid false positive
comment_content = re.sub('buzz ?bingo', 'BUZZBINGO', comment_content)
if comment_content.find('BUZZBINGO') != -1:
players.add(comment.actor.id)
for topic in models.board.TOPIC_LIST:
if comment_content.find(topic.lower()) != -1:
topics_found.add(topic)
if comment_content.find('taco'.lower()) != -1:
topics_found.add('taco')
for player_id in players:
player = models.player.Player.get_by_key_name(player_id)
if player:
intersection = [
topic for topic in player.topics if topic in topics_found
]
if player.has_post_scored(post_id):
logging.info("Player already scored this.")
# Sometimes a bingo gets missed by retrying a transaction
db.run_in_transaction(player.verify_bingo)
elif intersection:
scoring_players.add(player)
scoring_topic = random.choice(intersection)
db.run_in_transaction(
player.score_post, post, scoring_topic
)
# Can't be run in the transaction, hopefully there won't be
# any nasty race conditions
player.award_leader_badge()
else:
ignored_players.add(player)
else:
nonexistent_players.add(player_id)
# Lots of logging, because this turns out to be tricky to get right.
topics_log_message = 'Topics found:\n'
for topic in topics_found:
topics_log_message += topic + '\n'
logging.info(topics_log_message)
scoring_log_message = 'Players scoring:\n'
for player in scoring_players:
scoring_log_message += '%s\n' % repr(player)
logging.info(scoring_log_message)
ignored_log_message = 'Players ignored and not scoring:\n'
for player in ignored_players:
ignored_log_message += '%s\n' % repr(player)
logging.info(ignored_log_message)
nonexistent_log_message = 'Players who might score if they signed up:\n'
for player_id in nonexistent_players:
nonexistent_log_message += '%s\n' % player_id
logging.info(nonexistent_log_message)
def post(self):
post_id = self.request.get('post_id')
message = ''
if post_id:
self.scan_post(post_id)
else:
for result in self.combined_results:
try:
if result.actor.profile_name in PRIORITY_PROFILES:
# Give priority access to profiles used in any demo.
countdown = 0
logging.info('Priority scan: %s' % result.id)
else:
# One second delay for everyone else, which should be fine.
countdown = 1
result_task = taskqueue.Task(
name="%s-%d" % (result.id[25:], int(time.time())),
params={
'post_id': result.id
},
url='/worker/poll/',
countdown=countdown
)
result_task.add()
logging.info('Scanning task enqueued: %s' % result.id)
except (taskqueue.TaskAlreadyExistsError, taskqueue.TombstonedTaskError), e:
logging.error(str(e))
result_task = None
message = 'Retrieved %d posts.' % len(self.combined_results)
template_values = {
'http_get': False,
'message': message
}
path = os.path.join(
os.path.dirname(__file__), '..', 'templates', 'poll.html'
)
self.response.out.write(template.render(path, template_values))
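# A minimal sketch (added for illustration, not part of the original file)
# of the content normalisation used in scan_post above: lower-case the text,
# flatten line breaks, then collapse "buzz bingo"/"buzzbingo" variants to a
# single token so the game's own name is not counted as a topic.
def _normalise_content_sketch(content):
  content = re.sub('<br />|\\r|\\n', ' ', content.lower())
  return re.sub('buzz ?bingo', 'BUZZBINGO', content)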
|
brianrock/brianrock-ringo
|
handlers/poll.py
|
Python
|
apache-2.0
| 7,906
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module define a WulffShape class to generate the Wulff shape from
a lattice, a list of indices and their corresponding surface energies,
and the total area and volume of the Wulff shape, the weighted surface energy,
the anisotropy and shape_factor can also be calculated.
In support of plotting from a given view in terms of miller index.
The lattice is from the conventional unit cell, and (hkil) for hexagonal
lattices.
If you use this code extensively, consider citing the following:
Tran, R.; Xu, Z.; Radhakrishnan, B.; Winston, D.; Persson, K. A.; Ong, S. P.
(2016). Surface energies of elemental crystals. Scientific Data.
"""
from __future__ import division, unicode_literals
from pymatgen.core.structure import Structure
from pymatgen.core.surface import get_recp_symmetry_operation
from pymatgen.util.coord import get_angle
import numpy as np
import scipy as sp
from scipy.spatial import ConvexHull
import logging
__author__ = 'Zihan Xu, Richard Tran, Shyue Ping Ong'
__copyright__ = 'Copyright 2013, The Materials Virtual Lab'
__version__ = '0.1'
__maintainer__ = 'Zihan Xu'
__email__ = 'zix009@eng.ucsd.edu'
__date__ = 'May 5 2016'
logger = logging.getLogger(__name__)
def hkl_tuple_to_str(hkl):
"""
Prepare for display on plots
"(hkl)" for surfaces
    Args:
hkl: in the form of [h, k, l] or (h, k, l)
"""
str_format = '($'
for x in hkl:
if x < 0:
str_format += '\\overline{' + str(-x) + '}'
else:
str_format += str(x)
str_format += '$)'
return str_format
def get_tri_area(pts):
"""
Given a list of coords for 3 points,
    compute the area of this triangle.
Args:
pts: [a, b, c] three points
"""
a, b, c = pts[0], pts[1], pts[2]
v1 = np.array(b) - np.array(a)
v2 = np.array(c) - np.array(a)
area_tri = abs(sp.linalg.norm(sp.cross(v1, v2)) / 2)
return area_tri
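# A quick sanity check (added for illustration, not part of the original
# file): a right triangle with legs of length 3 and 4 has area 6.
def _get_tri_area_check():
    return get_tri_area([[0, 0, 0], [3, 0, 0], [0, 4, 0]])  # -> 6.0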
class WulffFacet(object):
"""
Helper container for each Wulff plane.
"""
def __init__(self, normal, e_surf, normal_pt, dual_pt, index, m_ind_orig,
miller):
self.normal = normal
self.e_surf = e_surf
self.normal_pt = normal_pt
self.dual_pt = dual_pt
self.index = index
self.m_ind_orig = m_ind_orig
self.miller = miller
self.points = []
self.outer_lines = []
class WulffShape(object):
"""
Generate Wulff Shape from list of miller index and surface energies,
with given conventional unit cell.
surface energy (Jm^2) is the length of normal.
Wulff shape is the convex hull.
Based on:
http://scipy.github.io/devdocs/generated/scipy.spatial.ConvexHull.html
Process:
1. get wulff simplices
2. label with color
3. get wulff_area and other properties
.. attribute:: debug (bool)
.. attribute:: alpha
transparency
.. attribute:: color_set
.. attribute:: grid_off (bool)
.. attribute:: axis_off (bool)
.. attribute:: show_area
.. attribute:: off_color
color of facets off wulff
.. attribute:: structure
Structure object, input conventional unit cell (with H ) from lattice
.. attribute:: miller_list
list of input miller index, for hcp in the form of hkil
.. attribute:: hkl_list
modify hkill to hkl, in the same order with input_miller
.. attribute:: e_surf_list
list of input surface energies, in the same order with input_miller
.. attribute:: lattice
Lattice object, the input lattice for the conventional unit cell
.. attribute:: facets
[WulffFacet] for all facets considering symm
.. attribute:: dual_cv_simp
simplices from the dual convex hull (dual_pt)
.. attribute:: wulff_pt_list
.. attribute:: wulff_cv_simp
simplices from the convex hull of wulff_pt_list
.. attribute:: on_wulff
        list for all input_miller, True if the facet is on the wulff shape.
.. attribute:: color_area
list for all input_miller, total area on wulff, off_wulff = 0.
.. attribute:: miller_area
($hkl$): area for all input_miller
"""
def __init__(self, lattice, miller_list, e_surf_list, symprec=1e-5):
"""
Args:
lattice: Lattice object of the conventional unit cell
miller_list ([(hkl), ...]: list of hkl or hkil for hcp
e_surf_list ([float]): list of corresponding surface energies
symprec (float): for recp_operation, default is 1e-5.
"""
self.color_ind = list(range(len(miller_list)))
self.input_miller_fig = [hkl_tuple_to_str(x) for x in miller_list]
# store input data
self.structure = Structure(lattice, ["H"], [[0, 0, 0]])
self.miller_list = tuple([tuple(x) for x in miller_list])
self.hkl_list = tuple([(x[0], x[1], x[-1]) for x in miller_list])
self.e_surf_list = tuple(e_surf_list)
self.lattice = lattice
self.symprec = symprec
# 2. get all the data for wulff construction
# get all the surface normal from get_all_miller_e()
self.facets = self._get_all_miller_e()
logger.debug(len(self.facets))
# 3. consider the dual condition
dual_pts = [x.dual_pt for x in self.facets]
dual_convex = ConvexHull(dual_pts)
dual_cv_simp = dual_convex.simplices
# simplices (ndarray of ints, shape (nfacet, ndim))
# list of [i, j, k] , ndim = 3
# i, j, k: ind for normal_e_m
# recalculate the dual of dual, get the wulff shape.
# conner <-> surface
# get cross point from the simplices of the dual convex hull
wulff_pt_list = [self._get_cross_pt_dual_simp(dual_simp)
for dual_simp in dual_cv_simp]
wulff_convex = ConvexHull(wulff_pt_list)
wulff_cv_simp = wulff_convex.simplices
logger.debug(", ".join([str(len(x)) for x in wulff_cv_simp]))
# store simplices and convex
self.dual_cv_simp = dual_cv_simp
self.wulff_pt_list = wulff_pt_list
self.wulff_cv_simp = wulff_cv_simp
self.wulff_convex = wulff_convex
self.on_wulff, self.color_area = self._get_simpx_plane()
miller_area = []
for m, in_mill_fig in enumerate(self.input_miller_fig):
miller_area.append(
in_mill_fig + ' : ' + str(round(self.color_area[m], 4)))
self.miller_area = miller_area
def _get_all_miller_e(self):
"""
from self:
get miller_list(unique_miller), e_surf_list and symmetry
operations(symmops) according to lattice
apply symmops to get all the miller index, then get normal,
get all the facets functions for wulff shape calculation:
|normal| = 1, e_surf is plane's distance to (0, 0, 0),
normal[0]x + normal[1]y + normal[2]z = e_surf
return:
[WulffFacet]
"""
all_hkl = []
color_ind = self.color_ind
planes = []
recp = self.structure.lattice.reciprocal_lattice_crystallographic
recp_symmops = get_recp_symmetry_operation(self.structure, self.symprec)
for i, (hkl, energy) in enumerate(zip(self.hkl_list,
self.e_surf_list)):
for op in recp_symmops:
miller = tuple([int(x) for x in op.operate(hkl)])
if miller not in all_hkl:
all_hkl.append(miller)
normal = recp.get_cartesian_coords(miller)
normal /= sp.linalg.norm(normal)
normal_pt = [x * energy for x in normal]
dual_pt = [x / energy for x in normal]
color_plane = color_ind[divmod(i, len(color_ind))[1]]
planes.append(WulffFacet(normal, energy, normal_pt,
dual_pt, color_plane, i, hkl))
# sort by e_surf
planes.sort(key=lambda x: x.e_surf)
return planes
def _get_cross_pt_dual_simp(self, dual_simp):
"""
|normal| = 1, e_surf is plane's distance to (0, 0, 0),
plane function:
normal[0]x + normal[1]y + normal[2]z = e_surf
from self:
normal_e_m to get the plane functions
dual_simp: (i, j, k) simplices from the dual convex hull
i, j, k: plane index(same order in normal_e_m)
"""
matrix_surfs = [self.facets[dual_simp[i]].normal for i in range(3)]
matrix_e = [self.facets[dual_simp[i]].e_surf for i in range(3)]
cross_pt = sp.dot(sp.linalg.inv(matrix_surfs), matrix_e)
return cross_pt
def _get_simpx_plane(self):
"""
        Locate the plane for each simplex of the wulff convex hull (wulff_cv)
        by comparing the center of the simplex triangle with the plane functions.
"""
on_wulff = [False] * len(self.miller_list)
surface_area = [0.0] * len(self.miller_list)
for simpx in self.wulff_cv_simp:
pts = [self.wulff_pt_list[simpx[i]] for i in range(3)]
center = np.sum(pts, 0) / 3.0
# check whether the center of the simplices is on one plane
for plane in self.facets:
abs_diff = abs(np.dot(plane.normal, center) - plane.e_surf)
if abs_diff < 1e-5:
on_wulff[plane.index] = True
surface_area[plane.index] += get_tri_area(pts)
plane.points.append(pts)
plane.outer_lines.append([simpx[0], simpx[1]])
plane.outer_lines.append([simpx[1], simpx[2]])
plane.outer_lines.append([simpx[0], simpx[2]])
# already find the plane, move to the next simplices
break
for plane in self.facets:
plane.outer_lines.sort()
plane.outer_lines = [line for line in plane.outer_lines
if plane.outer_lines.count(line) != 2]
return on_wulff, surface_area
def _get_colors(self, color_set, alpha, off_color, custom_colors={}):
"""
assign colors according to the surface energies of on_wulff facets.
return:
(color_list, color_proxy, color_proxy_on_wulff, miller_on_wulff,
e_surf_on_wulff_list)
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
color_list = [off_color] * len(self.hkl_list)
color_proxy_on_wulff = []
miller_on_wulff = []
e_surf_on_wulff = [(i, e_surf)
for i, e_surf in enumerate(self.e_surf_list)
if self.on_wulff[i]]
c_map = plt.get_cmap(color_set)
e_surf_on_wulff.sort(key=lambda x: x[1], reverse=False)
e_surf_on_wulff_list = [x[1] for x in e_surf_on_wulff]
if len(e_surf_on_wulff) > 1:
cnorm = mpl.colors.Normalize(vmin=min(e_surf_on_wulff_list),
vmax=max(e_surf_on_wulff_list))
else:
            # only one hkl on the Wulff shape: widen the range so its color
            # falls at the middle of the colormap
cnorm = mpl.colors.Normalize(vmin=min(e_surf_on_wulff_list) - 0.1,
vmax=max(e_surf_on_wulff_list) + 0.1)
scalar_map = mpl.cm.ScalarMappable(norm=cnorm, cmap=c_map)
for i, e_surf in e_surf_on_wulff:
color_list[i] = scalar_map.to_rgba(e_surf, alpha=alpha)
if tuple(self.miller_list[i]) in custom_colors.keys():
color_list[i] = custom_colors[tuple(self.miller_list[i])]
color_proxy_on_wulff.append(
plt.Rectangle((2, 2), 1, 1, fc=color_list[i], alpha=alpha))
miller_on_wulff.append(self.input_miller_fig[i])
scalar_map.set_array([x[1] for x in e_surf_on_wulff])
color_proxy = [plt.Rectangle((2, 2), 1, 1, fc=x, alpha=alpha)
for x in color_list]
return color_list, color_proxy, color_proxy_on_wulff, miller_on_wulff, \
e_surf_on_wulff_list
def show(self, *args, **kwargs):
"""
Show the Wulff plot.
Args:
\\*args: Passed to get_plot.
\\*\\*kwargs: Passed to get_plot.
"""
self.get_plot(*args, **kwargs).show()
def get_plot(self, color_set='PuBu', grid_off=True, axis_off=True,
show_area=False, alpha=1, off_color='red', direction=None,
bar_pos=(0.75, 0.15, 0.05, 0.65), bar_on=False,
legend_on=True, aspect_ratio=(8, 8), custom_colors={}):
"""
Get the Wulff shape plot.
Args:
color_set: default is 'PuBu'
grid_off (bool): default is True
            axis_off (bool): default is True
show_area (bool): default is False
alpha (float): chosen from 0 to 1 (float), default is 1
off_color: Default color for facets not present on the Wulff shape.
direction: default is (1, 1, 1)
bar_pos: default is [0.75, 0.15, 0.05, 0.65]
bar_on (bool): default is False
legend_on (bool): default is True
aspect_ratio: default is (8, 8)
            custom_colors ({(h, k, l): [r, g, b, alpha]}): Customize the
                color of each facet with a dictionary. The key is the
                corresponding Miller index and the value is the color.
                Facets that are not specified fall back to the default
                color scheme. Note: If you decide to set your own colors,
                it probably won't make any sense to have the color bar on.
Return:
(matplotlib.pyplot)
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d as mpl3
color_list, color_proxy, color_proxy_on_wulff, \
miller_on_wulff, e_surf_on_wulff = self._get_colors(
color_set, alpha, off_color, custom_colors=custom_colors)
if not direction:
# If direction is not specified, use the miller indices of
# maximum area.
direction = max(self.area_fraction_dict.items(),
key=lambda x: x[1])[0]
fig = plt.figure()
fig.set_size_inches(aspect_ratio[0], aspect_ratio[1])
azim, elev = self._get_azimuth_elev([direction[0], direction[1],
direction[-1]])
wulff_pt_list = self.wulff_pt_list
ax = mpl3.Axes3D(fig, azim=azim, elev=elev)
for plane in self.facets:
# check whether [pts] is empty
if len(plane.points) < 1:
# empty, plane is not on_wulff.
continue
# assign the color for on_wulff facets according to its
# index and the color_list for on_wulff
plane_color = color_list[plane.index]
lines = list(plane.outer_lines)
pt = []
prev = None
while len(lines) > 0:
if prev is None:
l = lines.pop(0)
else:
for i, l in enumerate(lines):
if prev in l:
l = lines.pop(i)
if l[1] == prev:
l.reverse()
break
                # walk the edges so consecutive segments share an endpoint,
                # tracing the facet outline through all of its points
pt.append(self.wulff_pt_list[l[0]].tolist())
pt.append(self.wulff_pt_list[l[1]].tolist())
prev = l[1]
# plot from the sorted pts from [simpx]
tri = mpl3.art3d.Poly3DCollection([pt])
tri.set_color(plane_color)
tri.set_edgecolor("#808080")
ax.add_collection3d(tri)
# set ranges of x, y, z
# find the largest distance between on_wulff pts and the origin,
# to ensure complete and consistent display for all directions
r_range = max([np.linalg.norm(x) for x in wulff_pt_list])
ax.set_xlim([-r_range * 1.1, r_range * 1.1])
ax.set_ylim([-r_range * 1.1, r_range * 1.1])
ax.set_zlim([-r_range * 1.1, r_range * 1.1])
# add legend
if legend_on:
if show_area:
ax.legend(color_proxy, self.miller_area, loc='upper left',
bbox_to_anchor=(0, 1), fancybox=True, shadow=False)
else:
ax.legend(color_proxy_on_wulff, miller_on_wulff,
loc='upper center',
bbox_to_anchor=(0.5, 1), ncol=3, fancybox=True,
shadow=False)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
# Add colorbar
if bar_on:
cmap = plt.get_cmap(color_set)
cmap.set_over('0.25')
cmap.set_under('0.75')
bounds = [round(e, 2) for e in e_surf_on_wulff]
bounds.append(1.2 * bounds[-1])
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
# display surface energies
ax1 = fig.add_axes(bar_pos)
cbar = mpl.colorbar.ColorbarBase(
ax1, cmap=cmap, norm=norm, boundaries=[0] + bounds + [10],
extend='both', ticks=bounds[:-1], spacing='proportional',
orientation='vertical')
cbar.set_label('Surface Energies ($J/m^2$)', fontsize=100)
if grid_off:
            ax.grid(False)
if axis_off:
ax.axis('off')
return plt
def _get_azimuth_elev(self, miller_index):
"""
Args:
miller_index: viewing direction
Returns:
azim, elev for plotting
"""
if miller_index == (0, 0, 1) or miller_index == (0, 0, 0, 1):
return 0, 90
else:
cart = self.lattice.get_cartesian_coords(miller_index)
azim = get_angle([cart[0], cart[1], 0], (1, 0, 0))
v = [cart[0], cart[1], 0]
elev = get_angle(cart, v)
return azim, elev
@property
def volume(self):
"""
Volume of the Wulff shape
"""
return self.wulff_convex.volume
@property
def miller_area_dict(self):
"""
Returns {hkl: area_hkl on wulff}
"""
return dict(zip(self.miller_list, self.color_area))
@property
def miller_energy_dict(self):
"""
Returns {hkl: surface energy_hkl}
"""
return dict(zip(self.miller_list, self.e_surf_list))
@property
def surface_area(self):
"""
Total surface area of Wulff shape.
"""
return sum(self.miller_area_dict.values())
@property
def weighted_surface_energy(self):
"""
Returns:
sum(surface_energy_hkl * area_hkl)/ sum(area_hkl)
"""
return self.total_surface_energy / self.surface_area
@property
def area_fraction_dict(self):
"""
Returns:
(dict): {hkl: area_hkl/total area on wulff}
"""
return {hkl: self.miller_area_dict[hkl] / self.surface_area
for hkl in self.miller_area_dict.keys()}
@property
def anisotropy(self):
"""
Returns:
(float) Coefficient of Variation from weighted surface energy
The ideal sphere is 0.
"""
square_diff_energy = 0
weighted_energy = self.weighted_surface_energy
area_frac_dict = self.area_fraction_dict
miller_energy_dict = self.miller_energy_dict
for hkl in miller_energy_dict.keys():
square_diff_energy += (miller_energy_dict[hkl] - weighted_energy)\
** 2 * area_frac_dict[hkl]
return np.sqrt(square_diff_energy) / weighted_energy
@property
def shape_factor(self):
"""
This is useful for determining the critical nucleus size.
A large shape factor indicates great anisotropy.
See Ballufi, R. W., Allen, S. M. & Carter, W. C. Kinetics
of Materials. (John Wiley & Sons, 2005), p.461
Returns:
(float) Shape factor.
"""
return self.surface_area / (self.volume ** (2 / 3))
@property
def effective_radius(self):
"""
        Radius of the Wulff shape when it is approximated as a sphere.
Returns:
(float) radius.
"""
return ((3/4)*(self.volume/np.pi)) ** (1 / 3)
@property
def total_surface_energy(self):
"""
Total surface energy of the Wulff shape.
Returns:
(float) sum(surface_energy_hkl * area_hkl)
"""
tot_surface_energy = 0
for hkl in self.miller_energy_dict.keys():
tot_surface_energy += self.miller_energy_dict[hkl] * \
self.miller_area_dict[hkl]
return tot_surface_energy
|
czhengsci/pymatgen
|
pymatgen/analysis/wulff.py
|
Python
|
mit
| 21,287
|
import unittest
import mox
import pyclut
from pyclut.engine.user_interface import UserInterface
class UserInterfaceTestCase(unittest.TestCase):
"""UserInterface unit test cases"""
def setUp(self):
self.mox_factory = mox.Mox()
self._mock_config = self.mox_factory.CreateMockAnything()
def tearDown(self):
self.mox_factory.VerifyAll()
def test_ui_creation_01(self):
"""UserInterface : creation, check default values"""
default_resolution = (1024, 768)
self.mox_factory.ReplayAll()
ui = UserInterface()
        self.assertEqual(default_resolution, ui.get_resolution())
        self.assertEqual({}, ui.get_screens())
        self.assertEqual(0, ui.get_nb_screen())
def test_ui_creation_02(self):
"""UserInterface : creation, check attributes given in constructor"""
resolution = (800, 600)
screen_names = ["first", "second", "third"]
screens = {}
for name in screen_names:
screens[name] = self.mox_factory.CreateMockAnything()
self.mox_factory.ReplayAll()
ui = UserInterface(
resolution = resolution,
screens = screens,
)
        self.assertEqual(resolution, ui.get_resolution())
        self.assertEqual(screens, ui.get_screens())
        self.assertEqual(len(screen_names), ui.get_nb_screen())
def __prepare_ui_with_config(self, resolution=(800, 600), screens=None):
self._mock_config.get_resolution().AndReturn(resolution)
self._mock_config.get_screens().AndReturn(screens)
mox.Replay(self._mock_config)
ui = UserInterface(
config = self._mock_config,
)
mox.Reset(self._mock_config)
return ui
def test_ui_creation_03(self):
"""UserInterface : creation, check with configuration"""
resolution = (800, 600)
screen_names = ["first", "second", "third"]
screens = {}
for name in screen_names:
screens[name] = "something" #self.mox_factory.CreateMockAnything()
ui = self.__prepare_ui_with_config(resolution, screens)
        self.assertEqual(resolution, ui.get_resolution())
        self.assertEqual(screens, ui.get_screens())
        self.assertEqual(len(screen_names), ui.get_nb_screen())
def test_screen_creation_01(self):
"""UserInterface : create screen from config"""
resolution = (800, 600)
screen_names = ["first", "second"]
screens = {}
for name in screen_names:
screens[name] = self.mox_factory.CreateMockAnything()
ui = self.__prepare_ui_with_config(resolution, screens)
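        # no explicit assertion here: success means construction did not
        # raise, and tearDown verifies the recorded mock expectations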
if __name__ == "__main__":
unittest.main()
|
ericcolleu/pyclutter-widgets
|
pyclut/engine/test/user_interface_test.py
|
Python
|
lgpl-2.1
| 2,406
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# tifffile.py
# Copyright (c) 2008-2014, Christoph Gohlke
# Copyright (c) 2008-2014, The Regents of the University of California
# Produced at the Laboratory for Fluorescence Dynamics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Read and write image data from and to TIFF files.
Image and metadata can be read from TIFF, BigTIFF, OME-TIFF, STK, LSM, NIH,
SGI, ImageJ, MicroManager, FluoView, SEQ and GEL files.
Only a subset of the TIFF specification is supported, mainly uncompressed
and losslessly compressed 2**(0 to 6) bit integer, 16, 32 and 64-bit float,
grayscale and RGB(A) images, which are commonly used in bio-scientific imaging.
Specifically, reading JPEG and CCITT compressed image data or EXIF, IPTC, GPS,
and XMP metadata is not implemented.
Only primary info records are read for STK, FluoView, MicroManager, and
NIH image formats.
TIFF, the Tagged Image File Format, is under the control of Adobe Systems.
BigTIFF allows for files greater than 4 GB. STK, LSM, FluoView, SGI, SEQ, GEL,
and OME-TIFF, are custom extensions defined by Molecular Devices (Universal
Imaging Corporation), Carl Zeiss MicroImaging, Olympus, Silicon Graphics
International, Media Cybernetics, Molecular Dynamics, and the Open Microscopy
Environment consortium respectively.
For command line usage run ``python tifffile.py --help``
:Author:
`Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`_
:Organization:
Laboratory for Fluorescence Dynamics, University of California, Irvine
:Version: 2014.08.24
Requirements
------------
* `CPython 2.7 or 3.4 <http://www.python.org>`_
* `Numpy 1.8.2 <http://www.numpy.org>`_
* `Matplotlib 1.4 <http://www.matplotlib.org>`_ (optional for plotting)
* `Tifffile.c 2013.11.05 <http://www.lfd.uci.edu/~gohlke/>`_
(recommended for faster decoding of PackBits and LZW encoded strings)
Notes
-----
The API is not stable yet and might change between revisions.
Tested on little-endian platforms only.
Other Python packages and modules for reading bio-scientific TIFF files:
* `Imread <http://luispedro.org/software/imread>`_
* `PyLibTiff <http://code.google.com/p/pylibtiff>`_
* `SimpleITK <http://www.simpleitk.org>`_
* `PyLSM <https://launchpad.net/pylsm>`_
* `PyMca.TiffIO.py <http://pymca.sourceforge.net/>`_ (same as fabio.TiffIO)
* `BioImageXD.Readers <http://www.bioimagexd.net/>`_
* `Cellcognition.io <http://cellcognition.org/>`_
* `CellProfiler.bioformats
<https://github.com/CellProfiler/python-bioformats>`_
Acknowledgements
----------------
* Egor Zindy, University of Manchester, for cz_lsm_scan_info specifics.
* Wim Lewis for a bug fix and some read_cz_lsm functions.
* Hadrien Mary for help on reading MicroManager files.
References
----------
(1) TIFF 6.0 Specification and Supplements. Adobe Systems Incorporated.
http://partners.adobe.com/public/developer/tiff/
(2) TIFF File Format FAQ. http://www.awaresystems.be/imaging/tiff/faq.html
(3) MetaMorph Stack (STK) Image File Format.
http://support.meta.moleculardevices.com/docs/t10243.pdf
(4) Image File Format Description LSM 5/7 Release 6.0 (ZEN 2010).
Carl Zeiss MicroImaging GmbH. BioSciences. May 10, 2011
(5) File Format Description - LSM 5xx Release 2.0.
http://ibb.gsf.de/homepage/karsten.rodenacker/IDL/Lsmfile.doc
(6) The OME-TIFF format.
http://www.openmicroscopy.org/site/support/file-formats/ome-tiff
(7) UltraQuant(r) Version 6.0 for Windows Start-Up Guide.
http://www.ultralum.com/images%20ultralum/pdf/UQStart%20Up%20Guide.pdf
(8) Micro-Manager File Formats.
http://www.micro-manager.org/wiki/Micro-Manager_File_Formats
(9) Tags for TIFF and Related Specifications. Digital Preservation.
http://www.digitalpreservation.gov/formats/content/tiff_tags.shtml
Examples
--------
>>> data = numpy.random.rand(5, 301, 219)
>>> imsave('temp.tif', data)
>>> image = imread('temp.tif')
>>> numpy.testing.assert_array_equal(image, data)
>>> with TiffFile('temp.tif') as tif:
... images = tif.asarray()
... for page in tif:
... for tag in page.tags.values():
... t = tag.name, tag.value
... image = page.asarray()
"""
from __future__ import division, print_function
import sys
import os
import re
import glob
import math
import zlib
import time
import json
import struct
import warnings
import tempfile
import datetime
import collections
from fractions import Fraction
from xml.etree import cElementTree as etree
import numpy
from . import _tifffile
__version__ = '0.4.1'
__docformat__ = 'restructuredtext en'
__all__ = ('imsave', 'imread', 'imshow', 'TiffFile', 'TiffWriter',
'TiffSequence')
def imsave(filename, data, **kwargs):
"""Write image data to TIFF file.
Refer to the TiffWriter class and member functions for documentation.
Parameters
----------
filename : str
Name of file to write.
data : array_like
Input image. The last dimensions are assumed to be image depth,
height, width, and samples.
kwargs : dict
Parameters 'byteorder', 'bigtiff', and 'software' are passed to
the TiffWriter class.
Parameters 'photometric', 'planarconfig', 'resolution',
'description', 'compress', 'volume', and 'extratags' are passed to
the TiffWriter.save function.
Examples
--------
>>> data = numpy.random.rand(2, 5, 3, 301, 219)
>>> description = '{"shape": %s}' % str(list(data.shape))
>>> imsave('temp.tif', data, compress=6,
... extratags=[(270, 's', 0, description, True)])
"""
    tifargs = {}
    # 'writeshape' is a TiffWriter.save() parameter, not a TiffWriter()
    # parameter, so it must stay in kwargs
    for key in ('byteorder', 'bigtiff', 'software'):
        if key in kwargs:
            tifargs[key] = kwargs[key]
            del kwargs[key]
    if 'writeshape' not in kwargs:
        kwargs['writeshape'] = True
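    # classic TIFF offsets are 32-bit, so switch to BigTIFF heuristically
    # once the pixel data approaches ~2 GB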
if 'bigtiff' not in tifargs and data.size*data.dtype.itemsize > 2000*2**20:
tifargs['bigtiff'] = True
with TiffWriter(filename, **tifargs) as tif:
tif.save(data, **kwargs)
class TiffWriter(object):
"""Write image data to TIFF file.
TiffWriter instances must be closed using the close method, which is
automatically called when using the 'with' statement.
Examples
--------
>>> data = numpy.random.rand(2, 5, 3, 301, 219)
>>> with TiffWriter('temp.tif', bigtiff=True) as tif:
... for i in range(data.shape[0]):
... tif.save(data[i], compress=6)
"""
TYPES = {'B': 1, 's': 2, 'H': 3, 'I': 4, '2I': 5, 'b': 6,
'h': 8, 'i': 9, 'f': 11, 'd': 12, 'Q': 16, 'q': 17}
TAGS = {
'new_subfile_type': 254, 'subfile_type': 255,
'image_width': 256, 'image_length': 257, 'bits_per_sample': 258,
'compression': 259, 'photometric': 262, 'fill_order': 266,
'document_name': 269, 'image_description': 270, 'strip_offsets': 273,
'orientation': 274, 'samples_per_pixel': 277, 'rows_per_strip': 278,
'strip_byte_counts': 279, 'x_resolution': 282, 'y_resolution': 283,
'planar_configuration': 284, 'page_name': 285, 'resolution_unit': 296,
'software': 305, 'datetime': 306, 'predictor': 317, 'color_map': 320,
'tile_width': 322, 'tile_length': 323, 'tile_offsets': 324,
'tile_byte_counts': 325, 'extra_samples': 338, 'sample_format': 339,
'image_depth': 32997, 'tile_depth': 32998}
def __init__(self, filename, bigtiff=False, byteorder=None,
software='tifffile.py'):
"""Create a new TIFF file for writing.
Use bigtiff=True when creating files greater than 2 GB.
Parameters
----------
filename : str
Name of file to write.
bigtiff : bool
If True, the BigTIFF format is used.
byteorder : {'<', '>'}
The endianness of the data in the file.
By default this is the system's native byte order.
software : str
Name of the software used to create the image.
Saved with the first page only.
"""
if byteorder not in (None, '<', '>'):
raise ValueError("invalid byteorder %s" % byteorder)
if byteorder is None:
byteorder = '<' if sys.byteorder == 'little' else '>'
self._byteorder = byteorder
self._software = software
self._fh = open(filename, 'wb')
self._fh.write({'<': b'II', '>': b'MM'}[byteorder])
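        # TIFF byte-order mark: b'II' = little-endian (Intel),
        # b'MM' = big-endian (Motorola); the magic number written below is
        # 43 for BigTIFF and 42 for classic TIFF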
if bigtiff:
self._bigtiff = True
self._offset_size = 8
self._tag_size = 20
self._numtag_format = 'Q'
self._offset_format = 'Q'
self._val_format = '8s'
self._fh.write(struct.pack(byteorder+'HHH', 43, 8, 0))
else:
self._bigtiff = False
self._offset_size = 4
self._tag_size = 12
self._numtag_format = 'H'
self._offset_format = 'I'
self._val_format = '4s'
self._fh.write(struct.pack(byteorder+'H', 42))
# first IFD
self._ifd_offset = self._fh.tell()
self._fh.write(struct.pack(byteorder+self._offset_format, 0))
def save(self, data, photometric=None, planarconfig=None, resolution=None,
description=None, volume=False, writeshape=False, compress=0,
extratags=()):
"""Write image data to TIFF file.
Image data are written in one stripe per plane.
Dimensions larger than 2 to 4 (depending on photometric mode, planar
configuration, and SGI mode) are flattened and saved as separate pages.
The 'sample_format' and 'bits_per_sample' TIFF tags are derived from
the data type.
Parameters
----------
data : array_like
Input image. The last dimensions are assumed to be image depth,
height, width, and samples.
photometric : {'minisblack', 'miniswhite', 'rgb'}
The color space of the image data.
By default this setting is inferred from the data shape.
planarconfig : {'contig', 'planar'}
Specifies if samples are stored contiguous or in separate planes.
By default this setting is inferred from the data shape.
'contig': last dimension contains samples.
'planar': third last dimension contains samples.
resolution : (float, float) or ((int, int), (int, int))
X and Y resolution in dots per inch as float or rational numbers.
description : str
The subject of the image. Saved with the first page only.
compress : int
Values from 0 to 9 controlling the level of zlib compression.
If 0, data are written uncompressed (default).
volume : bool
If True, volume data are stored in one tile (if applicable) using
the SGI image_depth and tile_depth tags.
Image width and depth must be multiple of 16.
Few software can read this format, e.g. MeVisLab.
writeshape : bool
If True, write the data shape to the image_description tag
if necessary and no other description is given.
extratags: sequence of tuples
Additional tags as [(code, dtype, count, value, writeonce)].
code : int
The TIFF tag Id.
dtype : str
Data type of items in 'value' in Python struct format.
One of B, s, H, I, 2I, b, h, i, f, d, Q, or q.
count : int
Number of data values. Not used for string values.
value : sequence
'Count' values compatible with 'dtype'.
writeonce : bool
If True, the tag is written to the first page only.
"""
if photometric not in (None, 'minisblack', 'miniswhite', 'rgb'):
raise ValueError("invalid photometric %s" % photometric)
if planarconfig not in (None, 'contig', 'planar'):
raise ValueError("invalid planarconfig %s" % planarconfig)
if not 0 <= compress <= 9:
raise ValueError("invalid compression level %s" % compress)
fh = self._fh
byteorder = self._byteorder
numtag_format = self._numtag_format
val_format = self._val_format
offset_format = self._offset_format
offset_size = self._offset_size
tag_size = self._tag_size
data = numpy.asarray(data, dtype=byteorder+data.dtype.char, order='C')
data_shape = shape = data.shape
data = numpy.atleast_2d(data)
# normalize shape of data
samplesperpixel = 1
extrasamples = 0
if volume and data.ndim < 3:
volume = False
if photometric is None:
if planarconfig:
photometric = 'rgb'
elif data.ndim > 2 and shape[-1] in (3, 4):
photometric = 'rgb'
elif volume and data.ndim > 3 and shape[-4] in (3, 4):
photometric = 'rgb'
elif data.ndim > 2 and shape[-3] in (3, 4):
photometric = 'rgb'
else:
photometric = 'minisblack'
if planarconfig and len(shape) <= (3 if volume else 2):
planarconfig = None
photometric = 'minisblack'
if photometric == 'rgb':
if len(shape) < 3:
raise ValueError("not a RGB(A) image")
if len(shape) < 4:
volume = False
if planarconfig is None:
if shape[-1] in (3, 4):
planarconfig = 'contig'
elif shape[-4 if volume else -3] in (3, 4):
planarconfig = 'planar'
elif shape[-1] > shape[-4 if volume else -3]:
planarconfig = 'planar'
else:
planarconfig = 'contig'
if planarconfig == 'contig':
data = data.reshape((-1, 1) + shape[(-4 if volume else -3):])
samplesperpixel = data.shape[-1]
else:
data = data.reshape(
(-1,) + shape[(-4 if volume else -3):] + (1,))
samplesperpixel = data.shape[1]
if samplesperpixel > 3:
extrasamples = samplesperpixel - 3
elif planarconfig and len(shape) > (3 if volume else 2):
if planarconfig == 'contig':
data = data.reshape((-1, 1) + shape[(-4 if volume else -3):])
samplesperpixel = data.shape[-1]
else:
data = data.reshape(
(-1,) + shape[(-4 if volume else -3):] + (1,))
samplesperpixel = data.shape[1]
extrasamples = samplesperpixel - 1
else:
planarconfig = None
# remove trailing 1s
while len(shape) > 2 and shape[-1] == 1:
shape = shape[:-1]
if len(shape) < 3:
volume = False
if False and (
len(shape) > (3 if volume else 2) and shape[-1] < 5 and
all(shape[-1] < i
for i in shape[(-4 if volume else -3):-1])):
# DISABLED: non-standard TIFF, e.g. (220, 320, 2)
planarconfig = 'contig'
samplesperpixel = shape[-1]
data = data.reshape((-1, 1) + shape[(-4 if volume else -3):])
else:
data = data.reshape(
(-1, 1) + shape[(-3 if volume else -2):] + (1,))
if samplesperpixel == 2:
warnings.warn("writing non-standard TIFF (samplesperpixel 2)")
if volume and (data.shape[-2] % 16 or data.shape[-3] % 16):
warnings.warn("volume width or length are not multiple of 16")
volume = False
data = numpy.swapaxes(data, 1, 2)
data = data.reshape(
(data.shape[0] * data.shape[1],) + data.shape[2:])
# data.shape is now normalized 5D or 6D, depending on volume
# (pages, planar_samples, (depth,) height, width, contig_samples)
assert len(data.shape) in (5, 6)
shape = data.shape
bytestr = bytes if sys.version[0] == '2' else (
lambda x: bytes(x, 'utf-8') if isinstance(x, str) else x)
tags = [] # list of (code, ifdentry, ifdvalue, writeonce)
if volume:
# use tiles to save volume data
tag_byte_counts = TiffWriter.TAGS['tile_byte_counts']
tag_offsets = TiffWriter.TAGS['tile_offsets']
else:
# else use strips
tag_byte_counts = TiffWriter.TAGS['strip_byte_counts']
tag_offsets = TiffWriter.TAGS['strip_offsets']
def pack(fmt, *val):
return struct.pack(byteorder+fmt, *val)
def addtag(code, dtype, count, value, writeonce=False):
# Compute ifdentry & ifdvalue bytes from code, dtype, count, value.
# Append (code, ifdentry, ifdvalue, writeonce) to tags list.
code = int(TiffWriter.TAGS.get(code, code))
try:
tifftype = TiffWriter.TYPES[dtype]
except KeyError:
raise ValueError("unknown dtype %s" % dtype)
rawcount = count
if dtype == 's':
value = bytestr(value) + b'\0'
count = rawcount = len(value)
value = (value, )
if len(dtype) > 1:
count *= int(dtype[:-1])
dtype = dtype[-1]
ifdentry = [pack('HH', code, tifftype),
pack(offset_format, rawcount)]
ifdvalue = None
if count == 1:
if isinstance(value, (tuple, list)):
value = value[0]
ifdentry.append(pack(val_format, pack(dtype, value)))
elif struct.calcsize(dtype) * count <= offset_size:
ifdentry.append(pack(val_format,
pack(str(count)+dtype, *value)))
else:
ifdentry.append(pack(offset_format, 0))
ifdvalue = pack(str(count)+dtype, *value)
tags.append((code, b''.join(ifdentry), ifdvalue, writeonce))
def rational(arg, max_denominator=1000000):
            # return numerator and denominator from a float or two integers
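            # e.g. rational(300.0) -> (300, 1); rational((72, 1)) -> (72, 1)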
try:
f = Fraction.from_float(arg)
except TypeError:
f = Fraction(arg[0], arg[1])
f = f.limit_denominator(max_denominator)
return f.numerator, f.denominator
if self._software:
addtag('software', 's', 0, self._software, writeonce=True)
self._software = None # only save to first page
if description:
addtag('image_description', 's', 0, description, writeonce=True)
elif writeshape and shape[0] > 1 and shape != data_shape:
addtag('image_description', 's', 0,
"shape=(%s)" % (",".join('%i' % i for i in data_shape)),
writeonce=True)
addtag('datetime', 's', 0,
datetime.datetime.now().strftime("%Y:%m:%d %H:%M:%S"),
writeonce=True)
addtag('compression', 'H', 1, 32946 if compress else 1)
addtag('orientation', 'H', 1, 1)
addtag('image_width', 'I', 1, shape[-2])
addtag('image_length', 'I', 1, shape[-3])
if volume:
addtag('image_depth', 'I', 1, shape[-4])
addtag('tile_depth', 'I', 1, shape[-4])
addtag('tile_width', 'I', 1, shape[-2])
addtag('tile_length', 'I', 1, shape[-3])
addtag('new_subfile_type', 'I', 1, 0 if shape[0] == 1 else 2)
addtag('sample_format', 'H', 1,
{'u': 1, 'i': 2, 'f': 3, 'c': 6}[data.dtype.kind])
addtag('photometric', 'H', 1,
{'miniswhite': 0, 'minisblack': 1, 'rgb': 2}[photometric])
addtag('samples_per_pixel', 'H', 1, samplesperpixel)
if planarconfig and samplesperpixel > 1:
addtag('planar_configuration', 'H', 1, 1
if planarconfig == 'contig' else 2)
addtag('bits_per_sample', 'H', samplesperpixel,
(data.dtype.itemsize * 8, ) * samplesperpixel)
else:
addtag('bits_per_sample', 'H', 1, data.dtype.itemsize * 8)
if extrasamples:
if photometric == 'rgb' and extrasamples == 1:
addtag('extra_samples', 'H', 1, 1) # associated alpha channel
else:
addtag('extra_samples', 'H', extrasamples, (0,) * extrasamples)
if resolution:
addtag('x_resolution', '2I', 1, rational(resolution[0]))
addtag('y_resolution', '2I', 1, rational(resolution[1]))
addtag('resolution_unit', 'H', 1, 2)
addtag('rows_per_strip', 'I', 1,
shape[-3] * (shape[-4] if volume else 1))
# use one strip or tile per plane
strip_byte_counts = (data[0, 0].size * data.dtype.itemsize,) * shape[1]
addtag(tag_byte_counts, offset_format, shape[1], strip_byte_counts)
addtag(tag_offsets, offset_format, shape[1], (0, ) * shape[1])
# add extra tags from users
for t in extratags:
addtag(*t)
# the entries in an IFD must be sorted in ascending order by tag code
tags = sorted(tags, key=lambda x: x[0])
if not self._bigtiff and (fh.tell() + data.size*data.dtype.itemsize
> 2**31-1):
raise ValueError("data too large for non-bigtiff file")
for pageindex in range(shape[0]):
# update pointer at ifd_offset
pos = fh.tell()
fh.seek(self._ifd_offset)
fh.write(pack(offset_format, pos))
fh.seek(pos)
# write ifdentries
fh.write(pack(numtag_format, len(tags)))
tag_offset = fh.tell()
fh.write(b''.join(t[1] for t in tags))
self._ifd_offset = fh.tell()
fh.write(pack(offset_format, 0)) # offset to next IFD
# write tag values and patch offsets in ifdentries, if necessary
for tagindex, tag in enumerate(tags):
if tag[2]:
pos = fh.tell()
fh.seek(tag_offset + tagindex*tag_size + offset_size + 4)
fh.write(pack(offset_format, pos))
fh.seek(pos)
if tag[0] == tag_offsets:
strip_offsets_offset = pos
elif tag[0] == tag_byte_counts:
strip_byte_counts_offset = pos
fh.write(tag[2])
# write image data
data_offset = fh.tell()
if compress:
strip_byte_counts = []
for plane in data[pageindex]:
plane = zlib.compress(plane, compress)
strip_byte_counts.append(len(plane))
fh.write(plane)
else:
                # if this fails, try updating Python/numpy
data[pageindex].tofile(fh)
fh.flush()
# update strip and tile offsets and byte_counts if necessary
pos = fh.tell()
for tagindex, tag in enumerate(tags):
if tag[0] == tag_offsets: # strip or tile offsets
if tag[2]:
fh.seek(strip_offsets_offset)
strip_offset = data_offset
for size in strip_byte_counts:
fh.write(pack(offset_format, strip_offset))
strip_offset += size
else:
fh.seek(tag_offset + tagindex*tag_size +
offset_size + 4)
fh.write(pack(offset_format, data_offset))
elif tag[0] == tag_byte_counts: # strip or tile byte_counts
if compress:
if tag[2]:
fh.seek(strip_byte_counts_offset)
for size in strip_byte_counts:
fh.write(pack(offset_format, size))
else:
fh.seek(tag_offset + tagindex*tag_size +
offset_size + 4)
fh.write(pack(offset_format, strip_byte_counts[0]))
break
fh.seek(pos)
fh.flush()
# remove tags that should be written only once
if pageindex == 0:
tags = [t for t in tags if not t[-1]]
def close(self):
self._fh.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def imread(files, **kwargs):
"""Return image data from TIFF file(s) as numpy array.
The first image series is returned if no arguments are provided.
Parameters
----------
files : str or list
File name, glob pattern, or list of file names.
key : int, slice, or sequence of page indices
Defines which pages to return as array.
series : int
Defines which series of pages in file to return as array.
multifile : bool
If True (default), OME-TIFF data may include pages from multiple files.
pattern : str
Regular expression pattern that matches axes names and indices in
file names.
kwargs : dict
Additional parameters passed to the TiffFile or TiffSequence asarray
function.
Examples
--------
>>> im = imread('temp.tif', key=0)
>>> im.shape
(3, 301, 219)
>>> ims = imread(['temp.tif', 'temp.tif'])
>>> ims.shape
(2, 10, 3, 301, 219)
"""
kwargs_file = {}
if 'multifile' in kwargs:
kwargs_file['multifile'] = kwargs['multifile']
del kwargs['multifile']
else:
kwargs_file['multifile'] = True
kwargs_seq = {}
if 'pattern' in kwargs:
kwargs_seq['pattern'] = kwargs['pattern']
del kwargs['pattern']
if isinstance(files, basestring) and any(i in files for i in '?*'):
files = glob.glob(files)
if not files:
raise ValueError('no files found')
if len(files) == 1:
files = files[0]
if isinstance(files, basestring):
with TiffFile(files, **kwargs_file) as tif:
return tif.asarray(**kwargs)
else:
with TiffSequence(files, **kwargs_seq) as imseq:
return imseq.asarray(**kwargs)
class lazyattr(object):
"""Lazy object attribute whose value is computed on first access."""
__slots__ = ('func', )
def __init__(self, func):
self.func = func
def __get__(self, instance, owner):
if instance is None:
return self
value = self.func(instance)
if value is NotImplemented:
return getattr(super(owner, instance), self.func.__name__)
setattr(instance, self.func.__name__, value)
return value
class TiffFile(object):
"""Read image and metadata from TIFF, STK, LSM, and FluoView files.
TiffFile instances must be closed using the close method, which is
automatically called when using the 'with' statement.
Attributes
----------
pages : list
All TIFF pages in file.
series : list of Records(shape, dtype, axes, TiffPages)
TIFF pages with compatible shapes and types.
micromanager_metadata: dict
Extra MicroManager non-TIFF metadata in the file, if exists.
All attributes are read-only.
Examples
--------
>>> with TiffFile('temp.tif') as tif:
... data = tif.asarray()
... data.shape
(5, 301, 219)
"""
def __init__(self, arg, name=None, offset=None, size=None,
multifile=True, multifile_close=True):
"""Initialize instance from file.
Parameters
----------
arg : str or open file
Name of file or open file object.
The file objects are closed in TiffFile.close().
name : str
Optional name of file in case 'arg' is a file handle.
offset : int
Optional start position of embedded file. By default this is
the current file position.
size : int
Optional size of embedded file. By default this is the number
of bytes from the 'offset' to the end of the file.
multifile : bool
If True (default), series may include pages from multiple files.
Currently applies to OME-TIFF only.
multifile_close : bool
If True (default), keep the handles of other files in multifile
series closed. This is inefficient when few files refer to
many pages. If False, the C runtime may run out of resources.
"""
self._fh = FileHandle(arg, name=name, offset=offset, size=size)
self.offset_size = None
self.pages = []
self._multifile = bool(multifile)
self._multifile_close = bool(multifile_close)
self._files = {self._fh.name: self} # cache of TiffFiles
try:
self._fromfile()
except Exception:
self._fh.close()
raise
@property
def filehandle(self):
"""Return file handle."""
return self._fh
@property
def filename(self):
"""Return name of file handle."""
return self._fh.name
def close(self):
"""Close open file handle(s)."""
for tif in self._files.values():
tif._fh.close()
self._files = {}
def _fromfile(self):
"""Read TIFF header and all page records from file."""
self._fh.seek(0)
try:
self.byteorder = {b'II': '<', b'MM': '>'}[self._fh.read(2)]
except KeyError:
raise ValueError("not a valid TIFF file")
version = struct.unpack(self.byteorder+'H', self._fh.read(2))[0]
if version == 43: # BigTiff
self.offset_size, zero = struct.unpack(self.byteorder+'HH',
self._fh.read(4))
if zero or self.offset_size != 8:
raise ValueError("not a valid BigTIFF file")
elif version == 42:
self.offset_size = 4
else:
raise ValueError("not a TIFF file")
self.pages = []
while True:
try:
page = TiffPage(self)
self.pages.append(page)
except StopIteration:
break
if not self.pages:
raise ValueError("empty TIFF file")
if self.is_micromanager:
# MicroManager files contain metadata not stored in TIFF tags.
self.micromanager_metadata = read_micromanager_metadata(self._fh)
if self.is_lsm:
self._fix_lsm_strip_offsets()
self._fix_lsm_strip_byte_counts()
def _fix_lsm_strip_offsets(self):
"""Unwrap strip offsets for LSM files greater than 4 GB."""
for series in self.series:
wrap = 0
previous_offset = 0
for page in series.pages:
strip_offsets = []
for current_offset in page.strip_offsets:
if current_offset < previous_offset:
wrap += 2**32
strip_offsets.append(current_offset + wrap)
previous_offset = current_offset
page.strip_offsets = tuple(strip_offsets)
def _fix_lsm_strip_byte_counts(self):
"""Set strip_byte_counts to size of compressed data.
The strip_byte_counts tag in LSM files contains the number of bytes
for the uncompressed data.
"""
if not self.pages:
return
strips = {}
for page in self.pages:
assert len(page.strip_offsets) == len(page.strip_byte_counts)
for offset, bytecount in zip(page.strip_offsets,
page.strip_byte_counts):
strips[offset] = bytecount
offsets = sorted(strips.keys())
offsets.append(min(offsets[-1] + strips[offsets[-1]], self._fh.size))
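        # clamp each byte count to the gap before the next strip (or the
        # end of file), since compressed strips cannot overlap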
for i, offset in enumerate(offsets[:-1]):
strips[offset] = min(strips[offset], offsets[i+1] - offset)
for page in self.pages:
if page.compression:
page.strip_byte_counts = tuple(
strips[offset] for offset in page.strip_offsets)
@lazyattr
def series(self):
"""Return series of TiffPage with compatible shape and properties."""
if not self.pages:
return []
series = []
page0 = self.pages[0]
if self.is_ome:
series = self._omeseries()
elif self.is_fluoview:
dims = {b'X': 'X', b'Y': 'Y', b'Z': 'Z', b'T': 'T',
b'WAVELENGTH': 'C', b'TIME': 'T', b'XY': 'R',
b'EVENT': 'V', b'EXPOSURE': 'L'}
mmhd = list(reversed(page0.mm_header.dimensions))
series = [Record(
axes=''.join(dims.get(i[0].strip().upper(), 'Q')
for i in mmhd if i[1] > 1),
shape=tuple(int(i[1]) for i in mmhd if i[1] > 1),
pages=self.pages, dtype=numpy.dtype(page0.dtype))]
elif self.is_lsm:
lsmi = page0.cz_lsm_info
axes = CZ_SCAN_TYPES[lsmi.scan_type]
if page0.is_rgb:
axes = axes.replace('C', '').replace('XY', 'XYC')
axes = axes[::-1]
shape = tuple(getattr(lsmi, CZ_DIMENSIONS[i]) for i in axes)
pages = [p for p in self.pages if not p.is_reduced]
series = [Record(axes=axes, shape=shape, pages=pages,
dtype=numpy.dtype(pages[0].dtype))]
if len(pages) != len(self.pages): # reduced RGB pages
pages = [p for p in self.pages if p.is_reduced]
cp = 1
i = 0
while cp < len(pages) and i < len(shape)-2:
cp *= shape[i]
i += 1
shape = shape[:i] + pages[0].shape
axes = axes[:i] + 'CYX'
series.append(Record(axes=axes, shape=shape, pages=pages,
dtype=numpy.dtype(pages[0].dtype)))
elif self.is_imagej:
shape = []
axes = []
ij = page0.imagej_tags
if 'frames' in ij:
shape.append(ij['frames'])
axes.append('T')
if 'slices' in ij:
shape.append(ij['slices'])
axes.append('Z')
if 'channels' in ij and not self.is_rgb:
shape.append(ij['channels'])
axes.append('C')
remain = len(self.pages) // (product(shape) if shape else 1)
if remain > 1:
shape.append(remain)
axes.append('I')
shape.extend(page0.shape)
axes.extend(page0.axes)
axes = ''.join(axes)
series = [Record(pages=self.pages, shape=tuple(shape), axes=axes,
dtype=numpy.dtype(page0.dtype))]
elif self.is_nih:
if len(self.pages) == 1:
shape = page0.shape
axes = page0.axes
else:
shape = (len(self.pages),) + page0.shape
axes = 'I' + page0.axes
series = [Record(pages=self.pages, shape=shape, axes=axes,
dtype=numpy.dtype(page0.dtype))]
elif page0.is_shaped:
# TODO: shaped files can contain multiple series
shape = page0.tags['image_description'].value[7:-1]
shape = tuple(int(i) for i in shape.split(b','))
series = [Record(pages=self.pages, shape=shape,
axes='Q' * len(shape),
dtype=numpy.dtype(page0.dtype))]
# generic detection of series
if not series:
shapes = []
pages = {}
for page in self.pages:
if not page.shape:
continue
shape = page.shape + (page.axes,
page.compression in TIFF_DECOMPESSORS)
if shape not in pages:
shapes.append(shape)
pages[shape] = [page]
else:
pages[shape].append(page)
series = [Record(pages=pages[s],
axes=(('I' + s[-2])
if len(pages[s]) > 1 else s[-2]),
dtype=numpy.dtype(pages[s][0].dtype),
shape=((len(pages[s]), ) + s[:-2]
if len(pages[s]) > 1 else s[:-2]))
for s in shapes]
# remove empty series, e.g. in MD Gel files
series = [s for s in series if sum(s.shape) > 0]
return series
def asarray(self, key=None, series=None, memmap=False):
"""Return image data from multiple TIFF pages as numpy array.
By default the first image series is returned.
Parameters
----------
key : int, slice, or sequence of page indices
Defines which pages to return as array.
series : int
Defines which series of pages to return as array.
memmap : bool
If True, return an array stored in a binary file on disk
if possible.
"""
if key is None and series is None:
series = 0
if series is not None:
pages = self.series[series].pages
else:
pages = self.pages
if key is None:
pass
elif isinstance(key, int):
pages = [pages[key]]
elif isinstance(key, slice):
pages = pages[key]
elif isinstance(key, collections.Iterable):
pages = [pages[k] for k in key]
else:
raise TypeError("key must be an int, slice, or sequence")
if not len(pages):
raise ValueError("no pages selected")
if self.is_nih:
if pages[0].is_palette:
result = stack_pages(pages, colormapped=False, squeeze=False)
result = numpy.take(pages[0].color_map, result, axis=1)
result = numpy.swapaxes(result, 0, 1)
else:
result = stack_pages(pages, memmap=memmap,
colormapped=False, squeeze=False)
elif len(pages) == 1:
return pages[0].asarray(memmap=memmap)
elif self.is_ome:
assert not self.is_palette, "color mapping disabled for ome-tiff"
if any(p is None for p in pages):
# zero out missing pages
firstpage = next(p for p in pages if p)
nopage = numpy.zeros_like(
firstpage.asarray(memmap=False))
s = self.series[series]
if memmap:
with tempfile.NamedTemporaryFile() as fh:
result = numpy.memmap(fh, dtype=s.dtype, shape=s.shape)
result = result.reshape(-1)
else:
result = numpy.empty(s.shape, s.dtype).reshape(-1)
index = 0
class KeepOpen:
# keep Tiff files open between consecutive pages
def __init__(self, parent, close):
self.master = parent
self.parent = parent
self._close = close
def open(self, page):
if self._close and page and page.parent != self.parent:
if self.parent != self.master:
self.parent.filehandle.close()
self.parent = page.parent
self.parent.filehandle.open()
def close(self):
if self._close and self.parent != self.master:
self.parent.filehandle.close()
keep = KeepOpen(self, self._multifile_close)
for page in pages:
keep.open(page)
if page:
a = page.asarray(memmap=False, colormapped=False,
reopen=False)
else:
a = nopage
try:
result[index:index + a.size] = a.reshape(-1)
except ValueError as e:
warnings.warn("ome-tiff: %s" % e)
break
index += a.size
keep.close()
else:
result = stack_pages(pages, memmap=memmap)
if key is None:
try:
result.shape = self.series[series].shape
except ValueError:
try:
warnings.warn("failed to reshape %s to %s" % (
result.shape, self.series[series].shape))
# try series of expected shapes
result.shape = (-1,) + self.series[series].shape
except ValueError:
# revert to generic shape
result.shape = (-1,) + pages[0].shape
else:
result.shape = (-1,) + pages[0].shape
return result
def _omeseries(self):
"""Return image series in OME-TIFF file(s)."""
root = etree.fromstring(self.pages[0].tags['image_description'].value)
uuid = root.attrib.get('UUID', None)
self._files = {uuid: self}
dirname = self._fh.dirname
modulo = {}
result = []
for element in root:
if element.tag.endswith('BinaryOnly'):
warnings.warn("ome-xml: not an ome-tiff master file")
break
if element.tag.endswith('StructuredAnnotations'):
for annot in element:
if not annot.attrib.get('Namespace',
'').endswith('modulo'):
continue
for value in annot:
for modul in value:
for along in modul:
if not along.tag[:-1].endswith('Along'):
continue
axis = along.tag[-1]
newaxis = along.attrib.get('Type', 'other')
newaxis = AXES_LABELS[newaxis]
if 'Start' in along.attrib:
labels = range(
int(along.attrib['Start']),
int(along.attrib['End']) + 1,
int(along.attrib.get('Step', 1)))
else:
labels = [label.text for label in along
if label.tag.endswith('Label')]
modulo[axis] = (newaxis, labels)
if not element.tag.endswith('Image'):
continue
for pixels in element:
if not pixels.tag.endswith('Pixels'):
continue
atr = pixels.attrib
dtype = atr.get('Type', None)
axes = ''.join(reversed(atr['DimensionOrder']))
shape = list(int(atr['Size'+ax]) for ax in axes)
size = product(shape[:-2])
ifds = [None] * size
for data in pixels:
if not data.tag.endswith('TiffData'):
continue
atr = data.attrib
ifd = int(atr.get('IFD', 0))
num = int(atr.get('NumPlanes', 1 if 'IFD' in atr else 0))
num = int(atr.get('PlaneCount', num))
idx = [int(atr.get('First'+ax, 0)) for ax in axes[:-2]]
try:
idx = numpy.ravel_multi_index(idx, shape[:-2])
except ValueError:
# ImageJ produces invalid ome-xml when cropping
warnings.warn("ome-xml: invalid TiffData index")
continue
for uuid in data:
if not uuid.tag.endswith('UUID'):
continue
if uuid.text not in self._files:
if not self._multifile:
# abort reading multifile OME series
# and fall back to generic series
return []
fname = uuid.attrib['FileName']
try:
tif = TiffFile(os.path.join(dirname, fname))
except (IOError, ValueError):
tif.close()
warnings.warn(
"ome-xml: failed to read '%s'" % fname)
break
self._files[uuid.text] = tif
if self._multifile_close:
tif.close()
pages = self._files[uuid.text].pages
try:
for i in range(num if num else len(pages)):
ifds[idx + i] = pages[ifd + i]
except IndexError:
warnings.warn("ome-xml: index out of range")
# only process first uuid
break
else:
pages = self.pages
try:
for i in range(num if num else len(pages)):
ifds[idx + i] = pages[ifd + i]
except IndexError:
warnings.warn("ome-xml: index out of range")
if all(i is None for i in ifds):
# skip images without data
continue
dtype = next(i for i in ifds if i).dtype
result.append(Record(axes=axes, shape=shape, pages=ifds,
dtype=numpy.dtype(dtype)))
for record in result:
for axis, (newaxis, labels) in modulo.items():
i = record.axes.index(axis)
size = len(labels)
if record.shape[i] == size:
record.axes = record.axes.replace(axis, newaxis, 1)
else:
record.shape[i] //= size
record.shape.insert(i+1, size)
record.axes = record.axes.replace(axis, axis+newaxis, 1)
record.shape = tuple(record.shape)
# squeeze dimensions
for record in result:
record.shape, record.axes = squeeze_axes(record.shape, record.axes)
return result
def __len__(self):
"""Return number of image pages in file."""
return len(self.pages)
def __getitem__(self, key):
"""Return specified page."""
return self.pages[key]
def __iter__(self):
"""Return iterator over pages."""
return iter(self.pages)
def __str__(self):
"""Return string containing information about file."""
result = [
self._fh.name.capitalize(),
format_size(self._fh.size),
{'<': 'little endian', '>': 'big endian'}[self.byteorder]]
if self.is_bigtiff:
result.append("bigtiff")
if len(self.pages) > 1:
result.append("%i pages" % len(self.pages))
if len(self.series) > 1:
result.append("%i series" % len(self.series))
if len(self._files) > 1:
result.append("%i files" % (len(self._files)))
return ", ".join(result)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
@lazyattr
def fstat(self):
try:
return os.fstat(self._fh.fileno())
except Exception: # io.UnsupportedOperation
return None
@lazyattr
def is_bigtiff(self):
return self.offset_size != 4
@lazyattr
def is_rgb(self):
return all(p.is_rgb for p in self.pages)
@lazyattr
def is_palette(self):
return all(p.is_palette for p in self.pages)
@lazyattr
def is_mdgel(self):
return any(p.is_mdgel for p in self.pages)
@lazyattr
def is_mediacy(self):
return any(p.is_mediacy for p in self.pages)
@lazyattr
def is_stk(self):
return all(p.is_stk for p in self.pages)
@lazyattr
def is_lsm(self):
return self.pages[0].is_lsm
@lazyattr
def is_imagej(self):
return self.pages[0].is_imagej
@lazyattr
def is_micromanager(self):
return self.pages[0].is_micromanager
@lazyattr
def is_nih(self):
return self.pages[0].is_nih
@lazyattr
def is_fluoview(self):
return self.pages[0].is_fluoview
@lazyattr
def is_ome(self):
return self.pages[0].is_ome
class TiffPage(object):
"""A TIFF image file directory (IFD).
Attributes
----------
index : int
Index of page in file.
dtype : str {TIFF_SAMPLE_DTYPES}
Data type of image, colormapped if applicable.
shape : tuple
Dimensions of the image array in TIFF page,
colormapped and with one alpha channel if applicable.
axes : str
Axes label codes:
'X' width, 'Y' height, 'S' sample, 'I' image series|page|plane,
'Z' depth, 'C' color|em-wavelength|channel, 'E' ex-wavelength|lambda,
'T' time, 'R' region|tile, 'A' angle, 'P' phase, 'H' lifetime,
'L' exposure, 'V' event, 'Q' unknown, '_' missing
tags : TiffTags
Dictionary of tags in page.
Tag values are also directly accessible as attributes.
color_map : numpy array
Color look up table, if exists.
cz_lsm_scan_info: Record(dict)
LSM scan info attributes, if exists.
imagej_tags: Record(dict)
Consolidated ImageJ description and metadata tags, if exists.
uic_tags: Record(dict)
Consolidated MetaMorph STK/UIC tags, if exists.
All attributes are read-only.
Notes
-----
The internal, normalized '_shape' attribute is 6 dimensional:
0. number planes (stk)
1. planar samples_per_pixel
2. image_depth Z (sgi)
3. image_length Y
4. image_width X
5. contig samples_per_pixel
"""
def __init__(self, parent):
"""Initialize instance from file."""
self.parent = parent
self.index = len(parent.pages)
self.shape = self._shape = ()
self.dtype = self._dtype = None
self.axes = ""
self.tags = TiffTags()
self._fromfile()
self._process_tags()
def _fromfile(self):
"""Read TIFF IFD structure and its tags from file.
File cursor must be at storage position of IFD offset and is left at
offset to next IFD.
Raises StopIteration if offset (first bytes read) is 0.
"""
fh = self.parent.filehandle
byteorder = self.parent.byteorder
offset_size = self.parent.offset_size
fmt = {4: 'I', 8: 'Q'}[offset_size]
offset = struct.unpack(byteorder + fmt, fh.read(offset_size))[0]
if not offset:
raise StopIteration()
# read standard tags
tags = self.tags
fh.seek(offset)
fmt, size = {4: ('H', 2), 8: ('Q', 8)}[offset_size]
try:
numtags = struct.unpack(byteorder + fmt, fh.read(size))[0]
except Exception:
warnings.warn("corrupted page list")
raise StopIteration()
tagcode = 0
for _ in range(numtags):
try:
tag = TiffTag(self.parent)
# print(tag)
except TiffTag.Error as e:
warnings.warn(str(e))
continue
if tagcode > tag.code:
# expected for early LSM and tifffile versions
warnings.warn("tags are not ordered by code")
tagcode = tag.code
if tag.name not in tags:
tags[tag.name] = tag
else:
                # some files contain multiple tags with the same code,
                # e.g. MicroManager files contain two image_description tags
i = 1
while True:
name = "%s_%i" % (tag.name, i)
if name not in tags:
tags[name] = tag
break
pos = fh.tell()
if self.is_lsm or (self.index and self.parent.is_lsm):
# correct non standard LSM bitspersample tags
self.tags['bits_per_sample']._correct_lsm_bitspersample(self)
if self.is_lsm:
# read LSM info subrecords
for name, reader in CZ_LSM_INFO_READERS.items():
try:
offset = self.cz_lsm_info['offset_'+name]
except KeyError:
continue
if offset < 8:
# older LSM revision
continue
fh.seek(offset)
try:
setattr(self, 'cz_lsm_'+name, reader(fh))
except ValueError:
pass
elif self.is_stk and 'uic1tag' in tags and not tags['uic1tag'].value:
# read uic1tag now that plane count is known
uic1tag = tags['uic1tag']
fh.seek(uic1tag.value_offset)
tags['uic1tag'].value = Record(
read_uic1tag(fh, byteorder, uic1tag.dtype, uic1tag.count,
tags['uic2tag'].count))
fh.seek(pos)
def _process_tags(self):
"""Validate standard tags and initialize attributes.
Raise ValueError if tag values are not supported.
"""
tags = self.tags
for code, (name, default, dtype, count, validate) in TIFF_TAGS.items():
if not (name in tags or default is None):
tags[name] = TiffTag(code, dtype=dtype, count=count,
value=default, name=name)
if name in tags and validate:
try:
if tags[name].count == 1:
setattr(self, name, validate[tags[name].value])
else:
setattr(self, name, tuple(
validate[value] for value in tags[name].value))
except KeyError:
raise ValueError("%s.value (%s) not supported" %
(name, tags[name].value))
tag = tags['bits_per_sample']
if tag.count == 1:
self.bits_per_sample = tag.value
else:
# LSM might list more items than samples_per_pixel
value = tag.value[:self.samples_per_pixel]
if any((v-value[0] for v in value)):
self.bits_per_sample = value
else:
self.bits_per_sample = value[0]
tag = tags['sample_format']
if tag.count == 1:
self.sample_format = TIFF_SAMPLE_FORMATS[tag.value]
else:
value = tag.value[:self.samples_per_pixel]
if any((v-value[0] for v in value)):
self.sample_format = [TIFF_SAMPLE_FORMATS[v] for v in value]
else:
self.sample_format = TIFF_SAMPLE_FORMATS[value[0]]
if 'photometric' not in tags:
self.photometric = None
if 'image_depth' not in tags:
self.image_depth = 1
if 'image_length' in tags:
self.strips_per_image = int(math.floor(
float(self.image_length + self.rows_per_strip - 1) /
self.rows_per_strip))
else:
self.strips_per_image = 0
key = (self.sample_format, self.bits_per_sample)
self.dtype = self._dtype = TIFF_SAMPLE_DTYPES.get(key, None)
if 'image_length' not in self.tags or 'image_width' not in self.tags:
# some GEL file pages are missing image data
self.image_length = 0
self.image_width = 0
self.image_depth = 0
self.strip_offsets = 0
self._shape = ()
self.shape = ()
self.axes = ''
if self.is_palette:
self.dtype = self.tags['color_map'].dtype[1]
self.color_map = numpy.array(self.color_map, self.dtype)
dmax = self.color_map.max()
if dmax < 256:
self.dtype = numpy.uint8
self.color_map = self.color_map.astype(self.dtype)
#else:
# self.dtype = numpy.uint8
# self.color_map >>= 8
# self.color_map = self.color_map.astype(self.dtype)
self.color_map.shape = (3, -1)
# determine shape of data
image_length = self.image_length
image_width = self.image_width
image_depth = self.image_depth
samples_per_pixel = self.samples_per_pixel
if self.is_stk:
assert self.image_depth == 1
planes = self.tags['uic2tag'].count
if self.is_contig:
self._shape = (planes, 1, 1, image_length, image_width,
samples_per_pixel)
if samples_per_pixel == 1:
self.shape = (planes, image_length, image_width)
self.axes = 'YX'
else:
self.shape = (planes, image_length, image_width,
samples_per_pixel)
self.axes = 'YXS'
else:
self._shape = (planes, samples_per_pixel, 1, image_length,
image_width, 1)
if samples_per_pixel == 1:
self.shape = (planes, image_length, image_width)
self.axes = 'YX'
else:
self.shape = (planes, samples_per_pixel, image_length,
image_width)
self.axes = 'SYX'
# detect type of series
if planes == 1:
self.shape = self.shape[1:]
elif numpy.all(self.uic2tag.z_distance != 0):
self.axes = 'Z' + self.axes
elif numpy.all(numpy.diff(self.uic2tag.time_created) != 0):
self.axes = 'T' + self.axes
else:
self.axes = 'I' + self.axes
# DISABLED
if self.is_palette:
assert False, "color mapping disabled for stk"
if self.color_map.shape[1] >= 2**self.bits_per_sample:
if image_depth == 1:
self.shape = (3, planes, image_length, image_width)
else:
self.shape = (3, planes, image_depth, image_length,
image_width)
self.axes = 'C' + self.axes
else:
warnings.warn("palette cannot be applied")
self.is_palette = False
elif self.is_palette:
samples = 1
if 'extra_samples' in self.tags:
samples += len(self.extra_samples)
if self.is_contig:
self._shape = (1, 1, image_depth, image_length, image_width,
samples)
else:
self._shape = (1, samples, image_depth, image_length,
image_width, 1)
if self.color_map.shape[1] >= 2**self.bits_per_sample:
if image_depth == 1:
self.shape = (3, image_length, image_width)
self.axes = 'CYX'
else:
self.shape = (3, image_depth, image_length, image_width)
self.axes = 'CZYX'
else:
warnings.warn("palette cannot be applied")
self.is_palette = False
if image_depth == 1:
self.shape = (image_length, image_width)
self.axes = 'YX'
else:
self.shape = (image_depth, image_length, image_width)
self.axes = 'ZYX'
elif self.is_rgb or samples_per_pixel > 1:
if self.is_contig:
self._shape = (1, 1, image_depth, image_length, image_width,
samples_per_pixel)
if image_depth == 1:
self.shape = (image_length, image_width, samples_per_pixel)
self.axes = 'YXS'
else:
self.shape = (image_depth, image_length, image_width,
samples_per_pixel)
self.axes = 'ZYXS'
else:
self._shape = (1, samples_per_pixel, image_depth,
image_length, image_width, 1)
if image_depth == 1:
self.shape = (samples_per_pixel, image_length, image_width)
self.axes = 'SYX'
else:
self.shape = (samples_per_pixel, image_depth,
image_length, image_width)
self.axes = 'SZYX'
if False and self.is_rgb and 'extra_samples' in self.tags:
# DISABLED: only use RGB and first alpha channel if exists
extra_samples = self.extra_samples
if self.tags['extra_samples'].count == 1:
extra_samples = (extra_samples, )
for exs in extra_samples:
if exs in ('unassalpha', 'assocalpha', 'unspecified'):
if self.is_contig:
self.shape = self.shape[:-1] + (4,)
else:
self.shape = (4,) + self.shape[1:]
break
else:
self._shape = (1, 1, image_depth, image_length, image_width, 1)
if image_depth == 1:
self.shape = (image_length, image_width)
self.axes = 'YX'
else:
self.shape = (image_depth, image_length, image_width)
self.axes = 'ZYX'
if not self.compression and 'strip_byte_counts' not in tags:
self.strip_byte_counts = (
product(self.shape) * (self.bits_per_sample // 8), )
assert len(self.shape) == len(self.axes)
def asarray(self, squeeze=True, colormapped=True, rgbonly=False,
scale_mdgel=False, memmap=False, reopen=True):
"""Read image data from file and return as numpy array.
Raise ValueError if format is unsupported.
        If any of 'squeeze', 'colormapped', or 'rgbonly' is not the default,
the shape of the returned array might be different from the page shape.
Parameters
----------
squeeze : bool
If True, all length-1 dimensions (except X and Y) are
squeezed out from result.
colormapped : bool
If True, color mapping is applied for palette-indexed images.
rgbonly : bool
If True, return RGB(A) image without additional extra samples.
        scale_mdgel : bool
            If True, MD Gel data will be scaled according to the private
            metadata in the second TIFF page. The dtype will be float32.
        memmap : bool
            If True, use numpy.memmap to read arrays from file if possible.
            Intended for 64-bit systems and files with large contiguous
            blocks of data.
        reopen : bool
            If True and the parent file handle is closed, the file is
            temporarily re-opened (and closed if no exception occurs).
"""
if not self._shape:
return
if self.dtype is None:
raise ValueError("data type not supported: %s%i" % (
self.sample_format, self.bits_per_sample))
if self.compression not in TIFF_DECOMPESSORS:
raise ValueError("cannot decompress %s" % self.compression)
tag = self.tags['sample_format']
if tag.count != 1 and any((i-tag.value[0] for i in tag.value)):
raise ValueError("sample formats don't match %s" % str(tag.value))
fh = self.parent.filehandle
closed = fh.closed
if closed:
if reopen:
fh.open()
else:
raise IOError("file handle is closed")
dtype = self._dtype
shape = self._shape
image_width = self.image_width
image_length = self.image_length
image_depth = self.image_depth
typecode = self.parent.byteorder + dtype
bits_per_sample = self.bits_per_sample
if self.is_tiled:
if 'tile_offsets' in self.tags:
byte_counts = self.tile_byte_counts
offsets = self.tile_offsets
else:
byte_counts = self.strip_byte_counts
offsets = self.strip_offsets
tile_width = self.tile_width
tile_length = self.tile_length
tile_depth = self.tile_depth if 'tile_depth' in self.tags else 1
tw = (image_width + tile_width - 1) // tile_width
tl = (image_length + tile_length - 1) // tile_length
td = (image_depth + tile_depth - 1) // tile_depth
shape = (shape[0], shape[1],
td*tile_depth, tl*tile_length, tw*tile_width, shape[-1])
tile_shape = (tile_depth, tile_length, tile_width, shape[-1])
runlen = tile_width
else:
byte_counts = self.strip_byte_counts
offsets = self.strip_offsets
runlen = image_width
if any(o < 2 for o in offsets):
raise ValueError("corrupted page")
if memmap and self._is_memmappable(rgbonly, colormapped):
result = fh.memmap_array(typecode, shape, offset=offsets[0])
elif self.is_contiguous:
fh.seek(offsets[0])
result = fh.read_array(typecode, product(shape))
result = result.astype('=' + dtype)
else:
if self.is_contig:
runlen *= self.samples_per_pixel
if bits_per_sample in (8, 16, 32, 64, 128):
if (bits_per_sample * runlen) % 8:
raise ValueError("data and sample size mismatch")
def unpack(x):
try:
return numpy.fromstring(x, typecode)
except ValueError as e:
# strips may be missing EOI
warnings.warn("unpack: %s" % e)
xlen = ((len(x) // (bits_per_sample // 8))
* (bits_per_sample // 8))
return numpy.fromstring(x[:xlen], typecode)
elif isinstance(bits_per_sample, tuple):
def unpack(x):
return unpackrgb(x, typecode, bits_per_sample)
else:
def unpack(x):
return unpackints(x, typecode, bits_per_sample, runlen)
decompress = TIFF_DECOMPESSORS[self.compression]
if self.compression == 'jpeg':
table = self.jpeg_tables if 'jpeg_tables' in self.tags else b''
decompress = lambda x: decodejpg(x, table, self.photometric)
if self.is_tiled:
result = numpy.empty(shape, dtype)
tw, tl, td, pl = 0, 0, 0, 0
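                # (tw, tl, td, pl) track the current tile position; tiles
                # advance across the width first, then length, depth, and
                # plane.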
for offset, bytecount in zip(offsets, byte_counts):
fh.seek(offset)
tile = unpack(decompress(fh.read(bytecount)))
tile.shape = tile_shape
if self.predictor == 'horizontal':
numpy.cumsum(tile, axis=-2, dtype=dtype, out=tile)
result[0, pl, td:td+tile_depth,
tl:tl+tile_length, tw:tw+tile_width, :] = tile
del tile
tw += tile_width
if tw >= shape[4]:
tw, tl = 0, tl + tile_length
if tl >= shape[3]:
tl, td = 0, td + tile_depth
if td >= shape[2]:
td, pl = 0, pl + 1
result = result[...,
:image_depth, :image_length, :image_width, :]
else:
strip_size = (self.rows_per_strip * self.image_width *
self.samples_per_pixel)
result = numpy.empty(shape, dtype).reshape(-1)
index = 0
for offset, bytecount in zip(offsets, byte_counts):
fh.seek(offset)
strip = fh.read(bytecount)
strip = decompress(strip)
strip = unpack(strip)
size = min(result.size, strip.size, strip_size,
result.size - index)
result[index:index+size] = strip[:size]
del strip
index += size
result.shape = self._shape
if self.predictor == 'horizontal' and not (self.is_tiled and not
self.is_contiguous):
# work around bug in LSM510 software
if not (self.parent.is_lsm and not self.compression):
numpy.cumsum(result, axis=-2, dtype=dtype, out=result)
if colormapped and self.is_palette:
if self.color_map.shape[1] >= 2**bits_per_sample:
# FluoView and LSM might fail here
result = numpy.take(self.color_map,
result[:, 0, :, :, :, 0], axis=1)
elif rgbonly and self.is_rgb and 'extra_samples' in self.tags:
# return only RGB and first alpha channel if exists
extra_samples = self.extra_samples
if self.tags['extra_samples'].count == 1:
extra_samples = (extra_samples, )
for i, exs in enumerate(extra_samples):
if exs in ('unassalpha', 'assocalpha', 'unspecified'):
if self.is_contig:
result = result[..., [0, 1, 2, 3+i]]
else:
result = result[:, [0, 1, 2, 3+i]]
break
else:
if self.is_contig:
result = result[..., :3]
else:
result = result[:, :3]
if squeeze:
try:
result.shape = self.shape
except ValueError:
warnings.warn("failed to reshape from %s to %s" % (
str(result.shape), str(self.shape)))
if scale_mdgel and self.parent.is_mdgel:
# MD Gel stores private metadata in the second page
tags = self.parent.pages[1]
if tags.md_file_tag in (2, 128):
scale = tags.md_scale_pixel
scale = scale[0] / scale[1] # rational
result = result.astype('float32')
if tags.md_file_tag == 2:
                result **= 2  # data is stored as square roots
result *= scale
if closed:
# TODO: file remains open if an exception occurred above
fh.close()
return result
def _is_memmappable(self, rgbonly, colormapped):
"""Return if image data in file can be memory mapped."""
if not self.parent.filehandle.is_file or not self.is_contiguous:
return False
return not (self.predictor or
(rgbonly and 'extra_samples' in self.tags) or
(colormapped and self.is_palette) or
({'big': '>', 'little': '<'}[sys.byteorder] !=
self.parent.byteorder))
@lazyattr
def is_contiguous(self):
"""Return offset and size of contiguous data, else None.
Excludes prediction and colormapping.
"""
if self.compression or self.bits_per_sample not in (8, 16, 32, 64):
return
if self.is_tiled:
if (self.image_width != self.tile_width or
self.image_length % self.tile_length or
self.tile_width % 16 or self.tile_length % 16):
return
if ('image_depth' in self.tags and 'tile_depth' in self.tags and
(self.image_length != self.tile_length or
self.image_depth % self.tile_depth)):
return
offsets = self.tile_offsets
byte_counts = self.tile_byte_counts
else:
offsets = self.strip_offsets
byte_counts = self.strip_byte_counts
if len(offsets) == 1:
return offsets[0], byte_counts[0]
if self.is_stk or all(offsets[i] + byte_counts[i] == offsets[i+1]
or byte_counts[i+1] == 0 # no data/ignore offset
for i in range(len(offsets)-1)):
return offsets[0], sum(byte_counts)
def __str__(self):
"""Return string containing information about page."""
s = ', '.join(s for s in (
' x '.join(str(i) for i in self.shape),
str(numpy.dtype(self.dtype)),
'%s bit' % str(self.bits_per_sample),
self.photometric if 'photometric' in self.tags else '',
self.compression if self.compression else 'raw',
'|'.join(t[3:] for t in (
'is_stk', 'is_lsm', 'is_nih', 'is_ome', 'is_imagej',
'is_micromanager', 'is_fluoview', 'is_mdgel', 'is_mediacy',
'is_sgi', 'is_reduced', 'is_tiled',
'is_contiguous') if getattr(self, t))) if s)
return "Page %i: %s" % (self.index, s)
def __getattr__(self, name):
"""Return tag value."""
if name in self.tags:
value = self.tags[name].value
setattr(self, name, value)
return value
raise AttributeError(name)
@lazyattr
def uic_tags(self):
"""Consolidate UIC tags."""
if not self.is_stk:
raise AttributeError("uic_tags")
tags = self.tags
result = Record()
result.number_planes = tags['uic2tag'].count
if 'image_description' in tags:
result.plane_descriptions = self.image_description.split(b'\x00')
if 'uic1tag' in tags:
result.update(tags['uic1tag'].value)
if 'uic3tag' in tags:
result.update(tags['uic3tag'].value) # wavelengths
if 'uic4tag' in tags:
result.update(tags['uic4tag'].value) # override uic1 tags
uic2tag = tags['uic2tag'].value
result.z_distance = uic2tag.z_distance
result.time_created = uic2tag.time_created
result.time_modified = uic2tag.time_modified
try:
result.datetime_created = [
julian_datetime(*dt) for dt in
zip(uic2tag.date_created, uic2tag.time_created)]
result.datetime_modified = [
julian_datetime(*dt) for dt in
zip(uic2tag.date_modified, uic2tag.time_modified)]
except ValueError as e:
warnings.warn("uic_tags: %s" % e)
return result
@lazyattr
def imagej_tags(self):
"""Consolidate ImageJ metadata."""
if not self.is_imagej:
raise AttributeError("imagej_tags")
tags = self.tags
if 'image_description_1' in tags:
# MicroManager
result = imagej_description(tags['image_description_1'].value)
else:
result = imagej_description(tags['image_description'].value)
if 'imagej_metadata' in tags:
try:
result.update(imagej_metadata(
tags['imagej_metadata'].value,
tags['imagej_byte_counts'].value,
self.parent.byteorder))
except Exception as e:
warnings.warn(str(e))
return Record(result)
@lazyattr
def is_rgb(self):
"""True if page contains a RGB image."""
return ('photometric' in self.tags and
self.tags['photometric'].value == 2)
@lazyattr
def is_contig(self):
"""True if page contains a contiguous image."""
return ('planar_configuration' in self.tags and
self.tags['planar_configuration'].value == 1)
@lazyattr
def is_palette(self):
"""True if page contains a palette-colored image and not OME or STK."""
try:
# turn off color mapping for OME-TIFF and STK
if self.is_stk or self.is_ome or self.parent.is_ome:
return False
except IndexError:
pass # OME-XML not found in first page
return ('photometric' in self.tags and
self.tags['photometric'].value == 3)
@lazyattr
def is_tiled(self):
"""True if page contains tiled image."""
return 'tile_width' in self.tags
@lazyattr
def is_reduced(self):
"""True if page is a reduced image of another image."""
return bool(self.tags['new_subfile_type'].value & 1)
@lazyattr
def is_mdgel(self):
"""True if page contains md_file_tag tag."""
return 'md_file_tag' in self.tags
@lazyattr
def is_mediacy(self):
"""True if page contains Media Cybernetics Id tag."""
return ('mc_id' in self.tags and
self.tags['mc_id'].value.startswith(b'MC TIFF'))
@lazyattr
def is_stk(self):
"""True if page contains UIC2Tag tag."""
return 'uic2tag' in self.tags
@lazyattr
def is_lsm(self):
"""True if page contains LSM CZ_LSM_INFO tag."""
return 'cz_lsm_info' in self.tags
@lazyattr
def is_fluoview(self):
"""True if page contains FluoView MM_STAMP tag."""
return 'mm_stamp' in self.tags
@lazyattr
def is_nih(self):
"""True if page contains NIH image header."""
return 'nih_image_header' in self.tags
@lazyattr
def is_sgi(self):
"""True if page contains SGI image and tile depth tags."""
return 'image_depth' in self.tags and 'tile_depth' in self.tags
@lazyattr
def is_ome(self):
"""True if page contains OME-XML in image_description tag."""
return ('image_description' in self.tags and self.tags[
'image_description'].value.startswith(b'<?xml version='))
@lazyattr
def is_shaped(self):
"""True if page contains shape in image_description tag."""
return ('image_description' in self.tags and self.tags[
'image_description'].value.startswith(b'shape=('))
@lazyattr
def is_imagej(self):
"""True if page contains ImageJ description."""
return (
('image_description' in self.tags and
self.tags['image_description'].value.startswith(b'ImageJ=')) or
('image_description_1' in self.tags and # Micromanager
self.tags['image_description_1'].value.startswith(b'ImageJ=')))
@lazyattr
def is_micromanager(self):
"""True if page contains Micro-Manager metadata."""
return 'micromanager_metadata' in self.tags
class TiffTag(object):
"""A TIFF tag structure.
Attributes
----------
name : string
Attribute name of tag.
code : int
Decimal code of tag.
dtype : str
Datatype of tag data. One of TIFF_DATA_TYPES.
count : int
Number of values.
value : various types
Tag data as Python object.
value_offset : int
Location of value in file, if any.
All attributes are read-only.
"""
__slots__ = ('code', 'name', 'count', 'dtype', 'value', 'value_offset',
'_offset', '_value', '_type')
class Error(Exception):
pass
def __init__(self, arg, **kwargs):
"""Initialize instance from file or arguments."""
self._offset = None
if hasattr(arg, '_fh'):
self._fromfile(arg, **kwargs)
else:
self._fromdata(arg, **kwargs)
def _fromdata(self, code, dtype, count, value, name=None):
"""Initialize instance from arguments."""
self.code = int(code)
self.name = name if name else str(code)
self.dtype = TIFF_DATA_TYPES[dtype]
self.count = int(count)
self.value = value
self._value = value
self._type = dtype
def _fromfile(self, parent):
"""Read tag structure from open file. Advance file cursor."""
fh = parent.filehandle
byteorder = parent.byteorder
self._offset = fh.tell()
self.value_offset = self._offset + parent.offset_size + 4
fmt, size = {4: ('HHI4s', 12), 8: ('HHQ8s', 20)}[parent.offset_size]
data = fh.read(size)
code, dtype = struct.unpack(byteorder + fmt[:2], data[:4])
count, value = struct.unpack(byteorder + fmt[2:], data[4:])
self._value = value
self._type = dtype
if code in TIFF_TAGS:
name = TIFF_TAGS[code][0]
elif code in CUSTOM_TAGS:
name = CUSTOM_TAGS[code][0]
else:
name = str(code)
try:
dtype = TIFF_DATA_TYPES[self._type]
except KeyError:
raise TiffTag.Error("unknown tag data type %i" % self._type)
fmt = '%s%i%s' % (byteorder, count*int(dtype[0]), dtype[1])
size = struct.calcsize(fmt)
if size > parent.offset_size or code in CUSTOM_TAGS:
pos = fh.tell()
tof = {4: 'I', 8: 'Q'}[parent.offset_size]
self.value_offset = offset = struct.unpack(byteorder+tof, value)[0]
if offset < 0 or offset > parent.filehandle.size:
raise TiffTag.Error("corrupt file - invalid tag value offset")
elif offset < 4:
raise TiffTag.Error("corrupt value offset for tag %i" % code)
fh.seek(offset)
if code in CUSTOM_TAGS:
readfunc = CUSTOM_TAGS[code][1]
value = readfunc(fh, byteorder, dtype, count)
if isinstance(value, dict): # numpy.core.records.record
value = Record(value)
elif code in TIFF_TAGS or dtype[-1] == 's':
value = struct.unpack(fmt, fh.read(size))
else:
value = read_numpy(fh, byteorder, dtype, count)
fh.seek(pos)
else:
value = struct.unpack(fmt, value[:size])
if code not in CUSTOM_TAGS and code not in (273, 279, 324, 325):
# scalar value if not strip/tile offsets/byte_counts
if len(value) == 1:
value = value[0]
if (dtype.endswith('s') and isinstance(value, bytes)
and self._type != 7):
# TIFF ASCII fields can contain multiple strings,
# each terminated with a NUL
value = stripascii(value)
self.code = code
self.name = name
self.dtype = dtype
self.count = count
self.value = value
def _correct_lsm_bitspersample(self, parent):
"""Correct LSM bitspersample tag.
        Old LSM writers may store the two 16-bit values in a separate
        region, even though they would fit into the tag's value field.
"""
if self.code == 258 and self.count == 2:
# TODO: test this. Need example file.
warnings.warn("correcting LSM bitspersample tag")
fh = parent.filehandle
tof = {4: '<I', 8: '<Q'}[parent.offset_size]
self.value_offset = struct.unpack(tof, self._value)[0]
fh.seek(self.value_offset)
self.value = struct.unpack("<HH", fh.read(4))
def as_str(self):
"""Return value as human readable string."""
return ((str(self.value).split('\n', 1)[0]) if (self._type != 7)
else '<undefined>')
def __str__(self):
"""Return string containing information about tag."""
return ' '.join(str(getattr(self, s)) for s in self.__slots__)
class TiffSequence(object):
"""Sequence of image files.
The data shape and dtype of all files must match.
    Attributes
----------
files : list
List of file names.
shape : tuple
Shape of image sequence.
axes : str
Labels of axes in shape.
Examples
--------
>>> tifs = TiffSequence("test.oif.files/*.tif") # doctest: +SKIP
>>> tifs.shape, tifs.axes # doctest: +SKIP
((2, 100), 'CT')
>>> data = tifs.asarray() # doctest: +SKIP
>>> data.shape # doctest: +SKIP
(2, 100, 256, 256)
"""
_patterns = {
'axes': r"""
# matches Olympus OIF and Leica TIFF series
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
"""}
class ParseError(Exception):
pass
def __init__(self, files, imread=TiffFile, pattern='axes',
*args, **kwargs):
"""Initialize instance from multiple files.
Parameters
----------
files : str, or sequence of str
Glob pattern or sequence of file names.
imread : function or class
Image read function or class with asarray function returning numpy
array from single file.
pattern : str
Regular expression pattern that matches axes names and sequence
indices in file names.
By default this matches Olympus OIF and Leica TIFF series.
"""
if isinstance(files, basestring):
files = natural_sorted(glob.glob(files))
files = list(files)
if not files:
raise ValueError("no files found")
#if not os.path.isfile(files[0]):
# raise ValueError("file not found")
self.files = files
if hasattr(imread, 'asarray'):
# redefine imread
_imread = imread
def imread(fname, *args, **kwargs):
with _imread(fname) as im:
return im.asarray(*args, **kwargs)
self.imread = imread
self.pattern = self._patterns.get(pattern, pattern)
try:
self._parse()
if not self.axes:
self.axes = 'I'
except self.ParseError:
self.axes = 'I'
self.shape = (len(files),)
self._start_index = (0,)
self._indices = tuple((i,) for i in range(len(files)))
def __str__(self):
"""Return string with information about image sequence."""
return "\n".join([
self.files[0],
'* files: %i' % len(self.files),
'* axes: %s' % self.axes,
'* shape: %s' % str(self.shape)])
def __len__(self):
return len(self.files)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
pass
def asarray(self, memmap=False, *args, **kwargs):
"""Read image data from all files and return as single numpy array.
If memmap is True, return an array stored in a binary file on disk.
The args and kwargs parameters are passed to the imread function.
Raise IndexError or ValueError if image shapes don't match.
"""
im = self.imread(self.files[0], *args, **kwargs)
shape = self.shape + im.shape
if memmap:
with tempfile.NamedTemporaryFile() as fh:
result = numpy.memmap(fh, dtype=im.dtype, shape=shape)
else:
result = numpy.zeros(shape, dtype=im.dtype)
result = result.reshape(-1, *im.shape)
for index, fname in zip(self._indices, self.files):
index = [i-j for i, j in zip(index, self._start_index)]
index = numpy.ravel_multi_index(index, self.shape)
im = self.imread(fname, *args, **kwargs)
result[index] = im
result.shape = shape
return result
def _parse(self):
"""Get axes and shape from file names."""
if not self.pattern:
raise self.ParseError("invalid pattern")
pattern = re.compile(self.pattern, re.IGNORECASE | re.VERBOSE)
matches = pattern.findall(self.files[0])
if not matches:
raise self.ParseError("pattern doesn't match file names")
matches = matches[-1]
if len(matches) % 2:
raise self.ParseError("pattern doesn't match axis name and index")
axes = ''.join(m for m in matches[::2] if m)
if not axes:
raise self.ParseError("pattern doesn't match file names")
indices = []
for fname in self.files:
matches = pattern.findall(fname)[-1]
if axes != ''.join(m for m in matches[::2] if m):
raise ValueError("axes don't match within the image sequence")
indices.append([int(m) for m in matches[1::2] if m])
shape = tuple(numpy.max(indices, axis=0))
start_index = tuple(numpy.min(indices, axis=0))
shape = tuple(i-j+1 for i, j in zip(shape, start_index))
if product(shape) != len(self.files):
warnings.warn("files are missing. Missing data are zeroed")
self.axes = axes.upper()
self.shape = shape
self._indices = indices
self._start_index = start_index
class Record(dict):
"""Dictionary with attribute access.
Can also be initialized with numpy.core.records.record.
"""
__slots__ = ()
def __init__(self, arg=None, **kwargs):
if kwargs:
arg = kwargs
elif arg is None:
arg = {}
try:
dict.__init__(self, arg)
except (TypeError, ValueError):
for i, name in enumerate(arg.dtype.names):
v = arg[i]
self[name] = v if v.dtype.char != 'S' else stripnull(v)
def __getattr__(self, name):
return self[name]
def __setattr__(self, name, value):
self.__setitem__(name, value)
def __str__(self):
"""Pretty print Record."""
s = []
lists = []
for k in sorted(self):
try:
                if k.startswith('_'):  # fails for non-string keys
continue
except AttributeError:
pass
v = self[k]
if isinstance(v, (list, tuple)) and len(v):
if isinstance(v[0], Record):
lists.append((k, v))
continue
elif isinstance(v[0], TiffPage):
v = [i.index for i in v if i]
s.append(
("* %s: %s" % (k, str(v))).split("\n", 1)[0]
[:PRINT_LINE_LEN].rstrip())
for k, v in lists:
            lst = []
            for i, w in enumerate(v):
                lst.append("* %s[%i]\n %s" % (k, i,
                                              str(w).replace("\n", "\n ")))
            s.append('\n'.join(lst))
return '\n'.join(s)
class TiffTags(Record):
"""Dictionary of TiffTag with attribute access."""
def __str__(self):
"""Return string with information about all tags."""
s = []
for tag in sorted(self.values(), key=lambda x: x.code):
typecode = "%i%s" % (tag.count * int(tag.dtype[0]), tag.dtype[1])
line = "* %i %s (%s) %s" % (
tag.code, tag.name, typecode, tag.as_str())
s.append(line[:PRINT_LINE_LEN].lstrip())
return '\n'.join(s)
class FileHandle(object):
"""Binary file handle.
* Handle embedded files (for CZI within CZI files).
    * Allow re-opening closed files (for multi-file formats such as OME-TIFF).
    * Read numpy arrays and records from file-like objects.
Only binary read, seek, tell, and close are supported on embedded files.
When initialized from another file handle, do not use it unless this
FileHandle is closed.
Attributes
----------
name : str
Name of the file.
path : str
Absolute path to file.
size : int
Size of file in bytes.
is_file : bool
        If True, file has a fileno and can be memory mapped.
All attributes are read-only.
"""
__slots__ = ('_fh', '_arg', '_mode', '_name', '_dir',
'_offset', '_size', '_close', 'is_file')
def __init__(self, arg, mode='rb', name=None, offset=None, size=None):
"""Initialize file handle from file name or another file handle.
Parameters
----------
arg : str, File, or FileHandle
File name or open file handle.
mode : str
File open mode in case 'arg' is a file name.
name : str
Optional name of file in case 'arg' is a file handle.
offset : int
Optional start position of embedded file. By default this is
the current file position.
size : int
Optional size of embedded file. By default this is the number
of bytes from the 'offset' to the end of the file.
"""
self._fh = None
self._arg = arg
self._mode = mode
self._name = name
self._dir = ''
self._offset = offset
self._size = size
self._close = True
self.is_file = False
self.open()
def open(self):
"""Open or re-open file."""
if self._fh:
return # file is open
if isinstance(self._arg, basestring):
# file name
self._arg = os.path.abspath(self._arg)
self._dir, self._name = os.path.split(self._arg)
self._fh = open(self._arg, self._mode)
self._close = True
if self._offset is None:
self._offset = 0
elif isinstance(self._arg, FileHandle):
# FileHandle
self._fh = self._arg._fh
if self._offset is None:
self._offset = 0
self._offset += self._arg._offset
self._close = False
if not self._name:
if self._offset:
name, ext = os.path.splitext(self._arg._name)
self._name = "%s@%i%s" % (name, self._offset, ext)
else:
self._name = self._arg._name
self._dir = self._arg._dir
else:
# open file object
self._fh = self._arg
if self._offset is None:
self._offset = self._arg.tell()
self._close = False
if not self._name:
try:
self._dir, self._name = os.path.split(self._fh.name)
except AttributeError:
self._name = "Unnamed stream"
if self._offset:
self._fh.seek(self._offset)
if self._size is None:
pos = self._fh.tell()
self._fh.seek(self._offset, 2)
self._size = self._fh.tell()
self._fh.seek(pos)
try:
self._fh.fileno()
self.is_file = True
except Exception:
self.is_file = False
def read(self, size=-1):
"""Read 'size' bytes from file, or until EOF is reached."""
if size < 0 and self._offset:
size = self._size
return self._fh.read(size)
def memmap_array(self, dtype, shape, offset=0, mode='r', order='C'):
"""Return numpy.memmap of data stored in file."""
if not self.is_file:
raise ValueError("Can not memory map file without fileno.")
return numpy.memmap(self._fh, dtype=dtype, mode=mode,
offset=self._offset + offset,
shape=shape, order=order)
def read_array(self, dtype, count=-1, sep=""):
"""Return numpy array from file.
Work around numpy issue #2230, "numpy.fromfile does not accept
StringIO object" https://github.com/numpy/numpy/issues/2230.
"""
try:
return numpy.fromfile(self._fh, dtype, count, sep)
except IOError:
if count < 0:
size = self._size
else:
size = count * numpy.dtype(dtype).itemsize
data = self._fh.read(size)
return numpy.fromstring(data, dtype, count, sep)
def read_record(self, dtype, shape=1, byteorder=None):
"""Return numpy record from file."""
try:
rec = numpy.rec.fromfile(self._fh, dtype, shape,
byteorder=byteorder)
except Exception:
dtype = numpy.dtype(dtype)
if shape is None:
shape = self._size // dtype.itemsize
size = product(sequence(shape)) * dtype.itemsize
data = self._fh.read(size)
return numpy.rec.fromstring(data, dtype, shape,
byteorder=byteorder)
return rec[0] if shape == 1 else rec
def tell(self):
"""Return file's current position."""
return self._fh.tell() - self._offset
def seek(self, offset, whence=0):
"""Set file's current position."""
if self._offset:
if whence == 0:
self._fh.seek(self._offset + offset, whence)
return
elif whence == 2:
self._fh.seek(self._offset + self._size + offset, 0)
return
self._fh.seek(offset, whence)
def close(self):
"""Close file."""
if self._close and self._fh:
self._fh.close()
self._fh = None
self.is_file = False
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def __getattr__(self, name):
"""Return attribute from underlying file object."""
if self._offset:
warnings.warn(
"FileHandle: '%s' not implemented for embedded files" % name)
return getattr(self._fh, name)
@property
def name(self):
return self._name
@property
def dirname(self):
return self._dir
@property
def path(self):
return os.path.join(self._dir, self._name)
@property
def size(self):
return self._size
@property
def closed(self):
return self._fh is None
def read_bytes(fh, byteorder, dtype, count):
"""Read tag data from file and return as byte string."""
dtype = 'b' if dtype[-1] == 's' else byteorder+dtype[-1]
return fh.read_array(dtype, count).tostring()
def read_numpy(fh, byteorder, dtype, count):
"""Read tag data from file and return as numpy array."""
dtype = 'b' if dtype[-1] == 's' else byteorder+dtype[-1]
return fh.read_array(dtype, count)
def read_json(fh, byteorder, dtype, count):
"""Read JSON tag data from file and return as object."""
data = fh.read(count)
try:
return json.loads(unicode(stripnull(data), 'utf-8'))
except ValueError:
warnings.warn("invalid JSON `%s`" % data)
def read_mm_header(fh, byteorder, dtype, count):
"""Read MM_HEADER tag from file and return as numpy.rec.array."""
return fh.read_record(MM_HEADER, byteorder=byteorder)
def read_mm_stamp(fh, byteorder, dtype, count):
"""Read MM_STAMP tag from file and return as numpy.array."""
return fh.read_array(byteorder+'f8', 8)
def read_uic1tag(fh, byteorder, dtype, count, plane_count=None):
"""Read MetaMorph STK UIC1Tag from file and return as dictionary.
Return empty dictionary if plane_count is unknown.
"""
assert dtype in ('2I', '1I') and byteorder == '<'
result = {}
if dtype == '2I':
# pre MetaMorph 2.5 (not tested)
values = fh.read_array('<u4', 2*count).reshape(count, 2)
result = {'z_distance': values[:, 0] / values[:, 1]}
elif plane_count:
for i in range(count):
tagid = struct.unpack('<I', fh.read(4))[0]
if tagid in (28, 29, 37, 40, 41):
# silently skip unexpected tags
fh.read(4)
continue
name, value = read_uic_tag(fh, tagid, plane_count, offset=True)
result[name] = value
return result
def read_uic2tag(fh, byteorder, dtype, plane_count):
"""Read MetaMorph STK UIC2Tag from file and return as dictionary."""
assert dtype == '2I' and byteorder == '<'
values = fh.read_array('<u4', 6*plane_count).reshape(plane_count, 6)
return {
'z_distance': values[:, 0] / values[:, 1],
'date_created': values[:, 2], # julian days
'time_created': values[:, 3], # milliseconds
'date_modified': values[:, 4], # julian days
'time_modified': values[:, 5], # milliseconds
}
def read_uic3tag(fh, byteorder, dtype, plane_count):
"""Read MetaMorph STK UIC3Tag from file and return as dictionary."""
assert dtype == '2I' and byteorder == '<'
values = fh.read_array('<u4', 2*plane_count).reshape(plane_count, 2)
return {'wavelengths': values[:, 0] / values[:, 1]}
def read_uic4tag(fh, byteorder, dtype, plane_count):
"""Read MetaMorph STK UIC4Tag from file and return as dictionary."""
assert dtype == '1I' and byteorder == '<'
result = {}
while True:
tagid = struct.unpack('<H', fh.read(2))[0]
if tagid == 0:
break
name, value = read_uic_tag(fh, tagid, plane_count, offset=False)
result[name] = value
return result
def read_uic_tag(fh, tagid, plane_count, offset):
"""Read a single UIC tag value from file and return tag name and value.
UIC1Tags use an offset.
"""
def read_int(count=1):
value = struct.unpack('<%iI' % count, fh.read(4*count))
return value[0] if count == 1 else value
try:
name, dtype = UIC_TAGS[tagid]
except KeyError:
# unknown tag
return '_tagid_%i' % tagid, read_int()
if offset:
pos = fh.tell()
if dtype not in (int, None):
off = read_int()
if off < 8:
warnings.warn("invalid offset for uic tag '%s': %i"
% (name, off))
return name, off
fh.seek(off)
if dtype is None:
# skip
name = '_' + name
value = read_int()
elif dtype is int:
# int
value = read_int()
elif dtype is Fraction:
# fraction
value = read_int(2)
value = value[0] / value[1]
elif dtype is julian_datetime:
# datetime
value = julian_datetime(*read_int(2))
elif dtype is read_uic_image_property:
# ImagePropertyEx
value = read_uic_image_property(fh)
elif dtype is str:
# pascal string
size = read_int()
if 0 <= size < 2**10:
value = struct.unpack('%is' % size, fh.read(size))[0][:-1]
value = stripnull(value)
elif offset:
value = ''
warnings.warn("corrupt string in uic tag '%s'" % name)
else:
raise ValueError("invalid string size %i" % size)
elif dtype == '%ip':
# sequence of pascal strings
value = []
for i in range(plane_count):
size = read_int()
if 0 <= size < 2**10:
string = struct.unpack('%is' % size, fh.read(size))[0][:-1]
string = stripnull(string)
value.append(string)
elif offset:
warnings.warn("corrupt string in uic tag '%s'" % name)
else:
raise ValueError("invalid string size %i" % size)
else:
# struct or numpy type
dtype = '<' + dtype
if '%i' in dtype:
dtype = dtype % plane_count
if '(' in dtype:
# numpy type
value = fh.read_array(dtype, 1)[0]
if value.shape[-1] == 2:
# assume fractions
value = value[..., 0] / value[..., 1]
else:
# struct format
value = struct.unpack(dtype, fh.read(struct.calcsize(dtype)))
if len(value) == 1:
value = value[0]
if offset:
fh.seek(pos + 4)
return name, value
def read_uic_image_property(fh):
"""Read UIC ImagePropertyEx tag from file and return as dict."""
# TODO: test this
size = struct.unpack('B', fh.read(1))[0]
name = struct.unpack('%is' % size, fh.read(size))[0][:-1]
flags, prop = struct.unpack('<IB', fh.read(5))
if prop == 1:
value = struct.unpack('II', fh.read(8))
value = value[0] / value[1]
else:
size = struct.unpack('B', fh.read(1))[0]
value = struct.unpack('%is' % size, fh.read(size))[0]
return dict(name=name, flags=flags, value=value)
def read_cz_lsm_info(fh, byteorder, dtype, count):
"""Read CS_LSM_INFO tag from file and return as numpy.rec.array."""
assert byteorder == '<'
magic_number, structure_size = struct.unpack('<II', fh.read(8))
if magic_number not in (50350412, 67127628):
raise ValueError("not a valid CS_LSM_INFO structure")
fh.seek(-8, 1)
if structure_size < numpy.dtype(CZ_LSM_INFO).itemsize:
# adjust structure according to structure_size
cz_lsm_info = []
size = 0
for name, dtype in CZ_LSM_INFO:
size += numpy.dtype(dtype).itemsize
if size > structure_size:
break
cz_lsm_info.append((name, dtype))
else:
cz_lsm_info = CZ_LSM_INFO
return fh.read_record(cz_lsm_info, byteorder=byteorder)
def read_cz_lsm_floatpairs(fh):
"""Read LSM sequence of float pairs from file and return as list."""
size = struct.unpack('<i', fh.read(4))[0]
return fh.read_array('<2f8', count=size)
def read_cz_lsm_positions(fh):
"""Read LSM positions from file and return as list."""
size = struct.unpack('<I', fh.read(4))[0]
return fh.read_array('<2f8', count=size)
def read_cz_lsm_time_stamps(fh):
"""Read LSM time stamps from file and return as list."""
size, count = struct.unpack('<ii', fh.read(8))
if size != (8 + 8 * count):
raise ValueError("lsm_time_stamps block is too short")
# return struct.unpack('<%dd' % count, fh.read(8*count))
return fh.read_array('<f8', count=count)
def read_cz_lsm_event_list(fh):
"""Read LSM events from file and return as list of (time, type, text)."""
count = struct.unpack('<II', fh.read(8))[1]
events = []
while count > 0:
esize, etime, etype = struct.unpack('<IdI', fh.read(16))
etext = stripnull(fh.read(esize - 16))
events.append((etime, etype, etext))
count -= 1
return events
def read_cz_lsm_scan_info(fh):
"""Read LSM scan information from file and return as Record."""
block = Record()
blocks = [block]
unpack = struct.unpack
if 0x10000000 != struct.unpack('<I', fh.read(4))[0]:
# not a Recording sub block
raise ValueError("not a lsm_scan_info structure")
fh.read(8)
while True:
entry, dtype, size = unpack('<III', fh.read(12))
if dtype == 2:
# ascii
value = stripnull(fh.read(size))
elif dtype == 4:
# long
value = unpack('<i', fh.read(4))[0]
elif dtype == 5:
# rational
value = unpack('<d', fh.read(8))[0]
else:
value = 0
if entry in CZ_LSM_SCAN_INFO_ARRAYS:
blocks.append(block)
name = CZ_LSM_SCAN_INFO_ARRAYS[entry]
newobj = []
setattr(block, name, newobj)
block = newobj
elif entry in CZ_LSM_SCAN_INFO_STRUCTS:
blocks.append(block)
newobj = Record()
block.append(newobj)
block = newobj
elif entry in CZ_LSM_SCAN_INFO_ATTRIBUTES:
name = CZ_LSM_SCAN_INFO_ATTRIBUTES[entry]
setattr(block, name, value)
elif entry == 0xffffffff:
# end sub block
block = blocks.pop()
else:
# unknown entry
setattr(block, "entry_0x%x" % entry, value)
if not blocks:
break
return block
def read_nih_image_header(fh, byteorder, dtype, count):
"""Read NIH_IMAGE_HEADER tag from file and return as numpy.rec.array."""
a = fh.read_record(NIH_IMAGE_HEADER, byteorder=byteorder)
a = a.newbyteorder(byteorder)
a.xunit = a.xunit[:a._xunit_len]
a.um = a.um[:a._um_len]
return a
def read_micromanager_metadata(fh):
"""Read MicroManager non-TIFF settings from open file and return as dict.
The settings can be used to read image data without parsing the TIFF file.
Raise ValueError if file does not contain valid MicroManager metadata.
"""
fh.seek(0)
try:
byteorder = {b'II': '<', b'MM': '>'}[fh.read(2)]
    except (IndexError, KeyError):
raise ValueError("not a MicroManager TIFF file")
results = {}
fh.seek(8)
(index_header, index_offset, display_header, display_offset,
comments_header, comments_offset, summary_header, summary_length
) = struct.unpack(byteorder + "IIIIIIII", fh.read(32))
if summary_header != 2355492:
raise ValueError("invalid MicroManager summary_header")
results['summary'] = read_json(fh, byteorder, None, summary_length)
if index_header != 54773648:
raise ValueError("invalid MicroManager index_header")
fh.seek(index_offset)
header, count = struct.unpack(byteorder + "II", fh.read(8))
if header != 3453623:
raise ValueError("invalid MicroManager index_header")
data = struct.unpack(byteorder + "IIIII"*count, fh.read(20*count))
results['index_map'] = {
'channel': data[::5], 'slice': data[1::5], 'frame': data[2::5],
'position': data[3::5], 'offset': data[4::5]}
if display_header != 483765892:
raise ValueError("invalid MicroManager display_header")
fh.seek(display_offset)
header, count = struct.unpack(byteorder + "II", fh.read(8))
if header != 347834724:
raise ValueError("invalid MicroManager display_header")
results['display_settings'] = read_json(fh, byteorder, None, count)
if comments_header != 99384722:
raise ValueError("invalid MicroManager comments_header")
fh.seek(comments_offset)
header, count = struct.unpack(byteorder + "II", fh.read(8))
if header != 84720485:
raise ValueError("invalid MicroManager comments_header")
results['comments'] = read_json(fh, byteorder, None, count)
return results
def imagej_metadata(data, bytecounts, byteorder):
"""Return dict from ImageJ metadata tag value."""
_str = str if sys.version_info[0] < 3 else lambda x: str(x, 'cp1252')
def read_string(data, byteorder):
return _str(stripnull(data[0 if byteorder == '<' else 1::2]))
def read_double(data, byteorder):
return struct.unpack(byteorder+('d' * (len(data) // 8)), data)
def read_bytes(data, byteorder):
#return struct.unpack('b' * len(data), data)
return numpy.fromstring(data, 'uint8')
metadata_types = { # big endian
b'info': ('info', read_string),
b'labl': ('labels', read_string),
b'rang': ('ranges', read_double),
b'luts': ('luts', read_bytes),
b'roi ': ('roi', read_bytes),
b'over': ('overlays', read_bytes)}
metadata_types.update( # little endian
dict((k[::-1], v) for k, v in metadata_types.items()))
if not bytecounts:
raise ValueError("no ImageJ metadata")
    if data[:4] not in (b'IJIJ', b'JIJI'):
raise ValueError("invalid ImageJ metadata")
header_size = bytecounts[0]
if header_size < 12 or header_size > 804:
raise ValueError("invalid ImageJ metadata header size")
ntypes = (header_size - 4) // 8
header = struct.unpack(byteorder+'4sI'*ntypes, data[4:4+ntypes*8])
pos = 4 + ntypes * 8
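    # header is a sequence of (4-byte type, count) pairs; the data blocks
    # follow, with sizes taken from the remaining bytecounts entries.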
counter = 0
result = {}
for mtype, count in zip(header[::2], header[1::2]):
values = []
name, func = metadata_types.get(mtype, (_str(mtype), read_bytes))
for _ in range(count):
counter += 1
pos1 = pos + bytecounts[counter]
values.append(func(data[pos:pos1], byteorder))
pos = pos1
result[name.strip()] = values[0] if count == 1 else values
return result
def imagej_description(description):
"""Return dict from ImageJ image_description tag."""
def _bool(val):
return {b'true': True, b'false': False}[val.lower()]
_str = str if sys.version_info[0] < 3 else lambda x: str(x, 'cp1252')
result = {}
for line in description.splitlines():
try:
key, val = line.split(b'=')
except Exception:
continue
key = key.strip()
val = val.strip()
for dtype in (int, float, _bool, _str):
try:
val = dtype(val)
break
except Exception:
pass
result[_str(key)] = val
return result
def _replace_by(module_function, package=None, warn=False):
"""Try replace decorated function by module.function.
This is used to replace local functions with functions from another
(usually compiled) module, if available.
Parameters
----------
module_function : str
Module and function path string (e.g. numpy.ones)
package : str, optional
The parent package of the module
warn : bool, optional
Whether to warn when wrapping fails
Returns
-------
func : function
Wrapped function, hopefully calling a function in another module.
Example
-------
>>> @_replace_by('_tifffile.decodepackbits')
... def decodepackbits(encoded):
... raise NotImplementedError
"""
def decorate(func, module_function=module_function, warn=warn):
try:
modname, function = module_function.split('.')
if package is None:
full_name = modname
else:
full_name = package + '.' + modname
module = __import__(full_name, fromlist=[modname])
func, oldfunc = getattr(module, function), func
globals()['__old_' + func.__name__] = oldfunc
except Exception:
if warn:
warnings.warn("failed to import %s" % module_function)
return func
return decorate
def decodejpg(encoded, tables=b'', photometric=None,
ycbcr_subsampling=None, ycbcr_positioning=None):
"""Decode JPEG encoded byte string (using _czifile extension module)."""
import _czifile
image = _czifile.decodejpg(encoded, tables)
if photometric == 'rgb' and ycbcr_subsampling and ycbcr_positioning:
# TODO: convert YCbCr to RGB
pass
return image.tostring()
@_replace_by('_tifffile.decodepackbits')
def decodepackbits(encoded):
"""Decompress PackBits encoded byte string.
PackBits is a simple byte-oriented run-length compression scheme.
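    A header byte n encodes a literal run (n < 128: copy the next n+1
    bytes) or a replicate run (n > 128: repeat the next byte 257-n times);
    n == 128 is a no-op.
    >>> decodepackbits(b'\\xfe\\xaa\\x02\\x80\\x00*')  # doctest: +SKIP
    b'\\xaa\\xaa\\xaa\\x80\\x00*'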
"""
func = ord if sys.version[0] == '2' else lambda x: x
result = []
result_extend = result.extend
i = 0
try:
while True:
n = func(encoded[i]) + 1
i += 1
if n < 129:
result_extend(encoded[i:i+n])
i += n
elif n > 129:
result_extend(encoded[i:i+1] * (258-n))
i += 1
except IndexError:
pass
return b''.join(result) if sys.version[0] == '2' else bytes(result)
@_replace_by('_tifffile.decodelzw')
def decodelzw(encoded):
"""Decompress LZW (Lempel-Ziv-Welch) encoded TIFF strip (byte string).
The strip must begin with a CLEAR code and end with an EOI code.
This is an implementation of the LZW decoding algorithm described in (1).
It is not compatible with old style LZW compressed files like quad-lzw.tif.
"""
len_encoded = len(encoded)
bitcount_max = len_encoded * 8
unpack = struct.unpack
if sys.version[0] == '2':
newtable = [chr(i) for i in range(256)]
else:
newtable = [bytes([i]) for i in range(256)]
newtable.extend((0, 0))
def next_code():
"""Return integer of `bitw` bits at `bitcount` position in encoded."""
start = bitcount // 8
s = encoded[start:start+4]
try:
code = unpack('>I', s)[0]
except Exception:
code = unpack('>I', s + b'\x00'*(4-len(s)))[0]
code <<= bitcount % 8
code &= mask
return code >> shr
switchbitch = { # code: bit-width, shr-bits, bit-mask
255: (9, 23, int(9*'1'+'0'*23, 2)),
511: (10, 22, int(10*'1'+'0'*22, 2)),
1023: (11, 21, int(11*'1'+'0'*21, 2)),
2047: (12, 20, int(12*'1'+'0'*20, 2)), }
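    # Codes start at 9 bits and widen to 10/11/12 bits once the string
    # table grows past 511/1023/2047 entries; a CLEAR code resets it.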
bitw, shr, mask = switchbitch[255]
bitcount = 0
if len_encoded < 4:
raise ValueError("strip must be at least 4 characters long")
if next_code() != 256:
raise ValueError("strip must begin with CLEAR code")
code = 0
oldcode = 0
result = []
result_append = result.append
while True:
code = next_code() # ~5% faster when inlining this function
bitcount += bitw
if code == 257 or bitcount >= bitcount_max: # EOI
break
if code == 256: # CLEAR
table = newtable[:]
table_append = table.append
lentable = 258
bitw, shr, mask = switchbitch[255]
code = next_code()
bitcount += bitw
if code == 257: # EOI
break
result_append(table[code])
else:
if code < lentable:
decoded = table[code]
newcode = table[oldcode] + decoded[:1]
else:
newcode = table[oldcode]
newcode += newcode[:1]
decoded = newcode
result_append(decoded)
table_append(newcode)
lentable += 1
oldcode = code
if lentable in switchbitch:
bitw, shr, mask = switchbitch[lentable]
if code != 257:
warnings.warn("unexpected end of lzw stream (code %i)" % code)
return b''.join(result)
@_replace_by('_tifffile.unpackints')
def unpackints(data, dtype, itemsize, runlen=0):
"""Decompress byte string to array of integers of any bit size <= 32.
Parameters
----------
data : byte str
Data to decompress.
dtype : numpy.dtype or str
A numpy boolean or integer type.
itemsize : int
Number of bits per integer.
runlen : int
Number of consecutive integers, after which to start at next byte.
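    For example, two 4-bit samples packed into one byte (a sketch):
    >>> unpackints(b'\\xab', 'B', 4, runlen=2)  # doctest: +SKIP
    array([10, 11], dtype=uint8)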
"""
if itemsize == 1: # bitarray
data = numpy.fromstring(data, '|B')
data = numpy.unpackbits(data)
if runlen % 8:
data = data.reshape(-1, runlen + (8 - runlen % 8))
data = data[:, :runlen].reshape(-1)
return data.astype(dtype)
dtype = numpy.dtype(dtype)
if itemsize in (8, 16, 32, 64):
return numpy.fromstring(data, dtype)
if itemsize < 1 or itemsize > 32:
raise ValueError("itemsize out of range: %i" % itemsize)
if dtype.kind not in "biu":
raise ValueError("invalid dtype")
itembytes = next(i for i in (1, 2, 4, 8) if 8 * i >= itemsize)
if itembytes != dtype.itemsize:
raise ValueError("dtype.itemsize too small")
if runlen == 0:
runlen = len(data) // itembytes
skipbits = runlen*itemsize % 8
if skipbits:
skipbits = 8 - skipbits
shrbits = itembytes*8 - itemsize
bitmask = int(itemsize*'1'+'0'*shrbits, 2)
dtypestr = '>' + dtype.char # dtype always big endian?
unpack = struct.unpack
l = runlen * (len(data)*8 // (runlen*itemsize + skipbits))
result = numpy.empty((l, ), dtype)
bitcount = 0
for i in range(len(result)):
start = bitcount // 8
s = data[start:start+itembytes]
try:
code = unpack(dtypestr, s)[0]
except Exception:
code = unpack(dtypestr, s + b'\x00'*(itembytes-len(s)))[0]
code <<= bitcount % 8
code &= bitmask
result[i] = code >> shrbits
bitcount += itemsize
if (i+1) % runlen == 0:
bitcount += skipbits
return result
def unpackrgb(data, dtype='<B', bitspersample=(5, 6, 5), rescale=True):
"""Return array from byte string containing packed samples.
Use to unpack RGB565 or RGB555 to RGB888 format.
Parameters
----------
data : byte str
The data to be decoded. Samples in each pixel are stored consecutively.
Pixels are aligned to 8, 16, or 32 bit boundaries.
dtype : numpy.dtype
The sample data type. The byteorder applies also to the data stream.
bitspersample : tuple
Number of bits for each sample in a pixel.
rescale : bool
Upscale samples to the number of bits in dtype.
Returns
-------
result : ndarray
Flattened array of unpacked samples of native dtype.
Examples
--------
>>> data = struct.pack('BBBB', 0x21, 0x08, 0xff, 0xff)
>>> print(unpackrgb(data, '<B', (5, 6, 5), False))
[ 1 1 1 31 63 31]
>>> print(unpackrgb(data, '<B', (5, 6, 5)))
[ 8 4 8 255 255 255]
>>> print(unpackrgb(data, '<B', (5, 5, 5)))
[ 16 8 8 255 255 255]
"""
dtype = numpy.dtype(dtype)
bits = int(numpy.sum(bitspersample))
if not (bits <= 32 and all(i <= dtype.itemsize*8 for i in bitspersample)):
raise ValueError("sample size not supported %s" % str(bitspersample))
dt = next(i for i in 'BHI' if numpy.dtype(i).itemsize*8 >= bits)
data = numpy.fromstring(data, dtype.byteorder+dt)
result = numpy.empty((data.size, len(bitspersample)), dtype.char)
for i, bps in enumerate(bitspersample):
t = data >> int(numpy.sum(bitspersample[i+1:]))
t &= int('0b'+'1'*bps, 2)
if rescale:
o = ((dtype.itemsize * 8) // bps + 1) * bps
if o > data.dtype.itemsize * 8:
t = t.astype('I')
t *= (2**o - 1) // (2**bps - 1)
t //= 2**(o - (dtype.itemsize * 8))
result[:, i] = t
return result.reshape(-1)
def reorient(image, orientation):
"""Return reoriented view of image array.
Parameters
----------
image : numpy array
Non-squeezed output of asarray() functions.
Axes -3 and -2 must be image length and width respectively.
orientation : int or str
One of TIFF_ORIENTATIONS keys or values.
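    For example (a sketch), orientation 3 ('bottom_right') flips both axes:
    >>> reorient(numpy.arange(4).reshape(2, 2, 1), 3)[..., 0]  # doctest: +SKIP
    array([[3, 2],
           [1, 0]])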
"""
o = TIFF_ORIENTATIONS.get(orientation, orientation)
if o == 'top_left':
return image
elif o == 'top_right':
return image[..., ::-1, :]
elif o == 'bottom_left':
return image[..., ::-1, :, :]
elif o == 'bottom_right':
return image[..., ::-1, ::-1, :]
elif o == 'left_top':
return numpy.swapaxes(image, -3, -2)
elif o == 'right_top':
return numpy.swapaxes(image, -3, -2)[..., ::-1, :]
elif o == 'left_bottom':
return numpy.swapaxes(image, -3, -2)[..., ::-1, :, :]
elif o == 'right_bottom':
return numpy.swapaxes(image, -3, -2)[..., ::-1, ::-1, :]
def squeeze_axes(shape, axes, skip='XY'):
"""Return shape and axes with single-dimensional entries removed.
Remove unused dimensions unless their axes are listed in 'skip'.
>>> squeeze_axes((5, 1, 2, 1, 1), 'TZYXC')
((5, 2, 1), 'TYX')
"""
if len(shape) != len(axes):
raise ValueError("dimensions of axes and shape don't match")
shape, axes = zip(*(i for i in zip(shape, axes)
if i[0] > 1 or i[1] in skip))
return shape, ''.join(axes)
def transpose_axes(data, axes, asaxes='CTZYX'):
"""Return data with its axes permuted to match specified axes.
A view is returned if possible.
>>> transpose_axes(numpy.zeros((2, 3, 4, 5)), 'TYXC', asaxes='CTZYX').shape
(5, 2, 1, 3, 4)
"""
for ax in axes:
if ax not in asaxes:
raise ValueError("unknown axis %s" % ax)
# add missing axes to data
shape = data.shape
for ax in reversed(asaxes):
if ax not in axes:
axes = ax + axes
shape = (1,) + shape
data = data.reshape(shape)
# transpose axes
data = data.transpose([axes.index(ax) for ax in asaxes])
return data
def stack_pages(pages, memmap=False, *args, **kwargs):
"""Read data from sequence of TiffPage and stack them vertically.
If memmap is True, return an array stored in a binary file on disk.
    Additional parameters are passed to the page asarray function.
"""
if len(pages) == 0:
raise ValueError("no pages")
if len(pages) == 1:
return pages[0].asarray(memmap=memmap, *args, **kwargs)
result = pages[0].asarray(*args, **kwargs)
shape = (len(pages),) + result.shape
if memmap:
with tempfile.NamedTemporaryFile() as fh:
result = numpy.memmap(fh, dtype=result.dtype, shape=shape)
else:
result = numpy.empty(shape, dtype=result.dtype)
for i, page in enumerate(pages):
result[i] = page.asarray(*args, **kwargs)
return result
def stripnull(string):
"""Return string truncated at first null character.
Clean NULL terminated C strings.
>>> stripnull(b'string\\x00') # doctest: +SKIP
b'string'
"""
i = string.find(b'\x00')
return string if (i < 0) else string[:i]
def stripascii(string):
"""Return string truncated at last byte that is 7bit ASCII.
Clean NULL separated and terminated TIFF strings.
>>> stripascii(b'string\\x00string\\n\\x01\\x00') # doctest: +SKIP
b'string\\x00string\\n'
>>> stripascii(b'\\x00') # doctest: +SKIP
b''
"""
# TODO: pythonize this
ord_ = ord if sys.version_info[0] < 3 else lambda x: x
i = len(string)
while i:
i -= 1
if 8 < ord_(string[i]) < 127:
break
else:
i = -1
return string[:i+1]
def format_size(size):
"""Return file size as string from byte size."""
for unit in ('B', 'KB', 'MB', 'GB', 'TB'):
if size < 2048:
return "%.f %s" % (size, unit)
size /= 1024.0
def sequence(value):
"""Return tuple containing value if value is not a sequence.
>>> sequence(1)
(1,)
>>> sequence([1])
[1]
"""
try:
len(value)
return value
except TypeError:
return (value, )
def product(iterable):
"""Return product of sequence of numbers.
Equivalent of functools.reduce(operator.mul, iterable, 1).
>>> product([2**8, 2**30])
274877906944
>>> product([])
1
"""
prod = 1
for i in iterable:
prod *= i
return prod
def natural_sorted(iterable):
"""Return human sorted list of strings.
E.g. for sorting file names.
>>> natural_sorted(['f1', 'f2', 'f10'])
['f1', 'f2', 'f10']
"""
def sortkey(x):
return [(int(c) if c.isdigit() else c) for c in re.split(numbers, x)]
numbers = re.compile(r'(\d+)')
return sorted(iterable, key=sortkey)
def excel_datetime(timestamp, epoch=datetime.datetime.fromordinal(693594)):
"""Return datetime object from timestamp in Excel serial format.
Convert LSM time stamps.
>>> excel_datetime(40237.029999999795)
datetime.datetime(2010, 2, 28, 0, 43, 11, 999982)
"""
return epoch + datetime.timedelta(timestamp)
def julian_datetime(julianday, milisecond=0):
"""Return datetime from days since 1/1/4713 BC and ms since midnight.
Convert Julian dates according to MetaMorph.
>>> julian_datetime(2451576, 54362783)
datetime.datetime(2000, 2, 2, 15, 6, 2, 783)
"""
if julianday <= 1721423:
# no datetime before year 1
return None
a = julianday + 1
if a > 2299160:
alpha = math.trunc((a - 1867216.25) / 36524.25)
a += 1 + alpha - alpha // 4
b = a + (1524 if a > 1721423 else 1158)
c = math.trunc((b - 122.1) / 365.25)
d = math.trunc(365.25 * c)
e = math.trunc((b - d) / 30.6001)
day = b - d - math.trunc(30.6001 * e)
month = e - (1 if e < 13.5 else 13)
year = c - (4716 if month > 2.5 else 4715)
hour, milisecond = divmod(milisecond, 1000 * 60 * 60)
minute, milisecond = divmod(milisecond, 1000 * 60)
second, milisecond = divmod(milisecond, 1000)
return datetime.datetime(year, month, day,
hour, minute, second, milisecond)
def test_tifffile(directory='testimages', verbose=True):
"""Read all images in directory.
Print error message on failure.
>>> test_tifffile(verbose=False)
"""
successful = 0
failed = 0
start = time.time()
for f in glob.glob(os.path.join(directory, '*.*')):
if verbose:
print("\n%s>\n" % f.lower(), end='')
t0 = time.time()
try:
tif = TiffFile(f, multifile=True)
except Exception as e:
if not verbose:
print(f, end=' ')
print("ERROR:", e)
failed += 1
continue
try:
img = tif.asarray()
except ValueError:
try:
img = tif[0].asarray()
except Exception as e:
if not verbose:
print(f, end=' ')
print("ERROR:", e)
failed += 1
continue
finally:
tif.close()
successful += 1
if verbose:
print("%s, %s %s, %s, %.0f ms" % (
str(tif), str(img.shape), img.dtype, tif[0].compression,
(time.time()-t0) * 1e3))
if verbose:
print("\nSuccessfully read %i of %i files in %.3f s\n" % (
successful, successful+failed, time.time()-start))
class TIFF_SUBFILE_TYPES(object):
def __getitem__(self, key):
result = []
if key & 1:
result.append('reduced_image')
if key & 2:
result.append('page')
if key & 4:
result.append('mask')
return tuple(result)
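# e.g. TIFF_SUBFILE_TYPES()[5] -> ('reduced_image', 'mask')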
TIFF_PHOTOMETRICS = {
0: 'miniswhite',
1: 'minisblack',
2: 'rgb',
3: 'palette',
4: 'mask',
5: 'separated', # CMYK
6: 'ycbcr',
8: 'cielab',
9: 'icclab',
10: 'itulab',
32803: 'cfa', # Color Filter Array
32844: 'logl',
32845: 'logluv',
34892: 'linear_raw'
}
TIFF_COMPESSIONS = {
1: None,
2: 'ccittrle',
3: 'ccittfax3',
4: 'ccittfax4',
5: 'lzw',
6: 'ojpeg',
7: 'jpeg',
8: 'adobe_deflate',
9: 't85',
10: 't43',
32766: 'next',
32771: 'ccittrlew',
32773: 'packbits',
32809: 'thunderscan',
32895: 'it8ctpad',
32896: 'it8lw',
32897: 'it8mp',
32898: 'it8bl',
32908: 'pixarfilm',
32909: 'pixarlog',
32946: 'deflate',
32947: 'dcs',
34661: 'jbig',
34676: 'sgilog',
34677: 'sgilog24',
34712: 'jp2000',
34713: 'nef',
}
TIFF_DECOMPESSORS = {
None: lambda x: x,
'adobe_deflate': zlib.decompress,
'deflate': zlib.decompress,
'packbits': decodepackbits,
'lzw': decodelzw,
# 'jpeg': decodejpg
}
TIFF_DATA_TYPES = {
1: '1B', # BYTE 8-bit unsigned integer.
2: '1s', # ASCII 8-bit byte that contains a 7-bit ASCII code;
# the last byte must be NULL (binary zero).
3: '1H', # SHORT 16-bit (2-byte) unsigned integer
4: '1I', # LONG 32-bit (4-byte) unsigned integer.
5: '2I', # RATIONAL Two LONGs: the first represents the numerator of
# a fraction; the second, the denominator.
6: '1b', # SBYTE An 8-bit signed (twos-complement) integer.
7: '1s', # UNDEFINED An 8-bit byte that may contain anything,
# depending on the definition of the field.
8: '1h', # SSHORT A 16-bit (2-byte) signed (twos-complement) integer.
9: '1i', # SLONG A 32-bit (4-byte) signed (twos-complement) integer.
10: '2i', # SRATIONAL Two SLONGs: the first represents the numerator
# of a fraction, the second the denominator.
11: '1f', # FLOAT Single precision (4-byte) IEEE format.
12: '1d', # DOUBLE Double precision (8-byte) IEEE format.
13: '1I', # IFD unsigned 4 byte IFD offset.
#14: '', # UNICODE
#15: '', # COMPLEX
16: '1Q', # LONG8 unsigned 8 byte integer (BigTiff)
17: '1q', # SLONG8 signed 8 byte integer (BigTiff)
18: '1Q', # IFD8 unsigned 8 byte IFD offset (BigTiff)
}
TIFF_SAMPLE_FORMATS = {
1: 'uint',
2: 'int',
3: 'float',
#4: 'void',
#5: 'complex_int',
6: 'complex',
}
TIFF_SAMPLE_DTYPES = {
('uint', 1): '?', # bitmap
('uint', 2): 'B',
('uint', 3): 'B',
('uint', 4): 'B',
('uint', 5): 'B',
('uint', 6): 'B',
('uint', 7): 'B',
('uint', 8): 'B',
('uint', 9): 'H',
('uint', 10): 'H',
('uint', 11): 'H',
('uint', 12): 'H',
('uint', 13): 'H',
('uint', 14): 'H',
('uint', 15): 'H',
('uint', 16): 'H',
('uint', 17): 'I',
('uint', 18): 'I',
('uint', 19): 'I',
('uint', 20): 'I',
('uint', 21): 'I',
('uint', 22): 'I',
('uint', 23): 'I',
('uint', 24): 'I',
('uint', 25): 'I',
('uint', 26): 'I',
('uint', 27): 'I',
('uint', 28): 'I',
('uint', 29): 'I',
('uint', 30): 'I',
('uint', 31): 'I',
('uint', 32): 'I',
('uint', 64): 'Q',
('int', 8): 'b',
('int', 16): 'h',
('int', 32): 'i',
('int', 64): 'q',
('float', 16): 'e',
('float', 32): 'f',
('float', 64): 'd',
('complex', 64): 'F',
('complex', 128): 'D',
('uint', (5, 6, 5)): 'B',
}
TIFF_ORIENTATIONS = {
1: 'top_left',
2: 'top_right',
3: 'bottom_right',
4: 'bottom_left',
5: 'left_top',
6: 'right_top',
7: 'right_bottom',
8: 'left_bottom',
}
# TODO: is there a standard for character axes labels?
AXES_LABELS = {
'X': 'width',
'Y': 'height',
'Z': 'depth',
'S': 'sample', # rgb(a)
'I': 'series', # general sequence, plane, page, IFD
'T': 'time',
'C': 'channel', # color, emission wavelength
'A': 'angle',
    'P': 'phase',  # formerly F; P is Position in LSM
'R': 'tile', # region, point, mosaic
'H': 'lifetime', # histogram
'E': 'lambda', # excitation wavelength
'L': 'exposure', # lux
'V': 'event',
'Q': 'other',
#'M': 'mosaic', # LSM 6
}
AXES_LABELS.update(dict((v, k) for k, v in AXES_LABELS.items()))
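# The table is now bidirectional: AXES_LABELS['X'] == 'width' and
# AXES_LABELS['width'] == 'X'.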
# Map OME pixel types to numpy dtype
OME_PIXEL_TYPES = {
'int8': 'i1',
'int16': 'i2',
'int32': 'i4',
'uint8': 'u1',
'uint16': 'u2',
'uint32': 'u4',
'float': 'f4',
# 'bit': 'bit',
'double': 'f8',
'complex': 'c8',
'double-complex': 'c16',
}
# NIH Image PicHeader v1.63
NIH_IMAGE_HEADER = [
('fileid', 'a8'),
('nlines', 'i2'),
('pixelsperline', 'i2'),
('version', 'i2'),
('oldlutmode', 'i2'),
('oldncolors', 'i2'),
('colors', 'u1', (3, 32)),
('oldcolorstart', 'i2'),
('colorwidth', 'i2'),
('extracolors', 'u2', (6, 3)),
('nextracolors', 'i2'),
('foregroundindex', 'i2'),
('backgroundindex', 'i2'),
('xscale', 'f8'),
('_x0', 'i2'),
('_x1', 'i2'),
('units_t', 'i2'), # NIH_UNITS_TYPE
('p1', [('x', 'i2'), ('y', 'i2')]),
('p2', [('x', 'i2'), ('y', 'i2')]),
('curvefit_t', 'i2'), # NIH_CURVEFIT_TYPE
('ncoefficients', 'i2'),
('coeff', 'f8', 6),
('_um_len', 'u1'),
('um', 'a15'),
('_x2', 'u1'),
('binarypic', 'b1'),
('slicestart', 'i2'),
('sliceend', 'i2'),
('scalemagnification', 'f4'),
('nslices', 'i2'),
('slicespacing', 'f4'),
('currentslice', 'i2'),
('frameinterval', 'f4'),
('pixelaspectratio', 'f4'),
('colorstart', 'i2'),
('colorend', 'i2'),
('ncolors', 'i2'),
('fill1', '3u2'),
('fill2', '3u2'),
('colortable_t', 'u1'), # NIH_COLORTABLE_TYPE
('lutmode_t', 'u1'), # NIH_LUTMODE_TYPE
('invertedtable', 'b1'),
('zeroclip', 'b1'),
('_xunit_len', 'u1'),
('xunit', 'a11'),
('stacktype_t', 'i2'), # NIH_STACKTYPE_TYPE
]
NIH_COLORTABLE_TYPE = (
'CustomTable', 'AppleDefault', 'Pseudo20', 'Pseudo32', 'Rainbow',
'Fire1', 'Fire2', 'Ice', 'Grays', 'Spectrum')
NIH_LUTMODE_TYPE = (
'PseudoColor', 'OldAppleDefault', 'OldSpectrum', 'GrayScale',
'ColorLut', 'CustomGrayscale')
NIH_CURVEFIT_TYPE = (
'StraightLine', 'Poly2', 'Poly3', 'Poly4', 'Poly5', 'ExpoFit',
'PowerFit', 'LogFit', 'RodbardFit', 'SpareFit1', 'Uncalibrated',
'UncalibratedOD')
NIH_UNITS_TYPE = (
'Nanometers', 'Micrometers', 'Millimeters', 'Centimeters', 'Meters',
'Kilometers', 'Inches', 'Feet', 'Miles', 'Pixels', 'OtherUnits')
NIH_STACKTYPE_TYPE = (
'VolumeStack', 'RGBStack', 'MovieStack', 'HSVStack')
# Map Universal Imaging Corporation MetaMorph internal tag ids to name and type
UIC_TAGS = {
0: ('auto_scale', int),
1: ('min_scale', int),
2: ('max_scale', int),
3: ('spatial_calibration', int),
4: ('x_calibration', Fraction),
5: ('y_calibration', Fraction),
6: ('calibration_units', str),
7: ('name', str),
8: ('thresh_state', int),
9: ('thresh_state_red', int),
10: ('tagid_10', None), # undefined
11: ('thresh_state_green', int),
12: ('thresh_state_blue', int),
13: ('thresh_state_lo', int),
14: ('thresh_state_hi', int),
15: ('zoom', int),
16: ('create_time', julian_datetime),
17: ('last_saved_time', julian_datetime),
18: ('current_buffer', int),
19: ('gray_fit', None),
20: ('gray_point_count', None),
21: ('gray_x', Fraction),
22: ('gray_y', Fraction),
23: ('gray_min', Fraction),
24: ('gray_max', Fraction),
25: ('gray_unit_name', str),
26: ('standard_lut', int),
27: ('wavelength', int),
28: ('stage_position', '(%i,2,2)u4'), # N xy positions as fractions
29: ('camera_chip_offset', '(%i,2,2)u4'), # N xy offsets as fractions
30: ('overlay_mask', None),
31: ('overlay_compress', None),
32: ('overlay', None),
33: ('special_overlay_mask', None),
34: ('special_overlay_compress', None),
35: ('special_overlay', None),
36: ('image_property', read_uic_image_property),
37: ('stage_label', '%ip'), # N str
38: ('autoscale_lo_info', Fraction),
39: ('autoscale_hi_info', Fraction),
40: ('absolute_z', '(%i,2)u4'), # N fractions
41: ('absolute_z_valid', '(%i,)u4'), # N long
42: ('gamma', int),
43: ('gamma_red', int),
44: ('gamma_green', int),
45: ('gamma_blue', int),
46: ('camera_bin', int),
47: ('new_lut', int),
48: ('image_property_ex', None),
49: ('plane_property', int),
50: ('user_lut_table', '(256,3)u1'),
51: ('red_autoscale_info', int),
52: ('red_autoscale_lo_info', Fraction),
53: ('red_autoscale_hi_info', Fraction),
54: ('red_minscale_info', int),
55: ('red_maxscale_info', int),
56: ('green_autoscale_info', int),
57: ('green_autoscale_lo_info', Fraction),
58: ('green_autoscale_hi_info', Fraction),
59: ('green_minscale_info', int),
60: ('green_maxscale_info', int),
61: ('blue_autoscale_info', int),
62: ('blue_autoscale_lo_info', Fraction),
63: ('blue_autoscale_hi_info', Fraction),
64: ('blue_min_scale_info', int),
65: ('blue_max_scale_info', int),
#66: ('overlay_plane_color', read_uic_overlay_plane_color),
}
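# Types given as strings are numpy dtype templates; the '%i' placeholder is
# presumably filled with the value count N read from the file before parsing,
# e.g. '(%i,2,2)u4' % 3 == '(3,2,2)u4' for three xy positions as fractions.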
# Olympus FluoView
MM_DIMENSION = [
('name', 'a16'),
('size', 'i4'),
('origin', 'f8'),
('resolution', 'f8'),
('unit', 'a64'),
]
MM_HEADER = [
('header_flag', 'i2'),
('image_type', 'u1'),
('image_name', 'a257'),
('offset_data', 'u4'),
('palette_size', 'i4'),
('offset_palette0', 'u4'),
('offset_palette1', 'u4'),
('comment_size', 'i4'),
('offset_comment', 'u4'),
('dimensions', MM_DIMENSION, 10),
('offset_position', 'u4'),
('map_type', 'i2'),
('map_min', 'f8'),
('map_max', 'f8'),
('min_value', 'f8'),
('max_value', 'f8'),
('offset_map', 'u4'),
('gamma', 'f8'),
('offset', 'f8'),
('gray_channel', MM_DIMENSION),
('offset_thumbnail', 'u4'),
('voice_field', 'i4'),
('offset_voice_field', 'u4'),
]
# Carl Zeiss LSM
CZ_LSM_INFO = [
('magic_number', 'u4'),
('structure_size', 'i4'),
('dimension_x', 'i4'),
('dimension_y', 'i4'),
('dimension_z', 'i4'),
('dimension_channels', 'i4'),
('dimension_time', 'i4'),
('data_type', 'i4'), # CZ_DATA_TYPES
('thumbnail_x', 'i4'),
('thumbnail_y', 'i4'),
('voxel_size_x', 'f8'),
('voxel_size_y', 'f8'),
('voxel_size_z', 'f8'),
('origin_x', 'f8'),
('origin_y', 'f8'),
('origin_z', 'f8'),
('scan_type', 'u2'),
('spectral_scan', 'u2'),
('type_of_data', 'u4'), # CZ_TYPE_OF_DATA
('offset_vector_overlay', 'u4'),
('offset_input_lut', 'u4'),
('offset_output_lut', 'u4'),
('offset_channel_colors', 'u4'),
('time_interval', 'f8'),
('offset_channel_data_types', 'u4'),
('offset_scan_info', 'u4'), # CZ_LSM_SCAN_INFO
('offset_ks_data', 'u4'),
('offset_time_stamps', 'u4'),
('offset_event_list', 'u4'),
('offset_roi', 'u4'),
('offset_bleach_roi', 'u4'),
('offset_next_recording', 'u4'),
# LSM 2.0 ends here
('display_aspect_x', 'f8'),
('display_aspect_y', 'f8'),
('display_aspect_z', 'f8'),
('display_aspect_time', 'f8'),
('offset_mean_of_roi_overlay', 'u4'),
('offset_topo_isoline_overlay', 'u4'),
('offset_topo_profile_overlay', 'u4'),
('offset_linescan_overlay', 'u4'),
('offset_toolbar_flags', 'u4'),
('offset_channel_wavelength', 'u4'),
('offset_channel_factors', 'u4'),
('objective_sphere_correction', 'f8'),
('offset_unmix_parameters', 'u4'),
# LSM 3.2, 4.0 end here
('offset_acquisition_parameters', 'u4'),
('offset_characteristics', 'u4'),
('offset_palette', 'u4'),
('time_difference_x', 'f8'),
('time_difference_y', 'f8'),
('time_difference_z', 'f8'),
('internal_use_1', 'u4'),
('dimension_p', 'i4'),
('dimension_m', 'i4'),
('dimensions_reserved', '16i4'),
('offset_tile_positions', 'u4'),
('reserved_1', '9u4'),
('offset_positions', 'u4'),
('reserved_2', '21u4'), # must be 0
]
# Import functions for LSM_INFO sub-records
CZ_LSM_INFO_READERS = {
'scan_info': read_cz_lsm_scan_info,
'time_stamps': read_cz_lsm_time_stamps,
'event_list': read_cz_lsm_event_list,
'channel_colors': read_cz_lsm_floatpairs,
'positions': read_cz_lsm_floatpairs,
'tile_positions': read_cz_lsm_floatpairs,
}
# Map cz_lsm_info.scan_type to dimension order
CZ_SCAN_TYPES = {
0: 'XYZCT', # x-y-z scan
1: 'XYZCT', # z scan (x-z plane)
2: 'XYZCT', # line scan
3: 'XYTCZ', # time series x-y
4: 'XYZTC', # time series x-z
5: 'XYTCZ', # time series 'Mean of ROIs'
6: 'XYZTC', # time series x-y-z
7: 'XYCTZ', # spline scan
8: 'XYCZT', # spline scan x-z
9: 'XYTCZ', # time series spline plane x-z
10: 'XYZCT', # point mode
}
# Map dimension codes to cz_lsm_info attribute
CZ_DIMENSIONS = {
'X': 'dimension_x',
'Y': 'dimension_y',
'Z': 'dimension_z',
'C': 'dimension_channels',
'T': 'dimension_time',
}
# Description of cz_lsm_info.data_type
CZ_DATA_TYPES = {
0: 'varying data types',
1: '8 bit unsigned integer',
2: '12 bit unsigned integer',
5: '32 bit float',
}
# Description of cz_lsm_info.type_of_data
CZ_TYPE_OF_DATA = {
0: 'Original scan data',
1: 'Calculated data',
2: '3D reconstruction',
3: 'Topography height map',
}
CZ_LSM_SCAN_INFO_ARRAYS = {
0x20000000: "tracks",
0x30000000: "lasers",
0x60000000: "detection_channels",
0x80000000: "illumination_channels",
0xa0000000: "beam_splitters",
0xc0000000: "data_channels",
0x11000000: "timers",
0x13000000: "markers",
}
CZ_LSM_SCAN_INFO_STRUCTS = {
# 0x10000000: "recording",
0x40000000: "track",
0x50000000: "laser",
0x70000000: "detection_channel",
0x90000000: "illumination_channel",
0xb0000000: "beam_splitter",
0xd0000000: "data_channel",
0x12000000: "timer",
0x14000000: "marker",
}
CZ_LSM_SCAN_INFO_ATTRIBUTES = {
# recording
0x10000001: "name",
0x10000002: "description",
0x10000003: "notes",
0x10000004: "objective",
0x10000005: "processing_summary",
0x10000006: "special_scan_mode",
0x10000007: "scan_type",
0x10000008: "scan_mode",
0x10000009: "number_of_stacks",
0x1000000a: "lines_per_plane",
0x1000000b: "samples_per_line",
0x1000000c: "planes_per_volume",
0x1000000d: "images_width",
0x1000000e: "images_height",
0x1000000f: "images_number_planes",
0x10000010: "images_number_stacks",
0x10000011: "images_number_channels",
0x10000012: "linscan_xy_size",
0x10000013: "scan_direction",
0x10000014: "time_series",
0x10000015: "original_scan_data",
0x10000016: "zoom_x",
0x10000017: "zoom_y",
0x10000018: "zoom_z",
0x10000019: "sample_0x",
0x1000001a: "sample_0y",
0x1000001b: "sample_0z",
0x1000001c: "sample_spacing",
0x1000001d: "line_spacing",
0x1000001e: "plane_spacing",
0x1000001f: "plane_width",
0x10000020: "plane_height",
0x10000021: "volume_depth",
0x10000023: "nutation",
0x10000034: "rotation",
0x10000035: "precession",
0x10000036: "sample_0time",
0x10000037: "start_scan_trigger_in",
0x10000038: "start_scan_trigger_out",
0x10000039: "start_scan_event",
0x10000040: "start_scan_time",
0x10000041: "stop_scan_trigger_in",
0x10000042: "stop_scan_trigger_out",
0x10000043: "stop_scan_event",
0x10000044: "stop_scan_time",
0x10000045: "use_rois",
0x10000046: "use_reduced_memory_rois",
0x10000047: "user",
0x10000048: "use_bc_correction",
0x10000049: "position_bc_correction1",
0x10000050: "position_bc_correction2",
0x10000051: "interpolation_y",
0x10000052: "camera_binning",
0x10000053: "camera_supersampling",
0x10000054: "camera_frame_width",
0x10000055: "camera_frame_height",
0x10000056: "camera_offset_x",
0x10000057: "camera_offset_y",
0x10000059: "rt_binning",
0x1000005a: "rt_frame_width",
0x1000005b: "rt_frame_height",
0x1000005c: "rt_region_width",
0x1000005d: "rt_region_height",
0x1000005e: "rt_offset_x",
0x1000005f: "rt_offset_y",
0x10000060: "rt_zoom",
0x10000061: "rt_line_period",
0x10000062: "prescan",
0x10000063: "scan_direction_z",
# track
0x40000001: "multiplex_type", # 0 after line; 1 after frame
0x40000002: "multiplex_order",
0x40000003: "sampling_mode", # 0 sample; 1 line average; 2 frame average
0x40000004: "sampling_method", # 1 mean; 2 sum
0x40000005: "sampling_number",
0x40000006: "acquire",
0x40000007: "sample_observation_time",
0x4000000b: "time_between_stacks",
0x4000000c: "name",
0x4000000d: "collimator1_name",
0x4000000e: "collimator1_position",
0x4000000f: "collimator2_name",
0x40000010: "collimator2_position",
0x40000011: "is_bleach_track",
0x40000012: "is_bleach_after_scan_number",
0x40000013: "bleach_scan_number",
0x40000014: "trigger_in",
0x40000015: "trigger_out",
0x40000016: "is_ratio_track",
0x40000017: "bleach_count",
0x40000018: "spi_center_wavelength",
0x40000019: "pixel_time",
0x40000021: "condensor_frontlens",
0x40000023: "field_stop_value",
0x40000024: "id_condensor_aperture",
0x40000025: "condensor_aperture",
0x40000026: "id_condensor_revolver",
0x40000027: "condensor_filter",
0x40000028: "id_transmission_filter1",
0x40000029: "id_transmission1",
0x40000030: "id_transmission_filter2",
0x40000031: "id_transmission2",
0x40000032: "repeat_bleach",
0x40000033: "enable_spot_bleach_pos",
0x40000034: "spot_bleach_posx",
0x40000035: "spot_bleach_posy",
0x40000036: "spot_bleach_posz",
0x40000037: "id_tubelens",
0x40000038: "id_tubelens_position",
0x40000039: "transmitted_light",
0x4000003a: "reflected_light",
0x4000003b: "simultan_grab_and_bleach",
0x4000003c: "bleach_pixel_time",
# laser
0x50000001: "name",
0x50000002: "acquire",
0x50000003: "power",
# detection_channel
0x70000001: "integration_mode",
0x70000002: "special_mode",
0x70000003: "detector_gain_first",
0x70000004: "detector_gain_last",
0x70000005: "amplifier_gain_first",
0x70000006: "amplifier_gain_last",
0x70000007: "amplifier_offs_first",
0x70000008: "amplifier_offs_last",
0x70000009: "pinhole_diameter",
0x7000000a: "counting_trigger",
0x7000000b: "acquire",
0x7000000c: "point_detector_name",
0x7000000d: "amplifier_name",
0x7000000e: "pinhole_name",
0x7000000f: "filter_set_name",
0x70000010: "filter_name",
0x70000013: "integrator_name",
0x70000014: "channel_name",
0x70000015: "detector_gain_bc1",
0x70000016: "detector_gain_bc2",
0x70000017: "amplifier_gain_bc1",
0x70000018: "amplifier_gain_bc2",
0x70000019: "amplifier_offset_bc1",
0x70000020: "amplifier_offset_bc2",
0x70000021: "spectral_scan_channels",
0x70000022: "spi_wavelength_start",
0x70000023: "spi_wavelength_stop",
0x70000026: "dye_name",
0x70000027: "dye_folder",
# illumination_channel
0x90000001: "name",
0x90000002: "power",
0x90000003: "wavelength",
0x90000004: "aquire",
0x90000005: "detchannel_name",
0x90000006: "power_bc1",
0x90000007: "power_bc2",
# beam_splitter
0xb0000001: "filter_set",
0xb0000002: "filter",
0xb0000003: "name",
# data_channel
0xd0000001: "name",
0xd0000003: "acquire",
0xd0000004: "color",
0xd0000005: "sample_type",
0xd0000006: "bits_per_sample",
0xd0000007: "ratio_type",
0xd0000008: "ratio_track1",
0xd0000009: "ratio_track2",
0xd000000a: "ratio_channel1",
0xd000000b: "ratio_channel2",
0xd000000c: "ratio_const1",
0xd000000d: "ratio_const2",
0xd000000e: "ratio_const3",
0xd000000f: "ratio_const4",
0xd0000010: "ratio_const5",
0xd0000011: "ratio_const6",
0xd0000012: "ratio_first_images1",
0xd0000013: "ratio_first_images2",
0xd0000014: "dye_name",
0xd0000015: "dye_folder",
0xd0000016: "spectrum",
0xd0000017: "acquire",
# timer
0x12000001: "name",
0x12000002: "description",
0x12000003: "interval",
0x12000004: "trigger_in",
0x12000005: "trigger_out",
0x12000006: "activation_time",
0x12000007: "activation_number",
# marker
0x14000001: "name",
0x14000002: "description",
0x14000003: "trigger_in",
0x14000004: "trigger_out",
}
# Map TIFF tag code to attribute name, default value, type, count, validator
TIFF_TAGS = {
254: ('new_subfile_type', 0, 4, 1, TIFF_SUBFILE_TYPES()),
255: ('subfile_type', None, 3, 1,
{0: 'undefined', 1: 'image', 2: 'reduced_image', 3: 'page'}),
256: ('image_width', None, 4, 1, None),
257: ('image_length', None, 4, 1, None),
258: ('bits_per_sample', 1, 3, 1, None),
259: ('compression', 1, 3, 1, TIFF_COMPESSIONS),
262: ('photometric', None, 3, 1, TIFF_PHOTOMETRICS),
266: ('fill_order', 1, 3, 1, {1: 'msb2lsb', 2: 'lsb2msb'}),
269: ('document_name', None, 2, None, None),
270: ('image_description', None, 2, None, None),
271: ('make', None, 2, None, None),
272: ('model', None, 2, None, None),
273: ('strip_offsets', None, 4, None, None),
274: ('orientation', 1, 3, 1, TIFF_ORIENTATIONS),
277: ('samples_per_pixel', 1, 3, 1, None),
278: ('rows_per_strip', 2**32-1, 4, 1, None),
279: ('strip_byte_counts', None, 4, None, None),
280: ('min_sample_value', None, 3, None, None),
281: ('max_sample_value', None, 3, None, None), # 2**bits_per_sample
282: ('x_resolution', None, 5, 1, None),
283: ('y_resolution', None, 5, 1, None),
284: ('planar_configuration', 1, 3, 1, {1: 'contig', 2: 'separate'}),
285: ('page_name', None, 2, None, None),
286: ('x_position', None, 5, 1, None),
287: ('y_position', None, 5, 1, None),
296: ('resolution_unit', 2, 4, 1, {1: 'none', 2: 'inch', 3: 'centimeter'}),
297: ('page_number', None, 3, 2, None),
305: ('software', None, 2, None, None),
306: ('datetime', None, 2, None, None),
315: ('artist', None, 2, None, None),
316: ('host_computer', None, 2, None, None),
317: ('predictor', 1, 3, 1, {1: None, 2: 'horizontal'}),
318: ('white_point', None, 5, 2, None),
319: ('primary_chromaticities', None, 5, 6, None),
320: ('color_map', None, 3, None, None),
322: ('tile_width', None, 4, 1, None),
323: ('tile_length', None, 4, 1, None),
324: ('tile_offsets', None, 4, None, None),
325: ('tile_byte_counts', None, 4, None, None),
338: ('extra_samples', None, 3, None,
{0: 'unspecified', 1: 'assocalpha', 2: 'unassalpha'}),
339: ('sample_format', 1, 3, 1, TIFF_SAMPLE_FORMATS),
340: ('smin_sample_value', None, None, None, None),
341: ('smax_sample_value', None, None, None, None),
347: ('jpeg_tables', None, 7, None, None),
530: ('ycbcr_subsampling', 1, 3, 2, None),
531: ('ycbcr_positioning', 1, 3, 1, None),
    32995: ('sgi_matteing', None, None, 1, None),  # use extra_samples
32996: ('sgi_datatype', None, None, 1, None), # use sample_format
32997: ('image_depth', None, 4, 1, None),
32998: ('tile_depth', None, 4, 1, None),
33432: ('copyright', None, 1, None, None),
33445: ('md_file_tag', None, 4, 1, None),
33446: ('md_scale_pixel', None, 5, 1, None),
33447: ('md_color_table', None, 3, None, None),
33448: ('md_lab_name', None, 2, None, None),
33449: ('md_sample_info', None, 2, None, None),
33450: ('md_prep_date', None, 2, None, None),
33451: ('md_prep_time', None, 2, None, None),
33452: ('md_file_units', None, 2, None, None),
33550: ('model_pixel_scale', None, 12, 3, None),
33922: ('model_tie_point', None, 12, None, None),
34665: ('exif_ifd', None, None, 1, None),
34735: ('geo_key_directory', None, 3, None, None),
34736: ('geo_double_params', None, 12, None, None),
34737: ('geo_ascii_params', None, 2, None, None),
34853: ('gps_ifd', None, None, 1, None),
37510: ('user_comment', None, None, None, None),
42112: ('gdal_metadata', None, 2, None, None),
42113: ('gdal_nodata', None, 2, None, None),
50289: ('mc_xy_position', None, 12, 2, None),
50290: ('mc_z_position', None, 12, 1, None),
50291: ('mc_xy_calibration', None, 12, 3, None),
50292: ('mc_lens_lem_na_n', None, 12, 3, None),
50293: ('mc_channel_name', None, 1, None, None),
50294: ('mc_ex_wavelength', None, 12, 1, None),
50295: ('mc_time_stamp', None, 12, 1, None),
50838: ('imagej_byte_counts', None, None, None, None),
65200: ('flex_xml', None, 2, None, None),
# code: (attribute name, default value, type, count, validator)
}
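# Example: TIFF_TAGS[259] == ('compression', 1, 3, 1, TIFF_COMPESSIONS), i.e.
# tag 259 maps to the 'compression' attribute, defaults to 1 (uncompressed),
# is stored as a single SHORT (type 3), and is validated against the
# TIFF_COMPESSIONS table above.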
# Map custom TIFF tag codes to attribute names and import functions
CUSTOM_TAGS = {
700: ('xmp', read_bytes),
34377: ('photoshop', read_numpy),
33723: ('iptc', read_bytes),
34675: ('icc_profile', read_bytes),
33628: ('uic1tag', read_uic1tag), # Universal Imaging Corporation STK
33629: ('uic2tag', read_uic2tag),
33630: ('uic3tag', read_uic3tag),
33631: ('uic4tag', read_uic4tag),
34361: ('mm_header', read_mm_header), # Olympus FluoView
34362: ('mm_stamp', read_mm_stamp),
34386: ('mm_user_block', read_bytes),
34412: ('cz_lsm_info', read_cz_lsm_info), # Carl Zeiss LSM
43314: ('nih_image_header', read_nih_image_header),
# 40001: ('mc_ipwinscal', read_bytes),
40100: ('mc_id_old', read_bytes),
50288: ('mc_id', read_bytes),
50296: ('mc_frame_properties', read_bytes),
50839: ('imagej_metadata', read_bytes),
51123: ('micromanager_metadata', read_json),
}
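# Example: tag 34412 in a Zeiss LSM file is parsed by read_cz_lsm_info and
# exposed on the page as the 'cz_lsm_info' attribute (see main() below).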
# Max line length of printed output
PRINT_LINE_LEN = 79
def imshow(data, title=None, vmin=0, vmax=None, cmap=None,
bitspersample=None, photometric='rgb', interpolation='nearest',
dpi=96, figure=None, subplot=111, maxdim=8192, **kwargs):
"""Plot n-dimensional images using matplotlib.pyplot.
Return figure, subplot and plot axis.
Requires pyplot already imported ``from matplotlib import pyplot``.
Parameters
----------
bitspersample : int or None
Number of bits per channel in integer RGB images.
photometric : {'miniswhite', 'minisblack', 'rgb', or 'palette'}
The color space of the image data.
title : str
Window and subplot title.
figure : matplotlib.figure.Figure (optional).
        Matplotlib figure to use for plotting.
subplot : int
A matplotlib.pyplot.subplot axis.
maxdim : int
        Maximum image size in any dimension.
kwargs : optional
Arguments for matplotlib.pyplot.imshow.
"""
#if photometric not in ('miniswhite', 'minisblack', 'rgb', 'palette'):
# raise ValueError("Can't handle %s photometrics" % photometric)
# TODO: handle photometric == 'separated' (CMYK)
isrgb = photometric in ('rgb', 'palette')
data = numpy.atleast_2d(data.squeeze())
data = data[(slice(0, maxdim), ) * len(data.shape)]
dims = data.ndim
if dims < 2:
raise ValueError("not an image")
elif dims == 2:
dims = 0
isrgb = False
else:
if isrgb and data.shape[-3] in (3, 4):
data = numpy.swapaxes(data, -3, -2)
data = numpy.swapaxes(data, -2, -1)
elif not isrgb and (data.shape[-1] < data.shape[-2] // 16 and
data.shape[-1] < data.shape[-3] // 16 and
data.shape[-1] < 5):
data = numpy.swapaxes(data, -3, -1)
data = numpy.swapaxes(data, -2, -1)
isrgb = isrgb and data.shape[-1] in (3, 4)
dims -= 3 if isrgb else 2
if photometric == 'palette' and isrgb:
datamax = data.max()
if datamax > 255:
data >>= 8 # possible precision loss
data = data.astype('B')
elif data.dtype.kind in 'ui':
if not (isrgb and data.dtype.itemsize <= 1) or bitspersample is None:
try:
bitspersample = int(math.ceil(math.log(data.max(), 2)))
except Exception:
bitspersample = data.dtype.itemsize * 8
elif not isinstance(bitspersample, int):
# bitspersample can be tuple, e.g. (5, 6, 5)
bitspersample = data.dtype.itemsize * 8
datamax = 2**bitspersample
if isrgb:
if bitspersample < 8:
data <<= 8 - bitspersample
elif bitspersample > 8:
data >>= bitspersample - 8 # precision loss
data = data.astype('B')
elif data.dtype.kind == 'f':
datamax = data.max()
if isrgb and datamax > 1.0:
if data.dtype.char == 'd':
data = data.astype('f')
data /= datamax
elif data.dtype.kind == 'b':
datamax = 1
elif data.dtype.kind == 'c':
raise NotImplementedError("complex type") # TODO: handle complex types
if not isrgb:
if vmax is None:
vmax = datamax
if vmin is None:
            if data.dtype.kind == 'i':
                dtmin = numpy.iinfo(data.dtype).min
                vmin = numpy.min(data)
                if vmin == dtmin:
                    vmin = numpy.min(data[data > dtmin])
            elif data.dtype.kind == 'f':
                dtmin = numpy.finfo(data.dtype).min
                vmin = numpy.min(data)
                if vmin == dtmin:
                    vmin = numpy.min(data[data > dtmin])
            else:
                vmin = 0
pyplot = sys.modules['matplotlib.pyplot']
if figure is None:
pyplot.rc('font', family='sans-serif', weight='normal', size=8)
figure = pyplot.figure(dpi=dpi, figsize=(10.3, 6.3), frameon=True,
facecolor='1.0', edgecolor='w')
try:
figure.canvas.manager.window.title(title)
except Exception:
pass
pyplot.subplots_adjust(bottom=0.03*(dims+2), top=0.9,
left=0.1, right=0.95, hspace=0.05, wspace=0.0)
subplot = pyplot.subplot(subplot)
if title:
try:
title = unicode(title, 'Windows-1252')
except TypeError:
pass
pyplot.title(title, size=11)
if cmap is None:
if data.dtype.kind in 'ubf' or vmin == 0:
cmap = 'cubehelix'
else:
cmap = 'coolwarm'
if photometric == 'miniswhite':
cmap += '_r'
image = pyplot.imshow(data[(0, ) * dims].squeeze(), vmin=vmin, vmax=vmax,
cmap=cmap, interpolation=interpolation, **kwargs)
if not isrgb:
pyplot.colorbar() # panchor=(0.55, 0.5), fraction=0.05
def format_coord(x, y):
# callback function to format coordinate display in toolbar
x = int(x + 0.5)
y = int(y + 0.5)
try:
if dims:
return "%s @ %s [%4i, %4i]" % (cur_ax_dat[1][y, x],
current, x, y)
else:
return "%s @ [%4i, %4i]" % (data[y, x], x, y)
except IndexError:
return ""
pyplot.gca().format_coord = format_coord
if dims:
current = list((0, ) * dims)
cur_ax_dat = [0, data[tuple(current)].squeeze()]
sliders = [pyplot.Slider(
pyplot.axes([0.125, 0.03*(axis+1), 0.725, 0.025]),
'Dimension %i' % axis, 0, data.shape[axis]-1, 0, facecolor='0.5',
valfmt='%%.0f [%i]' % data.shape[axis]) for axis in range(dims)]
for slider in sliders:
slider.drawon = False
def set_image(current, sliders=sliders, data=data):
# change image and redraw canvas
cur_ax_dat[1] = data[tuple(current)].squeeze()
image.set_data(cur_ax_dat[1])
for ctrl, index in zip(sliders, current):
ctrl.eventson = False
ctrl.set_val(index)
ctrl.eventson = True
figure.canvas.draw()
def on_changed(index, axis, data=data, current=current):
# callback function for slider change event
index = int(round(index))
cur_ax_dat[0] = axis
if index == current[axis]:
return
if index >= data.shape[axis]:
index = 0
elif index < 0:
index = data.shape[axis] - 1
current[axis] = index
set_image(current)
def on_keypressed(event, data=data, current=current):
# callback function for key press event
key = event.key
axis = cur_ax_dat[0]
if str(key) in '0123456789':
on_changed(key, axis)
elif key == 'right':
on_changed(current[axis] + 1, axis)
elif key == 'left':
on_changed(current[axis] - 1, axis)
elif key == 'up':
cur_ax_dat[0] = 0 if axis == len(data.shape)-1 else axis + 1
elif key == 'down':
cur_ax_dat[0] = len(data.shape)-1 if axis == 0 else axis - 1
elif key == 'end':
on_changed(data.shape[axis] - 1, axis)
elif key == 'home':
on_changed(0, axis)
figure.canvas.mpl_connect('key_press_event', on_keypressed)
for axis, ctrl in enumerate(sliders):
ctrl.on_changed(lambda k, a=axis: on_changed(k, a))
return figure, subplot, image
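# Minimal usage sketch for imshow (assumes matplotlib with an interactive
# backend is installed; the data array here is hypothetical):
#   from matplotlib import pyplot
#   data = numpy.random.rand(4, 256, 256)   # four grayscale planes
#   figure, subplot, image = imshow(data, title='random planes',
#                                   photometric='minisblack')
#   pyplot.show()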
def _app_show():
"""Block the GUI. For use as skimage plugin."""
pyplot = sys.modules['matplotlib.pyplot']
pyplot.show()
def main(argv=None):
"""Command line usage main function."""
    if sys.version_info < (2, 6):
        print("This script requires Python version 2.6 or better.")
        print("This is Python version %s" % sys.version)
        return 0
if argv is None:
argv = sys.argv
import optparse
parser = optparse.OptionParser(
usage="usage: %prog [options] path",
description="Display image data in TIFF files.",
version="%%prog %s" % __version__)
opt = parser.add_option
opt('-p', '--page', dest='page', type='int', default=-1,
help="display single page")
opt('-s', '--series', dest='series', type='int', default=-1,
help="display series of pages of same shape")
opt('--nomultifile', dest='nomultifile', action='store_true',
default=False, help="don't read OME series from multiple files")
opt('--noplot', dest='noplot', action='store_true', default=False,
help="don't display images")
opt('--interpol', dest='interpol', metavar='INTERPOL', default='bilinear',
help="image interpolation method")
opt('--dpi', dest='dpi', type='int', default=96,
help="set plot resolution")
opt('--debug', dest='debug', action='store_true', default=False,
help="raise exception on failures")
opt('--test', dest='test', action='store_true', default=False,
help="try read all images in path")
opt('--doctest', dest='doctest', action='store_true', default=False,
help="runs the docstring examples")
opt('-v', '--verbose', dest='verbose', action='store_true', default=True)
opt('-q', '--quiet', dest='verbose', action='store_false')
settings, path = parser.parse_args()
path = ' '.join(path)
if settings.doctest:
import doctest
doctest.testmod()
return 0
if not path:
parser.error("No file specified")
if settings.test:
test_tifffile(path, settings.verbose)
return 0
if any(i in path for i in '?*'):
path = glob.glob(path)
if not path:
print('no files match the pattern')
return 0
# TODO: handle image sequences
#if len(path) == 1:
path = path[0]
print("Reading file structure...", end=' ')
start = time.time()
try:
tif = TiffFile(path, multifile=not settings.nomultifile)
except Exception as e:
if settings.debug:
raise
else:
print("\n", e)
sys.exit(0)
print("%.3f ms" % ((time.time()-start) * 1e3))
if tif.is_ome:
settings.norgb = True
images = [(None, tif[0 if settings.page < 0 else settings.page])]
if not settings.noplot:
print("Reading image data... ", end=' ')
def notnone(x):
return next(i for i in x if i is not None)
start = time.time()
try:
if settings.page >= 0:
images = [(tif.asarray(key=settings.page),
tif[settings.page])]
elif settings.series >= 0:
images = [(tif.asarray(series=settings.series),
notnone(tif.series[settings.series].pages))]
else:
images = []
for i, s in enumerate(tif.series):
try:
images.append(
(tif.asarray(series=i), notnone(s.pages)))
except ValueError as e:
images.append((None, notnone(s.pages)))
if settings.debug:
raise
else:
print("\n* series %i failed: %s... " % (i, e),
end='')
print("%.3f ms" % ((time.time()-start) * 1e3))
except Exception as e:
if settings.debug:
raise
else:
print(e)
tif.close()
print("\nTIFF file:", tif)
print()
for i, s in enumerate(tif.series):
print ("Series %i" % i)
print(s)
print()
for i, page in images:
print(page)
print(page.tags)
if page.is_palette:
print("\nColor Map:", page.color_map.shape, page.color_map.dtype)
for attr in ('cz_lsm_info', 'cz_lsm_scan_info', 'uic_tags',
'mm_header', 'imagej_tags', 'micromanager_metadata',
'nih_image_header'):
if hasattr(page, attr):
print("", attr.upper(), Record(getattr(page, attr)), sep="\n")
print()
if page.is_micromanager:
print('MICROMANAGER_FILE_METADATA')
print(Record(tif.micromanager_metadata))
if images and not settings.noplot:
try:
import matplotlib
matplotlib.use('TkAgg')
from matplotlib import pyplot
except ImportError as e:
warnings.warn("failed to import matplotlib.\n%s" % e)
else:
for img, page in images:
if img is None:
continue
vmin, vmax = None, None
if 'gdal_nodata' in page.tags:
try:
vmin = numpy.min(img[img > float(page.gdal_nodata)])
except ValueError:
pass
if page.is_stk:
try:
vmin = page.uic_tags['min_scale']
vmax = page.uic_tags['max_scale']
except KeyError:
pass
else:
if vmax <= vmin:
vmin, vmax = None, None
title = "%s\n %s" % (str(tif), str(page))
imshow(img, title=title, vmin=vmin, vmax=vmax,
bitspersample=page.bits_per_sample,
photometric=page.photometric,
interpolation=settings.interpol,
dpi=settings.dpi)
pyplot.show()
TIFFfile = TiffFile # backwards compatibility
if sys.version_info[0] > 2:
basestring = str, bytes
unicode = str
if __name__ == "__main__":
sys.exit(main())
|
Britefury/scikit-image
|
skimage/external/tifffile/tifffile_local.py
|
Python
|
bsd-3-clause
| 173,368
|
import fileinput
for i, line in enumerate(fileinput.input(), start=1):
    print("Line %d is: %s" % (i, line.strip()))
|
jepio/python-magic
|
stdinput/stdinput.py
|
Python
|
gpl-2.0
| 131
|
import unittest
import json
from bitmovin import Bitmovin, Response, S3Input
from bitmovin.errors import BitmovinApiError
from tests.bitmovin import BitmovinTestCase
class S3InputTests(BitmovinTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
@classmethod
def tearDownClass(cls):
super().tearDownClass()
def setUp(self):
super().setUp()
self.bitmovin = Bitmovin(self.api_key)
self.assertIsNotNone(self.bitmovin)
self.assertTrue(isinstance(self.bitmovin, Bitmovin))
def tearDown(self):
super().tearDown()
def test_create_s3_input(self):
(sample_input, sample_files) = self._get_sample_s3_input()
input_resource_response = self.bitmovin.inputs.S3.create(sample_input)
self.assertIsNotNone(input_resource_response)
self.assertIsNotNone(input_resource_response.resource)
self.assertIsNotNone(input_resource_response.resource.id)
self._compare_s3_inputs(sample_input, input_resource_response.resource)
def test_create_s3_input_without_name(self):
(sample_input, sample_files) = self._get_sample_s3_input()
sample_input.name = None
input_resource_response = self.bitmovin.inputs.S3.create(sample_input)
self.assertIsNotNone(input_resource_response)
self.assertIsNotNone(input_resource_response.resource)
self.assertIsNotNone(input_resource_response.resource.id)
self._compare_s3_inputs(sample_input, input_resource_response.resource)
def test_retrieve_s3_input(self):
(sample_input, sample_files) = self._get_sample_s3_input()
created_input_response = self.bitmovin.inputs.S3.create(sample_input)
self.assertIsNotNone(created_input_response)
self.assertIsNotNone(created_input_response.resource)
self.assertIsNotNone(created_input_response.resource.id)
self._compare_s3_inputs(sample_input, created_input_response.resource)
retrieved_input_response = self.bitmovin.inputs.S3.retrieve(created_input_response.resource.id)
self.assertIsNotNone(retrieved_input_response)
self.assertIsNotNone(retrieved_input_response.resource)
self._compare_s3_inputs(created_input_response.resource, retrieved_input_response.resource)
def test_delete_s3_input(self):
(sample_input, sample_files) = self._get_sample_s3_input()
created_input_response = self.bitmovin.inputs.S3.create(sample_input)
self.assertIsNotNone(created_input_response)
self.assertIsNotNone(created_input_response.resource)
self.assertIsNotNone(created_input_response.resource.id)
self._compare_s3_inputs(sample_input, created_input_response.resource)
deleted_minimal_resource = self.bitmovin.inputs.S3.delete(created_input_response.resource.id)
self.assertIsNotNone(deleted_minimal_resource)
self.assertIsNotNone(deleted_minimal_resource.resource)
self.assertIsNotNone(deleted_minimal_resource.resource.id)
try:
self.bitmovin.inputs.S3.retrieve(created_input_response.resource.id)
self.fail(
'Previous statement should have thrown an exception. ' +
'Retrieving input after deleting it shouldn\'t be possible.'
)
except BitmovinApiError:
pass
def test_list_s3_inputs(self):
(sample_input, sample_files) = self._get_sample_s3_input()
created_input_response = self.bitmovin.inputs.S3.create(sample_input)
self.assertIsNotNone(created_input_response)
self.assertIsNotNone(created_input_response.resource)
self.assertIsNotNone(created_input_response.resource.id)
self._compare_s3_inputs(sample_input, created_input_response.resource)
inputs = self.bitmovin.inputs.S3.list()
self.assertIsNotNone(inputs)
self.assertIsNotNone(inputs.resource)
self.assertIsNotNone(inputs.response)
self.assertIsInstance(inputs.resource, list)
self.assertIsInstance(inputs.response, Response)
        self.assertGreater(len(inputs.resource), 0)
def test_retrieve_s3_input_custom_data(self):
(sample_input, sample_files) = self._get_sample_s3_input()
sample_input.customData = '<pre>my custom data</pre>'
created_input_response = self.bitmovin.inputs.S3.create(sample_input)
self.assertIsNotNone(created_input_response)
self.assertIsNotNone(created_input_response.resource)
self.assertIsNotNone(created_input_response.resource.id)
self._compare_s3_inputs(sample_input, created_input_response.resource)
custom_data_response = self.bitmovin.inputs.S3.retrieve_custom_data(created_input_response.resource.id)
custom_data = custom_data_response.resource
self.assertEqual(sample_input.customData, json.loads(custom_data.customData))
def _compare_s3_inputs(self, first: S3Input, second: S3Input):
"""
:param first: S3Input
:param second: S3Input
:return: bool
"""
self.assertEqual(first.bucketName, second.bucketName)
self.assertEqual(first.cloudRegion, second.cloudRegion)
self.assertEqual(first.name, second.name)
self.assertEqual(first.description, second.description)
def _get_sample_s3_input(self):
s3_input_settings = self.settings.get('sampleObjects').get('inputs').get('s3')\
.get('9acae039-226b-46a3-8bae-706ae50b33c2')
files = s3_input_settings.get('files')
s3_input = S3Input(
access_key=s3_input_settings.get('accessKey'),
secret_key=s3_input_settings.get('secretKey'),
bucket_name=s3_input_settings.get('bucketName'),
cloud_region=s3_input_settings.get('cloudRegion'),
name='Sample S3 Input'
)
self.assertIsNotNone(s3_input.accessKey)
self.assertIsNotNone(s3_input.secretKey)
self.assertIsNotNone(s3_input.bucketName)
self.assertIsNotNone(s3_input.cloudRegion)
return s3_input, files
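# A matching entry in the test settings file might look like this (a
# hypothetical sketch inferred from the lookups above; the keys and UUID come
# from _get_sample_s3_input, the values are placeholders):
# {"sampleObjects": {"inputs": {"s3": {
#     "9acae039-226b-46a3-8bae-706ae50b33c2": {
#         "accessKey": "...", "secretKey": "...",
#         "bucketName": "my-bucket", "cloudRegion": "EU_WEST_1",
#         "files": ["path/to/file.mp4"]}}}}}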
if __name__ == '__main__':
unittest.main()
|
bitmovin/bitmovin-python
|
tests/bitmovin/services/inputs/s3_input_service_tests.py
|
Python
|
unlicense
| 6,168
|
import socket
hostname = socket.gethostname()
config = {
# This is just a key to tell the fedmsg-hub to initialize us.
'summershum.enabled': True,
'summershum.sqlalchemy.url': 'sqlite:////var/tmp/summershum.sqlite',
'summershum.lookaside': 'http://pkgs.fedoraproject.org/lookaside/pkgs/',
'summershum.datagrepper': 'https://apps.fedoraproject.org/datagrepper/',
# Here is where we will publish our status
'endpoints': {
'summershum.%s' % hostname.split('.')[0]: [
"tcp://127.0.0.1:99887",
],
},
'logging': {
'version': 1,
'loggers': {
'summershum': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False
},
},
},
}
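# When placed in fedmsg.d/, this snippet is merged with the other config
# files by fedmsg, e.g. (a sketch):
#   import fedmsg.config
#   conf = fedmsg.config.load_config()
#   conf['summershum.lookaside']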
|
fedora-infra/summershum
|
fedmsg.d/example-config.py
|
Python
|
gpl-2.0
| 791
|
# -*- coding: utf-8 -*-
"""
pyClanSphere.database
~~~~~~~~~~~~~~~~~~~~~
Our layer on top of SQLAlchemy.
Simply use the high level :mod:`~pyClanSphere.database.db` module which
you can import from the :mod:`pyClanSphere.api` module.
:copyright: (c) 2009 - 2010 by the pyClanSphere Team,
see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import os
import sys
import time
from os import path
from types import ModuleType
from datetime import datetime
import sqlalchemy
from sqlalchemy import orm, sql
from sqlalchemy.engine.url import make_url, URL
from sqlalchemy.exc import ArgumentError, DisconnectionError
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.interfaces import ConnectionProxy
from sqlalchemy.orm.collections import attribute_mapped_collection
from sqlalchemy.orm.interfaces import AttributeExtension
from sqlalchemy.util import to_list
from werkzeug import url_decode
from werkzeug.exceptions import NotFound
from pyClanSphere.utils import local_manager
if sys.platform == 'win32':
_timer = time.clock
else:
_timer = time.time
_sqlite_re = re.compile(r'sqlite:(?:(?://(.*?))|memory)(?:\?(.*))?$')
def get_engine():
"""Return the active database engine (the database engine of the active
    application). If no application is enabled, the behavior is undefined.
If you are not sure if the application is bound to the active thread, use
:func:`~pyClanSphere.application.get_application` and check it for `None`.
The database engine is stored on the application object as `database_engine`.
"""
from pyClanSphere.application import get_application
return get_application().database_engine
def create_engine(uri, relative_to=None, debug=False):
"""Create a new engine. This works a bit like SQLAlchemy's
`create_engine` with the difference that it automaticaly set's MySQL
engines to 'utf-8', and paths for SQLite are relative to the path
provided as `relative_to`. Also hooks in LookLively to catch MySQL's
weird way of connection termination without termination.
Furthermore the engine is created with `convert_unicode` by default.
"""
# This is a good idea in any case
options = {'convert_unicode': True}
# special case sqlite. We want nicer urls for that one.
if uri.startswith('sqlite:'):
match = _sqlite_re.match(uri)
if match is None:
raise ArgumentError('Could not parse rfc1738 URL')
database, query = match.groups()
if database is None:
database = ':memory:'
elif relative_to is not None:
database = path.join(relative_to, database)
if query:
query = url_decode(query).to_dict()
else:
query = {}
info = URL('sqlite', database=database, query=query)
else:
info = make_url(uri)
# if mysql is the database engine and no connection encoding is
# provided we set it to utf-8
if info.drivername == 'mysql':
info.query.setdefault('charset', 'utf8')
options['listeners'] = [LookLively()]
# alternative pool sizes / recycle settings and more. These are
# interpreter wide and not from the config for the following reasons:
#
# - system administrators can set it independently from the webserver
# configuration via SetEnv and friends.
    # - this setting is deployment dependent and should not affect a development
# server for the same instance or a development shell
for key in 'pool_size', 'pool_recycle', 'pool_timeout':
value = os.environ.get("PYCLANSPHERE_" + key.upper())
if value is not None:
options[key] = int(value)
# if debugging is enabled, hook the ConnectionDebugProxy in
if debug:
options['proxy'] = ConnectionDebugProxy()
return sqlalchemy.create_engine(info, **options)
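# Example (a sketch): relative SQLite paths resolve against `relative_to`,
#   engine = create_engine('sqlite://storage.db', relative_to='/var/instance')
# opens /var/instance/storage.db, while 'sqlite:memory' yields an in-memory
# database.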
def secure_database_uri(uri):
"""Returns the database uri with confidental information stripped."""
obj = make_url(uri)
if obj.password:
obj.password = '***'
return unicode(obj).replace(u':%2A%2A%2A@', u':***@', 1)
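# Example: secure_database_uri('mysql://user:secret@localhost/db')
# returns u'mysql://user:***@localhost/db'.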
class ConnectionDebugProxy(ConnectionProxy):
"""Helps debugging the database."""
def cursor_execute(self, execute, cursor, statement, parameters,
context, executemany):
start = _timer()
try:
return execute(cursor, statement, parameters, context)
finally:
from pyClanSphere.application import get_request
from pyClanSphere.utils.debug import find_calling_context
request = get_request()
if request is not None:
request.queries.append((statement, parameters, start,
_timer(), find_calling_context()))
class LookLively(object):
"""Ensures that MySQL connections checked out of the pool are alive.
Specific to the MySQLdb DB-API.
"""
def checkout(self, dbapi_con, con_record, con_proxy):
try:
try:
dbapi_con.ping(False)
except TypeError:
dbapi_con.ping()
except dbapi_con.OperationalError, ex:
if ex.args[0] in (2006, 2013, 2014, 2045, 2055):
raise DisconnectionError()
else:
raise
class Query(orm.Query):
"""Default query class."""
def lightweight(self, deferred=None, lazy=None, eager=None):
"""Send a lightweight query which deferes some more expensive
things such as comment queries or even text data yet also
offers eagerloading externals as well.
"""
args = map(db.lazyload, lazy or ()) + map(db.defer, deferred or ()) + map(db.eagerload, eager or ())
return self.options(*args)
def first(self, raise_if_missing=False):
"""Return the first result of this `Query` or None if the result
doesn't contain any rows. If `raise_if_missing` is set to `True`
a `NotFound` exception is raised if no row is found.
"""
rv = orm.Query.first(self)
if rv is None and raise_if_missing:
raise NotFound()
return rv
class AutoAddExt(orm.MapperExtension):
def init_instance(self, mapper, class_, oldinit, instance, args, kwargs):
session = kwargs.pop('_sa_session', None)
if session is None:
session = db.session
session.add(instance)
return orm.EXT_CONTINUE
#: get a new session
session = orm.scoped_session(lambda: orm.create_session(get_engine(),
autoflush=True, autocommit=False),
local_manager.get_ident)
def mapper(cls, *arg, **options):
"""A mapper that hooks in our standard extensions."""
extensions = to_list(options.pop('extension', None), [])
extensions.append(AutoAddExt())
options['extension'] = extensions
if not hasattr(cls, 'query'):
cls.query = session.query_property()
return orm.mapper(cls, *arg, **options)
#: create a new module for all the database related functions and objects
sys.modules['pyClanSphere.database.db'] = db = ModuleType('db')
key = value = mod = None
for mod in sqlalchemy, orm:
for key, value in mod.__dict__.iteritems():
if key in mod.__all__:
setattr(db, key, value)
del key, mod, value
#: forward some session methods to the module as well
for name in 'delete', 'flush', 'execute', 'begin', \
'commit', 'rollback', 'refresh', 'expire', \
'query_property':
setattr(db, name, getattr(session, name))
#: forward some operators too
for name in 'func', 'and_', 'or_', 'not_':
setattr(db, name, getattr(sql, name))
#: metadata for the core tables and the core table definitions
metadata = db.MetaData()
#: configure a declarative base. This is unused in the code but makes it easier
#: for plugins to work with the database.
class ModelBase(object):
"""Internal baseclass for `Model`."""
Model = declarative_base(name='Model', metadata=metadata, cls=ModelBase, mapper=mapper)
#: and finally hook our own implementations of various objects in
db.Model = Model
db.Query = Query
db.get_engine = get_engine
db.create_engine = create_engine
db.mapper = mapper
db.session = session
db.association_proxy = association_proxy
db.AttributeExtension = AttributeExtension
db.attribute_mapped_collection = attribute_mapped_collection
#: called at the end of a request
cleanup_session = session.remove
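# Plugins can define models declaratively through `db` (a minimal sketch;
# the model and column names here are made up):
#   class GuestbookEntry(db.Model):
#       __tablename__ = 'guestbook_entries'
#       id = db.Column(db.Integer, primary_key=True)
#       text = db.Column(db.Text)
#   entry = GuestbookEntry(text=u'hello')  # AutoAddExt adds it to db.session
#   db.commit()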
def init_database(engine):
"""This is called from the websetup which explains why it takes an engine
and not a pyClanSphere application.
"""
# XXX: consider using something like this for mysql:
# cx = engine.connect()
# cx.execute('set storage_engine=innodb')
# metadata.create_all(cx)
metadata.create_all(engine)
|
jokey2k/pyClanSphere
|
pyClanSphere/database.py
|
Python
|
bsd-3-clause
| 9,011
|
import copy
from itertools import chain
from django import forms
from django.contrib.postgres.validators import (
ArrayMaxLengthValidator, ArrayMinLengthValidator,
)
from django.core.exceptions import ValidationError
from django.utils import six
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from ..utils import prefix_validation_error
class SimpleArrayField(forms.CharField):
default_error_messages = {
'item_invalid': _('Item %(nth)s in the array did not validate: '),
}
def __init__(self, base_field, delimiter=',', max_length=None, min_length=None, *args, **kwargs):
self.base_field = base_field
self.delimiter = delimiter
super(SimpleArrayField, self).__init__(*args, **kwargs)
if min_length is not None:
self.min_length = min_length
self.validators.append(ArrayMinLengthValidator(int(min_length)))
if max_length is not None:
self.max_length = max_length
self.validators.append(ArrayMaxLengthValidator(int(max_length)))
def prepare_value(self, value):
if isinstance(value, list):
return self.delimiter.join(six.text_type(self.base_field.prepare_value(v)) for v in value)
return value
def to_python(self, value):
if value:
items = value.split(self.delimiter)
else:
items = []
errors = []
values = []
for index, item in enumerate(items):
try:
values.append(self.base_field.to_python(item))
except ValidationError as error:
errors.append(prefix_validation_error(
error,
prefix=self.error_messages['item_invalid'],
code='item_invalid',
params={'nth': index},
))
if errors:
raise ValidationError(errors)
return values
def validate(self, value):
super(SimpleArrayField, self).validate(value)
errors = []
for index, item in enumerate(value):
try:
self.base_field.validate(item)
except ValidationError as error:
errors.append(prefix_validation_error(
error,
prefix=self.error_messages['item_invalid'],
code='item_invalid',
params={'nth': index},
))
if errors:
raise ValidationError(errors)
def run_validators(self, value):
super(SimpleArrayField, self).run_validators(value)
errors = []
for index, item in enumerate(value):
try:
self.base_field.run_validators(item)
except ValidationError as error:
errors.append(prefix_validation_error(
error,
prefix=self.error_messages['item_invalid'],
code='item_invalid',
params={'nth': index},
))
if errors:
raise ValidationError(errors)
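# Example usage (a sketch): a comma-separated list of integers on a form,
#   class TagForm(forms.Form):
#       ids = SimpleArrayField(forms.IntegerField(), delimiter=',')
# binding {'ids': '1,2,3'} cleans to [1, 2, 3].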
class SplitArrayWidget(forms.Widget):
def __init__(self, widget, size, **kwargs):
self.widget = widget() if isinstance(widget, type) else widget
self.size = size
super(SplitArrayWidget, self).__init__(**kwargs)
@property
def is_hidden(self):
return self.widget.is_hidden
def value_from_datadict(self, data, files, name):
return [self.widget.value_from_datadict(data, files, '%s_%s' % (name, index))
for index in range(self.size)]
def id_for_label(self, id_):
# See the comment for RadioSelect.id_for_label()
if id_:
id_ += '_0'
return id_
def render(self, name, value, attrs=None):
if self.is_localized:
self.widget.is_localized = self.is_localized
value = value or []
output = []
final_attrs = self.build_attrs(attrs)
id_ = final_attrs.get('id')
for i in range(max(len(value), self.size)):
try:
widget_value = value[i]
except IndexError:
widget_value = None
if id_:
final_attrs = dict(final_attrs, id='%s_%s' % (id_, i))
output.append(self.widget.render(name + '_%s' % i, widget_value, final_attrs))
return mark_safe(self.format_output(output))
def format_output(self, rendered_widgets):
return ''.join(rendered_widgets)
@property
def media(self):
return self.widget.media
def __deepcopy__(self, memo):
obj = super(SplitArrayWidget, self).__deepcopy__(memo)
obj.widget = copy.deepcopy(self.widget)
return obj
@property
def needs_multipart_form(self):
return self.widget.needs_multipart_form
class SplitArrayField(forms.Field):
default_error_messages = {
'item_invalid': _('Item %(nth)s in the array did not validate: '),
}
def __init__(self, base_field, size, remove_trailing_nulls=False, **kwargs):
self.base_field = base_field
self.size = size
self.remove_trailing_nulls = remove_trailing_nulls
widget = SplitArrayWidget(widget=base_field.widget, size=size)
kwargs.setdefault('widget', widget)
super(SplitArrayField, self).__init__(**kwargs)
def clean(self, value):
cleaned_data = []
errors = []
if not any(value) and self.required:
raise ValidationError(self.error_messages['required'])
max_size = max(self.size, len(value))
for index in range(max_size):
item = value[index]
try:
cleaned_data.append(self.base_field.clean(item))
except ValidationError as error:
errors.append(prefix_validation_error(
error,
self.error_messages['item_invalid'],
code='item_invalid',
params={'nth': index},
))
cleaned_data.append(None)
else:
errors.append(None)
if self.remove_trailing_nulls:
null_index = None
for i, value in reversed(list(enumerate(cleaned_data))):
if value in self.base_field.empty_values:
null_index = i
else:
break
if null_index is not None:
cleaned_data = cleaned_data[:null_index]
errors = errors[:null_index]
errors = list(filter(None, errors))
if errors:
raise ValidationError(list(chain.from_iterable(errors)))
return cleaned_data
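# Example (a sketch): three separate text inputs collected into one list,
#   field = SplitArrayField(forms.CharField(required=False), size=3,
#                           remove_trailing_nulls=True)
#   field.clean(['a', '', ''])  # == ['a']; trailing empty values dropped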
|
unnikrishnankgs/va
|
venv/lib/python3.5/site-packages/django/contrib/postgres/forms/array.py
|
Python
|
bsd-2-clause
| 6,758
|
"""
sentry.models.team
~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import warnings
from django.conf import settings
from django.db import connections, IntegrityError, models, router, transaction
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from sentry.app import env, locks
from sentry.db.models import (
BaseManager, BoundedPositiveIntegerField, FlexibleForeignKey, Model, sane_repr
)
from sentry.db.models.utils import slugify_instance
from sentry.utils.retries import TimedRetryPolicy
class TeamManager(BaseManager):
def get_for_user(self, organization, user, scope=None, with_projects=False):
"""
Returns a list of all teams a user has some level of access to.
"""
from sentry.models import (
OrganizationMemberTeam,
Project,
ProjectStatus,
OrganizationMember,
)
if not user.is_authenticated():
return []
base_team_qs = self.filter(organization=organization, status=TeamStatus.VISIBLE)
        if (env.request and env.request.is_superuser()) or settings.SENTRY_PUBLIC:
team_list = list(base_team_qs)
else:
try:
om = OrganizationMember.objects.get(
user=user,
organization=organization,
)
except OrganizationMember.DoesNotExist:
# User is not a member of the organization at all
return []
# If a scope is passed through, make sure this scope is
# available on the OrganizationMember object.
if scope is not None and scope not in om.get_scopes():
return []
team_list = list(
base_team_qs.filter(
id__in=OrganizationMemberTeam.objects.filter(
organizationmember=om,
is_active=True,
).values_list('team'),
)
)
results = sorted(team_list, key=lambda x: x.name.lower())
if with_projects:
project_list = sorted(
Project.objects.filter(
team__in=team_list,
status=ProjectStatus.VISIBLE,
),
key=lambda x: x.name.lower()
)
projects_by_team = {t.id: [] for t in team_list}
for project in project_list:
projects_by_team[project.team_id].append(project)
# these kinds of queries make people sad :(
for idx, team in enumerate(results):
team_projects = projects_by_team[team.id]
for project in team_projects:
project.team = team
results[idx] = (team, team_projects)
return results
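    # Example (a sketch):
    #   for team, projects in Team.objects.get_for_user(
    #           organization, request.user, with_projects=True):
    #       ...
    # Without with_projects=True the call returns a plain list of teams.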
# TODO(dcramer): pull in enum library
class TeamStatus(object):
VISIBLE = 0
PENDING_DELETION = 1
DELETION_IN_PROGRESS = 2
class Team(Model):
"""
A team represents a group of individuals which maintain ownership of projects.
"""
__core__ = True
organization = FlexibleForeignKey('sentry.Organization')
slug = models.SlugField()
name = models.CharField(max_length=64)
status = BoundedPositiveIntegerField(
choices=(
(TeamStatus.VISIBLE, _('Active')), (TeamStatus.PENDING_DELETION, _('Pending Deletion')),
(TeamStatus.DELETION_IN_PROGRESS, _('Deletion in Progress')),
),
default=TeamStatus.VISIBLE
)
date_added = models.DateTimeField(default=timezone.now, null=True)
objects = TeamManager(cache_fields=('pk', 'slug', ))
class Meta:
app_label = 'sentry'
db_table = 'sentry_team'
unique_together = (('organization', 'slug'), )
__repr__ = sane_repr('name', 'slug')
def __unicode__(self):
return u'%s (%s)' % (self.name, self.slug)
def save(self, *args, **kwargs):
if not self.slug:
lock = locks.get('slug:team', duration=5)
with TimedRetryPolicy(10)(lock.acquire):
slugify_instance(self, self.name, organization=self.organization)
super(Team, self).save(*args, **kwargs)
else:
super(Team, self).save(*args, **kwargs)
@property
def member_set(self):
return self.organization.member_set.filter(
organizationmemberteam__team=self,
organizationmemberteam__is_active=True,
user__is_active=True,
).distinct()
def has_access(self, user, access=None):
from sentry.models import AuthIdentity, OrganizationMember
warnings.warn('Team.has_access is deprecated.', DeprecationWarning)
queryset = self.member_set.filter(
user=user,
)
if access is not None:
queryset = queryset.filter(type__lte=access)
try:
member = queryset.get()
except OrganizationMember.DoesNotExist:
return False
try:
auth_identity = AuthIdentity.objects.get(
auth_provider__organization=self.organization_id,
user=member.user_id,
)
except AuthIdentity.DoesNotExist:
return True
return auth_identity.is_valid(member)
def transfer_to(self, organization):
"""
Transfers a team and all projects under it to the given organization.
"""
from sentry.models import (
OrganizationAccessRequest, OrganizationMember, OrganizationMemberTeam, Project,
ReleaseProject
)
try:
with transaction.atomic():
self.update(organization=organization)
except IntegrityError:
# likely this means a team already exists, let's try to coerce to
# it instead of a blind transfer
new_team = Team.objects.get(
organization=organization,
slug=self.slug,
)
else:
new_team = self
project_ids = list(
Project.objects.filter(
team=self,
).exclude(
organization=organization,
).values_list('id', flat=True)
)
# remove associations with releases from other org
ReleaseProject.objects.filter(
project_id__in=project_ids,
).delete()
Project.objects.filter(
id__in=project_ids,
).update(
team=new_team,
organization=organization,
)
# remove any pending access requests from the old organization
if self != new_team:
OrganizationAccessRequest.objects.filter(
team=self,
).delete()
# identify shared members and ensure they retain team access
# under the new organization
old_memberships = OrganizationMember.objects.filter(
teams=self,
).exclude(
organization=organization,
)
for member in old_memberships:
try:
new_member = OrganizationMember.objects.get(
user=member.user,
organization=organization,
)
except OrganizationMember.DoesNotExist:
continue
try:
with transaction.atomic():
OrganizationMemberTeam.objects.create(
team=new_team,
organizationmember=new_member,
)
except IntegrityError:
pass
OrganizationMemberTeam.objects.filter(
team=self,
).exclude(
organizationmember__organization=organization,
).delete()
if new_team != self:
cursor = connections[router.db_for_write(Team)].cursor()
# we use a cursor here to avoid automatic cascading of relations
# in Django
try:
cursor.execute('DELETE FROM sentry_team WHERE id = %s', [self.id])
finally:
cursor.close()
def get_audit_log_data(self):
return {
'id': self.id,
'slug': self.slug,
'name': self.name,
'status': self.status,
}
|
jean/sentry
|
src/sentry/models/team.py
|
Python
|
bsd-3-clause
| 8,526
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack.compute.contrib import used_limits
from nova.api.openstack.compute import limits
from nova.api.openstack import wsgi
import nova.context
from nova import quota
from nova import test
class FakeRequest(object):
def __init__(self, context):
self.environ = {'nova.context': context}
class UsedLimitsTestCase(test.TestCase):
def setUp(self):
"""Run before each test."""
super(UsedLimitsTestCase, self).setUp()
self.controller = used_limits.UsedLimitsController()
self.fake_context = nova.context.RequestContext('fake', 'fake')
self.fake_req = FakeRequest(self.fake_context)
def test_used_limits(self):
obj = {
"limits": {
"rate": [],
"absolute": {},
},
}
res = wsgi.ResponseObject(obj)
quota_map = {
'totalRAMUsed': 'ram',
'totalCoresUsed': 'cores',
'totalInstancesUsed': 'instances',
'totalVolumesUsed': 'volumes',
'totalVolumeGigabytesUsed': 'gigabytes',
'totalFloatingIpsUsed': 'floating_ips',
'totalSecurityGroupsUsed': 'security_groups',
}
limits = {}
for display_name, q in quota_map.iteritems():
limits[q] = {'limit': 10, 'in_use': 2}
def stub_get_project_quotas(context, project_id, usages=True):
return limits
self.stubs.Set(quota.QUOTAS, "get_project_quotas",
stub_get_project_quotas)
self.controller.index(self.fake_req, res)
abs_limits = res.obj['limits']['absolute']
for used_limit, value in abs_limits.iteritems():
self.assertEqual(value, limits[quota_map[used_limit]]['in_use'])
def test_used_ram_added(self):
obj = {
"limits": {
"rate": [],
"absolute": {
"maxTotalRAMSize": 512,
},
},
}
res = wsgi.ResponseObject(obj)
def stub_get_project_quotas(context, project_id, usages=True):
return {'ram': {'limit': 512, 'in_use': 256}}
self.stubs.Set(quota.QUOTAS, "get_project_quotas",
stub_get_project_quotas)
self.controller.index(self.fake_req, res)
abs_limits = res.obj['limits']['absolute']
self.assertTrue('totalRAMUsed' in abs_limits)
self.assertEqual(abs_limits['totalRAMUsed'], 256)
def test_no_ram_quota(self):
obj = {
"limits": {
"rate": [],
"absolute": {},
},
}
res = wsgi.ResponseObject(obj)
def stub_get_project_quotas(context, project_id, usages=True):
return {}
self.stubs.Set(quota.QUOTAS, "get_project_quotas",
stub_get_project_quotas)
self.controller.index(self.fake_req, res)
abs_limits = res.obj['limits']['absolute']
self.assertFalse('totalRAMUsed' in abs_limits)
def test_used_limits_xmlns(self):
obj = {
"limits": {
"rate": [],
"absolute": {},
},
}
res = wsgi.ResponseObject(obj, xml=limits.LimitsTemplate)
res.preserialize('xml')
def stub_get_project_quotas(context, project_id, usages=True):
return {}
self.stubs.Set(quota.QUOTAS, "get_project_quotas",
stub_get_project_quotas)
self.controller.index(self.fake_req, res)
response = res.serialize(None, 'xml')
self.assertTrue(used_limits.XMLNS in response.body)
|
paulmathews/nova
|
nova/tests/api/openstack/compute/contrib/test_used_limits.py
|
Python
|
apache-2.0
| 4,341
|
import http.server
from threading import Thread
import os.path
import mimetypes
class HTTPHandler:
def __init__(self, config):
self.config = config
handler = HTTPHandler.make_http_handler(self.config['media_dir'])
self.httpd = http.server.HTTPServer(('', self.config['media']['port']), handler)
self.thread = Thread(target=self.main_loop)
def run(self):
self.thread.start()
return self
def main_loop(self):
self.httpd.serve_forever()
@staticmethod
def make_http_handler(root_path):
class RelayGramHTTPHandler(http.server.BaseHTTPRequestHandler):
def __init__(self, *args, **kwargs):
super(RelayGramHTTPHandler, self).__init__(*args, **kwargs)
def do_GET(self):
file_path = os.path.abspath(root_path + self.path)
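                # Path-traversal guard: abspath() above resolves any '..'
                # segments, so a request that escapes root_path no longer
                # shares root_path as a common prefix and is rejected below.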
if os.path.commonprefix([root_path, file_path]) != os.path.abspath(root_path): # Detect path traversal attempt
self.send_error(501, "Nice try")
else:
if not os.path.exists(file_path) or not os.path.isfile(file_path):
self.send_error(404, 'File Not Found')
else:
mimetype = mimetypes.guess_type(file_path)
self.send_response(200)
if mimetype[0]:
self.send_header('Content-Type', mimetype[0])
self.send_header('Content-Length', os.path.getsize(file_path))
self.end_headers()
                        with open(file_path, mode='rb') as media_file:
                            self.wfile.write(media_file.read())
return RelayGramHTTPHandler
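# Hedged usage sketch: the config shape below is inferred from the attribute
# accesses above ('media_dir' for the document root, media.port for the
# listen port) and is an assumption, not a documented schema.
#   HTTPHandler({'media_dir': '/srv/media', 'media': {'port': 8080}}).run()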
|
Surye/relaygram
|
relaygram/http_server.py
|
Python
|
mit
| 1,712
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import getpass
import os
import json
import subprocess
import threading
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow import configuration as conf
from tempfile import mkstemp
PYTHONPATH_VAR = 'PYTHONPATH'
class BaseTaskRunner(LoggingMixin):
"""
Runs Airflow task instances by invoking the `airflow run` command with raw
mode enabled in a subprocess.
"""
def __init__(self, local_task_job):
"""
:param local_task_job: The local task job associated with running the
associated task instance.
:type local_task_job: airflow.jobs.LocalTaskJob
"""
# Pass task instance context into log handlers to setup the logger.
super(BaseTaskRunner, self).__init__(local_task_job.task_instance)
self._task_instance = local_task_job.task_instance
popen_prepend = []
cfg_path = None
if self._task_instance.run_as_user:
self.run_as_user = self._task_instance.run_as_user
else:
try:
self.run_as_user = conf.get('core', 'default_impersonation')
except conf.AirflowConfigException:
self.run_as_user = None
# Add sudo commands to change user if we need to. Needed to handle SubDagOperator
# case using a SequentialExecutor.
if self.run_as_user and (self.run_as_user != getpass.getuser()):
self.log.debug("Planning to run as the %s user", self.run_as_user)
cfg_dict = conf.as_dict(display_sensitive=True)
cfg_subset = {
'core': cfg_dict.get('core', {}),
'smtp': cfg_dict.get('smtp', {}),
'scheduler': cfg_dict.get('scheduler', {}),
'webserver': cfg_dict.get('webserver', {}),
                'hive': cfg_dict.get('hive', {}),  # we should probably generalize this
}
temp_fd, cfg_path = mkstemp()
# Give ownership of file to user; only they can read and write
subprocess.call(
['sudo', 'chown', self.run_as_user, cfg_path],
close_fds=True
)
subprocess.call(
['sudo', 'chmod', '600', cfg_path],
close_fds=True
)
with os.fdopen(temp_fd, 'w') as temp_file:
json.dump(cfg_subset, temp_file)
# propagate PYTHONPATH environment variable
pythonpath_value = os.environ.get(PYTHONPATH_VAR, '')
popen_prepend = ['sudo', '-H', '-u', self.run_as_user]
if pythonpath_value:
popen_prepend.append('{}={}'.format(PYTHONPATH_VAR, pythonpath_value))
self._cfg_path = cfg_path
self._command = popen_prepend + self._task_instance.command_as_list(
raw=True,
pickle_id=local_task_job.pickle_id,
mark_success=local_task_job.mark_success,
job_id=local_task_job.id,
pool=local_task_job.pool,
cfg_path=cfg_path,
)
self.process = None
def _read_task_logs(self, stream):
while True:
line = stream.readline()
if isinstance(line, bytes):
line = line.decode('utf-8')
if len(line) == 0:
break
            self.log.info(u'Job %s: Subtask %s %s',
                          self._task_instance.job_id,
                          self._task_instance.task_id,
                          line.rstrip('\n'))
def run_command(self, run_with, join_args=False):
"""
Run the task command
:param run_with: list of tokens to run the task command with
E.g. ['bash', '-c']
:type run_with: list
:param join_args: whether to concatenate the list of command tokens
E.g. ['airflow', 'run'] vs ['airflow run']
        :type join_args: bool
:return: the process that was run
:rtype: subprocess.Popen
"""
cmd = [" ".join(self._command)] if join_args else self._command
full_cmd = run_with + cmd
self.log.info('Running: %s', full_cmd)
proc = subprocess.Popen(
full_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
close_fds=True,
)
# Start daemon thread to read subprocess logging output
log_reader = threading.Thread(
target=self._read_task_logs,
args=(proc.stdout,),
)
log_reader.daemon = True
log_reader.start()
return proc
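    # Hypothetical usage sketch (the wrapper command is illustrative only):
    #   proc = runner.run_command(run_with=['bash', '-c'], join_args=True)
    # join_args=True collapses the token list into one shell string, which
    # is what 'bash -c' expects as its single argument.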
def start(self):
"""
Start running the task instance in a subprocess.
"""
raise NotImplementedError()
def return_code(self):
"""
:return: The return code associated with running the task instance or
None if the task is not yet done.
:rtype int:
"""
raise NotImplementedError()
def terminate(self):
"""
Kill the running task instance.
"""
raise NotImplementedError()
def on_finish(self):
"""
A callback that should be called when this is done running.
"""
if self._cfg_path and os.path.isfile(self._cfg_path):
subprocess.call(['sudo', 'rm', self._cfg_path], close_fds=True)
|
KL-WLCR/incubator-airflow
|
airflow/task/task_runner/base_task_runner.py
|
Python
|
apache-2.0
| 5,924
|
# -*- coding: utf-8 -*-
from sys import argv
script, file = argv
def print_all(f):
    print f.read()
def rewind(f):
    f.seek(0)
def print_line(count, f):
    print count, f.readline()
currentFile = open(file)
print("Print the whole file!")
print_all(currentFile)
print("Back to start!")
rewind(currentFile)
print("Print each line!")
count = 1
print_line(count, currentFile)
count += 1
print_line(count, currentFile)
count += 1
print_line(count, currentFile)
|
Vayne-Lover/Effective
|
Python/Learn Python The Hard Way/ex20.py
|
Python
|
mit
| 456
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('quotes', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='quote',
name='ip',
),
]
|
FMF-studenti/backend
|
fmf/quotes/migrations/0002_remove_quote_ip.py
|
Python
|
gpl-2.0
| 335
|
def build_models(payment_class):
return []
|
dekoza/django-getpaid
|
getpaid/backends/transferuj/models.py
|
Python
|
mit
| 47
|
# Copyright (c) 2019 Salesforce. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import rbac_security_groups
from neutron_lib.api import extensions
class Rbac_security_groups(extensions.APIExtensionDescriptor):
"""Extension class supporting security groups RBAC."""
api_definition = rbac_security_groups
|
mahak/neutron
|
neutron/extensions/rbac_security_groups.py
|
Python
|
apache-2.0
| 897
|
#!/usr/bin/env python2.3
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
===================
Basic Result Tester
===================
A simple component for testing that a stream of data tests true.
This is NOT intended for live systems, but for testing and development purposes
only.
Example Usage
-------------
::
Pipeline( source(), TestResult() ).activate()
Raises an assertion error if source() generates a value that doesn't test
true.
How does it work?
-----------------
If the component receives a value on its "inbox" inbox that does not test true,
then an AssertionError is raised.
If the component receives a StopSystem message on its "control" inbox then a
StopSystemException is raised.
This component does not terminate (unless it throws an exception).
It does not pass on the data it receives.
"""
from Axon.Component import component
from Axon.Ipc import producerFinished, shutdownMicroprocess, ipc
from Axon.AxonExceptions import AxonException
class StopSystem(ipc):
"""\
This IPC message is the command to the component to throw a
StopSystemException and bring the Axon system to a halt.
"""
pass
class StopSystemException(AxonException):
"""This exception is used to stop the whole Axon system."""
pass
class TestResult(component):
"""\
TestResult() -> new TestResult.
Component that raises an AssertionError if it receives data on its "inbox"
inbox that does not test true. Or raises a StopSystemException if a
StopSystem message is received on its "control" inbox.
"""
Inboxes = { "inbox" : "Data to test",
"control" : "StopSystemException messages",
}
Outboxes = { "outbox" : "NOT USED",
"signal" : "NOT USED",
}
def mainBody(self):
if self.dataReady():
if not self.recv():
raise AssertionError("false value message received by: %s" % self)
if self.dataReady("control"):
mes = self.recv("control")
if isinstance(mes, StopSystem):
raise StopSystemException("StopSystem request raised from TestResult")
return 1
__kamaelia_components__ = ( TestResult, )
|
sparkslabs/kamaelia_
|
Code/Python/Kamaelia/Kamaelia/Util/TestResult.py
|
Python
|
apache-2.0
| 3,101
|
# 1 - Import library
import pygame
import random
from pygame.locals import *
# 2 - Initialize the game
pygame.init()
width, height = 153, 153
screen=pygame.display.set_mode((width, height))
# 3 - Load images
heads = pygame.image.load("head.png")
tails = pygame.image.load("tail.png")
sides = [heads, tails]
player = random.choice(sides)
# 4 - keep looping through
while 1:
# 5 - clear the screen before drawing it again
screen.fill(0)
# 6 - draw the screen elements
screen.blit(player, (0,0))
# 7 - update the screen
pygame.display.flip()
# 8 - loop through the events
    for event in pygame.event.get():
        # check if the event is the X button
        if event.type == pygame.QUIT:
            # if it is quit the game
            pygame.quit()
            exit(0)
        if event.type == pygame.KEYUP:
            player = random.choice(sides)
|
Alafazam/simple_projects
|
pygame/coinFlip/app.py
|
Python
|
mit
| 911
|
# Copyright 2018 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
import time
from oslo_config import cfg
from oslo_log import log
import oslo_messaging
from vitrage.common.constants import VertexProperties as VProps
from vitrage.common.utils import spawn
from vitrage.datasources.transformer_base import TransformerBase
from vitrage.entity_graph import driver_exec
from vitrage.entity_graph import get_graph_driver
from vitrage.entity_graph.consistency.consistency_enforcer import \
ConsistencyEnforcer
from vitrage.entity_graph import EVALUATOR_TOPIC
from vitrage.entity_graph.graph_persistency import GraphPersistency
from vitrage.entity_graph.processor.notifier import GraphNotifier
from vitrage.entity_graph.processor.notifier import PersistNotifier
from vitrage.entity_graph.processor.processor import Processor
from vitrage.entity_graph.scheduler import Scheduler
from vitrage.graph.driver.networkx_graph import NXGraph
from vitrage import messaging
from vitrage import storage
CONF = cfg.CONF
LOG = log.getLogger(__name__)
class VitrageGraphInit(object):
def __init__(self, workers):
self.graph = get_graph_driver()('Entity Graph')
self.db = db_connection = storage.get_connection_from_config()
self.workers = workers
self.events_coordination = EventsCoordination(self.process_event)
self.persist = GraphPersistency(db_connection, self.graph)
self.driver_exec = driver_exec.DriverExec(
self.events_coordination.handle_multiple_low_priority,
self.persist, self.graph)
consistency = ConsistencyEnforcer(
self.graph,
self.events_coordination.handle_multiple_high_priority)
self.scheduler = Scheduler(self.graph,
self.driver_exec,
self.persist,
consistency)
self.processor = Processor(self.graph)
def run(self):
LOG.info('Init Started')
graph_snapshot = self.persist.query_recent_snapshot()
if graph_snapshot:
t = spawn(self.workers.submit_read_db_graph)
self._restart_from_stored_graph(graph_snapshot)
t.join()
self.workers.submit_enable_evaluations()
else:
self._start_from_scratch()
self.workers.submit_read_db_graph()
self.workers.submit_start_evaluations()
        self._init_finale(immediate_get_all=bool(graph_snapshot))
def _restart_from_stored_graph(self, graph_snapshot):
LOG.info('Main process - loading graph from database snapshot (%sKb)',
len(graph_snapshot.graph_snapshot) / 1024)
NXGraph.read_gpickle(graph_snapshot.graph_snapshot, self.graph)
self.persist.replay_events(self.graph, graph_snapshot.event_id)
self._recreate_transformers_id_cache()
LOG.info("%s vertices loaded", self.graph.num_vertices())
        self.subscribe_persist_notifier()
def _start_from_scratch(self):
LOG.info('Starting for the first time')
LOG.info('Clearing database active_actions')
self.db.active_actions.delete()
LOG.info('Disabling previously active alarms')
self.db.history_facade.disable_alarms_in_history()
        self.subscribe_persist_notifier()
self.driver_exec.snapshot_get_all()
LOG.info("%s vertices loaded", self.graph.num_vertices())
def _init_finale(self, immediate_get_all):
self._add_graph_subscriptions()
self.scheduler.start_periodic_tasks(immediate_get_all)
LOG.info('Init Finished')
self.events_coordination.start()
def process_event(self, event):
if isinstance(event, list):
for e in event:
self.processor.process_event(e)
elif event.get('template_action'):
self.workers.submit_template_event(event)
self.workers.submit_evaluators_reload_templates()
else:
self.processor.process_event(event)
self.persist.flush_events()
def _recreate_transformers_id_cache(self):
for v in self.graph.get_vertices():
if not v.get(VProps.VITRAGE_CACHED_ID):
LOG.warning("Missing vitrage_cached_id in the vertex. "
"Vertex is not added to the ID cache %s", v)
else:
TransformerBase.key_to_uuid_cache[v[VProps.VITRAGE_CACHED_ID]]\
= v.vertex_id
def _add_graph_subscriptions(self):
self.graph.subscribe(self.workers.submit_graph_update)
vitrage_notifier = GraphNotifier()
if vitrage_notifier.enabled:
self.graph.subscribe(vitrage_notifier.notify_when_applicable)
LOG.info('Subscribed vitrage notifier to graph changes')
self.graph.subscribe(self.persist.persist_event,
finalization=True)
    def subscribe_persist_notifier(self):
self.graph.subscribe(PersistNotifier().notify_when_applicable)
PRIORITY_DELAY = 0.05
class EventsCoordination(object):
def __init__(self, do_work_func):
self._lock = threading.Lock()
self._high_event_finish_time = 0
def do_work(event):
try:
return do_work_func(event)
except Exception:
LOG.exception('Got Exception for event %s' % event)
self._do_work_func = do_work
self._low_pri_listener = None
self._high_pri_listener = None
def start(self):
self._low_pri_listener = driver_exec.DriversNotificationEndpoint(
self.handle_multiple_low_priority).init().get_listener()
self._high_pri_listener = self._init_listener(
EVALUATOR_TOPIC,
self._do_high_priority_work)
LOG.info('Listening on %s', self._high_pri_listener.targets[0].topic)
LOG.info('Listening on %s', self._low_pri_listener.targets[0].topic)
self._high_pri_listener.start()
self._low_pri_listener.start()
def stop(self):
self._low_pri_listener.stop()
self._high_pri_listener.stop()
def wait(self):
self._low_pri_listener.wait()
self._high_pri_listener.wait()
def _do_high_priority_work(self, event):
self._lock.acquire()
self._do_work_func(event)
self._high_event_finish_time = time.time()
self._lock.release()
def _do_low_priority_work(self, event):
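        # Back off while high-priority work is fresh: if a high-priority
        # event finished less than PRIORITY_DELAY seconds ago, release the
        # lock and sleep so pending high-priority events can reacquire it
        # first; otherwise keep holding the lock (break) and do the work.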
while True:
self._lock.acquire()
if (time.time() - self._high_event_finish_time) < PRIORITY_DELAY:
self._lock.release()
time.sleep(PRIORITY_DELAY)
else:
break
self._do_work_func(event)
self._lock.release()
def handle_multiple_low_priority(self, events):
index = 0
if events is None:
events = []
for index, e in enumerate(events, 1):
self._do_low_priority_work(e)
return index
def handle_multiple_high_priority(self, events):
for e in events:
self._do_high_priority_work(e)
def _init_listener(self, topic, callback):
if not topic:
return
return messaging.get_notification_listener(
transport=messaging.get_transport(),
targets=[oslo_messaging.Target(topic=topic)],
endpoints=[PushNotificationsEndpoint(callback)])
class PushNotificationsEndpoint(object):
def __init__(self, process_event_callback):
self.process_event_callback = process_event_callback
def info(self, ctxt, publisher_id, event_type, payload, metadata):
try:
self.process_event_callback(payload)
except Exception:
LOG.exception('Failed to process event callback.')
|
openstack/vitrage
|
vitrage/entity_graph/graph_init.py
|
Python
|
apache-2.0
| 8,349
|
from chatterbot.adapters import Adapter
class OutputAdapter(Adapter):
"""
A generic class that can be overridden by a subclass to provide extended
functionality, such as delivering a response to an API endpoint.
"""
def process_response(self, statement, session_id=None):
"""
Override this method in a subclass to implement customized functionality.
:param statement: The statement that the chat bot has produced in response to some input.
:param session_id: The unique id of the current chat session.
:returns: The response statement.
"""
return statement
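# A hypothetical subclass sketch (WEBHOOK_URL and the `requests` call are
# illustrative assumptions): deliver each response to an HTTP endpoint
# before handing it back to the conversation flow.
#   class WebhookAdapter(OutputAdapter):
#       def process_response(self, statement, session_id=None):
#           requests.post(WEBHOOK_URL, json={'text': statement.text})
#           return statement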
|
maclogan/VirtualPenPal
|
chatterbot/output/output_adapter.py
|
Python
|
bsd-3-clause
| 639
|
from category import *
import unittest
class TestCategory(unittest.TestCase):
def testMatch(self):
path = '/CATEGORY/CLASS/instance/PROTOCOL/METHOD'
target = {'category': 'CATEGORY',
'class': 'CLASS',
'file': None,
'dir': 'instance',
'protocol': 'PROTOCOL',
'method': 'METHOD'}
assert CategoryPathParser(0).match(path) == target
|
phb/squeakfs
|
unittests/test_category.py
|
Python
|
mit
| 401
|
registered_handlers = {}
|
schneck/django-foreignkeysearch
|
foreignkeysearch/__init__.py
|
Python
|
mit
| 26
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dnn.py with feature_column_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
from absl.testing import parameterized
import numpy as np
import six
import tensorflow as tf
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.feature_column import feature_column_v2
from tensorflow.python.framework import test_util
from tensorflow_estimator.python.estimator.canned import dnn
from tensorflow_estimator.python.estimator.canned import prediction_keys
from tensorflow_estimator.python.estimator.canned.v1 import dnn_testing_utils_v1
from tensorflow_estimator.python.estimator.export import export
from tensorflow_estimator.python.estimator.inputs import numpy_io
from tensorflow_estimator.python.estimator.inputs import pandas_io
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def _dnn_classifier_fn(*args, **kwargs):
return dnn.DNNClassifier(*args, **kwargs)
@test_util.run_v1_only('Tests v1 only symbols')
class DNNModelFnV2Test(dnn_testing_utils_v1.BaseDNNModelFnTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils_v1.BaseDNNModelFnTest.__init__(
self, dnn._dnn_model_fn, fc_impl=feature_column_v2)
@test_util.run_v1_only('Tests v1 only symbols')
class DNNLogitFnV2Test(dnn_testing_utils_v1.BaseDNNLogitFnTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils_v1.BaseDNNLogitFnTest.__init__(
self, dnn.dnn_logit_fn_builder, fc_impl=feature_column_v2)
@test_util.run_v1_only('Tests v1 only symbols')
class DNNWarmStartingV2Test(dnn_testing_utils_v1.BaseDNNWarmStartingTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils_v1.BaseDNNWarmStartingTest.__init__(
self, _dnn_classifier_fn, _dnn_regressor_fn, fc_impl=feature_column_v2)
@test_util.run_v1_only('Tests v1 only symbols')
class DNNClassifierEvaluateV2Test(
dnn_testing_utils_v1.BaseDNNClassifierEvaluateTest, tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils_v1.BaseDNNClassifierEvaluateTest.__init__(
self, _dnn_classifier_fn, fc_impl=feature_column_v2)
@test_util.run_v1_only('Tests v1 only symbols')
class DNNClassifierPredictV2Test(
dnn_testing_utils_v1.BaseDNNClassifierPredictTest, tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils_v1.BaseDNNClassifierPredictTest.__init__(
self, _dnn_classifier_fn, fc_impl=feature_column_v2)
@test_util.run_v1_only('Tests v1 only symbols')
class DNNClassifierTrainV2Test(dnn_testing_utils_v1.BaseDNNClassifierTrainTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils_v1.BaseDNNClassifierTrainTest.__init__(
self, _dnn_classifier_fn, fc_impl=feature_column_v2)
def _dnn_regressor_fn(*args, **kwargs):
return dnn.DNNRegressor(*args, **kwargs)
@test_util.run_v1_only('Tests v1 only symbols')
class DNNRegressorEvaluateV2Test(
dnn_testing_utils_v1.BaseDNNRegressorEvaluateTest, tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils_v1.BaseDNNRegressorEvaluateTest.__init__(
self, _dnn_regressor_fn, fc_impl=feature_column_v2)
@test_util.run_v1_only('Tests v1 only symbols')
class DNNRegressorPredictV2Test(
dnn_testing_utils_v1.BaseDNNRegressorPredictTest, tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils_v1.BaseDNNRegressorPredictTest.__init__(
self, _dnn_regressor_fn, fc_impl=feature_column_v2)
@test_util.run_v1_only('Tests v1 only symbols')
class DNNRegressorTrainV2Test(dnn_testing_utils_v1.BaseDNNRegressorTrainTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
dnn_testing_utils_v1.BaseDNNRegressorTrainTest.__init__(
self, _dnn_regressor_fn, fc_impl=feature_column_v2)
def _queue_parsed_features(feature_map):
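  # Stage the parsed feature tensors through a FIFO queue: a QueueRunner
  # keeps enqueueing the parsed tensors, and the dict returned below maps
  # each feature key to its dequeued tensor, so every evaluation of the
  # input graph pulls exactly one queued element.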
tensors_to_enqueue = []
keys = []
for key, tensor in six.iteritems(feature_map):
keys.append(key)
tensors_to_enqueue.append(tensor)
queue_dtypes = [x.dtype for x in tensors_to_enqueue]
input_queue = tf.queue.FIFOQueue(capacity=100, dtypes=queue_dtypes)
tf.compat.v1.train.queue_runner.add_queue_runner(
tf.compat.v1.train.queue_runner.QueueRunner(
input_queue, [input_queue.enqueue(tensors_to_enqueue)]))
dequeued_tensors = input_queue.dequeue()
return {keys[i]: dequeued_tensors[i] for i in range(len(dequeued_tensors))}
@test_util.run_v1_only('Tests v1 only symbols')
class DNNRegressorIntegrationTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
tf.compat.v1.summary.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,
input_dimension, label_dimension, batch_size):
feature_columns = [
tf.feature_column.numeric_column('x', shape=(input_dimension,))
]
est = dnn.DNNRegressor(
hidden_units=(2, 2),
feature_columns=feature_columns,
label_dimension=label_dimension,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
# EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[tf.compat.v1.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predictions = np.array([
x[prediction_keys.PredictionKeys.PREDICTIONS]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, label_dimension), predictions.shape)
# EXPORT
feature_spec = tf.feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_saved_model(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(tf.compat.v1.gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data}, y=data, batch_size=batch_size, shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data}, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
label_dimension = 1
batch_size = 10
data = np.linspace(0., 2., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(data)
train_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
'y':
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': tf.io.FixedLenFeature([label_dimension], tf.dtypes.float32),
'y': tf.io.FixedLenFeature([label_dimension], tf.dtypes.float32),
}
def _train_input_fn():
feature_map = tf.compat.v1.io.parse_example(serialized_examples,
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = tf.compat.v1.io.parse_example(
tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = tf.compat.v1.io.parse_example(
tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
@test_util.run_v1_only('Tests v1 only symbols')
class DNNClassifierIntegrationTest(tf.test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
tf.compat.v1.summary.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _as_label(self, data_in_float):
return np.rint(data_in_float).astype(np.int64)
def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,
input_dimension, n_classes, batch_size):
feature_columns = [
tf.feature_column.numeric_column('x', shape=(input_dimension,))
]
est = dnn.DNNClassifier(
hidden_units=(2, 2),
feature_columns=feature_columns,
n_classes=n_classes,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
# EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[tf.compat.v1.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predicted_proba = np.array([
x[prediction_keys.PredictionKeys.PROBABILITIES]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, n_classes), predicted_proba.shape)
# EXPORT
feature_spec = tf.feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_saved_model(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(tf.compat.v1.gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
n_classes = 3
input_dimension = 2
batch_size = 10
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
x_data = data.reshape(batch_size, input_dimension)
y_data = np.reshape(self._as_label(data[:batch_size]), (batch_size, 1))
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
y=y_data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data}, y=y_data, batch_size=batch_size, shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data}, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
input_dimension = 1
n_classes = 3
batch_size = 10
data = np.linspace(0., n_classes - 1., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(self._as_label(data))
train_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
input_dimension = 2
n_classes = 3
batch_size = 10
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
'y':
feature_pb2.Feature(
int64_list=feature_pb2.Int64List(
value=self._as_label(datum[:1]))),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': tf.io.FixedLenFeature([input_dimension], tf.dtypes.float32),
'y': tf.io.FixedLenFeature([1], tf.dtypes.int64),
}
def _train_input_fn():
feature_map = tf.compat.v1.io.parse_example(serialized_examples,
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = tf.compat.v1.io.parse_example(
tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = tf.compat.v1.io.parse_example(
tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
if __name__ == '__main__':
tf.test.main()
|
tensorflow/estimator
|
tensorflow_estimator/python/estimator/canned/v1/dnn_test_fc_v2_v1.py
|
Python
|
apache-2.0
| 17,722
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Bottle is a fast and simple micro-framework for small web applications. It
offers request dispatching (Routes) with URL parameter support, templates,
a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and
template engines - all in a single file and with no dependencies other than the
Python Standard Library.
Homepage and documentation: http://bottlepy.org/
Copyright (c) 2014, Marcel Hellkamp.
License: MIT (see LICENSE for details)
"""
from __future__ import with_statement
import sys
__author__ = 'Marcel Hellkamp'
__version__ = '0.13-dev'
__license__ = 'MIT'
###############################################################################
# Command-line interface ######################################################
###############################################################################
# INFO: Some server adapters need to monkey-patch std-lib modules before they
# are imported. This is why some of the command-line handling is done here, but
# the actual call to main() is at the end of the file.
def _cli_parse(args):
from optparse import OptionParser
parser = OptionParser(
usage="usage: %prog [options] package.module:app")
opt = parser.add_option
opt("--version", action="store_true", help="show version number.")
opt("-b", "--bind", metavar="ADDRESS", help="bind socket to ADDRESS.")
opt("-s", "--server", default='wsgiref', help="use SERVER as backend.")
opt("-p", "--plugin", action="append", help="install additional plugin/s.")
opt("-c", "--conf", action="append", metavar="FILE",
help="load config values from FILE.")
opt("-C", "--param", action="append", metavar="NAME=VALUE",
help="override config values.")
opt("--debug", action="store_true", help="start server in debug mode.")
opt("--reload", action="store_true", help="auto-reload on file changes.")
opts, args = parser.parse_args(args[1:])
return opts, args, parser
def _cli_patch(args):
opts, _, _ = _cli_parse(args)
if opts.server:
if opts.server.startswith('gevent'):
import gevent.monkey
gevent.monkey.patch_all()
elif opts.server.startswith('eventlet'):
import eventlet
eventlet.monkey_patch()
if __name__ == '__main__':
_cli_patch(sys.argv)
###############################################################################
# Imports and Python 2/3 unification ###########################################
###############################################################################
import base64, cgi, email.utils, functools, hmac, imp, itertools, mimetypes,\
os, re, tempfile, threading, time, warnings
from types import FunctionType
from datetime import date as datedate, datetime, timedelta
from tempfile import TemporaryFile
from traceback import format_exc, print_exc
from unicodedata import normalize
# inspect.getargspec was removed in Python 3.6, use
# Signature-based version where we can (Python 3.3+)
try:
from inspect import signature
def getargspec(func):
params = signature(func).parameters
args, varargs, keywords, defaults = [], None, None, []
for name, param in params.items():
if param.kind == param.VAR_POSITIONAL:
varargs = name
elif param.kind == param.VAR_KEYWORD:
keywords = name
else:
args.append(name)
if param.default is not param.empty:
defaults.append(param.default)
return (args, varargs, keywords, tuple(defaults) or None)
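        # For example (a sketch of the shim's contract):
        #   getargspec(lambda a, b=1, *c, **d: None)
        #   == (['a', 'b'], 'c', 'd', (1,))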
except ImportError:
from inspect import getargspec
try:
from simplejson import dumps as json_dumps, loads as json_lds
except ImportError: # pragma: no cover
try:
from json import dumps as json_dumps, loads as json_lds
except ImportError:
try:
from django.utils.simplejson import dumps as json_dumps, loads as json_lds
except ImportError:
def json_dumps(data):
raise ImportError(
"JSON support requires Python 2.6 or simplejson.")
json_lds = json_dumps
# We now try to fix 2.5/2.6/3.1/3.2 incompatibilities.
# It ain't pretty but it works... Sorry for the mess.
py = sys.version_info
py3k = py >= (3, 0, 0)
py25 = py < (2, 6, 0)
py31 = (3, 1, 0) <= py < (3, 2, 0)
# Workaround for the missing "as" keyword in py3k.
def _e():
return sys.exc_info()[1]
# Workaround for the "print is a keyword/function" Python 2/3 dilemma
# and a fallback for mod_wsgi (restricts stdout/err attribute access)
try:
_stdout, _stderr = sys.stdout.write, sys.stderr.write
except IOError:
_stdout = lambda x: sys.stdout.write(x)
_stderr = lambda x: sys.stderr.write(x)
# Lots of stdlib and builtin differences.
if py3k:
import http.client as httplib
import _thread as thread
from urllib.parse import urljoin, SplitResult as UrlSplitResult
from urllib.parse import urlencode, quote as urlquote, unquote as urlunquote
urlunquote = functools.partial(urlunquote, encoding='latin1')
from http.cookies import SimpleCookie
from collections import MutableMapping as DictMixin
import pickle
from io import BytesIO
from configparser import ConfigParser, Error as ConfigParserError
basestring = str
unicode = str
json_loads = lambda s: json_lds(touni(s))
callable = lambda x: hasattr(x, '__call__')
imap = map
def _raise(*a):
raise a[0](a[1]).with_traceback(a[2])
else: # 2.x
import httplib
import thread
from urlparse import urljoin, SplitResult as UrlSplitResult
from urllib import urlencode, quote as urlquote, unquote as urlunquote
from Cookie import SimpleCookie
from itertools import imap
import cPickle as pickle
from StringIO import StringIO as BytesIO
from ConfigParser import SafeConfigParser as ConfigParser, \
Error as ConfigParserError
if py25:
msg = "Python 2.5 support may be dropped in future versions of Bottle."
warnings.warn(msg, DeprecationWarning)
from UserDict import DictMixin
def next(it):
return it.next()
bytes = str
else: # 2.6, 2.7
from collections import MutableMapping as DictMixin
unicode = unicode
json_loads = json_lds
eval(compile('def _raise(*a): raise a[0], a[1], a[2]', '<py3fix>', 'exec'))
# Some helpers for string/byte handling
def tob(s, enc='utf8'):
return s.encode(enc) if isinstance(s, unicode) else bytes(s)
def touni(s, enc='utf8', err='strict'):
if isinstance(s, bytes):
return s.decode(enc, err)
else:
        return unicode("" if s is None else s)
tonat = touni if py3k else tob
# 3.2 fixes cgi.FieldStorage to accept bytes (which makes a lot of sense).
# 3.1 needs a workaround.
if py31:
from io import TextIOWrapper
class NCTextIOWrapper(TextIOWrapper):
def close(self):
pass # Keep wrapped buffer open.
# A bug in functools causes it to break if the wrapper is an instance method
def update_wrapper(wrapper, wrapped, *a, **ka):
try:
functools.update_wrapper(wrapper, wrapped, *a, **ka)
except AttributeError:
pass
# These helpers are used at module level and need to be defined first.
# And yes, I know PEP-8, but sometimes a lower-case classname makes more sense.
def depr(message, strict=False):
warnings.warn(message, DeprecationWarning, stacklevel=3)
def makelist(data): # This is just too handy
if isinstance(data, (tuple, list, set, dict)):
return list(data)
elif data:
return [data]
else:
return []
class DictProperty(object):
""" Property that maps to a key in a local dict-like attribute. """
def __init__(self, attr, key=None, read_only=False):
self.attr, self.key, self.read_only = attr, key, read_only
def __call__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter, self.key = func, self.key or func.__name__
return self
def __get__(self, obj, cls):
if obj is None: return self
key, storage = self.key, getattr(obj, self.attr)
if key not in storage: storage[key] = self.getter(obj)
return storage[key]
def __set__(self, obj, value):
if self.read_only: raise AttributeError("Read-Only property.")
getattr(obj, self.attr)[self.key] = value
def __delete__(self, obj):
if self.read_only: raise AttributeError("Read-Only property.")
del getattr(obj, self.attr)[self.key]
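# A minimal DictProperty sketch (hypothetical class): the value lives in the
# instance's `environ` dict but reads like a plain attribute, and the
# decorated function supplies it on first access.
#   class Request(object):
#       def __init__(self):
#           self.environ = {}
#       @DictProperty('environ', 'request.method', read_only=True)
#       def method(self):
#           return 'GET'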
class cached_property(object):
""" A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property. """
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None: return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
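# Sketch (hypothetical class and helper): the first access runs the function
# and stores the result in the instance __dict__, so later reads bypass the
# descriptor entirely; `del cfg.answer` re-arms the computation.
#   class Config(object):
#       @cached_property
#       def answer(self):
#           return expensive_lookup()  # hypothetical helper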
class lazy_attribute(object):
""" A property that caches itself to the class object. """
def __init__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter = func
def __get__(self, obj, cls):
value = self.getter(cls)
setattr(cls, self.__name__, value)
return value
###############################################################################
# Exceptions and Events ########################################################
###############################################################################
class BottleException(Exception):
""" A base class for exceptions used by bottle. """
pass
###############################################################################
# Routing ######################################################################
###############################################################################
class RouteError(BottleException):
""" This is a base class for all routing related exceptions """
class RouteReset(BottleException):
""" If raised by a plugin or request handler, the route is reset and all
plugins are re-applied. """
class RouterUnknownModeError(RouteError):
pass
class RouteSyntaxError(RouteError):
""" The route parser found something not supported by this router. """
class RouteBuildError(RouteError):
""" The route could not be built. """
def _re_flatten(p):
""" Turn all capturing groups in a regular expression pattern into
non-capturing groups. """
if '(' not in p:
return p
return re.sub(r'(\\*)(\(\?P<[^>]+>|\((?!\?))', lambda m: m.group(0) if
len(m.group(1)) % 2 else m.group(1) + '(?:', p)
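# For example (illustrative): both capturing forms are rewritten in place,
#   _re_flatten(r'/(\d+)/(?P<name>\w+)')  ->  r'/(?:\d+)/(?:\w+)'
# while escaped parentheses such as r'\(' are left untouched.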
class Router(object):
""" A Router is an ordered collection of route->target pairs. It is used to
efficiently match WSGI requests against a number of routes and return
the first target that satisfies the request. The target may be anything,
usually a string, ID or callable object. A route consists of a path-rule
and a HTTP method.
The path-rule is either a static path (e.g. `/contact`) or a dynamic
path that contains wildcards (e.g. `/wiki/<page>`). The wildcard syntax
and details on the matching order are described in docs:`routing`.
"""
default_pattern = '[^/]+'
default_filter = 're'
#: The current CPython regexp implementation does not allow more
#: than 99 matching groups per regular expression.
_MAX_GROUPS_PER_PATTERN = 99
def __init__(self, strict=False):
self.rules = [] # All rules in order
self._groups = {} # index of regexes to find them in dyna_routes
self.builder = {} # Data structure for the url builder
self.static = {} # Search structure for static routes
self.dyna_routes = {}
self.dyna_regexes = {} # Search structure for dynamic routes
#: If true, static routes are no longer checked first.
self.strict_order = strict
self.filters = {
're': lambda conf: (_re_flatten(conf or self.default_pattern),
None, None),
'int': lambda conf: (r'-?\d+', int, lambda x: str(int(x))),
'float': lambda conf: (r'-?[\d.]+', float, lambda x: str(float(x))),
'path': lambda conf: (r'.+?', None, None)
}
def add_filter(self, name, func):
""" Add a filter. The provided function is called with the configuration
string as parameter and must return a (regexp, to_python, to_url) tuple.
The first element is a string, the last two are callables or None. """
self.filters[name] = func
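    # A hypothetical filter sketch: restrict a wildcard to lowercase slugs.
    #   router.add_filter('slug', lambda conf: (r'[a-z0-9-]+', None, None))
    # A rule such as '/post/<title:slug>' then matches lowercase slugs only;
    # both callables are None, so matched text passes through unchanged.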
rule_syntax = re.compile('(\\\\*)'
'(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?:#(.*?)#)?)'
'|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)'
'(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>))')
def _itertokens(self, rule):
offset, prefix = 0, ''
for match in self.rule_syntax.finditer(rule):
prefix += rule[offset:match.start()]
g = match.groups()
if len(g[0]) % 2: # Escaped wildcard
prefix += match.group(0)[len(g[0]):]
offset = match.end()
continue
if prefix:
yield prefix, None, None
name, filtr, conf = g[4:7] if g[2] is None else g[1:4]
yield name, filtr or 'default', conf or None
offset, prefix = match.end(), ''
if offset <= len(rule) or prefix:
yield prefix + rule[offset:], None, None
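    # For example (illustrative): iterating '/wiki/<page:re:[a-z]+>' yields
    # ('/wiki/', None, None), then ('page', 're', '[a-z]+'), then a final
    # ('', None, None) static token, which add() below skips as empty.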
def add(self, rule, method, target, name=None):
""" Add a new rule or replace the target for an existing rule. """
anons = 0 # Number of anonymous wildcards found
keys = [] # Names of keys
pattern = '' # Regular expression pattern with named groups
filters = [] # Lists of wildcard input filters
builder = [] # Data structure for the URL builder
is_static = True
for key, mode, conf in self._itertokens(rule):
if mode:
is_static = False
if mode == 'default': mode = self.default_filter
mask, in_filter, out_filter = self.filters[mode](conf)
if not key:
pattern += '(?:%s)' % mask
key = 'anon%d' % anons
anons += 1
else:
pattern += '(?P<%s>%s)' % (key, mask)
keys.append(key)
if in_filter: filters.append((key, in_filter))
builder.append((key, out_filter or str))
elif key:
pattern += re.escape(key)
builder.append((None, key))
self.builder[rule] = builder
if name: self.builder[name] = builder
if is_static and not self.strict_order:
self.static.setdefault(method, {})
self.static[method][self.build(rule)] = (target, None)
return
try:
re_pattern = re.compile('^(%s)$' % pattern)
re_match = re_pattern.match
except re.error:
raise RouteSyntaxError("Could not add Route: %s (%s)" %
(rule, _e()))
if filters:
def getargs(path):
url_args = re_match(path).groupdict()
for name, wildcard_filter in filters:
try:
url_args[name] = wildcard_filter(url_args[name])
except ValueError:
raise HTTPError(400, 'Path has wrong format.')
return url_args
elif re_pattern.groupindex:
def getargs(path):
return re_match(path).groupdict()
else:
getargs = None
flatpat = _re_flatten(pattern)
whole_rule = (rule, flatpat, target, getargs)
if (flatpat, method) in self._groups:
if DEBUG:
msg = 'Route <%s %s> overwrites a previously defined route'
warnings.warn(msg % (method, rule), RuntimeWarning)
self.dyna_routes[method][
self._groups[flatpat, method]] = whole_rule
else:
self.dyna_routes.setdefault(method, []).append(whole_rule)
self._groups[flatpat, method] = len(self.dyna_routes[method]) - 1
self._compile(method)
def _compile(self, method):
all_rules = self.dyna_routes[method]
comborules = self.dyna_regexes[method] = []
maxgroups = self._MAX_GROUPS_PER_PATTERN
for x in range(0, len(all_rules), maxgroups):
some = all_rules[x:x + maxgroups]
combined = (flatpat for (_, flatpat, _, _) in some)
combined = '|'.join('(^%s$)' % flatpat for flatpat in combined)
combined = re.compile(combined).match
rules = [(target, getargs) for (_, _, target, getargs) in some]
comborules.append((combined, rules))
def build(self, _name, *anons, **query):
""" Build an URL by filling the wildcards in a rule. """
builder = self.builder.get(_name)
if not builder:
raise RouteBuildError("No route with that name.", _name)
try:
for i, value in enumerate(anons):
query['anon%d' % i] = value
url = ''.join([f(query.pop(n)) if n else f for (n, f) in builder])
return url if not query else url + '?' + urlencode(query)
except KeyError:
raise RouteBuildError('Missing URL argument: %r' % _e().args[0])
def match(self, environ):
""" Return a (target, url_args) tuple or raise HTTPError(400/404/405). """
verb = environ['REQUEST_METHOD'].upper()
path = environ['PATH_INFO'] or '/'
if verb == 'HEAD':
methods = ['PROXY', verb, 'GET', 'ANY']
else:
methods = ['PROXY', verb, 'ANY']
for method in methods:
if method in self.static and path in self.static[method]:
target, getargs = self.static[method][path]
return target, getargs(path) if getargs else {}
elif method in self.dyna_regexes:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
target, getargs = rules[match.lastindex - 1]
return target, getargs(path) if getargs else {}
# No matching route found. Collect alternative methods for 405 response
allowed = set([])
nocheck = set(methods)
for method in set(self.static) - nocheck:
if path in self.static[method]:
                allowed.add(method)
for method in set(self.dyna_regexes) - allowed - nocheck:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
allowed.add(method)
if allowed:
allow_header = ",".join(sorted(allowed))
raise HTTPError(405, "Method not allowed.", Allow=allow_header)
# No matching route and no alternative method found. We give up
raise HTTPError(404, "Not found: " + repr(path))
class Route(object):
""" This class wraps a route callback along with route specific metadata and
configuration and applies Plugins on demand. It is also responsible for
    turning a URL path rule into a regular expression usable by the Router.
"""
def __init__(self, app, rule, method, callback,
name=None,
plugins=None,
skiplist=None, **config):
#: The application this route is installed to.
self.app = app
#: The path-rule string (e.g. ``/wiki/<page>``).
self.rule = rule
#: The HTTP method as a string (e.g. ``GET``).
self.method = method
#: The original callback with no plugins applied. Useful for introspection.
self.callback = callback
#: The name of the route (if specified) or ``None``.
self.name = name or None
#: A list of route-specific plugins (see :meth:`Bottle.route`).
self.plugins = plugins or []
#: A list of plugins to not apply to this route (see :meth:`Bottle.route`).
self.skiplist = skiplist or []
#: Additional keyword arguments passed to the :meth:`Bottle.route`
#: decorator are stored in this dictionary. Used for route-specific
#: plugin configuration and meta-data.
self.config = ConfigDict().load_dict(config)
@cached_property
def call(self):
""" The route callback with all plugins applied. This property is
created on demand and then cached to speed up subsequent requests."""
return self._make_callback()
def reset(self):
""" Forget any cached values. The next time :attr:`call` is accessed,
all plugins are re-applied. """
self.__dict__.pop('call', None)
def prepare(self):
""" Do all on-demand work immediately (useful for debugging)."""
self.call
def all_plugins(self):
""" Yield all Plugins affecting this route. """
unique = set()
for p in reversed(self.app.plugins + self.plugins):
if True in self.skiplist: break
name = getattr(p, 'name', False)
if name and (name in self.skiplist or name in unique): continue
if p in self.skiplist or type(p) in self.skiplist: continue
if name: unique.add(name)
yield p
def _make_callback(self):
callback = self.callback
for plugin in self.all_plugins():
try:
if hasattr(plugin, 'apply'):
callback = plugin.apply(callback, self)
else:
callback = plugin(callback)
except RouteReset: # Try again with changed configuration.
return self._make_callback()
        if callback is not self.callback:
update_wrapper(callback, self.callback)
return callback
def get_undecorated_callback(self):
""" Return the callback. If the callback is a decorated function, try to
recover the original function. """
func = self.callback
func = getattr(func, '__func__' if py3k else 'im_func', func)
closure_attr = '__closure__' if py3k else 'func_closure'
while hasattr(func, closure_attr) and getattr(func, closure_attr):
attributes = getattr(func, closure_attr)
func = attributes[0].cell_contents
# in case of decorators with multiple arguments
if not isinstance(func, FunctionType):
# pick first FunctionType instance from multiple arguments
func = filter(lambda x: isinstance(x, FunctionType),
map(lambda x: x.cell_contents, attributes))
func = list(func)[0] # py3 support
return func
def get_callback_args(self):
""" Return a list of argument names the callback (most likely) accepts
as keyword arguments. If the callback is a decorated function, try
to recover the original function before inspection. """
return getargspec(self.get_undecorated_callback())[0]
def get_config(self, key, default=None):
""" Lookup a config field and return its value, first checking the
route.config, then route.app.config."""
for conf in (self.config, self.app.config):
if key in conf: return conf[key]
return default
def __repr__(self):
cb = self.get_undecorated_callback()
return '<%s %r %r>' % (self.method, self.rule, cb)
###############################################################################
# Application Object ###########################################################
###############################################################################
class Bottle(object):
""" Each Bottle object represents a single, distinct web application and
consists of routes, callbacks, plugins, resources and configuration.
Instances are callable WSGI applications.
:param catchall: If true (default), handle all exceptions. Turn off to
let debugging middleware handle exceptions.
"""
def __init__(self, catchall=True, autojson=True):
#: A :class:`ConfigDict` for app specific configuration.
self.config = ConfigDict()
self.config._on_change = functools.partial(self.trigger_hook, 'config')
self.config.meta_set('autojson', 'validate', bool)
self.config.meta_set('catchall', 'validate', bool)
self.config['catchall'] = catchall
self.config['autojson'] = autojson
#: A :class:`ResourceManager` for application files
self.resources = ResourceManager()
self.routes = [] # List of installed :class:`Route` instances.
self.router = Router() # Maps requests to :class:`Route` instances.
self.error_handler = {}
# Core plugins
self.plugins = [] # List of installed plugins.
if self.config['autojson']:
self.install(JSONPlugin())
self.install(TemplatePlugin())
#: If true, most exceptions are caught and returned as :exc:`HTTPError`
catchall = DictProperty('config', 'catchall')
__hook_names = 'before_request', 'after_request', 'app_reset', 'config'
__hook_reversed = 'after_request'
@cached_property
def _hooks(self):
return dict((name, []) for name in self.__hook_names)
def add_hook(self, name, func):
""" Attach a callback to a hook. Three hooks are currently implemented:
before_request
Executed once before each request. The request context is
available, but no routing has happened yet.
after_request
Executed once after each request regardless of its outcome.
app_reset
Called whenever :meth:`Bottle.reset` is called.
"""
if name in self.__hook_reversed:
self._hooks[name].insert(0, func)
else:
self._hooks[name].append(func)
def remove_hook(self, name, func):
""" Remove a callback from a hook. """
if name in self._hooks and func in self._hooks[name]:
self._hooks[name].remove(func)
return True
def trigger_hook(self, __name, *args, **kwargs):
""" Trigger a hook and return a list of results. """
return [hook(*args, **kwargs) for hook in self._hooks[__name][:]]
def hook(self, name):
""" Return a decorator that attaches a callback to a hook. See
:meth:`add_hook` for details."""
def decorator(func):
self.add_hook(name, func)
return func
return decorator
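    # Usage sketch (illustrative `app`; not part of this module):
    #
    #     app = Bottle()
    #
    #     @app.hook('before_request')
    #     def announce():
    #         print('Handling %s %s' % (request.method, request.path))
    #
    #     @app.hook('after_request')
    #     def tag_response():
    #         response.set_header('X-Served-By', 'bottle')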
def mount(self, prefix, app, **options):
""" Mount an application (:class:`Bottle` or plain WSGI) to a specific
URL prefix. Example::
root_app.mount('/admin/', admin_app)
:param prefix: path prefix or `mount-point`. If it ends in a slash,
that slash is mandatory.
:param app: an instance of :class:`Bottle` or a WSGI application.
All other parameters are passed to the underlying :meth:`route` call.
"""
segments = [p for p in prefix.split('/') if p]
if not segments: raise ValueError('Empty path prefix.')
path_depth = len(segments)
def mountpoint_wrapper():
try:
request.path_shift(path_depth)
rs = HTTPResponse([])
def start_response(status, headerlist, exc_info=None):
if exc_info:
_raise(*exc_info)
rs.status = status
for name, value in headerlist:
rs.add_header(name, value)
return rs.body.append
body = app(request.environ, start_response)
rs.body = itertools.chain(rs.body, body) if rs.body else body
return rs
finally:
request.path_shift(-path_depth)
options.setdefault('skip', True)
options.setdefault('method', 'PROXY')
options.setdefault('mountpoint', {'prefix': prefix, 'target': app})
options['callback'] = mountpoint_wrapper
self.route('/%s/<:re:.*>' % '/'.join(segments), **options)
if not prefix.endswith('/'):
self.route('/' + '/'.join(segments), **options)
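    # Usage sketch (illustrative names; not part of this module):
    #
    #     admin_app = Bottle()
    #
    #     @admin_app.route('/status')
    #     def status():
    #         return 'OK'
    #
    #     root_app = Bottle()
    #     root_app.mount('/admin/', admin_app)   # GET /admin/status -> 'OK'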
def merge(self, routes):
""" Merge the routes of another :class:`Bottle` application or a list of
:class:`Route` objects into this application. The routes keep their
'owner', meaning that the :data:`Route.app` attribute is not
changed. """
if isinstance(routes, Bottle):
routes = routes.routes
for route in routes:
self.add_route(route)
def install(self, plugin):
""" Add a plugin to the list of plugins and prepare it for being
applied to all routes of this application. A plugin may be a simple
decorator or an object that implements the :class:`Plugin` API.
"""
if hasattr(plugin, 'setup'): plugin.setup(self)
if not callable(plugin) and not hasattr(plugin, 'apply'):
raise TypeError("Plugins must be callable or implement .apply()")
self.plugins.append(plugin)
self.reset()
return plugin
def uninstall(self, plugin):
""" Uninstall plugins. Pass an instance to remove a specific plugin, a type
object to remove all plugins that match that type, a string to remove
all plugins with a matching ``name`` attribute or ``True`` to remove all
plugins. Return the list of removed plugins. """
removed, remove = [], plugin
for i, plugin in list(enumerate(self.plugins))[::-1]:
if remove is True or remove is plugin or remove is type(plugin) \
or getattr(plugin, 'name', True) == remove:
removed.append(plugin)
del self.plugins[i]
if hasattr(plugin, 'close'): plugin.close()
if removed: self.reset()
return removed
def reset(self, route=None):
""" Reset all routes (force plugins to be re-applied) and clear all
caches. If an ID or route object is given, only that specific route
is affected. """
if route is None: routes = self.routes
elif isinstance(route, Route): routes = [route]
else: routes = [self.routes[route]]
for route in routes:
route.reset()
if DEBUG:
for route in routes:
route.prepare()
self.trigger_hook('app_reset')
def close(self):
""" Close the application and all installed plugins. """
for plugin in self.plugins:
if hasattr(plugin, 'close'): plugin.close()
def run(self, **kwargs):
""" Calls :func:`run` with the same parameters. """
run(self, **kwargs)
def match(self, environ):
""" Search for a matching route and return a (:class:`Route` , urlargs)
tuple. The second value is a dictionary with parameters extracted
from the URL. Raise :exc:`HTTPError` (404/405) on a non-match."""
return self.router.match(environ)
def get_url(self, routename, **kargs):
""" Return a string that matches a named route """
scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
location = self.router.build(routename, **kargs).lstrip('/')
return urljoin(urljoin('/', scriptname), location)
def add_route(self, route):
""" Add a route object, but do not change the :data:`Route.app`
attribute."""
self.routes.append(route)
self.router.add(route.rule, route.method, route, name=route.name)
if DEBUG: route.prepare()
def route(self,
path=None,
method='GET',
callback=None,
name=None,
apply=None,
skip=None, **config):
""" A decorator to bind a function to a request URL. Example::
@app.route('/hello/<name>')
def hello(name):
return 'Hello %s' % name
The ``<name>`` part is a wildcard. See :class:`Router` for syntax
details.
:param path: Request path or a list of paths to listen to. If no
path is specified, it is automatically generated from the
signature of the function.
:param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
methods to listen to. (default: `GET`)
:param callback: An optional shortcut to avoid the decorator
syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
:param name: The name for this route. (default: None)
:param apply: A decorator or plugin or a list of plugins. These are
applied to the route callback in addition to installed plugins.
:param skip: A list of plugins, plugin classes or names. Matching
plugins are not installed to this route. ``True`` skips all.
Any additional keyword arguments are stored as route-specific
configuration and passed to plugins (see :meth:`Plugin.apply`).
"""
if callable(path): path, callback = None, path
plugins = makelist(apply)
skiplist = makelist(skip)
def decorator(callback):
if isinstance(callback, basestring): callback = load(callback)
for rule in makelist(path) or yieldroutes(callback):
for verb in makelist(method):
verb = verb.upper()
route = Route(self, rule, verb, callback,
name=name,
plugins=plugins,
skiplist=skiplist, **config)
self.add_route(route)
return callback
return decorator(callback) if callback else decorator
def get(self, path=None, method='GET', **options):
""" Equals :meth:`route`. """
return self.route(path, method, **options)
def post(self, path=None, method='POST', **options):
""" Equals :meth:`route` with a ``POST`` method parameter. """
return self.route(path, method, **options)
def put(self, path=None, method='PUT', **options):
""" Equals :meth:`route` with a ``PUT`` method parameter. """
return self.route(path, method, **options)
def delete(self, path=None, method='DELETE', **options):
""" Equals :meth:`route` with a ``DELETE`` method parameter. """
return self.route(path, method, **options)
def patch(self, path=None, method='PATCH', **options):
""" Equals :meth:`route` with a ``PATCH`` method parameter. """
return self.route(path, method, **options)
def error(self, code=500):
""" Decorator: Register an output handler for a HTTP error code"""
def wrapper(handler):
self.error_handler[int(code)] = handler
return handler
return wrapper
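    # Usage sketch (illustrative `app`; not part of this module):
    #
    #     app = Bottle()
    #
    #     @app.error(404)
    #     def not_found(err):          # receives the HTTPError instance
    #         response.content_type = 'text/plain'
    #         return 'Nothing here: %s' % request.path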
def default_error_handler(self, res):
return tob(template(ERROR_PAGE_TEMPLATE, e=res))
def _handle(self, environ):
path = environ['bottle.raw_path'] = environ['PATH_INFO']
if py3k:
environ['PATH_INFO'] = path.encode('latin1').decode('utf8', 'ignore')
def _inner_handle():
# Maybe pass variables as locals for better performance?
try:
route, args = self.router.match(environ)
environ['route.handle'] = route
environ['bottle.route'] = route
environ['route.url_args'] = args
return route.call(**args)
except HTTPResponse:
return _e()
except RouteReset:
route.reset()
return _inner_handle()
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception:
if not self.catchall: raise
stacktrace = format_exc()
environ['wsgi.errors'].write(stacktrace)
return HTTPError(500, "Internal Server Error", _e(), stacktrace)
try:
out = None
environ['bottle.app'] = self
request.bind(environ)
response.bind()
self.trigger_hook('before_request')
out = _inner_handle()
            return out
finally:
if isinstance(out, HTTPResponse):
out.apply(response)
self.trigger_hook('after_request')
def _cast(self, out, peek=None):
""" Try to convert the parameter into something WSGI compatible and set
correct HTTP headers when possible.
Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
iterable of strings and iterable of unicodes
"""
# Empty output is done here
if not out:
if 'Content-Length' not in response:
response['Content-Length'] = 0
return []
# Join lists of byte or unicode strings. Mixed lists are NOT supported
if isinstance(out, (tuple, list))\
and isinstance(out[0], (bytes, unicode)):
out = out[0][0:0].join(out) # b'abc'[0:0] -> b''
# Encode unicode strings
if isinstance(out, unicode):
out = out.encode(response.charset)
# Byte Strings are just returned
if isinstance(out, bytes):
if 'Content-Length' not in response:
response['Content-Length'] = len(out)
return [out]
        # HTTPError or HTTPResponse (recursive, because they may wrap anything)
# TODO: Handle these explicitly in handle() or make them iterable.
if isinstance(out, HTTPError):
out.apply(response)
out = self.error_handler.get(out.status_code,
self.default_error_handler)(out)
return self._cast(out)
if isinstance(out, HTTPResponse):
out.apply(response)
return self._cast(out.body)
# File-like objects.
if hasattr(out, 'read'):
if 'wsgi.file_wrapper' in request.environ:
return request.environ['wsgi.file_wrapper'](out)
elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
return WSGIFileWrapper(out)
# Handle Iterables. We peek into them to detect their inner type.
try:
iout = iter(out)
first = next(iout)
while not first:
first = next(iout)
except StopIteration:
return self._cast('')
except HTTPResponse:
first = _e()
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except:
if not self.catchall: raise
first = HTTPError(500, 'Unhandled exception', _e(), format_exc())
# These are the inner types allowed in iterator or generator objects.
if isinstance(first, HTTPResponse):
return self._cast(first)
elif isinstance(first, bytes):
new_iter = itertools.chain([first], iout)
elif isinstance(first, unicode):
encoder = lambda x: x.encode(response.charset)
new_iter = imap(encoder, itertools.chain([first], iout))
else:
msg = 'Unsupported response type: %s' % type(first)
return self._cast(HTTPError(500, msg))
if hasattr(out, 'close'):
new_iter = _closeiter(new_iter, out.close)
return new_iter
def wsgi(self, environ, start_response):
""" The bottle WSGI-interface. """
try:
out = self._cast(self._handle(environ))
# rfc2616 section 4.3
if response._status_code in (100, 101, 204, 304)\
or environ['REQUEST_METHOD'] == 'HEAD':
if hasattr(out, 'close'): out.close()
out = []
start_response(response._status_line, response.headerlist)
return out
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except:
if not self.catchall: raise
err = '<h1>Critical error while processing request: %s</h1>' \
% html_escape(environ.get('PATH_INFO', '/'))
if DEBUG:
err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \
'<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \
% (html_escape(repr(_e())), html_escape(format_exc()))
environ['wsgi.errors'].write(err)
headers = [('Content-Type', 'text/html; charset=UTF-8')]
start_response('500 INTERNAL SERVER ERROR', headers, sys.exc_info())
return [tob(err)]
def __call__(self, environ, start_response):
""" Each instance of :class:'Bottle' is a WSGI application. """
return self.wsgi(environ, start_response)
def __enter__(self):
""" Use this application as default for all module-level shortcuts. """
default_app.push(self)
return self
def __exit__(self, exc_type, exc_value, traceback):
default_app.pop()
def __setattr__(self, name, value):
if name in self.__dict__:
raise AttributeError("Attribute %s already defined. Plugin conflict?" % name)
self.__dict__[name] = value
###############################################################################
# HTTP and WSGI Tools ##########################################################
###############################################################################
class BaseRequest(object):
""" A wrapper for WSGI environment dictionaries that adds a lot of
convenient access methods and properties. Most of them are read-only.
Adding new attributes to a request actually adds them to the environ
dictionary (as 'bottle.request.ext.<name>'). This is the recommended
way to store and access request-specific data.
"""
__slots__ = ('environ', )
#: Maximum size of memory buffer for :attr:`body` in bytes.
MEMFILE_MAX = 102400
def __init__(self, environ=None):
""" Wrap a WSGI environ dictionary. """
#: The wrapped WSGI environ dictionary. This is the only real attribute.
#: All other attributes actually are read-only properties.
self.environ = {} if environ is None else environ
self.environ['bottle.request'] = self
@DictProperty('environ', 'bottle.app', read_only=True)
def app(self):
""" Bottle application handling this request. """
raise RuntimeError('This request is not connected to an application.')
@DictProperty('environ', 'bottle.route', read_only=True)
def route(self):
""" The bottle :class:`Route` object that matches this request. """
raise RuntimeError('This request is not connected to a route.')
@DictProperty('environ', 'route.url_args', read_only=True)
def url_args(self):
""" The arguments extracted from the URL. """
raise RuntimeError('This request is not connected to a route.')
@property
def path(self):
""" The value of ``PATH_INFO`` with exactly one prefixed slash (to fix
broken clients and avoid the "empty path" edge case). """
return '/' + self.environ.get('PATH_INFO', '').lstrip('/')
@property
def method(self):
""" The ``REQUEST_METHOD`` value as an uppercase string. """
return self.environ.get('REQUEST_METHOD', 'GET').upper()
@DictProperty('environ', 'bottle.request.headers', read_only=True)
def headers(self):
""" A :class:`WSGIHeaderDict` that provides case-insensitive access to
HTTP request headers. """
return WSGIHeaderDict(self.environ)
def get_header(self, name, default=None):
""" Return the value of a request header, or a given default value. """
return self.headers.get(name, default)
@DictProperty('environ', 'bottle.request.cookies', read_only=True)
def cookies(self):
""" Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT
decoded. Use :meth:`get_cookie` if you expect signed cookies. """
cookies = SimpleCookie(self.environ.get('HTTP_COOKIE', '')).values()
return FormsDict((c.key, c.value) for c in cookies)
def get_cookie(self, key, default=None, secret=None):
""" Return the content of a cookie. To read a `Signed Cookie`, the
`secret` must match the one used to create the cookie (see
:meth:`BaseResponse.set_cookie`). If anything goes wrong (missing
cookie or wrong signature), return a default value. """
value = self.cookies.get(key)
if secret and value:
dec = cookie_decode(value, secret) # (key, value) tuple or None
return dec[1] if dec and dec[0] == key else default
return value or default
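    # Usage sketch for signed cookies (illustrative routes; the same `secret`
    # must be passed to set_cookie() and get_cookie()):
    #
    #     SECRET = 'change-me'
    #
    #     @route('/login')
    #     def login():
    #         response.set_cookie('account', {'user': 'alice'}, secret=SECRET)
    #         return 'Logged in.'
    #
    #     @route('/me')
    #     def me():
    #         account = request.get_cookie('account', secret=SECRET)
    #         return 'Hello %s' % (account['user'] if account else 'stranger')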
@DictProperty('environ', 'bottle.request.query', read_only=True)
def query(self):
""" :attr:`query_string`解析成:class:`FormsDict`.
这些值有时称为“URL参数”或“GET参数”,
但不能与“URL通配符”混淆,因为它们由:class:`Router`提供。"""
get = self.environ['bottle.get'] = FormsDict()
pairs = _parse_qsl(self.environ.get('QUERY_STRING', ''))
for key, value in pairs:
get[key] = value
return get
@DictProperty('environ', 'bottle.request.forms', read_only=True)
def forms(self):
""" Form values parsed from an `url-encoded` or `multipart/form-data`
encoded POST or PUT request body. The result is returned as a
:class:`FormsDict`. All keys and values are strings. File uploads
are stored separately in :attr:`files`. """
forms = FormsDict()
for name, item in self.POST.allitems():
if not isinstance(item, FileUpload):
forms[name] = item
return forms
@DictProperty('environ', 'bottle.request.params', read_only=True)
def params(self):
""" A :class:`FormsDict` with the combined values of :attr:`query` and
:attr:`forms`. File uploads are stored in :attr:`files`. """
params = FormsDict()
for key, value in self.query.allitems():
params[key] = value
for key, value in self.forms.allitems():
params[key] = value
return params
@DictProperty('environ', 'bottle.request.files', read_only=True)
def files(self):
""" File uploads parsed from `multipart/form-data` encoded POST or PUT
request body. The values are instances of :class:`FileUpload`.
"""
files = FormsDict()
for name, item in self.POST.allitems():
if isinstance(item, FileUpload):
files[name] = item
return files
@DictProperty('environ', 'bottle.request.json', read_only=True)
def json(self):
""" If the ``Content-Type`` header is ``application/json``, this
property holds the parsed content of the request body. Only requests
smaller than :attr:`MEMFILE_MAX` are processed to avoid memory
exhaustion. Invalid JSON raises a 400 error response. """
ctype = self.environ.get('CONTENT_TYPE', '').lower().split(';')[0]
if ctype == 'application/json':
b = self._get_body_string()
if not b:
return None
try:
return json_loads(b)
except (ValueError, TypeError):
raise HTTPError(400, 'Invalid JSON')
return None
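    # Usage sketch (illustrative route; assumes the module-level `post` and
    # `abort` shortcuts):
    #
    #     @post('/api/items')
    #     def create_item():
    #         data = request.json          # dict/list or None; 400 on bad JSON
    #         if data is None:
    #             abort(400, 'A JSON body is required.')
    #         return {'created': data.get('name')}   # dict -> JSONPlugin output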
def _iter_body(self, read, bufsize):
maxread = max(0, self.content_length)
while maxread:
part = read(min(maxread, bufsize))
if not part: break
yield part
maxread -= len(part)
@staticmethod
def _iter_chunked(read, bufsize):
err = HTTPError(400, 'Error while parsing chunked transfer body.')
rn, sem, bs = tob('\r\n'), tob(';'), tob('')
while True:
header = read(1)
while header[-2:] != rn:
c = read(1)
header += c
if not c: raise err
if len(header) > bufsize: raise err
size, _, _ = header.partition(sem)
try:
maxread = int(tonat(size.strip()), 16)
except ValueError:
raise err
if maxread == 0: break
buff = bs
while maxread > 0:
if not buff:
buff = read(min(maxread, bufsize))
part, buff = buff[:maxread], buff[maxread:]
if not part: raise err
yield part
maxread -= len(part)
if read(2) != rn:
raise err
@DictProperty('environ', 'bottle.request.body', read_only=True)
def _body(self):
try:
read_func = self.environ['wsgi.input'].read
except KeyError:
self.environ['wsgi.input'] = BytesIO()
return self.environ['wsgi.input']
body_iter = self._iter_chunked if self.chunked else self._iter_body
body, body_size, is_temp_file = BytesIO(), 0, False
for part in body_iter(read_func, self.MEMFILE_MAX):
body.write(part)
body_size += len(part)
if not is_temp_file and body_size > self.MEMFILE_MAX:
body, tmp = TemporaryFile(mode='w+b'), body
body.write(tmp.getvalue())
del tmp
is_temp_file = True
self.environ['wsgi.input'] = body
body.seek(0)
return body
def _get_body_string(self):
""" read body until content-length or MEMFILE_MAX into a string. Raise
HTTPError(413) on requests that are to large. """
clen = self.content_length
if clen > self.MEMFILE_MAX:
raise HTTPError(413, 'Request entity too large')
if clen < 0: clen = self.MEMFILE_MAX + 1
data = self.body.read(clen)
if len(data) > self.MEMFILE_MAX: # Fail fast
raise HTTPError(413, 'Request entity too large')
return data
@property
def body(self):
""" The HTTP request body as a seek-able file-like object. Depending on
:attr:`MEMFILE_MAX`, this is either a temporary file or a
:class:`io.BytesIO` instance. Accessing this property for the first
time reads and replaces the ``wsgi.input`` environ variable.
Subsequent accesses just do a `seek(0)` on the file object. """
self._body.seek(0)
return self._body
@property
def chunked(self):
""" True if Chunked transfer encoding was. """
return 'chunked' in self.environ.get(
'HTTP_TRANSFER_ENCODING', '').lower()
#: An alias for :attr:`query`.
GET = query
@DictProperty('environ', 'bottle.request.post', read_only=True)
def POST(self):
""" The values of :attr:`forms` and :attr:`files` combined into a single
:class:`FormsDict`. Values are either strings (form values) or
instances of :class:`cgi.FieldStorage` (file uploads).
"""
post = FormsDict()
# We default to application/x-www-form-urlencoded for everything that
# is not multipart and take the fast path (also: 3.1 workaround)
if not self.content_type.startswith('multipart/'):
pairs = _parse_qsl(tonat(self._get_body_string(), 'latin1'))
for key, value in pairs:
post[key] = value
return post
safe_env = {'QUERY_STRING': ''} # Build a safe environment for cgi
for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
if key in self.environ: safe_env[key] = self.environ[key]
args = dict(fp=self.body, environ=safe_env, keep_blank_values=True)
if py31:
args['fp'] = NCTextIOWrapper(args['fp'],
encoding='utf8',
newline='\n')
elif py3k:
args['encoding'] = 'utf8'
data = cgi.FieldStorage(**args)
self['_cgi.FieldStorage'] = data #http://bugs.python.org/issue18394
data = data.list or []
for item in data:
if item.filename:
post[item.name] = FileUpload(item.file, item.name,
item.filename, item.headers)
else:
post[item.name] = item.value
return post
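    # Usage sketch (illustrative route): string fields arrive in `forms`,
    # uploads in `files`, and `params` merges query and form values.
    #
    #     @post('/search')
    #     def search():
    #         term = request.params.get('q', '')   # query string or form field
    #         return 'Searching for %r' % term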
@property
def url(self):
""" The full request URI including hostname and scheme. If your app
lives behind a reverse proxy or load balancer and you get confusing
results, make sure that the ``X-Forwarded-Host`` header is set
correctly. """
return self.urlparts.geturl()
@DictProperty('environ', 'bottle.request.urlparts', read_only=True)
def urlparts(self):
""" The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.
The tuple contains (scheme, host, path, query_string and fragment),
but the fragment is always empty because it is not visible to the
server. """
env = self.environ
http = env.get('HTTP_X_FORWARDED_PROTO') \
or env.get('wsgi.url_scheme', 'http')
host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST')
if not host:
# HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients.
host = env.get('SERVER_NAME', '127.0.0.1')
port = env.get('SERVER_PORT')
if port and port != ('80' if http == 'http' else '443'):
host += ':' + port
path = urlquote(self.fullpath)
return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '')
@property
def fullpath(self):
""" Request path including :attr:`script_name` (if present). """
return urljoin(self.script_name, self.path.lstrip('/'))
@property
def query_string(self):
""" The raw :attr:`query` part of the URL (everything in between ``?``
and ``#``) as a string. """
return self.environ.get('QUERY_STRING', '')
@property
def script_name(self):
""" The initial portion of the URL's `path` that was removed by a higher
level (server or routing middleware) before the application was
            called. This script path is returned with leading and trailing
slashes. """
script_name = self.environ.get('SCRIPT_NAME', '').strip('/')
return '/' + script_name + '/' if script_name else '/'
def path_shift(self, shift=1):
""" Shift path segments from :attr:`path` to :attr:`script_name` and
vice versa.
:param shift: The number of path segments to shift. May be negative
to change the shift direction. (default: 1)
"""
script, path = path_shift(self.environ.get('SCRIPT_NAME', '/'), self.path, shift)
self['SCRIPT_NAME'], self['PATH_INFO'] = script, path
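    # Illustration (hypothetical request): path_shift() moves segments between
    # SCRIPT_NAME and PATH_INFO.
    #
    #     # before: SCRIPT_NAME='/'    PATH_INFO='/a/b/c'
    #     # request.path_shift(1)
    #     # after:  SCRIPT_NAME='/a'   PATH_INFO='/b/c'
    #     # request.path_shift(-1) restores the original split.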
@property
def content_length(self):
""" The request body length as an integer. The client is responsible to
set this header. Otherwise, the real length of the body is unknown
and -1 is returned. In this case, :attr:`body` will be empty. """
return int(self.environ.get('CONTENT_LENGTH') or -1)
@property
def content_type(self):
""" The Content-Type header as a lowercase-string (default: empty). """
return self.environ.get('CONTENT_TYPE', '').lower()
@property
def is_xhr(self):
""" True if the request was triggered by a XMLHttpRequest. This only
works with JavaScript libraries that support the `X-Requested-With`
header (most of the popular libraries do). """
requested_with = self.environ.get('HTTP_X_REQUESTED_WITH', '')
return requested_with.lower() == 'xmlhttprequest'
@property
def is_ajax(self):
""" Alias for :attr:`is_xhr`. "Ajax" is not the right term. """
return self.is_xhr
@property
def auth(self):
""" HTTP authentication data as a (user, password) tuple. This
implementation currently supports basic (not digest) authentication
only. If the authentication happened at a higher level (e.g. in the
front web-server or a middleware), the password field is None, but
the user field is looked up from the ``REMOTE_USER`` environ
variable. On any errors, None is returned. """
basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION', ''))
if basic: return basic
ruser = self.environ.get('REMOTE_USER')
if ruser: return (ruser, None)
return None
@property
def remote_route(self):
""" A list of all IPs that were involved in this request, starting with
            the client IP and followed by zero or more proxies. This only
            works if all proxies support the ``X-Forwarded-For`` header. Note
that this information can be forged by malicious clients. """
proxy = self.environ.get('HTTP_X_FORWARDED_FOR')
if proxy: return [ip.strip() for ip in proxy.split(',')]
remote = self.environ.get('REMOTE_ADDR')
return [remote] if remote else []
@property
def remote_addr(self):
""" The client IP as a string. Note that this information can be forged
by malicious clients. """
route = self.remote_route
return route[0] if route else None
def copy(self):
""" Return a new :class:`Request` with a shallow :attr:`environ` copy. """
return Request(self.environ.copy())
def get(self, value, default=None):
return self.environ.get(value, default)
def __getitem__(self, key):
return self.environ[key]
def __delitem__(self, key):
self[key] = ""
del (self.environ[key])
def __iter__(self):
return iter(self.environ)
def __len__(self):
return len(self.environ)
def keys(self):
return self.environ.keys()
def __setitem__(self, key, value):
""" Change an environ value and clear all caches that depend on it. """
if self.environ.get('bottle.request.readonly'):
raise KeyError('The environ dictionary is read-only.')
self.environ[key] = value
todelete = ()
if key == 'wsgi.input':
todelete = ('body', 'forms', 'files', 'params', 'post', 'json')
elif key == 'QUERY_STRING':
todelete = ('query', 'params')
elif key.startswith('HTTP_'):
todelete = ('headers', 'cookies')
for key in todelete:
self.environ.pop('bottle.request.' + key, None)
def __repr__(self):
return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url)
def __getattr__(self, name):
""" Search in self.environ for additional user defined attributes. """
try:
var = self.environ['bottle.request.ext.%s' % name]
return var.__get__(self) if hasattr(var, '__get__') else var
except KeyError:
raise AttributeError('Attribute %r not defined.' % name)
def __setattr__(self, name, value):
if name == 'environ': return object.__setattr__(self, name, value)
key = 'bottle.request.ext.%s' % name
if key in self.environ:
raise AttributeError("Attribute already defined: %s" % name)
self.environ[key] = value
    def __delattr__(self, name):
try:
del self.environ['bottle.request.ext.%s' % name]
except KeyError:
raise AttributeError("Attribute not defined: %s" % name)
def _hkey(s):
return s.title().replace('_', '-')
class HeaderProperty(object):
def __init__(self, name, reader=None, writer=str, default=''):
self.name, self.default = name, default
self.reader, self.writer = reader, writer
self.__doc__ = 'Current value of the %r header.' % name.title()
def __get__(self, obj, _):
if obj is None: return self
value = obj.headers.get(self.name, self.default)
return self.reader(value) if self.reader else value
def __set__(self, obj, value):
obj.headers[self.name] = self.writer(value)
def __delete__(self, obj):
del obj.headers[self.name]
class BaseResponse(object):
""" Storage class for a response body as well as headers and cookies.
This class does support dict-like case-insensitive item-access to
headers, but is NOT a dict. Most notably, iterating over a response
yields parts of the body and not the headers.
:param body: The response body as one of the supported types.
:param status: Either an HTTP status code (e.g. 200) or a status line
including the reason phrase (e.g. '200 OK').
:param headers: A dictionary or a list of name-value pairs.
Additional keyword arguments are added to the list of headers.
Underscores in the header name are replaced with dashes.
"""
default_status = 200
default_content_type = 'text/html; charset=UTF-8'
# Header blacklist for specific response codes
# (rfc2616 section 10.2.3 and 10.3.5)
bad_headers = {
204: set(('Content-Type', )),
304: set(('Allow', 'Content-Encoding', 'Content-Language',
'Content-Length', 'Content-Range', 'Content-Type',
'Content-Md5', 'Last-Modified'))
}
def __init__(self, body='', status=None, headers=None, **more_headers):
self._cookies = None
self._headers = {}
self.body = body
self.status = status or self.default_status
if headers:
if isinstance(headers, dict):
headers = headers.items()
for name, value in headers:
self.add_header(name, value)
if more_headers:
for name, value in more_headers.items():
self.add_header(name, value)
def copy(self, cls=None):
""" Returns a copy of self. """
cls = cls or BaseResponse
assert issubclass(cls, BaseResponse)
copy = cls()
copy.status = self.status
copy._headers = dict((k, v[:]) for (k, v) in self._headers.items())
if self._cookies:
copy._cookies = SimpleCookie()
copy._cookies.load(self._cookies.output(header=''))
return copy
def __iter__(self):
return iter(self.body)
def close(self):
if hasattr(self.body, 'close'):
self.body.close()
@property
def status_line(self):
""" The HTTP status line as a string (e.g. ``404 Not Found``)."""
return self._status_line
@property
def status_code(self):
""" The HTTP status code as an integer (e.g. 404)."""
return self._status_code
def _set_status(self, status):
if isinstance(status, int):
code, status = status, _HTTP_STATUS_LINES.get(status)
elif ' ' in status:
status = status.strip()
code = int(status.split()[0])
else:
raise ValueError('String status line without a reason phrase.')
if not 100 <= code <= 999:
raise ValueError('Status code out of range.')
self._status_code = code
self._status_line = str(status or ('%d Unknown' % code))
def _get_status(self):
return self._status_line
status = property(
_get_status, _set_status, None,
''' A writeable property to change the HTTP response status. It accepts
either a numeric code (100-999) or a string with a custom reason
phrase (e.g. "404 Brain not found"). Both :data:`status_line` and
:data:`status_code` are updated accordingly. The return value is
always a status string. ''')
del _get_status, _set_status
@property
def headers(self):
""" An instance of :class:`HeaderDict`, a case-insensitive dict-like
view on the response headers. """
hdict = HeaderDict()
hdict.dict = self._headers
return hdict
def __contains__(self, name):
return _hkey(name) in self._headers
def __delitem__(self, name):
del self._headers[_hkey(name)]
def __getitem__(self, name):
return self._headers[_hkey(name)][-1]
def __setitem__(self, name, value):
self._headers[_hkey(name)] = [value if isinstance(value, unicode) else
str(value)]
def get_header(self, name, default=None):
""" Return the value of a previously defined header. If there is no
header with that name, return a default value. """
return self._headers.get(_hkey(name), [default])[-1]
def set_header(self, name, value):
""" Create a new response header, replacing any previously defined
headers with the same name. """
self._headers[_hkey(name)] = [value if isinstance(value, unicode)
else str(value)]
def add_header(self, name, value):
""" Add an additional response header, not removing duplicates. """
self._headers.setdefault(_hkey(name), []).append(
value if isinstance(value, unicode) else str(value))
def iter_headers(self):
""" Yield (header, value) tuples, skipping headers that are not
allowed with the current response status code. """
return self.headerlist
@property
def headerlist(self):
""" WSGI conform list of (header, value) tuples. """
out = []
headers = list(self._headers.items())
if 'Content-Type' not in self._headers:
headers.append(('Content-Type', [self.default_content_type]))
if self._status_code in self.bad_headers:
bad_headers = self.bad_headers[self._status_code]
headers = [h for h in headers if h[0] not in bad_headers]
out += [(name, val) for (name, vals) in headers for val in vals]
if self._cookies:
for c in self._cookies.values():
out.append(('Set-Cookie', c.OutputString()))
if py3k:
return [(k, v.encode('utf8').decode('latin1')) for (k, v) in out]
else:
return [(k, v.encode('utf8') if isinstance(v, unicode) else v)
for (k, v) in out]
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int)
expires = HeaderProperty(
'Expires',
reader=lambda x: datetime.utcfromtimestamp(parse_date(x)),
writer=lambda x: http_date(x))
@property
def charset(self, default='UTF-8'):
""" Return the charset specified in the content-type header (default: utf8). """
if 'charset=' in self.content_type:
return self.content_type.split('charset=')[-1].split(';')[0].strip()
return default
def set_cookie(self, name, value, secret=None, **options):
""" Create a new cookie or replace an old one. If the `secret` parameter is
set, create a `Signed Cookie` (described below).
:param name: the name of the cookie.
:param value: the value of the cookie.
:param secret: a signature key required for signed cookies.
Additionally, this method accepts all RFC 2109 attributes that are
supported by :class:`cookie.Morsel`, including:
:param max_age: maximum age in seconds. (default: None)
:param expires: a datetime object or UNIX timestamp. (default: None)
:param domain: the domain that is allowed to read the cookie.
(default: current domain)
:param path: limits the cookie to a given path (default: current path)
:param secure: limit the cookie to HTTPS connections (default: off).
:param httponly: prevents client-side javascript to read this cookie
(default: off, requires Python 2.6 or newer).
If neither `expires` nor `max_age` is set (default), the cookie will
expire at the end of the browser session (as soon as the browser
window is closed).
Signed cookies may store any pickle-able object and are
cryptographically signed to prevent manipulation. Keep in mind that
cookies are limited to 4kb in most browsers.
Warning: Signed cookies are not encrypted (the client can still see
the content) and not copy-protected (the client can restore an old
            cookie). The main intention is to make pickling and unpickling
            safe, not to store secret information at client side.
"""
if not self._cookies:
self._cookies = SimpleCookie()
if secret:
value = touni(cookie_encode((name, value), secret))
elif not isinstance(value, basestring):
raise TypeError('Secret key missing for non-string Cookie.')
# Cookie size plus options must not exceed 4kb.
if len(name) + len(value) > 3800:
raise ValueError('Content does not fit into a cookie.')
self._cookies[name] = value
for key, value in options.items():
if key == 'max_age':
if isinstance(value, timedelta):
value = value.seconds + value.days * 24 * 3600
if key == 'expires':
if isinstance(value, (datedate, datetime)):
value = value.timetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
if key in ('secure', 'httponly') and not value:
continue
self._cookies[name][key.replace('_', '-')] = value
def delete_cookie(self, key, **kwargs):
""" Delete a cookie. Be sure to use the same `domain` and `path`
settings as used to create the cookie. """
kwargs['max_age'] = -1
kwargs['expires'] = 0
self.set_cookie(key, '', **kwargs)
def __repr__(self):
out = ''
for name, value in self.headerlist:
out += '%s: %s\n' % (name.title(), value.strip())
return out
def _local_property():
ls = threading.local()
def fget(_):
try:
return ls.var
except AttributeError:
raise RuntimeError("Request context not initialized.")
def fset(_, value):
ls.var = value
def fdel(_):
del ls.var
return property(fget, fset, fdel, 'Thread-local property')
class LocalRequest(BaseRequest):
""" A thread-local subclass of :class:`BaseRequest` with a different
set of attributes for each thread. There is usually only one global
instance of this class (:data:`request`). If accessed during a
request/response cycle, this instance always refers to the *current*
request (even on a multithreaded server). """
bind = BaseRequest.__init__
environ = _local_property()
class LocalResponse(BaseResponse):
""" A thread-local subclass of :class:`BaseResponse` with a different
set of attributes for each thread. There is usually only one global
instance of this class (:data:`response`). Its attributes are used
to build the HTTP response at the end of the request/response cycle.
"""
bind = BaseResponse.__init__
_status_line = _local_property()
_status_code = _local_property()
_cookies = _local_property()
_headers = _local_property()
body = _local_property()
Request = BaseRequest
Response = BaseResponse
class HTTPResponse(Response, BottleException):
def __init__(self, body='', status=None, headers=None, **more_headers):
super(HTTPResponse, self).__init__(body, status, headers, **more_headers)
def apply(self, other):
other._status_code = self._status_code
other._status_line = self._status_line
other._headers = self._headers
other._cookies = self._cookies
other.body = self.body
class HTTPError(HTTPResponse):
default_status = 500
def __init__(self,
status=None,
body=None,
exception=None,
traceback=None, **more_headers):
self.exception = exception
self.traceback = traceback
super(HTTPError, self).__init__(body, status, **more_headers)
###############################################################################
# Plugins ######################################################################
###############################################################################
class PluginError(BottleException):
pass
class JSONPlugin(object):
name = 'json'
api = 2
def __init__(self, json_dumps=json_dumps):
self.json_dumps = json_dumps
def apply(self, callback, _):
dumps = self.json_dumps
if not dumps: return callback
def wrapper(*a, **ka):
try:
rv = callback(*a, **ka)
except HTTPError:
rv = _e()
if isinstance(rv, dict):
#Attempt to serialize, raises exception on failure
json_response = dumps(rv)
#Set content type only if serialization successful
response.content_type = 'application/json'
return json_response
elif isinstance(rv, HTTPResponse) and isinstance(rv.body, dict):
rv.body = dumps(rv.body)
rv.content_type = 'application/json'
return rv
return wrapper
class TemplatePlugin(object):
""" This plugin applies the :func:`view` decorator to all routes with a
`template` config parameter. If the parameter is a tuple, the second
element must be a dict with additional options (e.g. `template_engine`)
or default variables for the template. """
name = 'template'
api = 2
def setup(self, app):
app.tpl = self
def apply(self, callback, route):
conf = route.config.get('template')
if isinstance(conf, (tuple, list)) and len(conf) == 2:
return view(conf[0], **conf[1])(callback)
elif isinstance(conf, str):
return view(conf)(callback)
else:
return callback
#: Not a plugin, but part of the plugin API. TODO: Find a better place.
class _ImportRedirect(object):
def __init__(self, name, impmask):
""" Create a virtual package that redirects imports (see PEP 302). """
self.name = name
self.impmask = impmask
self.module = sys.modules.setdefault(name, imp.new_module(name))
self.module.__dict__.update({
'__file__': __file__,
'__path__': [],
'__all__': [],
'__loader__': self
})
sys.meta_path.append(self)
def find_module(self, fullname, path=None):
if '.' not in fullname: return
packname = fullname.rsplit('.', 1)[0]
if packname != self.name: return
return self
def load_module(self, fullname):
if fullname in sys.modules: return sys.modules[fullname]
modname = fullname.rsplit('.', 1)[1]
realname = self.impmask % modname
__import__(realname)
module = sys.modules[fullname] = sys.modules[realname]
setattr(self.module, modname, module)
module.__loader__ = self
return module
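# Usage sketch: installing the redirect hook (the `bottle_sqlite` package name
# below is illustrative and assumed to be installed separately).
#
#     ext = _ImportRedirect('bottle.ext', 'bottle_%s').module
#     # afterwards, `import bottle.ext.sqlite` transparently imports and
#     # re-exports the top-level `bottle_sqlite` package.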
###############################################################################
# Common Utilities #############################################################
###############################################################################
class MultiDict(DictMixin):
""" This dict stores multiple values per key, but behaves exactly like a
normal dict in that it returns only the newest value for any given key.
There are special methods available to access the full list of values.
"""
def __init__(self, *a, **k):
self.dict = dict((k, [v]) for (k, v) in dict(*a, **k).items())
def __len__(self):
return len(self.dict)
def __iter__(self):
return iter(self.dict)
def __contains__(self, key):
return key in self.dict
def __delitem__(self, key):
del self.dict[key]
def __getitem__(self, key):
return self.dict[key][-1]
def __setitem__(self, key, value):
self.append(key, value)
def keys(self):
return self.dict.keys()
if py3k:
def values(self):
return (v[-1] for v in self.dict.values())
def items(self):
return ((k, v[-1]) for k, v in self.dict.items())
def allitems(self):
return ((k, v) for k, vl in self.dict.items() for v in vl)
iterkeys = keys
itervalues = values
iteritems = items
iterallitems = allitems
else:
def values(self):
return [v[-1] for v in self.dict.values()]
def items(self):
return [(k, v[-1]) for k, v in self.dict.items()]
def iterkeys(self):
return self.dict.iterkeys()
def itervalues(self):
return (v[-1] for v in self.dict.itervalues())
def iteritems(self):
return ((k, v[-1]) for k, v in self.dict.iteritems())
def iterallitems(self):
return ((k, v) for k, vl in self.dict.iteritems() for v in vl)
def allitems(self):
return [(k, v) for k, vl in self.dict.iteritems() for v in vl]
def get(self, key, default=None, index=-1, type=None):
""" Return the most recent value for a key.
:param default: The default value to be returned if the key is not
present or the type conversion fails.
:param index: An index for the list of available values.
:param type: If defined, this callable is used to cast the value
                into a specific type. Exceptions are suppressed and result in
                the default value being returned.
"""
try:
val = self.dict[key][index]
return type(val) if type else val
except Exception:
pass
return default
def append(self, key, value):
""" Add a new value to the list of values for this key. """
self.dict.setdefault(key, []).append(value)
def replace(self, key, value):
""" Replace the list of values with a single value. """
self.dict[key] = [value]
def getall(self, key):
""" Return a (possibly empty) list of values for a key. """
return self.dict.get(key) or []
#: Aliases for WTForms to mimic other multi-dict APIs (Django)
getone = get
getlist = getall
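# Usage sketch (illustrative data): every value is kept per key.
#
#     md = MultiDict(a=1)
#     md['a'] = 2            # appends instead of replacing
#     md.append('a', 3)
#     md['a']                # -> 3 (newest value wins for dict-style access)
#     md.getall('a')         # -> [1, 2, 3]
#     md.replace('a', 9)     # collapse the list to a single value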
class FormsDict(MultiDict):
""" This :class:`MultiDict` subclass is used to store request form data.
        In addition to the normal dict-like item access methods (which return
unmodified data as native strings), this container also supports
attribute-like access to its values. Attributes are automatically de-
or recoded to match :attr:`input_encoding` (default: 'utf8'). Missing
attributes default to an empty string. """
#: Encoding used for attribute values.
input_encoding = 'utf8'
#: If true (default), unicode strings are first encoded with `latin1`
#: and then decoded to match :attr:`input_encoding`.
recode_unicode = True
def _fix(self, s, encoding=None):
if isinstance(s, unicode) and self.recode_unicode: # Python 3 WSGI
return s.encode('latin1').decode(encoding or self.input_encoding)
elif isinstance(s, bytes): # Python 2 WSGI
return s.decode(encoding or self.input_encoding)
else:
return s
def decode(self, encoding=None):
""" Returns a copy with all keys and values de- or recoded to match
:attr:`input_encoding`. Some libraries (e.g. WTForms) want a
unicode dictionary. """
copy = FormsDict()
enc = copy.input_encoding = encoding or self.input_encoding
copy.recode_unicode = False
for key, value in self.allitems():
copy.append(self._fix(key, enc), self._fix(value, enc))
return copy
def getunicode(self, name, default=None, encoding=None):
""" Return the value as a unicode string, or the default. """
try:
return self._fix(self[name], encoding)
except (UnicodeError, KeyError):
return default
def __getattr__(self, name, default=unicode()):
# Without this guard, pickle generates a cryptic TypeError:
if name.startswith('__') and name.endswith('__'):
return super(FormsDict, self).__getattr__(name)
return self.getunicode(name, default=default)
class HeaderDict(MultiDict):
""" A case-insensitive version of :class:`MultiDict` that defaults to
replace the old value instead of appending it. """
def __init__(self, *a, **ka):
self.dict = {}
if a or ka: self.update(*a, **ka)
def __contains__(self, key):
return _hkey(key) in self.dict
def __delitem__(self, key):
del self.dict[_hkey(key)]
def __getitem__(self, key):
return self.dict[_hkey(key)][-1]
def __setitem__(self, key, value):
self.dict[_hkey(key)] = [value if isinstance(value, unicode) else
str(value)]
def append(self, key, value):
self.dict.setdefault(_hkey(key), []).append(
value if isinstance(value, unicode) else str(value))
def replace(self, key, value):
self.dict[_hkey(key)] = [value if isinstance(value, unicode) else
str(value)]
def getall(self, key):
return self.dict.get(_hkey(key)) or []
def get(self, key, default=None, index=-1):
return MultiDict.get(self, _hkey(key), default, index)
def filter(self, names):
for name in [_hkey(n) for n in names]:
if name in self.dict:
del self.dict[name]
class WSGIHeaderDict(DictMixin):
""" This dict-like class wraps a WSGI environ dict and provides convenient
access to HTTP_* fields. Keys and values are native strings
(2.x bytes or 3.x unicode) and keys are case-insensitive. If the WSGI
environment contains non-native string values, these are de- or encoded
using a lossless 'latin1' character set.
The API will remain stable even on changes to the relevant PEPs.
Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only one
that uses non-native strings.)
"""
#: List of keys that do not have a ``HTTP_`` prefix.
cgikeys = ('CONTENT_TYPE', 'CONTENT_LENGTH')
def __init__(self, environ):
self.environ = environ
def _ekey(self, key):
""" Translate header field name to CGI/WSGI environ key. """
key = key.replace('-', '_').upper()
if key in self.cgikeys:
return key
return 'HTTP_' + key
def raw(self, key, default=None):
""" Return the header value as is (may be bytes or unicode). """
return self.environ.get(self._ekey(key), default)
def __getitem__(self, key):
val = self.environ[self._ekey(key)]
if py3k:
if isinstance(val, unicode):
val = val.encode('latin1').decode('utf8')
else:
val = val.decode('utf8')
return val
def __setitem__(self, key, value):
raise TypeError("%s is read-only." % self.__class__)
def __delitem__(self, key):
raise TypeError("%s is read-only." % self.__class__)
def __iter__(self):
for key in self.environ:
if key[:5] == 'HTTP_':
yield _hkey(key[5:])
elif key in self.cgikeys:
yield _hkey(key)
def keys(self):
return [x for x in self]
def __len__(self):
return len(self.keys())
def __contains__(self, key):
return self._ekey(key) in self.environ
class ConfigDict(dict):
""" A dict-like configuration storage with additional support for
namespaces, validators, meta-data, on_change listeners and more.
"""
__slots__ = ('_meta', '_on_change')
def __init__(self):
self._meta = {}
self._on_change = lambda name, value: None
def load_module(self, path, squash):
""" Load values from a Python module.
:param squash: Squash nested dicts into namespaces by using
load_dict(), otherwise use update()
            Example: load_module('my.app.settings', True)
            Example: load_module('my.app.settings', False)
"""
config_obj = __import__(path)
obj = dict([(key, getattr(config_obj, key))
for key in dir(config_obj) if key.isupper()])
if squash:
self.load_dict(obj)
else:
self.update(obj)
return self
def load_config(self, filename):
""" Load values from an ``*.ini`` style config file.
If the config file contains sections, their names are used as
namespaces for the values within. The two special sections
``DEFAULT`` and ``bottle`` refer to the root namespace (no prefix).
"""
conf = ConfigParser()
conf.read(filename)
for section in conf.sections():
for key, value in conf.items(section):
if section not in ('DEFAULT', 'bottle'):
key = section + '.' + key
self[key] = value
return self
def load_dict(self, source, namespace=''):
""" Load values from a dictionary structure. Nesting can be used to
represent namespaces.
>>> c = ConfigDict()
>>> c.load_dict({'some': {'namespace': {'key': 'value'} } })
{'some.namespace.key': 'value'}
"""
for key, value in source.items():
if isinstance(key, basestring):
nskey = (namespace + '.' + key).strip('.')
if isinstance(value, dict):
self.load_dict(value, namespace=nskey)
else:
self[nskey] = value
else:
raise TypeError('Key has type %r (not a string)' % type(key))
return self
def update(self, *a, **ka):
""" If the first parameter is a string, all keys are prefixed with this
namespace. Apart from that it works just as the usual dict.update().
Example: ``update('some.namespace', key='value')`` """
prefix = ''
if a and isinstance(a[0], basestring):
prefix = a[0].strip('.') + '.'
a = a[1:]
for key, value in dict(*a, **ka).items():
self[prefix + key] = value
def setdefault(self, key, value):
if key not in self:
self[key] = value
return self[key]
def __setitem__(self, key, value):
if not isinstance(key, basestring):
raise TypeError('Key has type %r (not a string)' % type(key))
value = self.meta_get(key, 'filter', lambda x: x)(value)
if key in self and self[key] is value:
return
self._on_change(key, value)
dict.__setitem__(self, key, value)
def __delitem__(self, key):
self._on_change(key, None)
dict.__delitem__(self, key)
def meta_get(self, key, metafield, default=None):
""" Return the value of a meta field for a key. """
return self._meta.get(key, {}).get(metafield, default)
def meta_set(self, key, metafield, value):
""" Set the meta field for a key to a new value. This triggers the
on-change handler for existing keys. """
self._meta.setdefault(key, {})[metafield] = value
if key in self:
self[key] = self[key]
def meta_list(self, key):
""" Return an iterable of meta field names defined for a key. """
return self._meta.get(key, {}).keys()
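# Usage sketch (illustrative keys): nested dicts become dotted namespaces, and
# a 'filter' meta field is re-applied on every assignment.
#
#     c = ConfigDict()
#     c.load_dict({'db': {'port': '5432'}})   # -> {'db.port': '5432'}
#     c.meta_set('db.port', 'filter', int)    # re-applies to the existing value
#     c['db.port']                            # -> 5432 (now an int)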
class AppStack(list):
""" A stack-like list. Calling it returns the head of the stack. """
def __call__(self):
""" Return the current default application. """
return self[-1]
def push(self, value=None):
""" Add a new :class:`Bottle` instance to the stack """
if not isinstance(value, Bottle):
value = Bottle()
self.append(value)
return value
class WSGIFileWrapper(object):
def __init__(self, fp, buffer_size=1024 * 64):
self.fp, self.buffer_size = fp, buffer_size
for attr in ('fileno', 'close', 'read', 'readlines', 'tell', 'seek'):
if hasattr(fp, attr): setattr(self, attr, getattr(fp, attr))
def __iter__(self):
buff, read = self.buffer_size, self.read
while True:
part = read(buff)
if not part: return
yield part
class _closeiter(object):
""" This only exists to be able to attach a .close method to iterators that
do not support attribute assignment (most of itertools). """
def __init__(self, iterator, close=None):
self.iterator = iterator
self.close_callbacks = makelist(close)
def __iter__(self):
return iter(self.iterator)
def close(self):
for func in self.close_callbacks:
func()
class ResourceManager(object):
""" This class manages a list of search paths and helps to find and open
application-bound resources (files).
:param base: default value for :meth:`add_path` calls.
:param opener: callable used to open resources.
:param cachemode: controls which lookups are cached. One of 'all',
'found' or 'none'.
"""
def __init__(self, base='./', opener=open, cachemode='all'):
self.opener = opener
self.base = base
self.cachemode = cachemode
#: A list of search paths. See :meth:`add_path` for details.
self.path = []
#: A cache for resolved paths. ``res.cache.clear()`` clears the cache.
self.cache = {}
def add_path(self, path, base=None, index=None, create=False):
""" Add a new path to the list of search paths. Return False if the
path does not exist.
:param path: The new search path. Relative paths are turned into
an absolute and normalized form. If the path looks like a file
(not ending in `/`), the filename is stripped off.
:param base: Path used to absolutize relative search paths.
Defaults to :attr:`base` which defaults to ``os.getcwd()``.
:param index: Position within the list of search paths. Defaults
to last index (appends to the list).
The `base` parameter makes it easy to reference files installed
along with a python module or package::
res.add_path('./resources/', __file__)
"""
base = os.path.abspath(os.path.dirname(base or self.base))
path = os.path.abspath(os.path.join(base, os.path.dirname(path)))
path += os.sep
if path in self.path:
self.path.remove(path)
if create and not os.path.isdir(path):
os.makedirs(path)
if index is None:
self.path.append(path)
else:
self.path.insert(index, path)
self.cache.clear()
return os.path.exists(path)
def __iter__(self):
""" Iterate over all existing files in all registered paths. """
search = self.path[:]
while search:
path = search.pop()
if not os.path.isdir(path): continue
for name in os.listdir(path):
full = os.path.join(path, name)
if os.path.isdir(full): search.append(full)
else: yield full
def lookup(self, name):
""" Search for a resource and return an absolute file path, or `None`.
The :attr:`path` list is searched in order. The first match is
            returned. Symlinks are followed. The result is cached to speed up
future lookups. """
if name not in self.cache or DEBUG:
for path in self.path:
fpath = os.path.join(path, name)
if os.path.isfile(fpath):
if self.cachemode in ('all', 'found'):
self.cache[name] = fpath
return fpath
if self.cachemode == 'all':
self.cache[name] = None
return self.cache[name]
def open(self, name, mode='r', *args, **kwargs):
""" Find a resource and return a file object, or raise IOError. """
fname = self.lookup(name)
if not fname: raise IOError("Resource %r not found." % name)
return self.opener(fname, mode=mode, *args, **kwargs)
class FileUpload(object):
def __init__(self, fileobj, name, filename, headers=None):
""" Wrapper for file uploads. """
#: Open file(-like) object (BytesIO buffer or temporary file)
self.file = fileobj
#: Name of the upload form field
self.name = name
#: Raw filename as sent by the client (may contain unsafe characters)
self.raw_filename = filename
#: A :class:`HeaderDict` with additional headers (e.g. content-type)
self.headers = HeaderDict(headers) if headers else HeaderDict()
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int, default=-1)
@cached_property
def filename(self):
""" Name of the file on the client file system, but normalized to ensure
file system compatibility. An empty filename is returned as 'empty'.
Only ASCII letters, digits, dashes, underscores and dots are
allowed in the final filename. Accents are removed, if possible.
            Whitespace is replaced by a single dash. Leading or trailing dots
or dashes are removed. The filename is limited to 255 characters.
"""
fname = self.raw_filename
if not isinstance(fname, unicode):
fname = fname.decode('utf8', 'ignore')
fname = normalize('NFKD', fname)
fname = fname.encode('ASCII', 'ignore').decode('ASCII')
fname = os.path.basename(fname.replace('\\', os.path.sep))
fname = re.sub(r'[^a-zA-Z0-9-_.\s]', '', fname).strip()
fname = re.sub(r'[-\s]+', '-', fname).strip('.-')
return fname[:255] or 'empty'
def _copy_file(self, fp, chunk_size=2 ** 16):
read, write, offset = self.file.read, fp.write, self.file.tell()
while 1:
buf = read(chunk_size)
if not buf: break
write(buf)
self.file.seek(offset)
def save(self, destination, overwrite=False, chunk_size=2 ** 16):
""" Save file to disk or copy its content to an open file(-like) object.
If *destination* is a directory, :attr:`filename` is added to the
path. Existing files are not overwritten by default (IOError).
:param destination: File path, directory or file(-like) object.
:param overwrite: If True, replace existing files. (default: False)
:param chunk_size: Bytes to read at a time. (default: 64kb)
"""
        if isinstance(destination, basestring):  # Strings are paths; file-likes are handled below
if os.path.isdir(destination):
destination = os.path.join(destination, self.filename)
if not overwrite and os.path.exists(destination):
raise IOError('File exists.')
with open(destination, 'wb') as fp:
self._copy_file(fp, chunk_size)
else:
self._copy_file(destination, chunk_size)
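# Minimal route sketch for FileUpload, assuming a multipart form field named
# 'data' and a writable '/tmp/uploads' directory (both hypothetical).
def _example_file_upload():  # pragma: no cover - illustration only
    upload = request.files.get('data')             # FileUpload or None
    if upload and upload.content_type == 'text/plain':
        upload.save('/tmp/uploads')                # uses sanitized .filename
        return 'Stored as %s' % upload.filename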
###############################################################################
# Application Helper ###########################################################
###############################################################################
def abort(code=500, text='Unknown Error.'):
""" Aborts execution and causes a HTTP error. """
raise HTTPError(code, text)
def redirect(url, code=None):
""" Aborts execution and causes a 303 or 302 redirect, depending on
the HTTP protocol version. """
if not code:
code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302
res = response.copy(cls=HTTPResponse)
res.status = code
res.body = ""
res.set_header('Location', urljoin(request.url, url))
raise res
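# Minimal sketch of the two helpers above: both raise, so code after them
# never runs. The cookie name and paths are hypothetical.
def _example_abort_redirect():  # pragma: no cover - illustration only
    user = request.get_cookie('user')
    if not user:
        redirect('/login')            # raises HTTPResponse (302 or 303)
    if user == 'blocked':
        abort(403, 'Access denied.')  # raises HTTPError
    return 'Hello %s!' % user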
def _file_iter_range(fp, offset, bytes, maxread=1024 * 1024):
""" Yield chunks from a range in a file. No chunk is bigger than maxread."""
fp.seek(offset)
while bytes > 0:
part = fp.read(min(bytes, maxread))
if not part: break
bytes -= len(part)
yield part
def static_file(filename, root,
mimetype='auto',
download=False,
charset='UTF-8'):
""" Open a file in a safe way and return :exc:`HTTPResponse` with status
        code 200, 304, 403 or 404. The ``Content-Type``, ``Content-Encoding``,
``Content-Length`` and ``Last-Modified`` headers are set if possible.
Special support for ``If-Modified-Since``, ``Range`` and ``HEAD``
requests.
:param filename: Name or path of the file to send.
:param root: Root path for file lookups. Should be an absolute directory
path.
:param mimetype: Defines the content-type header (default: guess from
file extension)
:param download: If True, ask the browser to open a `Save as...` dialog
instead of opening the file with the associated program. You can
specify a custom filename as a string. If not specified, the
original filename is used (default: False).
:param charset: The charset to use for files with a ``text/*``
mime-type. (default: UTF-8)
"""
root = os.path.abspath(root) + os.sep
filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
headers = dict()
if not filename.startswith(root):
return HTTPError(403, "Access denied.")
if not os.path.exists(filename) or not os.path.isfile(filename):
return HTTPError(404, "File does not exist.")
if not os.access(filename, os.R_OK):
return HTTPError(403, "You do not have permission to access this file.")
if mimetype == 'auto':
if download and download != True:
mimetype, encoding = mimetypes.guess_type(download)
else:
mimetype, encoding = mimetypes.guess_type(filename)
if encoding: headers['Content-Encoding'] = encoding
if mimetype:
if mimetype[:5] == 'text/' and charset and 'charset' not in mimetype:
mimetype += '; charset=%s' % charset
headers['Content-Type'] = mimetype
if download:
download = os.path.basename(filename if download == True else download)
headers['Content-Disposition'] = 'attachment; filename="%s"' % download
stats = os.stat(filename)
headers['Content-Length'] = clen = stats.st_size
lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime))
headers['Last-Modified'] = lm
ims = request.environ.get('HTTP_IF_MODIFIED_SINCE')
if ims:
ims = parse_date(ims.split(";")[0].strip())
if ims is not None and ims >= int(stats.st_mtime):
headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT",
time.gmtime())
return HTTPResponse(status=304, **headers)
body = '' if request.method == 'HEAD' else open(filename, 'rb')
headers["Accept-Ranges"] = "bytes"
    if 'HTTP_RANGE' in request.environ:
        ranges = list(parse_range_header(request.environ['HTTP_RANGE'], clen))
if not ranges:
return HTTPError(416, "Requested Range Not Satisfiable")
offset, end = ranges[0]
headers["Content-Range"] = "bytes %d-%d/%d" % (offset, end - 1, clen)
headers["Content-Length"] = str(end - offset)
if body: body = _file_iter_range(body, offset, end - offset)
return HTTPResponse(body, status=206, **headers)
return HTTPResponse(body, **headers)
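# Minimal sketch of a catch-all static route, assuming a hypothetical
# './public' directory. The ':path' filter lets the wildcard match slashes.
def _example_static_route():  # pragma: no cover - illustration only
    @route('/static/<filepath:path>')
    def serve_static(filepath):
        return static_file(filepath, root='./public')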
###############################################################################
# HTTP Utilities and MISC (TODO) ###############################################
###############################################################################
def debug(mode=True):
""" Change the debug level.
There is only one debug level supported at the moment."""
global DEBUG
if mode: warnings.simplefilter('default')
DEBUG = bool(mode)
def http_date(value):
if isinstance(value, (datedate, datetime)):
value = value.utctimetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
if not isinstance(value, basestring):
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
return value
def parse_date(ims):
""" Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """
try:
ts = email.utils.parsedate_tz(ims)
return time.mktime(ts[:8] + (0, )) - (ts[9] or 0) - time.timezone
except (TypeError, ValueError, IndexError, OverflowError):
return None
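# Minimal sketch of the date helpers above: epoch seconds to an RFC 1123
# string and back. The timestamp value is arbitrary.
def _example_http_date():  # pragma: no cover - illustration only
    stamp = http_date(1000000000)   # 'Sun, 09 Sep 2001 01:46:40 GMT'
    assert parse_date(stamp) == 1000000000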
def parse_auth(header):
""" Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None"""
try:
method, data = header.split(None, 1)
if method.lower() == 'basic':
user, pwd = touni(base64.b64decode(tob(data))).split(':', 1)
return user, pwd
except (KeyError, ValueError):
return None
def parse_range_header(header, maxlen=0):
""" Yield (start, end) ranges parsed from a HTTP Range header. Skip
unsatisfiable ranges. The end index is non-inclusive."""
if not header or header[:6] != 'bytes=': return
ranges = [r.split('-', 1) for r in header[6:].split(',') if '-' in r]
for start, end in ranges:
try:
if not start: # bytes=-100 -> last 100 bytes
start, end = max(0, maxlen - int(end)), maxlen
            elif not end:  # bytes=100- -> all but the first 100 bytes
start, end = int(start), maxlen
else: # bytes=100-200 -> bytes 100-200 (inclusive)
start, end = int(start), min(int(end) + 1, maxlen)
if 0 <= start < end <= maxlen:
yield start, end
except ValueError:
pass
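# Minimal sketch of parse_range_header semantics for a 1000-byte resource;
# end indices are non-inclusive and unsatisfiable ranges are skipped.
def _example_parse_ranges():  # pragma: no cover - illustration only
    assert list(parse_range_header('bytes=0-99', 1000)) == [(0, 100)]
    assert list(parse_range_header('bytes=-100', 1000)) == [(900, 1000)]
    assert list(parse_range_header('bytes=900-', 1000)) == [(900, 1000)]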
def _parse_qsl(qs):
r = []
for pair in qs.replace(';', '&').split('&'):
if not pair: continue
nv = pair.split('=', 1)
if len(nv) != 2: nv.append('')
key = urlunquote(nv[0].replace('+', ' '))
value = urlunquote(nv[1].replace('+', ' '))
r.append((key, value))
return r
def _lscmp(a, b):
""" Compares two strings in a cryptographically safe way:
Runtime is not affected by length of common prefix. """
return not sum(0 if x == y else 1
for x, y in zip(a, b)) and len(a) == len(b)
def cookie_encode(data, key):
""" Encode and sign a pickle-able object. Return a (byte) string """
msg = base64.b64encode(pickle.dumps(data, -1))
sig = base64.b64encode(hmac.new(tob(key), msg).digest())
return tob('!') + sig + tob('?') + msg
def cookie_decode(data, key):
""" Verify and decode an encoded string. Return an object or None."""
data = tob(data)
if cookie_is_encoded(data):
sig, msg = data.split(tob('?'), 1)
if _lscmp(sig[1:], base64.b64encode(hmac.new(tob(key), msg).digest())):
return pickle.loads(base64.b64decode(msg))
return None
def cookie_is_encoded(data):
""" Return True if the argument looks like a encoded cookie."""
return bool(data.startswith(tob('!')) and tob('?') in data)
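# Minimal sketch of the signed-cookie round trip; the secret key is
# hypothetical. A wrong key makes cookie_decode() return None.
def _example_signed_cookie():  # pragma: no cover - illustration only
    token = cookie_encode({'uid': 42}, 's3cret')
    assert cookie_is_encoded(token)
    assert cookie_decode(token, 's3cret') == {'uid': 42}
    assert cookie_decode(token, 'wrong-key') is None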
def html_escape(string):
""" Escape HTML special characters ``&<>`` and quotes ``'"``. """
    return string.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')\
                 .replace('"', '&quot;').replace("'", '&#039;')
def html_quote(string):
""" Escape and quote a string to be used as an HTTP attribute."""
return '"%s"' % html_escape(string).replace('\n', ' ')\
.replace('\r', ' ').replace('\t', '	')
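# Minimal sketch of the escaping helpers above on untrusted input.
def _example_html_escape():  # pragma: no cover - illustration only
    assert html_escape('<b>&</b>') == '&lt;b&gt;&amp;&lt;/b&gt;'
    assert html_quote('a "b"\n') == '"a &quot;b&quot;&#10;"'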
def yieldroutes(func):
""" Return a generator for routes that match the signature (name, args)
of the func parameter. This may yield more than one route if the function
takes optional keyword arguments. The output is best described by example::
a() -> '/a'
b(x, y) -> '/b/<x>/<y>'
c(x, y=5) -> '/c/<x>' and '/c/<x>/<y>'
d(x=5, y=6) -> '/d' and '/d/<x>' and '/d/<x>/<y>'
"""
path = '/' + func.__name__.replace('__', '/').lstrip('/')
spec = getargspec(func)
argc = len(spec[0]) - len(spec[3] or [])
path += ('/<%s>' * argc) % tuple(spec[0][:argc])
yield path
for arg in spec[0][argc:]:
path += '/<%s>' % arg
yield path
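# Minimal sketch of yieldroutes(), mirroring the docstring examples above.
def _example_yieldroutes():  # pragma: no cover - illustration only
    def c(x, y=5):
        pass
    assert list(yieldroutes(c)) == ['/c/<x>', '/c/<x>/<y>']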
def path_shift(script_name, path_info, shift=1):
""" Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.
:return: The modified paths.
:param script_name: The SCRIPT_NAME path.
        :param path_info: The PATH_INFO path.
:param shift: The number of path fragments to shift. May be negative to
change the shift direction. (default: 1)
"""
if shift == 0: return script_name, path_info
pathlist = path_info.strip('/').split('/')
scriptlist = script_name.strip('/').split('/')
if pathlist and pathlist[0] == '': pathlist = []
if scriptlist and scriptlist[0] == '': scriptlist = []
if 0 < shift <= len(pathlist):
moved = pathlist[:shift]
scriptlist = scriptlist + moved
pathlist = pathlist[shift:]
elif 0 > shift >= -len(scriptlist):
moved = scriptlist[shift:]
pathlist = moved + pathlist
scriptlist = scriptlist[:shift]
else:
empty = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
raise AssertionError("Cannot shift. Nothing left from %s" % empty)
new_script_name = '/' + '/'.join(scriptlist)
new_path_info = '/' + '/'.join(pathlist)
if path_info.endswith('/') and pathlist: new_path_info += '/'
return new_script_name, new_path_info
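# Minimal sketch of path_shift() in both directions, as a mounted
# sub-application would use it. The paths are hypothetical.
def _example_path_shift():  # pragma: no cover - illustration only
    assert path_shift('/app', '/api/users', 1) == ('/app/api', '/users')
    assert path_shift('/app/api', '/users', -1) == ('/app', '/api/users')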
def auth_basic(check, realm="private", text="Access denied"):
""" Callback decorator to require HTTP auth (basic).
TODO: Add route(check_auth=...) parameter. """
def decorator(func):
@functools.wraps(func)
def wrapper(*a, **ka):
user, password = request.auth or (None, None)
if user is None or not check(user, password):
err = HTTPError(401, text)
err.add_header('WWW-Authenticate', 'Basic realm="%s"' % realm)
return err
return func(*a, **ka)
return wrapper
return decorator
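# Minimal sketch of auth_basic(); the credential check below is a
# hypothetical placeholder, not a recommended scheme.
def _example_auth_basic():  # pragma: no cover - illustration only
    def check(user, password):
        return user == 'admin' and password == 'secret'
    @route('/admin')
    @auth_basic(check, realm='admin area')
    def admin_page():
        return 'Hello, admin.'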
# Shortcuts for common Bottle methods.
# They all refer to the current default application.
def make_default_app_wrapper(name):
""" Return a callable that relays calls to the current default app. """
@functools.wraps(getattr(Bottle, name))
def wrapper(*a, **ka):
return getattr(app(), name)(*a, **ka)
return wrapper
route = make_default_app_wrapper('route')
get = make_default_app_wrapper('get')
post = make_default_app_wrapper('post')
put = make_default_app_wrapper('put')
delete = make_default_app_wrapper('delete')
patch = make_default_app_wrapper('patch')
error = make_default_app_wrapper('error')
mount = make_default_app_wrapper('mount')
hook = make_default_app_wrapper('hook')
install = make_default_app_wrapper('install')
uninstall = make_default_app_wrapper('uninstall')
url = make_default_app_wrapper('get_url')
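# Minimal sketch: the module-level shortcuts above let small apps skip an
# explicit Bottle() instance. The route and template string are hypothetical.
def _example_default_app():  # pragma: no cover - illustration only
    @get('/hello/<name>')
    def hello(name):
        return template('Hello {{name}}!', name=name)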
###############################################################################
# Server Adapter ###############################################################
###############################################################################
class ServerAdapter(object):
quiet = False
def __init__(self, host='127.0.0.1', port=8080, **options):
self.options = options
self.host = host
self.port = int(port)
def run(self, handler): # pragma: no cover
pass
def __repr__(self):
args = ', '.join(['%s=%s' % (k, repr(v))
for k, v in self.options.items()])
return "%s(%s)" % (self.__class__.__name__, args)
class CGIServer(ServerAdapter):
quiet = True
def run(self, handler): # pragma: no cover
from wsgiref.handlers import CGIHandler
def fixed_environ(environ, start_response):
environ.setdefault('PATH_INFO', '')
return handler(environ, start_response)
CGIHandler().run(fixed_environ)
class FlupFCGIServer(ServerAdapter):
def run(self, handler): # pragma: no cover
import flup.server.fcgi
self.options.setdefault('bindAddress', (self.host, self.port))
flup.server.fcgi.WSGIServer(handler, **self.options).run()
class WSGIRefServer(ServerAdapter):
def run(self, app): # pragma: no cover
from wsgiref.simple_server import make_server
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
import socket
class FixedHandler(WSGIRequestHandler):
def address_string(self): # Prevent reverse DNS lookups please.
return self.client_address[0]
def log_request(*args, **kw):
if not self.quiet:
return WSGIRequestHandler.log_request(*args, **kw)
handler_cls = self.options.get('handler_class', FixedHandler)
server_cls = self.options.get('server_class', WSGIServer)
if ':' in self.host: # Fix wsgiref for IPv6 addresses.
if getattr(server_cls, 'address_family') == socket.AF_INET:
class server_cls(server_cls):
address_family = socket.AF_INET6
self.srv = make_server(self.host, self.port, app, server_cls,
handler_cls)
        self.port = self.srv.server_port  # Update to the actual port (0 means a random free port)
try:
self.srv.serve_forever()
except KeyboardInterrupt:
self.srv.server_close() # Prevent ResourceWarning: unclosed socket
raise
class CherryPyServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from cherrypy import wsgiserver
self.options['bind_addr'] = (self.host, self.port)
self.options['wsgi_app'] = handler
certfile = self.options.get('certfile')
if certfile:
del self.options['certfile']
keyfile = self.options.get('keyfile')
if keyfile:
del self.options['keyfile']
server = wsgiserver.CherryPyWSGIServer(**self.options)
if certfile:
server.ssl_certificate = certfile
if keyfile:
server.ssl_private_key = keyfile
try:
server.start()
finally:
server.stop()
class WaitressServer(ServerAdapter):
def run(self, handler):
from waitress import serve
serve(handler, host=self.host, port=self.port, _quiet=self.quiet)
class PasteServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from paste import httpserver
from paste.translogger import TransLogger
handler = TransLogger(handler, setup_console_handler=(not self.quiet))
httpserver.serve(handler,
host=self.host,
port=str(self.port), **self.options)
class MeinheldServer(ServerAdapter):
def run(self, handler):
from meinheld import server
server.listen((self.host, self.port))
server.run(handler)
class FapwsServer(ServerAdapter):
""" Extremely fast webserver using libev. See http://www.fapws.org/ """
def run(self, handler): # pragma: no cover
import fapws._evwsgi as evwsgi
from fapws import base, config
port = self.port
if float(config.SERVER_IDENT[-2:]) > 0.4:
# fapws3 silently changed its API in 0.5
port = str(port)
evwsgi.start(self.host, port)
# fapws3 never releases the GIL. Complain upstream. I tried. No luck.
if 'BOTTLE_CHILD' in os.environ and not self.quiet:
_stderr("WARNING: Auto-reloading does not work with Fapws3.\n")
_stderr(" (Fapws3 breaks python thread support)\n")
evwsgi.set_base_module(base)
def app(environ, start_response):
environ['wsgi.multiprocess'] = False
return handler(environ, start_response)
evwsgi.wsgi_cb(('', app))
evwsgi.run()
class TornadoServer(ServerAdapter):
""" The super hyped asynchronous server by facebook. Untested. """
def run(self, handler): # pragma: no cover
import tornado.wsgi, tornado.httpserver, tornado.ioloop
container = tornado.wsgi.WSGIContainer(handler)
server = tornado.httpserver.HTTPServer(container)
server.listen(port=self.port, address=self.host)
tornado.ioloop.IOLoop.instance().start()
class AppEngineServer(ServerAdapter):
""" Adapter for Google App Engine. """
quiet = True
def run(self, handler):
from google.appengine.ext.webapp import util
# A main() function in the handler script enables 'App Caching'.
        # Let's make sure it is there. This _really_ improves performance.
module = sys.modules.get('__main__')
if module and not hasattr(module, 'main'):
module.main = lambda: util.run_wsgi_app(handler)
util.run_wsgi_app(handler)
class TwistedServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from twisted.web import server, wsgi
from twisted.python.threadpool import ThreadPool
from twisted.internet import reactor
thread_pool = ThreadPool()
thread_pool.start()
reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler))
reactor.listenTCP(self.port, factory, interface=self.host)
if not reactor.running:
reactor.run()
class DieselServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from diesel.protocols.wsgi import WSGIApplication
app = WSGIApplication(handler, port=self.port)
app.run()
class GeventServer(ServerAdapter):
""" Untested. Options:
* `fast` (default: False) uses libevent's http server, but has some
issues: No streaming, no pipelining, no SSL.
* See gevent.wsgi.WSGIServer() documentation for more options.
"""
def run(self, handler):
from gevent import wsgi, pywsgi, local
if not isinstance(threading.local(), local.local):
msg = "Bottle requires gevent.monkey.patch_all() (before import)"
raise RuntimeError(msg)
if not self.options.pop('fast', None): wsgi = pywsgi
self.options['log'] = None if self.quiet else 'default'
address = (self.host, self.port)
server = wsgi.WSGIServer(address, handler, **self.options)
if 'BOTTLE_CHILD' in os.environ:
import signal
signal.signal(signal.SIGINT, lambda s, f: server.stop())
server.serve_forever()
class GeventSocketIOServer(ServerAdapter):
def run(self, handler):
from socketio import server
address = (self.host, self.port)
server.SocketIOServer(address, handler, **self.options).serve_forever()
class GunicornServer(ServerAdapter):
""" Untested. See http://gunicorn.org/configure.html for options. """
def run(self, handler):
from gunicorn.app.base import Application
config = {'bind': "%s:%d" % (self.host, int(self.port))}
config.update(self.options)
class GunicornApplication(Application):
def init(self, parser, opts, args):
return config
def load(self):
return handler
GunicornApplication().run()
class EventletServer(ServerAdapter):
""" Untested. Options:
* `backlog` adjust the eventlet backlog parameter which is the maximum
number of queued connections. Should be at least 1; the maximum
value is system-dependent.
* `family`: (default is 2) socket family, optional. See socket
documentation for available families.
"""
def run(self, handler):
from eventlet import wsgi, listen, patcher
if not patcher.is_monkey_patched(os):
msg = "Bottle requires eventlet.monkey_patch() (before import)"
raise RuntimeError(msg)
socket_args = {}
for arg in ('backlog', 'family'):
try:
socket_args[arg] = self.options.pop(arg)
except KeyError:
pass
address = (self.host, self.port)
try:
wsgi.server(listen(address, **socket_args), handler,
log_output=(not self.quiet))
except TypeError:
# Fallback, if we have old version of eventlet
wsgi.server(listen(address), handler)
class RocketServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from rocket import Rocket
server = Rocket((self.host, self.port), 'wsgi', {'wsgi_app': handler})
server.start()
class BjoernServer(ServerAdapter):
""" Fast server written in C: https://github.com/jonashaag/bjoern """
def run(self, handler):
from bjoern import run
run(handler, self.host, self.port)
class AiohttpServer(ServerAdapter):
""" Untested.
aiohttp
https://pypi.python.org/pypi/aiohttp/
"""
def run(self, handler):
import asyncio
from aiohttp.wsgi import WSGIServerHttpProtocol
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
protocol_factory = lambda: WSGIServerHttpProtocol(
handler,
readpayload=True,
debug=(not self.quiet))
self.loop.run_until_complete(self.loop.create_server(protocol_factory,
self.host,
self.port))
if 'BOTTLE_CHILD' in os.environ:
import signal
signal.signal(signal.SIGINT, lambda s, f: self.loop.stop())
try:
self.loop.run_forever()
except KeyboardInterrupt:
self.loop.stop()
class AutoServer(ServerAdapter):
""" Untested. """
adapters = [WaitressServer, PasteServer, TwistedServer, CherryPyServer,
WSGIRefServer]
def run(self, handler):
for sa in self.adapters:
try:
return sa(self.host, self.port, **self.options).run(handler)
except ImportError:
pass
server_names = {
'cgi': CGIServer,
'flup': FlupFCGIServer,
'wsgiref': WSGIRefServer,
'waitress': WaitressServer,
'cherrypy': CherryPyServer,
'paste': PasteServer,
'fapws3': FapwsServer,
'tornado': TornadoServer,
'gae': AppEngineServer,
'twisted': TwistedServer,
'diesel': DieselServer,
'meinheld': MeinheldServer,
'gunicorn': GunicornServer,
'eventlet': EventletServer,
'gevent': GeventServer,
'geventSocketIO': GeventSocketIOServer,
'rocket': RocketServer,
'bjoern': BjoernServer,
'aiohttp': AiohttpServer,
'auto': AutoServer,
}
###############################################################################
# Application Control ##########################################################
###############################################################################
def load(target, **namespace):
""" Import a module or fetch an object from a module.
* ``package.module`` returns `module` as a module object.
* ``pack.mod:name`` returns the module variable `name` from `pack.mod`.
* ``pack.mod:func()`` calls `pack.mod.func()` and returns the result.
The last form accepts not only function calls, but any type of
expression. Keyword arguments passed to this function are available as
        local variables. Example: ``load('re:compile(x)', x='[a-z]')``
"""
module, target = target.split(":", 1) if ':' in target else (target, None)
if module not in sys.modules: __import__(module)
if not target: return sys.modules[module]
if target.isalnum(): return getattr(sys.modules[module], target)
package_name = module.split('.')[0]
namespace[package_name] = sys.modules[package_name]
return eval('%s.%s' % (module, target), namespace)
def load_app(target):
""" Load a bottle application from a module and make sure that the import
does not affect the current default application, but returns a separate
application object. See :func:`load` for the target parameter. """
global NORUN
NORUN, nr_old = True, NORUN
tmp = default_app.push() # Create a new "default application"
try:
rv = load(target) # Import the target module
return rv if callable(rv) else tmp
finally:
default_app.remove(tmp) # Remove the temporary added default application
NORUN = nr_old
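# Minimal sketch of the three target forms accepted by load(); the module
# and attribute names below are stdlib ones, picked for illustration.
def _example_load():  # pragma: no cover - illustration only
    mod = load('os.path')                    # module object
    func = load('os.path:join')              # module attribute
    rx = load('re:compile(x)', x='[a-z]+')   # evaluated expression
    return mod, func, rx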
_debug = debug
def run(app=None,
server='wsgiref',
host='127.0.0.1',
port=8080,
interval=1,
reloader=False,
quiet=False,
plugins=None,
debug=None,
config=None, **kargs):
""" Start a server instance. This method blocks until the server terminates.
:param app: WSGI application or target string supported by
:func:`load_app`. (default: :func:`default_app`)
:param server: Server adapter to use. See :data:`server_names` keys
for valid names or pass a :class:`ServerAdapter` subclass.
(default: `wsgiref`)
        :param host: Server address to bind to. Pass ``0.0.0.0`` to listen on
all interfaces including the external one. (default: 127.0.0.1)
:param port: Server port to bind to. Values below 1024 require root
privileges. (default: 8080)
:param reloader: Start auto-reloading server? (default: False)
:param interval: Auto-reloader interval in seconds (default: 1)
:param quiet: Suppress output to stdout and stderr? (default: False)
:param options: Options passed to the server adapter.
"""
if NORUN: return
if reloader and not os.environ.get('BOTTLE_CHILD'):
import subprocess
lockfile = None
try:
fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock')
os.close(fd) # We only need this file to exist. We never write to it
while os.path.exists(lockfile):
args = [sys.executable] + sys.argv
environ = os.environ.copy()
environ['BOTTLE_CHILD'] = 'true'
environ['BOTTLE_LOCKFILE'] = lockfile
p = subprocess.Popen(args, env=environ)
while p.poll() is None: # Busy wait...
os.utime(lockfile, None) # I am alive!
time.sleep(interval)
if p.poll() != 3:
if os.path.exists(lockfile): os.unlink(lockfile)
sys.exit(p.poll())
except KeyboardInterrupt:
pass
finally:
if os.path.exists(lockfile):
os.unlink(lockfile)
return
try:
if debug is not None: _debug(debug)
app = app or default_app()
if isinstance(app, basestring):
app = load_app(app)
if not callable(app):
raise ValueError("Application is not callable: %r" % app)
for plugin in plugins or []:
if isinstance(plugin, basestring):
plugin = load(plugin)
app.install(plugin)
if config:
app.config.update(config)
if server in server_names:
server = server_names.get(server)
if isinstance(server, basestring):
server = load(server)
if isinstance(server, type):
server = server(host=host, port=port, **kargs)
if not isinstance(server, ServerAdapter):
raise ValueError("Unknown or unsupported server: %r" % server)
server.quiet = server.quiet or quiet
if not server.quiet:
_stderr("Bottle v%s server starting up (using %s)...\n" %
(__version__, repr(server)))
_stderr("Listening on http://%s:%d/\n" %
(server.host, server.port))
_stderr("Hit Ctrl-C to quit.\n\n")
if reloader:
lockfile = os.environ.get('BOTTLE_LOCKFILE')
bgcheck = FileCheckerThread(lockfile, interval)
with bgcheck:
server.run(app)
if bgcheck.status == 'reload':
sys.exit(3)
else:
server.run(app)
except KeyboardInterrupt:
pass
except (SystemExit, MemoryError):
raise
except:
if not reloader: raise
if not getattr(server, 'quiet', quiet):
print_exc()
time.sleep(interval)
sys.exit(3)
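# Minimal sketch of a development setup for run(); host, port and server
# are the documented defaults made explicit.
def _example_run():  # pragma: no cover - illustration only
    run(host='127.0.0.1', port=8080, server='wsgiref',
        reloader=True, debug=True)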
class FileCheckerThread(threading.Thread):
""" Interrupt main-thread as soon as a changed module file is detected,
the lockfile gets deleted or gets to old. """
def __init__(self, lockfile, interval):
threading.Thread.__init__(self)
self.daemon = True
self.lockfile, self.interval = lockfile, interval
#: Is one of 'reload', 'error' or 'exit'
self.status = None
def run(self):
exists = os.path.exists
mtime = lambda p: os.stat(p).st_mtime
files = dict()
for module in list(sys.modules.values()):
path = getattr(module, '__file__', '')
if path[-4:] in ('.pyo', '.pyc'): path = path[:-1]
if path and exists(path): files[path] = mtime(path)
while not self.status:
if not exists(self.lockfile)\
or mtime(self.lockfile) < time.time() - self.interval - 5:
self.status = 'error'
thread.interrupt_main()
for path, lmtime in list(files.items()):
if not exists(path) or mtime(path) > lmtime:
self.status = 'reload'
thread.interrupt_main()
break
time.sleep(self.interval)
def __enter__(self):
self.start()
def __exit__(self, exc_type, *_):
if not self.status: self.status = 'exit' # silent exit
self.join()
return exc_type is not None and issubclass(exc_type, KeyboardInterrupt)
###############################################################################
# Template Adapters ############################################################
###############################################################################
class TemplateError(HTTPError):
def __init__(self, message):
HTTPError.__init__(self, 500, message)
class BaseTemplate(object):
""" Base class and minimal API for template adapters """
extensions = ['tpl', 'html', 'thtml', 'stpl']
settings = {} #used in prepare()
defaults = {} #used in render()
def __init__(self,
source=None,
name=None,
lookup=None,
encoding='utf8', **settings):
""" Create a new template.
If the source parameter (str or buffer) is missing, the name argument
is used to guess a template filename. Subclasses can assume that
self.source and/or self.filename are set. Both are strings.
The lookup, encoding and settings parameters are stored as instance
variables.
The lookup parameter stores a list containing directory paths.
The encoding parameter should be used to decode byte strings or files.
The settings parameter contains a dict for engine-specific settings.
"""
self.name = name
self.source = source.read() if hasattr(source, 'read') else source
self.filename = source.filename if hasattr(source, 'filename') else None
self.lookup = [os.path.abspath(x) for x in lookup] if lookup else []
self.encoding = encoding
self.settings = self.settings.copy() # Copy from class variable
self.settings.update(settings) # Apply
if not self.source and self.name:
self.filename = self.search(self.name, self.lookup)
if not self.filename:
raise TemplateError('Template %s not found.' % repr(name))
if not self.source and not self.filename:
raise TemplateError('No template specified.')
self.prepare(**self.settings)
@classmethod
def search(cls, name, lookup=None):
""" Search name in all directories specified in lookup.
First without, then with common extensions. Return first hit. """
if not lookup:
depr('The template lookup path list should not be empty.',
True) #0.12
lookup = ['.']
if os.path.isabs(name) and os.path.isfile(name):
depr('Absolute template path names are deprecated.', True) #0.12
return os.path.abspath(name)
for spath in lookup:
spath = os.path.abspath(spath) + os.sep
fname = os.path.abspath(os.path.join(spath, name))
if not fname.startswith(spath): continue
if os.path.isfile(fname): return fname
for ext in cls.extensions:
if os.path.isfile('%s.%s' % (fname, ext)):
return '%s.%s' % (fname, ext)
@classmethod
def global_config(cls, key, *args):
""" This reads or sets the global settings stored in class.settings. """
if args:
cls.settings = cls.settings.copy() # Make settings local to class
cls.settings[key] = args[0]
else:
return cls.settings[key]
def prepare(self, **options):
""" Run preparations (parsing, caching, ...).
It should be possible to call this again to refresh a template or to
update settings.
"""
raise NotImplementedError
def render(self, *args, **kwargs):
""" Render the template with the specified local variables and return
a single byte or unicode string. If it is a byte string, the encoding
must match self.encoding. This method must be thread-safe!
Local variables may be provided in dictionaries (args)
or directly, as keywords (kwargs).
"""
raise NotImplementedError
class MakoTemplate(BaseTemplate):
def prepare(self, **options):
from mako.template import Template
from mako.lookup import TemplateLookup
options.update({'input_encoding': self.encoding})
options.setdefault('format_exceptions', bool(DEBUG))
lookup = TemplateLookup(directories=self.lookup, **options)
if self.source:
self.tpl = Template(self.source, lookup=lookup, **options)
else:
self.tpl = Template(uri=self.name,
filename=self.filename,
lookup=lookup, **options)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
class CheetahTemplate(BaseTemplate):
def prepare(self, **options):
from Cheetah.Template import Template
self.context = threading.local()
self.context.vars = {}
options['searchList'] = [self.context.vars]
if self.source:
self.tpl = Template(source=self.source, **options)
else:
self.tpl = Template(file=self.filename, **options)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
self.context.vars.update(self.defaults)
self.context.vars.update(kwargs)
out = str(self.tpl)
self.context.vars.clear()
return out
class Jinja2Template(BaseTemplate):
def prepare(self, filters=None, tests=None, globals={}, **kwargs):
from jinja2 import Environment, FunctionLoader
self.env = Environment(loader=FunctionLoader(self.loader), **kwargs)
if filters: self.env.filters.update(filters)
if tests: self.env.tests.update(tests)
if globals: self.env.globals.update(globals)
if self.source:
self.tpl = self.env.from_string(self.source)
else:
self.tpl = self.env.get_template(self.filename)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
def loader(self, name):
fname = self.search(name, self.lookup)
if not fname: return
with open(fname, "rb") as f:
return f.read().decode(self.encoding)
class SimpleTemplate(BaseTemplate):
def prepare(self,
escape_func=html_escape,
noescape=False,
syntax=None, **ka):
self.cache = {}
enc = self.encoding
self._str = lambda x: touni(x, enc)
self._escape = lambda x: escape_func(touni(x, enc))
self.syntax = syntax
if noescape:
self._str, self._escape = self._escape, self._str
@cached_property
def co(self):
return compile(self.code, self.filename or '<string>', 'exec')
@cached_property
def code(self):
source = self.source
if not source:
with open(self.filename, 'rb') as f:
source = f.read()
try:
source, encoding = touni(source), 'utf8'
except UnicodeError:
depr('Template encodings other than utf8 are not supported.') #0.11
source, encoding = touni(source, 'latin1'), 'latin1'
parser = StplParser(source, encoding=encoding, syntax=self.syntax)
code = parser.translate()
self.encoding = parser.encoding
return code
def _rebase(self, _env, _name=None, **kwargs):
_env['_rebase'] = (_name, kwargs)
def _include(self, _env, _name=None, **kwargs):
env = _env.copy()
env.update(kwargs)
if _name not in self.cache:
self.cache[_name] = self.__class__(name=_name, lookup=self.lookup)
return self.cache[_name].execute(env['_stdout'], env)
def execute(self, _stdout, kwargs):
env = self.defaults.copy()
env.update(kwargs)
env.update({
'_stdout': _stdout,
'_printlist': _stdout.extend,
'include': functools.partial(self._include, env),
'rebase': functools.partial(self._rebase, env),
'_rebase': None,
'_str': self._str,
'_escape': self._escape,
'get': env.get,
'setdefault': env.setdefault,
'defined': env.__contains__
})
eval(self.co, env)
if env.get('_rebase'):
subtpl, rargs = env.pop('_rebase')
rargs['base'] = ''.join(_stdout) #copy stdout
del _stdout[:] # clear stdout
return self._include(env, subtpl, **rargs)
return env
def render(self, *args, **kwargs):
""" Render the template using keyword arguments as local variables. """
env = {}
stdout = []
for dictarg in args:
env.update(dictarg)
env.update(kwargs)
self.execute(stdout, env)
return ''.join(stdout)
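# Minimal sketch of SimpleTemplate with an inline source string; values are
# HTML-escaped by default ({{!expr}} would skip escaping).
def _example_simpletemplate():  # pragma: no cover - illustration only
    tpl = SimpleTemplate('Hello {{name}}!')
    assert tpl.render(name='World') == 'Hello World!'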
class StplSyntaxError(TemplateError):
pass
class StplParser(object):
""" Parser for stpl templates. """
_re_cache = {} #: Cache for compiled re patterns
# This huge pile of voodoo magic splits python code into 8 different tokens.
# We use the verbose (?x) regex mode to make this more manageable
_re_tok = _re_inl = r'''((?mx) # verbose and dot-matches-newline mode
[urbURB]*
(?: ''(?!')
|""(?!")
|'{6}
|"{6}
|'(?:[^\\']|\\.)+?'
|"(?:[^\\"]|\\.)+?"
|'{3}(?:[^\\]|\\.|\n)+?'{3}
|"{3}(?:[^\\]|\\.|\n)+?"{3}
)
)'''
_re_inl = _re_tok.replace(r'|\n', '') # We re-use this string pattern later
_re_tok += r'''
# 2: Comments (until end of line, but not the newline itself)
|(\#.*)
# 3: Open and close (4) grouping tokens
|([\[\{\(])
|([\]\}\)])
# 5,6: Keywords that start or continue a python block (only start of line)
|^([\ \t]*(?:if|for|while|with|try|def|class)\b)
|^([\ \t]*(?:elif|else|except|finally)\b)
# 7: Our special 'end' keyword (but only if it stands alone)
|((?:^|;)[\ \t]*end[\ \t]*(?=(?:%(block_close)s[\ \t]*)?\r?$|;|\#))
# 8: A customizable end-of-code-block template token (only end of line)
|(%(block_close)s[\ \t]*(?=\r?$))
# 9: And finally, a single newline. The 10th token is 'everything else'
|(\r?\n)
'''
# Match the start tokens of code areas in a template
_re_split = r'''(?m)^[ \t]*(\\?)((%(line_start)s)|(%(block_start)s))'''
# Match inline statements (may contain python strings)
_re_inl = r'''%%(inline_start)s((?:%s|[^'"\n]+?)*?)%%(inline_end)s''' % _re_inl
default_syntax = '<% %> % {{ }}'
def __init__(self, source, syntax=None, encoding='utf8'):
self.source, self.encoding = touni(source, encoding), encoding
self.set_syntax(syntax or self.default_syntax)
self.code_buffer, self.text_buffer = [], []
self.lineno, self.offset = 1, 0
self.indent, self.indent_mod = 0, 0
self.paren_depth = 0
def get_syntax(self):
""" Tokens as a space separated string (default: <% %> % {{ }}) """
return self._syntax
def set_syntax(self, syntax):
self._syntax = syntax
self._tokens = syntax.split()
if not syntax in self._re_cache:
names = 'block_start block_close line_start inline_start inline_end'
etokens = map(re.escape, self._tokens)
pattern_vars = dict(zip(names.split(), etokens))
patterns = (self._re_split, self._re_tok, self._re_inl)
patterns = [re.compile(p % pattern_vars) for p in patterns]
self._re_cache[syntax] = patterns
self.re_split, self.re_tok, self.re_inl = self._re_cache[syntax]
syntax = property(get_syntax, set_syntax)
def translate(self):
if self.offset: raise RuntimeError('Parser is a one time instance.')
while True:
m = self.re_split.search(self.source, pos=self.offset)
if m:
text = self.source[self.offset:m.start()]
self.text_buffer.append(text)
self.offset = m.end()
if m.group(1): # Escape syntax
line, sep, _ = self.source[self.offset:].partition('\n')
self.text_buffer.append(self.source[m.start():m.start(1)] +
m.group(2) + line + sep)
self.offset += len(line + sep)
continue
self.flush_text()
self.offset += self.read_code(self.source[self.offset:],
multiline=bool(m.group(4)))
else:
break
self.text_buffer.append(self.source[self.offset:])
self.flush_text()
return ''.join(self.code_buffer)
def read_code(self, pysource, multiline):
code_line, comment = '', ''
offset = 0
while True:
m = self.re_tok.search(pysource, pos=offset)
if not m:
code_line += pysource[offset:]
offset = len(pysource)
self.write_code(code_line.strip(), comment)
break
code_line += pysource[offset:m.start()]
offset = m.end()
_str, _com, _po, _pc, _blk1, _blk2, _end, _cend, _nl = m.groups()
if self.paren_depth > 0 and (_blk1 or _blk2): # a if b else c
code_line += _blk1 or _blk2
continue
if _str: # Python string
code_line += _str
elif _com: # Python comment (up to EOL)
comment = _com
if multiline and _com.strip().endswith(self._tokens[1]):
multiline = False # Allow end-of-block in comments
elif _po: # open parenthesis
self.paren_depth += 1
code_line += _po
elif _pc: # close parenthesis
if self.paren_depth > 0:
# we could check for matching parentheses here, but it's
# easier to leave that to python - just check counts
self.paren_depth -= 1
code_line += _pc
elif _blk1: # Start-block keyword (if/for/while/def/try/...)
code_line, self.indent_mod = _blk1, -1
self.indent += 1
elif _blk2: # Continue-block keyword (else/elif/except/...)
code_line, self.indent_mod = _blk2, -1
elif _end: # The non-standard 'end'-keyword (ends a block)
self.indent -= 1
elif _cend: # The end-code-block template token (usually '%>')
if multiline: multiline = False
else: code_line += _cend
else: # \n
self.write_code(code_line.strip(), comment)
self.lineno += 1
code_line, comment, self.indent_mod = '', '', 0
if not multiline:
break
return offset
def flush_text(self):
text = ''.join(self.text_buffer)
del self.text_buffer[:]
if not text: return
parts, pos, nl = [], 0, '\\\n' + ' ' * self.indent
for m in self.re_inl.finditer(text):
prefix, pos = text[pos:m.start()], m.end()
if prefix:
parts.append(nl.join(map(repr, prefix.splitlines(True))))
if prefix.endswith('\n'): parts[-1] += nl
parts.append(self.process_inline(m.group(1).strip()))
if pos < len(text):
prefix = text[pos:]
lines = prefix.splitlines(True)
if lines[-1].endswith('\\\\\n'): lines[-1] = lines[-1][:-3]
elif lines[-1].endswith('\\\\\r\n'): lines[-1] = lines[-1][:-4]
parts.append(nl.join(map(repr, lines)))
code = '_printlist((%s,))' % ', '.join(parts)
self.lineno += code.count('\n') + 1
self.write_code(code)
@staticmethod
def process_inline(chunk):
if chunk[0] == '!': return '_str(%s)' % chunk[1:]
return '_escape(%s)' % chunk
def write_code(self, line, comment=''):
code = ' ' * (self.indent + self.indent_mod)
code += line.lstrip() + comment + '\n'
self.code_buffer.append(code)
def template(*args, **kwargs):
"""
Get a rendered template as a string iterator.
You can use a name, a filename or a template string as first parameter.
Template rendering arguments can be passed as dictionaries
or directly (as keyword arguments).
"""
tpl = args[0] if args else None
adapter = kwargs.pop('template_adapter', SimpleTemplate)
lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
tplid = (id(lookup), tpl)
if tplid not in TEMPLATES or DEBUG:
settings = kwargs.pop('template_settings', {})
if isinstance(tpl, adapter):
TEMPLATES[tplid] = tpl
if settings: TEMPLATES[tplid].prepare(**settings)
elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings)
else:
TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings)
if not TEMPLATES[tplid]:
abort(500, 'Template (%s) not found' % tpl)
for dictarg in args[1:]:
kwargs.update(dictarg)
return TEMPLATES[tplid].render(kwargs)
mako_template = functools.partial(template, template_adapter=MakoTemplate)
cheetah_template = functools.partial(template,
template_adapter=CheetahTemplate)
jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
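# Minimal sketch of template() lookup rules: strings containing template
# syntax are compiled directly, plain names are resolved via TEMPLATE_PATH.
# The 'index' template name is hypothetical.
def _example_template():  # pragma: no cover - illustration only
    inline = template('Hello {{name}}!', name='World')  # inline source
    page = template('index', title='Home')              # file lookup
    return inline, page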
def view(tpl_name, **defaults):
""" Decorator: renders a template for a handler.
The handler can control its behavior like that:
- return a dict of template vars to fill out the template
- return something other than a dict and the view decorator will not
process the template, but return the handler result as is.
This includes returning a HTTPResponse(dict) to get,
for instance, JSON with autojson or other castfilters.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if isinstance(result, (dict, DictMixin)):
tplvars = defaults.copy()
tplvars.update(result)
return template(tpl_name, **tplvars)
elif result is None:
return template(tpl_name, defaults)
return result
return wrapper
return decorator
mako_view = functools.partial(view, template_adapter=MakoTemplate)
cheetah_view = functools.partial(view, template_adapter=CheetahTemplate)
jinja2_view = functools.partial(view, template_adapter=Jinja2Template)
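# Minimal sketch of the view() decorator: dict results fill the template,
# anything else passes through unchanged. The template name is hypothetical.
def _example_view():  # pragma: no cover - illustration only
    @route('/items')
    @view('listing', title='Items')
    def listing():
        return {'items': ['a', 'b']}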
###############################################################################
# Constants and Globals ########################################################
###############################################################################
TEMPLATE_PATH = ['./', './views/']
TEMPLATES = {}
DEBUG = False
NORUN = False # If set, run() does nothing. Used by load_app()
#: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found')
HTTP_CODES = httplib.responses.copy()
HTTP_CODES[418] = "I'm a teapot" # RFC 2324
HTTP_CODES[428] = "Precondition Required"
HTTP_CODES[429] = "Too Many Requests"
HTTP_CODES[431] = "Request Header Fields Too Large"
HTTP_CODES[511] = "Network Authentication Required"
_HTTP_STATUS_LINES = dict((k, '%d %s' % (k, v))
for (k, v) in HTTP_CODES.items())
#: The default template used for error pages. Override with @error()
ERROR_PAGE_TEMPLATE = """
%%try:
%%from %s import DEBUG, request
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>Error: {{e.status}}</title>
<style type="text/css">
html {background-color: #eee; font-family: sans-serif;}
body {background-color: #fff; border: 1px solid #ddd;
padding: 15px; margin: 15px;}
pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;}
</style>
</head>
<body>
<h1>Error: {{e.status}}</h1>
<p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt>
caused an error:</p>
<pre>{{e.body}}</pre>
%%if DEBUG and e.exception:
<h2>Exception:</h2>
<pre>{{repr(e.exception)}}</pre>
%%end
%%if DEBUG and e.traceback:
<h2>Traceback:</h2>
<pre>{{e.traceback}}</pre>
%%end
</body>
</html>
%%except ImportError:
<b>ImportError:</b> Could not generate the error page. Please add bottle to
the import path.
%%end
""" % __name__
#: A thread-safe instance of :class:`LocalRequest`. If accessed from within a
#: request callback, this instance always refers to the *current* request
#: (even on a multi-threaded server).
request = LocalRequest()
#: A thread-safe instance of :class:`LocalResponse`. It is used to change the
#: HTTP response for the *current* request.
response = LocalResponse()
#: A thread-safe namespace. Not used by Bottle.
local = threading.local()
# Initialize app stack (create first empty Bottle app)
# BC: 0.6.4 and needed for run()
app = default_app = AppStack()
app.push()
#: A virtual package that redirects import statements.
#: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`.
ext = _ImportRedirect('bottle.ext' if __name__ == '__main__' else
__name__ + ".ext", 'bottle_%s').module
if __name__ == '__main__':
opt, args, parser = _cli_parse(sys.argv)
def _cli_error(msg):
parser.print_help()
_stderr('\nError: %s\n' % msg)
sys.exit(1)
if opt.version:
_stdout('Bottle %s\n' % __version__)
sys.exit(0)
if not args:
_cli_error("No application entry point specified.")
sys.path.insert(0, '.')
sys.modules.setdefault('bottle', sys.modules['__main__'])
host, port = (opt.bind or 'localhost'), 8080
if ':' in host and host.rfind(']') < host.rfind(':'):
host, port = host.rsplit(':', 1)
host = host.strip('[]')
config = ConfigDict()
for cfile in opt.conf or []:
try:
if cfile.endswith('.json'):
with open(cfile, 'rb') as fp:
config.load_dict(json_loads(fp.read()))
else:
config.load_config(cfile)
except ConfigParserError:
_cli_error(str(_e()))
except IOError:
_cli_error("Unable to read config file %r" % cfile)
except (UnicodeError, TypeError, ValueError):
_cli_error("Unable to parse config file %r: %s" % (cfile, _e()))
for cval in opt.param or []:
if '=' in cval:
config.update((cval.split('=', 1),))
else:
config[cval] = True
run(args[0],
host=host,
port=int(port),
server=opt.server,
reloader=opt.reload,
plugins=opt.plugin,
debug=opt.debug,
config=config)
# THE END
|
hackersql/sq1map
|
thirdparty/bottle/bottle.py
|
Python
|
gpl-3.0
| 152,507
|
from cms.app_base import CMSApp
from cms.test_utils.project.sampleapp.menu import SampleAppMenu, StaticMenu3
from cms.apphook_pool import apphook_pool
from django.utils.translation import ugettext_lazy as _
class SampleApp(CMSApp):
name = _("Sample App")
urls = ["cms.test_utils.project.sampleapp.urls"]
menus = [SampleAppMenu]
permissions = True
apphook_pool.register(SampleApp)
class SampleAppWithExcludedPermissions(CMSApp):
name = _("Sample App with excluded permissions")
urls = [
"cms.test_utils.project.sampleapp.urls_excluded"
]
permissions = True
exclude_permissions = ['excluded']
apphook_pool.register(SampleAppWithExcludedPermissions)
class SampleApp2(CMSApp):
name = _("Sample App 2")
urls = ["cms.test_utils.project.sampleapp.urls2"]
menus = [StaticMenu3]
apphook_pool.register(SampleApp2)
class NamespacedApp(CMSApp):
name = _("Namespaced App")
urls = [
"cms.test_utils.project.sampleapp.ns_urls",
"cms.test_utils.project.sampleapp.urls"
]
menus = [SampleAppMenu, StaticMenu3]
app_name = 'namespaced_app_ns'
apphook_pool.register(NamespacedApp)
class ParentApp(CMSApp):
name = _("Parent app")
urls = ["cms.test_utils.project.sampleapp.urls_parentapp"]
apphook_pool.register(ParentApp)
class ChildApp(CMSApp):
name = _("Child app")
urls = ["cms.test_utils.project.sampleapp.urls_childapp"]
apphook_pool.register(ChildApp)
|
Venturi/oldcms
|
env/lib/python2.7/site-packages/cms/test_utils/project/sampleapp/cms_app.py
|
Python
|
apache-2.0
| 1,462
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='DefaultProfile',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
],
),
]
|
alexvanaxe/socialauthtests
|
theauth/migrations/0001_initial.py
|
Python
|
gpl-2.0
| 607
|