| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
albertoferna/compmech | setup.py | 1 | 1198 | from glob import glob
import sys
import os
from subprocess import Popen
import numpy
#params = 'build_ext -inplace -IC:\clones\cubature\cubature ' + ' '.join(sys.argv[1:])
params = 'build_ext --inplace -I%s ' % numpy.get_include() + ' '.join(sys.argv[1:]) + ' clean'
cwd = os.getcwd()
if os.name == 'nt':
use_sdk = 'DISTUTILS_USE_SDK'
if use_sdk not in os.environ:
os.environ[use_sdk] = '1'
print('####################')
print('Compiling modules...')
print('####################')
print('')
basedirs = [
os.path.join('compmech', 'conecyl', 'clpt'),
os.path.join('compmech', 'conecyl', 'fsdt'),
os.path.join('compmech', 'integrate'),
os.path.join('compmech', 'conecyl', 'imperfections'),
os.path.join('compmech', 'aero', 'pistonplate', 'clpt'),
os.path.join('compmech', 'aero', 'pistonstiffpanel', 'clpt'),
]
for basedir in basedirs:
print('Compiling setup.py in %s' % basedir)
basedir = os.path.join(cwd, basedir)
os.chdir(basedir)
for fname in glob('setup*.py'):
p = Popen(('python {} '.format(fname) + params), shell=True)
p.wait()
os.chdir(cwd)
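# Illustrative note (added commentary, not part of the original script): any
# extra command-line flags are forwarded to every per-directory setup file
# matched by glob('setup*.py') above, so e.g. `python setup.py --compiler=msvc`
# runs each sub-setup roughly as
# `python <fname> build_ext --inplace -I<numpy-include> --compiler=msvc clean`.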
| bsd-3-clause |
transientskp/tkp | tests/test_accessors/test_detection.py | 3 | 2726 | import os
import unittest
from tkp.accessors.detection import isfits, islofarhdf5, detect, iscasa
from tkp.accessors.lofarcasaimage import LofarCasaImage
from tkp.accessors.casaimage import CasaImage
from tkp.accessors.fitsimage import FitsImage
from tkp.accessors.amicasaimage import AmiCasaImage
import tkp.accessors
from tkp.testutil.decorators import requires_data
from tkp.testutil.data import DATAPATH
lofarcasatable = os.path.join(DATAPATH, 'casatable/L55596_000TO009_skymodellsc_wmax6000_noise_mult10_cell40_npix512_wplanes215.img.restored.corr')
casatable = os.path.join(DATAPATH, 'accessors/casa.table')
fitsfile = os.path.join(DATAPATH, 'accessors/lofar.fits')
hdf5file = os.path.join(DATAPATH, 'accessors/lofar.h5')
antennafile = os.path.join(DATAPATH, 'lofar/CS001-AntennaArrays.conf')
amicasatable = os.path.join(DATAPATH, 'accessors/ami-la.image')
class TestAutodetect(unittest.TestCase):
@requires_data(lofarcasatable)
def test_islofarcasa(self):
self.assertTrue(iscasa(lofarcasatable))
self.assertFalse(islofarhdf5(lofarcasatable))
self.assertFalse(isfits(lofarcasatable))
self.assertEqual(detect(lofarcasatable), LofarCasaImage)
@requires_data(casatable)
def test_iscasa(self):
# CasaImages are not directly instantiable, since they don't provide
# the basic DataAccessor interface.
self.assertTrue(iscasa(casatable))
self.assertFalse(islofarhdf5(casatable))
self.assertFalse(isfits(casatable))
self.assertEqual(detect(casatable), None)
@requires_data(hdf5file)
def test_ishdf5(self):
# NOTE: this check is disabled for now, since casacore can't parse LOFAR hdf5
#self.assertTrue(islofarhdf5(hdf5file))
self.assertFalse(isfits(hdf5file))
self.assertFalse(iscasa(hdf5file))
#self.assertEqual(detect(hdf5file), LofarHdf5Image)
@requires_data(fitsfile)
def test_isfits(self):
self.assertTrue(isfits(fitsfile))
self.assertFalse(islofarhdf5(fitsfile))
self.assertFalse(iscasa(fitsfile))
self.assertEqual(detect(fitsfile), FitsImage)
@requires_data(lofarcasatable, antennafile)
def test_open(self):
accessor = tkp.accessors.open(lofarcasatable)
self.assertEqual(accessor.__class__, LofarCasaImage)
self.assertRaises(IOError, tkp.accessors.open, antennafile)
self.assertRaises(IOError, tkp.accessors.open, 'doesntexists')
@requires_data(amicasatable)
def test_isamicasa(self):
self.assertTrue(iscasa(amicasatable))
self.assertFalse(islofarhdf5(amicasatable))
self.assertFalse(isfits(amicasatable))
self.assertEqual(detect(amicasatable), AmiCasaImage)
| bsd-2-clause |
Endika/edx-platform | common/djangoapps/util/tests/test_date_utils.py | 55 | 7800 | # -*- coding: utf-8 -*-
"""
Tests for util.date_utils
"""
from datetime import datetime, timedelta, tzinfo
import unittest
import ddt
from mock import patch
from nose.tools import assert_equals, assert_false # pylint: disable=no-name-in-module
from pytz import UTC
from util.date_utils import (
get_default_time_display, get_time_display, almost_same_datetime,
strftime_localized,
)
def test_get_default_time_display():
assert_equals("", get_default_time_display(None))
test_time = datetime(1992, 3, 12, 15, 3, 30, tzinfo=UTC)
assert_equals(
"Mar 12, 1992 at 15:03 UTC",
get_default_time_display(test_time))
def test_get_dflt_time_disp_notz():
test_time = datetime(1992, 3, 12, 15, 3, 30)
assert_equals(
"Mar 12, 1992 at 15:03 UTC",
get_default_time_display(test_time))
def test_get_time_disp_ret_empty():
assert_equals("", get_time_display(None))
test_time = datetime(1992, 3, 12, 15, 3, 30, tzinfo=UTC)
assert_equals("", get_time_display(test_time, ""))
def test_get_time_display():
test_time = datetime(1992, 3, 12, 15, 3, 30, tzinfo=UTC)
assert_equals("dummy text", get_time_display(test_time, 'dummy text'))
assert_equals("Mar 12 1992", get_time_display(test_time, '%b %d %Y'))
assert_equals("Mar 12 1992 UTC", get_time_display(test_time, '%b %d %Y %Z'))
assert_equals("Mar 12 15:03", get_time_display(test_time, '%b %d %H:%M'))
def test_get_time_pass_through():
test_time = datetime(1992, 3, 12, 15, 3, 30, tzinfo=UTC)
assert_equals("Mar 12, 1992 at 15:03 UTC", get_time_display(test_time))
assert_equals("Mar 12, 1992 at 15:03 UTC", get_time_display(test_time, None))
assert_equals("Mar 12, 1992 at 15:03 UTC", get_time_display(test_time, "%"))
def test_get_time_display_coerce():
test_time_standard = datetime(1992, 1, 12, 15, 3, 30, tzinfo=UTC)
test_time_daylight = datetime(1992, 7, 12, 15, 3, 30, tzinfo=UTC)
assert_equals("Jan 12, 1992 at 07:03 PST",
get_time_display(test_time_standard, None, coerce_tz="US/Pacific"))
assert_equals("Jan 12, 1992 at 15:03 UTC",
get_time_display(test_time_standard, None, coerce_tz="NONEXISTENTTZ"))
assert_equals("Jan 12 07:03",
get_time_display(test_time_standard, '%b %d %H:%M', coerce_tz="US/Pacific"))
assert_equals("Jul 12, 1992 at 08:03 PDT",
get_time_display(test_time_daylight, None, coerce_tz="US/Pacific"))
assert_equals("Jul 12, 1992 at 15:03 UTC",
get_time_display(test_time_daylight, None, coerce_tz="NONEXISTENTTZ"))
assert_equals("Jul 12 08:03",
get_time_display(test_time_daylight, '%b %d %H:%M', coerce_tz="US/Pacific"))
class NamelessTZ(tzinfo):
"""Static timezone for testing"""
def utcoffset(self, _dt):
return timedelta(hours=-3)
def dst(self, _dt):
return timedelta(0)
def test_get_default_time_display_no_tzname():
assert_equals("", get_default_time_display(None))
test_time = datetime(1992, 3, 12, 15, 3, 30, tzinfo=NamelessTZ())
assert_equals(
"Mar 12, 1992 at 15:03-0300",
get_default_time_display(test_time))
def test_almost_same_datetime():
assert almost_same_datetime(
datetime(2013, 5, 3, 10, 20, 30),
datetime(2013, 5, 3, 10, 21, 29)
)
assert almost_same_datetime(
datetime(2013, 5, 3, 11, 20, 30),
datetime(2013, 5, 3, 10, 21, 29),
timedelta(hours=1)
)
assert_false(
almost_same_datetime(
datetime(2013, 5, 3, 11, 20, 30),
datetime(2013, 5, 3, 10, 21, 29)
)
)
assert_false(
almost_same_datetime(
datetime(2013, 5, 3, 11, 20, 30),
datetime(2013, 5, 3, 10, 21, 29),
timedelta(minutes=10)
)
)
def fake_ugettext(translations):
"""
Create a fake implementation of ugettext, for testing.
"""
def _ugettext(text): # pylint: disable=missing-docstring
return translations.get(text, text)
return _ugettext
def fake_pgettext(translations):
"""
Create a fake implementation of pgettext, for testing.
"""
def _pgettext(context, text): # pylint: disable=missing-docstring
return translations.get((context, text), text)
return _pgettext
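# A minimal sketch of how these fakes behave (values assumed for illustration):
# fake = fake_ugettext({"SHORT_DATE_FORMAT": "date(%Y.%m.%d)"})
# fake("SHORT_DATE_FORMAT") returns "date(%Y.%m.%d)", while any text without an
# entry in the translations dict falls through unchanged, which is what lets
# the @patch decorators below substitute them for the real gettext functions.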
@ddt.ddt
class StrftimeLocalizedTest(unittest.TestCase):
"""
Tests for strftime_localized.
"""
@ddt.data(
("%Y", "2013"),
("%m/%d/%y", "02/14/13"),
("hello", "hello"),
(u'%Y년 %m월 %d일', u"2013년 02월 14일"),
("%a, %b %d, %Y", "Thu, Feb 14, 2013"),
("%I:%M:%S %p", "04:41:17 PM"),
("%A at %-I%P", "Thursday at 4pm"),
)
def test_usual_strftime_behavior(self, (fmt, expected)):
dtime = datetime(2013, 02, 14, 16, 41, 17)
self.assertEqual(expected, strftime_localized(dtime, fmt))
# strftime doesn't like Unicode, so do the work in UTF8.
self.assertEqual(expected, dtime.strftime(fmt.encode('utf8')).decode('utf8'))
@ddt.data(
("SHORT_DATE", "Feb 14, 2013"),
("LONG_DATE", "Thursday, February 14, 2013"),
("TIME", "04:41:17 PM"),
("DAY_AND_TIME", "Thursday at 4pm"),
("%x %X!", "Feb 14, 2013 04:41:17 PM!"),
)
def test_shortcuts(self, (fmt, expected)):
dtime = datetime(2013, 02, 14, 16, 41, 17)
self.assertEqual(expected, strftime_localized(dtime, fmt))
@patch('util.date_utils.pgettext', fake_pgettext(translations={
("abbreviated month name", "Feb"): "XXfebXX",
("month name", "February"): "XXfebruaryXX",
("abbreviated weekday name", "Thu"): "XXthuXX",
("weekday name", "Thursday"): "XXthursdayXX",
("am/pm indicator", "PM"): "XXpmXX",
}))
@ddt.data(
("SHORT_DATE", "XXfebXX 14, 2013"),
("LONG_DATE", "XXthursdayXX, XXfebruaryXX 14, 2013"),
("DATE_TIME", "XXfebXX 14, 2013 at 16:41"),
("TIME", "04:41:17 XXpmXX"),
("%x %X!", "XXfebXX 14, 2013 04:41:17 XXpmXX!"),
)
def test_translated_words(self, (fmt, expected)):
dtime = datetime(2013, 02, 14, 16, 41, 17)
self.assertEqual(expected, strftime_localized(dtime, fmt))
@patch('util.date_utils.ugettext', fake_ugettext(translations={
"SHORT_DATE_FORMAT": "date(%Y.%m.%d)",
"LONG_DATE_FORMAT": "date(%A.%Y.%B.%d)",
"DATE_TIME_FORMAT": "date(%Y.%m.%d@%H.%M)",
"TIME_FORMAT": "%Hh.%Mm.%Ss",
}))
@ddt.data(
("SHORT_DATE", "date(2013.02.14)"),
("Look: %x", "Look: date(2013.02.14)"),
("LONG_DATE", "date(Thursday.2013.February.14)"),
("DATE_TIME", "date(2013.02.14@16.41)"),
("TIME", "16h.41m.17s"),
("The time is: %X", "The time is: 16h.41m.17s"),
("%x %X", "date(2013.02.14) 16h.41m.17s"),
)
def test_translated_formats(self, (fmt, expected)):
dtime = datetime(2013, 02, 14, 16, 41, 17)
self.assertEqual(expected, strftime_localized(dtime, fmt))
@patch('util.date_utils.ugettext', fake_ugettext(translations={
"SHORT_DATE_FORMAT": "oops date(%Y.%x.%d)",
"TIME_FORMAT": "oops %Hh.%Xm.%Ss",
}))
@ddt.data(
("SHORT_DATE", "Feb 14, 2013"),
("TIME", "04:41:17 PM"),
)
def test_recursion_protection(self, (fmt, expected)):
dtime = datetime(2013, 02, 14, 16, 41, 17)
self.assertEqual(expected, strftime_localized(dtime, fmt))
@ddt.data(
"%",
"Hello%"
"%Y/%m/%d%",
)
def test_invalid_format_strings(self, fmt):
dtime = datetime(2013, 02, 14, 16, 41, 17)
with self.assertRaises(ValueError):
strftime_localized(dtime, fmt)
| agpl-3.0 |
bgris/ODL_bgris | lib/python3.5/site-packages/scipy/signal/tests/test_fir_filter_design.py | 22 | 19826 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import TestCase, run_module_suite, assert_raises, \
assert_almost_equal, assert_array_almost_equal, assert_equal, \
assert_, assert_allclose
from scipy.special import sinc
from scipy.signal import kaiser_beta, kaiser_atten, kaiserord, \
firwin, firwin2, freqz, remez, firls
def test_kaiser_beta():
b = kaiser_beta(58.7)
assert_almost_equal(b, 0.1102 * 50.0)
b = kaiser_beta(22.0)
assert_almost_equal(b, 0.5842 + 0.07886)
b = kaiser_beta(21.0)
assert_equal(b, 0.0)
b = kaiser_beta(10.0)
assert_equal(b, 0.0)
def test_kaiser_atten():
a = kaiser_atten(1, 1.0)
assert_equal(a, 7.95)
a = kaiser_atten(2, 1/np.pi)
assert_equal(a, 2.285 + 7.95)
def test_kaiserord():
assert_raises(ValueError, kaiserord, 1.0, 1.0)
numtaps, beta = kaiserord(2.285 + 7.95 - 0.001, 1/np.pi)
assert_equal((numtaps, beta), (2, 0.0))
class TestFirwin(TestCase):
def check_response(self, h, expected_response, tol=.05):
N = len(h)
alpha = 0.5 * (N-1)
m = np.arange(0,N) - alpha # time indices of taps
for freq, expected in expected_response:
actual = abs(np.sum(h*np.exp(-1.j*np.pi*m*freq)))
mse = abs(actual-expected)**2
self.assertTrue(mse < tol, 'response not as expected, mse=%g > %g'
% (mse, tol))
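# Note (added commentary, not in the original test): the sum above evaluates
# the filter's DTFT at normalized frequency `freq` (1.0 == Nyquist), i.e.
# H(f) = sum_m h[m] * exp(-1j*pi*m*f); shifting the time indices by alpha
# centers the symmetric taps so the magnitude is compared without the
# linear-phase term.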
def test_response(self):
N = 51
f = .5
# increase length just to try even/odd
h = firwin(N, f) # low-pass from 0 to f
self.check_response(h, [(.25,1), (.75,0)])
h = firwin(N+1, f, window='nuttall') # specific window
self.check_response(h, [(.25,1), (.75,0)])
h = firwin(N+2, f, pass_zero=False) # stop from 0 to f --> high-pass
self.check_response(h, [(.25,0), (.75,1)])
f1, f2, f3, f4 = .2, .4, .6, .8
h = firwin(N+3, [f1, f2], pass_zero=False) # band-pass filter
self.check_response(h, [(.1,0), (.3,1), (.5,0)])
h = firwin(N+4, [f1, f2]) # band-stop filter
self.check_response(h, [(.1,1), (.3,0), (.5,1)])
h = firwin(N+5, [f1, f2, f3, f4], pass_zero=False, scale=False)
self.check_response(h, [(.1,0), (.3,1), (.5,0), (.7,1), (.9,0)])
h = firwin(N+6, [f1, f2, f3, f4]) # multiband filter
self.check_response(h, [(.1,1), (.3,0), (.5,1), (.7,0), (.9,1)])
h = firwin(N+7, 0.1, width=.03) # low-pass
self.check_response(h, [(.05,1), (.75,0)])
h = firwin(N+8, 0.1, pass_zero=False) # high-pass
self.check_response(h, [(.05,0), (.75,1)])
def mse(self, h, bands):
"""Compute mean squared error versus ideal response across frequency
band.
h -- coefficients
bands -- list of (left, right) passband tuples, relative to 1 == Nyquist
"""
w, H = freqz(h, worN=1024)
f = w/np.pi
passIndicator = np.zeros(len(w), bool)
for left, right in bands:
passIndicator |= (f >= left) & (f < right)
Hideal = np.where(passIndicator, 1, 0)
mse = np.mean(abs(abs(H)-Hideal)**2)
return mse
def test_scaling(self):
"""
For one lowpass, bandpass, and highpass example filter, this test
checks two things:
- the mean squared error over the frequency domain of the unscaled
filter is smaller than the scaled filter (true for rectangular
window)
- the response of the scaled filter is exactly unity at the center
of the first passband
"""
N = 11
cases = [
([.5], True, (0, 1)),
([0.2, .6], False, (.4, 1)),
([.5], False, (1, 1)),
]
for cutoff, pass_zero, expected_response in cases:
h = firwin(N, cutoff, scale=False, pass_zero=pass_zero, window='ones')
hs = firwin(N, cutoff, scale=True, pass_zero=pass_zero, window='ones')
if len(cutoff) == 1:
if pass_zero:
cutoff = [0] + cutoff
else:
cutoff = cutoff + [1]
self.assertTrue(self.mse(h, [cutoff]) < self.mse(hs, [cutoff]),
'least squares violation')
self.check_response(hs, [expected_response], 1e-12)
class TestFirWinMore(TestCase):
"""Different author, different style, different tests..."""
def test_lowpass(self):
width = 0.04
ntaps, beta = kaiserord(120, width)
taps = firwin(ntaps, cutoff=0.5, window=('kaiser', beta), scale=False)
# Check the symmetry of taps.
assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1])
# Check the gain at a few samples where we know it should be approximately 0 or 1.
freq_samples = np.array([0.0, 0.25, 0.5-width/2, 0.5+width/2, 0.75, 1.0])
freqs, response = freqz(taps, worN=np.pi*freq_samples)
assert_array_almost_equal(np.abs(response),
[1.0, 1.0, 1.0, 0.0, 0.0, 0.0], decimal=5)
def test_highpass(self):
width = 0.04
ntaps, beta = kaiserord(120, width)
# Ensure that ntaps is odd.
ntaps |= 1
taps = firwin(ntaps, cutoff=0.5, window=('kaiser', beta),
pass_zero=False, scale=False)
# Check the symmetry of taps.
assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1])
# Check the gain at a few samples where we know it should be approximately 0 or 1.
freq_samples = np.array([0.0, 0.25, 0.5-width/2, 0.5+width/2, 0.75, 1.0])
freqs, response = freqz(taps, worN=np.pi*freq_samples)
assert_array_almost_equal(np.abs(response),
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0], decimal=5)
def test_bandpass(self):
width = 0.04
ntaps, beta = kaiserord(120, width)
taps = firwin(ntaps, cutoff=[0.3, 0.7], window=('kaiser', beta),
pass_zero=False, scale=False)
# Check the symmetry of taps.
assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1])
# Check the gain at a few samples where we know it should be approximately 0 or 1.
freq_samples = np.array([0.0, 0.2, 0.3-width/2, 0.3+width/2, 0.5,
0.7-width/2, 0.7+width/2, 0.8, 1.0])
freqs, response = freqz(taps, worN=np.pi*freq_samples)
assert_array_almost_equal(np.abs(response),
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0], decimal=5)
def test_multi(self):
width = 0.04
ntaps, beta = kaiserord(120, width)
taps = firwin(ntaps, cutoff=[0.2, 0.5, 0.8], window=('kaiser', beta),
pass_zero=True, scale=False)
# Check the symmetry of taps.
assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1])
# Check the gain at a few samples where we know it should be approximately 0 or 1.
freq_samples = np.array([0.0, 0.1, 0.2-width/2, 0.2+width/2, 0.35,
0.5-width/2, 0.5+width/2, 0.65,
0.8-width/2, 0.8+width/2, 0.9, 1.0])
freqs, response = freqz(taps, worN=np.pi*freq_samples)
assert_array_almost_equal(np.abs(response),
[1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0],
decimal=5)
def test_nyq(self):
"""Test the nyq keyword."""
nyquist = 1000
width = 40.0
relative_width = width/nyquist
ntaps, beta = kaiserord(120, relative_width)
taps = firwin(ntaps, cutoff=[300, 700], window=('kaiser', beta),
pass_zero=False, scale=False, nyq=nyquist)
# Check the symmetry of taps.
assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1])
# Check the gain at a few samples where we know it should be approximately 0 or 1.
freq_samples = np.array([0.0, 200, 300-width/2, 300+width/2, 500,
700-width/2, 700+width/2, 800, 1000])
freqs, response = freqz(taps, worN=np.pi*freq_samples/nyquist)
assert_array_almost_equal(np.abs(response),
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0], decimal=5)
def test_bad_cutoff(self):
"""Test that invalid cutoff argument raises ValueError."""
# cutoff values must be greater than 0 and less than 1.
assert_raises(ValueError, firwin, 99, -0.5)
assert_raises(ValueError, firwin, 99, 1.5)
# Don't allow 0 or 1 in cutoff.
assert_raises(ValueError, firwin, 99, [0, 0.5])
assert_raises(ValueError, firwin, 99, [0.5, 1])
# cutoff values must be strictly increasing.
assert_raises(ValueError, firwin, 99, [0.1, 0.5, 0.2])
assert_raises(ValueError, firwin, 99, [0.1, 0.5, 0.5])
# Must have at least one cutoff value.
assert_raises(ValueError, firwin, 99, [])
# 2D array not allowed.
assert_raises(ValueError, firwin, 99, [[0.1, 0.2],[0.3, 0.4]])
# cutoff values must be less than nyq.
assert_raises(ValueError, firwin, 99, 50.0, nyq=40)
assert_raises(ValueError, firwin, 99, [10, 20, 30], nyq=25)
def test_even_highpass_raises_value_error(self):
"""Test that attempt to create a highpass filter with an even number
of taps raises a ValueError exception."""
assert_raises(ValueError, firwin, 40, 0.5, pass_zero=False)
assert_raises(ValueError, firwin, 40, [.25, 0.5])
class TestFirwin2(TestCase):
def test_invalid_args(self):
# `freq` and `gain` have different lengths.
assert_raises(ValueError, firwin2, 50, [0, 0.5, 1], [0.0, 1.0])
# `nfreqs` is less than `ntaps`.
assert_raises(ValueError, firwin2, 50, [0, 0.5, 1], [0.0, 1.0, 1.0], nfreqs=33)
# Decreasing value in `freq`
assert_raises(ValueError, firwin2, 50, [0, 0.5, 0.4, 1.0], [0, .25, .5, 1.0])
# Value in `freq` repeated more than once.
assert_raises(ValueError, firwin2, 50, [0, .1, .1, .1, 1.0],
[0.0, 0.5, 0.75, 1.0, 1.0])
# `freq` does not start at 0.0.
assert_raises(ValueError, firwin2, 50, [0.5, 1.0], [0.0, 1.0])
# Type II filter, but the gain at nyquist rate is not zero.
assert_raises(ValueError, firwin2, 16, [0.0, 0.5, 1.0], [0.0, 1.0, 1.0])
# Type III filter, but the gains at nyquist and zero rate are not zero.
assert_raises(ValueError, firwin2, 17, [0.0, 0.5, 1.0], [0.0, 1.0, 1.0],
antisymmetric=True)
assert_raises(ValueError, firwin2, 17, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0],
antisymmetric=True)
assert_raises(ValueError, firwin2, 17, [0.0, 0.5, 1.0], [1.0, 1.0, 1.0],
antisymmetric=True)
# Type VI filter, but the gain at zero rate is not zero.
assert_raises(ValueError, firwin2, 16, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0],
antisymmetric=True)
def test01(self):
width = 0.04
beta = 12.0
ntaps = 400
# Filter is 1 from w=0 to w=0.5, then decreases linearly from 1 to 0 as w
# increases from w=0.5 to w=1 (w=1 is the Nyquist frequency).
freq = [0.0, 0.5, 1.0]
gain = [1.0, 1.0, 0.0]
taps = firwin2(ntaps, freq, gain, window=('kaiser', beta))
freq_samples = np.array([0.0, 0.25, 0.5-width/2, 0.5+width/2,
0.75, 1.0-width/2])
freqs, response = freqz(taps, worN=np.pi*freq_samples)
assert_array_almost_equal(np.abs(response),
[1.0, 1.0, 1.0, 1.0-width, 0.5, width], decimal=5)
def test02(self):
width = 0.04
beta = 12.0
# ntaps must be odd for positive gain at Nyquist.
ntaps = 401
# An ideal highpass filter.
freq = [0.0, 0.5, 0.5, 1.0]
gain = [0.0, 0.0, 1.0, 1.0]
taps = firwin2(ntaps, freq, gain, window=('kaiser', beta))
freq_samples = np.array([0.0, 0.25, 0.5-width, 0.5+width, 0.75, 1.0])
freqs, response = freqz(taps, worN=np.pi*freq_samples)
assert_array_almost_equal(np.abs(response),
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0], decimal=5)
def test03(self):
width = 0.02
ntaps, beta = kaiserord(120, width)
# ntaps must be odd for positive gain at Nyquist.
ntaps = int(ntaps) | 1
freq = [0.0, 0.4, 0.4, 0.5, 0.5, 1.0]
gain = [1.0, 1.0, 0.0, 0.0, 1.0, 1.0]
taps = firwin2(ntaps, freq, gain, window=('kaiser', beta))
freq_samples = np.array([0.0, 0.4-width, 0.4+width, 0.45,
0.5-width, 0.5+width, 0.75, 1.0])
freqs, response = freqz(taps, worN=np.pi*freq_samples)
assert_array_almost_equal(np.abs(response),
[1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0], decimal=5)
def test04(self):
"""Test firwin2 when window=None."""
ntaps = 5
# Ideal lowpass: gain is 1 on [0,0.5], and 0 on [0.5, 1.0]
freq = [0.0, 0.5, 0.5, 1.0]
gain = [1.0, 1.0, 0.0, 0.0]
taps = firwin2(ntaps, freq, gain, window=None, nfreqs=8193)
alpha = 0.5 * (ntaps - 1)
m = np.arange(0, ntaps) - alpha
h = 0.5 * sinc(0.5 * m)
assert_array_almost_equal(h, taps)
def test05(self):
"""Test firwin2 for calculating Type IV filters"""
ntaps = 1500
freq = [0.0, 1.0]
gain = [0.0, 1.0]
taps = firwin2(ntaps, freq, gain, window=None, antisymmetric=True)
assert_array_almost_equal(taps[: ntaps // 2], -taps[ntaps // 2:][::-1])
freqs, response = freqz(taps, worN=2048)
assert_array_almost_equal(abs(response), freqs / np.pi, decimal=4)
def test06(self):
"""Test firwin2 for calculating Type III filters"""
ntaps = 1501
freq = [0.0, 0.5, 0.55, 1.0]
gain = [0.0, 0.5, 0.0, 0.0]
taps = firwin2(ntaps, freq, gain, window=None, antisymmetric=True)
assert_equal(taps[ntaps // 2], 0.0)
assert_array_almost_equal(taps[: ntaps // 2], -taps[ntaps // 2 + 1:][::-1])
freqs, response1 = freqz(taps, worN=2048)
response2 = np.interp(freqs / np.pi, freq, gain)
assert_array_almost_equal(abs(response1), response2, decimal=3)
def test_nyq(self):
taps1 = firwin2(80, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
taps2 = firwin2(80, [0.0, 30.0, 60.0], [1.0, 1.0, 0.0], nyq=60.0)
assert_array_almost_equal(taps1, taps2)
class TestRemez(TestCase):
def test_bad_args(self):
assert_raises(ValueError, remez, 11, [0.1, 0.4], [1], type='pooka')
def test_hilbert(self):
N = 11 # number of taps in the filter
a = 0.1 # width of the transition band
# design a unity-gain hilbert bandpass filter from w to 0.5-w
h = remez(11, [a, 0.5-a], [1], type='hilbert')
# make sure the filter has correct # of taps
assert_(len(h) == N, "Number of Taps")
# make sure it is type III (anti-symmetric tap coefficients)
assert_array_almost_equal(h[:(N-1)//2], -h[:-(N-1)//2-1:-1])
# Since the requested response is symmetric, all even coefficients
# should be zero (or in this case really small)
assert_((abs(h[1::2]) < 1e-15).all(), "Even Coefficients Equal Zero")
# now check the frequency response
w, H = freqz(h, 1)
f = w/2/np.pi
Hmag = abs(H)
# should have a zero at 0 and pi (in this case close to zero)
assert_((Hmag[[0, -1]] < 0.02).all(), "Zero at zero and pi")
# check that the pass band is close to unity
idx = np.logical_and(f > a, f < 0.5-a)
assert_((abs(Hmag[idx] - 1) < 0.015).all(), "Pass Band Close To Unity")
class TestFirls(TestCase):
def test_bad_args(self):
# even numtaps
assert_raises(ValueError, firls, 10, [0.1, 0.2], [0, 0])
# odd bands
assert_raises(ValueError, firls, 11, [0.1, 0.2, 0.4], [0, 0, 0])
# len(bands) != len(desired)
assert_raises(ValueError, firls, 11, [0.1, 0.2, 0.3, 0.4], [0, 0, 0])
# non-monotonic bands
assert_raises(ValueError, firls, 11, [0.2, 0.1], [0, 0])
assert_raises(ValueError, firls, 11, [0.1, 0.2, 0.3, 0.3], [0] * 4)
assert_raises(ValueError, firls, 11, [0.3, 0.4, 0.1, 0.2], [0] * 4)
assert_raises(ValueError, firls, 11, [0.1, 0.3, 0.2, 0.4], [0] * 4)
# negative desired
assert_raises(ValueError, firls, 11, [0.1, 0.2], [-1, 1])
# len(weight) != len(pairs)
assert_raises(ValueError, firls, 11, [0.1, 0.2], [0, 0], [1, 2])
# negative weight
assert_raises(ValueError, firls, 11, [0.1, 0.2], [0, 0], [-1])
def test_firls(self):
N = 11 # number of taps in the filter
a = 0.1 # width of the transition band
# design a halfband symmetric low-pass filter
h = firls(11, [0, a, 0.5-a, 0.5], [1, 1, 0, 0], nyq=0.5)
# make sure the filter has correct # of taps
assert_equal(len(h), N)
# make sure it is symmetric
midx = (N-1) // 2
assert_array_almost_equal(h[:midx], h[:-midx-1:-1])
# make sure the center tap is 0.5
assert_almost_equal(h[midx], 0.5)
# For halfband symmetric, odd coefficients (except the center)
# should be zero (really small)
hodd = np.hstack((h[1:midx:2], h[-midx+1::2]))
assert_array_almost_equal(hodd, 0)
# now check the frequency response
w, H = freqz(h, 1)
f = w/2/np.pi
Hmag = np.abs(H)
# check that the pass band is close to unity
idx = np.logical_and(f > 0, f < a)
assert_array_almost_equal(Hmag[idx], 1, decimal=3)
# check that the stop band is close to zero
idx = np.logical_and(f > 0.5-a, f < 0.5)
assert_array_almost_equal(Hmag[idx], 0, decimal=3)
def test_compare(self):
# compare to OCTAVE output
taps = firls(9, [0, 0.5, 0.55, 1], [1, 1, 0, 0], [1, 2])
# >> taps = firls(8, [0 0.5 0.55 1], [1 1 0 0], [1, 2]);
known_taps = [-6.26930101730182e-04, -1.03354450635036e-01,
-9.81576747564301e-03, 3.17271686090449e-01,
5.11409425599933e-01, 3.17271686090449e-01,
-9.81576747564301e-03, -1.03354450635036e-01,
-6.26930101730182e-04]
assert_allclose(taps, known_taps)
# compare to MATLAB output
taps = firls(11, [0, 0.5, 0.5, 1], [1, 1, 0, 0], [1, 2])
# >> taps = firls(10, [0 0.5 0.5 1], [1 1 0 0], [1, 2]);
known_taps = [
0.058545300496815, -0.014233383714318, -0.104688258464392,
0.012403323025279, 0.317930861136062, 0.488047220029700,
0.317930861136062, 0.012403323025279, -0.104688258464392,
-0.014233383714318, 0.058545300496815]
assert_allclose(taps, known_taps)
# With linear changes:
taps = firls(7, (0, 1, 2, 3, 4, 5), [1, 0, 0, 1, 1, 0], nyq=10)
# >> taps = firls(6, [0, 0.1, 0.2, 0.3, 0.4, 0.5], [1, 0, 0, 1, 1, 0])
known_taps = [
1.156090832768218, -4.1385894727395849, 7.5288619164321826,
-8.5530572592947856, 7.5288619164321826, -4.1385894727395849,
1.156090832768218]
assert_allclose(taps, known_taps)
if __name__ == "__main__":
run_module_suite()
| gpl-3.0 |
MicBrain/Scheme-Interpreter | scheme.py | 1 | 21214 | """This module implements the core Scheme interpreter functions, including the
eval/apply mutual recursion, environment model, and read-eval-print loop.
"""
from scheme_primitives import *
from scheme_reader import *
from ucb import main, trace
##############
# Eval/Apply #
##############
def scheme_eval(expr, env):
"""Evaluate Scheme expression EXPR in environment ENV. If ENV is None,
simply returns EXPR as its value without further evaluation.
>>> expr = read_line("(+ 2 2)")
>>> expr
Pair('+', Pair(2, Pair(2, nil)))
>>> scheme_eval(expr, create_global_frame())
scnum(4)
"""
while env is not None:
# Note: until extra-credit problem 22 is complete, env will
# always be None on the second iteration of the loop, so that
# the value of EXPR is returned at that point.
if expr is None:
raise SchemeError("Cannot evaluate an undefined expression.")
# Evaluate Atoms
if scheme_symbolp(expr):
expr, env = env.lookup(expr).get_actual_value(), None
elif scheme_atomp(expr):
env = None
# All non-atomic expressions are lists.
elif not scheme_listp(expr):
raise SchemeError("malformed list: {0}".format(str(expr)))
else:
first, rest = scheme_car(expr), scheme_cdr(expr)
# Evaluate Combinations
if (scheme_symbolp(first) # first might be unhashable
and first in SPECIAL_FORMS):
expr, env = SPECIAL_FORMS[first](rest, env)
if not proper_tail_recursion:
expr, env = scheme_eval(expr, env), None
else:
procedure = scheme_eval(first, env)
args = procedure.evaluate_arguments(rest, env)
if proper_tail_recursion:
expr, env = procedure.apply(args, env)
else:
# UPDATED 4/14/2014 @ 19:08
expr, env = scheme_apply(procedure, args, env), None
return expr
# Tail call optimization is enabled; set proper_tail_recursion to False to
# fall back to fully recursive evaluation of tail expressions.
proper_tail_recursion = True
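# Illustrative sketch (assuming read_line and create_global_frame as used in
# the docstrings below): with the flag enabled, the while-loop in scheme_eval
# reuses a single Python frame per tail call, so a deeply recursive Scheme
# loop does not overflow the Python stack.
# >>> env = create_global_frame()
# >>> scheme_eval(read_line("(define (loop n) (if (= n 0) 0 (loop (- n 1))))"), env)
# >>> scheme_eval(read_line("(loop 100000)"), env) # no RecursionError expected
# scnum(0)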
def scheme_apply(procedure, args, env):
"""Apply PROCEDURE (type Procedure) to argument values ARGS
in environment ENV. Returns the resulting Scheme value."""
# UPDATED 4/14/2014 @ 19:08
# Since .apply is allowed to do a partial evaluation, we finish up
# with a call to scheme_eval to complete the evaluation. scheme_eval
# will simply return expr if its env argument is None.
expr, env = procedure.apply(args, env)
return scheme_eval(expr, env)
################
# Environments #
################
class Frame:
"""An environment frame binds Scheme symbols to Scheme values."""
def __init__(self, parent):
"""An empty frame with a PARENT frame (that may be None)."""
self.bindings = {}
self.parent = parent
def __repr__(self):
if self.parent is None:
return "<Global Frame>"
else:
s = sorted('{0}: {1}'.format(k,v) for k,v in self.bindings.items())
return "<{{{0}}} -> {1}>".format(', '.join(s), repr(self.parent))
def __eq__(self, other):
return isinstance(other, Frame) and \
self.parent == other.parent
def lookup(self, symbol):
"""Return the value bound to SYMBOL. Errors if SYMBOL is not found.
As a convenience, also accepts Python strings, which it turns into
symbols."""
if type(symbol) is str:
symbol = intern(symbol)
if symbol in self.bindings:
return self.bindings[symbol]
if self.parent is not None:
return self.parent.lookup(symbol)
raise SchemeError("unknown identifier: {0}".format(str(symbol)))
def global_frame(self):
"""The global environment at the root of the parent chain."""
e = self
while e.parent is not None:
e = e.parent
return e
def make_call_frame(self, formals, vals):
"""Return a new local frame whose parent is SELF, in which the symbols
in the Scheme formal parameter list FORMALS are bound to the Scheme
values in the Scheme value list VALS. Raise an error if too many or too
few arguments are given.
>>> env = create_global_frame()
>>> formals, vals = read_line("(a b c)"), read_line("(1 2 3)")
>>> env.make_call_frame(formals, vals)
<{a: 1, b: 2, c: 3} -> <Global Frame>>
"""
frame = Frame(self)
if len(formals) != len(vals):
raise SchemeError
for expression in range(len(formals)):
frame.define(formals[expression], vals[expression])
return frame
def define(self, sym, val):
"""Define Scheme symbol SYM to have value VAL in SELF. As a
convenience, SYM may be Python string, which is converted first
to a Scheme symbol. VAL must be a SchemeValue."""
assert isinstance(val, SchemeValue), "values must be SchemeValues"
if type(sym) is str:
sym = intern(sym)
self.bindings[sym] = val
#####################
# Procedures #
#####################
class Procedure(SchemeValue):
"""The superclass of all kinds of procedure in Scheme."""
# Arcane Technical Note: The odd placement of the import from scheme in
# evaluate_arguments is necessary because it introduces mutually recursive
# imports between this file and scheme.py. The effect of putting it
# here is that we delay attempting to access scheme.scheme_eval until
# after the scheme module's initialization is finished.
def evaluate_arguments(self, arg_list, env):
"""Evaluate the expressions in ARG_LIST in ENV to produce
arguments for this procedure. Default definition for procedures."""
from scheme import scheme_eval
return arg_list.map(lambda operand: scheme_eval(operand, env))
class PrimitiveProcedure(Procedure):
"""A Scheme procedure defined as a Python function."""
def __init__(self, fn, use_env=False):
self.fn = fn
self.use_env = use_env
def __str__(self):
return '#[primitive]'
def __repr__(self):
return "PrimitiveProcedure({})".format(str(self))
def apply(self, args, env):
"""Apply a primitive procedure to ARGS in ENV. Returns
a pair (val, None), where val is the resulting value.
>>> twos = Pair(SchemeInt(2), Pair(SchemeInt(2), nil))
>>> plus = PrimitiveProcedure(scheme_add, False)
>>> plus.apply(twos, None)
(scnum(4), None)
"""
try:
converted_list = []
while args != nil:
converted_list.append(args.first)
args = args.second
if self.use_env:
converted_list.append(env)
val = self.fn(*converted_list)
return val, None
except TypeError:
raise SchemeError
class LambdaProcedure(Procedure):
"""A procedure defined by a lambda expression or the complex define form."""
def __init__(self, formals, body, env=None):
"""A procedure whose formal parameter list is FORMALS (a Scheme list),
whose body is the single Scheme expression BODY, and whose parent
environment is the Frame ENV. A lambda expression containing multiple
expressions, such as (lambda (x) (display x) (+ x 1)) can be handled by
using (begin (display x) (+ x 1)) as the body."""
self.formals = formals
self.body = body
self.env = env
def _symbol(self):
return 'lambda'
def __str__(self):
# UPDATED 4/16/2014 @ 13:20
return "({0} {1} {2})".format(self._symbol(),
str(self.formals), str(self.body))
def __repr__(self):
args = (self.formals, self.body, self.env)
return "{0}Procedure({1}, {2}, {3})".format(self._symbol().capitalize(),
*(repr(a) for a in args))
def __eq__(self, other):
return type(other) is type(self) and \
self.formals == other.formals and \
self.body == other.body and \
self.env == other.env
def apply(self, args, env):
environment = self.env.make_call_frame(self.formals, args)
if proper_tail_recursion:
return self.body, environment
else:
return scheme_eval(self.body, environment), None
class MuProcedure(LambdaProcedure):
"""A procedure defined by a mu expression, which has dynamic scope.
"""
def _symbol(self):
return 'mu'
def apply(self, args, env):
if proper_tail_recursion:
return self.body, env.make_call_frame(self.formals, args)
else:
return scheme_eval(self.body, env.make_call_frame(self.formals, args)), None
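# Dynamic-scope sketch (assumed syntax, using the REPL helpers from the
# docstrings above): a mu body resolves free names in the caller's frame
# rather than the definition frame.
# >>> env = create_global_frame()
# >>> scheme_eval(read_line("(define f (mu () (* 2 x)))"), env)
# >>> scheme_eval(read_line("(define (g x) (f))"), env)
# >>> scheme_eval(read_line("(g 21)"), env) # x is found in g's frame
# scnum(42)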
# Call-by-name (nu) extension.
class NuProcedure(LambdaProcedure):
"""A procedure whose parameters are to be passed by name."""
def _symbol(self):
return 'nu'
def evaluate_arguments(self, arg_list, env):
"""Evaluate the expressions in ARG_LIST in ENV to produce
arguments for this procedure. Default definition for procedures."""
return arg_list.map(lambda operand: Thunk(nil, operand, env))
class Thunk(LambdaProcedure):
"""A by-name value that is to be called as a parameterless function when
its value is fetched to be used."""
def get_actual_value(self):
return scheme_eval(self.body, self.env)
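# Call-by-name sketch (assumed syntax): each operand is wrapped in a Thunk and
# only evaluated when its value is actually fetched, so an unused argument is
# never touched.
# >>> env = create_global_frame()
# >>> scheme_eval(read_line("(define f (nu (a b) a))"), env)
# >>> scheme_eval(read_line("(f 1 (undefined-op))"), env) # b is never forced
# scnum(1)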
#################
# Special forms #
#################
# All of the 'do_..._form' methods return a value and an environment,
# as for the 'apply' method on Procedures. That is, they either return
# (V, None), indicating that the value of the special form is V, or they
# return (Expr, Env), indicating that the value of the special form is what
# you would get by evaluating Expr in the environment Env.
def do_lambda_form(vals, env, function_type=LambdaProcedure):
"""Evaluate a lambda form with formals VALS[0] and body VALS.second
in environment ENV, creating a procedure of type FUNCTION_TYPE
(a subtype of Procedure)."""
check_form(vals, 2)
operands = vals.first
check_formals(operands)
body = vals.second
if len(body) != 1:
return function_type(operands, Pair("begin", body), env), None
return function_type(operands, body.first, env), None
def do_mu_form(vals, env):
"""Evaluate a mu (dynamically scoped lambda) form with formals VALS[0]
and body VALS.second in environment ENV."""
return do_lambda_form(vals, env, function_type=MuProcedure)
def do_nu_form(vals, env):
"""Evaluate a mu (call-by-name scoped lambda) form with formals VALS[0]
and body VALS.second in environment ENV."""
return do_lambda_form(vals, env, function_type=NuProcedure)
def do_define_form(vals, env):
"""Evaluate a define form with parameters VALS in environment ENV."""
check_form(vals, 2)
target = vals[0]
if scheme_symbolp(target):
check_form(vals, 2, 2)
env.define(target, scheme_eval(vals[1], env))
return (target, None)
elif scheme_pairp(target):
func_name = target.first
if isinstance(func_name, SchemeNumber) or isinstance(func_name, SchemeFloat):
raise SchemeError("bad argument to define")
lambda_vals = Pair(target.second, vals.second)
lambda_func = do_lambda_form(lambda_vals, env)[0]
env.define(func_name, lambda_func)
return func_name, None
else:
raise SchemeError("bad argument to define")
def do_quote_form(vals, env):
"""Evaluate a quote form with parameters VALS. ENV is ignored."""
check_form(vals, 1, 1)
return vals[0], None
def do_let_form(vals, env):
"""Evaluate a let form with parameters VALS in environment ENV."""
check_form(vals, 2)
bindings = vals[0]
exprs = vals.second
if not scheme_listp(bindings):
raise SchemeError("bad bindings list in let form")
# Add a frame containing bindings
names, values = nil, nil
for item in bindings:
values = Pair(scheme_eval(item.second.first, env), values)
names = Pair(item.first, names)
new_env = env.make_call_frame(names, values)
# Evaluate all but the last expression after bindings, and return the last
last = len(exprs)-1
for i in range(0, last):
scheme_eval(exprs[i], new_env)
return exprs[last], new_env
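# Illustrative behaviour (doctest-style, helpers assumed as above):
# >>> env = create_global_frame()
# >>> scheme_eval(read_line("(let ((x 2) (y 3)) (+ x y))"), env)
# scnum(5)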
#########################
# Logical Special Forms #
#########################
def do_if_form(vals, env):
"""Evaluate if form with parameters VALS in environment ENV."""
check_form(vals, 2, 3)
if (scheme_eval(vals.first, env)):
return vals.second.first, env
elif len(vals) == 2:
return okay, None
return vals.second.second.first, env
def do_and_form(vals, env):
"""Evaluate short-circuited and with parameters VALS in environment ENV."""
if len(vals):
for i in range(len(vals) - 1):
if not scheme_eval(vals[i], env):
return scheme_false, None
return vals[len(vals) - 1], env
return scheme_true, None
def quote(value):
"""Return a Scheme expression quoting the Scheme VALUE.
>>> s = quote('hello')
>>> print(s)
(quote hello)
>>> scheme_eval(s, Frame(None)) # "hello" is undefined in this frame.
intern('hello')
"""
return Pair("quote", Pair(value, nil))
def do_or_form(vals, env):
"""Evaluate short-circuited or with parameters VALS in environment ENV."""
for value in vals:
eval_expression = scheme_eval(value, env)
if eval_expression:
return eval_expression, None
return scheme_false, None
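# Short-circuit sketch (helpers assumed as above): evaluation stops at the
# first truthy value, so later operands are never evaluated.
# >>> env = create_global_frame()
# >>> scheme_eval(read_line("(or #f 2 (undefined-op))"), env)
# scnum(2)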
def do_cond_form(vals, env):
"""Evaluate cond form with parameters VALS in environment ENV."""
num_clauses = len(vals)
for i, clause in enumerate(vals):
check_form(clause, 1)
if clause.first is else_sym:
if i < num_clauses-1:
raise SchemeError("else must be last")
test = scheme_true
if clause.second is nil:
raise SchemeError("badly formed else clause")
else:
test = scheme_eval(clause.first, env)
if test:
if len(clause.second) == 0:
return test, None
if len(clause.second) >= 2:
return Pair('begin', clause.second), env
return clause.second.first, env
return okay, None
def do_begin_form(vals, env):
"""Evaluate begin form with parameters VALS in environment ENV."""
check_form(vals, 0)
if scheme_nullp(vals):
return okay, None
for i in range(len(vals) - 1):
scheme_eval(vals[i], env)
return vals[len(vals) - 1], env
# Collected symbols with significance to the interpreter
and_sym = intern("and")
begin_sym = intern("begin")
cond_sym = intern("cond")
define_macro_sym = intern("define-macro")
define_sym = intern("define")
else_sym = intern("else")
if_sym = intern("if")
lambda_sym = intern("lambda")
let_sym = intern("let")
mu_sym = intern("mu")
nu_sym = intern("nu")
or_sym = intern("or")
quasiquote_sym = intern("quasiquote")
quote_sym = intern("quote")
set_bang_sym = intern("set!")
unquote_splicing_sym = intern("unquote-splicing")
unquote_sym = intern("unquote")
# Collected special forms
SPECIAL_FORMS = {
and_sym: do_and_form,
begin_sym: do_begin_form,
cond_sym: do_cond_form,
define_sym: do_define_form,
if_sym: do_if_form,
lambda_sym: do_lambda_form,
let_sym: do_let_form,
mu_sym: do_mu_form,
nu_sym: do_nu_form,
or_sym: do_or_form,
quote_sym: do_quote_form,
}
# Utility methods for checking the structure of Scheme programs
def check_form(expr, min, max=None):
"""Check EXPR (default SELF.expr) is a proper list whose length is
at least MIN and no more than MAX (default: no maximum). Raises
a SchemeError if this is not the case."""
if not scheme_listp(expr):
raise SchemeError("badly formed expression: " + str(expr))
length = len(expr)
if length < min:
raise SchemeError("too few operands in form")
elif max is not None and length > max:
raise SchemeError("too many operands in form")
def check_formals(formals):
"""Check that FORMALS is a valid parameter list, a Scheme list of symbols
in which each symbol is distinct. Raise a SchemeError if the list of formals
is not a well-formed list of symbols or if any symbol is repeated.
>>> check_formals(read_line("(a b c)"))
"""
seen_symbols = []
while len(formals):
if not scheme_symbolp(formals.first) or formals.first in seen_symbols:
raise SchemeError
seen_symbols.append(formals.first)
formals = formals.second
################
# Input/Output #
################
def read_eval_print_loop(next_line, env, quiet=False, startup=False,
interactive=False, load_files=()):
"""Read and evaluate input until an end of file or keyboard interrupt."""
if startup:
for filename in load_files:
scheme_load(scstr(filename), True, env)
while True:
try:
src = next_line()
while src.more_on_line:
expression = scheme_read(src)
result = scheme_eval(expression, env)
if not quiet and result is not None:
scheme_print(result)
except (SchemeError, SyntaxError, ValueError, RuntimeError) as err:
if (isinstance(err, RuntimeError) and
'maximum recursion depth exceeded' not in err.args[0]):
raise
print("Error:", err)
except KeyboardInterrupt: # <Control>-C
if not startup:
raise
print("\nKeyboardInterrupt")
if not interactive:
return
except EOFError: # <Control>-D, etc.
return
def scheme_load(*args):
"""Load a Scheme source file. ARGS should be of the form (SYM, ENV) or (SYM,
QUIET, ENV). The file named SYM is loaded in environment ENV, with verbosity
determined by QUIET (default true)."""
if not (2 <= len(args) <= 3):
vals = args[:-1]
raise SchemeError("wrong number of arguments to load: {0}".format(vals))
sym = args[0]
quiet = args[1] if len(args) > 2 else True
env = args[-1]
if (scheme_stringp(sym)):
sym = intern(str(sym))
check_type(sym, scheme_symbolp, 0, "load")
with scheme_open(str(sym)) as infile:
lines = infile.readlines()
args = (lines, None) if quiet else (lines,)
def next_line():
return buffer_lines(*args)
read_eval_print_loop(next_line, env.global_frame(), quiet=quiet)
return okay
def scheme_open(filename):
"""If either FILENAME or FILENAME.scm is the name of a valid file,
return a Python file opened to it. Otherwise, raise an error."""
try:
return open(filename)
except IOError as exc:
if filename.endswith('.scm'):
raise SchemeError(str(exc))
try:
return open(filename + '.scm')
except IOError as exc:
raise SchemeError(str(exc))
def create_global_frame():
"""Initialize and return a single-frame environment with built-in names."""
env = Frame(None)
env.define("eval", PrimitiveProcedure(scheme_eval, True))
env.define("apply", PrimitiveProcedure(scheme_apply, True))
env.define("load", PrimitiveProcedure(scheme_load, True))
for names, fn in get_primitive_bindings():
for name in names:
proc = PrimitiveProcedure(fn)
env.define(name, proc)
return env
@main
def run(*argv):
next_line = buffer_input
interactive = True
load_files = ()
if argv:
try:
filename = argv[0]
if filename == '-load':
load_files = argv[1:]
else:
input_file = open(argv[0])
lines = input_file.readlines()
def next_line():
return buffer_lines(lines)
interactive = False
except IOError as err:
print(err)
sys.exit(1)
read_eval_print_loop(next_line, create_global_frame(), startup=True,
interactive=interactive, load_files=load_files)
tscheme_exitonclick()
| apache-2.0 |
danilito19/django | django/utils/feedgenerator.py | 183 | 17059 | """
Syndication feed generation library -- used for generating RSS, etc.
Sample usage:
>>> from django.utils import feedgenerator
>>> feed = feedgenerator.Rss201rev2Feed(
... title="Poynter E-Media Tidbits",
... link="http://www.poynter.org/column.asp?id=31",
... description="A group Weblog by the sharpest minds in online media/journalism/publishing.",
... language="en",
... )
>>> feed.add_item(
... title="Hello",
... link="http://www.holovaty.com/test/",
... description="Testing."
... )
>>> with open('test.rss', 'w') as fp:
... feed.write(fp, 'utf-8')
For definitions of the different versions of RSS, see:
http://web.archive.org/web/20110718035220/http://diveintomark.org/archives/2004/02/04/incompatible-rss
"""
from __future__ import unicode_literals
import datetime
import warnings
from django.utils import datetime_safe, six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text, iri_to_uri
from django.utils.six import StringIO
from django.utils.six.moves.urllib.parse import urlparse
from django.utils.xmlutils import SimplerXMLGenerator
def rfc2822_date(date):
# We can't use strftime() because it produces locale-dependent results, so
# we have to map English month and day names manually
months = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec',)
days = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
# Support datetime objects older than 1900
date = datetime_safe.new_datetime(date)
# We do this ourselves to be timezone aware, email.Utils is not tz aware.
dow = days[date.weekday()]
month = months[date.month - 1]
time_str = date.strftime('%s, %%d %s %%Y %%H:%%M:%%S ' % (dow, month))
if six.PY2: # strftime returns a byte string in Python 2
time_str = time_str.decode('utf-8')
offset = date.utcoffset()
# Historically, this function assumes that naive datetimes are in UTC.
if offset is None:
return time_str + '-0000'
else:
timezone = (offset.days * 24 * 60) + (offset.seconds // 60)
hour, minute = divmod(timezone, 60)
return time_str + '%+03d%02d' % (hour, minute)
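# Illustrative output (values assumed; a naive datetime is treated as UTC):
# >>> import datetime
# >>> rfc2822_date(datetime.datetime(2015, 6, 1, 12, 0))
# 'Mon, 01 Jun 2015 12:00:00 -0000'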
def rfc3339_date(date):
# Support datetime objects older than 1900
date = datetime_safe.new_datetime(date)
time_str = date.strftime('%Y-%m-%dT%H:%M:%S')
if six.PY2: # strftime returns a byte string in Python 2
time_str = time_str.decode('utf-8')
offset = date.utcoffset()
# Historically, this function assumes that naive datetimes are in UTC.
if offset is None:
return time_str + 'Z'
else:
timezone = (offset.days * 24 * 60) + (offset.seconds // 60)
hour, minute = divmod(timezone, 60)
return time_str + '%+03d:%02d' % (hour, minute)
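# Illustrative output (values assumed; naive datetimes again default to UTC):
# >>> rfc3339_date(datetime.datetime(2015, 6, 1, 12, 0))
# '2015-06-01T12:00:00Z'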
def get_tag_uri(url, date):
"""
Creates a TagURI.
See http://web.archive.org/web/20110514113830/http://diveintomark.org/archives/2004/05/28/howto-atom-id
"""
bits = urlparse(url)
d = ''
if date is not None:
d = ',%s' % datetime_safe.new_datetime(date).strftime('%Y-%m-%d')
return 'tag:%s%s:%s/%s' % (bits.hostname, d, bits.path, bits.fragment)
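# Illustrative output (URL and date assumed):
# >>> get_tag_uri('http://example.com/feeds/1#frag', datetime.datetime(2015, 6, 1))
# 'tag:example.com,2015-06-01:/feeds/1/frag'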
class SyndicationFeed(object):
"Base class for all syndication feeds. Subclasses should provide write()"
def __init__(self, title, link, description, language=None, author_email=None,
author_name=None, author_link=None, subtitle=None, categories=None,
feed_url=None, feed_copyright=None, feed_guid=None, ttl=None, **kwargs):
to_unicode = lambda s: force_text(s, strings_only=True)
if categories:
categories = [force_text(c) for c in categories]
if ttl is not None:
# Force ints to unicode
ttl = force_text(ttl)
self.feed = {
'title': to_unicode(title),
'link': iri_to_uri(link),
'description': to_unicode(description),
'language': to_unicode(language),
'author_email': to_unicode(author_email),
'author_name': to_unicode(author_name),
'author_link': iri_to_uri(author_link),
'subtitle': to_unicode(subtitle),
'categories': categories or (),
'feed_url': iri_to_uri(feed_url),
'feed_copyright': to_unicode(feed_copyright),
'id': feed_guid or link,
'ttl': ttl,
}
self.feed.update(kwargs)
self.items = []
def add_item(self, title, link, description, author_email=None,
author_name=None, author_link=None, pubdate=None, comments=None,
unique_id=None, unique_id_is_permalink=None, enclosure=None,
categories=(), item_copyright=None, ttl=None, updateddate=None, **kwargs):
"""
Adds an item to the feed. All args are expected to be Python Unicode
objects except pubdate and updateddate, which are datetime.datetime
objects, and enclosure, which is an instance of the Enclosure class.
"""
to_unicode = lambda s: force_text(s, strings_only=True)
if categories:
categories = [to_unicode(c) for c in categories]
if ttl is not None:
# Force ints to unicode
ttl = force_text(ttl)
item = {
'title': to_unicode(title),
'link': iri_to_uri(link),
'description': to_unicode(description),
'author_email': to_unicode(author_email),
'author_name': to_unicode(author_name),
'author_link': iri_to_uri(author_link),
'pubdate': pubdate,
'updateddate': updateddate,
'comments': to_unicode(comments),
'unique_id': to_unicode(unique_id),
'unique_id_is_permalink': unique_id_is_permalink,
'enclosure': enclosure,
'categories': categories or (),
'item_copyright': to_unicode(item_copyright),
'ttl': ttl,
}
item.update(kwargs)
self.items.append(item)
def num_items(self):
return len(self.items)
def root_attributes(self):
"""
Return extra attributes to place on the root (i.e. feed/channel) element.
Called from write().
"""
return {}
def add_root_elements(self, handler):
"""
Add elements in the root (i.e. feed/channel) element. Called
from write().
"""
pass
def item_attributes(self, item):
"""
Return extra attributes to place on each item (i.e. item/entry) element.
"""
return {}
def add_item_elements(self, handler, item):
"""
Add elements on each item (i.e. item/entry) element.
"""
pass
def write(self, outfile, encoding):
"""
Outputs the feed in the given encoding to outfile, which is a file-like
object. Subclasses should override this.
"""
raise NotImplementedError('subclasses of SyndicationFeed must provide a write() method')
def writeString(self, encoding):
"""
Returns the feed in the given encoding as a string.
"""
s = StringIO()
self.write(s, encoding)
return s.getvalue()
def latest_post_date(self):
"""
Returns the latest item's pubdate or updateddate. If no items
have either of these attributes this returns the current date/time.
"""
latest_date = None
date_keys = ('updateddate', 'pubdate')
for item in self.items:
for date_key in date_keys:
item_date = item.get(date_key)
if item_date:
if latest_date is None or item_date > latest_date:
latest_date = item_date
return latest_date or datetime.datetime.now()
class Enclosure(object):
"Represents an RSS enclosure"
def __init__(self, url, length, mime_type):
"All args are expected to be Python Unicode objects"
self.length, self.mime_type = length, mime_type
self.url = iri_to_uri(url)
class RssFeed(SyndicationFeed):
content_type = 'application/rss+xml; charset=utf-8'
def write(self, outfile, encoding):
handler = SimplerXMLGenerator(outfile, encoding)
handler.startDocument()
handler.startElement("rss", self.rss_attributes())
handler.startElement("channel", self.root_attributes())
self.add_root_elements(handler)
self.write_items(handler)
self.endChannelElement(handler)
handler.endElement("rss")
def rss_attributes(self):
return {"version": self._version,
"xmlns:atom": "http://www.w3.org/2005/Atom"}
def write_items(self, handler):
for item in self.items:
handler.startElement('item', self.item_attributes(item))
self.add_item_elements(handler, item)
handler.endElement("item")
def add_root_elements(self, handler):
handler.addQuickElement("title", self.feed['title'])
handler.addQuickElement("link", self.feed['link'])
handler.addQuickElement("description", self.feed['description'])
if self.feed['feed_url'] is not None:
handler.addQuickElement("atom:link", None,
{"rel": "self", "href": self.feed['feed_url']})
if self.feed['language'] is not None:
handler.addQuickElement("language", self.feed['language'])
for cat in self.feed['categories']:
handler.addQuickElement("category", cat)
if self.feed['feed_copyright'] is not None:
handler.addQuickElement("copyright", self.feed['feed_copyright'])
handler.addQuickElement("lastBuildDate", rfc2822_date(self.latest_post_date()))
if self.feed['ttl'] is not None:
handler.addQuickElement("ttl", self.feed['ttl'])
def endChannelElement(self, handler):
handler.endElement("channel")
@property
def mime_type(self):
warnings.warn(
'The mime_type attribute of RssFeed is deprecated. '
'Use content_type instead.',
RemovedInDjango20Warning, stacklevel=2
)
return self.content_type
class RssUserland091Feed(RssFeed):
_version = "0.91"
def add_item_elements(self, handler, item):
handler.addQuickElement("title", item['title'])
handler.addQuickElement("link", item['link'])
if item['description'] is not None:
handler.addQuickElement("description", item['description'])
class Rss201rev2Feed(RssFeed):
# Spec: http://blogs.law.harvard.edu/tech/rss
_version = "2.0"
def add_item_elements(self, handler, item):
handler.addQuickElement("title", item['title'])
handler.addQuickElement("link", item['link'])
if item['description'] is not None:
handler.addQuickElement("description", item['description'])
# Author information.
if item["author_name"] and item["author_email"]:
handler.addQuickElement("author", "%s (%s)" %
(item['author_email'], item['author_name']))
elif item["author_email"]:
handler.addQuickElement("author", item["author_email"])
elif item["author_name"]:
handler.addQuickElement("dc:creator", item["author_name"],
{"xmlns:dc": "http://purl.org/dc/elements/1.1/"})
if item['pubdate'] is not None:
handler.addQuickElement("pubDate", rfc2822_date(item['pubdate']))
if item['comments'] is not None:
handler.addQuickElement("comments", item['comments'])
if item['unique_id'] is not None:
guid_attrs = {}
if isinstance(item.get('unique_id_is_permalink'), bool):
guid_attrs['isPermaLink'] = str(
item['unique_id_is_permalink']).lower()
handler.addQuickElement("guid", item['unique_id'], guid_attrs)
if item['ttl'] is not None:
handler.addQuickElement("ttl", item['ttl'])
# Enclosure.
if item['enclosure'] is not None:
handler.addQuickElement("enclosure", '',
{"url": item['enclosure'].url, "length": item['enclosure'].length,
"type": item['enclosure'].mime_type})
# Categories.
for cat in item['categories']:
handler.addQuickElement("category", cat)
class Atom1Feed(SyndicationFeed):
# Spec: http://atompub.org/2005/07/11/draft-ietf-atompub-format-10.html
content_type = 'application/atom+xml; charset=utf-8'
ns = "http://www.w3.org/2005/Atom"
def write(self, outfile, encoding):
handler = SimplerXMLGenerator(outfile, encoding)
handler.startDocument()
handler.startElement('feed', self.root_attributes())
self.add_root_elements(handler)
self.write_items(handler)
handler.endElement("feed")
def root_attributes(self):
if self.feed['language'] is not None:
return {"xmlns": self.ns, "xml:lang": self.feed['language']}
else:
return {"xmlns": self.ns}
def add_root_elements(self, handler):
handler.addQuickElement("title", self.feed['title'])
handler.addQuickElement("link", "", {"rel": "alternate", "href": self.feed['link']})
if self.feed['feed_url'] is not None:
handler.addQuickElement("link", "", {"rel": "self", "href": self.feed['feed_url']})
handler.addQuickElement("id", self.feed['id'])
handler.addQuickElement("updated", rfc3339_date(self.latest_post_date()))
if self.feed['author_name'] is not None:
handler.startElement("author", {})
handler.addQuickElement("name", self.feed['author_name'])
if self.feed['author_email'] is not None:
handler.addQuickElement("email", self.feed['author_email'])
if self.feed['author_link'] is not None:
handler.addQuickElement("uri", self.feed['author_link'])
handler.endElement("author")
if self.feed['subtitle'] is not None:
handler.addQuickElement("subtitle", self.feed['subtitle'])
for cat in self.feed['categories']:
handler.addQuickElement("category", "", {"term": cat})
if self.feed['feed_copyright'] is not None:
handler.addQuickElement("rights", self.feed['feed_copyright'])
def write_items(self, handler):
for item in self.items:
handler.startElement("entry", self.item_attributes(item))
self.add_item_elements(handler, item)
handler.endElement("entry")
def add_item_elements(self, handler, item):
handler.addQuickElement("title", item['title'])
handler.addQuickElement("link", "", {"href": item['link'], "rel": "alternate"})
if item['pubdate'] is not None:
handler.addQuickElement('published', rfc3339_date(item['pubdate']))
if item['updateddate'] is not None:
handler.addQuickElement('updated', rfc3339_date(item['updateddate']))
# Author information.
if item['author_name'] is not None:
handler.startElement("author", {})
handler.addQuickElement("name", item['author_name'])
if item['author_email'] is not None:
handler.addQuickElement("email", item['author_email'])
if item['author_link'] is not None:
handler.addQuickElement("uri", item['author_link'])
handler.endElement("author")
# Unique ID.
if item['unique_id'] is not None:
unique_id = item['unique_id']
else:
unique_id = get_tag_uri(item['link'], item['pubdate'])
handler.addQuickElement("id", unique_id)
# Summary.
if item['description'] is not None:
handler.addQuickElement("summary", item['description'], {"type": "html"})
# Enclosure.
if item['enclosure'] is not None:
handler.addQuickElement("link", '',
{"rel": "enclosure",
"href": item['enclosure'].url,
"length": item['enclosure'].length,
"type": item['enclosure'].mime_type})
# Categories.
for cat in item['categories']:
handler.addQuickElement("category", "", {"term": cat})
# Rights.
if item['item_copyright'] is not None:
handler.addQuickElement("rights", item['item_copyright'])
@property
def mime_type(self):
warnings.warn(
'The mime_type attribute of Atom1Feed is deprecated. '
'Use content_type instead.',
RemovedInDjango20Warning, stacklevel=2
)
return self.content_type
# This isolates the decision of what the system default is, so calling code can
# do "feedgenerator.DefaultFeed" instead of "feedgenerator.Rss201rev2Feed".
DefaultFeed = Rss201rev2Feed
| bsd-3-clause |
Kussie/HTPC-Manager | libs/sqlobject/classregistry.py | 10 | 4903 | """
classregistry.py
2 February 2004, Ian Bicking <ianb@colorstudy.com>
Resolves strings to classes, and runs callbacks when referenced
classes are created.
Classes are referred to only by name, not by module. So that
identically-named classes can coexist, classes are put into individual
registries, which are keyed on strings (names). These registries are
created on demand.
Use like::
>>> import classregistry
>>> registry = classregistry.registry('MyModules')
>>> def afterMyClassExists(cls):
... print 'Class finally exists:', cls
>>> registry.addClassCallback('MyClass', afterMyClassExists)
>>> class MyClass:
... pass
>>> registry.addClass(MyClass)
Class finally exists: MyClass
"""
class ClassRegistry(object):
"""
We'll be dealing with classes that reference each other, so
class C1 may reference C2 (in a join), while C2 references
C1 right back. Since classes are created in an order, there
will be a point when C1 exists but C2 doesn't. So we deal
with classes by name, and after each class is created we
try to fix up any references by replacing the names with
actual classes.
Here we keep a dictionary of class names to classes -- note
that the classes might be spread among different modules, and
since we pile them together names need to be globally unique,
not just module unique.
The registries themselves live in a container dictionary (see
_MasterRegistry below), keyed by registry name.
"""
def __init__(self, name):
self.name = name
self.classes = {}
self.callbacks = {}
self.genericCallbacks = []
def addClassCallback(self, className, callback, *args, **kw):
"""
Whenever a name is substituted for the class, you can register
a callback that will be called when the needed class is
created. If it's already been created, the callback will be
called immediately.
"""
if className in self.classes:
callback(self.classes[className], *args, **kw)
else:
self.callbacks.setdefault(className, []).append((callback, args, kw))
def addCallback(self, callback, *args, **kw):
"""
This callback is called for all classes, not just specific
ones (like addClassCallback).
"""
self.genericCallbacks.append((callback, args, kw))
for cls in self.classes.values():
callback(cls, *args, **kw)
def addClass(self, cls):
"""
Every time a class is created, we add it to the registry, so
that other classes can find it by name. We also call any
callbacks that are waiting for the class.
"""
if cls.__name__ in self.classes:
import sys
other = self.classes[cls.__name__]
raise ValueError(
"class %s is already in the registry (other class is "
"%r, from the module %s in %s; attempted new class is "
"%r, from the module %s in %s)"
% (cls.__name__,
other, other.__module__,
getattr(sys.modules.get(other.__module__),
'__file__', '(unknown)'),
cls, cls.__module__,
getattr(sys.modules.get(cls.__module__),
'__file__', '(unknown)')))
self.classes[cls.__name__] = cls
if cls.__name__ in self.callbacks:
for callback, args, kw in self.callbacks[cls.__name__]:
callback(cls, *args, **kw)
del self.callbacks[cls.__name__]
for callback, args, kw in self.genericCallbacks:
callback(cls, *args, **kw)
def getClass(self, className):
try:
return self.classes[className]
except KeyError:
names = sorted(self.classes.keys())
raise KeyError(
"No class %s found in the registry %s (these classes "
"exist: %s)"
% (className, self.name or '[default]', ', '.join(names)))
def allClasses(self):
return self.classes.values()
class _MasterRegistry(object):
"""
This singleton holds all the class registries. There can be
multiple registries to hold different unrelated sets of classes
that reside in the same process. These registries are named with
strings, and are created on demand. The MasterRegistry module
global holds the singleton.
"""
def __init__(self):
self.registries = {}
def registry(self, item):
if item not in self.registries:
self.registries[item] = ClassRegistry(item)
return self.registries[item]
MasterRegistry = _MasterRegistry()
registry = MasterRegistry.registry
def findClass(name, class_registry=None):
return registry(class_registry).getClass(name)
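# findClass(name) is shorthand for registry(class_registry).getClass(name);
# passing class_registry picks a named registry instead of the default (None).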
| mit |
blighj/django | tests/generic_inline_admin/tests.py | 47 | 22688 | from django.contrib import admin
from django.contrib.admin.sites import AdminSite
from django.contrib.auth.models import User
from django.contrib.contenttypes.admin import GenericTabularInline
from django.contrib.contenttypes.forms import generic_inlineformset_factory
from django.contrib.contenttypes.models import ContentType
from django.forms.formsets import DEFAULT_MAX_NUM
from django.forms.models import ModelForm
from django.test import (
RequestFactory, SimpleTestCase, TestCase, override_settings,
)
from django.urls import reverse
from .admin import MediaInline, MediaPermanentInline, site as admin_site
from .models import Category, Episode, EpisodePermanent, Media, PhoneNumber
class TestDataMixin:
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(username='super', password='secret', email='super@example.com')
# Set DEBUG to True to ensure {% include %} will raise exceptions.
# That is how inlines are rendered and #9498 will bubble up if it is an issue.
@override_settings(DEBUG=True, ROOT_URLCONF='generic_inline_admin.urls')
class GenericAdminViewTest(TestDataMixin, TestCase):
def setUp(self):
self.client.force_login(self.superuser)
e = Episode.objects.create(name='This Week in Django')
self.episode_pk = e.pk
m = Media(content_object=e, url='http://example.com/podcast.mp3')
m.save()
self.mp3_media_pk = m.pk
m = Media(content_object=e, url='http://example.com/logo.png')
m.save()
self.png_media_pk = m.pk
def test_basic_add_GET(self):
"""
A smoke test to ensure GET on the add_view works.
"""
response = self.client.get(reverse('admin:generic_inline_admin_episode_add'))
self.assertEqual(response.status_code, 200)
def test_basic_edit_GET(self):
"""
A smoke test to ensure GET on the change_view works.
"""
response = self.client.get(
reverse('admin:generic_inline_admin_episode_change', args=(self.episode_pk,))
)
self.assertEqual(response.status_code, 200)
def test_basic_add_POST(self):
"""
A smoke test to ensure POST on add_view works.
"""
post_data = {
"name": "This Week in Django",
# inline data
"generic_inline_admin-media-content_type-object_id-TOTAL_FORMS": "1",
"generic_inline_admin-media-content_type-object_id-INITIAL_FORMS": "0",
"generic_inline_admin-media-content_type-object_id-MAX_NUM_FORMS": "0",
}
response = self.client.post(reverse('admin:generic_inline_admin_episode_add'), post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def test_basic_edit_POST(self):
"""
A smoke test to ensure POST on edit_view works.
"""
post_data = {
"name": "This Week in Django",
# inline data
"generic_inline_admin-media-content_type-object_id-TOTAL_FORMS": "3",
"generic_inline_admin-media-content_type-object_id-INITIAL_FORMS": "2",
"generic_inline_admin-media-content_type-object_id-MAX_NUM_FORMS": "0",
"generic_inline_admin-media-content_type-object_id-0-id": "%d" % self.mp3_media_pk,
"generic_inline_admin-media-content_type-object_id-0-url": "http://example.com/podcast.mp3",
"generic_inline_admin-media-content_type-object_id-1-id": "%d" % self.png_media_pk,
"generic_inline_admin-media-content_type-object_id-1-url": "http://example.com/logo.png",
"generic_inline_admin-media-content_type-object_id-2-id": "",
"generic_inline_admin-media-content_type-object_id-2-url": "",
}
url = reverse('admin:generic_inline_admin_episode_change', args=(self.episode_pk,))
response = self.client.post(url, post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def test_generic_inline_formset(self):
EpisodeMediaFormSet = generic_inlineformset_factory(
Media,
can_delete=False,
exclude=['description', 'keywords'],
extra=3,
)
e = Episode.objects.get(name='This Week in Django')
# Works with no queryset
formset = EpisodeMediaFormSet(instance=e)
self.assertEqual(len(formset.forms), 5)
self.assertHTMLEqual(
formset.forms[0].as_p(),
'<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">'
'Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-0-url" '
'type="url" name="generic_inline_admin-media-content_type-object_id-0-url" '
'value="http://example.com/podcast.mp3" maxlength="200" />'
'<input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" '
'value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>'
% self.mp3_media_pk
)
self.assertHTMLEqual(
formset.forms[1].as_p(),
'<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">'
'Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" '
'type="url" name="generic_inline_admin-media-content_type-object_id-1-url" '
'value="http://example.com/logo.png" maxlength="200" />'
'<input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" '
'value="%s" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>'
% self.png_media_pk
)
self.assertHTMLEqual(
formset.forms[2].as_p(),
'<p><label for="id_generic_inline_admin-media-content_type-object_id-2-url">Url:</label>'
'<input id="id_generic_inline_admin-media-content_type-object_id-2-url" type="url" '
'name="generic_inline_admin-media-content_type-object_id-2-url" maxlength="200" />'
'<input type="hidden" name="generic_inline_admin-media-content_type-object_id-2-id" '
'id="id_generic_inline_admin-media-content_type-object_id-2-id" /></p>'
)
# A queryset can be used to alter display ordering
formset = EpisodeMediaFormSet(instance=e, queryset=Media.objects.order_by('url'))
self.assertEqual(len(formset.forms), 5)
self.assertHTMLEqual(
formset.forms[0].as_p(),
'<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label>'
'<input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="url" '
'name="generic_inline_admin-media-content_type-object_id-0-url"'
'value="http://example.com/logo.png" maxlength="200" />'
'<input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" '
'value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>'
% self.png_media_pk
)
self.assertHTMLEqual(
formset.forms[1].as_p(),
'<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">Url:</label>'
'<input id="id_generic_inline_admin-media-content_type-object_id-1-url" type="url" '
'name="generic_inline_admin-media-content_type-object_id-1-url" '
'value="http://example.com/podcast.mp3" maxlength="200" />'
'<input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" '
'value="%s" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>'
% self.mp3_media_pk
)
self.assertHTMLEqual(
formset.forms[2].as_p(),
'<p><label for="id_generic_inline_admin-media-content_type-object_id-2-url">'
'Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-2-url" '
'type="url" name="generic_inline_admin-media-content_type-object_id-2-url" maxlength="200" />'
'<input type="hidden" name="generic_inline_admin-media-content_type-object_id-2-id" '
'id="id_generic_inline_admin-media-content_type-object_id-2-id" /></p>'
)
# Works with a queryset that omits items
formset = EpisodeMediaFormSet(instance=e, queryset=Media.objects.filter(url__endswith=".png"))
self.assertEqual(len(formset.forms), 4)
self.assertHTMLEqual(
formset.forms[0].as_p(),
'<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label>'
' <input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="url" '
'name="generic_inline_admin-media-content_type-object_id-0-url" '
'value="http://example.com/logo.png" maxlength="200" />'
'<input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" '
'value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>'
% self.png_media_pk
)
self.assertHTMLEqual(
formset.forms[1].as_p(),
'<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">'
'Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" '
'type="url" name="generic_inline_admin-media-content_type-object_id-1-url" maxlength="200" />'
'<input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" '
'id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>'
)
def test_generic_inline_formset_factory(self):
# Regression test for #10522.
inline_formset = generic_inlineformset_factory(Media, exclude=('url',))
# Regression test for #12340.
e = Episode.objects.get(name='This Week in Django')
formset = inline_formset(instance=e)
self.assertTrue(formset.get_queryset().ordered)
@override_settings(ROOT_URLCONF='generic_inline_admin.urls')
class GenericInlineAdminParametersTest(TestDataMixin, TestCase):
def setUp(self):
self.client.force_login(self.superuser)
self.factory = RequestFactory()
def _create_object(self, model):
"""
Create a model with an attached Media object via GFK. We can't
load content via a fixture (since the GenericForeignKey relies on
content type IDs, which will vary depending on what other tests
have been run), thus we do it here.
"""
e = model.objects.create(name='This Week in Django')
Media.objects.create(content_object=e, url='http://example.com/podcast.mp3')
return e
def test_no_param(self):
"""
With one initial form, extra (default) at 3, there should be 4 forms.
"""
e = self._create_object(Episode)
response = self.client.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
formset = response.context['inline_admin_formsets'][0].formset
self.assertEqual(formset.total_form_count(), 4)
self.assertEqual(formset.initial_form_count(), 1)
def test_extra_param(self):
"""
With extra=0, there should be one form.
"""
class ExtraInline(GenericTabularInline):
model = Media
extra = 0
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [ExtraInline]
e = self._create_object(Episode)
request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.total_form_count(), 1)
self.assertEqual(formset.initial_form_count(), 1)
def test_max_num_param(self):
"""
With extra=5 and max_num=2, there should be only 2 forms.
"""
class MaxNumInline(GenericTabularInline):
model = Media
extra = 5
max_num = 2
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [MaxNumInline]
e = self._create_object(Episode)
request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.total_form_count(), 2)
self.assertEqual(formset.initial_form_count(), 1)
def test_min_num_param(self):
"""
With extra=3 and min_num=2, there should be five forms.
"""
class MinNumInline(GenericTabularInline):
model = Media
extra = 3
min_num = 2
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [MinNumInline]
e = self._create_object(Episode)
request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.total_form_count(), 5)
self.assertEqual(formset.initial_form_count(), 1)
def test_get_extra(self):
class GetExtraInline(GenericTabularInline):
model = Media
extra = 4
def get_extra(self, request, obj):
return 2
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [GetExtraInline]
e = self._create_object(Episode)
request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.extra, 2)
def test_get_min_num(self):
class GetMinNumInline(GenericTabularInline):
model = Media
min_num = 5
def get_min_num(self, request, obj):
return 2
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [GetMinNumInline]
e = self._create_object(Episode)
request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.min_num, 2)
def test_get_max_num(self):
class GetMaxNumInline(GenericTabularInline):
model = Media
extra = 5
def get_max_num(self, request, obj):
return 2
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [GetMaxNumInline]
e = self._create_object(Episode)
request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.max_num, 2)
@override_settings(ROOT_URLCONF='generic_inline_admin.urls')
class GenericInlineAdminWithUniqueTogetherTest(TestDataMixin, TestCase):
def setUp(self):
self.client.force_login(self.superuser)
def test_add(self):
category_id = Category.objects.create(name='male').pk
post_data = {
"name": "John Doe",
# inline data
"generic_inline_admin-phonenumber-content_type-object_id-TOTAL_FORMS": "1",
"generic_inline_admin-phonenumber-content_type-object_id-INITIAL_FORMS": "0",
"generic_inline_admin-phonenumber-content_type-object_id-MAX_NUM_FORMS": "0",
"generic_inline_admin-phonenumber-content_type-object_id-0-id": "",
"generic_inline_admin-phonenumber-content_type-object_id-0-phone_number": "555-555-5555",
"generic_inline_admin-phonenumber-content_type-object_id-0-category": "%s" % category_id,
}
response = self.client.get(reverse('admin:generic_inline_admin_contact_add'))
self.assertEqual(response.status_code, 200)
response = self.client.post(reverse('admin:generic_inline_admin_contact_add'), post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def test_delete(self):
from .models import Contact
c = Contact.objects.create(name='foo')
PhoneNumber.objects.create(
object_id=c.id,
content_type=ContentType.objects.get_for_model(Contact),
phone_number="555-555-5555",
)
response = self.client.post(reverse('admin:generic_inline_admin_contact_delete', args=[c.pk]))
self.assertContains(response, 'Are you sure you want to delete')
@override_settings(ROOT_URLCONF='generic_inline_admin.urls')
class NoInlineDeletionTest(SimpleTestCase):
def test_no_deletion(self):
inline = MediaPermanentInline(EpisodePermanent, admin_site)
fake_request = object()
formset = inline.get_formset(fake_request)
self.assertFalse(formset.can_delete)
class MockRequest:
pass
class MockSuperUser:
def has_perm(self, perm):
return True
request = MockRequest()
request.user = MockSuperUser()
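# Module-level mock request shared by the ModelAdmin tests below;
# MockSuperUser answers True to every permission check, so formset
# construction is never blocked by admin permissions.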
@override_settings(ROOT_URLCONF='generic_inline_admin.urls')
class GenericInlineModelAdminTest(SimpleTestCase):
def setUp(self):
self.site = AdminSite()
def test_get_formset_kwargs(self):
media_inline = MediaInline(Media, AdminSite())
# Create a formset with default arguments
formset = media_inline.get_formset(request)
self.assertEqual(formset.max_num, DEFAULT_MAX_NUM)
self.assertIs(formset.can_order, False)
# Create a formset with custom keyword arguments
formset = media_inline.get_formset(request, max_num=100, can_order=True)
self.assertEqual(formset.max_num, 100)
self.assertIs(formset.can_order, True)
def test_custom_form_meta_exclude_with_readonly(self):
"""
The custom ModelForm's `Meta.exclude` is respected when
used in conjunction with `GenericInlineModelAdmin.readonly_fields`
and when no `ModelAdmin.exclude` is defined.
"""
class MediaForm(ModelForm):
class Meta:
model = Media
exclude = ['url']
class MediaInline(GenericTabularInline):
readonly_fields = ['description']
form = MediaForm
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [
MediaInline
]
ma = EpisodeAdmin(Episode, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['keywords', 'id', 'DELETE'])
def test_custom_form_meta_exclude(self):
"""
The custom ModelForm's `Meta.exclude` is respected by
`GenericInlineModelAdmin.get_formset`, and overridden if
`ModelAdmin.exclude` or `GenericInlineModelAdmin.exclude` are defined.
Refs #15907.
"""
# First with `GenericInlineModelAdmin` -----------------
class MediaForm(ModelForm):
class Meta:
model = Media
exclude = ['url']
class MediaInline(GenericTabularInline):
exclude = ['description']
form = MediaForm
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [
MediaInline
]
ma = EpisodeAdmin(Episode, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['url', 'keywords', 'id', 'DELETE'])
# Then, only with `ModelForm` -----------------
class MediaInline(GenericTabularInline):
form = MediaForm
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [
MediaInline
]
ma = EpisodeAdmin(Episode, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['description', 'keywords', 'id', 'DELETE'])
def test_get_fieldsets(self):
# get_fieldsets is called when figuring out form fields.
# Refs #18681.
class MediaForm(ModelForm):
class Meta:
model = Media
fields = '__all__'
class MediaInline(GenericTabularInline):
form = MediaForm
model = Media
can_delete = False
def get_fieldsets(self, request, obj=None):
return [(None, {'fields': ['url', 'description']})]
ma = MediaInline(Media, self.site)
form = ma.get_formset(None).form
self.assertEqual(form._meta.fields, ['url', 'description'])
def test_get_formsets_with_inlines_returns_tuples(self):
"""
get_formsets_with_inlines() returns the correct tuples.
"""
class MediaForm(ModelForm):
class Meta:
model = Media
exclude = ['url']
class MediaInline(GenericTabularInline):
form = MediaForm
model = Media
class AlternateInline(GenericTabularInline):
form = MediaForm
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [
AlternateInline, MediaInline
]
ma = EpisodeAdmin(Episode, self.site)
inlines = ma.get_inline_instances(request)
for (formset, inline), other_inline in zip(ma.get_formsets_with_inlines(request), inlines):
self.assertIsInstance(formset, other_inline.get_formset(request).__class__)
| bsd-3-clause |
huobaowangxi/scikit-learn | sklearn/mixture/tests/test_dpgmm.py | 261 | 4490 | import unittest
import sys
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less, assert_equal
from sklearn.mixture.tests.test_gmm import GMMTester
from sklearn.externals.six.moves import cStringIO as StringIO
np.seterr(all='warn')
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
def test_verbose_boolean():
# checks that the verbose output is identical
# for the flag values '1' and 'True'
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm_bool = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=True)
dpgmm_int = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
# generate output with the boolean flag
dpgmm_bool.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
bool_output = verbose_output.readline()
# generate output with the int flag
dpgmm_int.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
int_output = verbose_output.readline()
assert_equal(bool_output, int_output)
finally:
sys.stdout = old_stdout
def test_verbose_first_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
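# log_normalize exponentiates and renormalizes, so the constant factor 2
# cancels: exp(log(2v)) / sum(exp(log(2v))) = 2v / (2 * sum(v)) = v, since
# v already sums to 1.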
assert np.allclose(v, log_normalize(a), rtol=0.01)
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
| bsd-3-clause |
nlholdem/icodoom | .venv/lib/python2.7/site-packages/tensorflow/python/ops/standard_ops.py | 20 | 11074 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unused-import
"""Import names of Tensor Flow standard Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys as _sys
# Imports the following modules so that the @RegisterGradient decorators get executed.
from tensorflow.python.ops import array_grad
from tensorflow.python.ops import data_flow_grad
from tensorflow.python.ops import math_grad
from tensorflow.python.ops import sparse_grad
from tensorflow.python.ops import state_grad
from tensorflow.python.ops import tensor_array_grad
from tensorflow.python.util.all_util import remove_undocumented
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.array_ops import *
from tensorflow.python.ops.check_ops import *
from tensorflow.python.ops.clip_ops import *
from tensorflow.python.ops.special_math_ops import *
# TODO(vrv): Switch to import * once we're okay with exposing the module.
from tensorflow.python.ops.confusion_matrix import confusion_matrix
from tensorflow.python.ops.control_flow_ops import Assert
from tensorflow.python.ops.control_flow_ops import group
from tensorflow.python.ops.control_flow_ops import no_op
from tensorflow.python.ops.control_flow_ops import tuple
from tensorflow.python.ops.control_flow_ops import cond
from tensorflow.python.ops.control_flow_ops import case
from tensorflow.python.ops.control_flow_ops import while_loop
from tensorflow.python.ops.data_flow_ops import *
from tensorflow.python.ops.functional_ops import *
from tensorflow.python.ops.gradients import *
from tensorflow.python.ops.histogram_ops import *
from tensorflow.python.ops.init_ops import *
from tensorflow.python.ops.io_ops import *
from tensorflow.python.ops.linalg_ops import *
from tensorflow.python.ops.logging_ops import Print
from tensorflow.python.ops.logging_ops import get_summary_op
from tensorflow.python.ops.math_ops import *
from tensorflow.python.ops.numerics import *
from tensorflow.python.ops.parsing_ops import *
from tensorflow.python.ops.partitioned_variables import *
from tensorflow.python.ops.random_ops import *
from tensorflow.python.ops.script_ops import py_func
from tensorflow.python.ops.session_ops import *
from tensorflow.python.ops.sparse_ops import *
from tensorflow.python.ops.state_ops import assign
from tensorflow.python.ops.state_ops import assign_add
from tensorflow.python.ops.state_ops import assign_sub
from tensorflow.python.ops.state_ops import count_up_to
from tensorflow.python.ops.state_ops import scatter_add
from tensorflow.python.ops.state_ops import scatter_div
from tensorflow.python.ops.state_ops import scatter_mul
from tensorflow.python.ops.state_ops import scatter_sub
from tensorflow.python.ops.state_ops import scatter_update
from tensorflow.python.ops.state_ops import scatter_nd_add
from tensorflow.python.ops.state_ops import scatter_nd_sub
# TODO(simister): Re-enable once binary size increase due to scatter_nd
# ops is under control.
# from tensorflow.python.ops.state_ops import scatter_nd_mul
# from tensorflow.python.ops.state_ops import scatter_nd_div
from tensorflow.python.ops.state_ops import scatter_nd_update
from tensorflow.python.ops.string_ops import *
from tensorflow.python.ops.template import *
from tensorflow.python.ops.tensor_array_ops import *
from tensorflow.python.ops.variable_scope import *
from tensorflow.python.ops.variables import *
# pylint: enable=wildcard-import
#### For use in remove_undocumented below:
from tensorflow.python.framework import constant_op as _constant_op
from tensorflow.python.ops import array_ops as _array_ops
from tensorflow.python.ops import check_ops as _check_ops
from tensorflow.python.ops import clip_ops as _clip_ops
from tensorflow.python.ops import confusion_matrix as _confusion_matrix
from tensorflow.python.ops import control_flow_ops as _control_flow_ops
from tensorflow.python.ops import data_flow_ops as _data_flow_ops
from tensorflow.python.ops import functional_ops as _functional_ops
from tensorflow.python.ops import gradients as _gradients
from tensorflow.python.ops import histogram_ops as _histogram_ops
from tensorflow.python.ops import init_ops as _init_ops
from tensorflow.python.ops import io_ops as _io_ops
from tensorflow.python.ops import linalg_ops as _linalg_ops
from tensorflow.python.ops import logging_ops as _logging_ops
from tensorflow.python.ops import math_ops as _math_ops
from tensorflow.python.ops import numerics as _numerics
from tensorflow.python.ops import parsing_ops as _parsing_ops
from tensorflow.python.ops import partitioned_variables as _partitioned_variables
from tensorflow.python.ops import random_ops as _random_ops
from tensorflow.python.ops import script_ops as _script_ops
from tensorflow.python.ops import session_ops as _session_ops
from tensorflow.python.ops import sparse_ops as _sparse_ops
from tensorflow.python.ops import special_math_ops as _special_math_ops
from tensorflow.python.ops import state_ops as _state_ops
from tensorflow.python.ops import string_ops as _string_ops
from tensorflow.python.ops import template as _template
from tensorflow.python.ops import tensor_array_ops as _tensor_array_ops
from tensorflow.python.ops import variable_scope as _variable_scope
from tensorflow.python.ops import variables as _variables
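# The _allowed_symbols_* lists below whitelist names that should stay
# publicly exported even though they lack documentation; remove_undocumented()
# at the bottom of this file strips everything else pulled in by the
# wildcard imports above.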
_allowed_symbols_math_ops = [
# TODO(drpng): decide if we want to reference these in the documentation.
"reduced_shape",
"sparse_segment_mean_grad",
"sparse_segment_sqrt_n_grad",
# Legacy: will be removed.
"arg_max",
"arg_min",
"lin_space",
"sparse_matmul", # Use tf.matmul.
# Deprecated (see versions.h):
"batch_fft",
"batch_fft2d",
"batch_fft3d",
"batch_ifft",
"batch_ifft2d",
"batch_ifft3d",
"mul", # use tf.multiply instead.
"neg", # use tf.negative instead.
"sub", # use tf.subtract instead.
# These are documented in nn.
# We are not importing nn because it would create a circular dependency.
"sigmoid",
"tanh",
]
_allowed_symbols_array_ops = [
# TODO(drpng): make sure they are documented.
# Scalars:
"NEW_AXIS",
"SHRINK_AXIS",
"newaxis",
# Documented in training.py.
# I do not import train, to avoid circular dependencies.
# TODO(drpng): this is defined in gen_array_ops, clearly not the right
# place.
"stop_gradient",
# See gen_docs_combined for tf.copy documentation.
"copy",
## TODO(drpng): make them inaccessible directly.
## TODO(drpng): Below, to-doc means that we need to find an appropriate
## documentation section to reference.
## For re-exporting to tf.*:
"constant",
"edit_distance", # to-doc
# From gen_array_ops:
"copy_host", # to-doc
"immutable_const", # to-doc
"invert_permutation", # to-doc
"quantize_and_dequantize", # to-doc
# TODO(drpng): legacy symbols to be removed.
"list_diff", # Use tf.listdiff instead.
"batch_matrix_diag",
"batch_matrix_band_part",
"batch_matrix_diag_part",
"batch_matrix_set_diag",
"concat_v2", # Use tf.concat instead.
]
_allowed_symbols_partitioned_variables = [
"PartitionedVariable", # Requires doc link.
# Legacy.
"create_partitioned_variables",
"variable_axis_size_partitioner",
"min_max_variable_partitioner",
"fixed_size_partitioner",
]
_allowed_symbols_control_flow_ops = [
# TODO(drpng): Find a place in the documentation to reference these or
# remove.
"control_trigger",
"loop_cond",
"merge",
"switch",
]
_allowed_symbols_functional_ops = [
"nest", # Used by legacy code.
]
_allowed_symbols_gradients = [
# Documented in training.py:
# Not importing training.py to avoid complex graph dependencies.
"AggregationMethod",
"gradients", # tf.gradients = gradients.gradients
"hessians",
]
_allowed_symbols_clip_ops = [
# Documented in training.py:
# Not importing training.py to avoid complex graph dependencies.
"clip_by_average_norm",
"clip_by_global_norm",
"clip_by_norm",
"clip_by_value",
"global_norm",
]
_allowed_symbols_image_ops = [
# Documented in training.py.
# We are not importing training.py to avoid complex dependencies.
"audio_summary",
"histogram_summary",
"image_summary",
"merge_all_summaries",
"merge_summary",
"scalar_summary",
# TODO(drpng): link in training.py if it should be documented.
"get_summary_op",
]
_allowed_symbols_variable_scope_ops = [
"get_local_variable", # Documented in framework package.
]
_allowed_symbols_misc = [
"deserialize_many_sparse",
"parse_single_sequence_example",
"serialize_many_sparse",
"serialize_sparse",
"confusion_matrix",
]
_allowed_symbols = (_allowed_symbols_array_ops +
_allowed_symbols_clip_ops +
_allowed_symbols_control_flow_ops +
_allowed_symbols_functional_ops +
_allowed_symbols_image_ops +
_allowed_symbols_gradients +
_allowed_symbols_math_ops +
_allowed_symbols_variable_scope_ops +
_allowed_symbols_misc +
_allowed_symbols_partitioned_variables)
remove_undocumented(__name__, _allowed_symbols,
[_sys.modules[__name__],
_array_ops,
_check_ops,
_clip_ops,
_confusion_matrix,
_control_flow_ops,
_constant_op,
_data_flow_ops,
_functional_ops,
_gradients,
_histogram_ops,
_init_ops,
_io_ops,
_linalg_ops,
_logging_ops,
_math_ops,
_numerics,
_parsing_ops,
_partitioned_variables,
_random_ops,
_script_ops,
_session_ops,
_sparse_ops,
_special_math_ops,
_state_ops,
_string_ops,
_template,
_tensor_array_ops,
_variable_scope,
_variables,])
| gpl-3.0 |
beefoo/still-i-rise | collect_sound_data.py | 1 | 2076 | # -*- coding: utf-8 -*-
# Description: run Praat over a recording to extract pitch and pulse (point process) data
import argparse
import subprocess
# input
parser = argparse.ArgumentParser()
parser.add_argument('-in', dest="INPUT_FILE", default="still_i_rise.wav", help="Path to input audio file file")
parser.add_argument('-pitch', dest="OUTPUT_PITCH_FILE", default="data/still_i_rise.Pitch", help="Path to output pitch data file")
parser.add_argument('-pulse', dest="OUTPUT_PULSE_FILE", default="data/still_i_rise.PointProcess", help="Path to output pulse data file")
parser.add_argument('-ts', dest="TIME_STEP", default="0.01", help="Time step in seconds")
parser.add_argument('-p0', dest="PITCH_FLOOR", default="70", help="Pitch floor in Hz")
parser.add_argument('-mc', dest="MAX_CANDIDATES", default="4", help="Maximum candidates per frame")
parser.add_argument('-va', dest="VERY_ACCURATE", default="on", help="Very accurate, on/off")
parser.add_argument('-st', dest="SILENCE_THRESHOLD", default="0.01", help="Silence threshold")
parser.add_argument('-vt', dest="VOICING_THRESHOLD", default="0.3", help="Voicing threshold")
parser.add_argument('-oc', dest="OCTAVE_COST", default="0.001", help="Octave cost")
parser.add_argument('-ojc', dest="OCTAVE_JUMP_COST", default="0.3", help="Octave jump cost")
parser.add_argument('-vc', dest="VOICED_COST", default="0.2", help="Voiced cost")
parser.add_argument('-p1', dest="PITCH_CEILING", default="400", help="Pitch ceiling in Hz")
# init input
args = parser.parse_args()
# run the Praat script to extract pitch and pulse data
command = ['Praat', '--run', 'collect_sound_data.praat', args.INPUT_FILE, args.OUTPUT_PITCH_FILE, args.OUTPUT_PULSE_FILE, args.TIME_STEP, args.PITCH_FLOOR, args.MAX_CANDIDATES, args.VERY_ACCURATE, args.SILENCE_THRESHOLD, args.VOICING_THRESHOLD, args.OCTAVE_COST, args.OCTAVE_JUMP_COST, args.VOICED_COST, args.PITCH_CEILING]
print "Running %s" % " ".join(command)
finished = subprocess.check_call(command)
print "Wrote data to %s and %s" % (args.OUTPUT_PITCH_FILE, args.OUTPUT_PULSE_FILE)
| mit |
Grirrane/odoo | addons/mrp/res_config.py | 4 | 3430 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class mrp_config_settings(osv.osv_memory):
_name = 'mrp.config.settings'
_inherit = 'res.config.settings'
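# By res.config.settings convention, boolean fields prefixed with 'module_'
# (un)install the named module when the settings are applied, and 'group_'
# fields toggle their implied_group for the default user groups.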
_columns = {
'module_mrp_repair': fields.boolean("Manage repairs of products ",
help='Allows managing all product repairs.\n'
'* Add/remove products in the reparation\n'
'* Impact for stocks\n'
'* Invoicing (products and/or services)\n'
'* Warranty concept\n'
'* Repair quotation report\n'
'* Notes for the technician and for the final customer.\n'
'-This installs the module mrp_repair.'),
'module_mrp_operations': fields.boolean("Allow detailed planning of work order",
help='This allows adding state, date_start, date_stop in production order operation lines (in the "Work Centers" tab).\n'
'-This installs the module mrp_operations.'),
'module_mrp_byproduct': fields.boolean("Produce several products from one manufacturing order",
help='You can configure by-products in the bill of material.\n'
'Without this module: A + B + C -> D.\n'
'With this module: A + B + C -> D + E.\n'
'-This installs the module mrp_byproduct.'),
'group_mrp_routings': fields.boolean("Manage Work Order Operations and work orders ",
implied_group='mrp.group_mrp_routings',
help='Work Order Operations allow you to create and manage the manufacturing operations that should be followed '
'within your work centers in order to produce a product. They are attached to bills of materials '
'that will define the required raw materials.'),
'group_mrp_properties': fields.boolean("Allow several bill of materials per products using properties",
implied_group='product.group_mrp_properties',
help="""The selection of the right Bill of Material to use will depend on the properties specified on the sales order and the Bill of Material."""),
'group_rounding_efficiency': fields.boolean("Manage rounding and efficiency of BoM components",
implied_group='mrp.group_rounding_efficiency',
help="""Allow to manage product rounding on quantity and product efficiency during production process"""),
}
| agpl-3.0 |
mozaik-association/mozaik | odoo_addons/mozaik_address/wizard/change_main_address.py | 2 | 5253 | # -*- coding: utf-8 -*-
##############################################################################
#
# This file is part of mozaik_address, an Odoo module.
#
# Copyright (c) 2015 ACSONE SA/NV (<http://acsone.eu>)
#
# mozaik_address is free software:
# you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# mozaik_address is distributed in the hope that it will
# be useful but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the
# GNU Affero General Public License
# along with mozaik_address.
# If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
from openerp.tools import SUPERUSER_ID
from openerp import fields as new_fields, api
from openerp.tools.translate import _
class change_main_address(orm.TransientModel):
_name = 'change.main.address'
_inherit = 'change.main.coordinate'
_description = 'Change Main Address Wizard'
_columns = {
'old_address_id': fields.many2one(
'address.address', 'Current Main Address'),
'address_id': fields.many2one(
'address.address', 'New Main Address',
required=True, ondelete='cascade'),
}
co_residency_id = new_fields.Many2one('co.residency', 'Co-Residency')
move_co_residency = new_fields.Boolean('Move Co-Residency', default=True)
invalidate_co_residency = new_fields.Boolean('Invalidate Co-Residency',
default=True)
move_allowed = new_fields.Boolean('Move Allowed')
message = new_fields.Char('Message')
def default_get(self, cr, uid, flds, context):
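# Pre-fill the wizard: in 'switch' mode reuse the target coordinate's
# address, look up the partner's current main address, and, if that address
# is shared through a co-residency, check whether the user may move it.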
context = dict(context or {})
res = super(change_main_address, self).default_get(
cr, uid, flds, context=context)
if context.get('mode', False) == 'switch':
coord = self.pool[context.get('target_model')].browse(
cr, uid, context.get('target_id', False))
res['address_id'] = coord.address_id.id
ids = context.get('active_ids') \
or (context.get('active_id') and [context.get('active_id')]) \
or []
if len(ids) == 1:
partner = self.pool['res.partner'].browse(
cr, SUPERUSER_ID, ids[0], context=context)
res['old_address_id'] = partner.postal_coordinate_id.address_id.id
if context.get('address_id', False):
res['change_allowed'] = res['address_id'] != res['old_address_id']
if res.get('old_address_id', False):
cores_obj = self.pool.get('co.residency')
cores_wiz_obj = self.pool.get('change.co.residency.address')
co_res = cores_obj.search(
cr, uid, [('address_id', '=', res['old_address_id'])],
context=context)
if co_res:
co_res_id = co_res[0]
if co_res_id:
res['move_allowed'] = cores_wiz_obj._use_allowed(
cr, uid, co_res_id, context=context)
res['co_residency_id'] = co_res_id
res['move_co_residency'] = res.get('move_allowed', False)
res['invalidate_co_residency'] = res.get('move_allowed',
False)
if not res.get('move_allowed', False):
res['message'] = _('Due to security restrictions'
' you are not allowed to move'
' all co-residency members !')
return res
@api.multi
def button_change_main_coordinate(self):
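# When requested, first move (and optionally invalidate) the whole
# co-residency through the change.co.residency.address wizard, then let
# the parent wizard change this partner's own main coordinate.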
postal_coordinate_ids = []
if self.co_residency_id and self.move_co_residency:
postal_coordinate_ids = self.co_residency_id.\
postal_coordinate_ids.ids
cores_wiz_obj = self.env['change.co.residency.address']
vals = {
'co_residency_id': self.co_residency_id.id,
'old_address_id': self.old_address_id.id,
'address_id': self.address_id.id,
'use_allowed': self.move_allowed,
'invalidate': self.invalidate_co_residency,
}
wizard = cores_wiz_obj.create(vals)
wizard.change_address()
res = super(
change_main_address, self._model).button_change_main_coordinate(
self.env.cr, self.env.uid, self.ids, self.env.context.copy())
if self.invalidate_previous_coordinate and postal_coordinate_ids:
postal_coordinate_ids =\
self.env['postal.coordinate'].browse(postal_coordinate_ids)
postal_coordinate_ids.action_invalidate()
return res
| agpl-3.0 |
googleapis/googleapis-gen | google/cloud/talent/v4beta1/talent-v4beta1-py/google/cloud/talent_v4beta1/services/job_service/transports/grpc.py | 1 | 23359 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.talent_v4beta1.types import job
from google.cloud.talent_v4beta1.types import job as gct_job
from google.cloud.talent_v4beta1.types import job_service
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from .base import JobServiceTransport, DEFAULT_CLIENT_INFO
class JobServiceGrpcTransport(JobServiceTransport):
"""gRPC backend transport for JobService.
A service handles job management, including job CRUD,
enumeration and search.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(self, *,
host: str = 'jobs.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(cls,
host: str = 'jobs.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def create_job(self) -> Callable[
[job_service.CreateJobRequest],
gct_job.Job]:
r"""Return a callable for the create job method over gRPC.
Creates a new job.
Typically, the job becomes searchable within 10 seconds,
but it may take up to 5 minutes.
Returns:
Callable[[~.CreateJobRequest],
~.Job]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'create_job' not in self._stubs:
self._stubs['create_job'] = self.grpc_channel.unary_unary(
'/google.cloud.talent.v4beta1.JobService/CreateJob',
request_serializer=job_service.CreateJobRequest.serialize,
response_deserializer=gct_job.Job.deserialize,
)
return self._stubs['create_job']
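    # Illustrative note (hedged): each stub property caches its unary-unary
    # callable in self._stubs, so repeated attribute access builds the stub
    # only once. A caller could invoke it directly, e.g.:
    #
    #     request = job_service.CreateJobRequest(parent=..., job=...)
    #     job = transport.create_job(request)
    #
    # where `transport` is assumed to be an instance of this class.
    # The same caching pattern repeats for every RPC below.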
@property
def batch_create_jobs(self) -> Callable[
[job_service.BatchCreateJobsRequest],
operations_pb2.Operation]:
r"""Return a callable for the batch create jobs method over gRPC.
Begins executing a batch create jobs operation.
Returns:
Callable[[~.BatchCreateJobsRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'batch_create_jobs' not in self._stubs:
self._stubs['batch_create_jobs'] = self.grpc_channel.unary_unary(
'/google.cloud.talent.v4beta1.JobService/BatchCreateJobs',
request_serializer=job_service.BatchCreateJobsRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['batch_create_jobs']
@property
def get_job(self) -> Callable[
[job_service.GetJobRequest],
job.Job]:
r"""Return a callable for the get job method over gRPC.
Retrieves the specified job, whose status is OPEN or
recently EXPIRED within the last 90 days.
Returns:
Callable[[~.GetJobRequest],
~.Job]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_job' not in self._stubs:
self._stubs['get_job'] = self.grpc_channel.unary_unary(
'/google.cloud.talent.v4beta1.JobService/GetJob',
request_serializer=job_service.GetJobRequest.serialize,
response_deserializer=job.Job.deserialize,
)
return self._stubs['get_job']
@property
def update_job(self) -> Callable[
[job_service.UpdateJobRequest],
gct_job.Job]:
r"""Return a callable for the update job method over gRPC.
Updates specified job.
Typically, updated contents become visible in search
results within 10 seconds, but it may take up to 5
minutes.
Returns:
Callable[[~.UpdateJobRequest],
~.Job]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'update_job' not in self._stubs:
self._stubs['update_job'] = self.grpc_channel.unary_unary(
'/google.cloud.talent.v4beta1.JobService/UpdateJob',
request_serializer=job_service.UpdateJobRequest.serialize,
response_deserializer=gct_job.Job.deserialize,
)
return self._stubs['update_job']
@property
def batch_update_jobs(self) -> Callable[
[job_service.BatchUpdateJobsRequest],
operations_pb2.Operation]:
r"""Return a callable for the batch update jobs method over gRPC.
Begins executing a batch update jobs operation.
Returns:
Callable[[~.BatchUpdateJobsRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'batch_update_jobs' not in self._stubs:
self._stubs['batch_update_jobs'] = self.grpc_channel.unary_unary(
'/google.cloud.talent.v4beta1.JobService/BatchUpdateJobs',
request_serializer=job_service.BatchUpdateJobsRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['batch_update_jobs']
@property
def delete_job(self) -> Callable[
[job_service.DeleteJobRequest],
empty_pb2.Empty]:
r"""Return a callable for the delete job method over gRPC.
Deletes the specified job.
Typically, the job becomes unsearchable within 10
seconds, but it may take up to 5 minutes.
Returns:
Callable[[~.DeleteJobRequest],
~.Empty]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'delete_job' not in self._stubs:
self._stubs['delete_job'] = self.grpc_channel.unary_unary(
'/google.cloud.talent.v4beta1.JobService/DeleteJob',
request_serializer=job_service.DeleteJobRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs['delete_job']
@property
def batch_delete_jobs(self) -> Callable[
[job_service.BatchDeleteJobsRequest],
empty_pb2.Empty]:
r"""Return a callable for the batch delete jobs method over gRPC.
Deletes a list of [Job][google.cloud.talent.v4beta1.Job]s by
filter.
Returns:
Callable[[~.BatchDeleteJobsRequest],
~.Empty]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'batch_delete_jobs' not in self._stubs:
self._stubs['batch_delete_jobs'] = self.grpc_channel.unary_unary(
'/google.cloud.talent.v4beta1.JobService/BatchDeleteJobs',
request_serializer=job_service.BatchDeleteJobsRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs['batch_delete_jobs']
@property
def list_jobs(self) -> Callable[
[job_service.ListJobsRequest],
job_service.ListJobsResponse]:
r"""Return a callable for the list jobs method over gRPC.
Lists jobs by filter.
Returns:
Callable[[~.ListJobsRequest],
~.ListJobsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_jobs' not in self._stubs:
self._stubs['list_jobs'] = self.grpc_channel.unary_unary(
'/google.cloud.talent.v4beta1.JobService/ListJobs',
request_serializer=job_service.ListJobsRequest.serialize,
response_deserializer=job_service.ListJobsResponse.deserialize,
)
return self._stubs['list_jobs']
@property
def search_jobs(self) -> Callable[
[job_service.SearchJobsRequest],
job_service.SearchJobsResponse]:
r"""Return a callable for the search jobs method over gRPC.
Searches for jobs using the provided
[SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest].
This call constrains the
[visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs
present in the database, and only returns jobs that the caller
has permission to search against.
Returns:
Callable[[~.SearchJobsRequest],
~.SearchJobsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'search_jobs' not in self._stubs:
self._stubs['search_jobs'] = self.grpc_channel.unary_unary(
'/google.cloud.talent.v4beta1.JobService/SearchJobs',
request_serializer=job_service.SearchJobsRequest.serialize,
response_deserializer=job_service.SearchJobsResponse.deserialize,
)
return self._stubs['search_jobs']
@property
def search_jobs_for_alert(self) -> Callable[
[job_service.SearchJobsRequest],
job_service.SearchJobsResponse]:
r"""Return a callable for the search jobs for alert method over gRPC.
Searches for jobs using the provided
[SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest].
This API call is intended for the use case of targeting passive
job seekers (for example, job seekers who have signed up to
receive email alerts about potential job opportunities), and has
different algorithmic adjustments that are targeted to passive
job seekers.
This call constrains the
[visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs
present in the database, and only returns jobs the caller has
permission to search against.
Returns:
Callable[[~.SearchJobsRequest],
~.SearchJobsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'search_jobs_for_alert' not in self._stubs:
self._stubs['search_jobs_for_alert'] = self.grpc_channel.unary_unary(
'/google.cloud.talent.v4beta1.JobService/SearchJobsForAlert',
request_serializer=job_service.SearchJobsRequest.serialize,
response_deserializer=job_service.SearchJobsResponse.deserialize,
)
return self._stubs['search_jobs_for_alert']
__all__ = (
'JobServiceGrpcTransport',
)
| apache-2.0 |
terencehonles/mailman | src/mailman/handlers/member_recipients.py | 3 | 6310 | # Copyright (C) 1998-2012 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""Calculate the regular (i.e. non-digest) recipients of the message.
This module calculates the non-digest recipients for the message based on the
list's membership and configuration options. It places the list of recipients
on the `recipients' attribute of the message. This attribute is used by the
SendmailDeliver and BulkDeliver modules.
"""
from __future__ import absolute_import, print_function, unicode_literals
__metaclass__ = type
__all__ = [
'MemberRecipients',
]
from zope.interface import implementer
from mailman.config import config
from mailman.core import errors
from mailman.core.i18n import _
from mailman.interfaces.handler import IHandler
from mailman.interfaces.member import DeliveryStatus
from mailman.utilities.string import wrap
@implementer(IHandler)
class MemberRecipients:
"""Calculate the regular (i.e. non-digest) recipients of the message."""
name = 'member-recipients'
description = _('Calculate the regular recipients of the message.')
def process(self, mlist, msg, msgdata):
"""See `IHandler`."""
# Short circuit if we've already calculated the recipients list,
# regardless of whether the list is empty or not.
if 'recipients' in msgdata:
return
        # Should the original sender be included in the recipients list?
include_sender = True
member = mlist.members.get_member(msg.sender)
if member and not member.receive_own_postings:
include_sender = False
# Support for urgent messages, which bypasses digests and disabled
# delivery and forces an immediate delivery to all members Right Now.
# We are specifically /not/ allowing the site admins password to work
# here because we want to discourage the practice of sending the site
# admin password through email in the clear. (see also Approve.py)
#
# XXX This is broken.
missing = object()
password = msg.get('urgent', missing)
if password is not missing:
if mlist.Authenticate((config.AuthListModerator,
config.AuthListAdmin),
password):
recipients = mlist.getMemberCPAddresses(
mlist.getRegularMemberKeys() +
mlist.getDigestMemberKeys())
msgdata['recipients'] = recipients
return
else:
# Bad Urgent: password, so reject it instead of passing it on.
# I think it's better that the sender know they screwed up
# than to deliver it normally.
text = _("""\
Your urgent message to the $mlist.display_name mailing list was not authorized
for delivery. The original message as received by Mailman is attached.
""")
raise errors.RejectMessage(wrap(text))
# Calculate the regular recipients of the message
recipients = set(member.address.email
for member in mlist.regular_members.members
if member.delivery_status == DeliveryStatus.enabled)
# Remove the sender if they don't want to receive their own posts
if not include_sender and member.address.email in recipients:
recipients.remove(member.address.email)
# Handle topic classifications
do_topic_filters(mlist, msg, msgdata, recipients)
# Bookkeeping
msgdata['recipients'] = recipients
def do_topic_filters(mlist, msg, msgdata, recipients):
"""Filter out recipients based on topics."""
if not mlist.topics_enabled:
# MAS: if topics are currently disabled for the list, send to all
# regardless of ReceiveNonmatchingTopics
return
hits = msgdata.get('topichits')
zap_recipients = []
if hits:
# The message hit some topics, so only deliver this message to those
# who are interested in one of the hit topics.
for user in recipients:
utopics = mlist.getMemberTopics(user)
if not utopics:
# This user is not interested in any topics, so they get all
# postings.
continue
# BAW: Slow, first-match, set intersection!
for topic in utopics:
if topic in hits:
# The user wants this message
break
else:
# The user was interested in topics, but not any of the ones
# this message matched, so zap him.
zap_recipients.append(user)
else:
# The semantics for a message that did not hit any of the pre-canned
# topics is to troll through the membership list, looking for users
# who selected at least one topic of interest, but turned on
# ReceiveNonmatchingTopics.
for user in recipients:
if not mlist.getMemberTopics(user):
# The user did not select any topics of interest, so he gets
# this message by default.
continue
if not mlist.getMemberOption(
user, config.ReceiveNonmatchingTopics):
# The user has interest in some topics, but elects not to
# receive message that match no topics, so zap him.
zap_recipients.append(user)
# Otherwise, the user wants non-matching messages.
# Prune out the non-receiving users
for user in zap_recipients:
recipients.remove(user)
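# Illustrative sketch (not part of Mailman): the per-user decision implemented
# above, extracted as a pure function, assuming `hits` is the list of topics
# the message matched and `utopics` the user's topics of interest:
#
#     def wants_message(utopics, hits, receive_nonmatching):
#         if not utopics:
#             return True              # no topics selected: gets every post
#         if hits:
#             return any(t in hits for t in utopics)
#         return receive_nonmatching   # message matched no topic at all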
| gpl-3.0 |
chungjjang80/FRETBursts | fretbursts/burstlib.py | 1 | 133746 | #
# FRETBursts - A single-molecule FRET burst analysis toolkit.
#
# Copyright (C) 2013-2016 The Regents of the University of California,
# Antonino Ingargiola <tritemio@gmail.com>
#
"""
This module contains all the main FRETBursts analysis functions.
`burstslib.py` defines the fundamental object `Data()` that contains both the
experimental data (attributes) and the high-level analysis routines (methods).
Furthermore it loads all the remaining **FRETBursts** modules (except for
`loaders.py`).
For usage example see the IPython Notebooks in sub-folder "notebooks".
"""
from __future__ import print_function, absolute_import, division
from future.utils import raise_from
from builtins import range, zip
import os
import hashlib
import numpy as np
import copy
from numpy import zeros, size, r_
import scipy.stats as SS
from .utils.misc import pprint, clk_to_s, deprecate
from .poisson_threshold import find_optimal_T_bga
from . import fret_fit
from . import bg_cache
from .ph_sel import Ph_sel
from .fretmath import gamma_correct_E, gamma_uncorrect_E
from .phtools import burstsearch as bslib
from .phtools.burstsearch import (
# Burst search function
bsearch,
# Photon counting function,
mch_count_ph_in_bursts
)
from .phtools import phrates
from . import background as bg
from . import select_bursts
from . import fit
from .fit.gaussian_fitting import (gaussian_fit_hist,
gaussian_fit_cdf,
two_gaussian_fit_hist,
two_gaussian_fit_hist_min,
two_gaussian_fit_hist_min_ab,
two_gaussian_fit_EM,
two_gauss_mix_pdf,
two_gauss_mix_ab,)
# Redefine some old functions that have been renamed so old scripts will not
# break but will print a warning
bg_calc_exp = deprecate(bg.exp_fit, 'bg_calc_exp', 'bg.exp_fit')
bg_calc_exp_cdf = deprecate(bg.exp_cdf_fit, 'bg_calc_exp_cdf', 'bg.exp_cdf_fit')
def _get_bsearch_func(pure_python=False):
if pure_python:
# return the python version
return bslib.bsearch_py
else:
# or what is available
return bsearch
def _get_mch_count_ph_in_bursts_func(pure_python=False):
if pure_python:
# return the python version
return bslib.mch_count_ph_in_bursts_py
else:
# or what is available
return mch_count_ph_in_bursts
def isarray(obj):
"""Test if the object support the array interface.
Returns True for numpy arrays and pandas sequences.
"""
return hasattr(obj, '__array__')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# BURST SELECTION FUNCTIONS
#
def Sel(d_orig, filter_fun, negate=False, nofret=False, **kwargs):
"""Uses `filter_fun` to select a sub-set of bursts from `d_orig`.
This function is deprecated. Use :meth:`Data.select_bursts` instead.
"""
d_sel = d_orig.select_bursts(filter_fun, negate=negate,
computefret=not nofret,
**kwargs)
return d_sel
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Bursts and Timestamps utilities
#
def get_alex_fraction(on_range, alex_period):
"""Get the fraction of period beween two numbers indicating a range.
"""
assert len(on_range) == 2
if on_range[0] < on_range[1]:
fraction = (on_range[1] - on_range[0]) / alex_period
else:
fraction = (alex_period + on_range[1] - on_range[0]) / alex_period
return fraction
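# A quick sanity check of the wrap-around branch above (illustrative values):
#
#     >>> get_alex_fraction((1000, 3000), alex_period=4000)  # plain range
#     0.5
#     >>> get_alex_fraction((3500, 500), alex_period=4000)   # wraps past 0
#     0.25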
def top_tail(nx, a=0.1):
"""Return for each ch the mean size of the top `a` fraction.
nx is one of nd, na, nt from Data() (list of burst size in each ch).
"""
assert a > 0 and a < 1
return np.r_[[n[n > n.max() * (1 - a)].mean() for n in nx]]
##
# Per-burst quantities from ph-data arrays (timestamps, lifetime, etc..)
#
def _excitation_width(excitation_range, alex_period):
"""Returns duration of alternation period outside selected excitation.
"""
if excitation_range[1] > excitation_range[0]:
return alex_period - excitation_range[1] + excitation_range[0]
elif excitation_range[1] < excitation_range[0]:
return excitation_range[0] - excitation_range[1]
def _ph_times_compact(ph_times_sel, alex_period, excitation_width):
"""Compact ph_times inplace by removing gaps between alternation periods.
Arguments:
ph_times_sel (array): array of timestamps from one alternation period.
alex_period (scalar): period of alternation in timestamp units.
excitation_width (float): fraction of `alex_period` covered by
current photon selection.
    Returns an array of timestamps with the gaps removed. The input array
    is not modified.
"""
# The formula is
#
# gaps = (ph_times_sel // alex_period)*excitation_width
# ph_times_sel = ph_times_sel - gaps
#
# As a memory optimization the `-gaps` array is reused inplace
times_minusgaps = (ph_times_sel // alex_period) * (-1 * excitation_width)
# The formula is ph_times_sel = ph_times_sel - "gaps"
times_minusgaps += ph_times_sel
return times_minusgaps
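# Worked example for the formula above (illustrative values): with
# alex_period = 100 and excitation_width = 40, a timestamp t = 250 falls in
# the third period, so two gaps of 40 are removed:
# 250 - (250 // 100) * 40 = 250 - 80 = 170.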
def iter_bursts_start_stop(bursts):
"""Iterate over (start, stop) indexes to slice photons for each burst.
"""
arr_istart = bursts.istart
arr_istop = bursts.istop + 1
for istart, istop in zip(arr_istart, arr_istop):
yield istart, istop
def iter_bursts_ph(ph_data, bursts, mask=None, compact=False,
alex_period=None, excitation_width=None):
"""Iterator over arrays of photon-data for each burst.
Arguments:
ph_data (1D array): array of photon-data (timestamps, nanotimes).
bursts (Bursts object): bursts computed from `ph`.
mask (boolean mask or None): if not None, is a boolean mask
to select photons in `ph_data` (for example Donor-ch photons).
compact (bool): if True, a photon selection of only one excitation
period is required and the timestamps are "compacted" by
removing the "gaps" between each excitation period.
alex_period (scalar): period of alternation in timestamp units.
excitation_width (float): fraction of `alex_period` covered by
current photon selection.
Yields an array with a selection of "photons" for each burst.
"""
if isinstance(mask, slice) and mask == slice(None):
mask = None
if compact:
assert alex_period is not None
assert excitation_width is not None
assert mask is not None
for start, stop in iter_bursts_start_stop(bursts):
ph = ph_data[start:stop]
if mask is not None:
ph = ph[mask[start:stop]]
if compact:
ph = _ph_times_compact(ph, alex_period, excitation_width)
yield ph
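# A minimal usage sketch for the iterator above (names assumed): selecting
# the donor-excitation donor-emission photons of each burst in channel 0 of
# a Data object `d`:
#
#     mask = d.get_ph_mask(0, ph_sel=Ph_sel(Dex='Dem'))
#     for burst_ph in iter_bursts_ph(d.ph_times_m[0], d.mburst[0], mask=mask):
#         print(burst_ph.size)   # number of D-emission photons in the burst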
def bursts_ph_list(ph_data, bursts, mask=None):
"""Returna list of ph-data for each burst.
ph_data can be either the timestamp array on which the burst search
has been performed or any other array with same size (boolean array,
nanotimes, etc...)
"""
return [ph for ph in iter_bursts_ph(ph_data, bursts, mask=mask)]
def burst_ph_stats(ph_data, bursts, func=np.mean, func_kw=None, **kwargs):
"""Reduce burst photons (timestamps, nanotimes) to a scalar using `func`.
Arguments
ph_data (1D array): array of photon-data (timestamps, nanotimes).
bursts (Bursts object): bursts computed from `ph`.
func (callable): function that takes the burst photon timestamps
as first argument and returns a scalar.
func_kw (callable): additional arguments in `func` beyond photon-data.
**kwargs: additional arguments passed to :func:`iter_bursts_ph`.
Return
Array one element per burst.
"""
if func_kw is None:
func_kw = {}
burst_stats = []
for burst_ph in iter_bursts_ph(ph_data, bursts, **kwargs):
burst_stats.append(func(burst_ph, **func_kw))
return np.asfarray(burst_stats) # NOTE: asfarray converts None to nan
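# A minimal usage sketch (assuming `nanotimes` is an array with the same size
# as the timestamps used for the burst search): the mean TCSPC nanotime of
# each burst is obtained by reducing each burst's photons with np.mean:
#
#     mean_burst_nanotime = burst_ph_stats(nanotimes, bursts, func=np.mean)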
def ph_in_bursts_mask(ph_data_size, bursts):
"""Return bool mask to select all "ph-data" inside any burst."""
mask = zeros(ph_data_size, dtype=bool)
for start, stop in iter_bursts_start_stop(bursts):
mask[start:stop] = True
return mask
def fuse_bursts_direct(bursts, ms=0, clk_p=12.5e-9, verbose=True):
"""Fuse bursts separated by less than `ms` (milli-seconds).
This function is a direct implementation using a single loop.
For a faster implementation see :func:`fuse_bursts_iter`.
Parameters:
bursts (BurstsGap object): bursts to be fused.
See `phtools.burstsearch` for details.
ms (float): minimum waiting time between bursts (in millisec).
Bursts closer than that will be fused in a single burst.
clk_p (float): clock period or timestamp units in seconds.
verbose (bool): if True print a summary of fused bursts.
Returns:
A BurstsGap object containing the new fused bursts.
"""
max_delay_clk = (ms * 1e-3) / clk_p
fused_bursts_list = []
fused_burst = None
for burst1, burst2 in zip(bursts[:-1], bursts[1:]):
if fused_burst is not None:
burst1c = fused_burst
else:
burst1c = bslib.BurstGap.from_burst(burst1)
separation = burst2.start - burst1c.stop
if separation <= max_delay_clk:
gap = burst2.start - burst1c.stop
gap_counts = burst2.istart - burst1c.istop - 1
if burst1c.istop >= burst2.istart:
gap = 0
gap_counts = 0
fused_burst = bslib.BurstGap(
start = burst1c.start,
istart = burst1c.istart,
stop = burst2.stop,
istop = burst2.istop,
gap = burst1c.gap + gap,
gap_counts = burst1c.gap_counts + gap_counts)
else:
if fused_burst is not None:
fused_bursts_list.append(fused_burst)
fused_burst = None
else:
fused_bursts_list.append(bslib.BurstGap.from_burst(burst1c))
# Append the last bursts (either a fused or an isolated one)
if fused_burst is not None:
fused_bursts_list.append(fused_burst)
else:
fused_bursts_list.append(bslib.BurstGap.from_burst(burst2))
fused_bursts = bslib.BurstsGap.from_list(fused_bursts_list)
init_num_bursts = bursts.num_bursts
delta_b = init_num_bursts - fused_bursts.num_bursts
pprint(" --> END Fused %d bursts (%.1f%%)\n\n" %
(delta_b, 100 * delta_b / init_num_bursts), mute=not verbose)
return fused_bursts
def fuse_bursts_iter(bursts, ms=0, clk_p=12.5e-9, verbose=True):
"""Fuse bursts separated by less than `ms` (milli-secs).
    This function iteratively calls :func:`b_fuse` until there are no more
bursts to fuse. For a slower but more readable version see
:func:`fuse_bursts_direct`.
Parameters:
bursts (BurstsGap object): bursts to be fused.
See `phtools.burstsearch` for details.
ms (float): minimum waiting time between bursts (in millisec).
Bursts closer than that will be fused in a single burst.
clk_p (float): clock period or timestamp units in seconds.
verbose (bool): if True print a summary of fused bursts.
Returns:
A BurstsGap object containing the new fused bursts.
"""
init_nburst = bursts.num_bursts
bursts = bslib.BurstsGap(bursts.data)
z = 0
new_nburst, nburst = 0, 1 # starting condition
while new_nburst < nburst:
z += 1
nburst = bursts.num_bursts
bursts = b_fuse(bursts, ms=ms, clk_p=clk_p)
new_nburst = bursts.num_bursts
delta_b = init_nburst - nburst
pprint(" --> END Fused %d bursts (%.1f%%, %d iter)\n\n" %
(delta_b, 100 * delta_b / init_nburst, z), mute=not verbose)
return bursts
def b_fuse(bursts, ms=0, clk_p=12.5e-9):
"""Fuse bursts separated by less than `ms` (milli-secs).
This is a low-level function which fuses pairs of consecutive
bursts separated by less than `ms` millisec.
If there are 3 or more consecutive bursts separated by less than `ms`
only the first 2 are fused.
See :func:`fuse_bursts_iter` or :func:`fuse_bursts_direct` for
higher level functions.
Parameters:
bursts (BurstsGap object): bursts to be fused.
See `phtools.burstsearch` for details.
ms (float): minimum waiting time between bursts (in millisec).
Bursts closer than that will be fused in a single burst.
clk_p (float): clock period or timestamp units in seconds.
Returns:
A BurstsGap object containing the new fused bursts.
"""
max_delay_clk = (ms * 1e-3) / clk_p
# Nearby bursts masks
delays_below_th = (bursts.separation <= max_delay_clk)
if not np.any(delays_below_th):
return bursts
buffer_mask = np.hstack([(False,), delays_below_th, (False,)])
first_bursts = buffer_mask[1:]
second_bursts = buffer_mask[:-1]
# Keep only the first pair in case of more than 2 consecutive bursts
first_bursts ^= (second_bursts * first_bursts)
# note that previous in-place operation also modifies `second_bursts`
both_bursts = first_bursts + second_bursts
# istart is from the first burst, istop is from the second burst
fused_bursts1 = bursts[first_bursts]
fused_bursts2 = bursts[second_bursts]
# Compute gap and gap_counts
gap = fused_bursts2.start - fused_bursts1.stop
gap_counts = fused_bursts2.istart - fused_bursts1.istop - 1 # yes it's -1
overlaping = fused_bursts1.istop >= fused_bursts2.istart
gap[overlaping] = 0
gap_counts[overlaping] = 0
# Assign the new burst data
    # fused_bursts1 has already the right start and istart
fused_bursts1.istop = fused_bursts2.istop
fused_bursts1.stop = fused_bursts2.stop
fused_bursts1.gap += gap
fused_bursts1.gap_counts += gap_counts
# Join fused bursts with the remaining bursts
new_burst = fused_bursts1.join(bursts[~both_bursts], sort=True)
return new_burst
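# Worked example of the pairing masks above (illustrative values): with 4
# bursts whose separations are [1, 1, 9] and a threshold of 2,
# delays_below_th = [T, T, F] and buffer_mask = [F, T, T, F, F]. Since
# first_bursts and second_bursts are overlapping views of buffer_mask, the
# in-place XOR leaves first_bursts = [T, F, F, F] and
# second_bursts = [F, T, F, F]: only bursts 0 and 1 are fused in this pass,
# and the next iteration of fuse_bursts_iter picks up burst 2.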
def mch_fuse_bursts(MBurst, ms=0, clk_p=12.5e-9, verbose=True):
"""Multi-ch version of `fuse_bursts`. `MBurst` is a list of Bursts objects.
"""
mburst = [b.copy() for b in MBurst] # safety copy
new_mburst = []
ch = 0
for mb in mburst:
ch += 1
pprint(" - - - - - CHANNEL %2d - - - - \n" % ch, not verbose)
if mb.num_bursts == 0:
new_bursts = bslib.Bursts.empty()
else:
new_bursts = fuse_bursts_iter(mb, ms=ms, clk_p=clk_p,
verbose=verbose)
new_mburst.append(new_bursts)
return new_mburst
def burst_stats(mburst, clk_p):
"""Compute average duration, size and burst-delay for bursts in mburst.
"""
nans = [np.nan, np.nan]
width_stats = np.array([[b.width.mean(), b.width.std()]
if b.num_bursts > 0 else nans for b in mburst]).T
height_stats = np.array([[b.counts.mean(), b.counts.std()]
if b.num_bursts > 0 else nans for b in mburst]).T
mean_burst_delay = np.array([b.separation.mean() if b.num_bursts > 0
else np.nan for b in mburst])
return (clk_to_s(width_stats, clk_p) * 1e3, height_stats,
clk_to_s(mean_burst_delay, clk_p))
def print_burst_stats(d):
"""Print some bursts statistics."""
nch = len(d.mburst)
width_ms, height, delays = burst_stats(d.mburst, d.clk_p)
s = "\nNUMBER OF BURSTS: m = %d, L = %d" % (d.m, d.L)
s += "\nPixel: "+"%7d "*nch % tuple(range(1, nch+1))
s += "\n#: "+"%7d "*nch % tuple([b.num_bursts for b in d.mburst])
s += "\nT (us) [BS par] "+"%7d "*nch % tuple(np.array(d.T)*1e6)
s += "\nBG Rat T (cps): "+"%7d "*nch % tuple(d.bg_mean[Ph_sel('all')])
s += "\nBG Rat D (cps): "+"%7d "*nch % tuple(d.bg_mean[Ph_sel(Dex='Dem')])
s += "\nBG Rat A (cps): "+"%7d "*nch % tuple(d.bg_mean[Ph_sel(Dex='Aem')])
s += "\n\nBURST WIDTH STATS"
s += "\nPixel: "+"%7d "*nch % tuple(range(1, nch+1))
s += "\nMean (ms): "+"%7.3f "*nch % tuple(width_ms[0, :])
s += "\nStd.dev (ms): "+"%7.3f "*nch % tuple(width_ms[1, :])
s += "\n\nBURST SIZE STATS"
s += "\nPixel: "+"%7d "*nch % tuple(range(1, nch+1))
s += "\nMean (# ph): "+"%7.2f "*nch % tuple(height[0, :])
s += "\nStd.dev (# ph): "+"%7.2f "*nch % tuple(height[1, :])
s += "\n\nBURST MEAN DELAY"
s += "\nPixel: "+"%7d "*nch % tuple(range(1, nch+1))
s += "\nDelay (s): "+"%7.3f "*nch % tuple(delays)
return s
def ES_histog(E, S, bin_step=0.05, E_bins=None, S_bins=None):
"""Returns 2D (ALEX) histogram and bins of bursts (E,S).
"""
if E_bins is None:
E_bins = np.arange(-0.6, 1.6+1e-4, bin_step)
if S_bins is None:
S_bins = np.arange(-0.6, 1.6+1e-4, bin_step)
H, E_bins, S_bins = np.histogram2d(E, S, bins=[E_bins, S_bins])
return H, E_bins, S_bins
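# A minimal usage sketch (illustrative): given per-burst arrays E and S from
# an ALEX measurement, the 2D histogram for an E-S (ALEX) plot is:
#
#     H, E_bins, S_bins = ES_histog(E, S, bin_step=0.05)
#     # H[i, j] counts bursts with E in [E_bins[i], E_bins[i+1]) and
#     # S in [S_bins[j], S_bins[j+1])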
def delta(x):
"""Return x.max() - x.min()"""
return x.max() - x.min()
def mask_empty(mask):
"""Returns True if `mask` is empty, otherwise False.
`mask` can be a boolean array or a slice object.
"""
if isinstance(mask, slice):
is_slice_empty = (mask.stop == 0)
return is_slice_empty
else:
        # Boolean array
return not mask.any()
class DataContainer(dict):
"""
Generic class for storing data.
    It's a dictionary in which each key is also accessible as an attribute
    (d['nt'] or d.nt).
"""
def __init__(self, **kwargs):
dict.__init__(self, **kwargs)
for k in self:
dict.__setattr__(self, k, self[k])
def add(self, **kwargs):
"""Adds or updates elements (attributes and/or dict entries). """
self.update(**kwargs)
for k, v in kwargs.items():
setattr(self, k, v)
def delete(self, *args, **kwargs):
"""Delete an element (attribute and/or dict entry). """
warning = kwargs.get('warning', True)
for name in args:
try:
self.pop(name)
except KeyError:
if warning:
print(' WARNING: Name %s not found (dict).' % name)
try:
delattr(self, name)
except AttributeError:
if warning:
print(' WARNING: Name %s not found (attr).' % name)
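# Illustrative example of the dict/attribute duality above (values assumed):
#
#     dc = DataContainer(nt=[1, 2, 3])
#     dc.nt is dc['nt']   # True: same object via key or attribute
#     dc.add(nd=[0, 1])   # adds both dc['nd'] and dc.nd
#     dc.delete('nd')     # removes both the key and the attribute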
class Data(DataContainer):
"""
Container for all the information (timestamps, bursts) of a dataset.
Data() contains all the information of a dataset (name, timestamps, bursts,
correction factors) and provides several methods to perform analysis
(background estimation, burst search, FRET fitting, etc...).
When loading a measurement file a Data() object is created by one
of the loader functions in `loaders.py`. Data() objects can be also
created with :meth:`Data.copy`, :meth:`Data.fuse_bursts()` or
:meth:`Data.select_bursts`.
To add or delete data-attributes use `.add()` or `.delete()` methods.
All the standard data-attributes are listed below.
Note:
Attributes of type "*list*" contain one element per channel.
Each element, in turn, can be an array. For example `.ph_times_m[i]`
is the array of timestamps for channel `i`; or `.nd[i]` is the array
of donor counts in each burst for channel `i`.
**Measurement attributes**
Attributes:
fname (string): measurements file name
nch (int): number of channels
clk_p (float): clock period in seconds for timestamps in `ph_times_m`
ph_times_m (list): list of timestamp arrays (int64). Each array
contains all the timestamps (donor+acceptor) in one channel.
A_em (list): list of boolean arrays marking acceptor timestamps. Each
array is a boolean mask for the corresponding ph_times_m array.
leakage (float or array of floats): leakage (or bleed-through) fraction.
May be scalar or same size as nch.
gamma (float or array of floats): gamma factor.
May be scalar or same size as nch.
D_em (list of boolean arrays): **[ALEX-only]**
boolean mask for `.ph_times_m[i]` for donor emission
D_ex, A_ex (list of boolean arrays): **[ALEX-only]**
boolean mask for `.ph_times_m[i]` during donor or acceptor
excitation
D_ON, A_ON (2-element tuples of int ): **[ALEX-only]**
start-end values for donor and acceptor excitation selection.
alex_period (int): **[ALEX-only]**
duration of the alternation period in clock cycles.
**Background Attributes**
The background is computed with :meth:`Data.calc_bg`
and is estimated in chunks of equal duration called *background periods*.
Estimations are performed in each spot and photon stream.
The following attributes contain the estimated background rate.
Attributes:
bg (dict): background rates for the different photon streams,
channels and background periods. Keys are `Ph_sel` objects
and values are lists (one element per channel) of arrays (one
element per background period) of background rates.
bg_mean (dict): mean background rates across the entire measurement
for the different photon streams and channels. Keys are `Ph_sel`
objects and values are lists (one element per channel) of
background rates.
nperiods (int): number of periods in which timestamps are split for
background calculation
bg_fun (function): function used to compute the background rates
Lim (list): each element of this list is a list of index pairs for
`.ph_times_m[i]` for **first** and **last** photon in each period.
Ph_p (list): each element in this list is a list of timestamps pairs
for **first** and **last** photon of each period.
bg_ph_sel (Ph_sel object): photon selection used by Lim and Ph_p.
See :mod:`fretbursts.ph_sel` for details.
Th_us (dict): thresholds in us used to select the tail of the
interphoton delay distribution. Keys are `Ph_sel` objects
and values are lists (one element per channel) of arrays (one
element per background period).
    Additionally, there are a few deprecated attributes (`bg_dd`, `bg_ad`,
`bg_da`, `bg_aa`, `rate_dd`, `rate_ad`, `rate_da`, `rate_aa` and `rate_m`)
which will be removed in a future version.
Please use :attr:`Data.bg` and :attr:`Data.bg_mean` instead.
**Burst search parameters (user input)**
These are the parameters used to perform the burst search
(see :meth:`burst_search`).
Attributes:
ph_sel (Ph_sel object): photon selection used for burst search.
See :mod:`fretbursts.ph_sel` for details.
m (int): number of consecutive timestamps used to compute the
local rate during burst search
L (int): min. number of photons for a burst to be identified and saved
P (float, probability): valid values [0..1].
Probability that a burst-start is due to a Poisson background.
The employed Poisson rate is the one computed by `.calc_bg()`.
F (float): `(F * background_rate)` is the minimum rate for burst-start
**Burst search data (available after burst search)**
    When not specified, parameters marked as (list of arrays) contain arrays
    with one element per burst. `mburst` arrays contain one "row" per burst.
`TT` arrays contain one element per `period` (see above: background
attributes).
Attributes:
mburst (list of Bursts objects): list Bursts() one element per channel.
See :class:`fretbursts.phtools.burstsearch.Bursts`.
TT (list of arrays): list of arrays of *T* values (in sec.). A *T*
value is the maximum delay between `m` photons to have a
burst-start. Each channels has an array of *T* values, one for
each background "period" (see above).
T (array): per-channel mean of `TT`
nd, na (list of arrays): number of donor or acceptor photons during
donor excitation in each burst
nt (list of arrays): total number photons (nd+na+naa)
naa (list of arrays): number of acceptor photons in each burst
during acceptor excitation **[ALEX only]**
nar (list of arrays): number of acceptor photons in each burst
during donor excitation, not corrected for D-leakage and
A-direct-excitation. **[PAX only]**
bp (list of arrays): time period for each burst. Same shape as `nd`.
This is needed to identify the background rate for each burst.
bg_bs (list): background rates used for threshold computation in burst
search (is a reference to `bg`, `bg_dd` or `bg_ad`).
fuse (None or float): if not None, the burst separation in ms below
which bursts have been fused (see `.fuse_bursts()`).
E (list): FRET efficiency value for each burst:
E = na/(na + gamma*nd).
S (list): stoichiometry value for each burst:
S = (gamma*nd + na) /(gamma*nd + na + naa)
"""
# Attribute names containing per-photon data.
# Each attribute is a list (1 element per ch) of arrays (1 element
# per photon).
ph_fields = ['ph_times_m', 'nanotimes', 'particles',
'A_em', 'D_em', 'A_ex', 'D_ex']
# Attribute names containing background data.
# The attribute `bg` is a dict with photon-selections as keys and
# list of arrays as values. Each list contains one element per channel and
# each array one element per background period.
# The attributes `.Lim` and `.Ph_p` are lists with one element per channel.
# Each element is a lists-of-tuples (one tuple per background period).
# These attributes do not exist before computing the background.
bg_fields = ['bg', 'Lim', 'Ph_p']
# Attribute names containing per-burst data.
# Each attribute is a list (1 element per ch) of arrays (1 element
# per burst).
    # They do not necessarily exist. For example 'naa' exists only for ALEX
# data. Also none of them exist before performing a burst search.
burst_fields = ['E', 'S', 'mburst', 'nd', 'na', 'nt', 'bp', 'nda', 'naa',
'max_rate', 'sbr', 'nar']
# Quantities (scalars or arrays) defining the current set of bursts
burst_metadata = ['m', 'L', 'T', 'TT', 'F', 'FF', 'P', 'PP', 'rate_th',
'bg_bs', 'ph_sel', 'bg_corrected', 'leakage_corrected',
'dir_ex_corrected', 'dithering', 'fuse', 'lsb']
# List of photon selections on which the background is computed
_ph_streams = [Ph_sel('all'), Ph_sel(Dex='Dem'), Ph_sel(Dex='Aem'),
Ph_sel(Aex='Dem'), Ph_sel(Aex='Aem')]
@property
def ph_streams(self):
if self.alternated:
return self._ph_streams
else:
return [Ph_sel('all'), Ph_sel(Dex='Dem'), Ph_sel(Dex='Aem')]
def __init__(self, leakage=0., gamma=1., dir_ex=0., **kwargs):
# Default values
init_kw = dict(ALEX=False, _leakage=float(leakage), _gamma=float(gamma),
_dir_ex=float(dir_ex), _beta=1., _chi_ch=1., s=[])
# Override with user data
init_kw.update(**kwargs)
DataContainer.__init__(self, **init_kw)
# def __getattr__(self, name):
# """Single-channel shortcuts for per-channel fields.
#
# Appending a '_' to a per-channel field avoids specifying the channel.
    #     For example use d.nd_ instead of d.nd[0].
# """
# msg_missing_attr = "'%s' object has no attribute '%s'" %\
# (self.__class__.__name__, name)
# if name.startswith('_') or not name.endswith('_'):
# raise AttributeError(msg_missing_attr)
#
# field = name[:-1]
# try:
# value = self.__getitem__(field)
# except KeyError:
# raise AttributeError(msg_missing_attr)
# else:
# # Support lists, tuples and object with array interface
# if isinstance(value, (list, tuple)) or isarray(value):
# if len(value) == self.nch:
# return value[0]
# raise ValueError('Name "%s" is not a per-channel field.' % field)
def copy(self, mute=False):
"""Copy data in a new object. All arrays copied except for ph_times_m
"""
pprint('Deep copy executed.\n', mute)
        new_d = Data(**self)  # this makes a shallow copy (like a pointer)
        # Deep copy (not just reference) of array data
for field in self.burst_fields + self.bg_fields:
            # Making sure the field is defined
if field in self:
# Make a deepcopy of the per-channel lists
new_d[field] = copy.deepcopy(self[field])
# Set the attribute: new_d.k = new_d[k]
setattr(new_d, field, new_d[field])
return new_d
##
# Methods for photon timestamps (ph_times_m) access
#
def ph_times_hash(self, hash_name='md5', hexdigest=True):
"""Return an hash for the timestamps arrays.
"""
m = hashlib.new(hash_name)
for ph in self.iter_ph_times():
if isinstance(ph, np.ndarray):
m.update(ph.data)
else:
# TODO Handle ph_times in PyTables files
raise NotImplementedError
if hexdigest:
return m.hexdigest()
else:
return m
@property
def ph_data_sizes(self):
"""Array of total number of photons (ph-data) for each channel.
"""
if not hasattr(self, '_ph_data_sizes'):
# This works both for numpy arrays and pytables arrays
self._ph_data_sizes = np.array([ph.shape[0] for ph in
self.ph_times_m])
return self._ph_data_sizes
def _fix_ph_sel(self, ph_sel):
"""For non-ALEX data fix Aex to allow stable comparison."""
msg = 'Photon selection must be of type `Ph_sel` (it was `%s` instead).'
assert isinstance(ph_sel, Ph_sel), (msg % type(ph_sel))
if self.alternated or ph_sel.Dex != 'DAem':
return ph_sel
else:
return Ph_sel(Dex=ph_sel.Dex, Aex='DAem')
def _is_allph(self, ph_sel):
"""Return whether a photon selection `ph_sel` covers all photon."""
if self.alternated:
return ph_sel == Ph_sel(Dex='DAem', Aex='DAem')
else:
return ph_sel.Dex == 'DAem'
def get_ph_mask(self, ich=0, ph_sel=Ph_sel('all')):
"""Returns a mask for `ph_sel` photons in channel `ich`.
The masks are either boolean arrays or slices (full or empty). In
both cases they can be used to index the timestamps of the
corresponding channel.
Arguments:
ph_sel (Ph_sel object): object defining the photon selection.
See :mod:`fretbursts.ph_sel` for details.
"""
assert isinstance(ich, int)
if self._is_allph(ph_sel):
# Note that slice(None) is equivalent to [:].
# Also, numpy arrays are not copied when sliced.
# So getting all photons with this mask is efficient
# Note: the drawback is that the slice cannot be indexed
# (where a normal boolean array would)
return slice(None)
# Handle the case when A_em contains slice objects
if isinstance(self.A_em[ich], slice):
if self.A_em[ich] == slice(None):
if ph_sel.Dex == 'Dem':
return slice(0)
if ph_sel.Dex == 'Aem':
return slice(None)
elif self.A_em[ich] == slice(0):
if ph_sel.Dex == 'Dem':
return slice(None)
if ph_sel.Dex == 'Aem':
return slice(0)
else:
msg = 'When a slice, A_em can only be slice(None) or slice(0).'
raise NotImplementedError(msg)
# Base selections
elif ph_sel == Ph_sel(Dex='Dem'):
return self.get_D_em_D_ex(ich)
elif ph_sel == Ph_sel(Dex='Aem'):
return self.get_A_em_D_ex(ich)
elif ph_sel == Ph_sel(Aex='Dem'):
return self.get_D_em(ich) * self.get_A_ex(ich)
elif ph_sel == Ph_sel(Aex='Aem'):
return self.get_A_em(ich) * self.get_A_ex(ich)
# Selection of all photon in one emission ch
elif ph_sel == Ph_sel(Dex='Dem', Aex='Dem'):
return self.get_D_em(ich)
elif ph_sel == Ph_sel(Dex='Aem', Aex='Aem'):
return self.get_A_em(ich)
# Selection of all photon in one excitation period
elif ph_sel == Ph_sel(Dex='DAem'):
return self.get_D_ex(ich)
elif ph_sel == Ph_sel(Aex='DAem'):
return self.get_A_ex(ich)
# Selection of all photons except for Dem during Aex
elif ph_sel == Ph_sel(Dex='DAem', Aex='Aem'):
return self.get_D_ex(ich) + self.get_A_em(ich) * self.get_A_ex(ich)
else:
raise ValueError('Photon selection not implemented.')
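    # Illustrative usage sketch for the selections above (names assumed):
    # the DexDem timestamps of channel 0 of a Data object `d` are
    #
    #     mask = d.get_ph_mask(0, ph_sel=Ph_sel(Dex='Dem'))
    #     ph_dd = d.ph_times_m[0][mask]
    #
    # For Ph_sel('all') the mask is slice(None), which indexes every photon
    # without copying the array.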
def iter_ph_masks(self, ph_sel=Ph_sel('all')):
"""Iterator returning masks for `ph_sel` photons.
Arguments:
ph_sel (Ph_sel object): object defining the photon selection.
See :mod:`fretbursts.ph_sel` for details.
"""
for ich in range(self.nch):
yield self.get_ph_mask(ich, ph_sel=ph_sel)
def get_ph_times(self, ich=0, ph_sel=Ph_sel('all'), compact=False):
"""Returns the timestamps array for channel `ich`.
This method always returns in-memory arrays, even when ph_times_m
is a disk-backed list of arrays.
Arguments:
ph_sel (Ph_sel object): object defining the photon selection.
See :mod:`fretbursts.ph_sel` for details.
compact (bool): if True, a photon selection of only one excitation
period is required and the timestamps are "compacted" by
removing the "gaps" between each excitation period.
"""
ph = self.ph_times_m[ich]
        # If ph is not an in-memory array (e.g. an on-disk pytables array),
        # we need to load it
if not isinstance(ph, np.ndarray):
if hasattr(self, '_ph_cache') and self._ph_cache_ich == ich:
ph = self._ph_cache
else:
ph = ph.read()
self._ph_cache = ph
self._ph_cache_ich = ich
ph = ph[self.get_ph_mask(ich, ph_sel=ph_sel)]
if compact:
ph = self._ph_times_compact(ph, ph_sel)
return ph
def iter_ph_times(self, ph_sel=Ph_sel('all'), compact=False):
"""Iterator that returns the arrays of timestamps in `.ph_times_m`.
Arguments:
Same arguments as :meth:`get_ph_mask` except for `ich`.
"""
for ich in range(self.nch):
yield self.get_ph_times(ich, ph_sel=ph_sel, compact=compact)
def _get_ph_mask_single(self, ich, mask_name, negate=False):
"""Get the bool array `mask_name` for channel `ich`.
If the internal "bool array" is a scalar return a slice (full or empty)
"""
mask = np.asarray(getattr(self, mask_name)[ich])
if negate:
mask = np.logical_not(mask)
if len(mask.shape) == 0:
# If mask is a boolean scalar, select all or nothing
mask = slice(None) if mask else slice(0)
return mask
def get_A_em(self, ich=0):
"""Returns a mask to select photons detected in the acceptor ch."""
return self._get_ph_mask_single(ich, 'A_em')
def get_D_em(self, ich=0):
"""Returns a mask to select photons detected in the donor ch."""
return self._get_ph_mask_single(ich, 'A_em', negate=True)
def get_A_ex(self, ich=0):
"""Returns a mask to select photons in acceptor-excitation periods."""
return self._get_ph_mask_single(ich, 'A_ex')
def get_D_ex(self, ich=0):
"""Returns a mask to select photons in donor-excitation periods."""
if self.alternated:
return self._get_ph_mask_single(ich, 'D_ex')
else:
return slice(None)
def get_D_em_D_ex(self, ich=0):
"""Returns a mask of donor photons during donor-excitation."""
if self.alternated:
return self.get_D_em(ich) * self.get_D_ex(ich)
else:
return self.get_D_em(ich)
def get_A_em_D_ex(self, ich=0):
"""Returns a mask of acceptor photons during donor-excitation."""
if self.alternated:
return self.get_A_em(ich) * self.get_D_ex(ich)
else:
return self.get_A_em(ich)
def iter_ph_times_period(self, ich=0, ph_sel=Ph_sel('all')):
"""Iterate through arrays of ph timestamps in each background period.
"""
mask = self.get_ph_mask(ich=ich, ph_sel=ph_sel)
for period in range(self.nperiods):
yield self.get_ph_times_period(period, ich=ich, mask=mask)
def get_ph_times_period(self, period, ich=0, ph_sel=Ph_sel('all'),
mask=None):
"""Return the array of ph_times in `period`, `ich` and `ph_sel`.
"""
istart, iend = self.Lim[ich][period]
period_slice = slice(istart, iend + 1)
ph_times = self.get_ph_times(ich=ich)
if mask is None:
mask = self.get_ph_mask(ich=ich, ph_sel=ph_sel)
if isinstance(mask, slice) and mask == slice(None):
ph_times_period = ph_times[period_slice]
else:
ph_times_period = ph_times[period_slice][mask[period_slice]]
return ph_times_period
def _assert_compact(self, ph_sel):
msg = ('Option compact=True requires a photon selection \n'
'from a single excitation period (either Dex or Aex).')
if not self.alternated:
raise ValueError('Option compact=True requires ALEX data.')
if ph_sel.Dex is not None and ph_sel.Aex is not None:
raise ValueError(msg)
def _excitation_width(self, ph_sel, ich=0):
"""Returns duration of alternation period outside selected excitation.
"""
self._assert_compact(ph_sel)
if ph_sel.Aex is None:
excitation_range = self._D_ON_multich[ich]
elif ph_sel.Dex is None:
excitation_range = self._A_ON_multich[ich]
return _excitation_width(excitation_range, self.alex_period)
def _ph_times_compact(self, ph, ph_sel):
"""Return timestamps in one excitation period with "gaps" removed.
It takes timestamps in the specified alternation period and removes
gaps due to time intervals outside the alternation period selection.
        This allows correcting the photon-rate distortion due to alternation.
Arguments:
ph (array): timestamps array from which gaps have to be removed.
                The input array is not modified; a new array is returned.
ph_sel (Ph_sel object): photon selection to be compacted.
Note that only one excitation must be specified, but the
emission can be 'Dem', 'Aem' or 'DAem'.
See :mod:`fretbursts.ph_sel` for details.
Returns:
            Array of timestamps in one excitation period with "gaps" removed.
"""
excitation_width = self._excitation_width(ph_sel)
return _ph_times_compact(ph, self.alex_period, excitation_width)
def _get_tuple_multich(self, name):
"""Get a n-element tuple field in multi-ch format (1 row per ch)."""
field = np.array(self[name])
if field.ndim == 1:
field = np.repeat([field], self.nch, axis=0)
return field
@property
def _D_ON_multich(self):
return self._get_tuple_multich('D_ON')
@property
def _A_ON_multich(self):
return self._get_tuple_multich('A_ON')
@property
def _det_donor_accept_multich(self):
return self._get_tuple_multich('det_donor_accept')
##
# Methods and properties for burst-data access
#
@property
def num_bursts(self):
"""Array of number of bursts in each channel."""
return np.array([bursts.num_bursts for bursts in self.mburst])
@property
def burst_widths(self):
"""List of arrays of burst duration in seconds. One array per channel.
"""
return [bursts.width * self.clk_p for bursts in self.mburst]
def burst_sizes_pax_ich(self, ich=0, gamma=1., add_aex=True,
beta=1., donor_ref=True, aex_corr=True):
r"""Return corrected burst sizes for channel `ich`. PAX-only.
When `donor_ref = False`, the formula for PAX-enhanced burst size is:
.. math::
\gamma(F_{D_{ex}D_{em}} + F_{DA_{ex}D_{em}}) +
\frac{1}{\alpha} F_{FRET}
where :math:`\alpha` is the Dex duty-cycle (0.5 if alternation
periods are equal) and :math:`F_{FRET}` is `na`, the AemAex
signal after leakage and direct-excitation corrections.
If `add_ex = True`, we add the term:
.. math::
\tilde{F}_{A_{ex}A_{em}} / (\alpha\beta)
        where :math:`\tilde{F}_{A_{ex}A_{em}}` is the A emission due to
A excitation (and not due to FRET).
If `aex_corr = False`, then :math:`\alpha` is fixed to 1.
If `donor_ref = True`, the above burst size expression is divided by
:math:`\gamma`.
Arguments:
ich (int): the spot number, only relevant for multi-spot.
In single-spot data there is only one channel (`ich=0`)
so this argument may be omitted. Default 0.
gamma (float): coefficient for gamma correction of burst
sizes. Default: 1. For more info see explanation above.
donor_ref (bool): True or False select different conventions
for burst size correction. For details see
:meth:`fretbursts.burstlib.Data.burst_sizes_ich`.
add_aex (boolean): when True, the returned burst size also
includes photons detected during the DAex. Default is True.
aex_corr (bool): If True, and `add_aex == True`, then divide
the DAexAem term (naa) by the Dex duty cycle. For example,
if Dex and DAex alternation periods are equal, naa is
multiplied by 2. This correction makes the returned value
equal to the denominator of the stoichiometry ratio S_pax
(PAX-enhanced formula). If False, naa is not divided by
the Dex duty-cycle (gamma and beta corrections may still be
applied). If `add_aex == False`, `aex_corr` is ignored.
beta (float): beta correction factor used for the DAexAem term
(naa) of the burst size.
If `add_aex == False` this argument is ignored. Default 1.
Returns
Array of burst sizes for channel `ich`.
See also:
:meth:`Data.burst_sizes_ich`
"""
assert 'PAX' in self.meas_type
naa = self._get_naa_ich(ich) # nar-subtracted
aex_dex_ratio = self._aex_dex_ratio()
alpha = 1
if aex_corr:
alpha = 1 - self._aex_fraction() # Dex duty-cycle
burst_size_dex = self.nd[ich] * gamma + self.na[ich]
burst_size_aex = (self.nda[ich] * gamma +
self.na[ich] * aex_dex_ratio +
naa / (alpha * beta))
burst_size = burst_size_dex
if add_aex:
burst_size += burst_size_aex
if donor_ref:
burst_size /= gamma
return burst_size
def burst_sizes_ich(self, ich=0, gamma=1., add_naa=False,
beta=1., donor_ref=True):
"""Return gamma corrected burst sizes for channel `ich`.
If `donor_ref == True` (default) the gamma corrected burst size is
computed according to::
1) nd + na / gamma
Otherwise, if `donor_ref == False`, the gamma corrected burst size is::
2) nd * gamma + na
With the definition (1) the corrected burst size is equal to the raw
        burst size for zero-FRET or D-only bursts (that's why it is called
        `donor_ref`).
With the definition (2) the corrected burst size is equal to the raw
burst size for 100%-FRET bursts.
In an ALEX measurement, use `add_naa = True` to add counts from
AexAem stream to the returned burst size. The argument `gamma` and
`beta` are used to correctly scale `naa` so that it become
commensurate with the Dex corrected burst size. In particular,
when using definition (1) (i.e. `donor_ref = True`), the total
burst size is::
(nd + na/gamma) + naa / (beta * gamma)
Conversely, when using definition (2) (`donor_ref = False`), the
total burst size is::
(nd * gamma + na) + naa / beta
Arguments:
ich (int): the spot number, only relevant for multi-spot.
In single-spot data there is only one channel (`ich=0`)
so this argument may be omitted. Default 0.
add_naa (boolean): when True, add a term for AexAem photons when
computing burst size. Default False.
gamma (float): coefficient for gamma correction of burst
sizes. Default: 1. For more info see explanation above.
beta (float): beta correction factor used for the AexAem term
of the burst size. Default 1. If `add_naa = False` or
measurement is not ALEX this argument is ignored.
For more info see explanation above.
donor_ref (bool): select the convention for burst size correction.
See details above in the function description.
Returns
Array of burst sizes for channel `ich`.
See also :meth:`fretbursts.burstlib.Data.get_naa_corrected`.
"""
if donor_ref:
burst_size = self.nd[ich] + self.na[ich] / gamma
else:
burst_size = self.nd[ich] * gamma + self.na[ich]
if add_naa and self.alternated:
kws = dict(ich=ich, gamma=gamma, beta=beta, donor_ref=donor_ref)
burst_size += self.get_naa_corrected(**kws)
return burst_size
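    # Worked example of the two conventions above (illustrative values):
    # with nd = 80, na = 20 and gamma = 0.8,
    #
    #     donor_ref=True:   80 + 20 / 0.8  = 105.0
    #     donor_ref=False:  80 * 0.8 + 20  = 84.0
    #
    # The two results differ only by the overall factor gamma (105 * 0.8 = 84).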
def get_naa_corrected(self, ich=0, gamma=1., beta=1., donor_ref=True):
"""Return corrected naa array for channel `ich`.
Arguments:
ich (int): the spot number, only relevant for multi-spot.
gamma (floats): gamma-factor to use in computing the corrected naa.
beta (float): beta-factor to use in computing the corrected naa.
donor_ref (bool): Select the convention for `naa` correction.
If True (default), uses `naa / (beta * gamma)`. Otherwise,
uses `naa / beta`. A consistent convention should be used
for the corrected Dex burst size in order to make it
commensurable with naa.
See also :meth:`fretbursts.burstlib.Data.burst_sizes_ich`.
"""
naa = self._get_naa_ich(ich) # with eventual duty-cycle correction
if donor_ref:
correction = (gamma * beta)
else:
correction = beta
return naa / correction
def _get_naa_ich(self, ich=0):
"""Return naa for `ich` both in ALEX and PAX measurements.
In case of PAX, returns naa using the duty-cycle correction::
naa = self.naa - aex_dex_ratio * self.nar
where `self.nar` is equal to `self.na` before leakage and direct
        excitation correction, and `aex_dex_ratio` is the Aex/Dex
        duty-cycle ratio.
"""
naa = self.naa[ich]
if 'PAX' in self.meas_type:
# ATTENTION: do not modify naa inplace
naa = naa - self._aex_dex_ratio() * self.nar[ich]
return naa
def burst_sizes(self, gamma=1., add_naa=False, beta=1., donor_ref=True):
"""Return gamma corrected burst sizes for all the channel.
Compute burst sizes by calling, for each channel,
:meth:`burst_sizes_ich`.
See :meth:`burst_sizes_ich` for description of the arguments.
Returns
List of arrays of burst sizes, one array per channel.
"""
kwargs = dict(gamma=gamma, add_naa=add_naa, beta=beta,
donor_ref=donor_ref)
bsize_list = [self.burst_sizes_ich(ich, **kwargs) for ich in
range(self.nch)]
return np.array(bsize_list)
def iter_bursts_ph(self, ich=0):
"""Iterate over (start, stop) indexes to slice photons for each burst.
"""
for istart, istop in iter_bursts_start_stop(self.mburst[ich]):
yield istart, istop
def bursts_slice(self, N1=0, N2=-1):
"""Return new Data object with bursts between `N1` and `N2`
`N1` and `N2` can be scalars or lists (one per ch).
"""
if np.isscalar(N1): N1 = [N1] * self.nch
if np.isscalar(N2): N2 = [N2] * self.nch
assert len(N1) == len(N2) == self.nch
d = Data(**self)
d.add(mburst=[b[n1:n2].copy() for b, n1, n2 in zip(d.mburst, N1, N2)])
d.add(nt=[nt[n1:n2] for nt, n1, n2 in zip(d.nt, N1, N2)])
d.add(nd=[nd[n1:n2] for nd, n1, n2 in zip(d.nd, N1, N2)])
d.add(na=[na[n1:n2] for na, n1, n2 in zip(d.na, N1, N2)])
        for name in ('naa', 'nda', 'nar'):
            if name in d:
                d.add(**{name:
                         [x[n1:n2] for x, n1, n2 in zip(d[name], N1, N2)]})
d.calc_fret(pax=self.pax) # recalculate fret efficiency
return d
def delete_burst_data(self):
"""Erase all the burst data"""
for name in self.burst_fields + self.burst_metadata:
if name in self:
self.delete(name)
for name in ('E_fitter', 'S_fitter'):
if hasattr(self, name):
delattr(self, name)
##
# Methods for high-level data transformation
#
def slice_ph(self, time_s1=0, time_s2=None, s='slice'):
"""Return a new Data object with ph in [`time_s1`,`time_s2`] (seconds)
If ALEX, this method must be called right after
:func:`fretbursts.loader.alex_apply_periods` (with `delete_ph_t=True`)
and before any background estimation or burst search.
"""
if time_s2 is None:
time_s2 = self.time_max
if time_s2 >= self.time_max and time_s1 <= 0:
return self.copy()
assert time_s1 < self.time_max
t1_clk, t2_clk = int(time_s1 / self.clk_p), int(time_s2 / self.clk_p)
masks = [(ph >= t1_clk) * (ph < t2_clk) for ph in self.iter_ph_times()]
new_d = Data(**self)
for name in self.ph_fields:
if name in self:
new_d[name] = [a[mask] for a, mask in zip(self[name], masks)]
setattr(new_d, name, new_d[name])
new_d.delete_burst_data()
# Shift timestamps to start from 0 to avoid problems with BG calc
for ich in range(self.nch):
ph_i = new_d.get_ph_times(ich)
ph_i -= t1_clk
new_d.s.append(s)
# Delete any cached properties
for attr in ['_time_min', '_time_max']:
if hasattr(new_d, attr):
delattr(new_d, attr)
return new_d
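# Usage sketch for slice_ph() under the assumptions stated in the
# docstring above (ALEX period selection already applied, no burst
# search yet); `d` is a hypothetical Data object:
#     d10min = d.slice_ph(time_s1=0, time_s2=600)  # keep the first 600 s
# Background estimation and burst search are then run on `d10min`.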
def collapse(self, update_gamma=True, skip_ch=None):
"""Returns an object with 1-spot data joining the multi-spot data.
Arguments:
skip_ch (tuple of ints): list of channels to skip.
If None, keep all channels.
update_gamma (bool): if True, recompute gamma as mean of the
per-channel gamma. If False, do not update gamma.
If True, gamma becomes a single value and the update has the
side effect of recomputing E and S values, discarding
previous per-channel corrections. If False, gamma is not
updated (it stays with multi-spot values) and E and S are
not recomputed.
Note:
When using `update_gamma=False`, burst selections on the
collapsed `Data` object should be done with
`computefret=False`, otherwise any attempt to use multi-spot
gamma for single-spot data will raise an error.
"""
dc = Data(**self)
mch_bursts = self.mburst
if skip_ch is not None:
mch_bursts = [bursts for i, bursts in enumerate(mch_bursts)
if i not in skip_ch]
bursts = bslib.Bursts.merge(mch_bursts, sort=False)
# Sort by start times, and when equal by stop times
indexsort = np.lexsort((bursts.stop, bursts.start))
dc.add(mburst=[bursts[indexsort]])
ich_burst = [i * np.ones(nb) for i, nb in enumerate(self.num_bursts)]
dc.add(ich_burst=np.hstack(ich_burst)[indexsort])
for name in self.burst_fields:
if name in self and name != 'mburst':
# Concatenate arrays along axis = 0
value = [np.concatenate(self[name])[indexsort]]
dc.add(**{name: value})
dc.add(nch=1)
dc.add(_chi_ch=1.)
# NOTE: Updating gamma has the side effect of recomputing E
# (and S if ALEX). We need to update gamma because, in general,
# gamma can be an array with a value for each ch.
# However, the per-channel gamma correction is lost once both
# gamma and chi_ch are made scalar.
if update_gamma:
dc._update_gamma(np.mean(self.get_gamma_array()))
return dc
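# Sketch: collapsing a hypothetical 8-spot measurement `d` into a
# single-spot object, dropping two noisy channels:
#     dc = d.collapse(skip_ch=(3, 5))        # gamma averaged, E/S recomputed
#     dc2 = d.collapse(update_gamma=False)   # keep per-spot gamma values
# With update_gamma=False, later selections need computefret=False
# (see the Note in the docstring above).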
##
# Utility methods
#
def get_params(self):
"""Returns a plain dict containing only parameters and no arrays.
This can be used as a summary of data analysis parameters.
Additional keys `name` and `Name` are added with values
from `.name` and `.Name()`.
"""
p_names = ['fname', 'clk_p', 'nch', 'ph_sel', 'L', 'm', 'F', 'P',
'_leakage', '_dir_ex', '_gamma', 'bg_time_s',
'T', 'rate_th',
'bg_corrected', 'leakage_corrected', 'dir_ex_corrected',
'dithering', '_chi_ch', 's', 'ALEX']
p_dict = dict(self)
for name in list(p_dict.keys()):  # copy keys: we pop while iterating
if name not in p_names:
p_dict.pop(name)
p_dict.update(name=self.name, Name=self.Name(), bg_mean=self.bg_mean,
nperiods=self.nperiods)
return p_dict
def expand(self, ich=0, alex_naa=False, width=False):
"""Return per-burst D and A sizes (nd, na) and their background counts.
This method returns for each bursts the corrected signal counts and
background counts in donor and acceptor channels. Optionally, the
burst width is also returned.
Arguments:
ich (int): channel of the bursts (can differ from 0 only in
    multi-spot data).
alex_naa (bool): if True and self.ALEX, also return burst sizes
    and background for acceptor photons during acceptor excitation.
width (bool): whether to return the burst duration (in seconds).
Returns:
List of arrays: nd, na, donor bg, acceptor bg.
If `alex_naa` is True returns: nd, na, naa, bg_d, bg_a, bg_aa.
If `width` is True returns the bursts duration (in sec.) as last
element.
"""
period = self.bp[ich]
w = self.mburst[ich].width * self.clk_p
bg_a = self.bg[Ph_sel(Dex='Aem')][ich][period] * w
bg_d = self.bg[Ph_sel(Dex='Dem')][ich][period] * w
res = [self.nd[ich], self.na[ich]]
if self.alternated and alex_naa:
bg_aa = self.bg[Ph_sel(Aex='Aem')][ich][period] * w
res.extend([self.naa[ich], bg_d, bg_a, bg_aa])
else:
res.extend([bg_d, bg_a])
if width:
res.append(w)
return res
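# Sketch of unpacking expand() results for channel 0 of a hypothetical
# ALEX Data object `d` (names follow the docstring above):
#     nd, na, bg_d, bg_a = d.expand()
#     nd, na, naa, bg_d, bg_a, bg_aa, w = d.expand(alex_naa=True, width=True)
#     sbr_d = nd / bg_d   # e.g. a per-burst donor signal-to-background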
def burst_data_ich(self, ich):
"""Return a dict of burst data for channel `ich`."""
bursts = {}
bursts['size_raw'] = self.mburst[ich].counts
bursts['t_start'] = self.mburst[ich].start * self.clk_p
bursts['t_stop'] = self.mburst[ich].stop * self.clk_p
bursts['i_start'] = self.mburst[ich].istart
bursts['i_stop'] = self.mburst[ich].istop
period = bursts['bg_period'] = self.bp[ich]
width = self.mburst[ich].width * self.clk_p
bursts['width_ms'] = width * 1e3
bursts['bg_ad'] = self.bg[Ph_sel(Dex='Aem')][ich][period] * width
bursts['bg_dd'] = self.bg[Ph_sel(Dex='Dem')][ich][period] * width
if self.alternated:
bursts['bg_aa'] = self.bg[Ph_sel(Aex='Aem')][ich][period] * width
bursts['bg_da'] = self.bg[Ph_sel(Aex='Dem')][ich][period] * width
burst_fields = self.burst_fields[:]
burst_fields.remove('mburst')
burst_fields.remove('bp')
for field in burst_fields:
if field in self:
bursts[field] = self[field][ich]
return bursts
@property
def time_max(self):
"""The last recorded time in seconds."""
if not hasattr(self, '_time_max'):
self._time_max = self._time_reduce(last=True, func=max)
return self._time_max
@property
def time_min(self):
"""The first recorded time in seconds."""
if not hasattr(self, '_time_min'):
self._time_min = self._time_reduce(last=False, func=min)
return self._time_min
def _time_reduce(self, last=True, func=max):
"""Return first or last timestamp per-ch, reduced with `func`.
"""
idx = -1 if last else 0
# Get either ph_times_m or ph_times_t
ph_times = None
for ph_times_name in ['ph_times_m', 'ph_times_t']:
try:
ph_times = self[ph_times_name]
except KeyError:
pass
else:
break
if ph_times is not None:
# This works with both numpy arrays and pytables arrays
time = func(t[idx] for t in ph_times if t.shape[0] > 0)
elif 'mburst' in self:
if last:
time = func(bursts[idx].stop for bursts in self.mburst)
else:
time = func(bursts[idx].start for bursts in self.mburst)
else:
raise ValueError("No timestamps or bursts found.")
return time * self.clk_p
def ph_in_bursts_mask_ich(self, ich=0, ph_sel=Ph_sel('all')):
"""Return mask of all photons inside bursts for channel `ich`.
Returns:
Boolean array for photons in channel `ich` and photon
selection `ph_sel` that are inside any burst.
"""
bursts_mask = ph_in_bursts_mask(self.ph_data_sizes[ich],
self.mburst[ich])
if self._is_allph(ph_sel):
return bursts_mask
else:
ph_sel_mask = self.get_ph_mask(ich=ich, ph_sel=ph_sel)
return ph_sel_mask * bursts_mask
def ph_in_bursts_ich(self, ich=0, ph_sel=Ph_sel('all')):
"""Return timestamps of photons inside bursts for channel `ich`.
Returns:
Array of photon timestamps in channel `ich` and photon
selection `ph_sel` that are inside any burst.
"""
ph_all = self.get_ph_times(ich=ich)
bursts_mask = self.ph_in_bursts_mask_ich(ich, ph_sel)
return ph_all[bursts_mask]
##
# Background analysis methods
#
def _obsolete_bg_attr(self, attrname, ph_sel):
print('The Data.%s attribute is deprecated. Please use '
'Data.bg(%s) instead.' % (attrname, repr(ph_sel)))
bg_attrs = ('bg_dd', 'bg_ad', 'bg_da', 'bg_aa')
bg_mean_attrs = ('rate_m', 'rate_dd', 'rate_ad', 'rate_da', 'rate_aa')
assert attrname in bg_attrs or attrname in bg_mean_attrs
if attrname in bg_attrs:
bg_field = 'bg'
elif attrname in bg_mean_attrs:
bg_field = 'bg_mean'
try:
value = getattr(self, bg_field)[ph_sel]
except AttributeError as e:
# This only happens when trying to access 'bg' because
# 'bg_mean' raises RuntimeError when missing.
msg = 'No attribute `%s` found. Please compute background first.'
raise_from(RuntimeError(msg % bg_field), e)
return value
@property
def rate_m(self):
return self._obsolete_bg_attr('rate_m', Ph_sel('all'))
@property
def rate_dd(self):
return self._obsolete_bg_attr('rate_dd', Ph_sel(Dex='Dem'))
@property
def rate_ad(self):
return self._obsolete_bg_attr('rate_ad', Ph_sel(Dex='Aem'))
@property
def rate_da(self):
return self._obsolete_bg_attr('rate_da', Ph_sel(Aex='Dem'))
@property
def rate_aa(self):
return self._obsolete_bg_attr('rate_aa', Ph_sel(Aex='Aem'))
@property
def bg_dd(self):
return self._obsolete_bg_attr('bg_dd', Ph_sel(Dex='Dem'))
@property
def bg_ad(self):
return self._obsolete_bg_attr('bg_ad', Ph_sel(Dex='Aem'))
@property
def bg_da(self):
return self._obsolete_bg_attr('bg_da', Ph_sel(Aex='Dem'))
@property
def bg_aa(self):
return self._obsolete_bg_attr('bg_aa', Ph_sel(Aex='Aem'))
def calc_bg_cache(self, fun, time_s=60, tail_min_us=500, F_bg=2,
error_metrics=None, fit_allph=True,
recompute=False):
"""Compute time-dependent background rates for all the channels.
This is the cached version of :meth:`calc_bg`.
This method tries to load the background data from a cache file.
If no saved background data is found, it computes
the background and stores it to disk.
The arguments are the same as :meth:`calc_bg` with the only addition
of `recompute` (bool) to force a background recomputation even if
a cached version is found.
For more details on the other arguments see :meth:`calc_bg`.
"""
bg_cache.calc_bg_cache(self, fun, time_s=time_s,
tail_min_us=tail_min_us, F_bg=F_bg,
error_metrics=error_metrics, fit_allph=fit_allph,
recompute=recompute)
def _get_auto_bg_th_arrays(self, F_bg=2, tail_min_us0=250):
"""Return a dict of threshold values for background estimation.
The keys are the ph selections in self.ph_streams and the values
are 1-D arrays of size nch.
"""
Th_us = {}
for ph_sel in self.ph_streams:
th_us = np.zeros(self.nch)
for ich, ph in enumerate(self.iter_ph_times(ph_sel=ph_sel)):
if ph.size > 0:
bg_rate, _ = bg.exp_fit(ph, tail_min_us=tail_min_us0)
th_us[ich] = 1e6 * F_bg / bg_rate
Th_us[ph_sel] = th_us
# Save the input used to generate Th_us
self.add(bg_auto_th_us0=tail_min_us0, bg_auto_F_bg=F_bg)
return Th_us
def _get_bg_th_arrays(self, tail_min_us, nperiods):
"""Return a dict of threshold values for background estimation.
The keys are the ph selections in self.ph_streams and the values
are 1-D arrays of size nch.
"""
n_streams = len(self.ph_streams)
if np.size(tail_min_us) == 1:
tail_min_us = np.repeat(tail_min_us, n_streams)
elif np.size(tail_min_us) == n_streams:
tail_min_us = np.asarray(tail_min_us)
else:
raise ValueError('Wrong tail_min_us length (%d).' %
len(tail_min_us))
th_us = {}
for i, key in enumerate(self.ph_streams):
th_us[key] = np.ones(nperiods) * tail_min_us[i]
# Save the input used to generate Th_us
self.add(bg_th_us_user=tail_min_us)
return th_us
def _clean_bg_data(self):
"""Remove background fields specific of only one fit type.
Computing background with manual or 'auto' threshold results in
different sets of attributes being saved. This method removes these
attributes and should be called before recomputing the background
to avoid having old stale attributes of a previous background fit.
"""
# Attributes specific of manual or 'auto' bg fit
field_list = ['bg_auto_th_us0', 'bg_auto_F_bg', 'bg_th_us_user']
for field in field_list:
if field in self:
self.delete(field)
if hasattr(self, '_bg_mean'):
delattr(self, '_bg_mean')
def _get_num_periods(self, time_s):
"""Return the number of periods using `time_s` as period duration.
"""
duration = self.time_max - self.time_min
# Take the ceil to have at least 1 period
nperiods = np.ceil(duration / time_s)
# Discard last period if negligibly small to avoid problems with
# background fit with very few photons.
if nperiods > 1:
last_period = self.time_max - time_s * (nperiods - 1)
# Discard last period if smaller than 3% of the bg period
if last_period < time_s * 0.03:
nperiods -= 1
return int(nperiods)
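# Worked example of the period arithmetic above (illustrative numbers):
# duration = 125 s and time_s = 60 s give ceil(125/60) = 3 periods;
# the last period spans 125 - 60*2 = 5 s, which is larger than
# 0.03 * 60 = 1.8 s, so it is kept and _get_num_periods() returns 3.
# Had the duration been 121 s, the 1 s tail (< 1.8 s) would be dropped
# and 2 periods returned.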
def calc_bg(self, fun, time_s=60, tail_min_us=500, F_bg=2,
error_metrics=None, fit_allph=True):
"""Compute time-dependent background rates for all the channels.
Compute background rates for donor, acceptor and both detectors.
The rates are computed every `time_s` seconds, allowing to
track possible variations during the measurement.
Arguments:
fun (function): function for background estimation (example
`bg.exp_fit`)
time_s (float, seconds): compute background each time_s seconds
tail_min_us (float, tuple or string): min threshold in us for
photon waiting times to use in background estimation.
If a float, the same threshold is used for 'all', DD, AD and AA
photons, and for all the channels.
If a 3- or 4-element tuple, each value is used for 'all', DD, AD
or AA photons, with the same value for all the channels.
If 'auto', the threshold is computed for each stream ('all',
DD, DA, AA) and for each channel as `bg_F * rate_ml0`.
`rate_ml0` is an initial estimation of the rate performed using
:func:`bg.exp_fit` and a fixed threshold (default 250us).
F_bg (float): when `tail_min_us` is 'auto', the factor by which
    the initial background estimate is multiplied to compute the
    threshold.
error_metrics (string): Specifies the error metric to use.
See :func:`fretbursts.background.exp_fit` for more details.
fit_allph (bool): if True (default) the background for the
all-photon is fitted. If False it is computed as the sum of
backgrounds in all the other streams.
The background estimation functions are defined in the module
`background` (conventionally imported as `bg`).
Example:
Compute background with `bg.exp_fit` (inter-photon delay MLE
tail fitting), every 20 s, with automatic tail-threshold::
    d.calc_bg(bg.exp_fit, time_s=20, tail_min_us='auto')
Returns:
None, all the results are saved in the object itself.
"""
pprint(" - Calculating BG rates ... ")
self._clean_bg_data()
kwargs = dict(clk_p=self.clk_p, error_metrics=error_metrics)
nperiods = self._get_num_periods(time_s)
streams_noall = [s for s in self.ph_streams if s != Ph_sel('all')]
bg_auto_th = tail_min_us == 'auto'
if bg_auto_th:
tail_min_us0 = 250
self.add(bg_auto_th_us0=tail_min_us0, bg_auto_F_bg=F_bg)
auto_th_kwargs = dict(clk_p=self.clk_p, tail_min_us=tail_min_us0)
th_us = {}
for key in self.ph_streams:
th_us[key] = np.zeros(nperiods)
else:
th_us = self._get_bg_th_arrays(tail_min_us, nperiods)
Lim, Ph_p = [], []
BG, BG_err = [], []
Th_us = []
for ich, ph_ch in enumerate(self.iter_ph_times()):
masks = {sel: self.get_ph_mask(ich, ph_sel=sel)
for sel in self.ph_streams}
bins = ((np.arange(nperiods + 1) * time_s + self.time_min) /
self.clk_p)
# Note: histogram bins are half-open, e.g. [a, b)
counts, _ = np.histogram(ph_ch, bins=bins)
lim, ph_p = [], []
bg = {sel: np.zeros(nperiods) for sel in self.ph_streams}
bg_err = {sel: np.zeros(nperiods) for sel in self.ph_streams}
i1 = 0
for ip in range(nperiods):
i0 = i1
i1 += counts[ip]
lim.append((i0, i1 - 1))
ph_p.append((ph_ch[i0], ph_ch[i1 - 1]))
ph_i = ph_ch[i0:i1]
if fit_allph:
sel = Ph_sel('all')
if bg_auto_th:
_bg, _ = fun(ph_i, **auto_th_kwargs)
th_us[sel][ip] = 1e6 * F_bg / _bg
bg[sel][ip], bg_err[sel][ip] = \
fun(ph_i, tail_min_us=th_us[sel][ip], **kwargs)
for sel in streams_noall:
# This supports cases of D-only or A-only timestamps
# where self.A_em[ich] is a bool and not a bool-array
# In this case, the mask of either DexDem or DexAem is
# slice(None) (all-elements selection).
if isinstance(masks[sel], slice):
if masks[sel] == slice(None):
bg[sel][ip] = bg[Ph_sel('all')][ip]
bg_err[sel][ip] = bg_err[Ph_sel('all')][ip]
continue
else:
ph_i_sel = ph_i[masks[sel][i0:i1]]
if ph_i_sel.size > 0:
if bg_auto_th:
_bg, _ = fun(ph_i_sel, **auto_th_kwargs)
th_us[sel][ip] = 1e6 * F_bg / _bg
bg[sel][ip], bg_err[sel][ip] = \
fun(ph_i_sel, tail_min_us=th_us[sel][ip], **kwargs)
if not fit_allph:
bg[Ph_sel('all')] += sum(bg[s] for s in streams_noall)
bg_err[Ph_sel('all')] += sum(bg_err[s] for s in streams_noall)
Lim.append(lim)
Ph_p.append(ph_p)
BG.append(bg)
BG_err.append(bg_err)
Th_us.append(th_us)
# Make Dict Of Lists (DOL) from Lists of Dicts
BG_dol, BG_err_dol, Th_us_dol = {}, {}, {}
for sel in self.ph_streams:
BG_dol[sel] = [bg_ch[sel] for bg_ch in BG]
BG_err_dol[sel] = [err_ch[sel] for err_ch in BG_err]
Th_us_dol[sel] = [th_ch[sel] for th_ch in Th_us]
self.add(bg=BG_dol, bg_err=BG_err_dol, bg_th_us=Th_us_dol,
Lim=Lim, Ph_p=Ph_p,
bg_fun=fun, bg_fun_name=fun.__name__,
bg_time_s=time_s, bg_ph_sel=Ph_sel('all'),
bg_auto_th=bg_auto_th,  # bool, True if using the auto-threshold
)
pprint("[DONE]\n")
@property
def nperiods(self):
return len(self.bg[Ph_sel('all')][0])
@property
def bg_mean(self):
if 'bg' not in self:
raise RuntimeError('No background found, compute it first.')
if not hasattr(self, '_bg_mean'):
self._bg_mean = {k: [bg_ch.mean() for bg_ch in bg_ph_sel]
for k, bg_ph_sel in self.bg.items()}
return self._bg_mean
def recompute_bg_lim_ph_p(self, ph_sel, mute=False):
"""Recompute self.Lim and selp.Ph_p relative to ph selection `ph_sel`
`ph_sel` is a Ph_sel object selecting the timestamps in which self.Lim
and self.Ph_p are being computed.
"""
ph_sel = self._fix_ph_sel(ph_sel)
if self.bg_ph_sel == ph_sel:
return
pprint(" - Recomputing background limits for %s ... " %
str(ph_sel), mute)
bg_time_clk = self.bg_time_s / self.clk_p
Lim, Ph_p = [], []
for ph_ch in self.iter_ph_times(ph_sel):  # one timestamp array per spot
bins = np.arange(self.nperiods + 1) * bg_time_clk
# Note: histogram bins are half-open, e.g. [a, b)
counts, _ = np.histogram(ph_ch, bins=bins)
lim, ph_p = [], []
i1 = 0
for ip in range(self.nperiods):
i0 = i1
i1 += counts[ip]
lim.append((i0, i1 - 1))
ph_p.append((ph_ch[i0], ph_ch[i1-1]))
Lim.append(lim)
Ph_p.append(ph_p)
self.add(Lim=Lim, Ph_p=Ph_p, bg_ph_sel=ph_sel)
pprint("[DONE]\n", mute)
##
# Burst analysis methods
#
def _calc_burst_period(self):
"""Compute for each burst the "background period" `bp`.
Background periods are the time intervals on which the BG is computed.
"""
P = []
for b, lim in zip(self.mburst, self.Lim):
p = zeros(b.num_bursts, dtype=np.int16)
if b.num_bursts > 0:
istart = b.istart
for i, (l0, l1) in enumerate(lim):
p[(istart >= l0) * (istart <= l1)] = i
P.append(p)
self.add(bp=P)
def _param_as_mch_array(self, par):
"""Regardless of `par` size, return an arrays with size == nch.
if `par` is scalar the arrays repeats the calar multiple times
if `par is a list/array must be of length `nch`.
"""
assert size(par) == 1 or size(par) == self.nch
return np.repeat(par, self.nch) if size(par) == 1 else np.asarray(par)
def bg_from(self, ph_sel):
"""Return the background rates for the specified photon selection.
"""
ph_sel = self._fix_ph_sel(ph_sel)
if ph_sel in self.ph_streams:
return self.bg[ph_sel]
elif ph_sel == Ph_sel(Dex='DAem'):
sel = Ph_sel(Dex='Dem'), Ph_sel(Dex='Aem')
bg = [b1 + b2 for b1, b2 in zip(self.bg[sel[0]], self.bg[sel[1]])]
elif ph_sel == Ph_sel(Aex='DAem'):
sel = Ph_sel(Aex='Dem'), Ph_sel(Aex='Aem')
bg = [b1 + b2 for b1, b2 in zip(self.bg[sel[0]], self.bg[sel[1]])]
elif ph_sel == Ph_sel(Dex='Dem', Aex='Dem'):
sel = Ph_sel(Dex='Dem'), Ph_sel(Aex='Dem')
bg = [b1 + b2 for b1, b2 in zip(self.bg[sel[0]], self.bg[sel[1]])]
elif ph_sel == Ph_sel(Dex='Aem', Aex='Aem'):
sel = Ph_sel(Dex='Aem'), Ph_sel(Aex='Aem')
bg = [b1 + b2 for b1, b2 in zip(self.bg[sel[0]], self.bg[sel[1]])]
elif ph_sel == Ph_sel(Dex='DAem', Aex='Aem'):
sel = (Ph_sel(Dex='Dem'), Ph_sel(Dex='Aem'), Ph_sel(Aex='Aem'))
bg = [b1 + b2 + b3 for b1, b2, b3 in
zip(self.bg[sel[0]], self.bg[sel[1]], self.bg[sel[2]])]
else:
raise NotImplementedError('Photon selection %s not implemented.' %
str(ph_sel))
return bg
def _calc_T(self, m, P, F=1., ph_sel=Ph_sel('all'), c=-1):
"""If P is None use F, otherwise uses both P *and* F (F defaults to 1).
When P is None, compute the time lag T for burst search according to::
T = (m - 1 - c) / (F * bg_rate)
"""
# Regardless of F and P sizes, FF and PP are arrays with size == nch
FF = self._param_as_mch_array(F)
PP = self._param_as_mch_array(P)
if P is None:
# NOTE: the following lambda ignores Pi
find_T = lambda m, Fi, Pi, bg: (m - 1 - c) / (bg * Fi)
else:
if F != 1:
print("WARNING: BS prob. th. with modified BG rate (F=%.1f)"
% F)
find_T = lambda m, Fi, Pi, bg: find_optimal_T_bga(bg*Fi, m, 1-Pi)
TT, T, rate_th = [], [], []
bg_bs = self.bg_from(ph_sel)
for bg_ch, F_ch, P_ch in zip(bg_bs, FF, PP):
# All "T" are in seconds
Tch = find_T(m, F_ch, P_ch, bg_ch)
TT.append(Tch)
T.append(Tch.mean())
rate_th.append(np.mean(m / Tch))
self.add(TT=TT, T=T, bg_bs=bg_bs, FF=FF, PP=PP, F=F, P=P,
rate_th=rate_th)
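# Worked example of the T formula above with c = -1 (illustrative
# numbers): m = 10, F = 6 and bg_rate = 2000 cps give
#     T = (10 - 1 + 1) / (6 * 2000) = 10 / 12000 ≈ 0.83 ms,
# i.e. a burst-search rate threshold of about m/T = 12 kcps.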
def _burst_search_rate(self, m, L, min_rate_cps, c=-1, ph_sel=Ph_sel('all'),
compact=False, index_allph=True, verbose=True,
pure_python=False):
"""Compute burst search using a fixed minimum photon rate.
The burst starts when, for `m` consecutive photons::
(m - 1 - c) / (t[last] - t[first]) >= min_rate_cps
Arguments:
min_rate_cps (float or array): minimum photon rate for burst start.
    If an array, it must contain one value per channel.
"""
bsearch = _get_bsearch_func(pure_python=pure_python)
Min_rate_cps = self._param_as_mch_array(min_rate_cps)
mburst = []
T_clk = (m - 1 - c) / Min_rate_cps / self.clk_p
for ich, t_clk in enumerate(T_clk):
ph_bs = ph = self.get_ph_times(ich=ich, ph_sel=ph_sel)
if compact:
ph_bs = self._ph_times_compact(ph, ph_sel)
label = '%s CH%d' % (ph_sel, ich + 1) if verbose else None
burstarray = bsearch(ph_bs, L, m, t_clk, label=label, verbose=verbose)
if burstarray.size > 1:
bursts = bslib.Bursts(burstarray)
if compact:
bursts.recompute_times(ph, out=bursts)
else:
bursts = bslib.Bursts.empty()
mburst.append(bursts)
self.add(mburst=mburst, rate_th=Min_rate_cps, T=T_clk * self.clk_p)
if ph_sel != Ph_sel('all') and index_allph:
self._fix_mburst_from(ph_sel=ph_sel)
def _burst_search_TT(self, m, L, ph_sel=Ph_sel('all'), verbose=True,
compact=False, index_allph=True, pure_python=False,
mute=False):
"""Compute burst search with params `m`, `L` on ph selection `ph_sel`
Requires the list of arrays `self.TT` with the max time-thresholds in
the different burst periods for each channel (use `._calc_T()`).
"""
bsearch = _get_bsearch_func(pure_python=pure_python)
self.recompute_bg_lim_ph_p(ph_sel=ph_sel, mute=mute)
MBurst = []
label = ''
for ich, T in enumerate(self.TT):
ph_bs = ph = self.get_ph_times(ich=ich, ph_sel=ph_sel)
if compact:
ph_bs = self._ph_times_compact(ph, ph_sel)
burstarray_ch_list = []
Tck = T / self.clk_p
for ip, (l0, l1) in enumerate(self.Lim[ich]):
if verbose:
label = '%s CH%d-%d' % (ph_sel, ich + 1, ip)
burstarray = bsearch(ph_bs, L, m, Tck[ip], slice_=(l0, l1 + 1),
label=label, verbose=verbose)
if burstarray.size > 1:
burstarray_ch_list.append(burstarray)
if len(burstarray_ch_list) > 0:
data = np.vstack(burstarray_ch_list)
bursts = bslib.Bursts(data)
if compact:
bursts.recompute_times(ph, out=bursts)
else:
bursts = bslib.Bursts.empty()
MBurst.append(bursts)
self.add(mburst=MBurst)
if ph_sel != Ph_sel('all') and index_allph:
# Convert the burst data to be relative to ph_times_m.
# Convert both Lim/Ph_p and mburst, as they are both needed
# to compute `.bp`.
self.recompute_bg_lim_ph_p(ph_sel=Ph_sel('all'), mute=mute)
self._fix_mburst_from(ph_sel=ph_sel, mute=mute)
def _fix_mburst_from(self, ph_sel, mute=False):
"""Convert burst data from any ph_sel to 'all' timestamps selection.
"""
assert isinstance(ph_sel, Ph_sel) and not self._is_allph(ph_sel)
pprint(' - Fixing burst data to refer to ph_times_m ... ', mute)
for bursts, mask in zip(self.mburst,
self.iter_ph_masks(ph_sel=ph_sel)):
bursts.recompute_index_expand(mask, out=bursts)
pprint('[DONE]\n', mute)
def burst_search(self, L=None, m=10, F=6., P=None, min_rate_cps=None,
ph_sel=Ph_sel('all'), compact=False, index_allph=True,
c=-1, computefret=True, max_rate=False, dither=False,
pure_python=False, verbose=False, mute=False, pax=False):
"""Performs a burst search with specified parameters.
This method performs a sliding-window burst search without
binning the timestamps. The burst starts when the rate of `m`
photons is above a minimum rate, and stops when the rate falls below
the threshold. The result of the burst search is stored in the
`mburst` attribute (a list of Bursts objects, one per channel)
containing start/stop times and indexes. By default, after burst
search, this method computes donor and acceptor counts, it applies
burst corrections (background, leakage, etc...) and computes
E (and S in case of ALEX). You can skip these steps by passing
`computefret=False`.
The minimum rate can be explicitly specified with the `min_rate_cps`
argument, or computed as a function of the background rate with the
`F` argument.
Parameters:
m (int): number of consecutive photons used to compute the
photon rate. Typical values 5-20. Default 10.
L (int or None): minimum number of photons in burst. If None
(default) L = m is used.
F (float): defines how many times higher than the background rate
is the minimum rate used for burst search
(`min rate = F * bg. rate`), assuming that `P = None` (default).
Typical values are 3-9. Default 6.
P (float): threshold for burst detection expressed as a
probability that a detected burst is not due to a Poisson
background. If not None, `P` overrides `F`. Note that the
background process is experimentally super-Poissonian, so this
probability is not physically very meaningful. Using this
argument is discouraged.
min_rate_cps (float or list/array): minimum rate in cps for burst
start. If not None, it has the precedence over `P` and `F`.
If non-scalar, contains one rate per each multispot channel.
Typical values range from 20e3 to 100e3.
ph_sel (Ph_sel object): defines the "photon selection" (or stream)
to be used for burst search. Default: all photons.
See :mod:`fretbursts.ph_sel` for details.
compact (bool): if True, a photon selection of only one excitation
period is required and the timestamps are "compacted" by
removing the "gaps" between each excitation period.
index_allph (bool): if True (default), the indexes of burst start
and stop (`istart`, `istop`) are relative to the full
timestamp array. If False, the indexes are relative to
timestamps selected by the `ph_sel` argument.
c (float): correction factor used in the rate vs time-lags relation.
`c` affects the computation of the burst-search parameter `T`.
When `F` is not None, `T = (m - 1 - c) / (F * bg_rate)`.
When using `min_rate_cps`, `T = (m - 1 - c) / min_rate_cps`.
computefret (bool): if True (default) compute donor and acceptor
counts, apply corrections (background, leakage, direct
excitation) and compute E (and S). If False, skip all these
steps and stop just after the initial burst search.
max_rate (bool): if True compute the max photon rate inside each
burst using the same `m` used for burst search. If False
(default) skip this step.
dither (bool): if True applies dithering corrections to burst
counts. Default False. See :meth:`Data.dither`.
pure_python (bool): if True, uses the pure python functions even
when optimized Cython functions are available.
pax (bool): this has effect only if measurement is PAX.
In this case, when True computes E using a PAX-enhanced
formula: ``(2 na) / (2 na + nd + nda)``.
Otherwise use the usual usALEX formula: ``na / (na + nd)``.
Quantities `nd`/`na` are D/A burst counts during D excitation
period, while `nda` is D emission during A excitation period.
Note:
when using `P` or `F` the background rates are needed, so
`.calc_bg()` must be called before the burst search.
Example:
d.burst_search(m=10, F=6)
Returns:
None, all the results are saved in the `Data` object.
"""
ph_sel = self._fix_ph_sel(ph_sel)
if compact:
self._assert_compact(ph_sel)
pprint(" - Performing burst search (verbose=%s) ..." % verbose, mute)
# Erase any previous burst data
self.delete_burst_data()
if L is None:
L = m
if min_rate_cps is not None:
# Saves rate_th in self
self._burst_search_rate(m=m, L=L, min_rate_cps=min_rate_cps, c=c,
ph_sel=ph_sel, compact=compact,
index_allph=index_allph,
verbose=verbose, pure_python=pure_python)
else:
# Compute TT, saves P and F in self
self._calc_T(m=m, P=P, F=F, ph_sel=ph_sel, c=c)
# Use TT and compute mburst
self._burst_search_TT(L=L, m=m, ph_sel=ph_sel, compact=compact,
index_allph=index_allph, verbose=verbose,
pure_python=pure_python, mute=mute)
pprint("[DONE]\n", mute)
pprint(" - Calculating burst periods ...", mute)
self._calc_burst_period() # writes bp
pprint("[DONE]\n", mute)
# (P, F) or rate_th are saved in _calc_T() or _burst_search_rate()
self.add(m=m, L=L, ph_sel=ph_sel)
# The correction flags are both set here and in calc_ph_num() so that
# they are always consistent. Case 1: we perform only burst search
# (with no call to calc_ph_num). Case 2: we re-call calc_ph_num()
# without doing a new burst search
self.add(bg_corrected=False, leakage_corrected=False,
dir_ex_corrected=False, dithering=False)
self._burst_search_postprocess(
computefret=computefret, max_rate=max_rate, dither=dither,
pure_python=pure_python, mute=mute, pax=pax)
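# Hedged end-to-end sketch (assuming `d` is a loaded Data object and the
# background was computed first, as required by the Note above):
#     d.calc_bg(bg.exp_fit, time_s=30, tail_min_us='auto')
#     d.burst_search(m=10, F=6)                            # all-photon search
#     d.burst_search(m=10, F=6, ph_sel=Ph_sel(Dex='Dem'))  # Dex-Dem stream
# Each call erases previous burst data and refills d.mburst, d.E, etc.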
def _burst_search_postprocess(self, computefret, max_rate, dither,
pure_python, mute, pax):
if computefret:
pprint(" - Counting D and A ph and calculating FRET ... \n", mute)
self.calc_fret(count_ph=True, corrections=True, dither=dither,
mute=mute, pure_python=pure_python, pax=pax)
pprint(" [DONE Counting D/A]\n", mute)
if max_rate:
pprint(" - Computing max rates in burst ...", mute)
self.calc_max_rate(m=self.m)
pprint("[DONE]\n", mute)
def calc_ph_num(self, alex_all=False, pure_python=False):
"""Computes number of D, A (and AA) photons in each burst.
Arguments:
alex_all (bool): if True and self.ALEX is True, computes also the
donor channel photons during acceptor excitation (`nda`)
pure_python (bool): if True, uses the pure python functions even
when the optimized Cython functions are available.
Returns:
Saves `nd`, `na`, `nt` (and, when present, `naa`, `nda`) in self.
Returns None.
"""
mch_count_ph_in_bursts = _get_mch_count_ph_in_bursts_func(pure_python)
if not self.alternated:
nt = [b.counts.astype(float) if b.num_bursts > 0 else np.array([])
for b in self.mburst]
A_em = [self.get_A_em(ich) for ich in range(self.nch)]
if isinstance(A_em[0], slice):
# This is to support the case of A-only or D-only data
n0 = [np.zeros(mb.num_bursts) for mb in self.mburst]
if A_em[0] == slice(None):
nd, na = n0, nt # A-only case
elif A_em[0] == slice(0):
nd, na = nt, n0 # D-only case
else:
# This is the usual case with photons in both D and A channels
na = mch_count_ph_in_bursts(self.mburst, A_em)
nd = [t - a for t, a in zip(nt, na)]
assert (nt[0] == na[0] + nd[0]).all()
else:
# The "new style" would be:
#Mask = [m for m in self.iter_ph_masks(Ph_sel(Dex='Dem'))]
Mask = [d_em * d_ex for d_em, d_ex in zip(self.D_em, self.D_ex)]
nd = mch_count_ph_in_bursts(self.mburst, Mask)
Mask = [a_em * d_ex for a_em, d_ex in zip(self.A_em, self.D_ex)]
na = mch_count_ph_in_bursts(self.mburst, Mask)
Mask = [a_em * a_ex for a_em, a_ex in zip(self.A_em, self.A_ex)]
naa = mch_count_ph_in_bursts(self.mburst, Mask)
self.add(naa=naa)
if alex_all or 'PAX' in self.meas_type:
Mask = [d_em * a_ex for d_em, a_ex in zip(self.D_em, self.A_ex)]
nda = mch_count_ph_in_bursts(self.mburst, Mask)
self.add(nda=nda)
if self.ALEX:
nt = [d + a + aa for d, a, aa in zip(nd, na, naa)]
assert (nt[0] == na[0] + nd[0] + naa[0]).all()
elif 'PAX' in self.meas_type:
nt = [d + a + da + aa for d, a, da, aa in zip(nd, na, nda, naa)]
assert (nt[0] == na[0] + nd[0] + nda[0] + naa[0]).all()
# This is a copy of na which will never be corrected
# (except for background). It is used to compute the
# equivalent of naa for PAX:
# naa~ = naa - nar
# where naa~ is the A emission due to direct excitation
# by A laser during D+A-excitation,
# nar is the uncorrected A-channel signal during D-excitation,
# and naa is the A-channel signal during D+A excitation.
nar = [a.copy() for a in na]
self.add(nar=nar)
self.add(nd=nd, na=na, nt=nt,
bg_corrected=False, leakage_corrected=False,
dir_ex_corrected=False, dithering=False)
def fuse_bursts(self, ms=0, process=True, mute=False):
"""Return a new :class:`Data` object with nearby bursts fused together.
Arguments:
ms (float): fuse all bursts separated by less than `ms` milliseconds.
If < 0 no burst is fused. Note that with ms = 0, overlapping
bursts are fused.
process (bool): if True (default), reprocess the burst data in
the new object applying corrections and computing FRET.
mute (bool): if True suppress any printed output.
"""
if ms < 0:
return self
mburst = mch_fuse_bursts(self.mburst, ms=ms, clk_p=self.clk_p)
new_d = Data(**self)
for k in ['E', 'S', 'nd', 'na', 'naa', 'nda', 'nar', 'nt', 'lsb', 'bp']:
if k in new_d:
new_d.delete(k)
new_d.add(bg_corrected=False, leakage_corrected=False,
dir_ex_corrected=False, dithering=False)
new_d.add(mburst=mburst, fuse=ms)
if 'bg' in new_d:
new_d._calc_burst_period()
if process:
pprint(" - Counting D and A ph and calculating FRET ... \n", mute)
new_d.calc_fret(count_ph=True, corrections=True,
dither=self.dithering, mute=mute, pax=self.pax)
pprint(" [DONE Counting D/A and FRET]\n", mute)
return new_d
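# Sketch: fusing overlapping or nearly-adjacent bursts of a hypothetical
# searched Data object `d`:
#     df = d.fuse_bursts(ms=0)    # fuse only overlapping bursts
#     df1 = d.fuse_bursts(ms=1)   # also fuse bursts closer than 1 ms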
##
# Burst selection and filtering
#
def select_bursts(self, filter_fun, negate=False, computefret=True,
args=None, **kwargs):
"""Return an object with bursts filtered according to `filter_fun`.
This is the main method to select bursts according to different
criteria. The selection rule is defined by the selection function
`filter_fun`. FRETBursts provides several predefined selection
functions; see :ref:`burst_selection`. New selection
functions can be defined and passed to this method to implement
arbitrary selection rules.
Arguments:
filter_fun (function): function used for burst selection
negate (boolean): If True, negates (i.e. take the complementary)
of the selection returned by `filter_fun`. Default `False`.
computefret (boolean): If True (default) recompute donor and
acceptor counts, corrections and FRET quantities (i.e. E, S)
in the new returned object.
args (tuple or None): positional arguments for `filter_fun()`
kwargs:
Additional keyword arguments passed to `filter_fun()`.
Returns:
A new :class:`Data` object containing only the selected bursts.
Note:
In order to save RAM, the timestamp arrays (`ph_times_m`)
of the new Data() points to the same arrays of the original
Data(). Conversely, all the bursts data (`mburst`, `nd`, `na`,
etc...) are new distinct objects.
"""
Masks, str_sel = self.select_bursts_mask(filter_fun, negate=negate,
return_str=True, args=args,
**kwargs)
d_sel = self.select_bursts_mask_apply(Masks, computefret=computefret,
str_sel=str_sel)
return d_sel
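# Hedged selection sketch; `select_bursts.E` is used elsewhere in this
# module, `select_bursts.size` is assumed available, and the thresholds
# are placeholders:
#     ds = d.select_bursts(select_bursts.size, th1=30)
#     dsE = ds.select_bursts(select_bursts.E, E1=0.2, E2=0.8)
# Selections compose because each call returns a new Data object.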
def select_bursts_mask(self, filter_fun, negate=False, return_str=False,
args=None, **kwargs):
"""Returns mask arrays to select bursts according to `filter_fun`.
The function `filter_fun` is called to compute the mask arrays for
each channel.
This method is useful when you want to apply a selection from one
object to a second object. Otherwise use :meth:`Data.select_bursts`.
Arguments:
filter_fun (function): function used for burst selection
negate (boolean): If True, negates (i.e. take the complementary)
of the selection returned by `filter_fun`. Default `False`.
return_str: if True return, for each channel, a tuple with
a bool array and a string that can be added to the measurement
name to indicate the selection. If False returns only
the bool array. Default False.
args (tuple or None): positional arguments for `filter_fun()`
kwargs:
Additional keyword arguments passed to `filter_fun()`.
Returns:
A list of boolean arrays (one per channel) that define the burst
selection. If `return_str` is True returns a list of tuples, where
each tuple is a bool array and a string.
See also:
:meth:`Data.select_bursts`, :meth:`Data.select_bursts_mask_apply`
"""
# Create the list of bool masks for the bursts selection
if args is None:
args = tuple()
M = [filter_fun(self, i, *args, **kwargs) for i in range(self.nch)]
# Make sure the selection function has the right return signature
msg = 'The second argument returned by `%s` must be a string.'
assert np.all([isinstance(m[1], str) for m in M]), msg % filter_fun
# Make sure all boolean masks have the right size
msg = ("The size of boolean masks returned by `%s` needs to match "
"the number of bursts.")
assert np.all([m[0].size == n for m, n in zip(M, self.num_bursts)]), (
msg % filter_fun)
Masks = [~m[0] if negate else m[0] for m in M]  # `~` inverts bool masks
str_sel = M[0][1]
if return_str:
return Masks, str_sel
else:
return Masks
def select_bursts_mask_apply(self, masks, computefret=True, str_sel=''):
"""Returns a new Data object with bursts selected according to `masks`.
This method selects bursts using a list of boolean arrays as input.
Since the user needs to create the boolean arrays first, this method
is useful when experimenting with new selection criteria that don't
have a dedicated selection function. Usually, however, it is easier
to select bursts through :meth:`Data.select_bursts` (using a
selection function).
Arguments:
masks (list of arrays): each element in this list is a boolean
array that selects bursts in a channel.
computefret (boolean): If True (default) recompute donor and
acceptor counts, corrections and FRET quantities (i.e. E, S)
in the new returned object.
Returns:
A new :class:`Data` object containing only the selected bursts.
Note:
In order to save RAM, the timestamp arrays (`ph_times_m`)
of the new Data() point to the same arrays of the original
Data(). Conversely, all the bursts data (`mburst`, `nd`, `na`,
etc...) are new distinct objects.
See also:
:meth:`Data.select_bursts`, :meth:`Data.select_mask`
"""
# Attributes of ds point to the same objects of self
ds = Data(**self)
## Copy the per-burst fields that must be filtered
used_fields = [field for field in Data.burst_fields if field in self]
for name in used_fields:
# Recreate the current attribute as a new list to avoid modifying
# the old list that is also in the original object.
# The list is initialized with empty arrays because this is the
# valid value when a ch has no bursts.
empty = bslib.Bursts.empty() if name == 'mburst' else np.array([])
ds.add(**{name: [empty] * self.nch})
# Assign the new data
for ich, mask in enumerate(masks):
if self[name][ich].size == 0:
continue # -> no bursts in ch
# Note that boolean masking implies numpy array copy
# On the contrary slicing only makes a new view of the array
ds[name][ich] = self[name][ich][mask]
# Recompute E and S
if computefret:
ds.calc_fret(count_ph=False, pax=self.pax)
# Add the annotation about the filter function
ds.s = list(self.s + [str_sel])  # using append would also modify self
return ds
##
# Burst corrections
#
def background_correction(self, relax_nt=False, mute=False):
"""Apply background correction to burst sizes (nd, na,...)
"""
if self.bg_corrected:
return -1
pprint(" - Applying background correction.\n", mute)
self.add(bg_corrected=True)
for ich, bursts in enumerate(self.mburst):
if bursts.num_bursts == 0:
continue # if no bursts skip this ch
period = self.bp[ich]
nd, na, bg_d, bg_a, width = self.expand(ich, width=True)
nd -= bg_d
na -= bg_a
if 'nar' in self:
# Apply background correction to PAX field nar
self.nar[ich][:] = na
if relax_nt:
# This does not guarantee that nt = nd + na
self.nt[ich] -= self.bg_from(Ph_sel('all'))[ich][period] * width
else:
self.nt[ich] = nd + na
if self.alternated:
bg_aa = self.bg_from(Ph_sel(Aex='Aem'))
self.naa[ich] -= bg_aa[ich][period] * width
if 'nda' in self:
bg_da = self.bg_from(Ph_sel(Aex='Dem'))
self.nda[ich] -= bg_da[ich][period] * width
self.nt[ich] += self.naa[ich]
if 'PAX' in self.meas_type:
self.nt[ich] += self.nda[ich]
def leakage_correction(self, mute=False):
"""Apply leakage correction to burst sizes (nd, na,...)
"""
if self.leakage_corrected:
return -1
elif self.leakage != 0:
pprint(" - Applying leakage correction.\n", mute)
Lk = self.get_leakage_array()
for i, num_bursts in enumerate(self.num_bursts):
if num_bursts == 0:
continue # if no bursts skip this ch
self.na[i] -= self.nd[i] * Lk[i]
self.nt[i] = self.nd[i] + self.na[i]
if self.ALEX:
self.nt[i] += self.naa[i]
elif 'PAX' in self.meas_type:
self.nt[i] += (self.nda[i] + self.naa[i])
self.add(leakage_corrected=True)
def direct_excitation_correction(self, mute=False):
"""Apply direct excitation correction to bursts (ALEX-only).
The applied correction is: na -= naa*dir_ex
"""
if self.dir_ex_corrected:
return -1
elif self.dir_ex != 0:
pprint(" - Applying direct excitation correction.\n", mute)
for i, num_bursts in enumerate(self.num_bursts):
if num_bursts == 0:
continue # if no bursts skip this ch
naa = self.naa[i]
if 'PAX' in self.meas_type:
naa = naa - self.nar[i] # do not modify inplace
self.na[i] -= naa * self.dir_ex
self.nt[i] = self.nd[i] + self.na[i]
if self.ALEX:
self.nt[i] += self.naa[i]
elif 'PAX' in self.meas_type:
self.nt[i] += (self.nda[i] + self.naa[i])
self.add(dir_ex_corrected=True)
def dither(self, lsb=2, mute=False):
"""Add dithering (uniform random noise) to burst counts (nd, na,...).
The dithering amplitude is the range -0.5*lsb .. 0.5*lsb.
"""
if self.dithering:
return -1
pprint(" - Applying burst-size dithering.\n", mute)
self.add(dithering=True)
for nd, na in zip(self.nd, self.na):
nd += lsb * (np.random.rand(nd.size) - 0.5)
na += lsb * (np.random.rand(na.size) - 0.5)
if self.alternated:
for naa in self.naa:
naa += lsb * (np.random.rand(naa.size) - 0.5)
if 'nda' in self:
for nda in self.nda:
nda += lsb * (np.random.rand(nda.size) - 0.5)
self.add(lsb=lsb)
def calc_chi_ch(self, E):
"""Calculate the gamma correction prefactor factor `chi_ch` (array).
Computes `chi_ch`, a channel-dependent prefactor for gamma used
to correct dispersion of E across channels.
Returns:
array of `chi_ch` correction factors (one per spot).
To apply the correction assign the returned array to `Data.chi_ch`.
Upon assignment E values for all bursts will be corrected.
"""
chi_ch = (1 / E.mean() - 1) / (1 / E - 1)
return chi_ch
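# Sketch of applying the chi_ch correction on a hypothetical multi-spot
# object `d`, using fitted per-channel E values (e.g. from fit_E_m):
#     E_fit = d.fit_E_m()                # per-channel E estimates
#     d.chi_ch = d.calc_chi_ch(E_fit)    # assignment triggers E/S update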
def corrections(self, mute=False):
"""Apply corrections on burst-counts: nd, na, nda, naa.
The corrections are: background, leakage (or bleed-through) and
direct excitation (dir_ex).
"""
self.background_correction(mute=mute)
self.leakage_correction(mute=mute)
if self.alternated:
self.direct_excitation_correction(mute=mute)
def _update_corrections(self):
"""Recompute corrections whose flag is True.
Checks the flags .bg_corrected, .leakage_corrected, .dir_ex_corrected,
.dithering and recomputes the correction if the corresponding flag
is True (i.e. if the correction was already applied).
Note that this method is not used for gamma and beta corrections
because these do not affect the `nd`, `na` and `naa` quantities but
are only applied when computing E, S and corrected size.
Unlike :meth:`corrections`, this allows recomputing
corrections that have already been applied.
"""
if 'mburst' not in self:
return # no burst search performed yet
old_bg_corrected = self.bg_corrected
old_leakage_corrected = self.leakage_corrected
old_dir_ex_corrected = self.dir_ex_corrected
old_dithering = self.dithering
self.calc_ph_num() # recompute uncorrected na, nd, nda, naa
if old_bg_corrected:
self.background_correction()
if old_leakage_corrected:
self.leakage_correction()
if old_dir_ex_corrected:
self.direct_excitation_correction()
if old_dithering:
self.dither(self.lsb)
# Recompute E and S with no corrections (because already applied)
self.calc_fret(count_ph=False, corrections=False, pax=self.pax)
@property
def leakage(self):
"""Spectral leakage (bleed-through) of D emission in the A channel.
"""
return self._leakage
@leakage.setter
def leakage(self, leakage):
self._update_leakage(leakage)
def _update_leakage(self, leakage):
"""Apply/update leakage (or bleed-through) correction.
"""
assert (np.size(leakage) == 1) or (np.size(leakage) == self.nch)
self.add(_leakage=np.asfarray(leakage), leakage_corrected=True)
self._update_corrections()
@property
def dir_ex(self):
"""Direct excitation correction factor."""
return self._dir_ex
@dir_ex.setter
def dir_ex(self, value):
self._update_dir_ex(value)
def _update_dir_ex(self, dir_ex):
"""Apply/update direct excitation correction with value `dir_ex`.
"""
assert np.size(dir_ex) == 1
self.add(_dir_ex=float(dir_ex), dir_ex_corrected=True)
self._update_corrections()
@property
def beta(self):
"""Beta factor used to correct S (compensates Dex and Aex unbalance).
"""
return self._beta
@beta.setter
def beta(self, value):
self._update_beta(value)
def _update_beta(self, beta):
"""Change the `beta` value and recompute E and S."""
assert np.size(beta) == 1
self.add(_beta=float(beta))
if 'mburst' in self:
# Recompute E and S and delete fitter objects
self.calc_fret(corrections=False, pax=self.pax)
@property
def chi_ch(self):
"""Per-channel relative gamma factor."""
return self._chi_ch
@chi_ch.setter
def chi_ch(self, value):
self._update_chi_ch(value)
def _update_chi_ch(self, chi_ch):
"""Change the `chi_ch` value and recompute E and S."""
msg = 'chi_ch is a per-channel correction and must have size == nch.'
assert np.size(chi_ch) == self.nch, msg
self.add(_chi_ch=np.asfarray(chi_ch))
if 'mburst' in self:
# Recompute E and S and delete fitter objects
self.calc_fret(corrections=False, pax=self.pax)
@property
def gamma(self):
"""Gamma correction factor (compensates DexDem and DexAem unbalance).
"""
return self._gamma
@gamma.setter
def gamma(self, value):
self._update_gamma(value)
def _update_gamma(self, gamma):
"""Change the `gamma` value and recompute E and S."""
assert (np.size(gamma) == 1) or (np.size(gamma) == self.nch)
self.add(_gamma=np.asfarray(gamma))
if 'mburst' in self:
# Recompute E and S and delete fitter objects
self.calc_fret(corrections=False, pax=self.pax)
def get_gamma_array(self):
"""Get the array of gamma factors, one per ch.
It always returns an array of gamma factors regardless of
whether `self.gamma` is scalar or array.
Each element of the returned array is multiplied by `chi_ch`.
"""
gamma = self.gamma
G = np.repeat(gamma, self.nch) if np.size(gamma) == 1 else gamma
# Use a non-inplace product: when `gamma` is an array, `G` is the same
# object as `self._gamma`, and `G *= chi_ch` would silently corrupt it.
G = G * self.chi_ch
return G
def get_leakage_array(self):
"""Get the array of leakage coefficients, one per ch.
It always returns an array of leakage coefficients regardless of
whether `self.leakage` is scalar or array.
Each element of the returned array is multiplied by `chi_ch`.
"""
leakage = self.leakage
Lk = np.r_[[leakage] * self.nch] if np.size(leakage) == 1 else leakage
# Non-inplace product: avoid mutating `self._leakage` when it is an array.
Lk = Lk * self.chi_ch
return Lk
##
# Methods to compute burst quantities: FRET, S, SBR, max_rate, etc ...
#
def calc_sbr(self, ph_sel=Ph_sel('all'), gamma=1.):
"""Return Signal-to-Background Ratio (SBR) for each burst.
Arguments:
ph_sel (Ph_sel object): object defining the photon selection
for which to compute the sbr. Changes the photons used for
burst size and the corresponding background rate. Valid values
here are Ph_sel('all'), Ph_sel(Dex='Dem'), Ph_sel(Dex='Aem').
See :mod:`fretbursts.ph_sel` for details.
gamma (float): gamma value used to compute corrected burst size
in the case `ph_sel` is Ph_sel('all'). Ignored otherwise.
Returns:
A list of arrays (one per channel) with one value per burst.
The list is also saved in `sbr` attribute.
"""
ph_sel = self._fix_ph_sel(ph_sel)
sbr = []
for ich, mb in enumerate(self.mburst):
if mb.num_bursts == 0:
sbr.append(np.array([]))
continue # if no bursts skip this ch
nd, na, bg_d, bg_a = self.expand(ich)
nt = self.burst_sizes_ich(ich=ich, gamma=gamma)
signal = {Ph_sel('all'): nt,
Ph_sel(Dex='Dem'): nd, Ph_sel(Dex='Aem'): na}
background = {Ph_sel('all'): bg_d + bg_a,
Ph_sel(Dex='Dem'): bg_d, Ph_sel(Dex='Aem'): bg_a}
sbr.append(signal[ph_sel] / background[ph_sel])
self.add(sbr=sbr)
return sbr
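# Usage sketch for calc_sbr() on a hypothetical searched Data object:
#     sbr_all = d.calc_sbr()                         # all-photon SBR
#     sbr_dd = d.calc_sbr(ph_sel=Ph_sel(Dex='Dem'))  # donor-stream SBR
# Each list element holds one SBR value per burst in that channel.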
def calc_burst_ph_func(self, func, func_kw, ph_sel=Ph_sel('all'),
compact=False, ich=0):
"""Evaluate a scalar function from photons in each burst.
This method allows calling an arbitrary function on the photon
timestamps of each burst. For example, if `func` is `np.mean` it
computes the mean timestamp of each burst.
Arguments:
func (callable): function that takes as first argument an array of
timestamps for one burst.
func_kw (dict): additional keyword arguments passed to `func`.
ph_sel (Ph_sel object): object defining the photon selection.
See :mod:`fretbursts.ph_sel` for details.
compact (bool): if True, a photon selection of only one excitation
period is required and the timestamps are "compacted" by
removing the "gaps" between each excitation period.
Returns:
A list of arrays (one element per channel). Each array's size is
equal to the number of bursts in the corresponding channel.
"""
if compact:
self._assert_compact(ph_sel)
kwargs = dict(func=func, func_kw=func_kw, compact=compact)
if self.alternated:
kwargs.update(alex_period=self.alex_period)
if compact:
kwargs.update(excitation_width=self._excitation_width(ph_sel))
results_mch = [burst_ph_stats(ph, bursts, mask=mask, **kwargs)
for ph, mask, bursts in
zip(self.iter_ph_times(),
self.iter_ph_masks(ph_sel=ph_sel),
self.mburst)]
return results_mch
def calc_max_rate(self, m, ph_sel=Ph_sel('all'), compact=False,
c=phrates.default_c):
"""Compute the max m-photon rate reached in each burst.
Arguments:
m (int): number of timestamps to use to compute the rate.
As for burst search, typical values are 5-20.
ph_sel (Ph_sel object): object defining the photon selection.
See :mod:`fretbursts.ph_sel` for details.
c (float): this parameter is used in the definition of the
rate estimator, which is `(m - 1 - c) / (t[last] - t[first])`.
For more details see :func:`.phtools.phrates.mtuple_rates`.
"""
ph_sel = self._fix_ph_sel(ph_sel)
Max_Rate = self.calc_burst_ph_func(func=phrates.mtuple_rates_max,
func_kw=dict(m=m, c=c),
ph_sel=ph_sel, compact=compact)
Max_Rate = [mr / self.clk_p - bg[bp] for bp, bg, mr in
zip(self.bp, self.bg_from(ph_sel), Max_Rate)]
params = dict(m=m, ph_sel=ph_sel, compact=compact)
self.add(max_rate=Max_Rate, max_rate_params=params)
def calc_fret(self, count_ph=False, corrections=True, dither=False,
mute=False, pure_python=False, pax=False):
"""Compute FRET (and stoichiometry if ALEX) for each burst.
This is a high-level function that can be run after burst search.
By default, it will count Donor and Acceptor photons, perform
corrections (background, leakage), and compute gamma-corrected
FRET efficiencies (and stoichiometry if ALEX).
Arguments:
count_ph (bool): if True, calls :meth:`calc_ph_num` to count
    Donor and Acceptor photons in each burst (default False).
corrections (bool): if True (default), applies background and
bleed-through correction to burst data
dither (bool): whether to apply dithering to burst size.
Default False.
mute (bool): whether to mute all the printed output. Default False.
pure_python (bool): if True, uses the pure python functions even
when the optimized Cython functions are available.
pax (bool): this has effect only if measurement is PAX.
In this case, when True computes E using a PAX-enhanced
formula: ``(2 na) / (2 na + nd + nda)``.
Otherwise use the usual usALEX formula: ``na / (na + nd)``.
Quantities `nd`/`na` are D/A burst counts during D excitation
period, while `nda` is D emission during A excitation period.
Returns:
None, all the results are saved in the object.
"""
if count_ph:
self.calc_ph_num(pure_python=pure_python, alex_all=True)
if dither:
self.dither(mute=mute)
if corrections:
self.corrections(mute=mute)
self._calculate_fret_eff(pax=pax)
if self.alternated:
self._calculate_stoich(pax=pax)
#self._calc_alex_hist()
for attr in ('ES_binwidth', 'ES_hist', 'E_fitter', 'S_fitter'):
# E_fitter and S_fitter are only attributes
# so we cannot use the membership syntax (attr in self)
if hasattr(self, attr):
self.delete(attr, warning=False)
def _aex_fraction(self):
"""Proportion of Aex period versus Dex + Aex."""
assert self.alternated
D_ON, A_ON = self.D_ON, self.A_ON
return ((A_ON[1] - A_ON[0]) /
(A_ON[1] - A_ON[0] + D_ON[1] - D_ON[0]))
def _aex_dex_ratio(self):
"""Ratio of Aex and Dex period durations."""
assert self.alternated
D_ON, A_ON = self.D_ON, self.A_ON
return (A_ON[1] - A_ON[0]) / (D_ON[1] - D_ON[0])
def _calculate_fret_eff(self, pax=False):
"""Compute FRET efficiency (`E`) for each burst."""
G = self.get_gamma_array()
if not pax:
E = [na / (g * nd + na) for nd, na, g in zip(self.nd, self.na, G)]
else:
alpha = 1 - self._aex_fraction()
E = [(na / alpha) / (g * (nd + nda) + (na / alpha))
for nd, na, nda, g in zip(self.nd, self.na, self.nda, G)]
self.add(E=E, pax=pax)
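# Worked example of the E formula above (illustrative counts): with
# gamma g = 1, nd = 30 and na = 70 give E = 70 / (30 + 70) = 0.7;
# with g = 0.8 the same burst gives E = 70 / (0.8*30 + 70) ≈ 0.745.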
def _calculate_stoich(self, pax=False):
"""Compute "stoichiometry" (the `S` parameter) for each burst."""
G = self.get_gamma_array()
naa = self.naa
if 'PAX' in self.meas_type:
naa = [self._get_naa_ich(i) for i in range(self.nch)]
if not pax:
S = [(g * d + a) / (g * d + a + aa / self.beta)
for d, a, aa, g in zip(self.nd, self.na, naa, G)]
else:
# This is a PAX-enhanced formula which uses information
# from both alternation periods in order to compute S
alpha = 1 - self._aex_fraction()
S = [(g * (d + da) + a / alpha) /
(g * (d + da) + a / alpha + aa / (alpha * self.beta))
for d, a, da, aa, g in
zip(self.nd, self.na, self.nda, naa, G)]
self.add(S=S)
def _calc_alex_hist(self, binwidth=0.05):
"""Compute the ALEX histogram with given bin width `bin_step`"""
if 'ES_binwidth' in self and self.ES_binwidth == binwidth:
return
ES_hist_tot = [ES_histog(E, S, binwidth) for E, S in
zip(self.E, self.S)]
E_bins, S_bins = ES_hist_tot[0][1], ES_hist_tot[0][2]
ES_hist = [h[0] for h in ES_hist_tot]
E_ax = E_bins[:-1] + 0.5 * binwidth
S_ax = S_bins[:-1] + 0.5 * binwidth
self.add(ES_hist=ES_hist, E_bins=E_bins, S_bins=S_bins,
E_ax=E_ax, S_ax=S_ax, ES_binwidth=binwidth)
##
# Methods for measurement info
#
def status(self, add="", noname=False):
"""Return a string with burst search, corrections and selection info.
"""
name = "" if noname else self.name
s = name
if 'L' in self: # burst search has been done
if 'rate_th' in self:
s += " BS_%s L%d m%d MR%d" % (self.ph_sel, self.L, self.m,
np.mean(self.rate_th) * 1e-3)
else:
P_str = '' if self.P is None else ' P%s' % self.P
s += " BS_%s L%d m%d F%.1f%s" % \
(self.ph_sel, self.L, self.m, np.mean(self.F), P_str)
s += " G%.3f" % np.mean(self.gamma)
if 'bg_fun' in self: s += " BG%s" % self.bg_fun.__name__[:-4]
if 'bg_time_s' in self: s += "-%ds" % self.bg_time_s
if 'fuse' in self: s += " Fuse%.1fms" % self.fuse
if 'bg_corrected' in self and self.bg_corrected:
s += " bg"
if 'leakage_corrected' in self and self.leakage_corrected:
s += " Lk%.3f" % np.mean(self.leakage*100)
if 'dir_ex_corrected' in self and self.dir_ex_corrected:
s += " dir%.1f" % (self.dir_ex*100)
if 'dithering' in self and self.dithering:
s += " Dith%d" % self.lsb
if 's' in self: s += ' '.join(self.s)
return s + add
@property
def name(self):
"""Measurement name: last subfolder + file name with no extension."""
if not hasattr(self, '_name'):
basename = str(os.path.splitext(os.path.basename(self.fname))[0])
name = basename
last_dir = str(os.path.basename(os.path.dirname(self.fname)))
if len(last_dir) > 0:
name = '_'.join([last_dir, basename])
self.add(_name=name)
return self._name
@name.setter
def name(self, value):
self.add(_name=value)
def Name(self, add=""):
"""Return short filename + status information."""
n = self.status(add=add)
return n
def __repr__(self):
return self.status()
def stats(self, string=False):
"""Print common statistics (BG rates, #bursts, mean size, ...)"""
s = print_burst_stats(self)
if string:
return s
else:
print(s)
##
# FRET fitting methods
#
def fit_E_m(self, E1=-1, E2=2, weights='size', gamma=1.):
"""Fit E in each channel with the mean using bursts in [E1,E2] range.
Note:
These two fits are equivalent (but the first is much faster)::
fit_E_m(weights='size')
fit_E_minimize(kind='E_size', weights='sqrt')
However `fit_E_minimize()` does not provide a model curve.
"""
Mask = self.select_bursts_mask(select_bursts.E, E1=E1, E2=E2)
fit_res, fit_model_F = zeros((self.nch, 2)), zeros(self.nch)
for ich, (nd, na, E, mask) in enumerate(zip(
self.nd, self.na, self.E, Mask)):
w = fret_fit.get_weights(nd[mask], na[mask],
weights=weights, gamma=gamma)
# Compute weighted mean
fit_res[ich, 0] = np.dot(w, E[mask])/w.sum()
# Compute weighted variance
fit_res[ich, 1] = np.sqrt(
np.dot(w, (E[mask] - fit_res[ich, 0])**2)/w.sum())
fit_model_F[ich] = mask.sum()/mask.size
fit_model = lambda x, p: SS.norm.pdf(x, p[0], p[1])
self.add(fit_E_res=fit_res, fit_E_name='Moments',
E_fit=fit_res[:, 0], fit_E_curve=True, fit_E_E1=E1,
fit_E_E2=E2, fit_E_model=fit_model,
fit_E_model_F=fit_model_F)
self.fit_E_calc_variance()
return self.E_fit
def fit_E_ML_poiss(self, E1=-1, E2=2, method=1, **kwargs):
"""ML fit for E modeling size ~ Poisson, using bursts in [E1,E2] range.
"""
assert method in [1, 2, 3]
fit_fun = {1: fret_fit.fit_E_poisson_na, 2: fret_fit.fit_E_poisson_nt,
3: fret_fit.fit_E_poisson_nd}
Mask = self.select_bursts_mask(select_bursts.E, E1=E1, E2=E2)
fit_res = zeros(self.nch)
for ich, mask in zip(range(self.nch), Mask):
nd, na, bg_d, bg_a = self.expand(ich)
bg_x = bg_d if method == 3 else bg_a
fit_res[ich] = fit_fun[method](nd[mask], na[mask],
bg_x[mask], **kwargs)
self.add(fit_E_res=fit_res, fit_E_name='MLE: na ~ Poisson',
E_fit=fit_res, fit_E_curve=False, fit_E_E1=E1, fit_E_E2=E2)
self.fit_E_calc_variance()
return self.E_fit
def fit_E_ML_binom(self, E1=-1, E2=2, **kwargs):
"""ML fit for E modeling na ~ Binomial, using bursts in [E1,E2] range.
"""
Mask = self.select_bursts_mask(select_bursts.E, E1=E1, E2=E2)
fit_res = np.array([fret_fit.fit_E_binom(_d[mask], _a[mask], **kwargs)
for _d, _a, mask in zip(self.nd, self.na, Mask)])
self.add(fit_E_res=fit_res, fit_E_name='MLE: na ~ Binomial',
E_fit=fit_res, fit_E_curve=False, fit_E_E1=E1, fit_E_E2=E2)
self.fit_E_calc_variance()
return self.E_fit
def fit_E_minimize(self, kind='slope', E1=-1, E2=2, **kwargs):
"""Fit E using method `kind` ('slope' or 'E_size') and bursts in [E1,E2]
If `kind` is 'slope' the fit function is fret_fit.fit_E_slope()
If `kind` is 'E_size' the fit function is fret_fit.fit_E_E_size()
Additional arguments in `kwargs` are passed to the fit function.
"""
assert kind in ['slope', 'E_size']
# Build a dictionary fun_d so we'll call the function fun_d[kind]
fun_d = dict(slope=fret_fit.fit_E_slope,
E_size=fret_fit.fit_E_E_size)
Mask = self.select_bursts_mask(select_bursts.E, E1=E1, E2=E2)
fit_res = np.array([fun_d[kind](nd[mask], na[mask], **kwargs)
for nd, na, mask in
zip(self.nd, self.na, Mask)])
fit_name = dict(slope='Linear slope fit', E_size='E_size fit')
self.add(fit_E_res=fit_res, fit_E_name=fit_name[kind],
E_fit=fit_res, fit_E_curve=False, fit_E_E1=E1, fit_E_E2=E2)
self.fit_E_calc_variance()
return self.E_fit
def fit_E_two_gauss_EM(self, fit_func=two_gaussian_fit_EM,
weights='size', gamma=1., **kwargs):
"""Fit the E population to a Gaussian mixture model using EM method.
Additional arguments in `kwargs` are passed to the fit_func().
"""
fit_res = zeros((self.nch, 5))
for ich, (nd, na, E) in enumerate(zip(self.nd, self.na, self.E)):
w = fret_fit.get_weights(nd, na, weights=weights, gamma=gamma)
fit_res[ich, :] = fit_func(E, weights=w, **kwargs)
self.add(fit_E_res=fit_res, fit_E_name=fit_func.__name__,
E_fit=fit_res[:, 2], fit_E_curve=True,
fit_E_model=two_gauss_mix_pdf,
fit_E_model_F=np.repeat(1, self.nch))
return self.E_fit
def fit_E_generic(self, E1=-1, E2=2, fit_fun=two_gaussian_fit_hist,
weights=None, gamma=1., **fit_kwargs):
"""Fit E in each channel with `fit_fun` using burst in [E1,E2] range.
All the fitting functions are defined in
:mod:`fretbursts.fit.gaussian_fitting`.
Parameters:
weights (string or None): specifies the type of weights
If not None `weights` will be passed to
`fret_fit.get_weights()`. `weights` can be not-None only when
using fit functions that accept weights (the ones ending in
`_hist` or `_EM`)
gamma (float): passed to `fret_fit.get_weights()` to compute
weights
All the additional arguments are passed to `fit_fun`. For example `p0`
or `mu_fix` can be passed (see `fit.gaussian_fitting` for details).
Note:
Use this method for CDF/PDF or hist fitting.
For EM fitting use :meth:`fit_E_two_gauss_EM()`.
"""
if fit_fun.__name__.startswith("gaussian_fit"):
fit_model = lambda x, p: SS.norm.pdf(x, p[0], p[1])
if 'mu0' not in fit_kwargs: fit_kwargs.update(mu0=0.5)
if 'sigma0' not in fit_kwargs: fit_kwargs.update(sigma0=0.3)
iE, nparam = 0, 2
elif fit_fun.__name__ == "two_gaussian_fit_hist_min_ab":
fit_model = two_gauss_mix_ab
if 'p0' not in fit_kwargs:
fit_kwargs.update(p0=[0, .05, 0.5, 0.6, 0.1, 0.5])
iE, nparam = 3, 6
elif fit_fun.__name__.startswith("two_gaussian_fit"):
fit_model = two_gauss_mix_pdf
if 'p0' not in fit_kwargs:
fit_kwargs.update(p0=[0, .05, 0.6, 0.1, 0.5])
iE, nparam = 2, 5
else:
raise ValueError("Fitting function not recognized.")
Mask = self.select_bursts_mask(select_bursts.E, E1=E1, E2=E2)
fit_res, fit_model_F = zeros((self.nch, nparam)), zeros(self.nch)
for ich, (nd, na, E, mask) in enumerate(zip(
self.nd, self.na, self.E, Mask)):
if '_hist' in fit_fun.__name__ or '_EM' in fit_fun.__name__:
if weights is None:
w = None
else:
w = fret_fit.get_weights(nd[mask], na[mask],
weights=weights, gamma=gamma)
fit_res[ich, :] = fit_fun(E[mask], weights=w, **fit_kwargs)
else:
# Non-histogram fits (PDF/CDF) do not support weights
fit_res[ich, :] = fit_fun(E[mask], **fit_kwargs)
fit_model_F[ich] = mask.sum()/mask.size
# Save enough info to generate a fit plot (see hist_fret in burst_plot)
self.add(fit_E_res=fit_res, fit_E_name=fit_fun.__name__,
E_fit=fit_res[:, iE], fit_E_curve=True, fit_E_E1=E1,
fit_E_E2=E2, fit_E_model=fit_model,
fit_E_model_F=fit_model_F, fit_E_weights=weights,
fit_E_gamma=gamma, fit_E_kwargs=fit_kwargs)
return self.E_fit
def fit_from(self, D):
"""Copy fit results from another Data() variable.
        Now that the fit methods accept E1, E2 parameters, this is probably useless.
"""
# NOTE Are 'fit_guess' and 'fit_fix' still used ?
fit_data = ['fit_E_res', 'fit_E_name', 'E_fit', 'fit_E_curve',
                    'fit_E_E1', 'fit_E_E2', 'fit_E_model',
'fit_E_model_F', 'fit_guess', 'fit_fix']
for name in fit_data:
if name in D:
self[name] = D[name]
setattr(self, name, self[name])
# Deal with the normalization to the number of bursts
self.add(fit_model_F=r_[[old_E.size/new_E.size \
for old_E, new_E in zip(D.E, self.E)]])
def fit_E_calc_variance(self, weights='sqrt', dist='DeltaE',
E_fit=None, E1=-1, E2=2):
"""Compute several versions of WEIGHTED std.dev. of the E estimator.
`weights` are multiplied *BEFORE* squaring the distance/error
`dist` can be 'DeltaE' or 'SlopeEuclid'
Note:
This method is still experimental
"""
assert dist in ['DeltaE', 'SlopeEuclid']
if E_fit is None:
E_fit = self.E_fit
E1 = self.fit_E_E1 if 'fit_E_E1' in self else -1
E2 = self.fit_E_E2 if 'fit_E_E2' in self else 2
else:
# If E_fit is not None the specified E1,E2 range is used
if E1 < 0 and E2 > 1:
pprint('WARN: E1 < 0 and E2 > 1 (wide range of E eff.)\n')
if size(E_fit) == 1 and self.nch > 0:
E_fit = np.repeat(E_fit, self.nch)
assert size(E_fit) == self.nch
E_sel = [Ei[(Ei > E1)*(Ei < E2)] for Ei in self.E]
Mask = self.select_bursts_mask(select_bursts.E, E1=E1, E2=E2)
E_var, E_var_bu, E_var_ph = \
zeros(self.nch), zeros(self.nch), zeros(self.nch)
for i, (Ech, nt, mask) in enumerate(zip(E_sel, self.nt, Mask)):
nt_s = nt[mask]
nd_s, na_s = self.nd[i][mask], self.na[i][mask]
w = fret_fit.get_weights(nd_s, na_s, weights=weights)
info_ph = nt_s.sum()
info_bu = nt_s.size
if dist == 'DeltaE':
distances = (Ech - E_fit[i])
elif dist == 'SlopeEuclid':
distances = fret_fit.get_dist_euclid(nd_s, na_s, E_fit[i])
residuals = distances * w
var = np.mean(residuals**2)
var_bu = np.mean(residuals**2)/info_bu
var_ph = np.mean(residuals**2)/info_ph
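            # Normalization variants: `var` is the raw weighted mean squared
            # residual, `var_bu` divides it by the burst count and `var_ph`
            # by the total photon count of the selected bursts.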
#lvar = np.mean(log(residuals**2))
#lvar_bu = np.mean(log(residuals**2)) - log(info_bu)
#lvar_ph = np.mean(log(residuals**2)) - log(info_ph)
E_var[i], E_var_bu[i], E_var_ph[i] = var, var_bu, var_ph
            assert not np.isnan(E_var[i])  # check there is NO NaN
self.add(E_var=E_var, E_var_bu=E_var_bu, E_var_ph=E_var_ph)
return E_var
| gpl-2.0 |
julien78910/CouchPotatoServer | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/savefrom.py | 185 | 1131 | # coding: utf-8
from __future__ import unicode_literals
import os.path
import re
from .common import InfoExtractor
class SaveFromIE(InfoExtractor):
IE_NAME = 'savefrom.net'
_VALID_URL = r'https?://[^.]+\.savefrom\.net/\#url=(?P<url>.*)$'
_TEST = {
'url': 'http://en.savefrom.net/#url=http://youtube.com/watch?v=UlVRAPW2WJY&utm_source=youtube.com&utm_medium=short_domains&utm_campaign=ssyoutube.com',
'info_dict': {
'id': 'UlVRAPW2WJY',
'ext': 'mp4',
'title': 'About Team Radical MMA | MMA Fighting',
'upload_date': '20120816',
'uploader': 'Howcast',
'uploader_id': 'Howcast',
'description': 're:(?s).* Hi, my name is Rene Dreifuss\. And I\'m here to show you some MMA.*',
},
'params': {
'skip_download': True
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = os.path.splitext(url.split('/')[-1])[0]
return {
'_type': 'url',
'id': video_id,
'url': mobj.group('url'),
}
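        # Flow sketch (illustrative): the dict above is a '_type': 'url'
        # result, so youtube-dl re-dispatches the wrapped URL (everything
        # after '#url=') to the matching extractor -- here the YouTube one,
        # which produces the 'UlVRAPW2WJY' metadata listed in _TEST.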
| gpl-3.0 |
steveb/heat | heat/tests/openstack/heat/test_cloud_config.py | 4 | 2112 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import mock
from heat.engine import stack
from heat.engine import template
from heat.tests import common
from heat.tests import utils
class CloudConfigTest(common.HeatTestCase):
def setUp(self):
super(CloudConfigTest, self).setUp()
self.ctx = utils.dummy_context()
self.properties = {
'cloud_config': {'foo': 'bar'}
}
self.stack = stack.Stack(
self.ctx, 'software_config_test_stack',
template.Template({
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'config_mysql': {
'Type': 'OS::Heat::CloudConfig',
'Properties': self.properties
}}}))
self.config = self.stack['config_mysql']
self.rpc_client = mock.MagicMock()
self.config._rpc_client = self.rpc_client
def test_handle_create(self):
config_id = 'c8a19429-7fde-47ea-a42f-40045488226c'
value = {'id': config_id}
self.rpc_client.create_software_config.return_value = value
self.config.id = 5
self.config.uuid = uuid.uuid4().hex
self.config.handle_create()
self.assertEqual(config_id, self.config.resource_id)
kwargs = self.rpc_client.create_software_config.call_args[1]
self.assertEqual({
'name': self.config.physical_resource_name(),
'config': '#cloud-config\n{foo: bar}\n',
'group': 'Heat::Ungrouped'
}, kwargs)
| apache-2.0 |
takuya/namebench | libnamebench/reporter.py | 173 | 16737 | # Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Report generation class."""
import csv
import datetime
import operator
import os.path
import platform
# external dependencies (from third_party)
import jinja2
import simplejson
import addr_util
import charts
import nameserver
import nameserver_list
import url_map
import util
# Only bother showing a percentage if we have this many tests.
MIN_RELEVANT_COUNT = 50
class ReportGenerator(object):
"""Generate reports - ASCII, HTML, etc."""
def __init__(self, config, nameservers, results, index=None, geodata=None,
status_callback=None):
"""Constructor.
Args:
config: A dictionary of configuration information.
nameservers: A list of nameserver objects to include in the report.
results: A dictionary of results from Benchmark.Run()
index: A dictionary of results for index hosts.
geodata: A dictionary of geographic information.
status_callback: where to send msg() calls.
"""
self.nameservers = nameservers
self.results = results
self.index = index
self.config = config
self.geodata = geodata
self.status_callback = status_callback
self.cached_averages = {}
self.cached_summary = None
def msg(self, msg, **kwargs):
if self.status_callback:
self.status_callback(msg, **kwargs)
def ComputeAverages(self):
"""Process all runs for all hosts, yielding an average for each host."""
if len(self.results) in self.cached_averages:
return self.cached_averages[len(self.results)]
records = []
for ns in self.results:
if ns.is_disabled or ns.is_hidden:
continue
failure_count = 0
nx_count = 0
run_averages = []
for test_run in self.results[ns]:
        # x: (record, req_type, duration, response, error)
total_count = len(test_run)
failure_count += len([x for x in test_run if not x[3]])
nx_count += len([x for x in test_run if x[3] and not x[3].answer])
duration = sum([x[2] for x in test_run])
run_averages.append(duration / len(test_run))
# This appears to be a safe use of averaging averages
overall_average = util.CalculateListAverage(run_averages)
(fastest, slowest) = self.FastestAndSlowestDurationForNameServer(ns)
records.append((ns, overall_average, run_averages, fastest, slowest,
failure_count, nx_count, total_count))
self.cached_averages[len(self.results)] = records
return self.cached_averages[len(self.results)]
def FastestAndSlowestDurationForNameServer(self, ns):
"""For a given nameserver, find the fastest/slowest non-error durations."""
fastest_duration = 2**32
slowest_duration = -1
durations = []
for test_run_results in self.results[ns]:
for (unused_host, unused_type, duration, response, unused_error) in test_run_results:
durations.append(duration)
if response and response.answer:
if duration < fastest_duration:
fastest_duration = duration
if duration > slowest_duration:
slowest_duration = duration
# If we have no error-free durations, settle for anything.
if fastest_duration == 2**32:
fastest_duration = min(durations)
if slowest_duration == -1:
slowest_duration = max(durations)
return (fastest_duration, slowest_duration)
def FastestNameServerResult(self):
"""Process all runs for all hosts, yielding an average for each host."""
# TODO(tstromberg): This should not count queries which failed.
fastest = [(ns, self.FastestAndSlowestDurationForNameServer(ns)[0]) for ns in self.results]
return sorted(fastest, key=operator.itemgetter(1))
def BestOverallNameServer(self):
"""Return the best nameserver we found."""
sorted_averages = sorted(self.ComputeAverages(), key=operator.itemgetter(1))
hosts = [x[0] for x in sorted_averages]
for host in hosts:
if not host.is_failure_prone and not host.is_disabled:
return host
# return something if none of them are good.
return hosts[0]
def NearestNameServers(self, count=2):
"""Return the nameservers with the least latency."""
min_responses = sorted(self.FastestNameServerResult(),
key=operator.itemgetter(1))
return [x[0] for x in min_responses if not x.is_disabled][0:count]
def _LowestLatencyAsciiChart(self):
"""Return a simple set of tuples to generate an ASCII chart from."""
fastest = self.FastestNameServerResult()
slowest_result = fastest[-1][1]
chart = []
for (ns, duration) in fastest:
textbar = util.DrawTextBar(duration, slowest_result)
chart.append((ns.name, textbar, duration))
return chart
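  # Each chart row is a (name, textbar, duration) tuple, e.g. (values are
  # illustrative):
  #
  #     ('UltraDNS', '##########', 18.1)
  #     ('SYS-192.168.1.1', '##############################', 58.7)
  #
  # util.DrawTextBar() scales every bar against the slowest result.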
def _MeanRequestAsciiChart(self):
"""Creates an ASCII Chart of Mean Response Time."""
sorted_averages = sorted(self.ComputeAverages(), key=operator.itemgetter(1))
max_result = sorted_averages[-1][1]
chart = []
for result in sorted_averages:
(ns, overall_mean) = result[0:2]
textbar = util.DrawTextBar(overall_mean, max_result)
chart.append((ns.name, textbar, overall_mean))
return chart
def CreateReport(self, format='ascii', output_fp=None, csv_path=None,
sharing_url=None, sharing_state=None):
"""Create a Report in a given format.
Args:
format: string (ascii, html, etc.) which defines what template to load.
output_fp: A File object to send the output to (optional)
csv_path: A string pathname to the CSV output to link to (optional)
sharing_url: A string URL where the results have been shared to. (optional)
sharing_state: A string showing what the shared result state is (optional)
Returns:
A rendered template (string)
"""
# First generate all of the charts necessary.
if format == 'ascii':
lowest_latency = self._LowestLatencyAsciiChart()
mean_duration = self._MeanRequestAsciiChart()
else:
lowest_latency = None
mean_duration = None
sorted_averages = sorted(self.ComputeAverages(), key=operator.itemgetter(1))
runs_data = [(x[0].name, x[2]) for x in sorted_averages]
mean_duration_url = charts.PerRunDurationBarGraph(runs_data)
min_duration_url = charts.MinimumDurationBarGraph(self.FastestNameServerResult())
distribution_url_200 = charts.DistributionLineGraph(self.DigestedResults(),
scale=200)
distribution_url = charts.DistributionLineGraph(self.DigestedResults(),
scale=self.config.timeout * 1000)
# Now generate all of the required textual information.
ns_summary = self._GenerateNameServerSummary()
best_ns = self.BestOverallNameServer()
recommended = [ns_summary[0]]
for row in sorted(ns_summary, key=operator.itemgetter('duration_min')):
if row['ip'] != ns_summary[0]['ip']:
recommended.append(row)
if len(recommended) == 3:
break
compare_title = 'Undecided'
compare_subtitle = 'Not enough servers to compare.'
compare_reference = None
for ns_record in ns_summary:
if ns_record.get('is_reference'):
if ns_record == ns_summary[0]:
compare_reference = ns_record
compare_title = 'N/A'
compare_subtitle = ''
elif len(ns_record['durations'][0]) >= MIN_RELEVANT_COUNT:
compare_reference = ns_record
compare_title = '%0.1f%%' % ns_summary[0]['diff']
compare_subtitle = 'Faster'
else:
compare_subtitle = 'Too few tests (needs %s)' % (MIN_RELEVANT_COUNT)
break
# Fragile, makes assumption about the CSV being in the same path as the HTML file
if csv_path:
csv_link = os.path.basename(csv_path)
else:
csv_link = None
template_name = '%s.tmpl' % format
template_path = util.FindDataFile(os.path.join('templates', template_name))
filtered_config = self.FilteredConfig()
template_dir = os.path.dirname(template_path)
env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir))
template = env.get_template(template_name)
rendered = template.render(
best_ns=best_ns,
timestamp=datetime.datetime.now(),
lowest_latency=lowest_latency,
version=self.config.version,
compare_subtitle=compare_subtitle,
compare_title=compare_title,
compare_reference=compare_reference,
sharing_url=sharing_url,
sharing_state=sharing_state,
config=filtered_config,
mean_duration=mean_duration,
ns_summary=ns_summary,
mean_duration_url=mean_duration_url,
min_duration_url=min_duration_url,
distribution_url=distribution_url,
distribution_url_200=distribution_url_200,
recommended=recommended,
csv_link=csv_link
)
if output_fp:
output_fp.write(rendered)
output_fp.close()
else:
return rendered
def FilteredConfig(self):
"""Generate a watered down config listing for our report."""
keys = [x for x in dir(self.config) if not x.startswith('_') and x not in ('config', 'site_url')]
config_items = []
for key in keys:
value = getattr(self.config, key)
# > values are ConfigParser internals. None values are just noise.
if isinstance(value, int) or isinstance(value, float) or isinstance(value, str):
config_items.append((key, value))
return sorted(config_items)
def DigestedResults(self):
"""Return a tuple of nameserver and all associated durations."""
duration_data = []
for ns in self.results:
durations = []
for test_run_results in self.results[ns]:
durations += [x[2] for x in test_run_results]
duration_data.append((ns, durations))
return duration_data
def _GenerateNameServerSummary(self):
if self.cached_summary:
return self.cached_summary
nsdata = {}
sorted_averages = sorted(self.ComputeAverages(), key=operator.itemgetter(1))
placed_at = -1
fastest = {}
fastest_normal = {}
reference = {}
# Fill in basic information for all nameservers, even those without scores.
fake_position = 1000
for ns in sorted(self.nameservers.visible_servers, key=operator.attrgetter('check_average')):
if ns.is_hidden:
continue
fake_position += 1
nsdata[ns] = {
'ip': ns.ip,
'name': ns.name,
'hostname': ns.hostname,
'version': ns.version,
'node_ids': list(ns.node_ids),
'sys_position': ns.system_position,
'is_failure_prone': ns.is_failure_prone,
'duration_min': float(ns.fastest_check_duration),
'is_reference': False,
'is_disabled': ns.is_disabled,
'check_average': ns.check_average,
'error_count': ns.error_count,
'timeout_count': ns.timeout_count,
'notes': url_map.CreateNoteUrlTuples(ns.notes),
'position': fake_position
}
# Fill the scores in.
for (ns, unused_avg, run_averages, fastest, slowest, unused_failures, nx_count, unused_total) in sorted_averages:
placed_at += 1
durations = []
for _ in self.results[ns]:
durations.append([x[2] for x in self.results[ns][0]])
nsdata[ns].update({
'position': placed_at,
'overall_average': util.CalculateListAverage(run_averages),
'averages': run_averages,
'duration_min': float(fastest),
'duration_max': slowest,
'nx_count': nx_count,
'durations': durations,
'index': self._GenerateIndexSummary(ns),
})
# Determine which nameserver to refer to for improvement scoring
if not ns.is_disabled:
if ns.system_position == 0:
reference = ns
elif not fastest_normal and not ns.HasTag('preferred'):
fastest_normal = ns
# If no reference was found, use the fastest non-global nameserver record.
if not reference:
if fastest_normal:
reference = fastest_normal
else:
# The second ns.
if len(sorted_averages) > 1:
reference = sorted_averages[1][0]
# Update the improvement scores for each nameserver.
if reference:
for ns in nsdata:
if nsdata[ns]['ip'] != nsdata[reference]['ip']:
if 'overall_average' in nsdata[ns]:
nsdata[ns]['diff'] = ((nsdata[reference]['overall_average'] /
nsdata[ns]['overall_average']) - 1) * 100
else:
nsdata[ns]['is_reference'] = True
self.cached_summary = sorted(nsdata.values(), key=operator.itemgetter('position'))
return self.cached_summary
def _GenerateIndexSummary(self, ns):
# Get the meat out of the index data.
index = []
if ns in self.index:
for host, req_type, duration, response, unused_x in self.index[ns]:
answer_count, ttl = self._ResponseToCountTtlText(response)[0:2]
index.append((host, req_type, duration, answer_count, ttl,
nameserver.ResponseToAscii(response)))
return index
def _GetPlatform(self):
my_platform = platform.system()
if my_platform == 'Darwin':
if os.path.exists('/usr/sbin/sw_vers') or os.path.exists('/usr/sbin/system_profiler'):
my_platform = 'Mac OS X'
if my_platform == 'Linux':
distro = platform.dist()[0]
if distro:
my_platform = 'Linux (%s)' % distro
return my_platform
def _CreateSharingData(self):
config = dict(self.FilteredConfig())
config['platform'] = self._GetPlatform()
# Purge sensitive information (be aggressive!)
purged_rows = []
for row in self._GenerateNameServerSummary():
# This will be our censored record.
p = dict(row)
p['notes'] = []
for note in row['notes']:
p['notes'].append({'text': addr_util.MaskStringWithIPs(note['text']), 'url': note['url']})
p['ip'], p['hostname'], p['name'] = addr_util.MaskPrivateHost(row['ip'], row['hostname'], row['name'])
if (addr_util.IsPrivateIP(row['ip']) or addr_util.IsLoopbackIP(row['ip'])
or addr_util.IsPrivateHostname(row['hostname'])):
p['node_ids'] = []
p['version'] = None
purged_rows.append(p)
return {'config': config, 'nameservers': purged_rows, 'geodata': self.geodata}
def CreateJsonData(self):
sharing_data = self._CreateSharingData()
return simplejson.dumps(sharing_data)
def _ResponseToCountTtlText(self, response):
"""For a given DNS response, parse the most important details out.
Args:
response: DNS response
Returns:
tuple of (answer_count, ttl, answer_text)
"""
answer_text = ''
answer_count = -1
ttl = -1
if response:
if response.answer:
answer_count = len(response.answer)
ttl = response.answer[0].ttl
answer_text = nameserver.ResponseToAscii(response)
return (answer_count, ttl, answer_text)
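  # Example (illustrative): a response carrying one answer record with a
  # 300s TTL yields roughly (1, 300, 'A 192.0.2.1'), while no response at
  # all returns the sentinel tuple (-1, -1, '').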
def SaveResultsToCsv(self, filename):
"""Write out a CSV file with detailed results on each request.
Args:
filename: full path on where to save results (string)
Sample output:
nameserver, test_number, test, type, duration, answer_count, ttl
"""
self.msg('Opening %s for write' % filename, debug=True)
csv_file = open(filename, 'w')
output = csv.writer(csv_file)
output.writerow(['IP', 'Name', 'Test_Num', 'Record',
'Record_Type', 'Duration', 'TTL', 'Answer_Count',
'Response'])
for ns in self.results:
self.msg('Saving detailed data for %s' % ns, debug=True)
for (test_run, test_results) in enumerate(self.results[ns]):
for (record, req_type, duration, response, error_msg) in test_results:
(answer_count, ttl, answer_text) = self._ResponseToCountTtlText(response)
output.writerow([ns.ip, ns.name, test_run, record, req_type, duration,
ttl, answer_count, answer_text, error_msg])
csv_file.close()
self.msg('%s saved.' % filename, debug=True)
| apache-2.0 |
sl2017/campos | campos_jobber_final/models/campos_jobber_accom_group.py | 1 | 1182 | # -*- coding: utf-8 -*-
# Copyright 2017 Stein & Gabelgaard ApS
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from openerp import api, fields, models, _
class CamposJobberAccomGroup(models.Model):
_name = 'campos.jobber.accom.group'
_description = 'Campos Jobber Accom Group' # TODO
name = fields.Char(required=True)
code = fields.Char(required=True)
owner_id = fields.Many2one('campos.event.participant', 'Owner')
accom_participant_ids = fields.One2many('campos.jobber.accomodation', 'accom_group_id', string='Participants')
number_participants = fields.Integer('# participants', compute='_compute_number_participants')
subcamp_id = fields.Many2one('campos.subcamp', 'Sub Camp')
_sql_constraints = [
('code_uniq', 'unique(code)', 'Code already in use. Choose another'),
('name_uniq', 'unique(name)', 'Name already in use. Choose another'),
]
@api.depends('accom_participant_ids')
@api.multi
def _compute_number_participants(self):
for cjag in self:
cjag.number_participants = len(cjag.accom_participant_ids) | agpl-3.0 |
lakshayg/tensorflow | tensorflow/python/eager/execute.py | 16 | 8289 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions called by the generated code to execute an eager-mode op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from google.protobuf import text_format
from tensorflow.core.framework import tensor_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import core
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.util import compat
def quick_execute(op_name, num_outputs, inputs, attrs, ctx, name=None):
"""Execute a TensorFlow operation.
Args:
op_name: Name of the TensorFlow operation (see REGISTER_OP in C++ code) to
execute.
num_outputs: The number of outputs of the operation to fetch.
(Explicitly provided instead of being inferred for performance
reasons).
inputs: A list of inputs to the operation. Each entry should be a Tensor, or
a value which can be passed to the Tensor constructor to create one.
attrs: A tuple with alternating string attr names and attr values for this
operation.
ctx: The value of context.context().
name: Customized name for the operation.
Returns:
List of output Tensor objects. The list is empty if there are no outputs
Raises:
An exception on error.
"""
device_name = ctx.device_name
# pylint: disable=protected-access
try:
tensors = pywrap_tensorflow.TFE_Py_Execute(ctx._handle, device_name,
op_name, inputs, attrs,
num_outputs)
except core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
six.raise_from(core._status_to_exception(e.code, message), None)
# pylint: enable=protected-access
return tensors
def execute_with_callbacks(op_name, num_outputs, inputs, attrs, ctx, name=None):
"""Monkey-patch to execute to enable execution callbacks."""
tensors = quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
for callback in ctx.post_execution_callbacks:
callback(op_name, name, attrs, inputs, tensors)
return tensors
execute = quick_execute
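# Callbacks are enabled by swapping this module-level binding (sketch,
# assuming the module is imported as `execute`):
#
#     execute.execute = execute.execute_with_callbacks
#
# after which the generated op wrappers route every call through the
# post-execution callback path.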
def record_gradient(unused_op_name, unused_inputs, unused_attrs, unused_results,
unused_name):
"""Import backprop if you want gradients recorded."""
pass
def make_float(v, arg_name):
if not isinstance(v, compat.real_types):
raise TypeError("Expected float for argument '%s' not %s." %
(arg_name, repr(v)))
return float(v)
def make_int(v, arg_name):
if isinstance(v, six.string_types):
raise TypeError("Expected int for argument '%s' not %s." %
(arg_name, repr(v)))
try:
return int(v)
except (ValueError, TypeError):
raise TypeError("Expected int for argument '%s' not %s." %
(arg_name, repr(v)))
def make_str(v, arg_name):
if not isinstance(v, compat.bytes_or_text_types):
raise TypeError("Expected string for argument '%s' not %s." %
(arg_name, repr(v)))
return compat.as_bytes(v) # Convert unicode strings to bytes.
def make_bool(v, arg_name):
if not isinstance(v, bool):
raise TypeError("Expected bool for argument '%s' not %s." %
(arg_name, repr(v)))
return v
def make_type(v, arg_name):
try:
v = dtypes.as_dtype(v).base_dtype
except TypeError:
raise TypeError("Expected DataType for argument '%s' not %s." %
(arg_name, repr(v)))
i = v.as_datatype_enum
return i
def make_shape(v, arg_name):
"""Convert v into a list."""
# Args:
# v: A TensorShapeProto, a list of ints, or a tensor_shape.TensorShape.
# arg_name: String, for error messages.
# Returns:
# None if the rank is unknown, otherwise a list of ints (or Nones in the
# position where the dimension is unknown).
try:
shape = tensor_shape.as_shape(v)
except TypeError as e:
raise TypeError("Error converting %s to a TensorShape: %s." % (arg_name, e))
except ValueError as e:
raise ValueError("Error converting %s to a TensorShape: %s." % (arg_name,
e))
if shape.ndims is None:
return None
else:
return shape.as_list()
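# Example conversions (illustrative):
#
#   make_shape([2, 3], 'x')                          -> [2, 3]
#   make_shape([None, 3], 'x')                       -> [None, 3]  # unknown dim
#   make_shape(tensor_shape.TensorShape(None), 'x')  -> None       # unknown rank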
def make_tensor(v, arg_name):
"""Ensure v is a TensorProto."""
if isinstance(v, tensor_pb2.TensorProto):
return v
elif isinstance(v, six.string_types):
pb = tensor_pb2.TensorProto()
text_format.Merge(v, pb)
return pb
raise TypeError(
"Don't know how to convert %s to a TensorProto for argument '%s'." %
(repr(v), arg_name))
def args_to_matching_eager(l, ctx, default_dtype=None):
"""Convert sequence `l` to eager same-type Tensors."""
EagerTensor = ops.EagerTensor # pylint: disable=invalid-name
for x in l:
if not isinstance(x, EagerTensor):
break
else: # note: intentional for-else
return l[0]._datatype_enum(), l # pylint: disable=protected-access
# TODO(josh11b): Could we do a better job if we also passed in the
# allowed dtypes when that was known?
# Is some input already a Tensor with a dtype?
dtype = None
for t in l:
if isinstance(t, EagerTensor):
dtype = t.dtype
break
internal_convert_to_tensor = ops.internal_convert_to_tensor
if dtype is None:
# Infer a dtype based on the first value, and use that dtype for the
# remaining values.
ret = []
for t in l:
ret.append(internal_convert_to_tensor(
t, dtype, preferred_dtype=default_dtype, ctx=ctx))
if dtype is None:
dtype = ret[-1].dtype
else:
ret = [internal_convert_to_tensor(t, dtype, ctx=ctx) for t in l]
return dtype.as_datatype_enum, ret
def convert_to_mixed_eager_tensors(values, ctx):
v = [
t if isinstance(t, ops.EagerTensor) else ops.EagerTensor(
t, context=ctx._handle, device=ctx.device_name) # pylint: disable=protected-access
for t in values
]
types = [t._datatype_enum() for t in v] # pylint: disable=protected-access
return types, v
def args_to_mixed_eager_tensors(lists, ctx):
"""Converts a list of same-length lists of values to eager tensors."""
assert len(lists) > 1
# Generate an error if len(lists[i]) is not the same for all i.
lists_ret = []
for l in lists[1:]:
if len(l) != len(lists[0]):
raise ValueError(
"Expected list arguments to be the same length: %d != %d (%r vs. %r)."
% (len(lists[0]), len(l), lists[0], l))
lists_ret.append([])
# Convert the first element of each list first, then the second element, etc.
types = []
for i in range(len(lists[0])):
dtype = None
# If any list has a Tensor, use that dtype
for l in lists:
if isinstance(l[i], ops.EagerTensor):
dtype = l[i].dtype
break
if dtype is None:
# Convert the first one and use its dtype.
lists_ret[0].append(ops.internal_convert_to_tensor(lists[0][i], ctx=ctx))
dtype = lists_ret[0][i].dtype
for j in range(1, len(lists)):
lists_ret[j].append(
ops.internal_convert_to_tensor(lists[j][i], dtype=dtype, ctx=ctx))
else:
# Convert everything to the found dtype.
for j in range(len(lists)):
lists_ret[j].append(
ops.internal_convert_to_tensor(lists[j][i], dtype=dtype, ctx=ctx))
types.append(dtype.as_datatype_enum)
return types, lists_ret
| apache-2.0 |
zephyrplugins/zephyr | zephyr.plugin.jython/jython2.5.2rc3/Lib/aifc.py | 91 | 33417 | """Stuff to parse AIFF-C and AIFF files.
Unless explicitly stated otherwise, the description below is true
both for AIFF-C files and AIFF files.
An AIFF-C file has the following structure.
+-----------------+
| FORM |
+-----------------+
| <size> |
+----+------------+
| | AIFC |
| +------------+
| | <chunks> |
| | . |
| | . |
| | . |
+----+------------+
An AIFF file has the string "AIFF" instead of "AIFC".
A chunk consists of an identifier (4 bytes) followed by a size (4 bytes,
big endian order), followed by the data. The size field does not include
the size of the 8 byte header.
The following chunk types are recognized.
FVER
<version number of AIFF-C defining document> (AIFF-C only).
MARK
<# of markers> (2 bytes)
list of markers:
<marker ID> (2 bytes, must be > 0)
<position> (4 bytes)
<marker name> ("pstring")
COMM
<# of channels> (2 bytes)
<# of sound frames> (4 bytes)
<size of the samples> (2 bytes)
<sampling frequency> (10 bytes, IEEE 80-bit extended
floating point)
in AIFF-C files only:
<compression type> (4 bytes)
<human-readable version of compression type> ("pstring")
SSND
<offset> (4 bytes, not used by this program)
<blocksize> (4 bytes, not used by this program)
<sound data>
A pstring consists of 1 byte length, a string of characters, and 0 or 1
byte pad to make the total length even.
Usage.
Reading AIFF files:
f = aifc.open(file, 'r')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods read(), seek(), and close().
In some types of audio files, if the setpos() method is not used,
the seek() method is not necessary.
This returns an instance of a class with the following public methods:
getnchannels() -- returns number of audio channels (1 for
mono, 2 for stereo)
getsampwidth() -- returns sample width in bytes
getframerate() -- returns sampling frequency
getnframes() -- returns number of audio frames
getcomptype() -- returns compression type ('NONE' for AIFF files)
getcompname() -- returns human-readable version of
compression type ('not compressed' for AIFF files)
getparams() -- returns a tuple consisting of all of the
above in the above order
getmarkers() -- get the list of marks in the audio file or None
if there are no marks
getmark(id) -- get mark with the specified id (raises an error
if the mark does not exist)
readframes(n) -- returns at most n frames of audio
rewind() -- rewind to the beginning of the audio stream
setpos(pos) -- seek to the specified position
tell() -- return the current position
close() -- close the instance (make it unusable)
The position returned by tell(), the position given to setpos() and
the position of marks are all compatible and have nothing to do with
the actual position in the file.
The close() method is called automatically when the class instance
is destroyed.
Writing AIFF files:
f = aifc.open(file, 'w')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods write(), tell(), seek(), and
close().
This returns an instance of a class with the following public methods:
aiff() -- create an AIFF file (AIFF-C default)
aifc() -- create an AIFF-C file
setnchannels(n) -- set the number of channels
setsampwidth(n) -- set the sample width
setframerate(n) -- set the frame rate
setnframes(n) -- set the number of frames
setcomptype(type, name)
-- set the compression type and the
human-readable compression type
setparams(tuple)
-- set all parameters at once
setmark(id, pos, name)
-- add specified mark to the list of marks
tell() -- return current position in output file (useful
in combination with setmark())
writeframesraw(data)
                -- write audio frames without patching up the
file header
writeframes(data)
-- write audio frames and patch up the file header
close() -- patch up the file header and close the
output file
You should set the parameters before the first writeframesraw or
writeframes. The total number of frames does not need to be set,
but when it is set to the correct value, the header does not have to
be patched up.
It is best to first set all parameters, except possibly the
compression type, and then write audio frames using writeframesraw.
When all frames have been written, either call writeframes('') or
close() to patch up the sizes in the header.
Marks can be added at any time. If there are any marks, you must call
close() after all frames have been written.
The close() method is called automatically when the class instance
is destroyed.
When a file is opened with the extension '.aiff', an AIFF file is
written, otherwise an AIFF-C file is written. This default can be
changed by calling aiff() or aifc() before the first writeframes or
writeframesraw.
"""
import struct
import __builtin__
__all__ = ["Error","open","openfp"]
class Error(Exception):
pass
_AIFC_version = 0xA2805140L # Version 1 of AIFF-C
_skiplist = 'COMT', 'INST', 'MIDI', 'AESD', \
'APPL', 'NAME', 'AUTH', '(c) ', 'ANNO'
def _read_long(file):
try:
return struct.unpack('>l', file.read(4))[0]
except struct.error:
raise EOFError
def _read_ulong(file):
try:
return struct.unpack('>L', file.read(4))[0]
except struct.error:
raise EOFError
def _read_short(file):
try:
return struct.unpack('>h', file.read(2))[0]
except struct.error:
raise EOFError
def _read_string(file):
length = ord(file.read(1))
if length == 0:
data = ''
else:
data = file.read(length)
if length & 1 == 0:
dummy = file.read(1)
return data
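# Pstring layout example (illustrative): the 4-character name 'NONE' is
# stored as '\x04NONE\x00' -- a length byte, the characters themselves, and
# one pad byte (consumed above as `dummy`) so the total stays an even
# number of bytes.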
_HUGE_VAL = 1.79769313486231e+308 # See <limits.h>
def _read_float(f): # 10 bytes
expon = _read_short(f) # 2 bytes
sign = 1
if expon < 0:
sign = -1
expon = expon + 0x8000
himant = _read_ulong(f) # 4 bytes
lomant = _read_ulong(f) # 4 bytes
if expon == himant == lomant == 0:
f = 0.0
elif expon == 0x7FFF:
f = _HUGE_VAL
else:
expon = expon - 16383
f = (himant * 0x100000000L + lomant) * pow(2.0, expon - 63)
return sign * f
def _write_short(f, x):
f.write(struct.pack('>h', x))
def _write_long(f, x):
f.write(struct.pack('>L', x))
def _write_string(f, s):
if len(s) > 255:
raise ValueError("string exceeds maximum pstring length")
f.write(chr(len(s)))
f.write(s)
if len(s) & 1 == 0:
f.write(chr(0))
def _write_float(f, x):
import math
if x < 0:
sign = 0x8000
x = x * -1
else:
sign = 0
if x == 0:
expon = 0
himant = 0
lomant = 0
else:
fmant, expon = math.frexp(x)
if expon > 16384 or fmant >= 1: # Infinity or NaN
expon = sign|0x7FFF
himant = 0
lomant = 0
else: # Finite
expon = expon + 16382
if expon < 0: # denormalized
fmant = math.ldexp(fmant, expon)
expon = 0
expon = expon | sign
fmant = math.ldexp(fmant, 32)
fsmant = math.floor(fmant)
himant = long(fsmant)
fmant = math.ldexp(fmant - fsmant, 32)
fsmant = math.floor(fmant)
lomant = long(fsmant)
_write_short(f, expon)
_write_long(f, himant)
_write_long(f, lomant)
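# Worked example (illustrative): the common sample rate 44100.0 encodes to
# the 10 bytes 40 0E AC 44 00 00 00 00 00 00 -- biased exponent 0x400E
# (16383 + 15) followed by the mantissa 0xAC44000000000000, since
# 0xAC44 == 44100 and the extended format stores an explicit leading-one bit.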
from chunk import Chunk
class Aifc_read:
# Variables used in this class:
#
# These variables are available to the user though appropriate
# methods of this class:
# _file -- the open file with methods read(), close(), and seek()
# set through the __init__() method
# _nchannels -- the number of audio channels
# available through the getnchannels() method
# _nframes -- the number of audio frames
# available through the getnframes() method
# _sampwidth -- the number of bytes per audio sample
# available through the getsampwidth() method
# _framerate -- the sampling frequency
# available through the getframerate() method
# _comptype -- the AIFF-C compression type ('NONE' if AIFF)
# available through the getcomptype() method
# _compname -- the human-readable AIFF-C compression type
# available through the getcomptype() method
# _markers -- the marks in the audio file
# available through the getmarkers() and getmark()
# methods
# _soundpos -- the position in the audio stream
# available through the tell() method, set through the
# setpos() method
#
# These variables are used internally only:
# _version -- the AIFF-C version number
# _decomp -- the decompressor from builtin module cl
# _comm_chunk_read -- 1 iff the COMM chunk has been read
# _aifc -- 1 iff reading an AIFF-C file
# _ssnd_seek_needed -- 1 iff positioned correctly in audio
# file for readframes()
# _ssnd_chunk -- instantiation of a chunk class for the SSND chunk
# _framesize -- size of one frame in the file
def initfp(self, file):
self._version = 0
self._decomp = None
self._convert = None
self._markers = []
self._soundpos = 0
self._file = Chunk(file)
if self._file.getname() != 'FORM':
raise Error, 'file does not start with FORM id'
formdata = self._file.read(4)
if formdata == 'AIFF':
self._aifc = 0
elif formdata == 'AIFC':
self._aifc = 1
else:
raise Error, 'not an AIFF or AIFF-C file'
self._comm_chunk_read = 0
while 1:
self._ssnd_seek_needed = 1
try:
chunk = Chunk(self._file)
except EOFError:
break
chunkname = chunk.getname()
if chunkname == 'COMM':
self._read_comm_chunk(chunk)
self._comm_chunk_read = 1
elif chunkname == 'SSND':
self._ssnd_chunk = chunk
dummy = chunk.read(8)
self._ssnd_seek_needed = 0
elif chunkname == 'FVER':
self._version = _read_ulong(chunk)
elif chunkname == 'MARK':
self._readmark(chunk)
elif chunkname in _skiplist:
pass
else:
raise Error, 'unrecognized chunk type '+chunk.chunkname
chunk.skip()
if not self._comm_chunk_read or not self._ssnd_chunk:
raise Error, 'COMM chunk and/or SSND chunk missing'
if self._aifc and self._decomp:
import cl
params = [cl.ORIGINAL_FORMAT, 0,
cl.BITS_PER_COMPONENT, self._sampwidth * 8,
cl.FRAME_RATE, self._framerate]
if self._nchannels == 1:
params[1] = cl.MONO
elif self._nchannels == 2:
params[1] = cl.STEREO_INTERLEAVED
else:
raise Error, 'cannot compress more than 2 channels'
self._decomp.SetParams(params)
def __init__(self, f):
if type(f) == type(''):
f = __builtin__.open(f, 'rb')
# else, assume it is an open file object already
self.initfp(f)
#
# User visible methods.
#
def getfp(self):
return self._file
def rewind(self):
self._ssnd_seek_needed = 1
self._soundpos = 0
def close(self):
if self._decomp:
self._decomp.CloseDecompressor()
self._decomp = None
self._file = None
def tell(self):
return self._soundpos
def getnchannels(self):
return self._nchannels
def getnframes(self):
return self._nframes
def getsampwidth(self):
return self._sampwidth
def getframerate(self):
return self._framerate
def getcomptype(self):
return self._comptype
def getcompname(self):
return self._compname
## def getversion(self):
## return self._version
def getparams(self):
return self.getnchannels(), self.getsampwidth(), \
self.getframerate(), self.getnframes(), \
self.getcomptype(), self.getcompname()
def getmarkers(self):
if len(self._markers) == 0:
return None
return self._markers
def getmark(self, id):
for marker in self._markers:
if id == marker[0]:
return marker
raise Error, 'marker %r does not exist' % (id,)
def setpos(self, pos):
if pos < 0 or pos > self._nframes:
raise Error, 'position not in range'
self._soundpos = pos
self._ssnd_seek_needed = 1
def readframes(self, nframes):
if self._ssnd_seek_needed:
self._ssnd_chunk.seek(0)
dummy = self._ssnd_chunk.read(8)
pos = self._soundpos * self._framesize
if pos:
self._ssnd_chunk.seek(pos + 8)
self._ssnd_seek_needed = 0
if nframes == 0:
return ''
data = self._ssnd_chunk.read(nframes * self._framesize)
if self._convert and data:
data = self._convert(data)
self._soundpos = self._soundpos + len(data) / (self._nchannels * self._sampwidth)
return data
#
# Internal methods.
#
def _decomp_data(self, data):
import cl
dummy = self._decomp.SetParam(cl.FRAME_BUFFER_SIZE,
len(data) * 2)
return self._decomp.Decompress(len(data) / self._nchannels,
data)
def _ulaw2lin(self, data):
import audioop
return audioop.ulaw2lin(data, 2)
def _adpcm2lin(self, data):
import audioop
if not hasattr(self, '_adpcmstate'):
# first time
self._adpcmstate = None
data, self._adpcmstate = audioop.adpcm2lin(data, 2,
self._adpcmstate)
return data
def _read_comm_chunk(self, chunk):
self._nchannels = _read_short(chunk)
self._nframes = _read_long(chunk)
self._sampwidth = (_read_short(chunk) + 7) / 8
self._framerate = int(_read_float(chunk))
self._framesize = self._nchannels * self._sampwidth
if self._aifc:
#DEBUG: SGI's soundeditor produces a bad size :-(
kludge = 0
if chunk.chunksize == 18:
kludge = 1
print 'Warning: bad COMM chunk size'
chunk.chunksize = 23
#DEBUG end
self._comptype = chunk.read(4)
#DEBUG start
if kludge:
length = ord(chunk.file.read(1))
if length & 1 == 0:
length = length + 1
chunk.chunksize = chunk.chunksize + length
chunk.file.seek(-1, 1)
#DEBUG end
self._compname = _read_string(chunk)
if self._comptype != 'NONE':
if self._comptype == 'G722':
try:
import audioop
except ImportError:
pass
else:
self._convert = self._adpcm2lin
self._framesize = self._framesize / 4
return
# for ULAW and ALAW try Compression Library
try:
import cl
except ImportError:
if self._comptype == 'ULAW':
try:
import audioop
self._convert = self._ulaw2lin
self._framesize = self._framesize / 2
return
except ImportError:
pass
raise Error, 'cannot read compressed AIFF-C files'
if self._comptype == 'ULAW':
scheme = cl.G711_ULAW
self._framesize = self._framesize / 2
elif self._comptype == 'ALAW':
scheme = cl.G711_ALAW
self._framesize = self._framesize / 2
else:
raise Error, 'unsupported compression type'
self._decomp = cl.OpenDecompressor(scheme)
self._convert = self._decomp_data
else:
self._comptype = 'NONE'
self._compname = 'not compressed'
def _readmark(self, chunk):
nmarkers = _read_short(chunk)
# Some files appear to contain invalid counts.
# Cope with this by testing for EOF.
try:
for i in range(nmarkers):
id = _read_short(chunk)
pos = _read_long(chunk)
name = _read_string(chunk)
if pos or name:
# some files appear to have
# dummy markers consisting of
# a position 0 and name ''
self._markers.append((id, pos, name))
except EOFError:
print 'Warning: MARK chunk contains only',
print len(self._markers),
if len(self._markers) == 1: print 'marker',
else: print 'markers',
print 'instead of', nmarkers
class Aifc_write:
# Variables used in this class:
#
# These variables are user settable through appropriate methods
# of this class:
# _file -- the open file with methods write(), close(), tell(), seek()
# set through the __init__() method
# _comptype -- the AIFF-C compression type ('NONE' in AIFF)
# set through the setcomptype() or setparams() method
# _compname -- the human-readable AIFF-C compression type
# set through the setcomptype() or setparams() method
# _nchannels -- the number of audio channels
# set through the setnchannels() or setparams() method
# _sampwidth -- the number of bytes per audio sample
# set through the setsampwidth() or setparams() method
# _framerate -- the sampling frequency
# set through the setframerate() or setparams() method
# _nframes -- the number of audio frames written to the header
# set through the setnframes() or setparams() method
# _aifc -- whether we're writing an AIFF-C file or an AIFF file
# set through the aifc() method, reset through the
# aiff() method
#
# These variables are used internally only:
# _version -- the AIFF-C version number
# _comp -- the compressor from builtin module cl
# _nframeswritten -- the number of audio frames actually written
# _datalength -- the size of the audio samples written to the header
# _datawritten -- the size of the audio samples actually written
def __init__(self, f):
if type(f) == type(''):
filename = f
f = __builtin__.open(f, 'wb')
else:
# else, assume it is an open file object already
filename = '???'
self.initfp(f)
if filename[-5:] == '.aiff':
self._aifc = 0
else:
self._aifc = 1
def initfp(self, file):
self._file = file
self._version = _AIFC_version
self._comptype = 'NONE'
self._compname = 'not compressed'
self._comp = None
self._convert = None
self._nchannels = 0
self._sampwidth = 0
self._framerate = 0
self._nframes = 0
self._nframeswritten = 0
self._datawritten = 0
self._datalength = 0
self._markers = []
self._marklength = 0
self._aifc = 1 # AIFF-C is default
def __del__(self):
if self._file:
self.close()
#
# User visible methods.
#
def aiff(self):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
self._aifc = 0
def aifc(self):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
self._aifc = 1
def setnchannels(self, nchannels):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
if nchannels < 1:
raise Error, 'bad # of channels'
self._nchannels = nchannels
def getnchannels(self):
if not self._nchannels:
raise Error, 'number of channels not set'
return self._nchannels
def setsampwidth(self, sampwidth):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
if sampwidth < 1 or sampwidth > 4:
raise Error, 'bad sample width'
self._sampwidth = sampwidth
def getsampwidth(self):
if not self._sampwidth:
raise Error, 'sample width not set'
return self._sampwidth
def setframerate(self, framerate):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
if framerate <= 0:
raise Error, 'bad frame rate'
self._framerate = framerate
def getframerate(self):
if not self._framerate:
raise Error, 'frame rate not set'
return self._framerate
def setnframes(self, nframes):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
self._nframes = nframes
def getnframes(self):
return self._nframeswritten
def setcomptype(self, comptype, compname):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
if comptype not in ('NONE', 'ULAW', 'ALAW', 'G722'):
raise Error, 'unsupported compression type'
self._comptype = comptype
self._compname = compname
def getcomptype(self):
return self._comptype
def getcompname(self):
return self._compname
## def setversion(self, version):
## if self._nframeswritten:
## raise Error, 'cannot change parameters after starting to write'
## self._version = version
def setparams(self, (nchannels, sampwidth, framerate, nframes, comptype, compname)):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
if comptype not in ('NONE', 'ULAW', 'ALAW', 'G722'):
raise Error, 'unsupported compression type'
self.setnchannels(nchannels)
self.setsampwidth(sampwidth)
self.setframerate(framerate)
self.setnframes(nframes)
self.setcomptype(comptype, compname)
def getparams(self):
if not self._nchannels or not self._sampwidth or not self._framerate:
raise Error, 'not all parameters set'
return self._nchannels, self._sampwidth, self._framerate, \
self._nframes, self._comptype, self._compname
def setmark(self, id, pos, name):
if id <= 0:
raise Error, 'marker ID must be > 0'
if pos < 0:
raise Error, 'marker position must be >= 0'
if type(name) != type(''):
raise Error, 'marker name must be a string'
for i in range(len(self._markers)):
if id == self._markers[i][0]:
self._markers[i] = id, pos, name
return
self._markers.append((id, pos, name))
def getmark(self, id):
for marker in self._markers:
if id == marker[0]:
return marker
raise Error, 'marker %r does not exist' % (id,)
def getmarkers(self):
if len(self._markers) == 0:
return None
return self._markers
def tell(self):
return self._nframeswritten
def writeframesraw(self, data):
self._ensure_header_written(len(data))
nframes = len(data) / (self._sampwidth * self._nchannels)
if self._convert:
data = self._convert(data)
self._file.write(data)
self._nframeswritten = self._nframeswritten + nframes
self._datawritten = self._datawritten + len(data)
def writeframes(self, data):
self.writeframesraw(data)
if self._nframeswritten != self._nframes or \
self._datalength != self._datawritten:
self._patchheader()
def close(self):
self._ensure_header_written(0)
if self._datawritten & 1:
# quick pad to even size
self._file.write(chr(0))
self._datawritten = self._datawritten + 1
self._writemarkers()
if self._nframeswritten != self._nframes or \
self._datalength != self._datawritten or \
self._marklength:
self._patchheader()
if self._comp:
self._comp.CloseCompressor()
self._comp = None
self._file.flush()
self._file = None
#
# Internal methods.
#
def _comp_data(self, data):
import cl
dummy = self._comp.SetParam(cl.FRAME_BUFFER_SIZE, len(data))
dummy = self._comp.SetParam(cl.COMPRESSED_BUFFER_SIZE, len(data))
return self._comp.Compress(self._nframes, data)
def _lin2ulaw(self, data):
import audioop
return audioop.lin2ulaw(data, 2)
def _lin2adpcm(self, data):
import audioop
if not hasattr(self, '_adpcmstate'):
self._adpcmstate = None
data, self._adpcmstate = audioop.lin2adpcm(data, 2,
self._adpcmstate)
return data
def _ensure_header_written(self, datasize):
if not self._nframeswritten:
if self._comptype in ('ULAW', 'ALAW'):
if not self._sampwidth:
self._sampwidth = 2
if self._sampwidth != 2:
raise Error, 'sample width must be 2 when compressing with ULAW or ALAW'
if self._comptype == 'G722':
if not self._sampwidth:
self._sampwidth = 2
if self._sampwidth != 2:
raise Error, 'sample width must be 2 when compressing with G7.22 (ADPCM)'
if not self._nchannels:
raise Error, '# channels not specified'
if not self._sampwidth:
raise Error, 'sample width not specified'
if not self._framerate:
raise Error, 'sampling rate not specified'
self._write_header(datasize)
def _init_compression(self):
if self._comptype == 'G722':
self._convert = self._lin2adpcm
return
try:
import cl
except ImportError:
if self._comptype == 'ULAW':
try:
import audioop
self._convert = self._lin2ulaw
return
except ImportError:
pass
raise Error, 'cannot write compressed AIFF-C files'
if self._comptype == 'ULAW':
scheme = cl.G711_ULAW
elif self._comptype == 'ALAW':
scheme = cl.G711_ALAW
else:
raise Error, 'unsupported compression type'
self._comp = cl.OpenCompressor(scheme)
params = [cl.ORIGINAL_FORMAT, 0,
cl.BITS_PER_COMPONENT, self._sampwidth * 8,
cl.FRAME_RATE, self._framerate,
cl.FRAME_BUFFER_SIZE, 100,
cl.COMPRESSED_BUFFER_SIZE, 100]
if self._nchannels == 1:
params[1] = cl.MONO
elif self._nchannels == 2:
params[1] = cl.STEREO_INTERLEAVED
else:
raise Error, 'cannot compress more than 2 channels'
self._comp.SetParams(params)
# the compressor produces a header which we ignore
dummy = self._comp.Compress(0, '')
self._convert = self._comp_data
def _write_header(self, initlength):
if self._aifc and self._comptype != 'NONE':
self._init_compression()
self._file.write('FORM')
if not self._nframes:
self._nframes = initlength / (self._nchannels * self._sampwidth)
self._datalength = self._nframes * self._nchannels * self._sampwidth
if self._datalength & 1:
self._datalength = self._datalength + 1
if self._aifc:
if self._comptype in ('ULAW', 'ALAW'):
self._datalength = self._datalength / 2
if self._datalength & 1:
self._datalength = self._datalength + 1
elif self._comptype == 'G722':
self._datalength = (self._datalength + 3) / 4
if self._datalength & 1:
self._datalength = self._datalength + 1
self._form_length_pos = self._file.tell()
commlength = self._write_form_length(self._datalength)
if self._aifc:
self._file.write('AIFC')
self._file.write('FVER')
_write_long(self._file, 4)
_write_long(self._file, self._version)
else:
self._file.write('AIFF')
self._file.write('COMM')
_write_long(self._file, commlength)
_write_short(self._file, self._nchannels)
self._nframes_pos = self._file.tell()
_write_long(self._file, self._nframes)
_write_short(self._file, self._sampwidth * 8)
_write_float(self._file, self._framerate)
if self._aifc:
self._file.write(self._comptype)
_write_string(self._file, self._compname)
self._file.write('SSND')
self._ssnd_length_pos = self._file.tell()
_write_long(self._file, self._datalength + 8)
_write_long(self._file, 0)
_write_long(self._file, 0)
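    # Header layout sketch for the AIFF-C branch (illustrative):
    #
    #   'FORM' <form_length> 'AIFC'
    #   'FVER' 4 <version>
    #   'COMM' <commlength> <nchannels> <nframes> <8 * sampwidth> <rate>
    #          <comptype> <compname>
    #   'SSND' <datalength + 8> <offset=0> <blocksize=0> <frames...>
    #
    # The *_pos offsets recorded above let _patchheader() rewrite the length
    # and frame-count fields in place once the real totals are known.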
def _write_form_length(self, datalength):
if self._aifc:
commlength = 18 + 5 + len(self._compname)
if commlength & 1:
commlength = commlength + 1
verslength = 12
else:
commlength = 18
verslength = 0
_write_long(self._file, 4 + verslength + self._marklength + \
8 + commlength + 16 + datalength)
return commlength
def _patchheader(self):
curpos = self._file.tell()
if self._datawritten & 1:
datalength = self._datawritten + 1
self._file.write(chr(0))
else:
datalength = self._datawritten
if datalength == self._datalength and \
self._nframes == self._nframeswritten and \
self._marklength == 0:
self._file.seek(curpos, 0)
return
self._file.seek(self._form_length_pos, 0)
dummy = self._write_form_length(datalength)
self._file.seek(self._nframes_pos, 0)
_write_long(self._file, self._nframeswritten)
self._file.seek(self._ssnd_length_pos, 0)
_write_long(self._file, datalength + 8)
self._file.seek(curpos, 0)
self._nframes = self._nframeswritten
self._datalength = datalength
def _writemarkers(self):
if len(self._markers) == 0:
return
self._file.write('MARK')
length = 2
for marker in self._markers:
id, pos, name = marker
length = length + len(name) + 1 + 6
if len(name) & 1 == 0:
length = length + 1
_write_long(self._file, length)
self._marklength = length + 8
_write_short(self._file, len(self._markers))
for marker in self._markers:
id, pos, name = marker
_write_short(self._file, id)
_write_long(self._file, pos)
_write_string(self._file, name)
def open(f, mode=None):
if mode is None:
if hasattr(f, 'mode'):
mode = f.mode
else:
mode = 'rb'
if mode in ('r', 'rb'):
return Aifc_read(f)
elif mode in ('w', 'wb'):
return Aifc_write(f)
else:
raise Error, "mode must be 'r', 'rb', 'w', or 'wb'"
openfp = open # B/W compatibility
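# A minimal writing sketch (illustrative; assumes ``data`` already holds raw
# 16-bit big-endian PCM frames):
#
#     w = open('out.aiff', 'w')
#     w.setnchannels(1)
#     w.setsampwidth(2)
#     w.setframerate(44100)
#     w.writeframes(data)
#     w.close()    # close() patches the header with the real sizes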
if __name__ == '__main__':
import sys
if not sys.argv[1:]:
sys.argv.append('/usr/demos/data/audio/bach.aiff')
fn = sys.argv[1]
f = open(fn, 'r')
print "Reading", fn
print "nchannels =", f.getnchannels()
print "nframes =", f.getnframes()
print "sampwidth =", f.getsampwidth()
print "framerate =", f.getframerate()
print "comptype =", f.getcomptype()
print "compname =", f.getcompname()
if sys.argv[2:]:
gn = sys.argv[2]
print "Writing", gn
g = open(gn, 'w')
g.setparams(f.getparams())
while 1:
data = f.readframes(1024)
if not data:
break
g.writeframes(data)
g.close()
f.close()
print "Done."
| epl-1.0 |
aristanetworks/neutron | neutron/agent/l3/ha_router.py | 3 | 13905 | # Copyright (c) 2015 Openstack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
import netaddr
from oslo_log import log as logging
from neutron.agent.l3 import router_info as router
from neutron.agent.linux import external_process
from neutron.agent.linux import ip_lib
from neutron.agent.linux import keepalived
from neutron.common import constants as n_consts
from neutron.common import utils as common_utils
from neutron.i18n import _LE
LOG = logging.getLogger(__name__)
HA_DEV_PREFIX = 'ha-'
IP_MONITOR_PROCESS_SERVICE = 'ip_monitor'
class HaRouter(router.RouterInfo):
def __init__(self, state_change_callback, *args, **kwargs):
super(HaRouter, self).__init__(*args, **kwargs)
self.ha_port = None
self.keepalived_manager = None
self.state_change_callback = state_change_callback
@property
def is_ha(self):
# TODO(Carl) Remove when refactoring to use sub-classes is complete.
return self.router is not None
@property
def ha_priority(self):
return self.router.get('priority', keepalived.HA_DEFAULT_PRIORITY)
@property
def ha_vr_id(self):
return self.router.get('ha_vr_id')
@property
def ha_state(self):
ha_state_path = self.keepalived_manager.get_full_config_file_path(
'state')
try:
with open(ha_state_path, 'r') as f:
return f.read()
except (OSError, IOError):
LOG.debug('Error while reading HA state for %s', self.router_id)
return None
@ha_state.setter
def ha_state(self, new_state):
ha_state_path = self.keepalived_manager.get_full_config_file_path(
'state')
try:
with open(ha_state_path, 'w') as f:
f.write(new_state)
except (OSError, IOError):
LOG.error(_LE('Error while writing HA state for %s'),
self.router_id)
def initialize(self, process_monitor):
super(HaRouter, self).initialize(process_monitor)
ha_port = self.router.get(n_consts.HA_INTERFACE_KEY)
if not ha_port:
LOG.error(_LE('Unable to process HA router %s without HA port'),
self.router_id)
return
self.ha_port = ha_port
self._init_keepalived_manager(process_monitor)
self.ha_network_added()
self.update_initial_state(self.state_change_callback)
self.spawn_state_change_monitor(process_monitor)
def _init_keepalived_manager(self, process_monitor):
self.keepalived_manager = keepalived.KeepalivedManager(
self.router['id'],
keepalived.KeepalivedConf(),
process_monitor,
conf_path=self.agent_conf.ha_confs_path,
namespace=self.ns_name)
config = self.keepalived_manager.config
interface_name = self.get_ha_device_name()
subnets = self.ha_port.get('subnets', [])
ha_port_cidrs = [subnet['cidr'] for subnet in subnets]
instance = keepalived.KeepalivedInstance(
'BACKUP',
interface_name,
self.ha_vr_id,
ha_port_cidrs,
nopreempt=True,
advert_int=self.agent_conf.ha_vrrp_advert_int,
priority=self.ha_priority)
instance.track_interfaces.append(interface_name)
if self.agent_conf.ha_vrrp_auth_password:
# TODO(safchain): use oslo.config types when it will be available
# in order to check the validity of ha_vrrp_auth_type
instance.set_authentication(self.agent_conf.ha_vrrp_auth_type,
self.agent_conf.ha_vrrp_auth_password)
config.add_instance(instance)
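    # For reference, the instance configured above renders to a keepalived
    # block roughly like the following (names and values illustrative):
    #
    #     vrrp_instance VR_1 {
    #         state BACKUP
    #         interface ha-<port-id>
    #         virtual_router_id 1
    #         priority 50
    #         nopreempt
    #         advert_int 2
    #         ...
    #     }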
def enable_keepalived(self):
self.keepalived_manager.spawn()
def disable_keepalived(self):
self.keepalived_manager.disable()
conf_dir = self.keepalived_manager.get_conf_dir()
shutil.rmtree(conf_dir)
def _get_keepalived_instance(self):
return self.keepalived_manager.config.get_instance(self.ha_vr_id)
def _get_primary_vip(self):
return self._get_keepalived_instance().get_primary_vip()
def get_ha_device_name(self):
return (HA_DEV_PREFIX + self.ha_port['id'])[:self.driver.DEV_NAME_LEN]
def ha_network_added(self):
interface_name = self.get_ha_device_name()
self.driver.plug(self.ha_port['network_id'],
self.ha_port['id'],
interface_name,
self.ha_port['mac_address'],
namespace=self.ns_name,
prefix=HA_DEV_PREFIX)
ip_cidrs = common_utils.fixed_ip_cidrs(self.ha_port['fixed_ips'])
self.driver.init_l3(interface_name, ip_cidrs,
namespace=self.ns_name,
preserve_ips=[self._get_primary_vip()])
def ha_network_removed(self):
self.driver.unplug(self.get_ha_device_name(),
namespace=self.ns_name,
prefix=HA_DEV_PREFIX)
self.ha_port = None
def _add_vip(self, ip_cidr, interface, scope=None):
instance = self._get_keepalived_instance()
instance.add_vip(ip_cidr, interface, scope)
def _remove_vip(self, ip_cidr):
instance = self._get_keepalived_instance()
instance.remove_vip_by_ip_address(ip_cidr)
def _clear_vips(self, interface):
instance = self._get_keepalived_instance()
instance.remove_vips_vroutes_by_interface(interface)
def _get_cidrs_from_keepalived(self, interface_name):
instance = self._get_keepalived_instance()
return instance.get_existing_vip_ip_addresses(interface_name)
def get_router_cidrs(self, device):
return set(self._get_cidrs_from_keepalived(device.name))
def routes_updated(self):
new_routes = self.router['routes']
instance = self._get_keepalived_instance()
instance.virtual_routes.extra_routes = [
keepalived.KeepalivedVirtualRoute(
route['destination'], route['nexthop'])
for route in new_routes]
self.routes = new_routes
def _add_default_gw_virtual_route(self, ex_gw_port, interface_name):
subnets = ex_gw_port.get('subnets', [])
default_gw_rts = []
for subnet in subnets:
gw_ip = subnet['gateway_ip']
if gw_ip:
# TODO(Carl) This is repeated everywhere. A method would
# be nice.
default_gw = (n_consts.IPv4_ANY if
netaddr.IPAddress(gw_ip).version == 4 else
n_consts.IPv6_ANY)
instance = self._get_keepalived_instance()
default_gw_rts.append(keepalived.KeepalivedVirtualRoute(
default_gw, gw_ip, interface_name))
instance.virtual_routes.gateway_routes = default_gw_rts
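    # Net effect: keepalived advertises a default route (n_consts.IPv4_ANY
    # and/or IPv6_ANY, i.e. 0.0.0.0/0 or ::/0) via each subnet's gateway IP,
    # so the route is only installed on the node holding the master state.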
def _should_delete_ipv6_lladdr(self, ipv6_lladdr):
"""Only the master should have any IP addresses configured.
Let keepalived manage IPv6 link local addresses, the same way we let
it manage IPv4 addresses. If the router is not in the master state,
we must delete the address first as it is autoconfigured by the kernel.
"""
manager = self.keepalived_manager
if manager.get_process().active:
if self.ha_state != 'master':
conf = manager.get_conf_on_disk()
managed_by_keepalived = conf and ipv6_lladdr in conf
if managed_by_keepalived:
return False
else:
return False
return True
def _disable_ipv6_addressing_on_interface(self, interface_name):
"""Disable IPv6 link local addressing on the device and add it as
a VIP to keepalived. This means that the IPv6 link local address
will only be present on the master.
"""
device = ip_lib.IPDevice(interface_name, namespace=self.ns_name)
ipv6_lladdr = ip_lib.get_ipv6_lladdr(device.link.address)
if self._should_delete_ipv6_lladdr(ipv6_lladdr):
device.addr.flush(n_consts.IP_VERSION_6)
self._remove_vip(ipv6_lladdr)
self._add_vip(ipv6_lladdr, interface_name, scope='link')
def _add_gateway_vip(self, ex_gw_port, interface_name):
for ip_cidr in common_utils.fixed_ip_cidrs(ex_gw_port['fixed_ips']):
self._add_vip(ip_cidr, interface_name)
self._add_default_gw_virtual_route(ex_gw_port, interface_name)
def add_floating_ip(self, fip, interface_name, device):
fip_ip = fip['floating_ip_address']
ip_cidr = common_utils.ip_to_cidr(fip_ip)
self._add_vip(ip_cidr, interface_name)
# TODO(Carl) Should this return status?
# return l3_constants.FLOATINGIP_STATUS_ACTIVE
def remove_floating_ip(self, device, ip_cidr):
self._remove_vip(ip_cidr)
def internal_network_added(self, port):
port_id = port['id']
interface_name = self.get_internal_device_name(port_id)
if not ip_lib.device_exists(interface_name, namespace=self.ns_name):
self.driver.plug(port['network_id'],
port_id,
interface_name,
port['mac_address'],
namespace=self.ns_name,
prefix=router.INTERNAL_DEV_PREFIX)
self._disable_ipv6_addressing_on_interface(interface_name)
for ip_cidr in common_utils.fixed_ip_cidrs(port['fixed_ips']):
self._add_vip(ip_cidr, interface_name)
def internal_network_removed(self, port):
super(HaRouter, self).internal_network_removed(port)
interface_name = self.get_internal_device_name(port['id'])
self._clear_vips(interface_name)
def _get_state_change_monitor_process_manager(self):
return external_process.ProcessManager(
self.agent_conf,
'%s.monitor' % self.router_id,
self.ns_name,
default_cmd_callback=self._get_state_change_monitor_callback())
def _get_state_change_monitor_callback(self):
ha_device = self.get_ha_device_name()
ha_cidr = self._get_primary_vip()
def callback(pid_file):
cmd = [
'neutron-keepalived-state-change',
'--router_id=%s' % self.router_id,
'--namespace=%s' % self.ns_name,
'--conf_dir=%s' % self.keepalived_manager.get_conf_dir(),
'--monitor_interface=%s' % ha_device,
'--monitor_cidr=%s' % ha_cidr,
'--pid_file=%s' % pid_file,
'--state_path=%s' % self.agent_conf.state_path,
'--user=%s' % os.geteuid(),
'--group=%s' % os.getegid()]
return cmd
return callback
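    # The callback above produces a command line roughly like (values
    # illustrative):
    #   neutron-keepalived-state-change --router_id=<uuid> --namespace=<ns>
    #       --conf_dir=<ha_confs>/<uuid> --monitor_interface=ha-<port-id>
    #       --monitor_cidr=<primary-vip> --pid_file=<pid> --state_path=<path>
    #       --user=<uid> --group=<gid>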
def spawn_state_change_monitor(self, process_monitor):
pm = self._get_state_change_monitor_process_manager()
pm.enable()
process_monitor.register(
self.router_id, IP_MONITOR_PROCESS_SERVICE, pm)
def destroy_state_change_monitor(self, process_monitor):
pm = self._get_state_change_monitor_process_manager()
process_monitor.unregister(
self.router_id, IP_MONITOR_PROCESS_SERVICE)
pm.disable()
def update_initial_state(self, callback):
ha_device = ip_lib.IPDevice(
self.get_ha_device_name(),
self.ns_name)
addresses = ha_device.addr.list()
cidrs = (address['cidr'] for address in addresses)
ha_cidr = self._get_primary_vip()
state = 'master' if ha_cidr in cidrs else 'backup'
self.ha_state = state
callback(self.router_id, state)
def external_gateway_added(self, ex_gw_port, interface_name):
self._plug_external_gateway(ex_gw_port, interface_name, self.ns_name)
self._add_gateway_vip(ex_gw_port, interface_name)
self._disable_ipv6_addressing_on_interface(interface_name)
def external_gateway_updated(self, ex_gw_port, interface_name):
self._plug_external_gateway(ex_gw_port, interface_name, self.ns_name)
ip_cidrs = common_utils.fixed_ip_cidrs(self.ex_gw_port['fixed_ips'])
for old_gateway_cidr in ip_cidrs:
self._remove_vip(old_gateway_cidr)
self._add_gateway_vip(ex_gw_port, interface_name)
def external_gateway_removed(self, ex_gw_port, interface_name):
self._clear_vips(interface_name)
super(HaRouter, self).external_gateway_removed(ex_gw_port,
interface_name)
def delete(self, agent):
self.destroy_state_change_monitor(self.process_monitor)
self.ha_network_removed()
self.disable_keepalived()
super(HaRouter, self).delete(agent)
def process(self, agent):
super(HaRouter, self).process(agent)
if self.ha_port:
self.enable_keepalived()
def enable_radvd(self, internal_ports=None):
if (self.keepalived_manager.get_process().active and
self.ha_state == 'master'):
super(HaRouter, self).enable_radvd(internal_ports)
| apache-2.0 |
OpenTouch/python-facette | src/facette/v1/groupentry.py | 1 | 1278 | # Copyright (c) 2014 Alcatel-Lucent Enterprise
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from facette.utils import *
import json
GROUP_ENTRY_ORIGIN = "origin"
GROUP_ENTRY_PATTERN = "pattern"
class GroupEntry:
def __init__(self, js=""):
self.entry = {}
self.origin = facette_to_json(GROUP_ENTRY_ORIGIN, js, self.entry)
self.pattern = facette_to_json(GROUP_ENTRY_PATTERN, js, self.entry)
def set(self, origin=None, pattern=None):
        self.origin = facette_set(origin, GROUP_ENTRY_ORIGIN, self.entry)
        self.pattern = facette_set(pattern, GROUP_ENTRY_PATTERN, self.entry)
def __str__(self):
return json.dumps(self.entry)
def __repr__(self):
return str(self)
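# Illustrative round trip (hedged: assumes facette_to_json reads the keys
# from a parsed JSON dict; the field values are made up):
#
#     entry = GroupEntry({"origin": "collectd", "pattern": "glob:web*.load"})
#     print entry   # -> {"origin": "collectd", "pattern": "glob:web*.load"}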
| apache-2.0 |
RafaelRMachado/qtwebkit | Tools/Scripts/webkitpy/tool/commands/download.py | 113 | 18403 | # Copyright (c) 2009, 2011 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from webkitpy.tool import steps
from webkitpy.common.checkout.changelog import ChangeLog
from webkitpy.common.config import urls
from webkitpy.common.system.executive import ScriptError
from webkitpy.tool.commands.abstractsequencedcommand import AbstractSequencedCommand
from webkitpy.tool.commands.stepsequence import StepSequence
from webkitpy.tool.comments import bug_comment_from_commit_text
from webkitpy.tool.grammar import pluralize
from webkitpy.tool.multicommandtool import Command
_log = logging.getLogger(__name__)
class Clean(AbstractSequencedCommand):
name = "clean"
help_text = "Clean the working copy"
steps = [
steps.DiscardLocalChanges,
]
def _prepare_state(self, options, args, tool):
options.force_clean = True
class Update(AbstractSequencedCommand):
name = "update"
help_text = "Update working copy (used internally)"
steps = [
steps.DiscardLocalChanges,
steps.Update,
]
class Build(AbstractSequencedCommand):
name = "build"
help_text = "Update working copy and build"
steps = [
steps.DiscardLocalChanges,
steps.Update,
steps.Build,
]
def _prepare_state(self, options, args, tool):
options.build = True
class BuildAndTest(AbstractSequencedCommand):
name = "build-and-test"
help_text = "Update working copy, build, and run the tests"
steps = [
steps.DiscardLocalChanges,
steps.Update,
steps.Build,
steps.RunTests,
]
class Land(AbstractSequencedCommand):
name = "land"
help_text = "Land the current working directory diff and updates the associated bug if any"
argument_names = "[BUGID]"
show_in_main_help = True
steps = [
steps.AddSvnMimetypeForPng,
steps.UpdateChangeLogsWithReviewer,
steps.ValidateReviewer,
steps.ValidateChangeLogs, # We do this after UpdateChangeLogsWithReviewer to avoid not having to cache the diff twice.
steps.Build,
steps.RunTests,
steps.Commit,
steps.CloseBugForLandDiff,
]
long_help = """land commits the current working copy diff (just as svn or git commit would).
land will NOT build and run the tests before committing, but you can use the --build option for that.
If a bug id is provided, or one can be found in the ChangeLog, land will update the bug after committing."""
def _prepare_state(self, options, args, tool):
changed_files = self._tool.scm().changed_files(options.git_commit)
return {
"changed_files": changed_files,
"bug_id": (args and args[0]) or tool.checkout().bug_id_for_this_commit(options.git_commit, changed_files),
}
class LandCowhand(AbstractSequencedCommand):
# Gender-blind term for cowboy, see: http://en.wiktionary.org/wiki/cowhand
name = "land-cowhand"
help_text = "Prepares a ChangeLog and lands the current working directory diff."
steps = [
steps.PrepareChangeLog,
steps.EditChangeLog,
steps.CheckStyle,
steps.ConfirmDiff,
steps.Build,
steps.RunTests,
steps.Commit,
steps.CloseBugForLandDiff,
]
def _prepare_state(self, options, args, tool):
options.check_style_filter = "-changelog"
class LandCowboy(LandCowhand):
name = "land-cowboy"
def _prepare_state(self, options, args, tool):
_log.warning("land-cowboy is deprecated, use land-cowhand instead.")
LandCowhand._prepare_state(self, options, args, tool)
class CheckStyleLocal(AbstractSequencedCommand):
name = "check-style-local"
help_text = "Run check-webkit-style on the current working directory diff"
steps = [
steps.CheckStyle,
]
class AbstractPatchProcessingCommand(Command):
# Subclasses must implement the methods below. We don't declare them here
# because we want to be able to implement them with mix-ins.
#
# pylint: disable=E1101
# def _fetch_list_of_patches_to_process(self, options, args, tool):
# def _prepare_to_process(self, options, args, tool):
# def _process_patch(self, options, args, tool):
@staticmethod
def _collect_patches_by_bug(patches):
bugs_to_patches = {}
for patch in patches:
bugs_to_patches[patch.bug_id()] = bugs_to_patches.get(patch.bug_id(), []) + [patch]
return bugs_to_patches
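    # For example (ids illustrative), three patches spanning two bugs group
    # as {10001: [patch_a, patch_b], 10002: [patch_c]}.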
def execute(self, options, args, tool):
self._prepare_to_process(options, args, tool)
patches = self._fetch_list_of_patches_to_process(options, args, tool)
# It's nice to print out total statistics.
bugs_to_patches = self._collect_patches_by_bug(patches)
_log.info("Processing %s from %s." % (pluralize("patch", len(patches)), pluralize("bug", len(bugs_to_patches))))
for patch in patches:
self._process_patch(patch, options, args, tool)
class AbstractPatchSequencingCommand(AbstractPatchProcessingCommand):
prepare_steps = None
main_steps = None
def __init__(self):
options = []
self._prepare_sequence = StepSequence(self.prepare_steps)
self._main_sequence = StepSequence(self.main_steps)
options = sorted(set(self._prepare_sequence.options() + self._main_sequence.options()))
AbstractPatchProcessingCommand.__init__(self, options)
def _prepare_to_process(self, options, args, tool):
try:
self.state = self._prepare_state(options, args, tool)
except ScriptError, e:
_log.error(e.message_with_output())
self._exit(e.exit_code or 2)
self._prepare_sequence.run_and_handle_errors(tool, options, self.state)
def _process_patch(self, patch, options, args, tool):
state = {}
state.update(self.state or {})
state["patch"] = patch
self._main_sequence.run_and_handle_errors(tool, options, state)
def _prepare_state(self, options, args, tool):
return None
class ProcessAttachmentsMixin(object):
def _fetch_list_of_patches_to_process(self, options, args, tool):
return map(lambda patch_id: tool.bugs.fetch_attachment(patch_id), args)
class ProcessBugsMixin(object):
def _fetch_list_of_patches_to_process(self, options, args, tool):
all_patches = []
for bug_id in args:
patches = tool.bugs.fetch_bug(bug_id).reviewed_patches()
_log.info("%s found on bug %s." % (pluralize("reviewed patch", len(patches)), bug_id))
all_patches += patches
if not all_patches:
_log.info("No reviewed patches found, looking for unreviewed patches.")
for bug_id in args:
patches = tool.bugs.fetch_bug(bug_id).patches()
_log.info("%s found on bug %s." % (pluralize("patch", len(patches)), bug_id))
all_patches += patches
return all_patches
class ProcessURLsMixin(object):
def _fetch_list_of_patches_to_process(self, options, args, tool):
all_patches = []
for url in args:
bug_id = urls.parse_bug_id(url)
if bug_id:
patches = tool.bugs.fetch_bug(bug_id).patches()
_log.info("%s found on bug %s." % (pluralize("patch", len(patches)), bug_id))
all_patches += patches
attachment_id = urls.parse_attachment_id(url)
if attachment_id:
                all_patches.append(tool.bugs.fetch_attachment(attachment_id))
return all_patches
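    # Note: each URL may name a whole bug (all of its patches are taken) or a
    # single attachment, as extracted by urls.parse_bug_id and
    # urls.parse_attachment_id respectively.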
class CheckStyle(AbstractPatchSequencingCommand, ProcessAttachmentsMixin):
name = "check-style"
help_text = "Run check-webkit-style on the specified attachments"
argument_names = "ATTACHMENT_ID [ATTACHMENT_IDS]"
main_steps = [
steps.DiscardLocalChanges,
steps.Update,
steps.ApplyPatch,
steps.CheckStyle,
]
class BuildAttachment(AbstractPatchSequencingCommand, ProcessAttachmentsMixin):
name = "build-attachment"
help_text = "Apply and build patches from bugzilla"
argument_names = "ATTACHMENT_ID [ATTACHMENT_IDS]"
main_steps = [
steps.DiscardLocalChanges,
steps.Update,
steps.ApplyPatch,
steps.Build,
]
class BuildAndTestAttachment(AbstractPatchSequencingCommand, ProcessAttachmentsMixin):
name = "build-and-test-attachment"
help_text = "Apply, build, and test patches from bugzilla"
argument_names = "ATTACHMENT_ID [ATTACHMENT_IDS]"
main_steps = [
steps.DiscardLocalChanges,
steps.Update,
steps.ApplyPatch,
steps.Build,
steps.RunTests,
]
class AbstractPatchApplyingCommand(AbstractPatchSequencingCommand):
prepare_steps = [
steps.EnsureLocalCommitIfNeeded,
steps.CleanWorkingDirectory,
steps.Update,
]
main_steps = [
steps.ApplyPatchWithLocalCommit,
]
long_help = """Updates the working copy.
Downloads and applies the patches, creating local commits if necessary."""
class ApplyAttachment(AbstractPatchApplyingCommand, ProcessAttachmentsMixin):
name = "apply-attachment"
help_text = "Apply an attachment to the local working directory"
argument_names = "ATTACHMENT_ID [ATTACHMENT_IDS]"
show_in_main_help = True
class ApplyFromBug(AbstractPatchApplyingCommand, ProcessBugsMixin):
name = "apply-from-bug"
help_text = "Apply reviewed patches from provided bugs to the local working directory"
argument_names = "BUGID [BUGIDS]"
show_in_main_help = True
class ApplyWatchList(AbstractPatchSequencingCommand, ProcessAttachmentsMixin):
name = "apply-watchlist"
help_text = "Applies the watchlist to the specified attachments"
argument_names = "ATTACHMENT_ID [ATTACHMENT_IDS]"
main_steps = [
steps.DiscardLocalChanges,
steps.Update,
steps.ApplyPatch,
steps.ApplyWatchList,
]
long_help = """"Applies the watchlist to the specified attachments.
Downloads the attachment, applies it locally, runs the watchlist against it, and updates the bug with the result."""
class AbstractPatchLandingCommand(AbstractPatchSequencingCommand):
main_steps = [
steps.DiscardLocalChanges,
steps.Update,
steps.ApplyPatch,
steps.ValidateChangeLogs,
steps.ValidateReviewer,
steps.Build,
steps.RunTests,
steps.Commit,
steps.ClosePatch,
steps.CloseBug,
]
long_help = """Checks to make sure builders are green.
Updates the working copy.
Applies the patch.
Builds.
Runs the layout tests.
Commits the patch.
Clears the flags on the patch.
Closes the bug if no patches are marked for review."""
class LandAttachment(AbstractPatchLandingCommand, ProcessAttachmentsMixin):
name = "land-attachment"
help_text = "Land patches from bugzilla, optionally building and testing them first"
argument_names = "ATTACHMENT_ID [ATTACHMENT_IDS]"
show_in_main_help = True
class LandFromBug(AbstractPatchLandingCommand, ProcessBugsMixin):
name = "land-from-bug"
help_text = "Land all patches on the given bugs, optionally building and testing them first"
argument_names = "BUGID [BUGIDS]"
show_in_main_help = True
class LandFromURL(AbstractPatchLandingCommand, ProcessURLsMixin):
name = "land-from-url"
help_text = "Land all patches on the given URLs, optionally building and testing them first"
argument_names = "URL [URLS]"
class ValidateChangelog(AbstractSequencedCommand):
name = "validate-changelog"
help_text = "Validate that the ChangeLogs and reviewers look reasonable"
long_help = """Examines the current diff to see whether the ChangeLogs
and the reviewers listed in the ChangeLogs look reasonable.
"""
steps = [
steps.ValidateChangeLogs,
steps.ValidateReviewer,
]
class AbstractRolloutPrepCommand(AbstractSequencedCommand):
argument_names = "REVISION [REVISIONS] REASON"
def _commit_info(self, revision):
commit_info = self._tool.checkout().commit_info_for_revision(revision)
if commit_info and commit_info.bug_id():
# Note: Don't print a bug URL here because it will confuse the
# SheriffBot because the SheriffBot just greps the output
# of create-rollout for bug URLs. It should do better
# parsing instead.
_log.info("Preparing rollout for bug %s." % commit_info.bug_id())
else:
_log.info("Unable to parse bug number from diff.")
return commit_info
def _prepare_state(self, options, args, tool):
revision_list = []
for revision in str(args[0]).split():
if revision.isdigit():
revision_list.append(int(revision))
else:
raise ScriptError(message="Invalid svn revision number: " + revision)
revision_list.sort()
# We use the earliest revision for the bug info
earliest_revision = revision_list[0]
state = {
"revision": earliest_revision,
"revision_list": revision_list,
"reason": args[1],
}
commit_info = self._commit_info(earliest_revision)
if commit_info:
state["bug_id"] = commit_info.bug_id()
cc_list = sorted([party.bugzilla_email()
for party in commit_info.responsible_parties()
if party.bugzilla_email()])
            # FIXME: We should use the list as the canonical representation.
state["bug_cc"] = ",".join(cc_list)
return state
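    # Shape of the returned state (values illustrative):
    #   {'revision': 1000, 'revision_list': [1000, 1005], 'reason': '...',
    #    'bug_id': 12345, 'bug_cc': 'a@webkit.org,b@webkit.org'}
    # where the bug_* keys are only present when commit info was found.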
class PrepareRollout(AbstractRolloutPrepCommand):
name = "prepare-rollout"
help_text = "Revert the given revision(s) in the working copy and prepare ChangeLogs with revert reason"
long_help = """Updates the working copy.
Applies the inverse diff for the provided revision(s).
Creates an appropriate rollout ChangeLog, including a trac link and bug link.
"""
steps = [
steps.DiscardLocalChanges,
steps.Update,
steps.RevertRevision,
steps.PrepareChangeLogForRevert,
]
class CreateRollout(AbstractRolloutPrepCommand):
name = "create-rollout"
help_text = "Creates a bug to track the broken SVN revision(s) and uploads a rollout patch."
steps = [
steps.DiscardLocalChanges,
steps.Update,
steps.RevertRevision,
steps.CreateBug,
steps.PrepareChangeLogForRevert,
steps.PostDiffForRevert,
]
def _prepare_state(self, options, args, tool):
state = AbstractRolloutPrepCommand._prepare_state(self, options, args, tool)
# Currently, state["bug_id"] points to the bug that caused the
# regression. We want to create a new bug that blocks the old bug
# so we move state["bug_id"] to state["bug_blocked"] and delete the
# old state["bug_id"] so that steps.CreateBug will actually create
# the new bug that we want (and subsequently store its bug id into
# state["bug_id"])
state["bug_blocked"] = state["bug_id"]
del state["bug_id"]
state["bug_title"] = "REGRESSION(r%s): %s" % (state["revision"], state["reason"])
state["bug_description"] = "%s broke the build:\n%s" % (urls.view_revision_url(state["revision"]), state["reason"])
# FIXME: If we had more context here, we could link to other open bugs
# that mention the test that regressed.
if options.parent_command == "sheriff-bot":
state["bug_description"] += """
This is an automatic bug report generated by the sheriff-bot. If this bug
report was created because of a flaky test, please file a bug for the flaky
test (if we don't already have one on file) and dup this bug against that bug
so that we can track how often these flaky tests cause pain.
"Only you can prevent forest fires." -- Smokey the Bear
"""
return state
class Rollout(AbstractRolloutPrepCommand):
name = "rollout"
show_in_main_help = True
help_text = "Revert the given revision(s) in the working copy and optionally commit the revert and re-open the original bug"
long_help = """Updates the working copy.
Applies the inverse diff for the provided revision.
Creates an appropriate rollout ChangeLog, including a trac link and bug link.
Opens the generated ChangeLogs in $EDITOR.
Shows the prepared diff for confirmation.
Commits the revert and updates the bug (including re-opening the bug if necessary)."""
steps = [
steps.DiscardLocalChanges,
steps.Update,
steps.RevertRevision,
steps.PrepareChangeLogForRevert,
steps.EditChangeLog,
steps.ConfirmDiff,
steps.Build,
steps.Commit,
steps.ReopenBugAfterRollout,
]
| gpl-2.0 |
homann/stand-browser | test/test_stand_browser_dockwidget.py | 1 | 1123 | # coding=utf-8
"""DockWidget test.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'magnus@homann.se'
__date__ = '2017-02-18'
__copyright__ = 'Copyright 2017, Magnus Homann'
import unittest
from PyQt4.QtGui import QDockWidget
from stand_browser_dockwidget import StandBrowserDockWidget
from utilities import get_qgis_app
QGIS_APP = get_qgis_app()
class StandBrowserDockWidgetTest(unittest.TestCase):
"""Test dockwidget works."""
def setUp(self):
"""Runs before each test."""
self.dockwidget = StandBrowserDockWidget(None)
def tearDown(self):
"""Runs after each test."""
self.dockwidget = None
def test_dockwidget_ok(self):
"""Test we can click OK."""
pass
if __name__ == "__main__":
suite = unittest.makeSuite(StandBrowserDialogTest)
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite)
| gpl-2.0 |
macarthur-lab/xbrowse | xbrowse_server/api/views.py | 1 | 67273 | import datetime
import csv
import json
import logging
import sys
import traceback
from collections import defaultdict
from django.views.decorators.csrf import csrf_exempt
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404
from django.http import HttpResponse, HttpResponseBadRequest, Http404
from django.core.exceptions import PermissionDenied, ObjectDoesNotExist
from settings import LOGIN_URL
from seqr.utils.gene_utils import get_queried_genes
from xbrowse.analysis_modules.combine_mendelian_families import get_variants_by_family_for_gene
from xbrowse_server.analysis.diagnostic_search import get_gene_diangostic_info
from xbrowse_server.base.model_utils import update_xbrowse_model, get_or_create_xbrowse_model, delete_xbrowse_model, \
create_xbrowse_model
from xbrowse_server.base.models import Project, Family, FamilySearchFlag, VariantNote, ProjectTag, VariantTag, GeneNote, \
AnalysedBy, VariantFunctionalData
from seqr.models import Individual as SeqrIndividual, MatchmakerResult
from xbrowse_server.api.utils import get_project_and_family_for_user, get_project_and_cohort_for_user, \
add_extra_info_to_variants_project, add_notes_to_genes, get_variant_notes, get_variant_tags, get_variant_functional_data
from xbrowse.variant_search.family import get_variants_with_inheritance_mode
from xbrowse_server.api import utils as api_utils
from xbrowse_server.api import forms as api_forms
from xbrowse_server.mall import get_reference, get_datastore, get_mall
from xbrowse_server.search_cache import utils as cache_utils
from xbrowse_server.decorators import log_request
from xbrowse_server.server_utils import JSONResponse
import utils
from xbrowse.variant_search import cohort as cohort_search
from xbrowse import Variant
from xbrowse.analysis_modules.mendelian_variant_search import MendelianVariantSearchSpec
from xbrowse.core import displays as xbrowse_displays
from xbrowse_server import server_utils
from . import basicauth
from xbrowse_server import user_controls
from django.utils import timezone
from xbrowse_server.phenotips.reporting_utilities import phenotype_entry_metric_for_individual
from xbrowse_server.base.models import ANALYSIS_STATUS_CHOICES
from xbrowse_server.matchmaker.utilities import get_all_clinical_data_for_family
from xbrowse_server.matchmaker.utilities import is_a_valid_patient_structure
from xbrowse_server.matchmaker.utilities import generate_slack_notification_for_seqr_match
from xbrowse_server.matchmaker.utilities import gather_all_annotated_genes_in_seqr
from xbrowse_server.matchmaker.utilities import find_projects_with_families_in_matchbox
from xbrowse_server.matchmaker.utilities import find_families_of_this_project_in_matchbox
from xbrowse_server.matchmaker.utilities import extract_hpo_id_list_from_mme_patient_struct
import requests
from django.contrib.admin.views.decorators import staff_member_required
logger = logging.getLogger()
@csrf_exempt
@basicauth.logged_in_or_basicauth()
@log_request('projects_api')
def projects(request):
"""
List the projects that this user has access to
"""
user_projects = user_controls.get_projects_for_user(request.user)
project_ids = [p.project_id for p in user_projects]
response_format = request.GET.get('format', 'json')
if response_format == 'json':
return JSONResponse({'projects': project_ids})
elif response_format == 'tsv':
return HttpResponse('\n'.join(project_ids))
else:
raise Exception("Invalid format")
@csrf_exempt
@login_required
@log_request('mendelian_variant_search_api')
def mendelian_variant_search(request):
# TODO: how about we move project getter into the form, and just test for authX here?
# esp because error should be described in json, not just 404
request_dict = request.GET or request.POST
project, family = get_project_and_family_for_user(request.user, request_dict)
form = api_forms.MendelianVariantSearchForm(request_dict)
if form.is_valid():
search_spec = form.cleaned_data['search_spec']
search_spec.family_id = family.family_id
try:
variants = api_utils.calculate_mendelian_variant_search(search_spec, family, user=request.user)
except Exception as e:
traceback.print_exc()
return JSONResponse({
'is_error': True,
'error': str(e.args[0]) if e.args else str(e)
})
hashable_search_params = search_spec.toJSON()
hashable_search_params['family_id'] = family.family_id
list_of_variants = [v.toJSON(encode_indiv_id=True) for v in variants]
search_hash = cache_utils.save_results_for_spec(project.project_id, hashable_search_params, list_of_variants)
add_extra_info_to_variants_project(get_reference(), project, variants, add_family_tags=True, add_populations=True)
return_type = request_dict.get('return_type', 'json')
if return_type == 'json':
return JSONResponse({
'is_error': False,
'variants': [v.toJSON() for v in variants],
'search_hash': search_hash,
})
elif return_type == 'csv':
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="results_{}.csv"'.format(search_hash)
writer = csv.writer(response)
indiv_ids = family.indiv_ids_with_variant_data()
headers = xbrowse_displays.get_variant_display_headers(get_mall(project), project, indiv_ids)
writer.writerow(headers)
for variant in variants:
fields = xbrowse_displays.get_display_fields_for_variant(get_mall(project), project, variant, indiv_ids, genes_to_return=search_spec.variant_filter.genes)
writer.writerow(fields)
return response
else:
return HttpResponse("Return type not implemented")
else:
return JSONResponse({
'is_error': True,
'error': server_utils.form_error_string(form)
})
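# The search_hash returned above keys the cached (spec, variants) pair saved
# via cache_utils.save_results_for_spec; mendelian_variant_search_spec below
# re-serves those cached results and only recomputes on a cache miss.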
@csrf_exempt
@login_required
@log_request('mendelian_variant_search_spec_api')
def mendelian_variant_search_spec(request):
project, family = get_project_and_family_for_user(request.user, request.GET)
search_hash = request.GET.get('search_hash')
search_spec_dict, variants = cache_utils.get_cached_results(project.project_id, search_hash)
search_spec = MendelianVariantSearchSpec.fromJSON(search_spec_dict)
if variants is None:
variants = api_utils.calculate_mendelian_variant_search(search_spec, family, user=request.user)
else:
variants = [Variant.fromJSON(v) for v in variants]
for variant in variants:
variant.set_extra('family_id', family.family_id)
add_extra_info_to_variants_project(get_reference(), project, variants, add_family_tags=True, add_populations=True)
return_type = request.GET.get('return_type')
if return_type == 'json' or not return_type:
return JSONResponse({
'is_error': False,
'variants': [v.toJSON() for v in variants],
'search_spec': search_spec_dict,
})
elif request.GET.get('return_type') == 'csv':
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="results_{}.csv"'.format(search_hash)
writer = csv.writer(response)
indiv_ids = family.indiv_ids_with_variant_data()
headers = xbrowse_displays.get_variant_display_headers(get_mall(project), project, indiv_ids)
writer.writerow(headers)
for variant in variants:
fields = xbrowse_displays.get_display_fields_for_variant(get_mall(project), project, variant, indiv_ids)
writer.writerow(fields)
return response
@csrf_exempt
@login_required
@log_request('get_cohort_variants')
def cohort_variant_search(request):
project, cohort = get_project_and_cohort_for_user(request.user, request.GET)
if not project.can_view(request.user):
raise PermissionDenied
form = api_forms.CohortVariantSearchForm(request.GET)
if form.is_valid():
search_spec = form.cleaned_data['search_spec']
search_spec.family_id = cohort.cohort_id
variants = api_utils.calculate_mendelian_variant_search(search_spec, cohort, user=request.user)
list_of_variants = [v.toJSON(encode_indiv_id=True) for v in variants]
search_hash = cache_utils.save_results_for_spec(project.project_id, search_spec.toJSON(), list_of_variants)
api_utils.add_extra_info_to_variants_cohort(get_reference(), cohort, variants)
return JSONResponse({
'is_error': False,
'variants': [v.toJSON() for v in variants],
'search_hash': search_hash,
})
else:
return JSONResponse({
'is_error': True,
'error': server_utils.form_error_string(form)
})
@csrf_exempt
@login_required
@log_request('cohort_variant_search_spec_api')
def cohort_variant_search_spec(request):
project, cohort = get_project_and_cohort_for_user(request.user, request.GET)
# TODO: use form
search_spec_dict, variants = cache_utils.get_cached_results(project.project_id, request.GET.get('search_hash'))
search_spec = MendelianVariantSearchSpec.fromJSON(search_spec_dict)
if variants is None:
variants = api_utils.calculate_mendelian_variant_search(search_spec, cohort, user=request.user)
else:
variants = [Variant.fromJSON(v) for v in variants]
api_utils.add_extra_info_to_variants_cohort(get_reference(), cohort, variants)
return JSONResponse({
'is_error': False,
'variants': [v.toJSON() for v in variants],
'search_spec': search_spec.toJSON(),
})
@csrf_exempt
@login_required
@log_request('cohort_gene_search')
def cohort_gene_search(request):
project, cohort = get_project_and_cohort_for_user(request.user, request.GET)
sys.stderr.write("cohort_gene_search %s %s: starting ... \n" % (project.project_id, cohort.cohort_id))
form = api_forms.CohortGeneSearchForm(request.GET)
if form.is_valid():
search_spec = form.cleaned_data['search_spec']
search_spec.cohort_id = cohort.cohort_id
sys.stderr.write("cohort_gene_search %s %s: search spec: %s \n" % (project.project_id, cohort.cohort_id, str(search_spec.toJSON())))
genes = api_utils.calculate_cohort_gene_search(cohort, search_spec)
sys.stderr.write("cohort_gene_search %s %s: get %s genes \n" % (project.project_id, cohort.cohort_id, len(genes)))
search_hash = cache_utils.save_results_for_spec(project.project_id, search_spec.toJSON(), genes)
api_utils.add_extra_info_to_genes(project, get_reference(), genes)
sys.stderr.write("cohort_gene_search %s %s: done adding extra info \n" % (project.project_id, cohort.cohort_id))
return JSONResponse({
'is_error': False,
'genes': genes,
'search_hash': search_hash,
})
else:
return JSONResponse({
'is_error': True,
'error': server_utils.form_error_string(form)
})
@csrf_exempt
@login_required
@log_request('cohort_gene_search_spec')
def cohort_gene_search_spec(request):
project, cohort = get_project_and_cohort_for_user(request.user, request.GET)
search_spec, genes = cache_utils.get_cached_results(project.project_id, request.GET.get('search_hash'))
if genes is None:
genes = api_utils.calculate_cohort_gene_search(cohort, search_spec)
api_utils.add_extra_info_to_genes(project, get_reference(), genes)
return JSONResponse({
'is_error': False,
'genes': genes,
'search_spec': search_spec,
})
@csrf_exempt
@login_required
@log_request('cohort_gene_search_variants')
def cohort_gene_search_variants(request):
error = None
project, cohort = get_project_and_cohort_for_user(request.user, request.GET)
if not project.can_view(request.user):
raise PermissionDenied
form = api_forms.CohortGeneSearchVariantsForm(request.GET)
if form.is_valid():
gene_id = form.cleaned_data['gene_id']
inheritance_mode = form.cleaned_data['inheritance_mode']
variant_filter = form.cleaned_data['variant_filter']
quality_filter = form.cleaned_data['quality_filter']
else:
error = server_utils.form_error_string(form)
if not error:
indivs_with_inheritance, gene_variation = cohort_search.get_individuals_with_inheritance_in_gene(
get_datastore(project),
get_reference(),
cohort.xcohort(),
inheritance_mode,
gene_id,
variant_filter=variant_filter,
quality_filter=quality_filter
)
relevant_variants = gene_variation.get_relevant_variants_for_indiv_ids(cohort.indiv_id_list())
api_utils.add_extra_info_to_variants_project(get_reference(), project, relevant_variants, add_family_tags=True,
add_populations=True)
ret = {
'is_error': False,
'variants': [v.toJSON() for v in relevant_variants],
'gene_info': get_reference().get_gene(gene_id),
}
return JSONResponse(ret)
else:
ret = {
'is_error': True,
'error': error
}
return JSONResponse(ret)
@login_required
@log_request('gene_info')
def gene_info(request, gene_id):
gene = get_reference().get_gene(gene_id)
gene['expression'] = get_reference().get_tissue_expression_display_values(gene_id)
add_notes_to_genes([gene], request.user)
ret = {
'gene': gene,
'is_error': False,
'found_gene': gene is not None,
}
return JSONResponse(ret)
@login_required
@log_request('family_variant_annotation')
def family_variant_annotation(request):
    # TODO: this view is not like the others - refactor to forms
error = None
for key in ['project_id', 'family_id', 'xpos', 'ref', 'alt']:
if request.GET.get(key) is None:
error = "%s is requred", key
if not error:
project = get_object_or_404(Project, project_id=request.GET.get('project_id'))
family = get_object_or_404(Family, project=project, family_id=request.GET.get('family_id'))
if not project.can_view(request.user):
raise PermissionDenied
if not error:
variant = get_datastore(project).get_single_variant(
family.project.project_id,
family.family_id,
int(request.GET['xpos']),
request.GET['ref'],
request.GET['alt']
)
if not variant:
error = "Variant does not exist"
if not error:
ret = {
'variant': variant.toJSON(),
'is_error': False,
}
else:
ret = {
'is_error': True,
'error': error,
}
return JSONResponse(ret)
@login_required
@log_request('add_flag')
def add_family_search_flag(request):
error = None
for key in ['project_id', 'family_id', 'xpos', 'ref', 'alt', 'note', 'flag_type', 'flag_inheritance_mode']:
        if request.GET.get(key, None) is None:
            error = "%s is required" % key
if not error:
project = get_object_or_404(Project, project_id=request.GET.get('project_id'))
family = get_object_or_404(Family, project=project, family_id=request.GET.get('family_id'))
if not project.can_edit(request.user):
raise PermissionDenied
if not error:
        xpos = int(request.GET['xpos'])
        ref = request.GET.get('ref')
        alt = request.GET['alt']
        note = request.GET.get('note')
        flag_type = request.GET.get('flag_type')
        flag_inheritance_mode = request.GET.get('flag_inheritance_mode')
# todo: more validation - is variant valid?
flag = FamilySearchFlag(user=request.user,
family=family,
                                xpos=xpos,
ref=ref,
alt=alt,
note=note,
flag_type=flag_type,
suggested_inheritance=flag_inheritance_mode,
date_saved=timezone.now(),
)
if not error:
flag.save()
variant = get_datastore(project).get_single_variant(family.project.project_id, family.family_id,
xpos, ref, alt )
api_utils.add_extra_info_to_variants_project(get_reference(), project, [variant], add_family_tags=True,
add_populations=True)
ret = {
'is_error': False,
'variant': variant.toJSON(),
}
else:
ret = {
'is_error': True,
'error': error,
}
return JSONResponse(ret)
@login_required
# @csrf_exempt
@log_request('add_analysed_by')
def add_family_analysed_by(request, data=None):
if not data:
data = request.GET
family_id = data.get('family_id')
project_id = data.get('project_id')
if not (family_id and project_id):
        return HttpResponseBadRequest('family_id and project_id are required')
try:
family = Family.objects.get(project__project_id=project_id, family_id=family_id)
except ObjectDoesNotExist:
raise Http404('No family matches the given query')
if not family.project.can_edit(request.user):
raise PermissionDenied
analysed_by = create_xbrowse_model(AnalysedBy, user=request.user, family=family, date_saved=timezone.now())
return JSONResponse({
'is_error': False,
'analysed_by': analysed_by.toJSON(),
})
@login_required
@log_request('delete_variant_note')
def delete_variant_note(request, note_id):
ret = {
'is_error': False,
}
notes = VariantNote.objects.filter(id=note_id)
if not notes:
ret['is_error'] = True
ret['error'] = 'note id %s not found' % note_id
else:
note = list(notes)[0]
if not note.project.can_edit(request.user):
raise PermissionDenied
delete_xbrowse_model(note)
return JSONResponse(ret)
@login_required
@log_request('add_or_edit_variant_note')
def add_or_edit_variant_note(request):
"""Add a variant note"""
family = None
if 'family_id' in request.GET:
project, family = get_project_and_family_for_user(request.user, request.GET)
else:
project = utils.get_project_for_user(request.user, request.GET)
form = api_forms.VariantNoteForm(project, request.GET)
if not form.is_valid():
return JSONResponse({
'is_error': True,
'error': server_utils.form_error_string(form)
})
if 'note_id' in form.cleaned_data and form.cleaned_data['note_id']:
event_type = "edit_variant_note"
notes = VariantNote.objects.filter(
id=form.cleaned_data['note_id'],
project=project,
xpos=form.cleaned_data['xpos'],
ref=form.cleaned_data['ref'],
alt=form.cleaned_data['alt'],
)
if not notes:
return JSONResponse({
'is_error': True,
'error': 'note id %s not found' % form.cleaned_data['note_id']
})
note = notes[0]
update_xbrowse_model(
note,
user=request.user,
note=form.cleaned_data['note_text'],
submit_to_clinvar=form.cleaned_data['submit_to_clinvar'],
date_saved=timezone.now(),
family=family)
else:
event_type = "add_variant_note"
create_xbrowse_model(
VariantNote,
user=request.user,
project=project,
xpos=form.cleaned_data['xpos'],
ref=form.cleaned_data['ref'],
alt=form.cleaned_data['alt'],
note=form.cleaned_data['note_text'],
            submit_to_clinvar=form.cleaned_data['submit_to_clinvar'],
date_saved=timezone.now(),
family=family)
notes = get_variant_notes(project=project, family_id=request.GET.get('family_id'), **form.cleaned_data)
try:
if not settings.DEBUG: settings.EVENTS_COLLECTION.insert({
'event_type': event_type,
'date': timezone.now(),
'project_id': ''.join(project.project_id),
'family_id': family.family_id,
'note': form.cleaned_data['note_text'],
'xpos':form.cleaned_data['xpos'],
'ref':form.cleaned_data['ref'],
'alt':form.cleaned_data['alt'],
'username': request.user.username,
'email': request.user.email,
})
except Exception as e:
logging.error("Error while logging %s event: %s" % (event_type, e))
return JSONResponse({
'is_error': False,
'notes': notes,
})
@login_required
@log_request('add_or_edit_variant_tags')
def add_or_edit_variant_tags(request):
family = None
if 'family_id' in request.GET:
project, family = get_project_and_family_for_user(request.user, request.GET)
else:
project = utils.get_project_for_user(request.user, request.GET)
form = api_forms.VariantTagsForm(project, request.GET)
if not form.is_valid():
ret = {
'is_error': True,
'error': server_utils.form_error_string(form)
}
return JSONResponse(ret)
variant_tags_to_delete = {
variant_tag.id: variant_tag for variant_tag in VariantTag.objects.filter(
family=family,
xpos=form.cleaned_data['xpos'],
ref=form.cleaned_data['ref'],
alt=form.cleaned_data['alt'])
}
project_tag_events = {}
for project_tag in form.cleaned_data['project_tags']:
# retrieve tags
tag, created = get_or_create_xbrowse_model(
VariantTag,
project_tag=project_tag,
family=family,
xpos=form.cleaned_data['xpos'],
ref=form.cleaned_data['ref'],
alt=form.cleaned_data['alt'],
)
if not created:
# this tag already exists so just keep it (eg. remove it from the set of tags that will be deleted)
del variant_tags_to_delete[tag.id]
continue
# this a new tag, so update who saved it and when
project_tag_events[project_tag] = "add_variant_tag"
update_xbrowse_model(
tag,
user=request.user,
date_saved=timezone.now(),
search_url=form.cleaned_data['search_url'])
# delete the tags that are no longer checked.
for variant_tag in variant_tags_to_delete.values():
project_tag_events[variant_tag.project_tag] = "delete_variant_tag"
delete_xbrowse_model(variant_tag)
# Get tags after updating the tag info in the database, so that the new tag info is added to the variant JSON
tags = get_variant_tags(project=project, family_id=request.GET.get('family_id'), **form.cleaned_data)
# log tag creation
for project_tag, event_type in project_tag_events.items():
try:
if not settings.DEBUG: settings.EVENTS_COLLECTION.insert({
'event_type': event_type,
'date': timezone.now(),
'project_id': ''.join(project.project_id),
'family_id': family.family_id,
'tag': project_tag.tag,
'title': project_tag.title,
'xpos':form.cleaned_data['xpos'],
'ref':form.cleaned_data['ref'],
'alt':form.cleaned_data['alt'],
'username': request.user.username,
'email': request.user.email,
'search_url': form.cleaned_data.get('search_url'),
})
except Exception as e:
logging.error("Error while logging add_variant_tag event: %s" % e)
return JSONResponse({
'is_error': False,
'tags': tags,
})
@login_required
@csrf_exempt
@log_request('add_or_edit_functional_data')
def add_or_edit_functional_data(request):
request_data = json.loads(request.body)
project, family = get_project_and_family_for_user(request.user, request_data)
form = api_forms.VariantFunctionalDataForm(request_data)
if not form.is_valid():
ret = {
'is_error': True,
'error': server_utils.form_error_string(form)
}
return JSONResponse(ret)
project_tag_events = {}
tag_ids = set()
for tag_data in form.cleaned_data['tags']:
# retrieve tags
tag, created = get_or_create_xbrowse_model(
VariantFunctionalData,
functional_data_tag=tag_data['tag'],
family=family,
xpos=form.cleaned_data['xpos'],
ref=form.cleaned_data['ref'],
alt=form.cleaned_data['alt'],
)
tag_ids.add(tag.id)
if created:
project_tag_events[tag_data['tag']] = "add_variant_functional_data"
elif tag.metadata != tag_data.get('metadata'):
project_tag_events[tag_data['tag']] = "edit_variant_functional_data"
else:
continue
# this a new/changed tag, so update who saved it and when
update_xbrowse_model(
tag,
metadata=tag_data.get('metadata'),
user=request.user,
date_saved=timezone.now(),
search_url=form.cleaned_data['search_url'])
# delete the tags that are no longer checked.
variant_tags_to_delete = VariantFunctionalData.objects.filter(
family=family,
xpos=form.cleaned_data['xpos'],
ref=form.cleaned_data['ref'],
alt=form.cleaned_data['alt'],
).exclude(id__in=tag_ids)
for variant_tag in variant_tags_to_delete:
project_tag_events[variant_tag.functional_data_tag] = "delete_variant_functional_data"
delete_xbrowse_model(variant_tag)
# get the tags after updating the tag info in the database, so that the new tag info is added to the variant JSON
functional_data = get_variant_functional_data(project=project, family_id=request_data.get('family_id'), **form.cleaned_data)
# log tag creation
for project_tag, event_type in project_tag_events.items():
try:
if not settings.DEBUG: settings.EVENTS_COLLECTION.insert({
'event_type': event_type,
'date': timezone.now(),
'project_id': ''.join(project.project_id),
'family_id': family.family_id,
'tag': project_tag,
'xpos':form.cleaned_data['xpos'],
'ref':form.cleaned_data['ref'],
'alt':form.cleaned_data['alt'],
'username': request.user.username,
'email': request.user.email,
'search_url': form.cleaned_data.get('search_url'),
})
except Exception as e:
logging.error("Error while logging add_variant_tag event: %s" % e)
return JSONResponse({
'is_error': False,
'functional_data': functional_data,
})
@login_required
@log_request('delete_gene_note')
def delete_gene_note(request, note_id):
try:
note = GeneNote.objects.get(id=note_id)
except ObjectDoesNotExist:
return JSONResponse({
'is_error': True,
'error': 'note id %s not found' % note_id
})
if not note.can_edit(request.user):
raise PermissionDenied
delete_xbrowse_model(note)
return JSONResponse({
'is_error': False,
})
@login_required
@log_request('add_or_edit_gene_note')
def add_or_edit_gene_note(request):
"""Add a gene note"""
form = api_forms.GeneNoteForm(request.GET)
if not form.is_valid():
return JSONResponse({
'is_error': True,
'error': server_utils.form_error_string(form)
})
if form.cleaned_data.get('note_id'):
event_type = "edit_gene_note"
try:
note = GeneNote.objects.get(id=form.cleaned_data['note_id'])
except ObjectDoesNotExist:
return JSONResponse({
'is_error': True,
'error': 'note id %s not found' % form.cleaned_data['note_id']
})
if not note.can_edit(request.user):
raise PermissionDenied
update_xbrowse_model(
note,
note=form.cleaned_data['note_text'],
user=request.user,
date_saved=timezone.now(),
)
else:
event_type = "add_variant_note"
note = create_xbrowse_model(
GeneNote,
user=request.user,
gene_id=form.cleaned_data['gene_id'],
note=form.cleaned_data['note_text'],
date_saved=timezone.now(),
)
try:
if not settings.DEBUG: settings.EVENTS_COLLECTION.insert({
'event_type': event_type,
'date': timezone.now(),
'note': form.cleaned_data['note_text'],
'gene_id':form.cleaned_data['gene_id'],
'username': request.user.username,
'email': request.user.email,
})
except Exception as e:
logging.error("Error while logging %s event: %s" % (event_type, e))
return JSONResponse({
'is_error': False,
'note': note.toJSON(request.user),
})
def gene_autocomplete(request):
query = request.GET.get('q', '')
gene_items = get_queried_genes(query, 20)
genes = [{
'value': item['gene_id'],
'label': item['gene_symbol'],
} for item in gene_items]
return JSONResponse(genes)
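# Example response for ?q=MECP (gene id illustrative):
#   [{"value": "ENSG00000169057", "label": "MECP2"}, ...]
# capped at 20 items by the get_queried_genes(query, 20) call above.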
@login_required
@log_request('variant_info')
def variant_info(request):
pass
@csrf_exempt
@login_required
@log_request('combine_mendelian_families_api')
def combine_mendelian_families(request):
project, family_group = utils.get_project_and_family_group_for_user(request.user, request.GET)
if not project.can_view(request.user):
raise PermissionDenied
form = api_forms.CombineMendelianFamiliesForm(request.GET)
if form.is_valid():
search_spec = form.cleaned_data['search_spec']
search_spec.family_group_id = family_group.slug
genes = api_utils.calculate_combine_mendelian_families(family_group, search_spec, user=request.user)
search_hash = cache_utils.save_results_for_spec(project.project_id, search_spec.toJSON(), genes)
api_utils.add_extra_info_to_genes(project, get_reference(), genes)
return JSONResponse({
'is_error': False,
'genes': genes,
'search_hash': search_hash,
})
else:
return JSONResponse({
'is_error': True,
'error': server_utils.form_error_string(form)
})
@csrf_exempt
@login_required
@log_request('mendelian_variant_search_spec_api')
def combine_mendelian_families_spec(request):
project, family_group = utils.get_project_and_family_group_for_user(request.user, request.GET)
if not project.can_view(request.user):
raise PermissionDenied
search_hash = request.GET.get('search_hash')
search_spec, genes = cache_utils.get_cached_results(project.project_id, search_hash)
search_spec_obj = MendelianVariantSearchSpec.fromJSON(search_spec)
if request.GET.get('return_type') != 'csv' or not request.GET.get('group_by_variants'):
if genes is None:
genes = api_utils.calculate_combine_mendelian_families(family_group, search_spec, user=request.user)
api_utils.add_extra_info_to_genes(project, get_reference(), genes)
if request.GET.get('return_type') != 'csv':
return JSONResponse({
'is_error': False,
'genes': genes,
'search_spec': search_spec,
})
else:
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="family_group_results_{}.csv"'.format(search_hash)
writer = csv.writer(response)
writer.writerow(["gene", "# families", "family list", "chrom", "start", "end"])
for gene in genes:
family_id_list = [family_id for (project_id, family_id) in gene["family_id_list"]]
                writer.writerow(map(str, [gene["gene_name"], len(family_id_list), " ".join(family_id_list), gene["chr"], gene["start"], gene["end"]]))
return response
else:
# download results grouped by variant
indiv_id_list = []
for family in family_group.get_families():
indiv_id_list.extend(family.indiv_ids_with_variant_data())
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="results_{}.csv"'.format(search_hash)
writer = csv.writer(response)
headers = ['genes','chr','pos','ref','alt','worst_annotation' ]
headers.extend(project.get_reference_population_slugs())
headers.extend([ 'polyphen','sift','muttaster','fathmm'])
for indiv_id in indiv_id_list:
headers.append(indiv_id)
headers.append(indiv_id+'_gq')
headers.append(indiv_id+'_dp')
writer.writerow(headers)
mall = get_mall(project)
variant_key_to_individual_id_to_variant = defaultdict(dict)
variant_key_to_variant = {}
for family in family_group.get_families():
for variant in get_variants_with_inheritance_mode(
mall,
family.xfamily(),
search_spec_obj.inheritance_mode,
search_spec_obj.variant_filter,
search_spec_obj.quality_filter,
user=request.user):
if len(variant.coding_gene_ids) == 0:
continue
variant_key = (variant.xpos, variant.ref, variant.alt)
variant_key_to_variant[variant_key] = variant
for indiv_id in family.indiv_ids_with_variant_data():
variant_key_to_individual_id_to_variant[variant_key][indiv_id] = variant
for variant_key in sorted(variant_key_to_individual_id_to_variant.keys()):
variant = variant_key_to_variant[variant_key]
individual_id_to_variant = variant_key_to_individual_id_to_variant[variant_key]
genes = [mall.reference.get_gene_symbol(gene_id) for gene_id in variant.coding_gene_ids]
fields = []
fields.append(','.join(genes))
fields.extend([
variant.chr,
str(variant.pos),
variant.ref,
variant.alt,
variant.annotation.get('vep_group', '.'),
])
for ref_population_slug in project.get_reference_population_slugs():
fields.append(variant.annotation['freqs'][ref_population_slug])
for field_key in ['polyphen', 'sift', 'muttaster', 'fathmm']:
fields.append(variant.annotation.get(field_key, ""))
for indiv_id in indiv_id_list:
variant = individual_id_to_variant.get(indiv_id)
genotype = None
if variant is not None:
genotype = variant.get_genotype(indiv_id)
if genotype is None:
fields.extend(['.', '.', '.'])
else:
fields.append("/".join(genotype.alleles) if genotype.alleles else "./.")
#fields[-1] += " %s (%s)" % (indiv_id, genotype.num_alt)
fields.append(str(genotype.gq) if genotype.gq is not None else '.')
fields.append(genotype.extras['dp'] if genotype.extras.get('dp') is not None else '.')
writer.writerow(fields)
return response
@csrf_exempt
@login_required
@log_request('combine_mendelian_families_variants_api')
def combine_mendelian_families_variants(request):
project, family_group = utils.get_project_and_family_group_for_user(request.user, request.GET)
form = api_forms.CombineMendelianFamiliesVariantsForm(request.GET)
if form.is_valid():
variants_grouped = get_variants_by_family_for_gene(
get_mall(project),
[f.xfamily() for f in form.cleaned_data['families']],
form.cleaned_data['inheritance_mode'],
form.cleaned_data['gene_id'],
variant_filter=form.cleaned_data['variant_filter'],
quality_filter=form.cleaned_data['quality_filter'],
user=request.user,
)
variants_by_family = []
for family in form.cleaned_data['families']:
variants = variants_grouped[(family.project.project_id, family.family_id)]
add_extra_info_to_variants_project(get_reference(), family.project, variants, add_family_tags=True, add_populations=True)
variants_by_family.append({
'project_id': family.project.project_id,
'family_id': family.family_id,
'family_name': str(family),
'variants': [v.toJSON() for v in variants],
})
return JSONResponse({
'is_error': False,
'variants_by_family': variants_by_family,
})
else:
return JSONResponse({
'is_error': True,
'error': server_utils.form_error_string(form)
})
@csrf_exempt
@login_required
@log_request('diagnostic_search')
def diagnostic_search(request):
project, family = utils.get_project_and_family_for_user(request.user, request.GET)
if not project.can_view(request.user):
raise PermissionDenied
form = api_forms.DiagnosticSearchForm(family, request.GET)
if form.is_valid():
search_spec = form.cleaned_data['search_spec']
search_spec.family_id = family.family_id
gene_list = form.cleaned_data['gene_list']
diagnostic_info_list = []
for gene_id in gene_list.gene_id_list():
diagnostic_info = get_gene_diangostic_info(family, gene_id, search_spec.variant_filter)
add_extra_info_to_variants_project(get_reference(), project, diagnostic_info._variants, add_family_tags=True, add_populations=True)
diagnostic_info_list.append(diagnostic_info)
return JSONResponse({
'is_error': False,
'gene_diagnostic_info_list': [d.toJSON() for d in diagnostic_info_list],
'gene_list_info': gene_list.toJSON(details=True),
'data_summary': family.get_data_summary(),
})
else:
return JSONResponse({
'is_error': True,
'error': server_utils.form_error_string(form)
})
def family_gene_lookup(request):
project, family = utils.get_project_and_family_for_user(request.user, request.GET)
if not project.can_view(request.user):
raise PermissionDenied
gene_id = request.GET.get('gene_id')
if not get_reference().is_valid_gene_id(gene_id):
return JSONResponse({
'is_error': True,
'error': 'Invalid gene',
})
family_gene_data = get_gene_diangostic_info(family, gene_id)
add_extra_info_to_variants_project(get_reference(), project, family_gene_data._variants, add_family_tags=True,
add_populations=True)
return JSONResponse({
'is_error': False,
'family_gene_data': family_gene_data.toJSON(),
'data_summary': family.get_data_summary(),
'gene': get_reference().get_gene(gene_id),
})
@csrf_exempt
@login_required
@log_request('API_project_phenotypes')
def export_project_individuals_phenotypes(request,project_id):
"""
Export all HPO terms entered for this project individuals. A direct proxy
from PhenoTips API
Args:
project_id
Returns:
A JSON string of HPO terms entered
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
result={}
for individual in project.get_individuals():
ui_display_name = individual.indiv_id
ext_id=individual.phenotips_id
result[ui_display_name] = phenotype_entry_metric_for_individual(project_id, ext_id)['raw']
return JSONResponse(result)
@csrf_exempt
@login_required
@log_request('API_project_phenotypes')
def export_project_family_statuses(request,project_id):
"""
Exports the status of all families in this project
Args:
Project ID
Returns:
All statuses of families
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
status_description_map = {}
for abbrev, details in ANALYSIS_STATUS_CHOICES:
status_description_map[abbrev] = details[0]
result={}
for family in project.get_families():
fam_details =family.toJSON()
result[fam_details['family_id']] = status_description_map.get(family.analysis_status, 'unknown')
return JSONResponse(result)
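# Example response shape (illustrative): {"<family_id>": "<analysis status description, e.g. 'Solved'>", ...}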
@csrf_exempt
@login_required
@log_request('API_project_phenotypes')
def export_project_variants(request,project_id):
"""
Export all variants associated to this project
Args:
Project id
Returns:
A JSON object of variant information
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
status_description_map = {}
for abbrev, details in ANALYSIS_STATUS_CHOICES:
status_description_map[abbrev] = details[0]
variants=[]
project_tags = ProjectTag.objects.filter(project__project_id=project_id)
for project_tag in project_tags:
variant_tags = VariantTag.objects.filter(project_tag=project_tag)
for variant_tag in variant_tags:
variant = get_datastore(project).get_single_variant(
project.project_id,
variant_tag.family.family_id if variant_tag.family else '',
variant_tag.xpos,
variant_tag.ref,
variant_tag.alt,
)
variant_json = variant.toJSON() if variant is not None else {'xpos': variant_tag.xpos, 'ref': variant_tag.ref, 'alt': variant_tag.alt}
family_status = ''
if variant_tag.family:
family_status = status_description_map.get(variant_tag.family.analysis_status, 'unknown')
variants.append({"variant":variant_json,
"tag":project_tag.tag,
"description":project_tag.title,
"family":variant_tag.family.toJSON(),
"family_status":family_status})
return JSONResponse(variants)
@login_required
@log_request('matchmaker_individual_add')
def get_submission_candidates(request,project_id,family_id,indiv_id):
"""
Gathers submission candidate individuals from this family
Args:
individual_id: an individual ID
project_id: project this individual belongs to
Returns:
Status code
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
else:
id_map,affected_patient = get_all_clinical_data_for_family(project_id,family_id,indiv_id)
return JSONResponse({
"submission_candidate":affected_patient,
"id_map":id_map
})
@csrf_exempt
@login_required
@log_request('matchmaker_individual_add')
def add_individual(request):
"""
Adds given individual to the local database
Args:
submission information of a single patient is expected in the POST data
Returns:
Submission status information
"""
affected_patient = json.loads(request.POST.get("patient_data", "wasn't able to parse patient_data in POST!"))
seqr_id = request.POST.get("localId", "wasn't able to parse Id (as seqr knows it) in POST!")
project_id = request.POST.get("projectId", "wasn't able to parse project Id in POST!")
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
individual = get_object_or_404(SeqrIndividual, individual_id=seqr_id, family__project=project.seqr_project)
submission = json.dumps({'patient':affected_patient})
validity_check=is_a_valid_patient_structure(affected_patient)
if not validity_check['status']:
return JSONResponse({
'http_result':{"message":validity_check['reason'] + ", the patient was not submitted to matchmaker"},
'status_code':400,
})
headers={
'X-Auth-Token': settings.MME_NODE_ADMIN_TOKEN,
'Accept': settings.MME_NODE_ACCEPT_HEADER,
'Content-Type': settings.MME_CONTENT_TYPE_HEADER
}
result = requests.post(url=settings.MME_ADD_INDIVIDUAL_URL,
headers=headers,
data=submission)
#if successfully submitted to MME, persist info
if result.status_code==200 or result.status_code==409:
individual.mme_submitted_data = {'patient':affected_patient}
individual.mme_submitted_date = datetime.datetime.now()
individual.mme_deleted_date = None
individual.mme_deleted_by = None
individual.save()
#update the contact information store if any updates were made
updated_contact_name = affected_patient['contact']['name']
updated_contact_href = affected_patient['contact']['href']
try:
project = Project.objects.get(project_id=project_id)
update_xbrowse_model(
project,
mme_primary_data_owner=updated_contact_name,
mme_contact_url=updated_contact_href,
)
except ObjectDoesNotExist:
logger.error("ERROR: couldn't update the contact name and href of MME submission: ", updated_contact_name, updated_contact_href)
#seqr_project.save()
if result.status_code==401:
return JSONResponse({
'http_result':{"message":"sorry, authorization failed, I wasn't able to insert that individual"},
'status_code':result.status_code,
})
return JSONResponse({
'http_result':result.json(),
'status_code':result.status_code,
})
@csrf_exempt
@login_required
@log_request('matchmaker_individual_delete')
def delete_individual(request,project_id, indiv_id):
"""
Deletes a given individual from the local database
Args:
Project ID of project
Individual ID of a single patient to delete
Returns:
Delete confirmation
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
individual = get_object_or_404(SeqrIndividual, individual_id=indiv_id, family__project=project.seqr_project)
headers={
'X-Auth-Token': settings.MME_NODE_ADMIN_TOKEN,
'Accept': settings.MME_NODE_ACCEPT_HEADER,
'Content-Type': settings.MME_CONTENT_TYPE_HEADER
}
    #find the latest ID that was used in submission, which may differ from the seqr ID
matchbox_id=indiv_id
if individual.mme_submitted_date:
if individual.mme_deleted_date:
return JSONResponse({"status_code":402,"message":"that individual has already been deleted"})
else:
matchbox_id = individual.mme_submitted_data['patient']['id']
logger.info("using matchbox ID: %s" % (matchbox_id))
payload = {"id":matchbox_id}
result = requests.delete(url=settings.MME_DELETE_INDIVIDUAL_URL,
headers=headers,
data=json.dumps(payload))
#if successfully deleted from matchbox/MME, persist that detail
if result.status_code == 200:
deleted_date = datetime.datetime.now()
individual.mme_deleted_date = deleted_date
individual.mme_deleted_by = request.user
individual.save()
return JSONResponse({"status_code":result.status_code,"message":result.text, 'deletion_date':str(deleted_date)})
else:
return JSONResponse({"status_code":404,"message":result.text})
return JSONResponse({"status_code":result.status_code,"message":result.text})
@login_required
@log_request('matchmaker_family_submissions')
def get_family_submissions(request,project_id,family_id):
"""
Gets the last 4 submissions for this family
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
else:
family = get_object_or_404(Family, project=project, family_id=family_id)
family_submissions=[]
family_members_submitted=[]
for individual in family.individual_set.filter(seqr_individual__mme_submitted_date__isnull=False):
family_submissions.append({'submitted_data': individual.seqr_individual.mme_submitted_data,
'hpo_details': extract_hpo_id_list_from_mme_patient_struct(individual.seqr_individual.mme_submitted_data),
'seqr_id': individual.indiv_id,
'family_id': family_id,
'project_id': project_id,
'insertion_date': individual.seqr_individual.mme_submitted_date.strftime("%b %d %Y %H:%M:%S"),
'deletion': individual.seqr_individual.mme_deleted_date,
})
family_members_submitted.append(individual.indiv_id)
    #TODO: figure out handling when more than one individual in a family has been submitted. For now returning a list. Eventually
    #this must be the latest submission for every individual in a family
return JSONResponse({
"family_submissions":family_submissions,
"family_members_submitted":family_members_submitted
})
@login_required
@csrf_exempt
@log_request('match_internally_and_externally')
def match_internally_and_externally(request,project_id,indiv_id):
"""
Looks for matches for the given individual. Expects a single patient (MME spec) in the POST
data field under key "patient_data"
Args:
project_id,indiv_id and POST all data in POST under key "patient_data"
Returns:
Status code and results
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
individual = get_object_or_404(SeqrIndividual, individual_id=indiv_id, family__project=project.seqr_project)
patient_data = request.POST.get("patient_data")
if patient_data is None:
r = HttpResponse("wasn't able to parse patient data field in POST!",status=400)
return r
#find details on HPO terms and start aggregating in a map to send back with reply
hpo_map={}
extract_hpo_id_list_from_mme_patient_struct(json.loads(patient_data),hpo_map)
headers={
'X-Auth-Token': settings.MME_NODE_ADMIN_TOKEN,
'Accept': settings.MME_NODE_ACCEPT_HEADER,
'Content-Type': settings.MME_CONTENT_TYPE_HEADER
}
results={}
#first look in the local MME database
internal_result = requests.post(url=settings.MME_LOCAL_MATCH_URL,
headers=headers,
data=patient_data
)
ids={}
for internal_res in internal_result.json().get('results',[]):
ids[internal_res['patient']['id']] = internal_res
extract_hpo_id_list_from_mme_patient_struct(internal_res,hpo_map)
results['local_results']={"result":internal_result.json(),
"status_code":internal_result.status_code
}
#then externally (unless turned off)
if settings.SEARCH_IN_EXTERNAL_MME_NODES:
extnl_result = requests.post(url=settings.MME_EXTERNAL_MATCH_URL,
headers=headers,
data=patient_data
)
results['external_results']={"result":extnl_result.json(),
"status_code":str(extnl_result.status_code)
}
for ext_res in extnl_result.json().get('results',[]):
extract_hpo_id_list_from_mme_patient_struct(ext_res,hpo_map)
ids[ext_res['patient']['id']] = ext_res
saved_results = {
result.result_data['patient']['id']: result for result in MatchmakerResult.objects.filter(individual=individual)
}
result_analysis_state={}
for id in ids.keys():
persisted_result_det = saved_results.get(id)
if not persisted_result_det:
persisted_result_det = MatchmakerResult.objects.create(
individual=individual,
result_data=ids[id],
last_modified_by=request.user,
)
result_analysis_state[id] = {
"id_of_indiv_searched_with":indiv_id,
"content_of_indiv_searched_with":json.loads(patient_data),
"content_of_result":ids[id],
"result_id":id,
"we_contacted_host":persisted_result_det.we_contacted,
"host_contacted_us":persisted_result_det.host_contacted,
"seen_on":str(persisted_result_det.created_date),
"deemed_irrelevant":persisted_result_det.deemed_irrelevant,
"comments":persisted_result_det.comments or '',
"seqr_project_id":project_id,
"flag_for_analysis":persisted_result_det.flag_for_analysis,
"username_of_last_event_initiator":persisted_result_det.last_modified_by.username,
}
#post to slack
if settings.SLACK_TOKEN is not None:
generate_slack_notification_for_seqr_match(results,project_id,indiv_id)
return JSONResponse({
"match_results":results,
"result_analysis_state":result_analysis_state,
"hpo_map":hpo_map
})
@login_required
@csrf_exempt
@log_request('match_internally_and_externally')
def match_in_open_mme_sources(request,project_id,indiv_id):
"""
    Match in other MME data sources that are open and not token protected (ex: Monarch)
Args:
project_id,indiv_id and POST all data in POST under key "patient_data"
Returns:
Status code and results
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
patient_data = request.POST.get("patient_data")
if patient_data is None:
r = HttpResponse("wasn't able to parse patient data field in POST!",status=400)
return r
#find details on HPO terms and start aggregating in a map to send back with reply
hpo_map={}
extract_hpo_id_list_from_mme_patient_struct(json.loads(patient_data),hpo_map)
#these open sites require no token
headers={
'X-Auth-Token': '',
'Accept': settings.MME_NODE_ACCEPT_HEADER,
'Content-Type': settings.MME_CONTENT_TYPE_HEADER
}
results={}
open_sites = {'Monarch Initiative':'https://mme.monarchinitiative.org/match'} #todo: put into settings
for open_site_name, open_site_url in open_sites.iteritems():
results_back = requests.post(url=open_site_url,
headers=headers,
data=patient_data)
ids={}
for res in results_back.json().get('results',[]):
ids[res['patient']['id']] = res
extract_hpo_id_list_from_mme_patient_struct(res,hpo_map)
results[open_site_name]={"result":results_back.json(),
"status_code":results_back.status_code
}
return JSONResponse({
"match_results":results,
"hpo_map":hpo_map
})
@login_required
@csrf_exempt
@log_request('get_project_individuals')
def get_project_individuals(request,project_id):
"""
Get a list of individuals with their family IDs of this project
Args:
project_id
Returns:
map of individuals and their family
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
indivs=[]
for indiv in project.get_individuals():
strct={'guid':indiv.id}
for k,v in indiv.to_dict().iteritems():
if k not in ['phenotypes']:
strct[k] = v
indivs.append(strct)
return JSONResponse({
"individuals":indivs
})
@login_required
@csrf_exempt
@log_request('get_family_individuals')
def get_family_individuals(request,project_id,family_id):
"""
    Get a list of individuals belonging to this family ID
Args:
project_id
family_id
Returns:
map of individuals in this family
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
indivs=[]
for indiv in project.get_individuals():
if indiv.to_dict()['family_id'] == family_id:
strct={'guid':indiv.id}
for k,v in indiv.to_dict().iteritems():
if k not in ['phenotypes']:
strct[k] = v
indivs.append(strct)
return JSONResponse({
"individuals":indivs
})
@staff_member_required(login_url=LOGIN_URL)
@log_request('matchmaker_get_matchbox_id_details')
def get_matchbox_id_details(request,matchbox_id):
"""
Gets information of this matchbox_id
"""
match_individuals = SeqrIndividual.objects.filter(mme_submitted_data__patient__id=matchbox_id)
records = []
for individual in match_individuals:
record = {
'seqr_id':individual.individual_id,
'family_id':individual.family.family_id,
'project_id':individual.family.project.deprecated_project_id,
'insertion_date':str(individual.mme_submitted_date)}
        genomic_features = []
        for g_feature in individual.mme_submitted_data['patient']['genomicFeatures']:
            genomic_features.append({'gene_id': g_feature['gene']['id'],
                                     'variant_start': g_feature['variant']['start'],
                                     'variant_end': g_feature['variant']['end']})
        record['submitted_genomic_features'] = genomic_features
        features = []
        for feature in individual.mme_submitted_data['patient']['features']:
            id = feature['id']
            label = ''
            if 'label' in feature:
                label = feature['label']
            features.append({'id': id,
                             'label': label})
        record['submitted_features'] = features
records.append(record)
return JSONResponse({
'submission_records':records
})
@staff_member_required(login_url=LOGIN_URL)
@log_request('matchmaker_get_matchbox_metrics')
def get_matchbox_metrics(request):
"""
Gets matchbox metrics
"""
mme_headers={
'X-Auth-Token': settings.MME_NODE_ADMIN_TOKEN,
'Accept': settings.MME_NODE_ACCEPT_HEADER,
'Content-Type': settings.MME_CONTENT_TYPE_HEADER
}
r = requests.get(url=settings.MME_MATCHBOX_METRICS_URL,
headers=mme_headers)
if r.status_code==200:
matchbox_metrics = r.json()['metrics']
genes_in_matchbox=matchbox_metrics['geneCounts'].keys()
seqr__gene_info = gather_all_annotated_genes_in_seqr()
seqr_metrics={"genes_in_seqr":len(seqr__gene_info),
"genes_found_in_matchbox":0}
unique_genes=[]
for gene_ids,proj in seqr__gene_info.iteritems():
if gene_ids[0] in genes_in_matchbox:
unique_genes.append(gene_ids[0])
seqr_metrics['genes_found_in_matchbox'] = len(set(unique_genes))
seqr_metrics["submission_info"]=find_projects_with_families_in_matchbox()
return JSONResponse({"from_matchbox":r.json(),
"from_seqr":seqr_metrics})
else:
resp = HttpResponse('{"message":"error contacting matchbox to gain metrics", "status":' + r.status_code + '}',status=r.status_code)
resp.status_code=r.status_code
return resp
@login_required
@log_request('matchmaker_get_matchbox_metrics')
def get_matchbox_metrics_for_project(request,project_id):
"""
Gets matchbox submission metrics for project (accessible to non-staff)
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
    return JSONResponse({"families": find_families_of_this_project_in_matchbox(project_id)})
@login_required
@csrf_exempt
@log_request('update_match_comment')
def update_match_comment(request,project_id,match_id,indiv_id):
"""
Update a comment made about a match
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
individual = get_object_or_404(SeqrIndividual, individual_id=indiv_id, family__project=project.seqr_project)
    comment = request.POST.get("comment")
    if comment is None:
        return HttpResponse('{"message":"wasn\'t able to parse POST!"}', status=500)
persisted_result_dets = MatchmakerResult.objects.filter(individual=individual, result_data__patient__id=match_id)
if persisted_result_dets.count()>0:
for persisted_result_det in persisted_result_dets:
persisted_result_det.comments=comment.strip()
persisted_result_det.last_modified_by=request.user
persisted_result_det.save()
resp = HttpResponse('{"message":"OK"}',status=200)
return resp
    else:
        return HttpResponse('{"message":"error updating database"}', status=500)
@staff_member_required(login_url=LOGIN_URL)
@csrf_exempt
@log_request('get_current_match_state_of_all_results')
def get_current_match_state_of_all_results(request):
"""
gets the current state of all matches in this project
"""
return HttpResponse('{"message":"error unimplemented MME endpoint"}',status=500)
@login_required
@csrf_exempt
@log_request('get_current_match_state')
def get_current_match_state(request,project_id,match_id,indiv_id):
"""
gets the current state of this matched pair
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
individual = get_object_or_404(SeqrIndividual, individual_id=indiv_id, family__project=project.seqr_project)
try:
result_model = MatchmakerResult.objects.filter(individual=individual, result_data__patient__id=match_id).first()
persisted_result_det = {
"id_of_indiv_searched_with":indiv_id,
"content_of_result":result_model.result_data,
"result_id":result_model.result_data['patient']['id'],
"we_contacted_host":result_model.we_contacted,
"host_contacted_us":result_model.host_contacted,
"seen_on":str(result_model.created_date),
"deemed_irrelevant":result_model.deemed_irrelevant,
"comments":result_model.comments or '',
"seqr_project_id":project_id,
"flag_for_analysis":result_model.flag_for_analysis,
"username_of_last_event_initiator":result_model.last_modified_by.username,
}
except Exception as e:
print e
return HttpResponse('{"message":"error talking to database"}',status=500)
return JSONResponse(persisted_result_det)
@login_required
@csrf_exempt
@log_request('match_state_update')
def match_state_update(request,project_id,match_id,indiv_id):
"""
Update a state change made about a match
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
individual = get_object_or_404(SeqrIndividual, individual_id=indiv_id, family__project=project.seqr_project)
state_type = request.POST.get('state_type', None)
state = request.POST.get('state',None)
if state_type is None or state is None:
return HttpResponse('{"message":"error parsing POST"}',status=500)
persisted_result_det = MatchmakerResult.objects.filter(individual=individual, result_data__patient__id=match_id).first()
    new_state = (state == "true")
    if state_type == 'flag_for_analysis':
        persisted_result_det.flag_for_analysis = new_state
    if state_type == 'deemed_irrelevant':
        persisted_result_det.deemed_irrelevant = new_state
    if state_type == 'we_contacted_host':
        persisted_result_det.we_contacted = new_state
    if state_type == 'host_contacted_us':
        persisted_result_det.host_contacted = new_state
    persisted_result_det.last_modified_by = request.user
    persisted_result_det.save()
    return HttpResponse('{"message":"successfully updated database"}', status=200)
| agpl-3.0 |
dvklopfenstein/PrincetonAlgorithms | py/AlgsSedgewickWayne/Topological.py | 1 | 2002 | """Compute topological ordering(w DFS) of a DAG or edge-weighted DAG. Runs in O(E + V) time."""
from AlgsSedgewickWayne.DirectedCycle import DirectedCycle
from AlgsSedgewickWayne.DepthFirstOrder import DepthFirstOrder
from AlgsSedgewickWayne.EdgeWeightedDigraph import EdgeWeightedDigraph
from AlgsSedgewickWayne.EdgeWeightedDirectedCycle import EdgeWeightedDirectedCycle
class Topological(object):
    """Determines if digraph G has a topological order and, if so, finds topological order."""
    def __init__(self, G):  # G is a Digraph or an EdgeWeightedDigraph; O(V+E) worst case
        # Pick the cycle finder matching the graph type (replaces the Java constructor overload)
        if isinstance(G, EdgeWeightedDigraph):
            finder = EdgeWeightedDirectedCycle(G)
        else:
            finder = DirectedCycle(G)
        self._order = None  # topological order; None if G has a directed cycle
        self._rank = {}     # rank[v] = position of vertex v in topological order
        if not finder.hasCycle():
            dfs = DepthFirstOrder(G)
            self._order = dfs.reversePost()
            for i, v in enumerate(self._order):
                self._rank[v] = i
    def order(self):  # O(V)
        """Returns a topological order if the digraph has one, None otherwise."""
        return self._order
    def hasOrder(self):  # O(1)
        """Does the digraph have a topological order?"""
        return self._order is not None
    def rank(self, v):  # O(1)
        """The rank of vertex v in the topological order; -1 if the digraph is not a DAG."""
        self._validateVertex(v)
        if self.hasOrder():
            return self._rank[v]
        return -1
    def _validateVertex(self, v):
        """raise an exception unless 0 <= v < V."""
        V = len(self._rank)
        if v < 0 or v >= V:
            raise Exception("vertex {} is not between 0 and {}".format(v, V - 1))
# Copyright 2002-2016, Robert Sedgewick and Kevin Wayne.
# Copyright 2002-2019, DV Klopfenstein, Python port
| gpl-2.0 |
antepsis/anteplahmacun | sympy/external/tests/test_codegen.py | 73 | 11832 | # This tests the compilation and execution of the source code generated with
# utilities.codegen. The compilation takes place in a temporary directory that
# is removed after the test. By default the test directory is always removed,
# but this behavior can be changed by setting the environment variable
# SYMPY_TEST_CLEAN_TEMP to:
# export SYMPY_TEST_CLEAN_TEMP=always : the default behavior.
# export SYMPY_TEST_CLEAN_TEMP=success : only remove the directories of working tests.
# export SYMPY_TEST_CLEAN_TEMP=never : never remove the directories with the test code.
# When a directory is not removed, the necessary information is printed on
# screen to find the files that belong to the (failed) tests. If a test does
# not fail, py.test captures all the output and you will not see the directories
# corresponding to the successful tests. Use the --nocapture option to see all
# the output.
# All tests below have a counterpart in utilities/test/test_codegen.py. In the
# latter file, the resulting code is compared with predefined strings, without
# compilation or execution.
# All the generated Fortran code should conform with the Fortran 95 standard,
# and all the generated C code should be ANSI C, which facilitates the
# incorporation in various projects. The tests below assume that the binary cc
# is somewhere in the path and that it can compile ANSI C code.
from __future__ import print_function
from sympy.abc import x, y, z
from sympy.utilities.pytest import skip
from sympy.utilities.codegen import codegen, make_routine, get_code_generator
import sys
import os
import tempfile
import subprocess
# templates for the main program that will test the generated code.
main_template = {}
main_template['F95'] = """
program main
include "codegen.h"
integer :: result;
result = 0
%(statements)s
call exit(result)
end program
"""
main_template['C'] = """
#include "codegen.h"
#include <stdio.h>
#include <math.h>
int main() {
int result = 0;
%(statements)s
return result;
}
"""
# templates for the numerical tests
numerical_test_template = {}
numerical_test_template['C'] = """
if (fabs(%(call)s)>%(threshold)s) {
printf("Numerical validation failed: %(call)s=%%e threshold=%(threshold)s\\n", %(call)s);
result = -1;
}
"""
numerical_test_template['F95'] = """
if (abs(%(call)s)>%(threshold)s) then
write(6,"('Numerical validation failed:')")
write(6,"('%(call)s=',e15.5,'threshold=',e15.5)") %(call)s, %(threshold)s
result = -1;
end if
"""
# command sequences for supported compilers
compile_commands = {}
compile_commands['cc'] = [
"cc -c codegen.c -o codegen.o",
"cc -c main.c -o main.o",
"cc main.o codegen.o -lm -o test.exe"
]
compile_commands['gfortran'] = [
"gfortran -c codegen.f90 -o codegen.o",
"gfortran -ffree-line-length-none -c main.f90 -o main.o",
"gfortran main.o codegen.o -o test.exe"
]
compile_commands['g95'] = [
"g95 -c codegen.f90 -o codegen.o",
"g95 -ffree-line-length-huge -c main.f90 -o main.o",
"g95 main.o codegen.o -o test.exe"
]
compile_commands['ifort'] = [
"ifort -c codegen.f90 -o codegen.o",
"ifort -c main.f90 -o main.o",
"ifort main.o codegen.o -o test.exe"
]
combinations_lang_compiler = [
('C', 'cc'),
('F95', 'ifort'),
('F95', 'gfortran'),
('F95', 'g95')
]
def try_run(commands):
"""Run a series of commands and only return True if all ran fine."""
null = open(os.devnull, 'w')
for command in commands:
retcode = subprocess.call(command, stdout=null, shell=True,
stderr=subprocess.STDOUT)
if retcode != 0:
return False
return True
def run_test(label, routines, numerical_tests, language, commands, friendly=True):
"""A driver for the codegen tests.
This driver assumes that a compiler ifort is present in the PATH and that
ifort is (at least) a Fortran 90 compiler. The generated code is written in
a temporary directory, together with a main program that validates the
generated code. The test passes when the compilation and the validation
run correctly.
"""
# Check input arguments before touching the file system
language = language.upper()
assert language in main_template
assert language in numerical_test_template
    # Check that environment variable makes sense
clean = os.getenv('SYMPY_TEST_CLEAN_TEMP', 'always').lower()
if clean not in ('always', 'success', 'never'):
raise ValueError("SYMPY_TEST_CLEAN_TEMP must be one of the following: 'always', 'success' or 'never'.")
# Do all the magic to compile, run and validate the test code
# 1) prepare the temporary working directory, switch to that dir
work = tempfile.mkdtemp("_sympy_%s_test" % language, "%s_" % label)
oldwork = os.getcwd()
os.chdir(work)
# 2) write the generated code
if friendly:
# interpret the routines as a name_expr list and call the friendly
# function codegen
codegen(routines, language, "codegen", to_files=True)
else:
code_gen = get_code_generator(language, "codegen")
code_gen.write(routines, "codegen", to_files=True)
# 3) write a simple main program that links to the generated code, and that
# includes the numerical tests
test_strings = []
for fn_name, args, expected, threshold in numerical_tests:
call_string = "%s(%s)-(%s)" % (
fn_name, ",".join(str(arg) for arg in args), expected)
if language == "F95":
call_string = fortranize_double_constants(call_string)
threshold = fortranize_double_constants(str(threshold))
test_strings.append(numerical_test_template[language] % {
"call": call_string,
"threshold": threshold,
})
if language == "F95":
f_name = "main.f90"
elif language == "C":
f_name = "main.c"
else:
raise NotImplementedError(
"FIXME: filename extension unknown for language: %s" % language)
with open(f_name, "w") as f:
f.write(
main_template[language] % {'statements': "".join(test_strings)})
# 4) Compile and link
compiled = try_run(commands)
# 5) Run if compiled
if compiled:
executed = try_run(["./test.exe"])
else:
executed = False
# 6) Clean up stuff
if clean == 'always' or (clean == 'success' and compiled and executed):
def safe_remove(filename):
if os.path.isfile(filename):
os.remove(filename)
safe_remove("codegen.f90")
safe_remove("codegen.c")
safe_remove("codegen.h")
safe_remove("codegen.o")
safe_remove("main.f90")
safe_remove("main.c")
safe_remove("main.o")
safe_remove("test.exe")
os.chdir(oldwork)
os.rmdir(work)
else:
print("TEST NOT REMOVED: %s" % work, file=sys.stderr)
os.chdir(oldwork)
# 7) Do the assertions in the end
assert compiled, "failed to compile %s code with:\n%s" % (
language, "\n".join(commands))
assert executed, "failed to execute %s code from:\n%s" % (
language, "\n".join(commands))
def fortranize_double_constants(code_string):
"""
Replaces every literal float with literal doubles
"""
import re
    pattern_exp = re.compile(r'\d+(\.)?\d*[eE]-?\d+')
    pattern_float = re.compile(r'\d+\.\d*(?!\d*d)')
def subs_exp(matchobj):
return re.sub('[eE]', 'd', matchobj.group(0))
def subs_float(matchobj):
return "%sd0" % matchobj.group(0)
code_string = pattern_exp.sub(subs_exp, code_string)
code_string = pattern_float.sub(subs_float, code_string)
return code_string
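# Quick sanity check of the helper above (an added illustration, not an original test case):
assert fortranize_double_constants("2.25*x + 1.5e-3") == "2.25d0*x + 1.5d-3"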
def is_feasible(language, commands):
# This test should always work, otherwise the compiler is not present.
routine = make_routine("test", x)
numerical_tests = [
("test", ( 1.0,), 1.0, 1e-15),
("test", (-1.0,), -1.0, 1e-15),
]
try:
run_test("is_feasible", [routine], numerical_tests, language, commands,
friendly=False)
return True
except AssertionError:
return False
valid_lang_commands = []
invalid_lang_compilers = []
for lang, compiler in combinations_lang_compiler:
commands = compile_commands[compiler]
if is_feasible(lang, commands):
valid_lang_commands.append((lang, commands))
else:
invalid_lang_compilers.append((lang, compiler))
# We test all language-compiler combinations, just to report what is skipped
def test_C_cc():
if ("C", 'cc') in invalid_lang_compilers:
skip("`cc' command didn't work as expected")
def test_F95_ifort():
if ("F95", 'ifort') in invalid_lang_compilers:
skip("`ifort' command didn't work as expected")
def test_F95_gfortran():
if ("F95", 'gfortran') in invalid_lang_compilers:
skip("`gfortran' command didn't work as expected")
def test_F95_g95():
if ("F95", 'g95') in invalid_lang_compilers:
skip("`g95' command didn't work as expected")
# Here comes the actual tests
def test_basic_codegen():
numerical_tests = [
("test", (1.0, 6.0, 3.0), 21.0, 1e-15),
("test", (-1.0, 2.0, -2.5), -2.5, 1e-15),
]
name_expr = [("test", (x + y)*z)]
for lang, commands in valid_lang_commands:
run_test("basic_codegen", name_expr, numerical_tests, lang, commands)
def test_intrinsic_math1_codegen():
# not included: log10
from sympy import acos, asin, atan, ceiling, cos, cosh, floor, log, ln, \
sin, sinh, sqrt, tan, tanh, N
name_expr = [
("test_fabs", abs(x)),
("test_acos", acos(x)),
("test_asin", asin(x)),
("test_atan", atan(x)),
("test_cos", cos(x)),
("test_cosh", cosh(x)),
("test_log", log(x)),
("test_ln", ln(x)),
("test_sin", sin(x)),
("test_sinh", sinh(x)),
("test_sqrt", sqrt(x)),
("test_tan", tan(x)),
("test_tanh", tanh(x)),
]
numerical_tests = []
for name, expr in name_expr:
for xval in 0.2, 0.5, 0.8:
expected = N(expr.subs(x, xval))
numerical_tests.append((name, (xval,), expected, 1e-14))
for lang, commands in valid_lang_commands:
if lang == "C":
name_expr_C = [("test_floor", floor(x)), ("test_ceil", ceiling(x))]
else:
name_expr_C = []
run_test("intrinsic_math1", name_expr + name_expr_C,
numerical_tests, lang, commands)
def test_instrinsic_math2_codegen():
# not included: frexp, ldexp, modf, fmod
from sympy import atan2, N
name_expr = [
("test_atan2", atan2(x, y)),
("test_pow", x**y),
]
numerical_tests = []
for name, expr in name_expr:
for xval, yval in (0.2, 1.3), (0.5, -0.2), (0.8, 0.8):
expected = N(expr.subs(x, xval).subs(y, yval))
numerical_tests.append((name, (xval, yval), expected, 1e-14))
for lang, commands in valid_lang_commands:
run_test("intrinsic_math2", name_expr, numerical_tests, lang, commands)
def test_complicated_codegen():
from sympy import sin, cos, tan, N
name_expr = [
("test1", ((sin(x) + cos(y) + tan(z))**7).expand()),
("test2", cos(cos(cos(cos(cos(cos(cos(cos(x + y + z))))))))),
]
numerical_tests = []
for name, expr in name_expr:
for xval, yval, zval in (0.2, 1.3, -0.3), (0.5, -0.2, 0.0), (0.8, 2.1, 0.8):
expected = N(expr.subs(x, xval).subs(y, yval).subs(z, zval))
numerical_tests.append((name, (xval, yval, zval), expected, 1e-12))
for lang, commands in valid_lang_commands:
run_test(
"complicated_codegen", name_expr, numerical_tests, lang, commands)
| bsd-3-clause |
CoolProp/CoolProp | dev/scripts/viscosity_builder.py | 2 | 3895 | from math import sqrt, exp
from CoolProp.CoolProp import Props
import numpy as np
import matplotlib.pyplot as plt
from scipy.odr import *
from math import log
E_K = {'REFPROP-Ammonia': 386,
'REFPROP-Argon': 143.2
}
SIGMA = {'REFPROP-Ammonia': 0.2957,
'REFPROP-Argon': 0.335
}
E_K['REFPROP-Propane'] = 263.88
SIGMA['REFPROP-Propane'] = 0.49748
E_K['REFPROP-R32'] = 289.65
SIGMA['REFPROP-R32'] = 0.4098
E_K['REFPROP-R245fa'] = 329.72
SIGMA['REFPROP-R245fa'] = 0.5529
def viscosity_dilute(fluid, T, e_k, sigma):
"""
T in [K], e_k in [K], sigma in [nm]
viscosity returned is in [Pa-s]
"""
Tstar = T / e_k
molemass = Props(fluid, 'molemass')
if fluid == 'Propane' or fluid == 'REFPROP-Propane':
a = [0.25104574, -0.47271238, 0, 0.060836515, 0]
theta_star = exp(a[0] * pow(log(Tstar), 0) + a[1] * pow(log(Tstar), 1) + a[3] * pow(log(Tstar), 3));
eta_star = 0.021357 * sqrt(molemass * T) / (pow(sigma, 2) * theta_star) / 1e6;
return eta_star
# From Neufeld, 1972, Journal of Chemical Physics - checked coefficients
OMEGA_2_2 = 1.16145 * pow(Tstar, -0.14874) + 0.52487 * exp(-0.77320 * Tstar) + 2.16178 * exp(-2.43787 * Tstar)
# Using the leading constant from McLinden, 2000 since the leading term from Huber 2003 gives crazy values
eta_star = 26.692e-3 * sqrt(molemass * T) / (pow(sigma, 2) * OMEGA_2_2) / 1e6
return eta_star
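# Illustrative call (a sketch; availability depends on the installed REFPROP fluids):
#   viscosity_dilute('REFPROP-Propane', 300.0, E_K['REFPROP-Propane'], SIGMA['REFPROP-Propane'])
# returns the dilute-gas viscosity in Pa-s (on the order of 1e-5 Pa-s for light gases).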
def viscosity_linear(fluid, T, rho, e_k, sigma):
"""
Implements the method of Vogel 1998 (Propane) for the linear part
"""
N_A = 6.02214129e23
molemass = Props(fluid, 'molemass')
Tstar = T / e_k
b = [-19.572881, 219.73999, -1015.3226, 2471.01251, -3375.1717, 2491.6597, -787.26086, 14.085455, -0.34664158]
s = sum([b[i] * pow(Tstar, -0.25 * i) for i in range(7)])
B_eta_star = s + b[7] * pow(Tstar, -2.5) + b[8] * pow(Tstar, -5.5) # //[no units]
B_eta = N_A * pow(sigma / 1e9, 3) * B_eta_star # [m3/mol]
return viscosity_dilute(fluid, T, e_k, sigma) * B_eta * rho / molemass * 1000
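# The value returned above is the initial-density (linear-in-rho) viscosity contribution,
# i.e. eta_0(T) * B_eta(T) * rho_molar, where rho / molemass * 1000 converts kg/m3 to mol/m3.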
from PDSim.misc.datatypes import Collector
RHO = Collector()
TT = Collector()
DELTA = Collector()
TAU = Collector()
VV = Collector()
VV0 = Collector()
VV1 = Collector()
VVH = Collector()
fluid = 'REFPROP-R32'
Tc = Props(fluid, 'Tcrit')
rhoc = Props(fluid, 'rhocrit')
for T in np.linspace(290, Props(fluid, 'Tcrit') - 0.1, 100):
rhoV = Props('D', 'T', T, 'Q', 1, fluid)
rhoL = Props('D', 'T', T, 'Q', 0, fluid)
rhomax = Props('D', 'T', Props(fluid, 'Tmin'), 'Q', 0, fluid)
for rho in list(np.linspace(rhoL, rhomax, 100)): # +list(np.linspace(rhoV,0.0001,100)):
# for rho in list(np.linspace(rhoV,0.0001,100)):
mu_0 = viscosity_dilute(fluid, T, E_K[fluid], SIGMA[fluid])
mu_1 = viscosity_linear(fluid, T, rho, E_K[fluid], SIGMA[fluid])
mu = Props('V', 'T', T, 'D', rho, fluid)
VV << mu
VV0 << mu_0
VV1 << mu_1
VVH << mu - mu_0 - mu_1
TT << T
RHO << rho
DELTA << rho / rhoc
TAU << Tc / T
def f_RHS(E, DELTA_TAU, VV):
k = 0
sum = 0
DELTA = DELTA_TAU[0, :]
TAU = DELTA_TAU[1, :]
for i in range(2, 5):
for j in range(3):
sum += E[k] * DELTA**i / TAU**j
k += 1
# f1,f2,f3,g1,g2 = E[k],E[k+1],E[k+2],E[k+3],E[k+4]
# DELTA0 = g1*(1+g2*np.sqrt(TAU))
# sum += (f1+f2/TAU+f3/TAU/TAU)*(DELTA/(DELTA0-DELTA)-DELTA/DELTA0)
print('%s %%' % np.mean(np.abs(((sum / VV - 1) * 100))))
return sum
log_muH = np.log(VVH.v().T)
x = np.c_[DELTA.v().T, TAU.v().T].T
y = VVH.v()
linear = Model(f_RHS, extra_args=(y,))
mydata = Data(x, y)
myodr = ODR(mydata, linear, beta0=np.array([0.1] * 17),)
myoutput = myodr.run()
E = myoutput.beta
print(E)
#plt.plot(TT.vec, y,'b.',TT.vec, f_RHS(E, x, y),'r.')
# plt.show()
# plt.plot()
plt.plot(y.T, f_RHS(E, x, y))
plt.show()
| mit |
gaddman/ansible | lib/ansible/plugins/action/voss_config.py | 63 | 4227 | #
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import time
import glob
from ansible.plugins.action.normal import ActionModule as _ActionModule
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.utils.vars import merge_hash
PRIVATE_KEYS_RE = re.compile('__.+__')
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
if self._task.args.get('src'):
try:
self._handle_template()
except ValueError as exc:
return dict(failed=True, msg=to_text(exc))
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
if self._task.args.get('backup') and result.get('__backup__'):
# User requested backup and no error occurred in module.
# NOTE: If there is a parameter error, _backup key may not be in results.
filepath = self._write_backup(task_vars['inventory_hostname'],
result['__backup__'])
result['backup_path'] = filepath
# strip out any keys that have two leading and two trailing
# underscore characters
for key in list(result.keys()):
if PRIVATE_KEYS_RE.match(key):
del result[key]
return result
def _get_working_path(self):
cwd = self._loader.get_basedir()
if self._task._role is not None:
cwd = self._task._role._role_path
return cwd
def _write_backup(self, host, contents):
backup_path = self._get_working_path() + '/backup'
if not os.path.exists(backup_path):
os.mkdir(backup_path)
for fn in glob.glob('%s/%s*' % (backup_path, host)):
os.remove(fn)
tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
filename = '%s/%s_config.%s' % (backup_path, host, tstamp)
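        # e.g. backup/host1_config.2018-01-31@12:00:00 (illustrative host name and timestamp)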
open(filename, 'w').write(contents)
return filename
def _handle_template(self):
src = self._task.args.get('src')
working_path = self._get_working_path()
        if os.path.isabs(src) or urlsplit(src).scheme:
source = src
else:
source = self._loader.path_dwim_relative(working_path, 'templates', src)
if not source:
source = self._loader.path_dwim_relative(working_path, src)
if not os.path.exists(source):
raise ValueError('path specified in src not found')
try:
with open(source, 'r') as f:
template_data = to_text(f.read())
except IOError:
return dict(failed=True, msg='unable to load src file')
# Create a template search path in the following order:
# [working_path, self_role_path, dependent_role_paths, dirname(source)]
searchpath = [working_path]
if self._task._role is not None:
searchpath.append(self._task._role._role_path)
if hasattr(self._task, "_block:"):
dep_chain = self._task._block.get_dep_chain()
if dep_chain is not None:
for role in dep_chain:
searchpath.append(role._role_path)
searchpath.append(os.path.dirname(source))
self._templar.environment.loader.searchpath = searchpath
self._task.args['src'] = self._templar.template(template_data)
| gpl-3.0 |
WizeCommerce/medusa | setup.py | 1 | 1292 | #!/usr/bin/env python
import os
from setuptools import setup, find_packages
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "thrift_medusa",
version = "0.0.1",
author = "Samir Faci",
author_email = "",
description = ("Language agnostic tool for packaging of thrift based services and artifacts"),
license = "Apache Software License",
url = "https://github.com/WizeCommerce/medusa",
packages=['thrift_medusa', 'tests'],
#packages = find_packages(exclude="test"),
package_data = {'': ['*.yaml']},
long_description=read('README.md'),
install_requires=['lxml','paramiko','argparse','pyyaml','jinja2'],
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"License :: OSI Approved :: Apache Software License",
],
#entry_points = { 'console_scripts': ['medusa = thrift_medusa:main', 'samir = thrift_medusa.thrift_medusa:main'] },
#scripts = ['./publishClients.py'],
test_suite='tests',
zip_safe = True
)
| apache-2.0 |
sanyaade-teachings/turbulenz_engine | scripts/buildassets.py | 5 | 24091 | #!/usr/bin/env python
# Copyright (c) 2013-2014 Turbulenz Limited
# pylint:disable=C0330
from sys import argv, stdout
from json import loads as load_json, dumps as dump_json
from yaml import load as load_yaml
from os.path import join as path_join, exists as path_exists, splitext, basename, normpath, getmtime, dirname
from os import makedirs, listdir, environ, unlink as remove_file, getenv as os_getenv, walk as os_walk, rmdir
from shutil import copy2 as copy_file
from hashlib import md5
from base64 import urlsafe_b64encode
from subprocess import Popen, PIPE, STDOUT
from platform import system, machine
from threading import Thread, Lock
from time import sleep
import multiprocessing
import argparse
import errno
import platform
COLORED_OUTPUT = stdout.isatty() and (platform.system() != 'Windows' or 'ANSICON' in environ)
def warning(message):
if COLORED_OUTPUT:
print '\033[1m\033[33m[WARNING]\033[0m - %s' % message
else:
print '[WARNING] - %s' % message
def error(message):
if COLORED_OUTPUT:
print '\033[1m\033[31m[ERROR]\033[0m - %s' % message
else:
print ' >> [ERROR] - %s' % message
# pylint: disable=W0231
class CalledProcessError(Exception):
def __init__(self, retcode, cmd, output=None):
self.retcode = retcode
self.cmd = cmd
self.output = output
def __str__(self):
cmd = self.cmd
if isinstance(cmd, list):
cmd = ' '.join(cmd)
return "Command '%s' returned non-zero exit status %d" % (cmd, self.retcode)
# pylint: enable=W0231
# pylint: disable=C0103
def sh(command, cwd=None, env=None, verbose=True, console=False, shell=False):
command_list = command
command_string = ' '.join(command)
if verbose:
print 'Executing: %s' % command_string
try:
if console:
process = Popen(command_list, stderr=STDOUT, cwd=cwd, shell=shell, env=env)
else:
process = Popen(command_list, stdout=PIPE, stderr=STDOUT, cwd=cwd, shell=shell, env=env)
except OSError:
raise CalledProcessError(1, command_list, output='Could not start process')
output, _ = process.communicate()
output = str(output)
retcode = process.poll()
if retcode:
raise CalledProcessError(retcode, command_list, output=output)
if output is not None:
output = output.rstrip()
return output
# pylint: enable=C0103
def get_file_hash(path):
with open(path, 'r') as f:
m = md5()
m.update(f.read())
digest = m.digest()
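        # md5 yields 16 bytes -> 24 base64 chars; stripping the two '=' pads leaves a 22-char token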
return urlsafe_b64encode(digest).rstrip('=')
class Source(object):
def __init__(self, path, assets_paths, old_hash=None):
self.path = path
for p in assets_paths:
self.asset_path = path_join(p, path)
if path_exists(self.asset_path):
break
else:
raise IOError('Source asset path not found for %s' % self.path)
if old_hash:
self.hash = old_hash
self.hash_checked = False
self.changed = None
else:
self.hash = self.calculate_hash()
self.hash_checked = True
self.changed = True
self.built = False
def has_changed(self):
if self.hash_checked:
return self.changed
else:
new_hash = self.calculate_hash()
self.changed = (new_hash != self.hash)
self.hash = new_hash
self.hash_checked = True
return self.changed
def calculate_hash(self):
return get_file_hash(self.asset_path)
class SourceList(object):
def __init__(self, source_hashes, assets_paths):
self.assets_paths = assets_paths
source_list = {}
for (path, file_hash) in source_hashes.iteritems():
source_list[path] = Source(path, assets_paths, file_hash)
self.source_list = source_list
def get_source(self, path):
if path not in self.source_list:
self.source_list[path] = Source(path, self.assets_paths)
return self.source_list[path]
def get_hashes(self):
return dict((k, v.hash) for (k, v) in self.source_list.iteritems())
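# Minimal sketch of the incremental-build flow (the asset path below is hypothetical):
#   sources = SourceList({}, ['assets'])             # no stored hashes -> everything counts as changed
#   src = sources.get_source('textures/stone.png')   # resolved against the assets paths
#   if src.has_changed():
#       ...  # rebuild the asset
#   persisted = sources.get_hashes()                 # save as source_hashes for the next run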
class Tool(object):
def __init__(self, name, path):
super(Tool, self).__init__()
self.name = name
self.path = path
self.changed = None
def check_version(self, build_path, verbose=False):
version_file_path = path_join(build_path, self.name + '.version')
try:
with open(version_file_path, 'r') as f:
old_version = f.read()
except IOError:
old_version = None
version = self.get_version(version_file_path)
self.changed = (version != old_version)
if verbose and version and self.changed:
print self.name + ' tool version changed ' + version
return self.changed
def get_version(self, version_file_path):
raise NotImplementedError()
def has_changed(self):
if self.changed is None:
raise ValueError('Tool %s has not called check_version' % self.name)
else:
return self.changed
@staticmethod
def run_sh(cmd, verbose):
try:
sh(cmd, verbose=verbose)
return True
except CalledProcessError as e:
error('command %s failed\n%s' % (' '.join(e.cmd), e.output))
raise
# pylint: disable=R0201
def check_external_deps(self, src, dst, args):
return False
# pylint: enable=R0201
class CopyTool(object):
def __init__(self, name='copy', path=None):
self.name = name
@staticmethod
def run(src, dst, verbose=False, args=None):
if verbose:
print 'Copy ' + src + ' -> ' + dst
copy_file(src, dst)
return True
@staticmethod
def check_version(build_path, verbose=False):
return False
@staticmethod
def has_changed():
return False
@staticmethod
def check_external_deps(src, dst, args):
return False
class Tga2Json(Tool):
def get_version(self, version_file_path):
try:
version_string = sh([self.path, '--version'], verbose=False)
except CalledProcessError:
warning('could not launch ImageMagick, TGA support will not be available')
return None
version = version_string.splitlines()[0]
with open(version_file_path, 'w') as f:
f.write(version)
return version
def run(self, src, dst, verbose=False, args=None):
cmd = [self.path, '-quality', '105', src, dst]
if args:
cmd.extend(args)
return self.run_sh(cmd, verbose=verbose)
class PythonTool(Tool):
def __init__(self, name, path=None, module_name=None):
if module_name:
self.base_args = ['python', '-m', module_name]
else:
self.base_args = ['python', self.path]
super(PythonTool, self).__init__(name, path)
def get_version(self, version_file_path):
cmd = self.base_args[:]
cmd.extend(['--version', '-o', version_file_path])
return sh(cmd, verbose=False)
def run(self, src, dst, verbose=False, args=None):
cmd = self.base_args[:]
cmd.extend(['-i', src, '-o', dst])
if args:
cmd.extend(args)
return self.run_sh(cmd, verbose=verbose)
class Dae2Json(PythonTool):
def __init__(self, name, path=None, module_name=None, nvtristrip=None):
super(Dae2Json, self).__init__(name, path, module_name)
self.nvtristrip = nvtristrip
def get_version(self, version_file_path):
cmd = self.base_args[:]
cmd.extend(['--version', '-o', version_file_path])
return sh(cmd, verbose=False)
def run(self, src, dst, verbose=False, args=None):
cmd = self.base_args[:]
cmd.extend(['-i', src, '-o', dst])
if self.nvtristrip:
cmd.extend(['--nvtristrip', self.nvtristrip])
if args:
cmd.extend(args)
return self.run_sh(cmd, verbose=verbose)
class Cgfx2JsonTool(Tool):
def __init__(self, name, path, cgfx_flags):
super(Cgfx2JsonTool, self).__init__(name, path)
self.cgfx_flags = cgfx_flags or []
def get_version(self, version_file_path):
try:
version = sh([self.path, '--version'], verbose=False)
except CalledProcessError:
error('could not launch cgfx2json, CGFX support will be unavailable.')
return None
with open(version_file_path, 'w') as f:
f.write(version)
return version
def run(self, src, dst, verbose=False, args=None):
cmd = [self.path, '-i', src, '-o', dst]
if args:
cmd.extend(args)
cmd.extend(self.cgfx_flags)
if verbose:
print "CMD: %s" % " ".join(cmd)
return self.run_sh(cmd, verbose=verbose)
def check_external_deps(self, src, dst, args):
cmd = [self.path, '-i', src, '-M']
try:
dep_files = sh(cmd, verbose=False)
except CalledProcessError as e:
error('deps command %s failed, ignoring external deps\n%s' % (' '.join(e.cmd), e.output))
return False
if not dep_files:
return False
dst_mtime = getmtime(dst)
for filename in dep_files.replace('\r\n', '\n').split('\n'):
if getmtime(filename) > dst_mtime:
return True
return False
class Tools(object):
def __init__(self, args, build_path):
exe = ''
system_name = system()
if system_name == 'Linux':
if 'x86_64' == machine():
turbulenz_os = 'linux64'
else:
turbulenz_os = 'linux32'
elif system_name == 'Windows':
turbulenz_os = 'win32'
# if 'x86' == machine():
# turbulenz_os = 'win32'
# else:
# turbulenz_os = 'win64'
exe = '.exe'
elif system_name == 'Darwin':
turbulenz_os = 'macosx'
root = args.root
verbose = args.verbose
if args.imagemagick_convert:
imagemagick_convert_path = args.imagemagick_convert
else:
if system_name == 'Windows':
default_convert_path = path_join(root, 'external', 'ImageMagick', 'bin', 'win32', 'convert.exe')
else:
default_convert_path = 'convert'
imagemagick_convert_path = os_getenv('TURBULENZ_IMAGEMAGICK_CONVERT', default_convert_path)
nvtristrip = path_join(root, 'tools', 'bin', turbulenz_os, 'NvTriStripper' + exe)
copy = CopyTool()
tga2png = Tga2Json('tga2png', imagemagick_convert_path)
dae2json = Dae2Json('dae2json', module_name='turbulenz_tools.tools.dae2json', nvtristrip=nvtristrip)
obj2json = PythonTool('obj2json', module_name='turbulenz_tools.tools.obj2json')
material2json = PythonTool('material2json', module_name='turbulenz_tools.tools.material2json')
bmfont2json = PythonTool('bmfont2json', module_name='turbulenz_tools.tools.bmfont2json')
cgfx2json = Cgfx2JsonTool( \
'cgfx2json',
path_join(root, 'tools', 'bin', turbulenz_os, 'cgfx2json' + exe),
args.cgfx_flag
)
copy.check_version(build_path, verbose)
tga2png.check_version(build_path, verbose)
dae2json.check_version(build_path, verbose)
obj2json.check_version(build_path, verbose)
material2json.check_version(build_path, verbose)
bmfont2json.check_version(build_path, verbose)
cgfx2json.check_version(build_path, verbose)
self.asset_tool_map = {
'.png': copy,
'.dds': copy,
'.jpg': copy,
'.ogg': copy,
'.wav': copy,
'.mp3': copy,
'.m4a': copy,
'.aac': copy,
'.mp4': copy,
'.m4v': copy,
'.webm': copy,
'.json': copy,
'.tar': copy,
'.tga': tga2png,
'.dae': dae2json,
'.obj': obj2json,
'.material': material2json,
'.bmfont': bmfont2json,
'.fnt': bmfont2json,
'.cgfx': cgfx2json
}
self.asset_dst_ext = {
'.dae': '.dae.json',
'.obj': '.obj.json',
'.material': '.material.json',
'.fnt': '.fnt.json',
'.cgfx': '.cgfx.json',
'.tga': '.tga.png'
}
def get_asset_tool(self, path):
ext = splitext(path)[1]
try:
return self.asset_tool_map[ext]
except KeyError:
error('No tool registered for file extension %s' % ext)
exit(1)
def get_asset_destination(self, path):
path_split = splitext(path)
try:
return path_split[0] + self.asset_dst_ext[path_split[1]]
except KeyError:
return path
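# Usage sketch (hypothetical asset path): the Tools registry maps a source
# file's extension to both its converter and its built filename.
#
#   tools = Tools(args, '_build/assets')
#   tools.get_asset_tool('models/duck.dae')         # -> dae2json
#   tools.get_asset_destination('models/duck.dae')  # -> 'models/duck.dae.json'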
class AssetInfo(object):
def __init__(self, yaml_info):
if isinstance(yaml_info, str):
path = normpath(yaml_info)
self.path = path
self.deps = [path]
self.install = True
self.args = None
self.logical_path = yaml_info
else:
self.path = normpath(yaml_info['path'])
deps = yaml_info.get('deps', [])
self.deps = [normpath(d) for d in deps]
self.deps.append(self.path)
self.install = yaml_info.get('install', True)
self.args = yaml_info.get('args')
self.logical_path = yaml_info.get('logical_path', yaml_info['path'])
self.build_path = None
def build_asset(tool, asset_path, dst_path, verbose, args):
try:
create_dir(dirname(dst_path))
tool.run(asset_path, dst_path, verbose, args)
except:
if path_exists(dst_path):
remove_file(dst_path)
raise
def check_and_build_asset(asset_info, source_list, tools, build_path, verbose):
src = asset_info.path
asset_tool = tools.get_asset_tool(src)
dst_path = path_join(build_path, tools.get_asset_destination(src))
asset_info.build_path = dst_path
create_dir(dirname(dst_path))
source = source_list.get_source(src)
deps = [source_list.get_source(path) for path in asset_info.deps]
if any([dep.has_changed() for dep in deps]) or asset_tool.has_changed() or not path_exists(dst_path) \
or asset_tool.check_external_deps(source.asset_path, dst_path, asset_info.args):
stdout.write('[%s] %s\n' % (asset_tool.name.upper(), src))
# asset_tool.run(source.asset_path, dst_path, verbose, asset_info.args)
build_asset(asset_tool,
source.asset_path,
dst_path,
verbose,
asset_info.args)
source.built = True
return True
else:
source.built = True
return False
def install(install_asset_info, install_path):
old_install_files = listdir(install_path)
mapping = {}
for asset_info in install_asset_info:
if not asset_info.install:
continue
try:
file_hash = get_file_hash(asset_info.build_path)
logical_path = asset_info.logical_path
physical_path = '%s_%s.%s' % (splitext(basename(logical_path))[0],
file_hash,
asset_info.build_path.split('.', 1)[1])
copy_file(asset_info.build_path, path_join(install_path, physical_path))
mapping[logical_path] = physical_path
try:
old_install_files.remove(physical_path)
except ValueError:
pass
except (IOError, TypeError):
error('could not install %s' % asset_info.path)
for path in old_install_files:
asset_install_path = path_join(install_path, path)
print 'Removing old install file ' + asset_install_path
remove_file(asset_install_path)
return mapping
def remove_old_build_files(build_asset_info, build_path):
old_build_files = []
excludes = [
path_join(build_path, 'sourcehashes.json'),
path_join(build_path, 'cgfx2json.version'),
path_join(build_path, 'json2json.version'),
path_join(build_path, 'obj2json.version'),
path_join(build_path, 'tga2png.version'),
path_join(build_path, 'bmfont2json.version'),
path_join(build_path, 'dae2json.version'),
path_join(build_path, 'material2json.version')
]
for base, _, files in os_walk(build_path):
dir_files = [path_join(base, filename) for filename in files]
old_build_files.extend(f for f in dir_files if f not in excludes)
for asset_info in build_asset_info:
try:
old_build_files.remove(asset_info.build_path)
except ValueError:
pass
for path in old_build_files:
print 'Removing old build file ' + path
remove_file(path)
for base, _, _ in os_walk(build_path, topdown=False):
try:
rmdir(base)
except OSError:
pass
else:
print 'Removed old build directory ' + base
def create_dir(path):
try:
makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--root', required=True, help="Root path of Turbulenz Engine")
parser.add_argument('--assets-path', required=True, action='append', help="Path to root of source assets")
parser.add_argument('--build-path', default=path_join('_build', 'assets'), help="Path for intermediate build files")
parser.add_argument('--install-path', default='staticmax', help="Path to install output assets into")
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--imagemagick-convert', help="Path to ImageMagick convert executable (enables TGA support)")
parser.add_argument('--cgfx-flag', action='append',
help="argument to pass to cgfx2json tool")
try:
default_num_threads = multiprocessing.cpu_count()
except NotImplementedError:
default_num_threads = 1
parser.add_argument('-j', '--num-threads', help="Specify how many threads to use for building",
default=default_num_threads, type=int)
args = parser.parse_args(argv[1:])
assets_paths = [normpath(p) for p in args.assets_path]
base_build_path = normpath(args.build_path)
create_dir(base_build_path)
create_dir(args.install_path)
tools = Tools(args, base_build_path)
with open('deps.yaml', 'r') as f:
asset_build_info = load_yaml(f.read())
if asset_build_info:
asset_build_info = [AssetInfo(asset_info) for asset_info in asset_build_info]
else:
asset_build_info = []
try:
with open(path_join(base_build_path, 'sourcehashes.json'), 'r') as f:
source_list = SourceList(load_json(f.read()), assets_paths)
except IOError:
if args.verbose:
print 'No source hash file'
source_list = SourceList({}, assets_paths)
# Ensure all sources are in the source list so that the threads aren't writing to the list
for a in asset_build_info:
source_list.get_source(a.path)
class AssetBuildThread(Thread):
def __init__(self, asset_list, asset_list_mutex):
Thread.__init__(self)
self.asset_list = asset_list
self.mutex = asset_list_mutex
self.assets_rebuilt = 0
self.exit = False
self.error = None
def run(self):
while True:
if self.exit:
return 0
self.mutex.acquire(True)
try:
# Try to pull the head off the list; if all of its dependencies are already
# built then build it. This could iterate down the remaining list in case
# the head isn't buildable but things later in the list are
asset_info = self.asset_list[0]
deps = [source_list.get_source(path) for path in asset_info.deps if path != asset_info.path]
if any([not d.built for d in deps]):
self.mutex.release()
sleep(0.01)
continue
self.asset_list.pop(0)
self.mutex.release()
except IndexError:
self.mutex.release()
return 0
try:
rebuild = check_and_build_asset(asset_info,
source_list,
tools,
base_build_path,
args.verbose)
except CalledProcessError as e:
self.error = '%s - Tool failed - %s' % (asset_info.path, str(e))
return 1
except IOError as e:
self.error = str(e)
return 1
if rebuild:
self.assets_rebuilt += 1
num_threads = args.num_threads
# Sort assets by dependencies
assets_to_build = []
while len(assets_to_build) != len(asset_build_info):
num_assets_sorted = len(assets_to_build)
for asset in asset_build_info:
if asset in assets_to_build:
continue
for dep in asset.deps:
if dep != asset.path and dep not in [a.path for a in assets_to_build]:
break
else:
assets_to_build.append(asset)
if num_assets_sorted == len(assets_to_build):
assets_left = [a for a in asset_build_info if a not in assets_to_build]
error('Detected cyclic dependencies between the following assets:\n%s' %
'\n'.join([a.path for a in assets_left]))
return 1
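# Worked example of the sort above: for assets A (no deps), B (deps: [A])
# and C (deps: [B]), the first pass appends A, the second B, the third C.
# If B and C depended on each other, no pass would grow the list and the
# cyclic-dependency error above would fire.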
# Create and start threads to build the assets in the sorted dependency list
asset_threads = []
asset_list_mutex = Lock()
for t in xrange(num_threads):
asset_threads.append(AssetBuildThread(assets_to_build, asset_list_mutex))
for t in xrange(num_threads):
asset_threads[t].start()
while any(a.isAlive() for a in asset_threads):
for t in xrange(num_threads):
asset_threads[t].join(0.1)
if not asset_threads[t].isAlive() and asset_threads[t].error:
# One thread has an error ask all the others to finish asap
for o in xrange(num_threads):
asset_threads[o].exit = True
# Update the stats on number of assets rebuilt
assets_rebuilt = 0
for t in xrange(num_threads):
assets_rebuilt += asset_threads[t].assets_rebuilt
# Dump the state of the build for partial rebuilds
with open(path_join(base_build_path, 'sourcehashes.json'), 'w') as f:
f.write(dump_json(source_list.get_hashes()))
# Check if any build threads failed and if so exit with an error
for t in xrange(num_threads):
if asset_threads[t].error:
error(asset_threads[t].error)
return 1
# Dump the mapping table for the built assets
print 'Installing assets and building mapping table...'
mapping = install(asset_build_info, args.install_path)
with open('mapping_table.json', 'w') as f:
f.write(dump_json({'urnmapping': mapping}))
# Cleanup any built files no longer referenced by the new mapping table
remove_old_build_files(asset_build_info, base_build_path)
print '%d assets rebuilt' % assets_rebuilt
print 'Assets build complete'
if __name__ == "__main__":
exit(main())
| mit |
erjohnso/ansible | lib/ansible/modules/cloud/ovirt/ovirt_snapshots_facts.py | 73 | 4381 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_snapshots_facts
short_description: Retrieve facts about one or more oVirt/RHV virtual machine snapshots
author: "Ondra Machacek (@machacekondra)"
version_added: "2.3"
description:
- "Retrieve facts about one or more oVirt/RHV virtual machine snapshots."
notes:
- "This module creates a new top-level C(ovirt_snapshots) fact, which
contains a list of snapshots."
options:
vm:
description:
- "Name of the VM with snapshot."
required: true
description:
description:
- "Description of the snapshot, can be used as glob expression."
snapshot_id:
description:
- "Id of the snaphost we want to retrieve facts about."
extends_documentation_fragment: ovirt_facts
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather facts about all snapshots whose description starts with C(update) for the VM named C(centos7):
- ovirt_snapshots_facts:
vm: centos7
description: update*
- debug:
var: ovirt_snapshots
'''
RETURN = '''
ovirt_snapshots:
description: "List of dictionaries describing the snapshot. Snapshot attribtues are mapped to dictionary keys,
all snapshot attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/snapshot."
returned: On success.
type: list
'''
import fnmatch
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_facts_full_argument_spec,
search_by_name,
)
def main():
argument_spec = ovirt_facts_full_argument_spec(
vm=dict(required=True),
description=dict(default=None),
snapshot_id=dict(default=None),
)
module = AnsibleModule(argument_spec)
check_sdk(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
vms_service = connection.system_service().vms_service()
vm_name = module.params['vm']
vm = search_by_name(vms_service, vm_name)
if vm is None:
raise Exception("VM '%s' was not found." % vm_name)
snapshots_service = vms_service.service(vm.id).snapshots_service()
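# The description filter below uses shell-style globbing via fnmatch, so a
# pattern such as 'update*' matches descriptions like 'update_2016_05'.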
if module.params['description']:
snapshots = [
e for e in snapshots_service.list()
if fnmatch.fnmatch(e.description, module.params['description'])
]
elif module.params['snapshot_id']:
snapshots = [
snapshots_service.snapshot_service(module.params['snapshot_id']).get()
]
else:
snapshots = snapshots_service.list()
module.exit_json(
changed=False,
ansible_facts=dict(
ovirt_snapshots=[
get_dict_of_struct(
struct=c,
connection=connection,
fetch_nested=module.params.get('fetch_nested'),
attributes=module.params.get('nested_attributes'),
) for c in snapshots
],
),
)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == '__main__':
main()
| gpl-3.0 |
pllim/stginga | experimental/plugins/MultiImage.py | 2 | 16207 | import logging
from math import cos, hypot, radians
from ginga import GingaPlugin
from ginga.gw import Widgets, Viewers
instructions = (
'To add images to the group, simply ensure that the plugin is active '
'and display the image in the main viewer.'
'\n\nThen move, drag, or edit the region as needed.'
'\n\nThe WCS options select the common frame of reference to use between'
' images.'
)
__all__ = ['MultiImage', 'Region']
_radius_scale = cos(radians(45))
_def_coords = 'wcs'
_coords_options = (
('wcs', 'Fix region to sky'),
('data', 'Fix region to pixels')
)
class RegionError(Exception):
"""Generic Region errors"""
class RegionConversionError(RegionError):
"""Could not convert between coordinates"""
class Region(object):
"""Region management
Attributes
----------
x, y, r: numbers
The x, y center location with radius r
coord: str
The coordinate system in use.
"""
def __init__(self, *args, **kwargs):
super(Region, self).__init__()
self.logger = kwargs.pop('logger', None)
if self.logger is None:
self.logger = logging.getLogger(__name__)
self.x = None
self.y = None
self.r = None
self.coord = None
self.image = None
if len(args) > 0 or len(kwargs) > 0:
self.set(*args, **kwargs)
def __call__(self, coord=None, image=None):
"""Return the region in the specified coordinates
Parameters
----------
coord: str
The coordinate system to return in.
image: `ginga image`
The reference image if conversion is needed.
Returns
-------
(x, y, r)
The center point and radius of the region.
"""
self.logger.debug('Called.')
convert = self.get_convert(to_coord=coord, image=image)
dx, dy = self.delta()
cx, cy = convert(self.x, self.y)
cx1, cy1 = convert(
self.x + dx,
self.y + dy
)
cr = hypot(cx1 - cx, cy1 - cy)
return (cx, cy, cr)
def set(self, x, y, r, coord, as_coord=None, image=None):
"""Set the region's center and radius
Parameters
----------
x, y, r: numbers
The center point and radius
coord: str
The coordinate system of the input numbers.
as_coord: str
The native coordinate system to use.
image: `ginga image`
The reference image.
"""
self.logger.debug('Called.')
self.x = x
self.y = y
self.r = r
self.coord = coord
if coord != as_coord:
self.x, self.y, self.r = self(coord=as_coord, image=image)
self.coord = as_coord
if image is not None:
self.image = image
def set_center(self, x, y, coord=None, image=None):
self.logger.debug('Called.')
convert = self.get_convert(from_coord=coord, image=image)
self.x, self.y = convert(x, y)
def set_bbox(self, x1, y1, x2, y2, coord=None, image=None):
convert = self.get_convert(from_coord=coord, image=image)
cx1, cy1 = convert(x1, y1)
cx2, cy2 = convert(x2, y2)
self.x = (cx1 + cx2) / 2
self.y = (cy1 + cy2) / 2
self.r = hypot(cx2 - self.x, cy2 - self.y)
def set_coords(self, coord, image=None):
self.x, self.y, self.r = self(coord=coord, image=image)
self.coord = coord
if image is not None:
self.image = image
def bbox(self, coord=None, image=None):
self.logger.debug('Called.')
convert = self.get_convert(to_coord=coord, image=image)
dx, dy = self.delta()
x1, y1 = convert(self.x - dx, self.y - dy)
x2, y2 = convert(self.x + dx, self.y + dy)
(x1, x2) = (x1, x2) if x1 <= x2 else (x2, x1)
(y1, y2) = (y1, y2) if y1 <= y2 else (y2, y1)
return (x1, y1, x2, y2)
def delta(self):
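# r * cos(45 deg) is the half-width of the axis-aligned square inscribed in
# a circle of radius r, so a box built from (dx, dy) stays inside the region.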
delta = self.r * _radius_scale
return (delta, delta)
def get_convert(self, from_coord=None, to_coord=None, image=None):
from_coord = self.coord if from_coord is None else from_coord
to_coord = self.coord if to_coord is None else to_coord
image = image if image is not None else self.image
if from_coord == to_coord:
return lambda x, y: (x, y)
elif image is None:
raise RegionConversionError(
'No reference specified for conversion'
)
if to_coord == 'wcs':
convert = image.pixtoradec
else:
convert = image.radectopix
return convert
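# A minimal usage sketch (coordinates are illustrative): a region is stored in
# one frame and read back in another, with 'image' supplying the WCS mapping.
#
#   region = Region(100, 120, 30, 'data', as_coord='wcs', image=image)
#   x, y, r = region(coord='data', image=image)  # back to pixel coordinates
#   x1, y1, x2, y2 = region.bbox(coord='data')   # enclosing box for cutouts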
class MultiImage(GingaPlugin.LocalPlugin):
"""Coordinate display between multiple images"""
def __init__(self, fv, fitsimage):
super(MultiImage, self).__init__(fv, fitsimage)
self.logger.debug('Called.')
self.dc = self.fv.getDrawClasses()
canvas = self.dc.DrawingCanvas()
canvas.enable_draw(True)
canvas.enable_edit(True)
canvas.set_drawtype('rectangle', color='cyan', linestyle='dash',
coord='wcs', drawdims=True)
canvas.set_callback('draw-event', self.draw_cb)
canvas.set_callback('edit-event', self.edit_cb)
canvas.add_draw_mode('move', down=self.btndown,
move=self.drag, up=self.update)
canvas.register_for_cursor_drawing(self.fitsimage)
canvas.setSurface(self.fitsimage)
canvas.set_draw_mode('move')
self.canvas = canvas
self.id_count = 0 # Create unique ids
self.layertag = 'muimg-canvas'
self.region = None
self.images = {}
self.pstamps = None
def build_gui(self, container):
"""Build the Dialog"""
self.logger.debug('Called.')
# Setup for options
vbox, sw, orientation = Widgets.get_oriented_box(container)
vbox.set_border_width(4)
vbox.set_spacing(2)
# Instructions
self.msgFont = self.fv.getFont("sansFont", 12)
tw = Widgets.TextArea(wrap=True, editable=False)
tw.set_font(self.msgFont)
self.tw = tw
fr = Widgets.Expander("Instructions")
fr.set_widget(tw)
# Mode administration
modes = Widgets.Frame('Region Editing')
mode = self.canvas.get_draw_mode()
hbox = Widgets.HBox()
hbox.set_border_width(4)
btn1 = Widgets.RadioButton("Move")
btn1.set_state(mode == 'move')
btn1.add_callback(
'activated',
lambda w, val: self.set_mode_cb('move', val)
)
btn1.set_tooltip("Choose this to position region")
self.w.btn_move = btn1
hbox.add_widget(btn1)
btn2 = Widgets.RadioButton("Draw", group=btn1)
btn2.set_state(mode == 'draw')
btn2.add_callback(
'activated',
lambda w, val: self.set_mode_cb('draw', val)
)
btn2.set_tooltip("Choose this to draw a replacement region")
self.w.btn_draw = btn2
hbox.add_widget(btn2)
btn3 = Widgets.RadioButton("Edit", group=btn1)
btn3.set_state(mode == 'edit')
btn3.add_callback(
'activated',
lambda w, val: self.set_mode_cb('edit', val)
)
btn3.set_tooltip("Choose this to edit a region")
self.w.btn_edit = btn3
hbox.add_widget(btn3)
hbox.add_widget(Widgets.Label(''), stretch=1)
modes.set_widget(hbox)
# Coordinates
coords = Widgets.Frame('WCS Reference')
hbox = Widgets.HBox()
hbox.set_border_width(4)
hbox.set_spacing(4)
for option, tooltip in _coords_options:
btn = Widgets.RadioButton(option)
btn.set_state(option == _def_coords)
btn.add_callback(
'activated',
lambda widget, state, option=option: self.set_coords(
option, state)
)
btn.set_tooltip(tooltip)
hbox.add_widget(btn)
hbox.add_widget(Widgets.Label(''), stretch=1)
coords.set_widget(hbox)
# Basic plugin admin buttons
btns = Widgets.HBox()
btns.set_spacing(4)
btn = Widgets.Button("Close")
btn.add_callback('activated', lambda w: self.close())
btns.add_widget(btn)
btns.add_widget(Widgets.Label(''), stretch=1)
# Layout the options
vbox.add_widget(fr, stretch=0)
vbox.add_widget(coords, stretch=0)
vbox.add_widget(modes, stretch=0)
# Layout top level framing
vtop = Widgets.VBox()
vtop.set_border_width(4)
vtop.add_widget(sw, stretch=1) # Magic: sw contains vbox
vtop.add_widget(btns, stretch=0)
# Options completed.
container.add_widget(vtop, stretch=1)
# Postage stamps
if self.pstamps is not None:
return
pstamps_frame = self.fv.w['pstamps']
self.pstamps_show = False
pstamps = Widgets.HBox()
w = pstamps.get_widget()
self.logger.debug(f'layout="{pstamps_frame.get_widget().layout()}"')
self.logger.debug(f'pstamps.w="{w}"')
w.setMinimumHeight(100)
pstamps_frame.add_widget(pstamps)
self.pstamps = pstamps
self.pstamps_frame = pstamps_frame
def instructions(self):
self.tw.set_text(instructions)
self.tw.set_font(self.msgFont)
def start(self):
self.logger.debug('Called.')
self.instructions()
# insert layer if it is not already
p_canvas = self.fitsimage.get_canvas()
try:
p_canvas.getObjectByTag(self.layertag)
except KeyError:
# Add canvas layer
p_canvas.add(self.canvas, tag=self.layertag)
self.show_pstamps(True)
def resume(self):
self.logger.debug('Called.')
self.canvas.ui_setActive(True)
self.fv.showStatus("Draw a region to examine.")
self.redo()
def redo(self):
self.logger.debug('Called.')
fi_image = self.fitsimage.get_image()
if fi_image is None:
return
try:
fi_image_id = fi_image.get('path')
except Exception:
fi_image_id = self.make_id()  # fall back to a generated id when the image has no path
try:
_, pstamp = self.images[fi_image_id]
except KeyError:
pstamp = self.add_pstamp()
self.images[fi_image_id] = (fi_image, pstamp)
self.fitsimage.copy_attributes(pstamp,
['transforms', 'cutlevels',
'rgbmap'])
# Ensure region is accurately reflected on displayed image.
if self.region is None:
self.init_region()
self.region.image = fi_image
self.draw_region(finalize=True)
# Loop through all images.
for image_id, (image, pstamp) in self.images.items():
x1, y1, x2, y2 = self.region.bbox(coord='data', image=image)
x1, y1, x2, y2, data = self.cutdetail(image,
int(x1), int(y1),
int(x2), int(y2))
pstamp.set_data(data)
def stop(self):
self.logger.debug('Called.')
try:
obj = self.canvas.getObjectByTag(self.pstag)
except Exception:
"""Ignore"""
else:
self.canvas.delete_objects([obj])
self.canvas.ui_setActive(False)
self.fv.showStatus("")
self.pstamps_frame.layout().removeWidget(self.pstamps.get_widget())
self.pstamps.get_widget().setParent(None)
self.pstamps = None
self.images = {}
def close(self):
self.logger.debug('Called.')
self.fv.stop_local_plugin(self.chname, str(self))
return True
def pause(self):
self.logger.debug('Called.')
self.canvas.ui_setActive(False)
def __str__(self):
return 'MultiImage'
def btndown(self, canvas, event, data_x, data_y, viewer):
self.logger.debug('Called.')
self.region.set_center(data_x, data_y, coord='data')
self.redo()
return True
def update(self, canvas, event, data_x, data_y, viewer):
self.region.set_center(data_x, data_y, coord='data')
self.redo()
return
def drag(self, canvas, event, data_x, data_y, viewer):
self.region.set_center(data_x, data_y, coord='data')
self.redo()
return True
def draw_cb(self, canvas, tag):
self.logger.debug('Called.')
obj = canvas.getObjectByTag(tag)
pt_obj = canvas.getObjectByTag(self.pstag)
if obj.kind != 'rectangle':
return True
canvas.deleteObjects([obj, pt_obj])
x1, y1, x2, y2 = obj.get_llur()
self.region.set_bbox(x1, y1, x2, y2, coord='data')
self.redo()
return True
def edit_cb(self, canvas, obj):
self.logger.debug('Called.')
pt_obj = canvas.getObjectByTag(self.pstag)
if obj != pt_obj:
return True
x1, y1, x2, y2 = pt_obj.get_llur()
self.region.set_bbox(x1, y1, x2, y2, coord='data')
self.redo()
return True
def cutdetail(self, srcimage, x1, y1, x2, y2):
data, x1, y1, x2, y2 = srcimage.cutout_adjust(x1, y1, x2, y2)
return (x1, y1, x2, y2, data)
def add_pstamp(self):
self.logger.debug('Called.')
# Setup for thumbnail display
di = Viewers.ImageViewCanvas(logger=self.logger)
# di.configure_window(100, 100)
di.set_desired_size(100, 100)
di.enable_autozoom('on')
di.add_callback('configure', self.window_resized_cb)
di.enable_autocuts('off')
di.set_bg(0.4, 0.4, 0.4)
# for debugging
di.set_name('pstamp')
iw = Widgets.wrap(di.get_widget())
self.pstamps.add_widget(iw)
return di
def draw_region(self, finalize=False, coord='data'):
"""Set the box"""
self.logger.debug('Called.')
linestyle = 'solid' if finalize else 'dash'
x1, y1, x2, y2 = self.region.bbox(coord=coord)
try:
obj = self.canvas.getObjectByTag(self.pstag)
except Exception: # Need be general due to ginga
self.pstag = self.canvas.add(
self.dc.Rectangle(x1, y1, x2, y2,
color='cyan',
linestyle=linestyle)
)
obj = self.canvas.getObjectByTag(self.pstag)
else:
obj.linestyle = linestyle
obj.x1, obj.y1 = x1, y1
obj.x2, obj.y2 = x2, y2
self.canvas.redraw(whence=3)
def make_id(self):
self.id_count += 1
return f'Image_{self.id_count:02}'
def window_resized_cb(self, fitsimage, width, height):
self.logger.debug('Called.')
fitsimage.zoom_fit()
def show_pstamps(self, show):
"""Show/hide the stamps"""
self.pstamps_frame.get_widget().setVisible(show)
def edit_region(self):
if self.pstag is not None:
obj = self.canvas.getObjectByTag(self.pstag)
if obj.kind != 'rectangle':
return True
self.canvas.edit_select(obj)
else:
self.canvas.clear_selected()
self.canvas.update_canvas()
def set_mode_cb(self, mode, tf):
if tf:
self.canvas.set_draw_mode(mode)
if mode == 'edit':
self.edit_region()
return True
def set_coords(self, coords, state):
self.logger.debug('Called.')
if state:
self.region.set_coords(coords)
def init_region(self):
image = self.fitsimage.get_image()
height, width = image.shape
x = width // 2
y = height // 2
self.region = Region(x, y, 30, 'data',
as_coord=_def_coords, image=image,
logger=self.logger)
| bsd-3-clause |
Plantain/sms-mailinglist | lib/googlecloudapis/sqladmin/v1beta1/sqladmin_v1beta1_client.py | 5 | 19012 | """Generated client library for sqladmin version v1beta1."""
from googlecloudapis.apitools.base.py import base_api
from googlecloudapis.sqladmin.v1beta1 import sqladmin_v1beta1_messages as messages
class SqladminV1beta1(base_api.BaseApiClient):
"""Generated client library for service sqladmin version v1beta1."""
MESSAGES_MODULE = messages
_PACKAGE = u'sqladmin'
_SCOPES = [u'https://www.googleapis.com/auth/cloud-platform', u'https://www.googleapis.com/auth/sqlservice.admin']
_VERSION = u'v1beta1'
_CLIENT_ID = ''
_CLIENT_SECRET = ''
_USER_AGENT = ''
_CLIENT_CLASS_NAME = u'SqladminV1beta1'
_URL_VERSION = u'v1beta1'
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None):
"""Create a new sqladmin handle."""
url = url or u'https://www.googleapis.com/sql/v1beta1/'
super(SqladminV1beta1, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers)
self.backupRuns = self.BackupRunsService(self)
self.instances = self.InstancesService(self)
self.operations = self.OperationsService(self)
self.tiers = self.TiersService(self)
class BackupRunsService(base_api.BaseApiService):
"""Service class for the backupRuns resource."""
_NAME = u'backupRuns'
def __init__(self, client):
super(SqladminV1beta1.BackupRunsService, self).__init__(client)
self._method_configs = {
'Get': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'sql.backupRuns.get',
ordered_params=[u'project', u'instance', u'backupConfiguration', u'dueTime'],
path_params=[u'backupConfiguration', u'instance', u'project'],
query_params=[u'dueTime'],
relative_path=u'projects/{project}/instances/{instance}/backupRuns/{backupConfiguration}',
request_field='',
request_type_name=u'SqlBackupRunsGetRequest',
response_type_name=u'BackupRun',
supports_download=False,
),
'List': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'sql.backupRuns.list',
ordered_params=[u'project', u'instance', u'backupConfiguration'],
path_params=[u'instance', u'project'],
query_params=[u'backupConfiguration', u'maxResults', u'pageToken'],
relative_path=u'projects/{project}/instances/{instance}/backupRuns',
request_field='',
request_type_name=u'SqlBackupRunsListRequest',
response_type_name=u'BackupRunsListResponse',
supports_download=False,
),
}
self._upload_configs = {
}
def Get(self, request, global_params=None):
"""Retrieves a resource containing information about a backup run.
Args:
request: (SqlBackupRunsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(BackupRun) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
def List(self, request, global_params=None):
"""Lists all backup runs associated with a given instance and configuration in the reverse chronological order of the enqueued time.
Args:
request: (SqlBackupRunsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(BackupRunsListResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
class InstancesService(base_api.BaseApiService):
"""Service class for the instances resource."""
_NAME = u'instances'
def __init__(self, client):
super(SqladminV1beta1.InstancesService, self).__init__(client)
self._method_configs = {
'Delete': base_api.ApiMethodInfo(
http_method=u'DELETE',
method_id=u'sql.instances.delete',
ordered_params=[u'project', u'instance'],
path_params=[u'instance', u'project'],
query_params=[],
relative_path=u'projects/{project}/instances/{instance}',
request_field='',
request_type_name=u'SqlInstancesDeleteRequest',
response_type_name=u'InstancesDeleteResponse',
supports_download=False,
),
'Export': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'sql.instances.export',
ordered_params=[u'project', u'instance'],
path_params=[u'instance', u'project'],
query_params=[],
relative_path=u'projects/{project}/instances/{instance}/export',
request_field=u'instancesExportRequest',
request_type_name=u'SqlInstancesExportRequest',
response_type_name=u'InstancesExportResponse',
supports_download=False,
),
'Get': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'sql.instances.get',
ordered_params=[u'project', u'instance'],
path_params=[u'instance', u'project'],
query_params=[],
relative_path=u'projects/{project}/instances/{instance}',
request_field='',
request_type_name=u'SqlInstancesGetRequest',
response_type_name=u'DatabaseInstance',
supports_download=False,
),
'Import': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'sql.instances.import',
ordered_params=[u'project', u'instance'],
path_params=[u'instance', u'project'],
query_params=[],
relative_path=u'projects/{project}/instances/{instance}/import',
request_field=u'instancesImportRequest',
request_type_name=u'SqlInstancesImportRequest',
response_type_name=u'InstancesImportResponse',
supports_download=False,
),
'Insert': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'sql.instances.insert',
ordered_params=[u'project'],
path_params=[u'project'],
query_params=[],
relative_path=u'projects/{project}/instances',
request_field='<request>',
request_type_name=u'DatabaseInstance',
response_type_name=u'InstancesInsertResponse',
supports_download=False,
),
'List': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'sql.instances.list',
ordered_params=[u'project'],
path_params=[u'project'],
query_params=[u'maxResults', u'pageToken'],
relative_path=u'projects/{project}/instances',
request_field='',
request_type_name=u'SqlInstancesListRequest',
response_type_name=u'InstancesListResponse',
supports_download=False,
),
'Patch': base_api.ApiMethodInfo(
http_method=u'PATCH',
method_id=u'sql.instances.patch',
ordered_params=[u'project', u'instance'],
path_params=[u'instance', u'project'],
query_params=[],
relative_path=u'projects/{project}/instances/{instance}',
request_field='<request>',
request_type_name=u'DatabaseInstance',
response_type_name=u'InstancesUpdateResponse',
supports_download=False,
),
'Restart': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'sql.instances.restart',
ordered_params=[u'project', u'instance'],
path_params=[u'instance', u'project'],
query_params=[],
relative_path=u'projects/{project}/instances/{instance}/restart',
request_field='',
request_type_name=u'SqlInstancesRestartRequest',
response_type_name=u'InstancesRestartResponse',
supports_download=False,
),
'RestoreBackup': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'sql.instances.restoreBackup',
ordered_params=[u'project', u'instance', u'backupConfiguration', u'dueTime'],
path_params=[u'instance', u'project'],
query_params=[u'backupConfiguration', u'dueTime'],
relative_path=u'projects/{project}/instances/{instance}/restoreBackup',
request_field='',
request_type_name=u'SqlInstancesRestoreBackupRequest',
response_type_name=u'InstancesRestoreBackupResponse',
supports_download=False,
),
'Update': base_api.ApiMethodInfo(
http_method=u'PUT',
method_id=u'sql.instances.update',
ordered_params=[u'project', u'instance'],
path_params=[u'instance', u'project'],
query_params=[],
relative_path=u'projects/{project}/instances/{instance}',
request_field='<request>',
request_type_name=u'DatabaseInstance',
response_type_name=u'InstancesUpdateResponse',
supports_download=False,
),
}
self._upload_configs = {
}
def Delete(self, request, global_params=None):
"""Deletes a Cloud SQL instance.
Args:
request: (SqlInstancesDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(InstancesDeleteResponse) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
def Export(self, request, global_params=None):
"""Exports data from a Cloud SQL instance to a Google Cloud Storage bucket as a MySQL dump file.
Args:
request: (SqlInstancesExportRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(InstancesExportResponse) The response message.
"""
config = self.GetMethodConfig('Export')
return self._RunMethod(
config, request, global_params=global_params)
def Get(self, request, global_params=None):
"""Retrieves a resource containing information about a Cloud SQL instance.
Args:
request: (SqlInstancesGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(DatabaseInstance) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
def Import(self, request, global_params=None):
"""Imports data into a Cloud SQL instance from a MySQL dump file in Google Cloud Storage.
Args:
request: (SqlInstancesImportRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(InstancesImportResponse) The response message.
"""
config = self.GetMethodConfig('Import')
return self._RunMethod(
config, request, global_params=global_params)
def Insert(self, request, global_params=None):
"""Creates a new Cloud SQL instance.
Args:
request: (DatabaseInstance) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(InstancesInsertResponse) The response message.
"""
config = self.GetMethodConfig('Insert')
return self._RunMethod(
config, request, global_params=global_params)
def List(self, request, global_params=None):
"""Lists instances under a given project in the alphabetical order of the instance name.
Args:
request: (SqlInstancesListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(InstancesListResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
def Patch(self, request, global_params=None):
"""Updates settings of a Cloud SQL instance. Caution: This is not a partial update, so you must include values for all the settings that you want to retain. For partial updates, use patch.. This method supports patch semantics.
Args:
request: (DatabaseInstance) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(InstancesUpdateResponse) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
def Restart(self, request, global_params=None):
"""Restarts a Cloud SQL instance.
Args:
request: (SqlInstancesRestartRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(InstancesRestartResponse) The response message.
"""
config = self.GetMethodConfig('Restart')
return self._RunMethod(
config, request, global_params=global_params)
def RestoreBackup(self, request, global_params=None):
"""Restores a backup of a Cloud SQL instance.
Args:
request: (SqlInstancesRestoreBackupRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(InstancesRestoreBackupResponse) The response message.
"""
config = self.GetMethodConfig('RestoreBackup')
return self._RunMethod(
config, request, global_params=global_params)
def Update(self, request, global_params=None):
"""Updates settings of a Cloud SQL instance. Caution: This is not a partial update, so you must include values for all the settings that you want to retain. For partial updates, use patch.
Args:
request: (DatabaseInstance) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(InstancesUpdateResponse) The response message.
"""
config = self.GetMethodConfig('Update')
return self._RunMethod(
config, request, global_params=global_params)
class OperationsService(base_api.BaseApiService):
"""Service class for the operations resource."""
_NAME = u'operations'
def __init__(self, client):
super(SqladminV1beta1.OperationsService, self).__init__(client)
self._method_configs = {
'Get': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'sql.operations.get',
ordered_params=[u'project', u'instance', u'operation'],
path_params=[u'instance', u'operation', u'project'],
query_params=[],
relative_path=u'projects/{project}/instances/{instance}/operations/{operation}',
request_field='',
request_type_name=u'SqlOperationsGetRequest',
response_type_name=u'InstanceOperation',
supports_download=False,
),
'List': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'sql.operations.list',
ordered_params=[u'project', u'instance'],
path_params=[u'instance', u'project'],
query_params=[u'maxResults', u'pageToken'],
relative_path=u'projects/{project}/instances/{instance}/operations',
request_field='',
request_type_name=u'SqlOperationsListRequest',
response_type_name=u'OperationsListResponse',
supports_download=False,
),
}
self._upload_configs = {
}
def Get(self, request, global_params=None):
"""Retrieves an instance operation that has been performed on an instance.
Args:
request: (SqlOperationsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(InstanceOperation) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
def List(self, request, global_params=None):
"""Lists all instance operations that have been performed on the given Cloud SQL instance in the reverse chronological order of the start time.
Args:
request: (SqlOperationsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(OperationsListResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
class TiersService(base_api.BaseApiService):
"""Service class for the tiers resource."""
_NAME = u'tiers'
def __init__(self, client):
super(SqladminV1beta1.TiersService, self).__init__(client)
self._method_configs = {
'List': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'sql.tiers.list',
ordered_params=[],
path_params=[],
query_params=[],
relative_path=u'tiers',
request_field='',
request_type_name=u'SqlTiersListRequest',
response_type_name=u'TiersListResponse',
supports_download=False,
),
}
self._upload_configs = {
}
def List(self, request, global_params=None):
"""Lists all available service tiers for Google Cloud SQL, for example D1, D2. For related information, see Pricing.
Args:
request: (SqlTiersListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(TiersListResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
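# A minimal usage sketch (credential setup is left to the caller, and
# 'my-project' is a placeholder, not a real project id):
#
#   client = SqladminV1beta1()
#   request = messages.SqlInstancesListRequest(project=u'my-project')
#   response = client.instances.List(request)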
| apache-2.0 |
jokey2k/sentry | tests/sentry/interfaces/test_user.py | 15 | 1620 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import mock
from exam import fixture
from sentry.testutils import TestCase
from sentry.interfaces.user import User
from sentry.models import Event
class UserTest(TestCase):
@fixture
def event(self):
return mock.Mock(spec=Event())
@fixture
def interface(self):
return User.to_python(dict(
id=1,
email='lol@example.com',
favorite_color='brown',
))
def test_path(self):
assert self.interface.get_path() == 'sentry.interfaces.User'
def test_serialize_behavior(self):
assert self.interface.to_json() == {
'id': 1,
'email': 'lol@example.com',
'data': {'favorite_color': 'brown'}
}
@mock.patch('sentry.interfaces.user.render_to_string')
def test_to_html(self, render_to_string):
self.interface.to_html(self.event)
render_to_string.assert_called_once_with('sentry/partial/interfaces/user.html', {
'is_public': False,
'event': self.event,
'user_ip_address': None,
'user_id': 1,
'user_username': None,
'user_email': 'lol@example.com',
'user_data': {'favorite_color': 'brown'},
})
def test_to_html_public(self):
result = self.interface.to_html(self.event, is_public=True)
assert result == ''
def test_serialize_unserialize_behavior(self):
result = type(self.interface).to_python(self.interface.to_json())
assert result.to_json() == self.interface.to_json()
| bsd-3-clause |
sanjeevtripurari/hue | desktop/core/ext-py/pysaml2-2.4.0/example/idp2/idp.py | 29 | 37717 | #!/usr/bin/env python
import argparse
import base64
import importlib
import logging
import os
import re
import socket
import time
from Cookie import SimpleCookie
from hashlib import sha1
from urlparse import parse_qs
from saml2 import BINDING_HTTP_ARTIFACT
from saml2 import BINDING_URI
from saml2 import BINDING_PAOS
from saml2 import BINDING_SOAP
from saml2 import BINDING_HTTP_REDIRECT
from saml2 import BINDING_HTTP_POST
from saml2 import server
from saml2 import time_util
from saml2.authn import is_equal
from saml2.authn_context import AuthnBroker
from saml2.authn_context import PASSWORD
from saml2.authn_context import UNSPECIFIED
from saml2.authn_context import authn_context_class_ref
from saml2.httputil import Response
from saml2.httputil import NotFound
from saml2.httputil import geturl
from saml2.httputil import get_post
from saml2.httputil import Redirect
from saml2.httputil import Unauthorized
from saml2.httputil import BadRequest
from saml2.httputil import ServiceError
from saml2.ident import Unknown
from saml2.metadata import create_metadata_string
from saml2.profile import ecp
from saml2.s_utils import rndstr
from saml2.s_utils import exception_trace
from saml2.s_utils import UnknownPrincipal
from saml2.s_utils import UnsupportedBinding
from saml2.s_utils import PolicyError
from saml2.sigver import verify_redirect_signature
from saml2.sigver import encrypt_cert_from_item
from idp_user import USERS
from idp_user import EXTRA
from mako.lookup import TemplateLookup
logger = logging.getLogger("saml2.idp")
logger.setLevel(logging.WARNING)
class Cache(object):
def __init__(self):
self.user2uid = {}
self.uid2user = {}
def _expiration(timeout, tformat="%a, %d-%b-%Y %H:%M:%S GMT"):
"""
:param timeout:
:param tformat:
:return:
"""
if timeout == "now":
return time_util.instant(tformat)
elif timeout == "dawn":
return time.strftime(tformat, time.gmtime(0))
else:
# validity time should match lifetime of assertions
return time_util.in_a_while(minutes=timeout, format=tformat)
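# Illustrative outputs for the default format "%a, %d-%b-%Y %H:%M:%S GMT":
# _expiration("now") -> the current instant,
# _expiration("dawn") -> "Thu, 01-Jan-1970 00:00:00 GMT" (the epoch),
# _expiration(5) -> five minutes from now.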
# -----------------------------------------------------------------------------
def dict2list_of_tuples(d):
return [(k, v) for k, v in d.items()]
# -----------------------------------------------------------------------------
class Service(object):
def __init__(self, environ, start_response, user=None):
self.environ = environ
logger.debug("ENVIRON: %s" % environ)
self.start_response = start_response
self.user = user
def unpack_redirect(self):
if "QUERY_STRING" in self.environ:
_qs = self.environ["QUERY_STRING"]
return dict([(k, v[0]) for k, v in parse_qs(_qs).items()])
else:
return None
def unpack_post(self):
_dict = parse_qs(get_post(self.environ))
logger.debug("unpack_post:: %s" % _dict)
try:
return dict([(k, v[0]) for k, v in _dict.items()])
except Exception:
return None
def unpack_soap(self):
try:
query = get_post(self.environ)
return {"SAMLRequest": query, "RelayState": ""}
except Exception:
return None
def unpack_either(self):
if self.environ["REQUEST_METHOD"] == "GET":
_dict = self.unpack_redirect()
elif self.environ["REQUEST_METHOD"] == "POST":
_dict = self.unpack_post()
else:
_dict = None
logger.debug("_dict: %s" % _dict)
return _dict
def operation(self, saml_msg, binding):
logger.debug("_operation: %s" % saml_msg)
if not (saml_msg and 'SAMLRequest' in saml_msg):
resp = BadRequest('Error parsing request or no request')
return resp(self.environ, self.start_response)
else:
# saml_msg may also contain Signature and SigAlg
if "Signature" in saml_msg:
kwargs = {"signature": saml_msg["signature"],
"sigalg": saml_msg["SigAlg"]}
else:
kwargs = {}
try:
_encrypt_cert = encrypt_cert_from_item(
saml_msg["req_info"].message)
return self.do(saml_msg["SAMLRequest"], binding,
saml_msg["RelayState"],
encrypt_cert=_encrypt_cert, **kwargs)
except KeyError:
# Can live with no relay state
return self.do(saml_msg["SAMLRequest"], binding,
saml_msg["RelayState"], **kwargs)
def artifact_operation(self, saml_msg):
if not saml_msg:
resp = BadRequest("Missing query")
return resp(self.environ, self.start_response)
else:
# exchange artifact for request
request = IDP.artifact2message(saml_msg["SAMLart"], "spsso")
try:
return self.do(request, BINDING_HTTP_ARTIFACT,
saml_msg["RelayState"])
except KeyError:
return self.do(request, BINDING_HTTP_ARTIFACT)
def response(self, binding, http_args):
resp = None
if binding == BINDING_HTTP_ARTIFACT:
resp = Redirect()
elif http_args["data"]:
resp = Response(http_args["data"], headers=http_args["headers"])
else:
for header in http_args["headers"]:
if header[0] == "Location":
resp = Redirect(header[1])
if not resp:
resp = ServiceError("Don't know how to return response")
return resp(self.environ, self.start_response)
def do(self, query, binding, relay_state="", encrypt_cert=None):
pass
def redirect(self):
""" Expects a HTTP-redirect request """
_dict = self.unpack_redirect()
return self.operation(_dict, BINDING_HTTP_REDIRECT)
def post(self):
""" Expects a HTTP-POST request """
_dict = self.unpack_post()
return self.operation(_dict, BINDING_HTTP_POST)
def artifact(self):
# Can be either by HTTP_Redirect or HTTP_POST
_dict = self.unpack_either()
return self.artifact_operation(_dict)
def soap(self):
"""
Single log out using HTTP_SOAP binding
"""
logger.debug("- SOAP -")
_dict = self.unpack_soap()
logger.debug("_dict: %s" % _dict)
return self.operation(_dict, BINDING_SOAP)
def uri(self):
_dict = self.unpack_either()
return self.operation(_dict, BINDING_SOAP)
def not_authn(self, key, requested_authn_context):
ruri = geturl(self.environ, query=False)
kwargs = dict(authn_context=requested_authn_context, key=key, redirect_uri=ruri)
# Clear cookie, if it already exists
kaka = delete_cookie(self.environ, "idpauthn")
if kaka:
kwargs["headers"] = [kaka]
return do_authentication(self.environ, self.start_response, **kwargs)
# -----------------------------------------------------------------------------
REPOZE_ID_EQUIVALENT = "uid"
FORM_SPEC = """<form name="myform" method="post" action="%s">
<input type="hidden" name="SAMLResponse" value="%s" />
<input type="hidden" name="RelayState" value="%s" />
</form>"""
# -----------------------------------------------------------------------------
# === Single log in ====
# -----------------------------------------------------------------------------
class AuthenticationNeeded(Exception):
def __init__(self, authn_context=None, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
self.authn_context = authn_context
class SSO(Service):
def __init__(self, environ, start_response, user=None):
Service.__init__(self, environ, start_response, user)
self.binding = ""
self.response_bindings = None
self.resp_args = {}
self.binding_out = None
self.destination = None
self.req_info = None
self.op_type = ""
def verify_request(self, query, binding):
"""
:param query: The SAML query, transport encoded
:param binding: Which binding the query came in over
"""
resp_args = {}
if not query:
logger.info("Missing QUERY")
resp = Unauthorized('Unknown user')
return resp_args, resp(self.environ, self.start_response)
if not self.req_info:
self.req_info = IDP.parse_authn_request(query, binding)
logger.info("parsed OK")
_authn_req = self.req_info.message
logger.debug("%s" % _authn_req)
try:
self.binding_out, self.destination = IDP.pick_binding(
"assertion_consumer_service",
bindings=self.response_bindings,
entity_id=_authn_req.issuer.text, request=_authn_req)
except Exception as err:
logger.error("Couldn't find receiver endpoint: %s" % err)
raise
logger.debug("Binding: %s, destination: %s" % (self.binding_out,
self.destination))
resp_args = {}
try:
resp_args = IDP.response_args(_authn_req)
_resp = None
except UnknownPrincipal as excp:
_resp = IDP.create_error_response(_authn_req.id,
self.destination, excp)
except UnsupportedBinding as excp:
_resp = IDP.create_error_response(_authn_req.id,
self.destination, excp)
return resp_args, _resp
def do(self, query, binding_in, relay_state="", encrypt_cert=None):
"""
:param query: The request
:param binding_in: Which binding was used when receiving the query
:param relay_state: The relay state provided by the SP
:param encrypt_cert: Cert to use for encryption
:return: A response
"""
try:
resp_args, _resp = self.verify_request(query, binding_in)
except UnknownPrincipal as excp:
logger.error("UnknownPrincipal: %s" % (excp,))
resp = ServiceError("UnknownPrincipal: %s" % (excp,))
return resp(self.environ, self.start_response)
except UnsupportedBinding as excp:
logger.error("UnsupportedBinding: %s" % (excp,))
resp = ServiceError("UnsupportedBinding: %s" % (excp,))
return resp(self.environ, self.start_response)
if not _resp:
identity = USERS[self.user].copy()
# identity["eduPersonTargetedID"] = get_eptid(IDP, query, session)
logger.info("Identity: %s" % (identity,))
if REPOZE_ID_EQUIVALENT:
identity[REPOZE_ID_EQUIVALENT] = self.user
try:
try:
metod = self.environ["idp.authn"]
except KeyError:
pass
else:
resp_args["authn"] = metod
_resp = IDP.create_authn_response(
identity, userid=self.user,
encrypt_cert=encrypt_cert,
**resp_args)
except Exception as excp:
logging.error(exception_trace(excp))
resp = ServiceError("Exception: %s" % (excp,))
return resp(self.environ, self.start_response)
logger.info("AuthNResponse: %s" % _resp)
if self.op_type == "ecp":
kwargs = {"soap_headers": [
ecp.Response(
assertion_consumer_service_url=self.destination)]}
else:
kwargs = {}
http_args = IDP.apply_binding(self.binding_out,
"%s" % _resp, self.destination,
relay_state, response=True, **kwargs)
logger.debug("HTTPargs: %s" % http_args)
return self.response(self.binding_out, http_args)
@staticmethod
def _store_request(saml_msg):
logger.debug("_store_request: %s" % saml_msg)
key = sha1(saml_msg["SAMLRequest"]).hexdigest()
# store the AuthnRequest
IDP.ticket[key] = saml_msg
return key
def redirect(self):
""" This is the HTTP-redirect endpoint """
logger.info("--- In SSO Redirect ---")
saml_msg = self.unpack_redirect()
try:
_key = saml_msg["key"]
saml_msg = IDP.ticket[_key]
self.req_info = saml_msg["req_info"]
del IDP.ticket[_key]
except KeyError:
try:
self.req_info = IDP.parse_authn_request(saml_msg["SAMLRequest"],
BINDING_HTTP_REDIRECT)
except KeyError:
resp = BadRequest("Message signature verification failure")
return resp(self.environ, self.start_response)
_req = self.req_info.message
if "SigAlg" in saml_msg and "Signature" in saml_msg:
# Signed request
issuer = _req.issuer.text
_certs = IDP.metadata.certs(issuer, "any", "signing")
verified_ok = False
for cert in _certs:
if verify_redirect_signature(saml_msg, cert):
verified_ok = True
break
if not verified_ok:
resp = BadRequest("Message signature verification failure")
return resp(self.environ, self.start_response)
if self.user:
if _req.force_authn is not None and \
_req.force_authn.lower() == 'true':
saml_msg["req_info"] = self.req_info
key = self._store_request(saml_msg)
return self.not_authn(key, _req.requested_authn_context)
else:
return self.operation(saml_msg, BINDING_HTTP_REDIRECT)
else:
saml_msg["req_info"] = self.req_info
key = self._store_request(saml_msg)
return self.not_authn(key, _req.requested_authn_context)
else:
return self.operation(saml_msg, BINDING_HTTP_REDIRECT)
def post(self):
"""
The HTTP-Post endpoint
"""
logger.info("--- In SSO POST ---")
saml_msg = self.unpack_either()
try:
_key = saml_msg["key"]
saml_msg = IDP.ticket[_key]
self.req_info = saml_msg["req_info"]
del IDP.ticket[_key]
except KeyError:
self.req_info = IDP.parse_authn_request(
saml_msg["SAMLRequest"], BINDING_HTTP_POST)
_req = self.req_info.message
if self.user:
if _req.force_authn is not None and \
_req.force_authn.lower() == 'true':
saml_msg["req_info"] = self.req_info
key = self._store_request(saml_msg)
return self.not_authn(key, _req.requested_authn_context)
else:
return self.operation(saml_msg, BINDING_HTTP_POST)
else:
saml_msg["req_info"] = self.req_info
key = self._store_request(saml_msg)
return self.not_authn(key, _req.requested_authn_context)
else:
return self.operation(saml_msg, BINDING_HTTP_POST)
# def artifact(self):
# # Can be either by HTTP_Redirect or HTTP_POST
# _req = self._store_request(self.unpack_either())
# if isinstance(_req, basestring):
# return self.not_authn(_req)
# return self.artifact_operation(_req)
def ecp(self):
# The ECP interface
logger.info("--- ECP SSO ---")
resp = None
try:
authz_info = self.environ["HTTP_AUTHORIZATION"]
if authz_info.startswith("Basic "):
try:
_info = base64.b64decode(authz_info[6:])
except TypeError:
resp = Unauthorized()
else:
try:
(user, passwd) = _info.split(":")
                        if not is_equal(PASSWD[user], passwd):
                            resp = Unauthorized()
                        self.user = user
                        self.environ[
                            "idp.authn"] = AUTHN_BROKER.get_authn_by_accr(
                                PASSWORD)
except ValueError:
resp = Unauthorized()
else:
resp = Unauthorized()
except KeyError:
resp = Unauthorized()
if resp:
return resp(self.environ, self.start_response)
_dict = self.unpack_soap()
self.response_bindings = [BINDING_PAOS]
# Basic auth ?!
self.op_type = "ecp"
return self.operation(_dict, BINDING_SOAP)
# -----------------------------------------------------------------------------
# === Authentication ====
# -----------------------------------------------------------------------------
def do_authentication(environ, start_response, authn_context, key,
redirect_uri, headers=None):
"""
Display the login form
"""
logger.debug("Do authentication")
auth_info = AUTHN_BROKER.pick(authn_context)
if len(auth_info):
method, reference = auth_info[0]
logger.debug("Authn chosen: %s (ref=%s)" % (method, reference))
return method(environ, start_response, reference, key, redirect_uri, headers)
else:
resp = Unauthorized("No usable authentication method")
return resp(environ, start_response)
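# Flow sketch (assumed broker contents): AUTHN_BROKER.pick() yields
# (method, reference) pairs ordered by the priority given to add(), so for a
# password context the call chain is roughly:
#   auth_info = AUTHN_BROKER.pick(authn_context)
#   method, reference = auth_info[0]   # e.g. (username_password_authn, "1")
#   method(environ, start_response, reference, key, redirect_uri)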
# -----------------------------------------------------------------------------
PASSWD = {
"daev0001": "qwerty",
"haho0032": "qwerty",
"roland": "dianakra",
"babs": "howes",
"upper": "crust"}
def username_password_authn(environ, start_response, reference, key,
redirect_uri, headers=None):
"""
Display the login form
"""
logger.info("The login page")
kwargs = dict(mako_template="login.mako", template_lookup=LOOKUP)
if headers:
kwargs["headers"] = headers
resp = Response(**kwargs)
argv = {
"action": "/verify",
"login": "",
"password": "",
"key": key,
"authn_reference": reference,
"redirect_uri": redirect_uri
}
logger.info("do_authentication argv: %s" % argv)
return resp(environ, start_response, **argv)
def verify_username_and_password(dic):
global PASSWD
# verify username and password
if PASSWD[dic["login"][0]] == dic["password"][0]:
return True, dic["login"][0]
else:
return False, ""
def do_verify(environ, start_response, _):
query = parse_qs(get_post(environ))
logger.debug("do_verify: %s" % query)
try:
_ok, user = verify_username_and_password(query)
except KeyError:
_ok = False
user = None
if not _ok:
resp = Unauthorized("Unknown user or wrong password")
else:
uid = rndstr(24)
IDP.cache.uid2user[uid] = user
IDP.cache.user2uid[user] = uid
logger.debug("Register %s under '%s'" % (user, uid))
kaka = set_cookie("idpauthn", "/", uid, query["authn_reference"][0])
lox = "%s?id=%s&key=%s" % (query["redirect_uri"][0], uid,
query["key"][0])
logger.debug("Redirect => %s" % lox)
resp = Redirect(lox, headers=[kaka], content="text/html")
return resp(environ, start_response)
def not_found(environ, start_response):
"""Called if no URL matches."""
resp = NotFound()
return resp(environ, start_response)
# -----------------------------------------------------------------------------
# === Single log out ===
# -----------------------------------------------------------------------------
# def _subject_sp_info(req_info):
# # look for the subject
# subject = req_info.subject_id()
# subject = subject.text.strip()
# sp_entity_id = req_info.message.issuer.text.strip()
# return subject, sp_entity_id
class SLO(Service):
def do(self, request, binding, relay_state="", encrypt_cert=None):
logger.info("--- Single Log Out Service ---")
try:
logger.debug("req: '%s'" % request)
req_info = IDP.parse_logout_request(request, binding)
except Exception as exc:
logger.error("Bad request: %s" % exc)
resp = BadRequest("%s" % exc)
return resp(self.environ, self.start_response)
msg = req_info.message
if msg.name_id:
lid = IDP.ident.find_local_id(msg.name_id)
logger.info("local identifier: %s" % lid)
if lid in IDP.cache.user2uid:
uid = IDP.cache.user2uid[lid]
if uid in IDP.cache.uid2user:
del IDP.cache.uid2user[uid]
del IDP.cache.user2uid[lid]
# remove the authentication
try:
IDP.session_db.remove_authn_statements(msg.name_id)
except KeyError as exc:
logger.error("Unknown session: %s" % exc)
resp = ServiceError("Unknown session: %s" % exc)
return resp(self.environ, self.start_response)
resp = IDP.create_logout_response(msg, [binding])
if binding == BINDING_SOAP:
destination = ""
response = False
else:
binding, destination = IDP.pick_binding("single_logout_service",
[binding], "spsso",
req_info)
response = True
try:
hinfo = IDP.apply_binding(binding, "%s" % resp, destination,
relay_state, response=response)
except Exception as exc:
logger.error("ServiceError: %s" % exc)
resp = ServiceError("%s" % exc)
return resp(self.environ, self.start_response)
#_tlh = dict2list_of_tuples(hinfo["headers"])
delco = delete_cookie(self.environ, "idpauthn")
if delco:
hinfo["headers"].append(delco)
logger.info("Header: %s" % (hinfo["headers"],))
if binding == BINDING_HTTP_REDIRECT:
for key, value in hinfo['headers']:
if key.lower() == 'location':
resp = Redirect(value, headers=hinfo["headers"])
return resp(self.environ, self.start_response)
resp = ServiceError('missing Location header')
return resp(self.environ, self.start_response)
else:
resp = Response(hinfo["data"], headers=hinfo["headers"])
return resp(self.environ, self.start_response)
# ----------------------------------------------------------------------------
# Manage Name ID service
# ----------------------------------------------------------------------------
class NMI(Service):
def do(self, query, binding, relay_state="", encrypt_cert=None):
logger.info("--- Manage Name ID Service ---")
req = IDP.parse_manage_name_id_request(query, binding)
request = req.message
# Do the necessary stuff
name_id = IDP.ident.handle_manage_name_id_request(
request.name_id, request.new_id, request.new_encrypted_id,
request.terminate)
logger.debug("New NameID: %s" % name_id)
_resp = IDP.create_manage_name_id_response(request)
# It's using SOAP binding
hinfo = IDP.apply_binding(BINDING_SOAP, "%s" % _resp, "",
relay_state, response=True)
resp = Response(hinfo["data"], headers=hinfo["headers"])
return resp(self.environ, self.start_response)
# ----------------------------------------------------------------------------
# === Assertion ID request ===
# ----------------------------------------------------------------------------
# Only URI binding
class AIDR(Service):
def do(self, aid, binding, relay_state="", encrypt_cert=None):
logger.info("--- Assertion ID Service ---")
try:
assertion = IDP.create_assertion_id_request_response(aid)
except Unknown:
resp = NotFound(aid)
return resp(self.environ, self.start_response)
hinfo = IDP.apply_binding(BINDING_URI, "%s" % assertion, response=True)
logger.debug("HINFO: %s" % hinfo)
resp = Response(hinfo["data"], headers=hinfo["headers"])
return resp(self.environ, self.start_response)
def operation(self, _dict, binding, **kwargs):
logger.debug("_operation: %s" % _dict)
if not _dict or "ID" not in _dict:
resp = BadRequest('Error parsing request or no request')
return resp(self.environ, self.start_response)
return self.do(_dict["ID"], binding, **kwargs)
# ----------------------------------------------------------------------------
# === Artifact resolve service ===
# ----------------------------------------------------------------------------
class ARS(Service):
def do(self, request, binding, relay_state="", encrypt_cert=None):
_req = IDP.parse_artifact_resolve(request, binding)
msg = IDP.create_artifact_response(_req, _req.artifact.text)
hinfo = IDP.apply_binding(BINDING_SOAP, "%s" % msg, "", "",
response=True)
resp = Response(hinfo["data"], headers=hinfo["headers"])
return resp(self.environ, self.start_response)
# ----------------------------------------------------------------------------
# === Authn query service ===
# ----------------------------------------------------------------------------
# Only SOAP binding
class AQS(Service):
def do(self, request, binding, relay_state="", encrypt_cert=None):
logger.info("--- Authn Query Service ---")
_req = IDP.parse_authn_query(request, binding)
_query = _req.message
msg = IDP.create_authn_query_response(_query.subject,
_query.requested_authn_context,
_query.session_index)
logger.debug("response: %s" % msg)
hinfo = IDP.apply_binding(BINDING_SOAP, "%s" % msg, "", "",
response=True)
resp = Response(hinfo["data"], headers=hinfo["headers"])
return resp(self.environ, self.start_response)
# ----------------------------------------------------------------------------
# === Attribute query service ===
# ----------------------------------------------------------------------------
# Only SOAP binding
class ATTR(Service):
def do(self, request, binding, relay_state="", encrypt_cert=None):
logger.info("--- Attribute Query Service ---")
_req = IDP.parse_attribute_query(request, binding)
_query = _req.message
name_id = _query.subject.name_id
uid = name_id.text
logger.debug("Local uid: %s" % uid)
identity = EXTRA[uid]
# Comes in over SOAP so only need to construct the response
args = IDP.response_args(_query, [BINDING_SOAP])
msg = IDP.create_attribute_response(identity,
name_id=name_id, **args)
logger.debug("response: %s" % msg)
hinfo = IDP.apply_binding(BINDING_SOAP, "%s" % msg, "", "",
response=True)
resp = Response(hinfo["data"], headers=hinfo["headers"])
return resp(self.environ, self.start_response)
# ----------------------------------------------------------------------------
# Name ID Mapping service
# When an entity that shares an identifier for a principal with an identity
# provider wishes to obtain a name identifier for the same principal in a
# particular format or federation namespace, it can send a request to
# the identity provider using this protocol.
# ----------------------------------------------------------------------------
class NIM(Service):
def do(self, query, binding, relay_state="", encrypt_cert=None):
req = IDP.parse_name_id_mapping_request(query, binding)
request = req.message
# Do the necessary stuff
try:
name_id = IDP.ident.handle_name_id_mapping_request(
request.name_id, request.name_id_policy)
except Unknown:
resp = BadRequest("Unknown entity")
return resp(self.environ, self.start_response)
except PolicyError:
resp = BadRequest("Unknown entity")
return resp(self.environ, self.start_response)
info = IDP.response_args(request)
_resp = IDP.create_name_id_mapping_response(name_id, **info)
# Only SOAP
hinfo = IDP.apply_binding(BINDING_SOAP, "%s" % _resp, "", "",
response=True)
resp = Response(hinfo["data"], headers=hinfo["headers"])
return resp(self.environ, self.start_response)
# ----------------------------------------------------------------------------
# Cookie handling
# ----------------------------------------------------------------------------
def info_from_cookie(kaka):
logger.debug("KAKA: %s" % kaka)
if kaka:
cookie_obj = SimpleCookie(kaka)
morsel = cookie_obj.get("idpauthn", None)
if morsel:
try:
key, ref = base64.b64decode(morsel.value).split(":")
return IDP.cache.uid2user[key], ref
except (KeyError, TypeError):
return None, None
else:
logger.debug("No idpauthn cookie")
return None, None
def delete_cookie(environ, name):
kaka = environ.get("HTTP_COOKIE", '')
logger.debug("delete KAKA: %s" % kaka)
if kaka:
cookie_obj = SimpleCookie(kaka)
morsel = cookie_obj.get(name, None)
cookie = SimpleCookie()
cookie[name] = ""
cookie[name]['path'] = "/"
logger.debug("Expire: %s" % morsel)
cookie[name]["expires"] = _expiration("dawn")
return tuple(cookie.output().split(": ", 1))
return None
def set_cookie(name, _, *args):
cookie = SimpleCookie()
cookie[name] = base64.b64encode(":".join(args))
cookie[name]['path'] = "/"
cookie[name]["expires"] = _expiration(5) # 5 minutes from now
logger.debug("Cookie expires: %s" % cookie[name]["expires"])
return tuple(cookie.output().split(": ", 1))
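# Round-trip sketch (hypothetical uid/reference): set_cookie() stores
# base64("<uid>:<authn_ref>") under "idpauthn"; info_from_cookie() reverses it:
#   header = set_cookie("idpauthn", "/", "abc123", "ref-1")
#   # -> ('Set-Cookie', 'idpauthn=YWJjMTIzOnJlZi0x; expires=...; Path=/')
#   user, ref = info_from_cookie("idpauthn=YWJjMTIzOnJlZi0x")
#   # user is IDP.cache.uid2user["abc123"], ref is "ref-1"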
# ----------------------------------------------------------------------------
# map urls to functions
AUTHN_URLS = [
# sso
(r'sso/post$', (SSO, "post")),
(r'sso/post/(.*)$', (SSO, "post")),
(r'sso/redirect$', (SSO, "redirect")),
(r'sso/redirect/(.*)$', (SSO, "redirect")),
(r'sso/art$', (SSO, "artifact")),
(r'sso/art/(.*)$', (SSO, "artifact")),
# slo
(r'slo/redirect$', (SLO, "redirect")),
(r'slo/redirect/(.*)$', (SLO, "redirect")),
(r'slo/post$', (SLO, "post")),
(r'slo/post/(.*)$', (SLO, "post")),
(r'slo/soap$', (SLO, "soap")),
(r'slo/soap/(.*)$', (SLO, "soap")),
#
(r'airs$', (AIDR, "uri")),
(r'ars$', (ARS, "soap")),
# mni
(r'mni/post$', (NMI, "post")),
(r'mni/post/(.*)$', (NMI, "post")),
(r'mni/redirect$', (NMI, "redirect")),
(r'mni/redirect/(.*)$', (NMI, "redirect")),
(r'mni/art$', (NMI, "artifact")),
(r'mni/art/(.*)$', (NMI, "artifact")),
(r'mni/soap$', (NMI, "soap")),
(r'mni/soap/(.*)$', (NMI, "soap")),
# nim
(r'nim$', (NIM, "soap")),
(r'nim/(.*)$', (NIM, "soap")),
#
(r'aqs$', (AQS, "soap")),
(r'attr$', (ATTR, "soap"))
]
NON_AUTHN_URLS = [
#(r'login?(.*)$', do_authentication),
(r'verify?(.*)$', do_verify),
(r'sso/ecp$', (SSO, "ecp")),
]
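# Dispatch sketch (hypothetical request): a path of "sso/redirect" matches
# (r'sso/redirect$', (SSO, "redirect")), so application() below builds
# SSO(environ, start_response, user) and returns its redirect() result;
# plain-function entries such as do_verify are invoked directly.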
# ----------------------------------------------------------------------------
def metadata(environ, start_response):
try:
path = args.path
if path is None or len(path) == 0:
path = os.path.dirname(os.path.abspath(__file__))
if path[-1] != "/":
path += "/"
metadata = create_metadata_string(path + args.config, IDP.config,
args.valid, args.cert, args.keyfile,
args.id, args.name, args.sign)
start_response('200 OK', [('Content-Type', "text/xml")])
return metadata
except Exception as ex:
logger.error("An error occured while creating metadata:" + ex.message)
return not_found(environ, start_response)
def staticfile(environ, start_response):
try:
path = args.path[:]
if path is None or len(path) == 0:
path = os.path.dirname(os.path.abspath(__file__))
if path[-1] != "/":
path += "/"
path += environ.get('PATH_INFO', '').lstrip('/')
path = os.path.realpath(path)
if not path.startswith(args.path):
resp = Unauthorized()
return resp(environ, start_response)
start_response('200 OK', [('Content-Type', "text/xml")])
return open(path, 'r').read()
except Exception as ex:
logger.error("An error occured while creating metadata:" + ex.message)
return not_found(environ, start_response)
def application(environ, start_response):
"""
The main WSGI application. Dispatch the current request to
the functions from above and store the regular expression
captures in the WSGI environment as `myapp.url_args` so that
the functions from above can access the url placeholders.
If nothing matches, call the `not_found` function.
:param environ: The HTTP application environment
:param start_response: The application to run when the handling of the
request is done
:return: The response as a list of lines
"""
path = environ.get('PATH_INFO', '').lstrip('/')
if path == "metadata":
return metadata(environ, start_response)
kaka = environ.get("HTTP_COOKIE", None)
logger.info("<application> PATH: %s" % path)
if kaka:
logger.info("= KAKA =")
user, authn_ref = info_from_cookie(kaka)
if authn_ref:
environ["idp.authn"] = AUTHN_BROKER[authn_ref]
else:
try:
query = parse_qs(environ["QUERY_STRING"])
logger.debug("QUERY: %s" % query)
user = IDP.cache.uid2user[query["id"][0]]
except KeyError:
user = None
url_patterns = AUTHN_URLS
if not user:
logger.info("-- No USER --")
# insert NON_AUTHN_URLS first in case there is no user
url_patterns = NON_AUTHN_URLS + url_patterns
for regex, callback in url_patterns:
match = re.search(regex, path)
if match is not None:
try:
environ['myapp.url_args'] = match.groups()[0]
except IndexError:
environ['myapp.url_args'] = path
logger.debug("Callback: %s" % (callback,))
if isinstance(callback, tuple):
cls = callback[0](environ, start_response, user)
func = getattr(cls, callback[1])
return func()
return callback(environ, start_response, user)
if re.search(r'static/.*', path) is not None:
return staticfile(environ, start_response)
return not_found(environ, start_response)
# ----------------------------------------------------------------------------
if __name__ == '__main__':
from wsgiref.simple_server import make_server
parser = argparse.ArgumentParser()
parser.add_argument('-p', dest='path', help='Path to configuration file.')
parser.add_argument('-v', dest='valid',
help="How long, in days, the metadata is valid from "
"the time of creation")
parser.add_argument('-c', dest='cert', help='certificate')
parser.add_argument('-i', dest='id',
help="The ID of the entities descriptor")
parser.add_argument('-k', dest='keyfile',
help="A file with a key to sign the metadata with")
parser.add_argument('-n', dest='name')
parser.add_argument('-s', dest='sign', action='store_true',
help="sign the metadata")
parser.add_argument('-m', dest='mako_root', default="./")
parser.add_argument(dest="config")
args = parser.parse_args()
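    # Example invocation (hypothetical script/module names): the positional
    # argument is an importable pysaml2 configuration module defining HOST
    # and PORT, e.g.:
    #   python idp.py -p ./ -m ./ idp_conf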
AUTHN_BROKER = AuthnBroker()
AUTHN_BROKER.add(authn_context_class_ref(PASSWORD),
username_password_authn, 10,
"http://%s" % socket.gethostname())
AUTHN_BROKER.add(authn_context_class_ref(UNSPECIFIED),
"", 0, "http://%s" % socket.gethostname())
CONFIG = importlib.import_module(args.config)
IDP = server.Server(args.config, cache=Cache())
IDP.ticket = {}
_rot = args.mako_root
LOOKUP = TemplateLookup(directories=[_rot + 'templates', _rot + 'htdocs'],
module_directory=_rot + 'modules',
input_encoding='utf-8', output_encoding='utf-8')
HOST = CONFIG.HOST
PORT = CONFIG.PORT
SRV = make_server(HOST, PORT, application)
print "IdP listening on %s:%s" % (HOST, PORT)
SRV.serve_forever()
| apache-2.0 |
kleinfeld/medpy | medpy/features/__init__.py | 1 | 1324 | """
@package medpy.features
Functionality to extract features from images and present/manipulate them.
Packages:
- histogram: Functions to create and manipulate (fuzzy) histograms.
- intensity: Functions to extracts voxel-wise intensity based features from (medical) images.
- texture: Run-time optimised features extraction on images. (experimental)
- utilities: Utilities for feature handling. Currently only for features from the @see medpy.features.intensity package.
"""
# determines the modules that should be imported when "from features import *" is used
__all__ = []
# if __all__ is not set, only the following, explicit import statements are executed
from histogram import fuzzy_histogram, triangular_membership, trapezoid_membership, \
gaussian_membership, sigmoidal_difference_membership
from intensity import centerdistance, centerdistance_xdminus1, guassian_gradient_magnitude, \
hemispheric_difference, indices, intensities, local_histogram, local_mean_gauss, \
median
from utilities import append, join, normalize, normalize_with_model
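# Usage sketch (hypothetical image array "img"):
#   from medpy.features import intensities, local_mean_gauss, join
#   feature_vector = join(intensities(img), local_mean_gauss(img))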
#!experimental, therefore not directly included
#from texture import coarseness, contrast, directionality, efficient_local_avg, efficient_local_avg3d, running_total, running_total3d, tamura | gpl-3.0 |
open-austin/influence-texas | src/influencetx/legislators/migrations/0001_initial.py | 1 | 1680 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-01-17 17:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Legislator',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('openstates_leg_id', models.CharField(db_index=True, max_length=48)),
('name', models.CharField(max_length=45)),
('first_name', models.CharField(blank=True, max_length=20)),
('last_name', models.CharField(blank=True, max_length=20)),
('party', models.CharField(choices=[('D', 'Democratic'), ('I', 'Independent'), ('R', 'Republican'), ('U', 'Unknown')], max_length=1)),
('chamber', models.CharField(choices=[('House', 'House'), ('Senate', 'Senate')], max_length=6)),
('district', models.IntegerField()),
('openstates_updated_at', models.DateTimeField()),
('url', models.URLField(blank=True)),
('photo_url', models.URLField(blank=True)),
],
),
migrations.CreateModel(
name='LegislatorIdMap',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('openstates_leg_id', models.CharField(db_index=True, max_length=48)),
('tpj_filer_id', models.IntegerField(db_index=True)),
],
),
]
| gpl-2.0 |
MyPureCloud/kafka | tests/kafkatest/tests/tools/trogdor_test.py | 17 | 4508 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kafkatest.services.trogdor.network_partition_fault_spec import NetworkPartitionFaultSpec
from ducktape.cluster.cluster_spec import ClusterSpec
from ducktape.mark.resource import cluster
from ducktape.tests.test import Test
from ducktape.utils.util import wait_until
from kafkatest.services.trogdor.task_spec import TaskSpec
from kafkatest.services.trogdor.no_op_task_spec import NoOpTaskSpec
from kafkatest.services.trogdor.trogdor import TrogdorService
from kafkatest.utils import node_is_reachable
class TrogdorTest(Test):
"""
Tests the Trogdor fault injection daemon in isolation.
"""
def __init__(self, test_context):
super(TrogdorTest, self).__init__(test_context)
def set_up_trogdor(self, num_agent_nodes):
self.agent_nodes = self.test_context.cluster.alloc(ClusterSpec.simple_linux(num_agent_nodes))
self.trogdor = TrogdorService(context=self.test_context, agent_nodes=self.agent_nodes)
for agent_node in self.agent_nodes:
agent_node.account.logger = self.trogdor.logger
self.trogdor.start()
def setUp(self):
self.trogdor = None
self.agent_nodes = None
def tearDown(self):
if self.trogdor is not None:
self.trogdor.stop()
self.trogdor = None
if self.agent_nodes is not None:
self.test_context.cluster.free(self.agent_nodes)
self.agent_nodes = None
@cluster(num_nodes=4)
def test_trogdor_service(self):
"""
Test that we can bring up Trogdor and create a no-op fault.
"""
self.set_up_trogdor(3)
spec = NoOpTaskSpec(0, TaskSpec.MAX_DURATION_MS)
self.trogdor.create_task("myfault", spec)
def check_for_myfault():
faults = self.trogdor.tasks()["tasks"]
self.logger.info("tasks = %s" % faults)
return "myfault" in faults
        wait_until(lambda: check_for_myfault(),
timeout_sec=10, backoff_sec=.2, err_msg="Failed to read back myfault.")
self.trogdor.stop_task("myfault")
@cluster(num_nodes=4)
def test_network_partition_fault(self):
"""
Test that the network partition fault results in a true network partition between nodes.
"""
self.set_up_trogdor(3)
spec = NetworkPartitionFaultSpec(0, TaskSpec.MAX_DURATION_MS,
[[self.agent_nodes[0]], self.agent_nodes[1:]])
partitions = spec.message["partitions"]
assert 2 == len(partitions)
assert [self.agent_nodes[0].name] == partitions[0]
assert [self.agent_nodes[1].name, self.agent_nodes[2].name] == partitions[1]
self.trogdor.create_task("partition0", spec)
def verify_nodes_partitioned():
if node_is_reachable(self.agent_nodes[0], self.agent_nodes[1]):
return False
if node_is_reachable(self.agent_nodes[1], self.agent_nodes[0]):
return False
if node_is_reachable(self.agent_nodes[2], self.agent_nodes[0]):
return False
return True
        wait_until(lambda: verify_nodes_partitioned(),
timeout_sec=10, backoff_sec=.2, err_msg="Failed to verify that the nodes were partitioned.")
if not node_is_reachable(self.agent_nodes[0], self.agent_nodes[0]):
raise RuntimeError("Node 0 must be reachable from itself.")
if not node_is_reachable(self.agent_nodes[1], self.agent_nodes[2]):
raise RuntimeError("Node 2 must be reachable from node 1.")
if not node_is_reachable(self.agent_nodes[2], self.agent_nodes[1]):
raise RuntimeError("Node 1 must be reachable from node 2.")
| apache-2.0 |
devs1991/test_edx_docmode | lms/djangoapps/course_wiki/views.py | 5 | 4720 | """
This file contains view functions for wrapping the django-wiki.
"""
import logging
import re
import cgi
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.exceptions import ImproperlyConfigured
from django.shortcuts import redirect
from django.utils.translation import ugettext as _
from wiki.core.exceptions import NoRootURL
from wiki.models import URLPath, Article
from courseware.courses import get_course_by_id
from course_wiki.utils import course_wiki_slug
from opaque_keys.edx.locations import SlashSeparatedCourseKey
log = logging.getLogger(__name__)
def root_create(request): # pylint: disable=unused-argument
"""
In the edX wiki, we don't show the root_create view. Instead, we
just create the root automatically if it doesn't exist.
"""
root = get_or_create_root()
return redirect('wiki:get', path=root.path)
def course_wiki_redirect(request, course_id): # pylint: disable=unused-argument
"""
This redirects to whatever page on the wiki that the course designates
as it's home page. A course's wiki must be an article on the root (for
example, "/6.002x") to keep things simple.
"""
course = get_course_by_id(SlashSeparatedCourseKey.from_deprecated_string(course_id))
course_slug = course_wiki_slug(course)
valid_slug = True
if not course_slug:
log.exception("This course is improperly configured. The slug cannot be empty.")
valid_slug = False
if re.match(r'^[-\w\.]+$', course_slug) is None:
log.exception("This course is improperly configured. The slug can only contain letters, numbers, periods or hyphens.")
valid_slug = False
if not valid_slug:
return redirect("wiki:get", path="")
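    # Slug sketch: re.match(r'^[-\w\.]+$', "6.002x") succeeds, while
    # "6.002x/Fall" (contains a slash) or an empty slug redirects to the
    # wiki root above.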
# The wiki needs a Site object created. We make sure it exists here
try:
Site.objects.get_current()
except Site.DoesNotExist:
new_site = Site()
new_site.domain = settings.SITE_NAME
new_site.name = "edX"
new_site.save()
site_id = str(new_site.id)
if site_id != str(settings.SITE_ID):
msg = "No site object was created and the SITE_ID doesn't match the newly created one. {} != {}".format(
site_id, settings.SITE_ID
)
raise ImproperlyConfigured(msg)
try:
urlpath = URLPath.get_by_path(course_slug, select_related=True)
results = list(Article.objects.filter(id=urlpath.article.id))
if results:
article = results[0]
else:
article = None
except (NoRootURL, URLPath.DoesNotExist):
# We will create it in the next block
urlpath = None
article = None
if not article:
# create it
root = get_or_create_root()
if urlpath:
# Somehow we got a urlpath without an article. Just delete it and
            # recreate it.
urlpath.delete()
content = cgi.escape(
# Translators: this string includes wiki markup. Leave the ** and the _ alone.
_("This is the wiki for **{organization}**'s _{course_name}_.").format(
organization=course.display_org_with_default,
course_name=course.display_name_with_default_escaped,
)
)
urlpath = URLPath.create_article(
root,
course_slug,
title=course_slug,
content=content,
user_message=_("Course page automatically created."),
user=None,
ip_address=None,
article_kwargs={'owner': None,
'group': None,
'group_read': True,
'group_write': True,
'other_read': True,
'other_write': True,
})
return redirect("wiki:get", path=urlpath.path)
def get_or_create_root():
"""
Returns the root article, or creates it if it doesn't exist.
"""
try:
root = URLPath.root()
if not root.article:
root.delete()
raise NoRootURL
return root
except NoRootURL:
pass
starting_content = "\n".join((
_("Welcome to the {platform_name} Wiki").format(platform_name=settings.PLATFORM_NAME),
"===",
_("Visit a course wiki to add an article."),
))
root = URLPath.create_root(title=_("Wiki"), content=starting_content)
article = root.article
article.group = None
article.group_read = True
article.group_write = False
article.other_read = True
article.other_write = False
article.save()
return root
| agpl-3.0 |
silly-wacky-3-town-toon/SOURCE-COD | Panda3D-1.10.0/panda3d/core.py | 1 | 36364 | # This file is automatically generated by makepanda.py. Do not modify.
from __future__ import absolute_import
from ._core import *
### BEGIN direct/src/extensions_native/core_extensions.py
import sys
main_dir = Filename()
if sys.argv and sys.argv[0]:
main_dir = Filename.from_os_specific(sys.argv[0])
if main_dir.empty():
    # We must be running in the Python interpreter directly, so use the CWD.
main_dir = ExecutionEnvironment.get_cwd()
else:
main_dir.make_absolute()
main_dir = Filename(main_dir.get_dirname())
ExecutionEnvironment.shadow_environment_variable('MAIN_DIR', main_dir.to_os_specific())
del sys, main_dir
def Dtool_funcToMethod(func, cls, method_name=None):
"""Adds func to class so it is an accessible method; use method_name to specify the name to be used for calling the method.
The new method is accessible to any instance immediately."""
#if sys.version_info < (3, 0):
# func.im_class = cls
func.im_func = func
func.im_self = None
if not method_name:
method_name = func.__name__
    cls.DtoolClassDict[method_name] = func
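# Usage sketch: every extension below follows this pattern -- define a free
# function taking "self", graft it onto the wrapped class, then drop the
# module-level name:
#   def printChildren(self): ...
#   Dtool_funcToMethod(printChildren, NodePath)
#   del printChildren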
### END direct/src/extensions_native/core_extensions.py
### BEGIN direct/src/extensions_native/NodePath_extensions.py
####################################################################
#Dtool_funcToMethod(func, class)
#del func
#####################################################################
"""
NodePath-extensions module: contains methods to extend functionality
of the NodePath class
"""
####################################################################
def id(self):
"""Deprecated. Returns a unique id identifying the NodePath instance"""
print("Warning: NodePath.id() is deprecated. Use hash(NodePath) or NodePath.get_key() instead.")
return self.getKey()
Dtool_funcToMethod(id, NodePath)
del id
#####################################################################
def getChildrenAsList(self):
"""Deprecated. Converts a node path's child NodePathCollection into a list"""
print("Warning: NodePath.getChildrenAsList() is deprecated. Use get_children() instead.")
return list(self.getChildren())
Dtool_funcToMethod(getChildrenAsList, NodePath)
del getChildrenAsList
#####################################################################
def printChildren(self):
"""Prints out the children of the bottom node of a node path"""
for child in self.getChildren():
print(child.getName())
Dtool_funcToMethod(printChildren, NodePath)
del printChildren
#####################################################################
def removeChildren(self):
"""Deletes the children of the bottom node of a node path"""
self.getChildren().detach()
Dtool_funcToMethod(removeChildren, NodePath)
del removeChildren
#####################################################################
def toggleVis(self):
"""Toggles visibility of a nodePath"""
if self.isHidden():
self.show()
return 1
else:
self.hide()
return 0
Dtool_funcToMethod(toggleVis, NodePath)
del toggleVis
#####################################################################
def showSiblings(self):
"""Show all the siblings of a node path"""
for sib in self.getParent().getChildren():
if sib.node() != self.node():
sib.show()
Dtool_funcToMethod(showSiblings, NodePath)
del showSiblings
#####################################################################
def hideSiblings(self):
"""Hide all the siblings of a node path"""
for sib in self.getParent().getChildren():
if sib.node() != self.node():
sib.hide()
Dtool_funcToMethod(hideSiblings, NodePath)
del hideSiblings
#####################################################################
def showAllDescendants(self):
"""Show the node path and all its children"""
self.show()
for child in self.getChildren():
child.showAllDescendants()
Dtool_funcToMethod(showAllDescendants, NodePath)
del showAllDescendants
#####################################################################
def isolate(self):
"""Show the node path and hide its siblings"""
self.showAllDescendants()
self.hideSiblings()
Dtool_funcToMethod(isolate, NodePath)
del isolate
#####################################################################
def remove(self):
"""Deprecated. Remove a node path from the scene graph"""
print("Warning: NodePath.remove() is deprecated. Use remove_node() instead.")
# Send message in case anyone needs to do something
# before node is deleted
messenger.send('preRemoveNodePath', [self])
# Remove nodePath
self.removeNode()
Dtool_funcToMethod(remove, NodePath)
del remove
#####################################################################
def lsNames(self):
"""Walk down a tree and print out the path"""
if self.isEmpty():
print("(empty)")
else:
type = self.node().getType().getName()
name = self.getName()
print(type + " " + name)
self.lsNamesRecurse()
Dtool_funcToMethod(lsNames, NodePath)
del lsNames
#####################################################################
def lsNamesRecurse(self, indentString=' '):
"""Walk down a tree and print out the path"""
for nodePath in self.getChildren():
type = nodePath.node().getType().getName()
name = nodePath.getName()
print(indentString + type + " " + name)
nodePath.lsNamesRecurse(indentString + " ")
Dtool_funcToMethod(lsNamesRecurse, NodePath)
del lsNamesRecurse
#####################################################################
def reverseLsNames(self):
"""Walk up a tree and print out the path to the root"""
ancestors = list(self.getAncestors())
    ancestors.reverse()
    indentString = ""
    for nodePath in ancestors:
type = nodePath.node().getType().getName()
name = nodePath.getName()
print(indentString + type + " " + name)
indentString = indentString + " "
Dtool_funcToMethod(reverseLsNames, NodePath)
del reverseLsNames
#####################################################################
def getAncestry(self):
"""Get a list of a node path's ancestors"""
print("NodePath.getAncestry() is deprecated. Use get_ancestors() instead.""")
ancestors = list(self.getAncestors())
ancestors.reverse()
return ancestors
Dtool_funcToMethod(getAncestry, NodePath)
del getAncestry
#####################################################################
def pPrintString(self, other = None):
"""
pretty print
"""
if __debug__:
# Normally I would have put the if __debug__ around
        # the entire function, but that doesn't seem to work
# with -extensions. Maybe someone will look into
# this further.
if other:
pos = self.getPos(other)
hpr = self.getHpr(other)
scale = self.getScale(other)
shear = self.getShear(other)
otherString = " 'other': %s,\n" % (other.getName(),)
else:
pos = self.getPos()
hpr = self.getHpr()
scale = self.getScale()
shear = self.getShear()
otherString = '\n'
return (
"%s = {"%(self.getName()) +
otherString +
" 'Pos': (%s),\n" % pos.pPrintValues() +
" 'Hpr': (%s),\n" % hpr.pPrintValues() +
" 'Scale': (%s),\n" % scale.pPrintValues() +
" 'Shear': (%s),\n" % shear.pPrintValues() +
"}")
Dtool_funcToMethod(pPrintString, NodePath)
del pPrintString
#####################################################################
def printPos(self, other = None, sd = 2):
""" Pretty print a node path's pos """
formatString = '%0.' + '%d' % sd + 'f'
if other:
pos = self.getPos(other)
otherString = other.getName() + ', '
else:
pos = self.getPos()
otherString = ''
print((self.getName() + '.setPos(' + otherString +
formatString % pos[0] + ', ' +
formatString % pos[1] + ', ' +
formatString % pos[2] +
')\n'))
Dtool_funcToMethod(printPos, NodePath)
del printPos
#####################################################################
def printHpr(self, other = None, sd = 2):
""" Pretty print a node path's hpr """
formatString = '%0.' + '%d' % sd + 'f'
if other:
hpr = self.getHpr(other)
otherString = other.getName() + ', '
else:
hpr = self.getHpr()
otherString = ''
print((self.getName() + '.setHpr(' + otherString +
formatString % hpr[0] + ', ' +
formatString % hpr[1] + ', ' +
formatString % hpr[2] +
')\n'))
Dtool_funcToMethod(printHpr, NodePath)
del printHpr
#####################################################################
def printScale(self, other = None, sd = 2):
""" Pretty print a node path's scale """
formatString = '%0.' + '%d' % sd + 'f'
if other:
scale = self.getScale(other)
otherString = other.getName() + ', '
else:
scale = self.getScale()
otherString = ''
print((self.getName() + '.setScale(' + otherString +
formatString % scale[0] + ', ' +
formatString % scale[1] + ', ' +
formatString % scale[2] +
')\n'))
Dtool_funcToMethod(printScale, NodePath)
del printScale
#####################################################################
def printPosHpr(self, other = None, sd = 2):
""" Pretty print a node path's pos and, hpr """
formatString = '%0.' + '%d' % sd + 'f'
if other:
pos = self.getPos(other)
hpr = self.getHpr(other)
otherString = other.getName() + ', '
else:
pos = self.getPos()
hpr = self.getHpr()
otherString = ''
print((self.getName() + '.setPosHpr(' + otherString +
formatString % pos[0] + ', ' +
formatString % pos[1] + ', ' +
formatString % pos[2] + ', ' +
formatString % hpr[0] + ', ' +
formatString % hpr[1] + ', ' +
formatString % hpr[2] +
')\n'))
Dtool_funcToMethod(printPosHpr, NodePath)
del printPosHpr
#####################################################################
def printPosHprScale(self, other = None, sd = 2):
""" Pretty print a node path's pos, hpr, and scale """
formatString = '%0.' + '%d' % sd + 'f'
if other:
pos = self.getPos(other)
hpr = self.getHpr(other)
scale = self.getScale(other)
otherString = other.getName() + ', '
else:
pos = self.getPos()
hpr = self.getHpr()
scale = self.getScale()
otherString = ''
print((self.getName() + '.setPosHprScale(' + otherString +
formatString % pos[0] + ', ' +
formatString % pos[1] + ', ' +
formatString % pos[2] + ', ' +
formatString % hpr[0] + ', ' +
formatString % hpr[1] + ', ' +
formatString % hpr[2] + ', ' +
formatString % scale[0] + ', ' +
formatString % scale[1] + ', ' +
formatString % scale[2] +
')\n'))
Dtool_funcToMethod(printPosHprScale, NodePath)
del printPosHprScale
#####################################################################
def printTransform(self, other = None, sd = 2, fRecursive = 0):
from panda3d.core import Vec3
fmtStr = '%%0.%df' % sd
name = self.getName()
if other == None:
transform = self.getTransform()
else:
transform = self.getTransform(other)
if transform.hasPos():
pos = transform.getPos()
if not pos.almostEqual(Vec3(0)):
outputString = '%s.setPos(%s, %s, %s)' % (name, fmtStr, fmtStr, fmtStr)
print(outputString % (pos[0], pos[1], pos[2]))
if transform.hasHpr():
hpr = transform.getHpr()
if not hpr.almostEqual(Vec3(0)):
outputString = '%s.setHpr(%s, %s, %s)' % (name, fmtStr, fmtStr, fmtStr)
print(outputString % (hpr[0], hpr[1], hpr[2]))
if transform.hasScale():
if transform.hasUniformScale():
scale = transform.getUniformScale()
if scale != 1.0:
outputString = '%s.setScale(%s)' % (name, fmtStr)
print(outputString % scale)
else:
scale = transform.getScale()
if not scale.almostEqual(Vec3(1)):
outputString = '%s.setScale(%s, %s, %s)' % (name, fmtStr, fmtStr, fmtStr)
print(outputString % (scale[0], scale[1], scale[2]))
if fRecursive:
for child in self.getChildren():
child.printTransform(other, sd, fRecursive)
Dtool_funcToMethod(printTransform, NodePath)
del printTransform
#####################################################################
def iPos(self, other = None):
""" Set node path's pos to 0, 0, 0 """
if other:
self.setPos(other, 0, 0, 0)
else:
self.setPos(0, 0, 0)
Dtool_funcToMethod(iPos, NodePath)
del iPos
#####################################################################
def iHpr(self, other = None):
""" Set node path's hpr to 0, 0, 0 """
if other:
self.setHpr(other, 0, 0, 0)
else:
self.setHpr(0, 0, 0)
Dtool_funcToMethod(iHpr, NodePath)
del iHpr
#####################################################################
def iScale(self, other = None):
""" SEt node path's scale to 1, 1, 1 """
if other:
self.setScale(other, 1, 1, 1)
else:
self.setScale(1, 1, 1)
Dtool_funcToMethod(iScale, NodePath)
del iScale
#####################################################################
def iPosHpr(self, other = None):
""" Set node path's pos and hpr to 0, 0, 0 """
if other:
self.setPosHpr(other, 0, 0, 0, 0, 0, 0)
else:
self.setPosHpr(0, 0, 0, 0, 0, 0)
Dtool_funcToMethod(iPosHpr, NodePath)
del iPosHpr
#####################################################################
def iPosHprScale(self, other = None):
""" Set node path's pos and hpr to 0, 0, 0 and scale to 1, 1, 1 """
if other:
self.setPosHprScale(other, 0, 0, 0, 0, 0, 0, 1, 1, 1)
else:
self.setPosHprScale(0, 0, 0, 0, 0, 0, 1, 1, 1)
# private methods
Dtool_funcToMethod(iPosHprScale, NodePath)
del iPosHprScale
#####################################################################
def place(self):
base.startDirect(fWantTk = 1)
from direct.tkpanels import Placer
return Placer.place(self)
Dtool_funcToMethod(place, NodePath)
del place
#####################################################################
def explore(self):
base.startDirect(fWantTk = 1)
from direct.tkwidgets import SceneGraphExplorer
return SceneGraphExplorer.explore(self)
Dtool_funcToMethod(explore, NodePath)
del explore
#####################################################################
def rgbPanel(self, cb = None):
base.startTk()
from direct.tkwidgets import Slider
return Slider.rgbPanel(self, cb)
Dtool_funcToMethod(rgbPanel, NodePath)
del rgbPanel
#####################################################################
def select(self):
base.startDirect(fWantTk = 0)
base.direct.select(self)
Dtool_funcToMethod(select, NodePath)
del select
#####################################################################
def deselect(self):
base.startDirect(fWantTk = 0)
base.direct.deselect(self)
Dtool_funcToMethod(deselect, NodePath)
del deselect
#####################################################################
def showCS(self, mask = None):
"""
Shows the collision solids at or below this node. If mask is
not None, it is a BitMask32 object (e.g. WallBitmask,
CameraBitmask) that indicates which particular collision
solids should be made visible; otherwise, all of them will be.
"""
npc = self.findAllMatches('**/+CollisionNode')
for p in range(0, npc.getNumPaths()):
np = npc[p]
if (mask == None or (np.node().getIntoCollideMask() & mask).getWord()):
np.show()
Dtool_funcToMethod(showCS, NodePath)
del showCS
#####################################################################
def hideCS(self, mask = None):
"""
Hides the collision solids at or below this node. If mask is
not None, it is a BitMask32 object (e.g. WallBitmask,
CameraBitmask) that indicates which particular collision
solids should be hidden; otherwise, all of them will be.
"""
npc = self.findAllMatches('**/+CollisionNode')
for p in range(0, npc.getNumPaths()):
np = npc[p]
if (mask == None or (np.node().getIntoCollideMask() & mask).getWord()):
np.hide()
Dtool_funcToMethod(hideCS, NodePath)
del hideCS
#####################################################################
def posInterval(self, *args, **kw):
from direct.interval import LerpInterval
return LerpInterval.LerpPosInterval(self, *args, **kw)
Dtool_funcToMethod(posInterval, NodePath)
del posInterval
#####################################################################
def hprInterval(self, *args, **kw):
from direct.interval import LerpInterval
return LerpInterval.LerpHprInterval(self, *args, **kw)
Dtool_funcToMethod(hprInterval, NodePath)
del hprInterval
#####################################################################
def quatInterval(self, *args, **kw):
from direct.interval import LerpInterval
return LerpInterval.LerpQuatInterval(self, *args, **kw)
Dtool_funcToMethod(quatInterval, NodePath)
del quatInterval
#####################################################################
def scaleInterval(self, *args, **kw):
from direct.interval import LerpInterval
return LerpInterval.LerpScaleInterval(self, *args, **kw)
Dtool_funcToMethod(scaleInterval, NodePath)
del scaleInterval
#####################################################################
def shearInterval(self, *args, **kw):
from direct.interval import LerpInterval
return LerpInterval.LerpShearInterval(self, *args, **kw)
Dtool_funcToMethod(shearInterval, NodePath)
del shearInterval
#####################################################################
def posHprInterval(self, *args, **kw):
from direct.interval import LerpInterval
return LerpInterval.LerpPosHprInterval(self, *args, **kw)
Dtool_funcToMethod(posHprInterval, NodePath)
del posHprInterval
#####################################################################
def posQuatInterval(self, *args, **kw):
from direct.interval import LerpInterval
return LerpInterval.LerpPosQuatInterval(self, *args, **kw)
Dtool_funcToMethod(posQuatInterval, NodePath)
del posQuatInterval
#####################################################################
def hprScaleInterval(self, *args, **kw):
from direct.interval import LerpInterval
return LerpInterval.LerpHprScaleInterval(self, *args, **kw)
Dtool_funcToMethod(hprScaleInterval, NodePath)
del hprScaleInterval
#####################################################################
def quatScaleInterval(self, *args, **kw):
from direct.interval import LerpInterval
return LerpInterval.LerpQuatScaleInterval(self, *args, **kw)
Dtool_funcToMethod(quatScaleInterval, NodePath)
del quatScaleInterval
#####################################################################
def posHprScaleInterval(self, *args, **kw):
from direct.interval import LerpInterval
return LerpInterval.LerpPosHprScaleInterval(self, *args, **kw)
Dtool_funcToMethod(posHprScaleInterval, NodePath)
del posHprScaleInterval
#####################################################################
def posQuatScaleInterval(self, *args, **kw):
from direct.interval import LerpInterval
return LerpInterval.LerpPosQuatScaleInterval(self, *args, **kw)
Dtool_funcToMethod(posQuatScaleInterval, NodePath)
del posQuatScaleInterval
#####################################################################
def posHprScaleShearInterval(self, *args, **kw):
from direct.interval import LerpInterval
return LerpInterval.LerpPosHprScaleShearInterval(self, *args, **kw)
Dtool_funcToMethod(posHprScaleShearInterval, NodePath)
del posHprScaleShearInterval
#####################################################################
def posQuatScaleShearInterval(self, *args, **kw):
from direct.interval import LerpInterval
return LerpInterval.LerpPosQuatScaleShearInterval(self, *args, **kw)
Dtool_funcToMethod(posQuatScaleShearInterval, NodePath)
del posQuatScaleShearInterval
#####################################################################
def colorInterval(self, *args, **kw):
from direct.interval import LerpInterval
return LerpInterval.LerpColorInterval(self, *args, **kw)
Dtool_funcToMethod(colorInterval, NodePath)
del colorInterval
#####################################################################
def colorScaleInterval(self, *args, **kw):
from direct.interval import LerpInterval
return LerpInterval.LerpColorScaleInterval(self, *args, **kw)
Dtool_funcToMethod(colorScaleInterval, NodePath)
del colorScaleInterval
#####################################################################
def attachCollisionSphere(self, name, cx, cy, cz, r, fromCollide, intoCollide):
from panda3d.core import CollisionSphere
from panda3d.core import CollisionNode
coll = CollisionSphere(cx, cy, cz, r)
collNode = CollisionNode(name)
collNode.addSolid(coll)
collNode.setFromCollideMask(fromCollide)
collNode.setIntoCollideMask(intoCollide)
collNodePath = self.attachNewNode(collNode)
return collNodePath
Dtool_funcToMethod(attachCollisionSphere, NodePath)
del attachCollisionSphere
#####################################################################
def attachCollisionSegment(self, name, ax, ay, az, bx, by, bz, fromCollide, intoCollide):
from panda3d.core import CollisionSegment
from panda3d.core import CollisionNode
coll = CollisionSegment(ax, ay, az, bx, by, bz)
collNode = CollisionNode(name)
collNode.addSolid(coll)
collNode.setFromCollideMask(fromCollide)
collNode.setIntoCollideMask(intoCollide)
collNodePath = self.attachNewNode(collNode)
return collNodePath
Dtool_funcToMethod(attachCollisionSegment, NodePath)
del attachCollisionSegment
#####################################################################
def attachCollisionRay(self, name, ox, oy, oz, dx, dy, dz, fromCollide, intoCollide):
from panda3d.core import CollisionRay
from panda3d.core import CollisionNode
coll = CollisionRay(ox, oy, oz, dx, dy, dz)
collNode = CollisionNode(name)
collNode.addSolid(coll)
collNode.setFromCollideMask(fromCollide)
collNode.setIntoCollideMask(intoCollide)
collNodePath = self.attachNewNode(collNode)
return collNodePath
Dtool_funcToMethod(attachCollisionRay, NodePath)
del attachCollisionRay
#####################################################################
def flattenMultitex(self, stateFrom = None, target = None,
useGeom = 0, allowTexMat = 0, win = None):
from panda3d.core import MultitexReducer
mr = MultitexReducer()
if target != None:
mr.setTarget(target)
mr.setUseGeom(useGeom)
mr.setAllowTexMat(allowTexMat)
if win == None:
win = base.win
if stateFrom == None:
mr.scan(self)
else:
mr.scan(self, stateFrom)
mr.flatten(win)
Dtool_funcToMethod(flattenMultitex, NodePath)
del flattenMultitex
#####################################################################
def getNumDescendants(self):
return len(self.findAllMatches('**')) - 1
Dtool_funcToMethod(getNumDescendants, NodePath)
del getNumDescendants
#####################################################################
def removeNonCollisions(self):
# remove anything that is not collision-related
stack = [self]
while len(stack):
np = stack.pop()
# if there are no CollisionNodes under this node, remove it
if np.find('**/+CollisionNode').isEmpty():
np.detachNode()
else:
stack.extend(np.getChildren())
Dtool_funcToMethod(removeNonCollisions, NodePath)
del removeNonCollisions
#####################################################################
def subdivideCollisions(self, numSolidsInLeaves):
"""
expand CollisionNodes out into balanced trees, with a particular number
of solids in the leaves
TODO: better splitting logic at each level of the tree wrt spatial separation
and cost of bounding volume tests vs. cost of collision solid tests
"""
colNps = self.findAllMatches('**/+CollisionNode')
for colNp in colNps:
node = colNp.node()
numSolids = node.getNumSolids()
if numSolids <= numSolidsInLeaves:
# this CollisionNode doesn't need to be split
continue
solids = []
        for i in range(numSolids):
solids.append(node.getSolid(i))
# recursively subdivide the solids into a spatial binary tree
solidTree = self.r_subdivideCollisions(solids, numSolidsInLeaves)
root = colNp.getParent().attachNewNode('%s-subDivRoot' % colNp.getName())
self.r_constructCollisionTree(solidTree, root, colNp.getName())
colNp.stash()
def r_subdivideCollisions(self, solids, numSolidsInLeaves):
# takes a list of solids, returns a list containing some number of lists,
# with the solids evenly distributed between them (recursively nested until
# the lists at the leaves contain no more than numSolidsInLeaves)
# if solids is already small enough, returns solids unchanged
if len(solids) <= numSolidsInLeaves:
return solids
origins = []
avgX = 0; avgY = 0; avgZ = 0
minX = None; minY = None; minZ = None
maxX = None; maxY = None; maxZ = None
for solid in solids:
origin = solid.getCollisionOrigin()
origins.append(origin)
x = origin.getX(); y = origin.getY(); z = origin.getZ()
avgX += x; avgY += y; avgZ += z
if minX is None:
minX = x; minY = y; minZ = z
maxX = x; maxY = y; maxZ = z
else:
minX = min(x, minX); minY = min(y, minY); minZ = min(z, minZ)
maxX = max(x, maxX); maxY = max(y, maxY); maxZ = max(z, maxZ)
avgX /= len(solids); avgY /= len(solids); avgZ /= len(solids)
extentX = maxX - minX; extentY = maxY - minY; extentZ = maxZ - minZ
maxExtent = max(max(extentX, extentY), extentZ)
# sparse octree
xyzSolids = []
XyzSolids = []
xYzSolids = []
XYzSolids = []
xyZSolids = []
XyZSolids = []
xYZSolids = []
XYZSolids = []
midX = avgX
midY = avgY
midZ = avgZ
# throw out axes that are not close to the max axis extent; try and keep
# the divisions square/spherical
if extentX < (maxExtent * .75) or extentX > (maxExtent * 1.25):
midX += maxExtent
if extentY < (maxExtent * .75) or extentY > (maxExtent * 1.25):
midY += maxExtent
if extentZ < (maxExtent * .75) or extentZ > (maxExtent * 1.25):
midZ += maxExtent
    for i in range(len(solids)):
origin = origins[i]
x = origin.getX(); y = origin.getY(); z = origin.getZ()
if x < midX:
if y < midY:
if z < midZ:
xyzSolids.append(solids[i])
else:
xyZSolids.append(solids[i])
else:
if z < midZ:
xYzSolids.append(solids[i])
else:
xYZSolids.append(solids[i])
else:
if y < midY:
if z < midZ:
XyzSolids.append(solids[i])
else:
XyZSolids.append(solids[i])
else:
if z < midZ:
XYzSolids.append(solids[i])
else:
XYZSolids.append(solids[i])
newSolids = []
if len(xyzSolids):
newSolids.append(self.r_subdivideCollisions(xyzSolids, numSolidsInLeaves))
if len(XyzSolids):
newSolids.append(self.r_subdivideCollisions(XyzSolids, numSolidsInLeaves))
if len(xYzSolids):
newSolids.append(self.r_subdivideCollisions(xYzSolids, numSolidsInLeaves))
if len(XYzSolids):
newSolids.append(self.r_subdivideCollisions(XYzSolids, numSolidsInLeaves))
if len(xyZSolids):
newSolids.append(self.r_subdivideCollisions(xyZSolids, numSolidsInLeaves))
if len(XyZSolids):
newSolids.append(self.r_subdivideCollisions(XyZSolids, numSolidsInLeaves))
if len(xYZSolids):
newSolids.append(self.r_subdivideCollisions(xYZSolids, numSolidsInLeaves))
if len(XYZSolids):
newSolids.append(self.r_subdivideCollisions(XYZSolids, numSolidsInLeaves))
#import pdb;pdb.set_trace()
return newSolids
def r_constructCollisionTree(self, solidTree, parentNode, colName):
for item in solidTree:
if type(item[0]) == type([]):
newNode = parentNode.attachNewNode('%s-branch' % colName)
self.r_constructCollisionTree(item, newNode, colName)
else:
cn = CollisionNode('%s-leaf' % colName)
for solid in item:
cn.addSolid(solid)
parentNode.attachNewNode(cn)
Dtool_funcToMethod(subdivideCollisions, NodePath)
Dtool_funcToMethod(r_subdivideCollisions, NodePath)
Dtool_funcToMethod(r_constructCollisionTree, NodePath)
del subdivideCollisions
del r_subdivideCollisions
del r_constructCollisionTree
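# Usage sketch (assumed NodePath "np" with dense CollisionNodes below it):
#   np.subdivideCollisions(4)
# rebuilds each CollisionNode as a spatial tree with at most 4 solids per
# leaf; the original node is stash()ed and a "<name>-subDivRoot" tree takes
# its place.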
#####################################################################
def analyze(self):
"""
Analyzes the geometry below this node and reports the
number of vertices, triangles, etc. This is the same
information reported by the bam-info program.
"""
from panda3d.core import SceneGraphAnalyzer
sga = SceneGraphAnalyzer()
sga.addNode(self.node())
if sga.getNumLodNodes() == 0:
print(sga)
else:
print("At highest LOD:")
sga2 = SceneGraphAnalyzer()
sga2.setLodMode(sga2.LMHighest)
sga2.addNode(self.node())
print(sga2)
print("\nAt lowest LOD:")
sga2.clear()
sga2.setLodMode(sga2.LMLowest)
sga2.addNode(self.node())
print(sga2)
print("\nAll nodes:")
print(sga)
Dtool_funcToMethod(analyze, NodePath)
del analyze
#####################################################################
### END direct/src/extensions_native/NodePath_extensions.py
### BEGIN direct/src/extensions_native/Mat3_extensions.py
####################################################################
#Dtool_funcToMethod(func, class)
#del func
#####################################################################
"""
Mat3-extensions module: contains methods to extend functionality
of the LMatrix3f class.
"""
def pPrintValues(self):
"""
Pretty print
"""
return "\n%s\n%s\n%s" % (
self.getRow(0).pPrintValues(), self.getRow(1).pPrintValues(), self.getRow(2).pPrintValues())
Dtool_funcToMethod(pPrintValues, Mat3)
del pPrintValues
#####################################################################
### END direct/src/extensions_native/Mat3_extensions.py
### BEGIN direct/src/extensions_native/VBase3_extensions.py
"""
Methods to extend functionality of the VBase3 class
"""
def pPrintValues(self):
"""
Pretty print
"""
return "% 10.4f, % 10.4f, % 10.4f" % (self[0], self[1], self[2])
Dtool_funcToMethod(pPrintValues, VBase3)
del pPrintValues
def asTuple(self):
"""
Returns the vector as a tuple.
"""
print("Warning: VBase3.asTuple() is no longer needed and deprecated. Use the vector directly instead.")
return tuple(self)
Dtool_funcToMethod(asTuple, VBase3)
del asTuple
### END direct/src/extensions_native/VBase3_extensions.py
### BEGIN direct/src/extensions_native/VBase4_extensions.py
"""
Methods to extend functionality of the VBase4 class
"""
def pPrintValues(self):
"""
Pretty print
"""
return "% 10.4f, % 10.4f, % 10.4f, % 10.4f" % (self[0], self[1], self[2], self[3])
Dtool_funcToMethod(pPrintValues, VBase4)
del pPrintValues
def asTuple(self):
"""
Returns the vector as a tuple.
"""
print("Warning: VBase4.asTuple() is no longer needed and deprecated. Use the vector directly instead.")
return tuple(self)
Dtool_funcToMethod(asTuple, VBase4)
del asTuple
### END direct/src/extensions_native/VBase4_extensions.py
### BEGIN direct/src/extensions_native/HTTPChannel_extensions.py
####################################################################
#Dtool_funcToMethod(func, class)
#del func
#####################################################################
"""
HTTPChannel-extensions module: contains methods to extend functionality
of the HTTPChannel class
"""
def spawnTask(self, name=None, callback=None, extraArgs=[]):
"""Spawns a task to service the download recently requested
via beginGetDocument(), etc., and/or downloadToFile() or
downloadToRam(). If a callback is specified, that function is
called when the download is complete, passing in the extraArgs
given.
Returns the newly-spawned task.
"""
if not name:
name = str(self.getUrl())
from direct.task import Task
task = Task.Task(self.doTask)
task.callback = callback
task.callbackArgs = extraArgs
return taskMgr.add(task, name)
Dtool_funcToMethod(spawnTask, HTTPChannel)
del spawnTask
#####################################################################
def doTask(self, task):
from direct.task import Task
if self.run():
return Task.cont
if task.callback:
task.callback(*task.callbackArgs)
return Task.done
Dtool_funcToMethod(doTask, HTTPChannel)
del doTask
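# Minimal usage sketch (added; not part of the original file). Assumes a
# running Panda3D app so the `taskMgr` builtin used by spawnTask() exists;
# the URL is hypothetical.
#
#     from panda3d.core import HTTPClient, DocumentSpec, Ramfile
#     http = HTTPClient()
#     channel = http.makeChannel(True)
#     channel.beginGetDocument(DocumentSpec('http://example.com/data.bin'))
#     rf = Ramfile()
#     channel.downloadToRam(rf)
#     channel.spawnTask(callback=lambda: print('download finished'))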
#####################################################################
### END direct/src/extensions_native/HTTPChannel_extensions.py
| apache-2.0 |
raphaelrpl/portal | backend/appengine/routes/questions/rest.py | 1 | 4013 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from time import sleep
from gaebusiness.business import CommandExecutionException
from permission_app.permission_facade import main_user_form
from tekton.gae.middleware.json_middleware import JsonResponse
from question_app import question_facade
from gaepermission.decorator import login_required
from gaecookie.decorator import no_csrf
from question_app.question_model import CategoryQuestion, Question
from category_app.category_model import Category
@login_required
@no_csrf
def index():
cmd = question_facade.list_questions_cmd()
question_list = cmd()
question_form = question_facade.question_form()
def localize_user(model):
dct = question_form.fill_with_model(model)
user = main_user_form().fill_with_model(model.user.get())
dct['user'] = user
return dct
question_dcts = [localize_user(m) for m in question_list]
return JsonResponse(question_dcts)
@login_required
def new(_resp, _logged_user, **question_properties):
if _logged_user is None:
_resp.status_code = 400
return JsonResponse({"name": "Login required!"})
quest = question_properties.get('question', {})
if not quest:
_resp.status_code = 400
return JsonResponse({"name": "Required Field"})
question = Question(**quest)
question.user = _logged_user.key
try:
question.put()
except CommandExecutionException:
_resp.status_code = 400
if not question.name:
return JsonResponse({"name": "Required field"})
return JsonResponse({"name": "Put a valid post"})
for c in question_properties.get("categorys", {}):
cat = Category.query(Category.name == c).fetch()
if cat:
category = CategoryQuestion(origin=cat[0], destination=question)
category.put()
question_form = question_facade.question_form()
data = question_form.fill_with_model(question)
data['user'] = _logged_user.name
sleep(0.5)
return JsonResponse(data)
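# Illustrative request payload for new() above (an added sketch; the field
# names follow the handler, the values are made up):
#
#     {
#         "question": {"name": "How do I filter an ndb query?"},
#         "categorys": ["python", "gae"]
#     }
#
# Each name in "categorys" that matches an existing Category is linked to
# the new Question through a CategoryQuestion arc.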
@login_required
def edit(_resp, _logged_user, **question_properties):
question_id = question_properties.get('id')
# key = ndb.Key('Question', int(question_id))
question = Question.get_by_id(int(question_id))
if int(_logged_user.key.id()) != int(question_properties.get('user', {}).get('id', 0)) and question_id is not None:
_resp.status_code = 400
return JsonResponse({"name": "This post doesn't belong to you!"})
if question is None:
_resp.status_code = 400
return JsonResponse({"name": "Invalid post"})
question.name = question_properties.get('name')
try:
question.put()
except Exception:
_resp.status_code = 400
return JsonResponse({"name": "Put a valid question"})
user_form = main_user_form()
form = question_facade.question_form()
question_dct = form.fill_with_model(question)
question_dct['user'] = user_form.fill_with_model(question.user.get())
return JsonResponse(question_dct)
# cmd = question_facade.update_question_cmd(question_id, **question_properties)
# return _save_or_update_json_response(_logged_user, cmd, _resp)
@login_required
def delete(_resp, id):
cmd = question_facade.delete_question_cmd(id)
try:
question = cmd()
# DeleteCategoryQuestion(destination=question).execute()
except CommandExecutionException:
_resp.status_code = 500
return JsonResponse(cmd.errors)
question_dct = question_facade.question_form().fill_with_model(question)
return JsonResponse(question_dct)
def _save_or_update_json_response(_logged_user, cmd, _resp):
try:
question = cmd()
except CommandExecutionException:
_resp.status_code = 500
return JsonResponse(cmd.errors)
question_form = question_facade.question_form()
data = question_form.fill_with_model(question)
data['user'] = _logged_user.name
return JsonResponse(data)
| mit |
vineodd/PIMSim | GEM5Simulation/gem5/src/mem/slicc/parser.py | 10 | 22989 | # Copyright (c) 2009 The Hewlett-Packard Development Company
# Copyright (c) 2017 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
# Lena Olson
import os.path
import re
import sys
from m5.util import code_formatter
from m5.util.grammar import Grammar, ParseError
import slicc.ast as ast
import slicc.util as util
from slicc.symbols import SymbolTable
class SLICC(Grammar):
def __init__(self, filename, base_dir, verbose=False, traceback=False, **kwargs):
self.protocol = None
self.traceback = traceback
self.verbose = verbose
self.symtab = SymbolTable(self)
self.base_dir = base_dir
try:
self.decl_list = self.parse_file(filename, **kwargs)
except ParseError as e:
if not self.traceback:
sys.exit(str(e))
raise
def currentLocation(self):
return util.Location(self.current_source, self.current_line,
no_warning=not self.verbose)
def codeFormatter(self, *args, **kwargs):
code = code_formatter(*args, **kwargs)
code['protocol'] = self.protocol
return code
def process(self):
self.decl_list.generate()
def writeCodeFiles(self, code_path, includes):
self.symtab.writeCodeFiles(code_path, includes)
def writeHTMLFiles(self, html_path):
self.symtab.writeHTMLFiles(html_path)
def files(self):
f = set(['Types.hh'])
f |= self.decl_list.files()
return f
t_ignore = '\t '
# C or C++ comment (ignore)
def t_c_comment(self, t):
r'/\*(.|\n)*?\*/'
t.lexer.lineno += t.value.count('\n')
def t_cpp_comment(self, t):
r'//.*'
# Define a rule so we can track line numbers
def t_newline(self, t):
r'\n+'
t.lexer.lineno += len(t.value)
reserved = {
'protocol' : 'PROTOCOL',
'include' : 'INCLUDE',
'global' : 'GLOBAL',
'machine' : 'MACHINE',
'in_port' : 'IN_PORT',
'out_port' : 'OUT_PORT',
'action' : 'ACTION',
'transition' : 'TRANS',
'structure' : 'STRUCT',
'external_type' : 'EXTERN_TYPE',
'enumeration' : 'ENUM',
'state_declaration' : 'STATE_DECL',
'peek' : 'PEEK',
'stall_and_wait' : 'STALL_AND_WAIT',
'enqueue' : 'ENQUEUE',
'check_allocate' : 'CHECK_ALLOCATE',
'check_next_cycle' : 'CHECK_NEXT_CYCLE',
'check_stop_slots' : 'CHECK_STOP_SLOTS',
'static_cast' : 'STATIC_CAST',
'if' : 'IF',
'is_valid' : 'IS_VALID',
'is_invalid' : 'IS_INVALID',
'else' : 'ELSE',
'return' : 'RETURN',
'void' : 'VOID',
'new' : 'NEW',
'OOD' : 'OOD',
}
literals = ':[]{}(),='
tokens = [ 'EQ', 'NE', 'LT', 'GT', 'LE', 'GE',
'LEFTSHIFT', 'RIGHTSHIFT',
'NOT', 'AND', 'OR',
'PLUS', 'DASH', 'STAR', 'SLASH',
'INCR', 'DECR',
'DOUBLE_COLON', 'SEMI',
'ASSIGN', 'DOT',
'IDENT', 'LIT_BOOL', 'FLOATNUMBER', 'NUMBER', 'STRING' ]
tokens += reserved.values()
t_EQ = r'=='
t_NE = r'!='
t_LT = r'<'
t_GT = r'>'
t_LE = r'<='
t_GE = r'>='
t_LEFTSHIFT = r'<<'
t_RIGHTSHIFT = r'>>'
t_NOT = r'!'
t_AND = r'&&'
t_OR = r'\|\|'
t_PLUS = r'\+'
t_DASH = r'-'
t_STAR = r'\*'
t_SLASH = r'/'
t_DOUBLE_COLON = r'::'
t_SEMI = r';'
t_ASSIGN = r':='
t_DOT = r'\.'
t_INCR = r'\+\+'
t_DECR = r'--'
precedence = (
('left', 'INCR', 'DECR'),
('left', 'OR'),
('left', 'AND'),
('left', 'EQ', 'NE'),
('left', 'LT', 'GT', 'LE', 'GE'),
('left', 'RIGHTSHIFT', 'LEFTSHIFT'),
('left', 'PLUS', 'DASH'),
('left', 'STAR', 'SLASH'),
('right', 'NOT', 'UMINUS'),
)
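# Worked example (added note, not in the original grammar): given the
# table above, STAR binds tighter than PLUS and both associate left, so
# "a + b * c - d" parses as "(a + (b * c)) - d".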
def t_IDENT(self, t):
r'[a-zA-Z_][a-zA-Z_0-9]*'
if t.value == 'true':
t.type = 'LIT_BOOL'
t.value = True
return t
if t.value == 'false':
t.type = 'LIT_BOOL'
t.value = False
return t
# Check for reserved words
t.type = self.reserved.get(t.value, 'IDENT')
return t
def t_FLOATNUMBER(self, t):
r'[0-9]+[.][0-9]+'
try:
t.value = float(t.value)
except ValueError:
raise ParseError("Illegal float", t)
return t
def t_NUMBER(self, t):
r'[0-9]+'
try:
t.value = int(t.value)
except ValueError:
raise ParseError("Illegal number", t)
return t
def t_STRING1(self, t):
r'\"[^"\n]*\"'
t.type = 'STRING'
t.value = t.value[1:-1]
return t
def t_STRING2(self, t):
r"\'[^'\n]*\'"
t.type = 'STRING'
t.value = t.value[1:-1]
return t
def p_file(self, p):
"file : decls"
p[0] = p[1]
def p_empty(self, p):
"empty :"
def p_decls(self, p):
"decls : declsx"
p[0] = ast.DeclListAST(self, p[1])
def p_declsx__list(self, p):
"declsx : decl declsx"
if isinstance(p[1], ast.DeclListAST):
decls = p[1].decls
elif p[1] is None:
decls = []
else:
decls = [ p[1] ]
p[0] = decls + p[2]
def p_declsx__none(self, p):
"declsx : empty"
p[0] = []
def p_decl__protocol(self, p):
"decl : PROTOCOL STRING SEMI"
if self.protocol:
msg = "Protocol can only be set once! Error at %s:%s\n" % \
(self.current_source, self.current_line)
raise ParseError(msg)
self.protocol = p[2]
p[0] = None
def p_decl__include(self, p):
"decl : INCLUDE STRING SEMI"
dirname = os.path.dirname(self.current_source)
if os.path.exists(os.path.join(dirname, p[2])):
filename = os.path.join(dirname, p[2])
else:
filename = os.path.join(self.base_dir, p[2])
p[0] = self.parse_file(filename)
def p_decl__machine0(self, p):
"decl : MACHINE '(' enumeration ')' ':' obj_decls '{' decls '}'"
p[0] = ast.MachineAST(self, p[3], [], p[7], p[9])
def p_decl__machine1(self, p):
"decl : MACHINE '(' enumeration pairs ')' ':' obj_decls '{' decls '}'"
p[0] = ast.MachineAST(self, p[3], p[4], p[7], p[9])
def p_decl__action(self, p):
"decl : ACTION '(' ident pairs ')' statements"
p[0] = ast.ActionDeclAST(self, p[3], p[4], p[6])
def p_decl__in_port(self, p):
"decl : IN_PORT '(' ident ',' type ',' var pairs ')' statements"
p[0] = ast.InPortDeclAST(self, p[3], p[5], p[7], p[8], p[10])
def p_decl__out_port(self, p):
"decl : OUT_PORT '(' ident ',' type ',' var pairs ')' SEMI"
p[0] = ast.OutPortDeclAST(self, p[3], p[5], p[7], p[8])
def p_decl__trans0(self, p):
"decl : TRANS '(' idents ',' idents ',' ident_or_star ')' idents"
p[0] = ast.TransitionDeclAST(self, [], p[3], p[5], p[7], p[9])
def p_decl__trans1(self, p):
"decl : TRANS '(' idents ',' idents ')' idents"
p[0] = ast.TransitionDeclAST(self, [], p[3], p[5], None, p[7])
def p_decl__trans2(self, p):
"decl : TRANS '(' idents ',' idents ',' ident_or_star ')' idents idents"
p[0] = ast.TransitionDeclAST(self, p[9], p[3], p[5], p[7], p[10])
def p_decl__trans3(self, p):
"decl : TRANS '(' idents ',' idents ')' idents idents"
p[0] = ast.TransitionDeclAST(self, p[7], p[3], p[5], None, p[8])
def p_decl__extern0(self, p):
"decl : EXTERN_TYPE '(' type pairs ')' SEMI"
p[4]["external"] = "yes"
p[0] = ast.TypeDeclAST(self, p[3], p[4], [])
def p_decl__global(self, p):
"decl : GLOBAL '(' type pairs ')' '{' type_members '}'"
p[4]["global"] = "yes"
p[0] = ast.TypeDeclAST(self, p[3], p[4], p[7])
def p_decl__struct(self, p):
"decl : STRUCT '(' type pairs ')' '{' type_members '}'"
p[0] = ast.TypeDeclAST(self, p[3], p[4], p[7])
def p_decl__enum(self, p):
"decl : ENUM '(' type pairs ')' '{' type_enums '}'"
p[4]["enumeration"] = "yes"
p[0] = ast.EnumDeclAST(self, p[3], p[4], p[7])
def p_decl__state_decl(self, p):
"decl : STATE_DECL '(' type pairs ')' '{' type_states '}'"
p[4]["enumeration"] = "yes"
p[4]["state_decl"] = "yes"
p[0] = ast.StateDeclAST(self, p[3], p[4], p[7])
# Type fields
def p_obj_decls__list(self, p):
"obj_decls : obj_decl obj_decls"
p[0] = [ p[1] ] + p[2]
def p_obj_decls__empty(self, p):
"obj_decls : empty"
p[0] = []
def p_type_members__list(self, p):
"type_members : type_member type_members"
p[0] = [ p[1] ] + p[2]
def p_type_members__empty(self, p):
"type_members : empty"
p[0] = []
def p_type_member__0(self, p):
"""type_member : obj_decl
| func_decl
| func_def"""
p[0] = p[1]
# Member / Variable declarations
def p_decl__obj_decl(self, p):
"decl : obj_decl"
p[0] = p[1]
def p_obj_decl__0(self, p):
"obj_decl : type ident pairs SEMI"
p[0] = ast.ObjDeclAST(self, p[1], p[2], p[3], None, False)
def p_obj_decl__1(self, p):
"obj_decl : type STAR ident pairs SEMI"
p[0] = ast.ObjDeclAST(self, p[1], p[3], p[4], None, True)
def p_obj_decl__2(self, p):
"obj_decl : type ident ASSIGN expr SEMI"
p[0] = ast.ObjDeclAST(self, p[1], p[2], ast.PairListAST(self), p[4],
False)
def p_obj_decl__3(self, p):
"obj_decl : type STAR ident ASSIGN expr SEMI"
p[0] = ast.ObjDeclAST(self, p[1], p[3], ast.PairListAST(self), p[5],
True)
# Function definition and declaration
def p_decl__func_decl(self, p):
"decl : func_decl"
p[0] = p[1]
def p_func_decl__0(self, p):
"""func_decl : void ident '(' params ')' pairs SEMI
| type ident '(' params ')' pairs SEMI"""
p[0] = ast.FuncDeclAST(self, p[1], p[2], p[4], p[6], None)
def p_func_decl__1(self, p):
"""func_decl : void ident '(' types ')' pairs SEMI
| type ident '(' types ')' pairs SEMI"""
p[0] = ast.FuncDeclAST(self, p[1], p[2], p[4], p[6], None)
def p_decl__func_def(self, p):
"decl : func_def"
p[0] = p[1]
def p_func_def__0(self, p):
"""func_def : void ident '(' params ')' pairs statements
| type ident '(' params ')' pairs statements"""
p[0] = ast.FuncDeclAST(self, p[1], p[2], p[4], p[6], p[7])
# Enum fields
def p_type_enums__list(self, p):
"type_enums : type_enum type_enums"
p[0] = [ p[1] ] + p[2]
def p_type_enums__empty(self, p):
"type_enums : empty"
p[0] = []
def p_type_enum(self, p):
"type_enum : ident pairs SEMI"
p[0] = ast.TypeFieldEnumAST(self, p[1], p[2])
# States
def p_type_states__list(self, p):
"type_states : type_state type_states"
p[0] = [ p[1] ] + p[2]
def p_type_states__empty(self, p):
"type_states : empty"
p[0] = []
def p_type_state(self, p):
"type_state : ident ',' enumeration pairs SEMI"
p[0] = ast.TypeFieldStateAST(self, p[1], p[3], p[4])
# Formal Param
def p_params__many(self, p):
"params : param ',' params"
p[0] = [ p[1] ] + p[3]
def p_params__one(self, p):
"params : param"
p[0] = [ p[1] ]
def p_params__none(self, p):
"params : empty"
p[0] = []
def p_param(self, p):
"param : type ident"
p[0] = ast.FormalParamAST(self, p[1], p[2])
def p_param__pointer(self, p):
"param : type STAR ident"
p[0] = ast.FormalParamAST(self, p[1], p[3], None, True)
def p_param__pointer_default(self, p):
"param : type STAR ident ASSIGN STRING"
p[0] = ast.FormalParamAST(self, p[1], p[3], p[5], True)
def p_param__default_number(self, p):
"param : type ident ASSIGN NUMBER"
p[0] = ast.FormalParamAST(self, p[1], p[2], p[4])
def p_param__default_bool(self, p):
"param : type ident ASSIGN LIT_BOOL"
p[0] = ast.FormalParamAST(self, p[1], p[2], p[4])
def p_param__default_string(self, p):
"param : type ident ASSIGN STRING"
p[0] = ast.FormalParamAST(self, p[1], p[2], p[4])
# Type
def p_types__multiple(self, p):
"types : type ',' types"
p[0] = [ p[1] ] + p[3]
def p_types__one(self, p):
"types : type"
p[0] = [ p[1] ]
def p_types__empty(self, p):
"types : empty"
p[0] = []
def p_typestr__multi(self, p):
"typestr : typestr DOUBLE_COLON ident"
p[0] = '%s::%s' % (p[1], p[3])
def p_typestr__single(self, p):
"typestr : ident"
p[0] = p[1]
def p_type__one(self, p):
"type : typestr"
p[0] = ast.TypeAST(self, p[1])
def p_void(self, p):
"void : VOID"
p[0] = ast.TypeAST(self, p[1])
# Idents and lists
def p_idents__braced(self, p):
"idents : '{' identx '}'"
p[0] = p[2]
def p_idents__bare(self, p):
"idents : ident"
p[0] = [ p[1] ]
def p_identx__multiple_1(self, p):
"""identx : ident SEMI identx
| ident ',' identx"""
p[0] = [ p[1] ] + p[3]
def p_identx__multiple_2(self, p):
"identx : ident identx"
p[0] = [ p[1] ] + p[2]
def p_identx__single(self, p):
"identx : empty"
p[0] = [ ]
def p_ident(self, p):
"ident : IDENT"
p[0] = p[1]
def p_ident_or_star(self, p):
"""ident_or_star : ident
| STAR"""
p[0] = p[1]
# Pair and pair lists
def p_pairs__list(self, p):
"pairs : ',' pairsx"
p[0] = p[2]
def p_pairs__empty(self, p):
"pairs : empty"
p[0] = ast.PairListAST(self)
def p_pairsx__many(self, p):
"pairsx : pair ',' pairsx"
p[0] = p[3]
p[0].addPair(p[1])
def p_pairsx__one(self, p):
"pairsx : pair"
p[0] = ast.PairListAST(self)
p[0].addPair(p[1])
def p_pair__assign(self, p):
"""pair : ident '=' STRING
| ident '=' ident
| ident '=' NUMBER"""
p[0] = ast.PairAST(self, p[1], p[3])
def p_pair__literal(self, p):
"pair : STRING"
p[0] = ast.PairAST(self, "short", p[1])
# Below are the rules for action descriptions
def p_statements__inner(self, p):
"statements : '{' statements_inner '}'"
p[0] = ast.StatementListAST(self, p[2])
def p_statements__none(self, p):
"statements : '{' '}'"
p[0] = ast.StatementListAST(self, [])
def p_statements_inner__many(self, p):
"statements_inner : statement statements_inner"
p[0] = [ p[1] ] + p[2]
def p_statements_inner__one(self, p):
"statements_inner : statement"
p[0] = [ p[1] ]
def p_exprs__multiple(self, p):
"exprs : expr ',' exprs"
p[0] = [ p[1] ] + p[3]
def p_exprs__one(self, p):
"exprs : expr"
p[0] = [ p[1] ]
def p_exprs__empty(self, p):
"exprs : empty"""
p[0] = []
def p_statement__expression(self, p):
"statement : expr SEMI"
p[0] = ast.ExprStatementAST(self, p[1])
def p_statement__assign(self, p):
"statement : expr ASSIGN expr SEMI"
p[0] = ast.AssignStatementAST(self, p[1], p[3])
def p_statement__enqueue(self, p):
"statement : ENQUEUE '(' var ',' type ')' statements"
p[0] = ast.EnqueueStatementAST(self, p[3], p[5], None, p[7])
def p_statement__enqueue_latency(self, p):
"statement : ENQUEUE '(' var ',' type ',' expr ')' statements"
p[0] = ast.EnqueueStatementAST(self, p[3], p[5], p[7], p[9])
def p_statement__stall_and_wait(self, p):
"statement : STALL_AND_WAIT '(' var ',' var ')' SEMI"
p[0] = ast.StallAndWaitStatementAST(self, p[3], p[5])
def p_statement__peek(self, p):
"statement : PEEK '(' var ',' type pairs ')' statements"
p[0] = ast.PeekStatementAST(self, p[3], p[5], p[6], p[8], "peek")
def p_statement__check_allocate(self, p):
"statement : CHECK_ALLOCATE '(' var ')' SEMI"
p[0] = ast.CheckAllocateStatementAST(self, p[3])
def p_statement__check_next_cycle(self, p):
"statement : CHECK_NEXT_CYCLE '(' ')' SEMI"
p[0] = ast.CheckNextCycleAST(self)
def p_statement__check_stop(self, p):
"statement : CHECK_STOP_SLOTS '(' var ',' STRING ',' STRING ')' SEMI"
p[0] = ast.CheckStopStatementAST(self, p[3], p[5], p[7])
def p_statement__return(self, p):
"statement : RETURN expr SEMI"
p[0] = ast.ReturnStatementAST(self, p[2])
def p_statement__if(self, p):
"statement : if_statement"
p[0] = p[1]
def p_if_statement__if(self, p):
"if_statement : IF '(' expr ')' statements"
p[0] = ast.IfStatementAST(self, p[3], p[5], None)
def p_if_statement__if_else(self, p):
"if_statement : IF '(' expr ')' statements ELSE statements"
p[0] = ast.IfStatementAST(self, p[3], p[5], p[7])
def p_statement__if_else_if(self, p):
"if_statement : IF '(' expr ')' statements ELSE if_statement"
p[0] = ast.IfStatementAST(self, p[3], p[5],
ast.StatementListAST(self, p[7]))
def p_expr__static_cast(self, p):
"aexpr : STATIC_CAST '(' type ',' expr ')'"
p[0] = ast.StaticCastAST(self, p[3], "ref", p[5])
def p_expr__static_cast_ptr(self, p):
"aexpr : STATIC_CAST '(' type ',' STRING ',' expr ')'"
p[0] = ast.StaticCastAST(self, p[3], p[5], p[7])
def p_expr__var(self, p):
"aexpr : var"
p[0] = p[1]
def p_expr__localvar(self, p):
"aexpr : type ident"
p[0] = ast.LocalVariableAST(self, p[1], p[2])
def p_expr__literal(self, p):
"aexpr : literal"
p[0] = p[1]
def p_expr__enumeration(self, p):
"aexpr : enumeration"
p[0] = p[1]
def p_expr__func_call(self, p):
"aexpr : ident '(' exprs ')'"
p[0] = ast.FuncCallExprAST(self, p[1], p[3])
def p_expr__new(self, p):
"aexpr : NEW type"
p[0] = ast.NewExprAST(self, p[2])
def p_expr__null(self, p):
"aexpr : OOD"
p[0] = ast.OodAST(self)
def p_expr__member(self, p):
"aexpr : aexpr DOT ident"
p[0] = ast.MemberExprAST(self, p[1], p[3])
def p_expr__member_method_call(self, p):
"aexpr : aexpr DOT ident '(' exprs ')'"
p[0] = ast.MemberMethodCallExprAST(self, p[1],
ast.FuncCallExprAST(self, p[3], p[5]))
def p_expr__member_method_call_lookup(self, p):
"aexpr : aexpr '[' exprs ']'"
p[0] = ast.MemberMethodCallExprAST(self, p[1],
ast.FuncCallExprAST(self, "lookup", p[3]))
def p_expr__class_method_call(self, p):
"aexpr : type DOUBLE_COLON ident '(' exprs ')'"
p[0] = ast.ClassMethodCallExprAST(self, p[1],
ast.FuncCallExprAST(self, p[3], p[5]))
def p_expr__aexpr(self, p):
"expr : aexpr"
p[0] = p[1]
def p_expr__binary_op(self, p):
"""expr : expr STAR expr
| expr SLASH expr
| expr PLUS expr
| expr DASH expr
| expr LT expr
| expr GT expr
| expr LE expr
| expr GE expr
| expr EQ expr
| expr NE expr
| expr AND expr
| expr OR expr
| expr RIGHTSHIFT expr
| expr LEFTSHIFT expr"""
p[0] = ast.InfixOperatorExprAST(self, p[1], p[2], p[3])
# FIXME - unary not
def p_expr__unary_op(self, p):
"""expr : NOT expr
| INCR expr
| DECR expr
| DASH expr %prec UMINUS"""
p[0] = ast.PrefixOperatorExprAST(self, p[1], p[2])
def p_expr__parens(self, p):
"aexpr : '(' expr ')'"
p[0] = p[2]
def p_expr__is_valid_ptr(self, p):
"aexpr : IS_VALID '(' var ')'"
p[0] = ast.IsValidPtrExprAST(self, p[3], True)
def p_expr__is_invalid_ptr(self, p):
"aexpr : IS_INVALID '(' var ')'"
p[0] = ast.IsValidPtrExprAST(self, p[3], False)
def p_literal__string(self, p):
"literal : STRING"
p[0] = ast.LiteralExprAST(self, p[1], "std::string")
def p_literal__number(self, p):
"literal : NUMBER"
p[0] = ast.LiteralExprAST(self, p[1], "int")
def p_literal__float(self, p):
"literal : FLOATNUMBER"
p[0] = ast.LiteralExprAST(self, p[1], "int")
def p_literal__bool(self, p):
"literal : LIT_BOOL"
p[0] = ast.LiteralExprAST(self, p[1], "bool")
def p_enumeration(self, p):
"enumeration : ident ':' ident"
p[0] = ast.EnumExprAST(self, ast.TypeAST(self, p[1]), p[3])
def p_var(self, p):
"var : ident"
p[0] = ast.VarExprAST(self, p[1])
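# Illustrative SLICC fragment accepted by the grammar above (an added
# sketch, not a protocol shipped with gem5; all identifiers are made up):
#
#     machine(MachineType:L1Cache, "toy cache") : Cycles cache_latency := 2;
#     {
#         state_declaration(State, desc="states") {
#             I, AccessPermission:Invalid, desc="invalid";
#             V, AccessPermission:Read_Write, desc="valid";
#         }
#     }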
| gpl-3.0 |
nightjean/Deep-Learning | tensorflow/contrib/solvers/python/ops/util.py | 116 | 2092 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for solvers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
def create_operator(matrix):
"""Creates a linear operator from a rank-2 tensor."""
linear_operator = collections.namedtuple(
"LinearOperator", ["shape", "dtype", "apply", "apply_adjoint"])
# TODO(rmlarsen): Handle SparseTensor.
shape = matrix.get_shape()
if shape.is_fully_defined():
shape = shape.as_list()
else:
shape = array_ops.shape(matrix)
return linear_operator(
shape=shape,
dtype=matrix.dtype,
apply=lambda v: math_ops.matmul(matrix, v, adjoint_a=False),
apply_adjoint=lambda v: math_ops.matmul(matrix, v, adjoint_a=True))
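# Usage sketch (added illustration; assumes a TF1-style graph/session
# environment, which this contrib module targets):
#
#   import tensorflow as tf
#   op = create_operator(tf.constant([[2.0, 0.0], [0.0, 3.0]]))
#   y = op.apply(tf.constant([[1.0], [1.0]]))  # evaluates to [[2.0], [3.0]]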
# TODO(rmlarsen): Measure if we should just call matmul.
def dot(x, y):
return math_ops.reduce_sum(math_ops.conj(x) * y)
# TODO(rmlarsen): Implement matrix/vector norm op in C++ in core.
# We need 1-norm, inf-norm, and Frobenius norm.
def l2norm_squared(v):
return constant_op.constant(2, dtype=v.dtype.base_dtype) * nn_ops.l2_loss(v)
def l2norm(v):
return math_ops.sqrt(l2norm_squared(v))
def l2normalize(v):
norm = l2norm(v)
return v / norm, norm
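# Quick numeric check (added comment, not in the original file): for
# v = [3.0, 4.0], l2norm_squared(v) evaluates to 25.0, l2norm(v) to 5.0,
# and l2normalize(v) returns ([0.6, 0.8], 5.0).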
| apache-2.0 |
wojciechtanski/robotframework | utest/model/test_keyword.py | 21 | 3891 | import unittest
from robot.utils.asserts import assert_equal, assert_true, assert_raises
from robot.model import TestSuite, Message
from robot.model.keyword import Keyword, Keywords
class TestKeyword(unittest.TestCase):
def test_id_without_parent(self):
assert_equal(Keyword().id, 'k1')
def test_id_with_suite_parent(self):
assert_equal(TestSuite().keywords.create().id, 's1-k1')
def test_id_with_test_parent(self):
assert_equal(TestSuite().tests.create().keywords.create().id, 's1-t1-k1')
def test_id_with_keyword_parents(self):
kw = TestSuite().tests.create().keywords.create()
kw.keywords = [Keyword(), Keyword()]
kw.keywords[-1].keywords.create()
assert_equal(kw.keywords[0].id, 's1-t1-k1-k1')
assert_equal(kw.keywords[1].id, 's1-t1-k1-k2')
assert_equal(kw.keywords[1].keywords[0].id, 's1-t1-k1-k2-k1')
def test_slots(self):
assert_raises(AttributeError, setattr, Keyword(), 'attr', 'value')
class TestChildren(unittest.TestCase):
def test_only_keywords(self):
kw = Keyword()
for i in range(10):
kw.keywords.create(str(i))
assert_equal(kw.children, list(kw.keywords))
def test_only_messages(self):
kw = Keyword()
for i in range(10):
kw.messages.create(str(i))
assert_equal(kw.children, list(kw.messages))
def test_order(self):
kw = Keyword()
m1 = kw.messages.create('m1')
k1 = kw.keywords.create('k1')
k2 = kw.keywords.create('k2')
m2 = kw.messages.create('m2')
k3 = kw.keywords.create('k3')
assert_equal(kw.children, [m1, k1, k2, m2, k3])
def test_order_after_modifications(self):
kw = Keyword()
kw.keywords.create('k1')
kw.messages.create('m1')
k2 = kw.keywords.create('k2')
m2 = kw.messages.create('m2')
k1 = kw.keywords[0] = Keyword('k1-new')
m1 = kw.messages[0] = Message('m1-new')
m3 = Message('m3')
kw.messages.append(m3)
k3 = Keyword('k3')
kw.keywords.extend([k3])
assert_equal(kw.children, [k1, m1, k2, m2, m3, k3])
kw.keywords = [k1, k3]
kw.messages = [m1]
assert_equal(kw.children, [k1, m1, k3])
class TestStringRepresentation(unittest.TestCase):
def setUp(self):
self.empty = Keyword()
self.ascii = Keyword(name='Kekkonen')
self.non_ascii = Keyword(name=u'hyv\xe4 nimi')
def test_unicode(self):
assert_equal(unicode(self.empty), '')
assert_equal(unicode(self.ascii), 'Kekkonen')
assert_equal(unicode(self.non_ascii), u'hyv\xe4 nimi')
def test_str(self):
assert_equal(str(self.empty), '')
assert_equal(str(self.ascii), 'Kekkonen')
assert_equal(str(self.non_ascii), 'hyv? nimi')
class TestKeywords(unittest.TestCase):
def test_setup(self):
assert_equal(Keywords().setup, None)
setup = Keyword(type='setup')
assert_true(Keywords(keywords=[setup, Keyword(), Keyword()]).setup is setup)
def test_teardown(self):
assert_equal(Keywords().teardown, None)
teardown = Keyword(type='teardown')
assert_true(Keywords(keywords=[Keyword(), teardown]).teardown is teardown)
def test_for_loops_are_included(self):
kws = [Keyword(type='for'), Keyword(), Keyword(type='foritem')]
assert_equal(list(Keywords(keywords=kws).normal), kws)
assert_equal(list(Keywords(keywords=kws).all), kws)
def test_iteration(self):
kws = [Keyword(type='setup'), Keyword(), Keyword(), Keyword(type='teardown')]
assert_equal(list(Keywords(keywords=kws)), kws)
assert_equal(list(Keywords(keywords=kws).all), kws)
assert_equal(list(Keywords(keywords=kws).normal), kws[1:-1])
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
mozillazg/bustard | tests/httpbin/core.py | 1 | 21325 | # -*- coding: utf-8 -*-
"""
httpbin.core
~~~~~~~~~~~~
This module provides the core HttpBin experience.
"""
import base64
import json
import os
import random
import time
import uuid
from bustard.app import Bustard
from bustard.http import (
Response, Headers, jsonify as bustard_jsonify, redirect
)
from bustard.utils import json_dumps_default
from werkzeug.datastructures import WWWAuthenticate
from werkzeug.http import http_date
from werkzeug.serving import run_simple
from six.moves import range as xrange
from . import filters
from .helpers import (
get_headers, status_code, get_dict, get_request_range,
check_basic_auth, check_digest_auth, secure_cookie,
H, ROBOT_TXT, ANGRY_ASCII
)
from .utils import weighted_choice
from .structures import CaseInsensitiveDict
ENV_COOKIES = (
'_gauges_unique',
'_gauges_unique_year',
'_gauges_unique_month',
'_gauges_unique_day',
'_gauges_unique_hour',
'__utmz',
'__utma',
'__utmb'
)
def jsonify(*args, **kwargs):
response = bustard_jsonify(*args, **kwargs)
if not response.data.endswith(b'\n'):
response.data += b'\n'
return response
# Prevent WSGI from correcting the casing of the Location header
# BaseResponse.autocorrect_location_header = False
# Find the correct template folder when running from a different location
tmpl_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'templates')
app = Bustard(__name__, template_dir=tmpl_dir)
render_template = app.render_template
url_for = app.url_for
# -----------
# Middlewares
# -----------
@app.after_request
def set_cors_headers(request, response):
response.headers['Access-Control-Allow-Origin'] = (
request.headers.get('Origin', '*')
)
response.headers['Access-Control-Allow-Credentials'] = 'true'
if request.method == 'OPTIONS':
# Both of these headers are only used for the "preflight request"
# http://www.w3.org/TR/cors/#access-control-allow-methods-response-header
response.headers['Access-Control-Allow-Methods'] = (
'GET, POST, PUT, DELETE, PATCH, OPTIONS'
)
response.headers['Access-Control-Max-Age'] = '3600' # 1 hour cache
if request.headers.get('Access-Control-Request-Headers') is not None:
response.headers['Access-Control-Allow-Headers'] = (
request.headers['Access-Control-Request-Headers']
)
return response
# ------
# Routes
# ------
@app.route('/')
def view_landing_page(request):
"""Generates Landing Page."""
tracking_enabled = 'HTTPBIN_TRACKING' in os.environ
return render_template('index.html', request=request,
tracking_enabled=tracking_enabled)
@app.route('/html')
def view_html_page(request):
"""Simple Html Page"""
return render_template('moby.html')
@app.route('/robots.txt')
def view_robots_page(request):
"""Simple Html Page"""
response = Response()
response.content = ROBOT_TXT
response.content_type = 'text/plain'
return response
@app.route('/deny')
def view_deny_page(request):
"""Simple Html Page"""
response = Response()
response.content = ANGRY_ASCII
response.content_type = 'text/plain'
return response
# return "YOU SHOULDN'T BE HERE"
@app.route('/ip')
def view_origin(request):
"""Returns Origin IP."""
return jsonify(origin=request.headers.get('X-Forwarded-For',
request.remote_addr))
@app.route('/headers')
def view_headers(request):
"""Returns HTTP HEADERS."""
return jsonify(get_dict(request, 'headers'))
@app.route('/user-agent')
def view_user_agent(request):
"""Returns User-Agent."""
headers = get_headers(request)
return jsonify({'user-agent': headers['user-agent']})
@app.route('/get', methods=('GET', 'OPTIONS'))
def view_get(request):
"""Returns GET Data."""
return jsonify(get_dict(request, 'url', 'args', 'headers', 'origin'))
@app.route('/post', methods=('POST',))
def view_post(request):
"""Returns POST Data."""
return jsonify(get_dict(request, 'url', 'args', 'form', 'data',
'origin', 'headers', 'files', 'json'))
@app.route('/put', methods=('PUT',))
def view_put(request):
"""Returns PUT Data."""
return jsonify(get_dict(request, 'url', 'args', 'form', 'data',
'origin', 'headers', 'files', 'json'))
@app.route('/patch', methods=('PATCH',))
def view_patch(request):
"""Returns PATCH Data."""
return jsonify(get_dict(request, 'url', 'args', 'form', 'data',
'origin', 'headers', 'files', 'json'))
@app.route('/delete', methods=('DELETE',))
def view_delete(request):
"""Returns DELETE Data."""
return jsonify(get_dict(request, 'url', 'args', 'form', 'data',
'origin', 'headers', 'files', 'json'))
@app.route('/gzip')
@filters.gzip
def view_gzip_encoded_content(request):
"""Returns GZip-Encoded Data."""
return jsonify(get_dict(request, 'origin', 'headers',
method=request.method, gzipped=True))
@app.route('/deflate')
@filters.deflate
def view_deflate_encoded_content(request):
"""Returns Deflate-Encoded Data."""
return jsonify(get_dict(request, 'origin', 'headers',
method=request.method, deflated=True))
@app.route('/redirect/<int:n>')
def redirect_n_times(request, n):
"""302 Redirects n times."""
n = int(n)
assert n > 0
absolute = request.args.get('absolute', 'false').lower() == 'true'
if n == 1:
return redirect(app.url_for('view_get', _request=request,
_external=absolute))
if absolute:
return _redirect(request, 'absolute', n, True)
else:
return _redirect(request, 'relative', n, False)
def _redirect(request, kind, n, external):
return redirect(url_for('{0}_redirect_n_times'.format(kind),
n=n - 1, _external=external, _request=request))
@app.route('/redirect-to')
def redirect_to(request):
"""302 Redirects to the given URL."""
args = CaseInsensitiveDict(request.args.items())
# We need to build the response manually and convert to UTF-8 to prevent
# werkzeug from "fixing" the URL. This endpoint should set the Location
# header to the exact string supplied.
response = Response('')
response.status_code = 302
response.headers['Location'] = args['url'].encode('utf-8')
return response
@app.route('/relative-redirect/<int:n>')
def relative_redirect_n_times(request, n):
"""302 Redirects n times."""
n = int(n)
assert n > 0
response = Response('')
response.status_code = 302
if n == 1:
response.headers['Location'] = url_for('view_get')
return response
response.headers['Location'] = app.url_for(
'relative_redirect_n_times', n=n - 1
)
return response
@app.route('/absolute-redirect/<int:n>')
def absolute_redirect_n_times(request, n):
"""302 Redirects n times."""
n = int(n)
assert n > 0
if n == 1:
return redirect(app.url_for('view_get', _request=request,
_external=True))
return _redirect(request, 'absolute', n, True)
@app.route('/stream/<int:n>')
def stream_n_messages(request, n):
"""Stream n JSON messages"""
n = int(n)
response = get_dict(request, 'url', 'args', 'headers', 'origin')
n = min(n, 100)
def generate_stream():
for i in range(n):
response['id'] = i
yield json.dumps(response, default=json_dumps_default) + '\n'
return Response(generate_stream(), headers={
'Content-Type': 'application/json',
})
@app.route('/status/<codes>',
methods=['GET', 'POST', 'PUT', 'DELETE', 'PATCH', 'TRACE'])
def view_status_code(request, codes):
"""Return status code or random status code if more than one are given"""
if ',' not in codes:
code = int(codes)
return status_code(code)
choices = []
for choice in codes.split(','):
if ':' not in choice:
code = choice
weight = 1
else:
code, weight = choice.split(':')
choices.append((int(code), float(weight)))
code = weighted_choice(choices)
return status_code(code)
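# Example (added note): "GET /status/200:3,500:1" answers 200 about three
# times as often as 500; weighted_choice picks from [(200, 3.0), (500, 1.0)].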
@app.route('/response-headers')
def response_headers(request):
"""Returns a set of response headers from the query string """
headers = Headers(request.args.to_dict())
response = jsonify(headers)
while True:
content_len_shown = response.headers['Content-Length']
d = {}
for key in response.headers.keys():
value = response.headers.get_all(key)
if len(value) == 1:
value = value[0]
d[key] = value
response = jsonify(d)
for key, value in headers.to_list():
response.headers.add(key, value)
if response.headers['Content-Length'] == content_len_shown:
break
return response
@app.route('/cookies')
def view_cookies(request, hide_env=True):
"""Returns cookie data."""
cookies = dict(request.cookies.items())
if hide_env and ('show_env' not in request.args):
for key in ENV_COOKIES:
try:
del cookies[key]
except KeyError:
pass
return jsonify(cookies=cookies)
@app.route('/forms/post')
def view_forms_post(request):
"""Simple HTML form."""
return render_template('forms-post.html')
@app.route('/cookies/set/<name>/<value>')
def set_cookie(request, name, value):
"""Sets a cookie and redirects to cookie list."""
r = app.make_response(redirect(url_for('view_cookies')))
r.set_cookie(key=name, value=value, secure=secure_cookie(request))
return r
@app.route('/cookies/set')
def set_cookies(request):
"""Sets cookie(s) as provided by the query string
and redirects to cookie list.
"""
cookies = dict(request.args.items())
r = app.make_response(redirect(url_for('view_cookies')))
for key, value in cookies.items():
r.set_cookie(key=key, value=value, secure=secure_cookie(request))
return r
@app.route('/cookies/delete')
def delete_cookies(request):
"""Deletes cookie(s) as provided by the query string
and redirects to cookie list.
"""
cookies = dict(request.args.items())
r = app.make_response(redirect(url_for('view_cookies')))
for key, value in cookies.items():
r.delete_cookie(key=key)
return r
@app.route('/basic-auth/<user>/<passwd>')
def basic_auth(request, user='user', passwd='passwd'):
"""Prompts the user for authorization using HTTP Basic Auth."""
if not check_basic_auth(request, user, passwd):
return status_code(401)
return jsonify(authenticated=True, user=user)
@app.route('/hidden-basic-auth/<user>/<passwd>')
def hidden_basic_auth(request, user='user', passwd='passwd'):
"""Prompts the user for authorization using HTTP Basic Auth."""
if not check_basic_auth(request, user, passwd):
return status_code(404)
return jsonify(authenticated=True, user=user)
@app.route('/digest-auth/<qop>/<user>/<passwd>')
def digest_auth(request, qop=None, user='user', passwd='passwd'):
"""Prompts the user for authorization using HTTP Digest auth"""
if qop not in ('auth', 'auth-int'):
qop = None
if 'Authorization' not in request.headers or \
not check_digest_auth(user, passwd) or \
'Cookie' not in request.headers:
response = app.make_response('')
response.status_code = 401
# RFC2616 Section4.2: HTTP headers are ASCII. That means
# request.remote_addr was originally ASCII, so I should be able to
# encode it back to ascii. Also, RFC2617 says about nonces: "The
# contents of the nonce are implementation dependent"
nonce = H(b''.join([
getattr(request, 'remote_addr', u'').encode('ascii'),
b':',
str(time.time()).encode('ascii'),
b':',
os.urandom(10)
]))
opaque = H(os.urandom(10))
auth = WWWAuthenticate('digest')
auth.set_digest('me@kennethreitz.com', nonce, opaque=opaque,
qop=('auth', 'auth-int') if qop is None else (qop, ))
response.headers['WWW-Authenticate'] = auth.to_header()
response.headers['Set-Cookie'] = 'fake=fake_value'
return response
return jsonify(authenticated=True, user=user)
@app.route('/delay/<delay>')
def delay_response(request, delay):
"""Returns a delayed response"""
delay = min(float(delay), 10)
time.sleep(delay)
return jsonify(get_dict(request, 'url', 'args', 'form', 'data',
'origin', 'headers', 'files'))
@app.route('/drip')
def drip(request):
"""Drips data over a duration after an optional initial delay."""
args = CaseInsensitiveDict(request.args.items())
duration = float(args.get('duration', 2))
numbytes = int(args.get('numbytes', 10))
code = int(args.get('code', 200))
pause = duration / numbytes
delay = float(args.get('delay', 0))
if delay > 0:
time.sleep(delay)
def generate_bytes():
for i in xrange(numbytes):
yield u'*'.encode('utf-8')
time.sleep(pause)
response = Response(generate_bytes(), headers={
'Content-Type': 'application/octet-stream',
'Content-Length': str(numbytes),
})
response.status_code = code
return response
@app.route('/base64/<value>')
def decode_base64(request, value):
"""Decodes base64url-encoded string"""
encoded = value.encode('utf-8') # base64 expects binary string as input
return base64.urlsafe_b64decode(encoded).decode('utf-8')
@app.route('/cache', methods=('GET',))
def cache(request):
"""Returns a 304 if an If-Modified-Since header or
If-None-Match is present. Returns the same as a GET otherwise.
"""
is_conditional = (
request.headers.get('If-Modified-Since') or
request.headers.get('If-None-Match')
)
if is_conditional is None:
response = view_get(request)
response.headers['Last-Modified'] = http_date()
response.headers['ETag'] = uuid.uuid4().hex
return response
else:
return status_code(304)
@app.route('/cache/<int:value>')
def cache_control(request, value):
"""Sets a Cache-Control header."""
value = int(value)
response = view_get(request)
response.headers['Cache-Control'] = 'public, max-age={0}'.format(value)
return response
@app.route('/encoding/utf8')
def encoding(request):
return render_template('UTF-8-demo.txt')
@app.route('/bytes/<int:n>')
def random_bytes(request, n):
"""Returns n random bytes generated with given seed."""
n = int(n)
n = min(n, 100 * 1024) # set 100KB limit
params = CaseInsensitiveDict(request.args.items())
if 'seed' in params:
random.seed(int(params['seed']))
response = Response()
# Note: can't just use os.urandom here because it ignores the seed
response.data = bytearray(random.randint(0, 255) for i in range(n))
response.content_type = 'application/octet-stream'
return response
@app.route('/stream-bytes/<int:n>')
def stream_random_bytes(request, n):
"""Streams n random bytes generated with given seed,
at given chunk size per packet.
"""
n = int(n)
n = min(n, 100 * 1024) # set 100KB limit
params = CaseInsensitiveDict(request.args.items())
if 'seed' in params:
random.seed(int(params['seed']))
if 'chunk_size' in params:
chunk_size = max(1, int(params['chunk_size']))
else:
chunk_size = 10 * 1024
def generate_bytes():
chunks = bytearray()
for i in xrange(n):
chunks.append(random.randint(0, 255))
if len(chunks) == chunk_size:
yield(bytes(chunks))
chunks = bytearray()
if chunks:
yield(bytes(chunks))
headers = {'Content-Type': 'application/octet-stream'}
return Response(generate_bytes(), headers=headers)
@app.route('/range/<int:numbytes>')
def range_request(request, numbytes):
"""Streams n random bytes generated with given seed,
at given chunk size per packet.
"""
numbytes = int(numbytes)
if numbytes <= 0 or numbytes > (100 * 1024):
response = Response(headers={
'ETag': 'range%d' % numbytes,
'Accept-Ranges': 'bytes'
})
response.status_code = 404
response.content = 'number of bytes must be in the range (0, 10240]'
return response
params = CaseInsensitiveDict(request.args.items())
if 'chunk_size' in params:
chunk_size = max(1, int(params['chunk_size']))
else:
chunk_size = 10 * 1024
duration = float(params.get('duration', 0))
pause_per_byte = duration / numbytes
request_headers = get_headers(request)
first_byte_pos, last_byte_pos = get_request_range(request_headers,
numbytes)
if (
first_byte_pos > last_byte_pos or
first_byte_pos not in xrange(0, numbytes) or
last_byte_pos not in xrange(0, numbytes)
):
response = Response(headers={
'ETag': 'range%d' % numbytes,
'Accept-Ranges': 'bytes',
'Content-Range': 'bytes */%d' % numbytes
})
response.status_code = 416
return response
def generate_bytes():
chunks = bytearray()
for i in xrange(first_byte_pos, last_byte_pos + 1):
# We don't want the resource to change across requests, so we need
# to use a predictable data generation function
chunks.append(ord('a') + (i % 26))
if len(chunks) == chunk_size:
yield(bytes(chunks))
time.sleep(pause_per_byte * chunk_size)
chunks = bytearray()
if chunks:
time.sleep(pause_per_byte * len(chunks))
yield(bytes(chunks))
content_range = 'bytes %d-%d/%d' % (first_byte_pos, last_byte_pos,
numbytes)
response_headers = {
'Content-Type': 'application/octet-stream',
'ETag': 'range%d' % numbytes,
'Accept-Ranges': 'bytes',
'Content-Range': content_range}
response = Response(generate_bytes(), headers=response_headers)
if (first_byte_pos == 0) and (last_byte_pos == (numbytes - 1)):
response.status_code = 200
else:
response.status_code = 206
return response
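# Example (added note): "GET /range/26" with the header "Range: bytes=0-4"
# returns a 206 whose body is b"abcde" and whose Content-Range header is
# "bytes 0-4/26"; with no Range header the full b"a"..b"z" payload comes
# back with status 200.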
@app.route('/links/<int:n>/<int:offset>')
def link_page(request, n, offset):
"""Generate a page containing n links to other pages which do the same."""
n = int(n)
offset = int(offset)
n = min(max(1, n), 200) # limit to between 1 and 200 links
link = "<a href='{0}'>{1}</a> "
html = ['<html><head><title>Links</title></head><body>']
for i in xrange(n):
if i == offset:
html.append('{0} '.format(i))
else:
html.append(link.format(url_for('link_page', n=n, offset=i), i))
html.append('</body></html>')
return ''.join(html)
@app.route('/links/<int:n>')
def links(request, n):
"""Redirect to first links page."""
n = int(n)
return redirect(url_for('link_page', n=n, offset=0))
@app.route('/image')
def image(request):
"""Returns a simple image of the type suggest by the Accept header."""
headers = get_headers(request)
if 'accept' not in headers:
return image_png(request) # Default media type to png
accept = headers['accept'].lower()
if 'image/webp' in accept:
return image_webp(request)
elif 'image/svg+xml' in accept:
return image_svg(request)
elif 'image/jpeg' in accept:
return image_jpeg(request)
elif 'image/png' in accept or 'image/*' in accept:
return image_png(request)
else:
return status_code(406) # Unsupported media type
@app.route('/image/png')
def image_png(request):
data = resource('images/pig_icon.png')
return Response(data, headers={'Content-Type': 'image/png'})
@app.route('/image/jpeg')
def image_jpeg(request):
data = resource('images/jackal.jpg')
return Response(data, headers={'Content-Type': 'image/jpeg'})
@app.route('/image/webp')
def image_webp(request):
data = resource('images/wolf_1.webp')
return Response(data, headers={'Content-Type': 'image/webp'})
@app.route('/image/svg')
def image_svg(request):
data = resource('images/svg_logo.svg')
return Response(data, headers={'Content-Type': 'image/svg+xml'})
def resource(filename):
path = os.path.join(
tmpl_dir,
filename)
return open(path, 'rb').read()
@app.route('/xml')
def xml(request):
response = Response(render_template('sample.xml'))
response.headers['Content-Type'] = 'application/xml'
return response
if __name__ == '__main__':
run_simple('0.0.0.0', 5000, app, use_reloader=True, use_debugger=True)
| mit |
yanchen036/tensorflow | tensorflow/python/kernel_tests/padding_fifo_queue_test.py | 49 | 58572 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.PaddingFIFOQueue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test
class PaddingFIFOQueueTest(test.TestCase):
def testConstructor(self):
with ops.Graph().as_default():
q = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, ((None,),), name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'PaddingFIFOQueueV2'
attr { key: 'component_types' value { list { type: DT_FLOAT } } }
attr { key: 'shapes' value { list { shape { dim { size: -1 } } } } }
attr { key: 'capacity' value { i: 10 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
def testMultiQueueConstructor(self):
with ops.Graph().as_default():
q = data_flow_ops.PaddingFIFOQueue(
5, (dtypes_lib.int32, dtypes_lib.float32), ((), ()),
shared_name="foo",
name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'PaddingFIFOQueueV2'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list { shape { } shape { } } } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: 'foo' } }
""", q.queue_ref.op.node_def)
def testConstructorWithShapes(self):
with ops.Graph().as_default():
q = data_flow_ops.PaddingFIFOQueue(
5, (dtypes_lib.int32, dtypes_lib.float32),
shapes=(tensor_shape.TensorShape([1, 1, 2, 3]),
tensor_shape.TensorShape([5, 8])),
name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'PaddingFIFOQueueV2'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list {
shape { dim { size: 1 }
dim { size: 1 }
dim { size: 2 }
dim { size: 3 } }
shape { dim { size: 5 }
dim { size: 8 } }
} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
def testEnqueue(self):
with self.test_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
enqueue_op = q.enqueue((10.0,))
enqueue_op.run()
def testEnqueueWithShape(self):
with self.test_session():
q = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=((3, 2),))
enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
enqueue_correct_op.run()
with self.assertRaises(ValueError):
q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
self.assertEqual(1, q.size().eval())
def testEnqueueManyWithShape(self):
with self.test_session():
q = data_flow_ops.PaddingFIFOQueue(
10, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (2,)])
q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
self.assertEqual(4, q.size().eval())
def testParallelEnqueue(self):
with self.test_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
sess.run(enqueue_op)
threads = [
self.checkedThread(
target=enqueue, args=(e,)) for e in enqueue_ops
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Dequeue every element using a single thread.
results = []
for _ in xrange(len(elems)):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testParallelDequeue(self):
with self.test_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Enqueue every element using a single thread.
for enqueue_op in enqueue_ops:
enqueue_op.run()
# Run one consumer thread for each element in elems.
results = []
def dequeue():
results.append(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, results)
def testDequeue(self):
with self.test_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
vals = dequeued_t.eval()
self.assertEqual([elems[i]], vals)
def testEnqueueAndBlockingDequeue(self):
with self.test_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(3, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
def enqueue():
# The enqueue_ops should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for enqueue_op in enqueue_ops:
sess.run(enqueue_op)
results = []
def dequeue():
for _ in xrange(len(elems)):
results.append(sess.run(dequeued_t))
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
for elem, result in zip(elems, results):
self.assertEqual([elem], result)
def testMultiEnqueueAndDequeue(self):
with self.test_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10,
(dtypes_lib.int32, dtypes_lib.float32),
((), ()))
elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
x_val, y_val = sess.run(dequeued_t)
x, y = elems[i]
self.assertEqual([x], x_val)
self.assertEqual([y], y_val)
def testQueueSizeEmpty(self):
with self.test_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
self.assertEqual([0], q.size().eval())
def testQueueSizeAfterEnqueueAndDequeue(self):
with self.test_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue()
size = q.size()
self.assertEqual([], size.get_shape())
enqueue_op.run()
self.assertEqual(1, size.eval())
dequeued_t.op.run()
self.assertEqual(0, size.eval())
def testEnqueueMany(self):
with self.test_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
vals = dequeued_t.eval()
self.assertEqual([elems[i % 4]], vals)
def testEmptyEnqueueMany(self):
with self.test_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, (
(None, None),))
empty_t = constant_op.constant(
[], dtype=dtypes_lib.float32, shape=[0, 2, 3])
enqueue_op = q.enqueue_many((empty_t,))
size_t = q.size()
self.assertEqual([0], size_t.eval())
enqueue_op.run()
self.assertEqual([0], size_t.eval())
def testEmptyDequeueMany(self):
with self.test_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, shapes=((),))
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], dequeued_t.eval().tolist())
enqueue_op.run()
self.assertEqual([], dequeued_t.eval().tolist())
def testEmptyDequeueManyWithDynamicShape(self):
with self.test_session():
q = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=((None,),))
enqueue_op = q.enqueue(([10.0],))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], dequeued_t.eval().tolist())
enqueue_op.run()
self.assertEqual([], dequeued_t.eval().tolist())
def testEmptyDequeueUpToWithDynamicShape(self):
with self.test_session():
q = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=((None,),))
enqueue_op = q.enqueue(([10.0],))
dequeued_t = q.dequeue_up_to(0)
self.assertEqual([], dequeued_t.eval().tolist())
enqueue_op.run()
self.assertEqual([], dequeued_t.eval().tolist())
def testConstructPaddingFIFOQueueWithNoShape(self):
with self.test_session():
with self.assertRaisesRegexp(
ValueError,
r"When providing partial shapes, a list of shapes must be provided."):
data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32,
None).queue_ref.eval()
def testMultiEnqueueMany(self):
with self.test_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10,
(dtypes_lib.float32, dtypes_lib.int32),
((), (2,)))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
float_val, int_val = sess.run(dequeued_t)
self.assertEqual(float_elems[i % 4], float_val)
self.assertAllEqual(int_elems[i % 4], int_val)
def testMultiEnqueueManyWithPartiallyKnownShapes(self):
with self.test_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (None,)))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
float_val, int_val = sess.run(dequeued_t)
self.assertEqual(float_elems[i % 4], float_val)
self.assertAllEqual(int_elems[i % 4], int_val)
def testDequeueMany(self):
with self.test_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
self.assertAllEqual(elems[0:4], dequeued_t.eval())
self.assertAllEqual(elems[4:8], dequeued_t.eval())
def testDequeueUpToNoBlocking(self):
with self.test_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
enqueue_op.run()
self.assertAllEqual(elems[0:4], dequeued_t.eval())
self.assertAllEqual(elems[4:8], dequeued_t.eval())
def testMultiDequeueMany(self):
with self.test_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
float_val, int_val = sess.run(dequeued_t)
self.assertAllEqual(float_elems[0:4], float_val)
self.assertAllEqual(int_elems[0:4], int_val)
self.assertEqual(float_val.shape, dequeued_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_t[1].get_shape())
float_val, int_val = sess.run(dequeued_t)
self.assertAllEqual(float_elems[4:8], float_val)
self.assertAllEqual(int_elems[4:8], int_val)
float_val, int_val = sess.run(dequeued_single_t)
self.assertAllEqual(float_elems[8], float_val)
self.assertAllEqual(int_elems[8], int_val)
self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
def testMultiDequeueManyWithPartiallyKnownShapes(self):
with self.test_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (None,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
float_val, int_val = sess.run(dequeued_t)
self.assertAllEqual(float_elems[0:4], float_val)
self.assertAllEqual(int_elems[0:4], int_val)
self.assertTrue(
tensor_shape.TensorShape(float_val.shape).is_compatible_with(
dequeued_t[0].get_shape()))
self.assertTrue(
tensor_shape.TensorShape(int_val.shape).is_compatible_with(dequeued_t[
1].get_shape()))
float_val, int_val = sess.run(dequeued_t)
self.assertAllEqual(float_elems[4:8], float_val)
self.assertAllEqual(int_elems[4:8], int_val)
float_val, int_val = sess.run(dequeued_single_t)
self.assertAllEqual(float_elems[8], float_val)
self.assertAllEqual(int_elems[8], int_val)
self.assertTrue(
tensor_shape.TensorShape(float_val.shape).is_compatible_with(
dequeued_single_t[0].get_shape()))
self.assertTrue(
tensor_shape.TensorShape(int_val.shape).is_compatible_with(
dequeued_single_t[1].get_shape()))
def testMultiDequeueManyWithPartiallyKnownShapesAndVariableSizeInput(self):
with self.test_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(
10, (dtypes_lib.string, dtypes_lib.int32),
shapes=((None,), (1, None)))
str_elems = [["a"], ["ab"], ["abc"], ["abc", "d"], ["abc", "d", "e"],
["abc", "d", "e", "f"]]
int_elems = [[[1]], [[2]], [[3]], [[1, 2]], [[1, 2, 3]], [[1, 2, 3, 4]]]
enqueue_ops = [q.enqueue((str_elems[i], int_elems[i])) for i in range(6)]
dequeued_t = q.dequeue_many(5)
dequeued_single_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
string_val, int_val = sess.run(dequeued_t)
self.assertAllEqual([[b"a", b"", b""], [b"ab", b"", b""],
[b"abc", b"", b""], [b"abc", b"d", b""],
[b"abc", b"d", b"e"]], string_val)
self.assertAllEqual([[[1, 0, 0]], [[2, 0, 0]], [[3, 0, 0]], [[1, 2, 0]],
[[1, 2, 3]]], int_val)
self.assertTrue(
tensor_shape.TensorShape(string_val.shape).is_compatible_with(
dequeued_t[0].get_shape()))
self.assertTrue(
tensor_shape.TensorShape(int_val.shape).is_compatible_with(dequeued_t[
1].get_shape()))
string_val, int_val = sess.run(dequeued_single_t)
self.assertAllEqual([b"abc", b"d", b"e", b"f"], string_val)
self.assertAllEqual([[1, 2, 3, 4]], int_val)
self.assertTrue(
tensor_shape.TensorShape(string_val.shape).is_compatible_with(
dequeued_single_t[0].get_shape()))
self.assertTrue(
tensor_shape.TensorShape(int_val.shape).is_compatible_with(
dequeued_single_t[1].get_shape()))
def testMultiDequeueUpToPartiallyKnownShapesAndVariableInputNoBlocking(self):
with self.test_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(
10, (dtypes_lib.string, dtypes_lib.int32),
shapes=((None,), (1, None)))
str_elems = [["a"], ["ab"], ["abc"], ["abc", "d"], ["abc", "d", "e"],
["abc", "d", "e", "f"]]
int_elems = [[[1]], [[2]], [[3]], [[1, 2]], [[1, 2, 3]], [[1, 2, 3, 4]]]
enqueue_ops = [q.enqueue((str_elems[i], int_elems[i])) for i in range(6)]
dequeued_t = q.dequeue_up_to(5)
dequeued_single_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
string_val, int_val = sess.run(dequeued_t)
self.assertAllEqual([[b"a", b"", b""], [b"ab", b"", b""],
[b"abc", b"", b""], [b"abc", b"d", b""],
[b"abc", b"d", b"e"]], string_val)
self.assertAllEqual([[[1, 0, 0]], [[2, 0, 0]], [[3, 0, 0]], [[1, 2, 0]],
[[1, 2, 3]]], int_val)
self.assertTrue(
tensor_shape.TensorShape(string_val.shape).is_compatible_with(
dequeued_t[0].get_shape()))
self.assertTrue(
tensor_shape.TensorShape(int_val.shape).is_compatible_with(dequeued_t[
1].get_shape()))
string_val, int_val = sess.run(dequeued_single_t)
self.assertAllEqual([b"abc", b"d", b"e", b"f"], string_val)
self.assertAllEqual([[1, 2, 3, 4]], int_val)
self.assertTrue(
tensor_shape.TensorShape(string_val.shape).is_compatible_with(
dequeued_single_t[0].get_shape()))
self.assertTrue(
tensor_shape.TensorShape(int_val.shape).is_compatible_with(
dequeued_single_t[1].get_shape()))
def testHighDimension(self):
with self.test_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.int32, ((4, 4, 4, 4),))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
enqueue_op.run()
self.assertAllEqual(dequeued_t.eval(), elems)
def testPartiallyKnownHighDimension(self):
with self.test_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.int32, (
(4, None, 4, None),))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
enqueue_op.run()
self.assertAllEqual(dequeued_t.eval(), elems)
def testEnqueueWrongShape(self):
q = data_flow_ops.PaddingFIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32),
((), (2,)))
with self.assertRaises(ValueError):
q.enqueue(([1, 2], [2, 2]))
with self.assertRaises(ValueError):
q.enqueue_many((7, [[1, 2], [3, 4], [5, 6]]))
def testBatchSizeMismatch(self):
q = data_flow_ops.PaddingFIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32,
dtypes_lib.int32), ((), (), ()))
with self.assertRaises(ValueError):
q.enqueue_many(([1, 2, 3], [1, 2], [1, 2, 3]))
with self.assertRaises(ValueError):
q.enqueue_many(
([1, 2, 3], [1, 2], array_ops.placeholder(dtypes_lib.int32)))
with self.assertRaises(ValueError):
q.enqueue_many(
(array_ops.placeholder(dtypes_lib.int32), [1, 2], [1, 2, 3]))
def testEnqueueManyEmptyTypeConversion(self):
q = data_flow_ops.PaddingFIFOQueue(10,
(dtypes_lib.int32, dtypes_lib.float32), (
(), ()))
enq = q.enqueue_many(([], []))
self.assertEqual(dtypes_lib.int32, enq.inputs[1].dtype)
self.assertEqual(dtypes_lib.float32, enq.inputs[2].dtype)
def testEnqueueWrongType(self):
q = data_flow_ops.PaddingFIFOQueue(10,
(dtypes_lib.int32, dtypes_lib.float32), (
(), ()))
with self.assertRaises(ValueError):
q.enqueue((array_ops.placeholder(dtypes_lib.int32),
array_ops.placeholder(dtypes_lib.int32)))
with self.assertRaises(ValueError):
q.enqueue_many((array_ops.placeholder(dtypes_lib.int32),
array_ops.placeholder(dtypes_lib.int32)))
def testEnqueueWrongPartiallyKnownShapeAtRuntime(self):
with self.test_session() as sess:
# First dimension of second component is unknown, second
# dimension must be 3.
q = data_flow_ops.PaddingFIFOQueue(10,
(dtypes_lib.int32, dtypes_lib.int32), (
(2, 2), (None, 3)))
elems_ok = np.array([1] * 4).reshape((2, 2)).astype(np.int32)
elems_bad = array_ops.placeholder(dtypes_lib.int32)
enqueue_op = q.enqueue((elems_ok, elems_bad))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
r"Expected \[\?,3\], got \[3,4\]"):
sess.run([enqueue_op],
feed_dict={elems_bad: np.array([1] * 12).reshape((3, 4))})
def testEnqueueDequeueManyWrongPartiallyKnownShape(self):
with self.test_session() as sess:
# First dimension of second component is unknown, second
# dimension must be 3.
q = data_flow_ops.PaddingFIFOQueue(10,
(dtypes_lib.int32, dtypes_lib.int32), (
(2, 2), (None, 3)))
elems_ok = np.array([1] * 8).reshape((2, 2, 2)).astype(np.int32)
elems_bad = array_ops.placeholder(dtypes_lib.int32)
enqueue_op = q.enqueue_many((elems_ok, elems_bad))
dequeued_t = q.dequeue_many(2)
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Shape mismatch in tuple component 1. "
r"Expected \[2,\?,3\], got \[2,3,4\]"):
sess.run([enqueue_op],
feed_dict={elems_bad: np.array([1] * 24).reshape((2, 3, 4))})
dequeued_t.eval()
def testParallelEnqueueMany(self):
with self.test_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(1000, dtypes_lib.float32, shapes=((),))
elems = [10.0 * x for x in range(100)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(1000)
# Enqueue 100 items in parallel on 10 threads.
def enqueue():
sess.run(enqueue_op)
threads = [self.checkedThread(target=enqueue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(dequeued_t.eval(), elems * 10)
def testParallelDequeueMany(self):
with self.test_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(1000, dtypes_lib.float32, shapes=((),))
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(100)
enqueue_op.run()
# Dequeue 100 items in parallel on 10 threads.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpTo(self):
with self.test_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(1000, dtypes_lib.float32, shapes=((),))
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(101)
enqueue_op.run()
close_op.run()
# Dequeue up to 101 items in parallel on 10 threads, from closed queue.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelEnqueueAndDequeue(self):
with self.test_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(50, dtypes_lib.float32, shapes=((),))
initial_elements = [10.0] * 49
q.enqueue_many((initial_elements,)).run()
enqueue_op = q.enqueue((20.0,))
dequeued_t = q.dequeue()
def enqueue():
for _ in xrange(100):
sess.run(enqueue_op)
def dequeue():
for _ in xrange(100):
self.assertTrue(sess.run(dequeued_t) in (10.0, 20.0))
enqueue_threads = [self.checkedThread(target=enqueue) for _ in range(10)]
dequeue_threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for enqueue_thread in enqueue_threads:
enqueue_thread.start()
for dequeue_thread in dequeue_threads:
dequeue_thread.start()
for enqueue_thread in enqueue_threads:
enqueue_thread.join()
for dequeue_thread in dequeue_threads:
dequeue_thread.join()
# Dequeue the initial count of elements to clean up.
cleanup_elems = q.dequeue_many(49).eval()
for elem in cleanup_elems:
self.assertTrue(elem in (10.0, 20.0))
def testMixtureOfEnqueueAndEnqueueMany(self):
with self.test_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.int32, shapes=((),))
enqueue_placeholder = array_ops.placeholder(dtypes_lib.int32, shape=())
enqueue_op = q.enqueue((enqueue_placeholder,))
enqueuemany_placeholder = array_ops.placeholder(
dtypes_lib.int32, shape=(None,))
enqueuemany_op = q.enqueue_many((enqueuemany_placeholder,))
dequeued_t = q.dequeue()
close_op = q.close()
def dequeue():
for i in xrange(250):
self.assertEqual(i, sess.run(dequeued_t))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
elements_enqueued = 0
while elements_enqueued < 250:
# With equal probability, run Enqueue or enqueue_many.
if random.random() > 0.5:
enqueue_op.run({enqueue_placeholder: elements_enqueued})
elements_enqueued += 1
else:
count = random.randint(0, min(20, 250 - elements_enqueued))
range_to_enqueue = np.arange(
elements_enqueued, elements_enqueued + count, dtype=np.int32)
enqueuemany_op.run({enqueuemany_placeholder: range_to_enqueue})
elements_enqueued += count
close_op.run()
dequeue_thread.join()
self.assertEqual(0, q.size().eval())
def testMixtureOfDequeueAndDequeueMany(self):
with self.test_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.int32, shapes=((),))
enqueue_op = q.enqueue_many((np.arange(250, dtype=np.int32),))
dequeued_t = q.dequeue()
count_placeholder = array_ops.placeholder(dtypes_lib.int32, shape=())
dequeuemany_t = q.dequeue_many(count_placeholder)
def enqueue():
sess.run(enqueue_op)
enqueue_thread = self.checkedThread(target=enqueue)
enqueue_thread.start()
elements_dequeued = 0
while elements_dequeued < 250:
# With equal probability, run Dequeue or dequeue_many.
if random.random() > 0.5:
self.assertEqual(elements_dequeued, dequeued_t.eval())
elements_dequeued += 1
else:
count = random.randint(0, min(20, 250 - elements_dequeued))
expected_range = np.arange(
elements_dequeued, elements_dequeued + count, dtype=np.int32)
self.assertAllEqual(expected_range,
dequeuemany_t.eval({
count_placeholder: count
}))
elements_dequeued += count
q.close().run()
enqueue_thread.join()
self.assertEqual(0, q.size().eval())
def testBlockingDequeueMany(self):
with self.test_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
sess.run(enqueue_op)
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertAllEqual(elems, dequeued_elems)
def testBlockingDequeueUpTo(self):
with self.test_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
sess.run(enqueue_op)
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertAllEqual(elems, dequeued_elems)
def testDequeueManyWithTensorParameter(self):
with self.test_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
count_q = data_flow_ops.PaddingFIFOQueue(100, dtypes_lib.int32, ((),))
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
q = data_flow_ops.PaddingFIFOQueue(total_count, dtypes_lib.int32, ((),))
enqueue_elems_op = q.enqueue_many((elems,))
# Define a subgraph that first dequeues a count, then DequeuesMany
# that number of elements.
dequeued_t = q.dequeue_many(count_q.dequeue())
enqueue_counts_op.run()
enqueue_elems_op.run()
dequeued_elems = []
for _ in dequeue_counts:
dequeued_elems.extend(dequeued_t.eval())
self.assertEqual(elems, dequeued_elems)
def testDequeueFromClosedQueue(self):
with self.test_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
close_op.run()
for elem in elems:
self.assertEqual([elem], dequeued_t.eval())
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
dequeued_t.eval()
def testBlockingDequeueFromClosedQueue(self):
with self.test_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def dequeue():
for elem in elems:
self.assertEqual([elem], sess.run(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testDequeueUpToFromClosedQueueReturnsRemainder(self):
with self.test_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(3)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems[:3], sess.run(dequeued_t))
self.assertAllEqual(elems[3:], sess.run(dequeued_t))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueFromClosedEmptyQueue(self):
with self.test_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue()
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueManyFromClosedQueue(self):
with self.test_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems, sess.run(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueManyButNotAllFromClosedQueue(self):
with self.test_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(3)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems[:3], sess.run(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueManyLargerThanCapacityWithConcurrentDequeueMany(self):
with self.test_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(3)
cleanup_dequeue_t = q.dequeue()
def enqueue():
sess.run(enqueue_op)
def dequeue():
self.assertAllEqual(elems[0:3], sess.run(dequeued_t))
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(dequeued_t)
self.assertEqual(elems[3], sess.run(cleanup_dequeue_t))
def close():
sess.run(close_op)
enqueue_thread = self.checkedThread(target=enqueue)
enqueue_thread.start()
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_thread = self.checkedThread(target=close)
close_thread.start()
enqueue_thread.join()
dequeue_thread.join()
close_thread.join()
def testClosedBlockingDequeueManyRestoresPartialBatch(self):
with self.test_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(4, (dtypes_lib.float32,
dtypes_lib.float32), ((), ()))
elems_a = [1.0, 2.0, 3.0]
elems_b = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems_a, elems_b))
dequeued_a_t, dequeued_b_t = q.dequeue_many(4)
cleanup_dequeue_a_t, cleanup_dequeue_b_t = q.dequeue()
close_op = q.close()
enqueue_op.run()
def dequeue():
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run([dequeued_a_t, dequeued_b_t])
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
# Test that the elements in the partially-dequeued batch are
# restored in the correct order.
for elem_a, elem_b in zip(elems_a, elems_b):
val_a, val_b = sess.run([cleanup_dequeue_a_t, cleanup_dequeue_b_t])
self.assertEqual(elem_a, val_a)
self.assertEqual(elem_b, val_b)
self.assertEqual(0, q.size().eval())
def testBlockingDequeueManyFromClosedEmptyQueue(self):
with self.test_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueUpToFromClosedEmptyQueue(self):
with self.test_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_up_to(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueToClosedQueue(self):
with self.test_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
enqueue_op = q.enqueue((10.0,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testEnqueueManyToClosedQueue(self):
with self.test_session():
q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testBlockingEnqueueToFullQueue(self):
with self.test_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
sess.run(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for elem in elems:
self.assertEqual([elem], dequeued_t.eval())
self.assertEqual([50.0], dequeued_t.eval())
thread.join()
def testBlockingEnqueueManyToFullQueue(self):
with self.test_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
sess.run(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for elem in elems:
self.assertEqual([elem], dequeued_t.eval())
time.sleep(0.01)
self.assertEqual([50.0], dequeued_t.eval())
self.assertEqual([60.0], dequeued_t.eval())
# Make sure the thread finishes before exiting.
thread.join()
def testBlockingEnqueueBeforeClose(self):
with self.test_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
# Expect the operation to succeed once the dequeue op runs.
sess.run(blocking_enqueue_op)
enqueue_thread = self.checkedThread(target=blocking_enqueue)
enqueue_thread.start()
# The close_op should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
def close():
sess.run(close_op)
close_thread = self.checkedThread(target=close)
close_thread.start()
# The dequeue will unblock both threads.
self.assertEqual(10.0, dequeued_t.eval())
enqueue_thread.join()
close_thread.join()
for elem in [20.0, 30.0, 40.0, 50.0]:
self.assertEqual(elem, dequeued_t.eval())
self.assertEqual(0, q.size().eval())
def testBlockingEnqueueManyBeforeClose(self):
with self.test_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(4, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
sess.run(blocking_enqueue_op)
enqueue_thread = self.checkedThread(target=blocking_enqueue)
enqueue_thread.start()
# The close_op should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
def close():
sess.run(close_op)
close_thread = self.checkedThread(target=close)
close_thread.start()
# The dequeue will unblock both threads.
self.assertEqual(10.0, dequeued_t.eval())
enqueue_thread.join()
close_thread.join()
for elem in [20.0, 30.0, 50.0, 60.0]:
self.assertEqual(elem, dequeued_t.eval())
def testDoesNotLoseValue(self):
with self.test_session():
q = data_flow_ops.PaddingFIFOQueue(1, dtypes_lib.float32, ((),))
enqueue_op = q.enqueue((10.0,))
size_t = q.size()
enqueue_op.run()
for _ in range(500):
self.assertEqual(size_t.eval(), [1])
def testSharedQueueSameSession(self):
with self.test_session():
q1 = data_flow_ops.PaddingFIFOQueue(
1, dtypes_lib.float32, ((),), shared_name="shared_queue")
q1.enqueue((10.0,)).run()
q2 = data_flow_ops.PaddingFIFOQueue(
1, dtypes_lib.float32, ((),), shared_name="shared_queue")
q1_size_t = q1.size()
q2_size_t = q2.size()
self.assertEqual(q1_size_t.eval(), [1])
self.assertEqual(q2_size_t.eval(), [1])
self.assertEqual(q2.dequeue().eval(), [10.0])
self.assertEqual(q1_size_t.eval(), [0])
self.assertEqual(q2_size_t.eval(), [0])
q2.enqueue((20.0,)).run()
self.assertEqual(q1_size_t.eval(), [1])
self.assertEqual(q2_size_t.eval(), [1])
self.assertEqual(q1.dequeue().eval(), [20.0])
self.assertEqual(q1_size_t.eval(), [0])
self.assertEqual(q2_size_t.eval(), [0])
def testIncompatibleSharedQueueErrors(self):
with self.test_session():
q_a_1 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, ((),), shared_name="q_a")
q_a_2 = data_flow_ops.PaddingFIFOQueue(
15, dtypes_lib.float32, ((),), shared_name="q_a")
q_a_1.queue_ref.op.run()
with self.assertRaisesOpError("capacity"):
q_a_2.queue_ref.op.run()
q_b_1 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, ((),), shared_name="q_b")
q_b_2 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.int32, ((),), shared_name="q_b")
q_b_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_b_2.queue_ref.op.run()
q_c_1 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, ((),), shared_name="q_c")
q_c_2 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_c")
q_c_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_c_2.queue_ref.op.run()
q_d_1 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
q_d_2 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, ((),), shared_name="q_d")
q_d_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_d_2.queue_ref.op.run()
q_e_1 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
q_e_2 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 4)], shared_name="q_e")
q_e_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_e_2.queue_ref.op.run()
q_f_1 = data_flow_ops.PaddingFIFOQueue(
10, dtypes_lib.float32, ((),), shared_name="q_f")
q_f_2 = data_flow_ops.PaddingFIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), ((), ()),
shared_name="q_f")
q_f_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_f_2.queue_ref.op.run()
def testSelectQueue(self):
with self.test_session():
num_queues = 10
qlist = list()
for _ in xrange(num_queues):
qlist.append(
data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),)))
# Enqueue/Dequeue into a dynamically selected queue
for _ in xrange(20):
index = np.random.randint(num_queues)
q = data_flow_ops.PaddingFIFOQueue.from_list(index, qlist)
q.enqueue((10.,)).run()
self.assertEqual(q.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
with self.test_session():
q1 = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32, ((),))
q2 = data_flow_ops.PaddingFIFOQueue(15, dtypes_lib.float32, ((),))
enq_q = data_flow_ops.PaddingFIFOQueue.from_list(3, [q1, q2])
with self.assertRaisesOpError("is not in"):
enq_q.dequeue().eval()
def _blockingDequeue(self, sess, dequeue_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(dequeue_op)
def _blockingDequeueMany(self, sess, dequeue_many_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(dequeue_many_op)
def _blockingEnqueue(self, sess, enqueue_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(enqueue_op)
def _blockingEnqueueMany(self, sess, enqueue_many_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(enqueue_many_op)
def testResetOfBlockingOperation(self):
with self.test_session() as sess:
q_empty = data_flow_ops.PaddingFIFOQueue(5, dtypes_lib.float32, ((),))
dequeue_op = q_empty.dequeue()
dequeue_many_op = q_empty.dequeue_many(1)
q_full = data_flow_ops.PaddingFIFOQueue(5, dtypes_lib.float32, ((),))
sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
enqueue_op = q_full.enqueue((6.0,))
enqueue_many_op = q_full.enqueue_many(([6.0],))
threads = [
self.checkedThread(
self._blockingDequeue, args=(sess, dequeue_op)),
self.checkedThread(
self._blockingDequeueMany, args=(sess, dequeue_many_op)),
self.checkedThread(
self._blockingEnqueue, args=(sess, enqueue_op)),
self.checkedThread(
self._blockingEnqueueMany, args=(sess, enqueue_many_op))
]
for t in threads:
t.start()
time.sleep(0.1)
sess.close() # Will cancel the blocked operations.
for t in threads:
t.join()
def testBigEnqueueMany(self):
with self.test_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(5, dtypes_lib.int32, ((),))
elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
enq = q.enqueue_many((elem,))
deq = q.dequeue()
size_op = q.size()
enq_done = []
def blocking_enqueue():
enq_done.append(False)
# This will fill the queue and then block until enough dequeues happen.
sess.run(enq)
enq_done.append(True)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The enqueue should start and then block.
results = []
results.append(deq.eval()) # Will only complete after the enqueue starts.
self.assertEqual(len(enq_done), 1)
self.assertEqual(sess.run(size_op), 5)
for _ in range(3):
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 1)
self.assertEqual(sess.run(size_op), 5)
# This dequeue will unblock the thread.
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 2)
thread.join()
for i in range(5):
self.assertEqual(size_op.eval(), 5 - i)
results.append(deq.eval())
self.assertEqual(size_op.eval(), 5 - i - 1)
self.assertAllEqual(elem, results)
def testBigDequeueMany(self):
with self.test_session() as sess:
q = data_flow_ops.PaddingFIFOQueue(2, dtypes_lib.int32, ((),))
elem = np.arange(4, dtype=np.int32)
enq_list = [q.enqueue((e,)) for e in elem]
deq = q.dequeue_many(4)
results = []
def blocking_dequeue():
# Will only complete after 4 enqueues complete.
results.extend(sess.run(deq))
thread = self.checkedThread(target=blocking_dequeue)
thread.start()
# The dequeue should start and then block.
for enq in enq_list:
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.assertEqual(len(results), 0)
sess.run(enq)
# Enough enqueued to unblock the dequeue
thread.join()
self.assertAllEqual(elem, results)
def testDtypes(self):
with self.test_session() as sess:
dtypes = [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8, dtypes_lib.int64,
dtypes_lib.bool, dtypes_lib.complex64, dtypes_lib.complex128
]
shape = (32, 4, 128)
q = data_flow_ops.PaddingFIFOQueue(32, dtypes, [shape[1:]] * len(dtypes))
input_tuple = []
for dtype in dtypes:
np_dtype = dtype.as_numpy_dtype
np_array = np.random.randint(-10, 10, shape)
if dtype == dtypes_lib.bool:
np_array = np_array > 0
elif dtype in (dtypes_lib.complex64, dtypes_lib.complex128):
np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
input_tuple.append(np_array)
q.enqueue_many(input_tuple).run()
output_tuple_t = q.dequeue_many(32)
output_tuple = sess.run(output_tuple_t)
for (input_elem, output_elem) in zip(input_tuple, output_tuple):
self.assertAllEqual(input_elem, output_elem)
def testUnknownRank(self):
with self.assertRaisesRegexp(ValueError, "must have a defined rank"):
data_flow_ops.PaddingFIFOQueue(32, [dtypes_lib.float32],
[tensor_shape.TensorShape(None)])
class QueueFromListTest(test.TestCase):
def testQueueFromListShapes(self):
which = constant_op.constant(1)
def _cmp(expected, *shapes):
qs = [
data_flow_ops.PaddingFIFOQueue(10, [dtypes_lib.float32],
[tensor_shape.TensorShape(s)])
for s in shapes
]
s_expected = tensor_shape.TensorShape(expected)
s = data_flow_ops.QueueBase.from_list(which, qs).shapes[0]
if s_expected.ndims is None:
self.assertEqual(s_expected.ndims, s.ndims)
else:
self.assertEqual(s_expected.as_list(), s.as_list())
_cmp(None, [1, None], [None])
_cmp([None], [1], [2])
_cmp([1, None], [1, 1], [1, 2])
_cmp([1, None], [1, 1], [1, None])
_cmp([None, None], [None, 1], [1, None])
_cmp([1], [1], [1], [1])
_cmp([None], [1], [None], [1])
_cmp(None, [1, None], [1], [1])
def testQueueFromListShapesMultipleComponents(self):
q_u_u = data_flow_ops.PaddingFIFOQueue(
10, [dtypes_lib.float32, dtypes_lib.int32],
[tensor_shape.TensorShape([None]), tensor_shape.TensorShape([None])])
q_u_f = data_flow_ops.PaddingFIFOQueue(
10, [dtypes_lib.float32, dtypes_lib.int32],
[tensor_shape.TensorShape([None]), tensor_shape.TensorShape([1, 2])])
q_f_f = data_flow_ops.PaddingFIFOQueue(
10, [dtypes_lib.float32, dtypes_lib.int32],
[tensor_shape.TensorShape([3, 4]), tensor_shape.TensorShape([1, 2])])
which = constant_op.constant(1)
s_cmp_1 = data_flow_ops.QueueBase.from_list(which,
[q_u_u, q_u_u, q_u_u]).shapes
self.assertEqual([1, 1], [x.ndims for x in s_cmp_1])
self.assertEqual([None, None], [x.as_list()[0] for x in s_cmp_1])
s_cmp_2 = data_flow_ops.QueueBase.from_list(which,
[q_u_u, q_u_u, q_u_f]).shapes
self.assertEqual([1, None], [x.ndims for x in s_cmp_2])
self.assertEqual([None], s_cmp_2[0].as_list())
s_cmp_3 = data_flow_ops.QueueBase.from_list(which, [q_f_f, q_f_f]).shapes
self.assertEqual([2, 2], [x.ndims for x in s_cmp_3])
self.assertEqual([[3, 4], [1, 2]], [x.as_list() for x in s_cmp_3])
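# Illustrative sketch (not part of the upstream test suite): the padding
# behaviour that the tests above exercise, reduced to its smallest form.
# Elements enqueued under a partially known shape ((None,)) are zero-padded
# by dequeue_many to the longest element in the batch. The class name and
# values below are ours, chosen purely for demonstration.
class PaddingSketchTest(test.TestCase):

  def testZeroPadToLongestElement(self):
    with self.test_session():
      q = data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.int32, ((None,),))
      q.enqueue(([1],)).run()
      q.enqueue(([2, 3, 4],)).run()
      # Both elements come back padded with zeros to length 3, the longest
      # element in the dequeued batch.
      self.assertAllEqual([[1, 0, 0], [2, 3, 4]], q.dequeue_many(2).eval())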
if __name__ == "__main__":
test.main()
| apache-2.0 |
SurfasJones/icecream-info | icecream/lib/python2.7/site-packages/sphinx/search/__init__.py | 1 | 11415 | # -*- coding: utf-8 -*-
"""
sphinx.search
~~~~~~~~~~~~~
Create a full-text search index for offline search.
:copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import with_statement
import re
import cPickle as pickle
from docutils.nodes import raw, comment, title, Text, NodeVisitor, SkipNode
from sphinx.util import jsdump, rpartition
class SearchLanguage(object):
"""
This class is the base class for search natural language preprocessors. If
you want to add support for a new language, you should override the methods
of this class.
You should also override the `lang` class attribute (e.g. 'en', 'fr' and so on).
.. attribute:: stopwords
This is a set of stop words for the target language. The default `stopwords`
set is empty. These words are used when building the index and are embedded
in the generated JavaScript.
.. attribute:: js_stemmer_code
The JavaScript source of the stemmer class. The class must be named
``Stemmer`` and must provide a ``stemWord`` method. This string is
embedded as-is in searchtools.js.
This class is used to preprocess the search words that readers type into
the Sphinx HTML search box, before the index is searched. The default
implementation does nothing.
"""
lang = None
stopwords = set()
js_stemmer_code = """
/**
* Dummy stemmer for languages without stemming rules.
*/
var Stemmer = function() {
this.stemWord = function(w) {
return w;
}
}
"""
_word_re = re.compile(r'\w+(?u)')
def __init__(self, options):
self.options = options
self.init(options)
def init(self, options):
"""
Initialize the class with the options the user has given.
"""
def split(self, input):
"""
This method splits a sentence into words. The default splitter splits
the input at whitespace, which is sufficient for most languages except
CJK languages.
"""
return self._word_re.findall(input)
def stem(self, word):
"""
This method implements the Python version of the stemming algorithm.
The default implementation does nothing; you should implement it if the
language has any stemming rules.
It is used to preprocess search words before registering them in the
search index. The stemming done by the Python version and by the JS
version (given in the js_stemmer_code attribute) must be compatible.
"""
return word
def word_filter(self, word):
"""
Return true if the target word should be registered in the search index.
This method is called after stemming.
"""
return not (((len(word) < 3) and (12353 < ord(word[0]) < 12436)) or
(ord(word[0]) < 256 and (len(word) < 3 or word in self.stopwords or
word.isdigit())))
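# Illustrative sketch (not part of Sphinx itself): a minimal SearchLanguage
# subclass, following the override points described in the docstring above.
# The language code 'xx', the stopword list and the lower-casing "stemmer"
# are invented for demonstration; a real language would also ship a matching
# JavaScript stemmer via js_stemmer_code.
class SearchDummy(SearchLanguage):
    lang = 'xx'
    stopwords = set(['the', 'a', 'an'])

    def stem(self, word):
        # Toy stemmer: lower-case only. Whatever happens here must stay
        # compatible with the JS stemmer embedded in searchtools.js.
        return word.lower()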
from sphinx.search import en, ja
languages = {
'en': en.SearchEnglish,
'ja': ja.SearchJapanese,
}
class _JavaScriptIndex(object):
"""
The search index as a JavaScript file that calls a function
on the documentation search object to register the index.
"""
PREFIX = 'Search.setIndex('
SUFFIX = ')'
def dumps(self, data):
return self.PREFIX + jsdump.dumps(data) + self.SUFFIX
def loads(self, s):
data = s[len(self.PREFIX):-len(self.SUFFIX)]
if not data or not s.startswith(self.PREFIX) or not \
s.endswith(self.SUFFIX):
raise ValueError('invalid data')
return jsdump.loads(data)
def dump(self, data, f):
f.write(self.dumps(data))
def load(self, f):
return self.loads(f.read())
js_index = _JavaScriptIndex()
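# Illustrative sketch (not part of Sphinx itself) of the wrapping performed
# by _JavaScriptIndex: dumps() emits the JS statement that the client-side
# search object evaluates, and loads() strips it again. The sample dict is
# an arbitrary stand-in for a real frozen index.
#
# >>> payload = js_index.dumps({'docnames': []})
# >>> payload.startswith('Search.setIndex(') and payload.endswith(')')
# True
# >>> js_index.loads(payload) == {'docnames': []}
# True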
class WordCollector(NodeVisitor):
"""
A special visitor that collects words for the `IndexBuilder`.
"""
def __init__(self, document, lang):
NodeVisitor.__init__(self, document)
self.found_words = []
self.found_title_words = []
self.lang = lang
def dispatch_visit(self, node):
if node.__class__ is comment:
raise SkipNode
if node.__class__ is raw:
# Some people might put content in raw HTML that should be searched,
# so we just amateurishly strip HTML tags and index the remaining
# content
nodetext = re.sub(r'(?is)<style.*?</style>', '', node.astext())
nodetext = re.sub(r'(?is)<script.*?</script>', '', nodetext)
nodetext = re.sub(r'<[^<]+?>', '', nodetext)
self.found_words.extend(self.lang.split(nodetext))
raise SkipNode
if node.__class__ is Text:
self.found_words.extend(self.lang.split(node.astext()))
elif node.__class__ is title:
self.found_title_words.extend(self.lang.split(node.astext()))
class IndexBuilder(object):
"""
Helper class that creates a searchindex based on the doctrees
passed to the `feed` method.
"""
formats = {
'jsdump': jsdump,
'pickle': pickle
}
def __init__(self, env, lang, options, scoring):
self.env = env
# filename -> title
self._titles = {}
# stemmed word -> set(filenames)
self._mapping = {}
# stemmed words in titles -> set(filenames)
self._title_mapping = {}
# word -> stemmed word
self._stem_cache = {}
# objtype -> index
self._objtypes = {}
# objtype index -> (domain, type, objname (localized))
self._objnames = {}
# add language-specific SearchLanguage instance
self.lang = languages[lang](options)
if scoring:
with open(scoring, 'rb') as fp:
self.js_scorer_code = fp.read().decode('utf-8')
else:
self.js_scorer_code = u''
def load(self, stream, format):
"""Reconstruct from frozen data."""
if isinstance(format, basestring):
format = self.formats[format]
frozen = format.load(stream)
# if an old index is present, we treat it as not existing.
if not isinstance(frozen, dict) or \
frozen.get('envversion') != self.env.version:
raise ValueError('old format')
index2fn = frozen['filenames']
self._titles = dict(zip(index2fn, frozen['titles']))
def load_terms(mapping):
rv = {}
for k, v in mapping.iteritems():
if isinstance(v, int):
rv[k] = set([index2fn[v]])
else:
rv[k] = set(index2fn[i] for i in v)
return rv
self._mapping = load_terms(frozen['terms'])
self._title_mapping = load_terms(frozen['titleterms'])
# no need to load keywords/objtypes
def dump(self, stream, format):
"""Dump the frozen index to a stream."""
if isinstance(format, basestring):
format = self.formats[format]
format.dump(self.freeze(), stream)
def get_objects(self, fn2index):
rv = {}
otypes = self._objtypes
onames = self._objnames
for domainname, domain in self.env.domains.iteritems():
for fullname, dispname, type, docname, anchor, prio in \
domain.get_objects():
# XXX use dispname?
if docname not in fn2index:
continue
if prio < 0:
continue
prefix, name = rpartition(fullname, '.')
pdict = rv.setdefault(prefix, {})
try:
typeindex = otypes[domainname, type]
except KeyError:
typeindex = len(otypes)
otypes[domainname, type] = typeindex
otype = domain.object_types.get(type)
if otype:
# use unicode() to fire translation proxies
onames[typeindex] = (domainname, type,
unicode(domain.get_type_name(otype)))
else:
onames[typeindex] = (domainname, type, type)
if anchor == fullname:
shortanchor = ''
elif anchor == type + '-' + fullname:
shortanchor = '-'
else:
shortanchor = anchor
pdict[name] = (fn2index[docname], typeindex, prio, shortanchor)
return rv
def get_terms(self, fn2index):
rvs = {}, {}
for rv, mapping in zip(rvs, (self._mapping, self._title_mapping)):
for k, v in mapping.iteritems():
if len(v) == 1:
fn, = v
if fn in fn2index:
rv[k] = fn2index[fn]
else:
rv[k] = [fn2index[fn] for fn in v if fn in fn2index]
return rvs
def freeze(self):
"""Create a usable data structure for serializing."""
filenames = self._titles.keys()
titles = self._titles.values()
fn2index = dict((f, i) for (i, f) in enumerate(filenames))
terms, title_terms = self.get_terms(fn2index)
objects = self.get_objects(fn2index) # populates _objtypes
objtypes = dict((v, k[0] + ':' + k[1])
for (k, v) in self._objtypes.iteritems())
objnames = self._objnames
return dict(filenames=filenames, titles=titles, terms=terms,
objects=objects, objtypes=objtypes, objnames=objnames,
titleterms=title_terms, envversion=self.env.version)
def prune(self, filenames):
"""Remove data for all filenames not in the list."""
new_titles = {}
for filename in filenames:
if filename in self._titles:
new_titles[filename] = self._titles[filename]
self._titles = new_titles
for wordnames in self._mapping.itervalues():
wordnames.intersection_update(filenames)
for wordnames in self._title_mapping.itervalues():
wordnames.intersection_update(filenames)
def feed(self, filename, title, doctree):
"""Feed a doctree to the index."""
self._titles[filename] = title
visitor = WordCollector(doctree, self.lang)
doctree.walk(visitor)
# memoize self.lang.stem
def stem(word):
try:
return self._stem_cache[word]
except KeyError:
self._stem_cache[word] = self.lang.stem(word)
return self._stem_cache[word]
_filter = self.lang.word_filter
for word in visitor.found_title_words:
word = stem(word)
if _filter(word):
self._title_mapping.setdefault(word, set()).add(filename)
for word in visitor.found_words:
word = stem(word)
if word not in self._title_mapping and _filter(word):
self._mapping.setdefault(word, set()).add(filename)
def context_for_searchtool(self):
return dict(
search_language_stemming_code = self.lang.js_stemmer_code,
search_language_stop_words =
jsdump.dumps(sorted(self.lang.stopwords)),
search_scorer_tool = self.js_scorer_code,
)
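# Illustrative sketch (not part of Sphinx itself): the typical life cycle of
# an IndexBuilder inside an HTML builder. `env` stands for a configured
# Sphinx BuildEnvironment; `doctrees` and the output file name are
# placeholders.
#
# builder = IndexBuilder(env, 'en', options={}, scoring=None)
# for docname, doctree in doctrees.items():
#     builder.feed(docname, env.titles[docname].astext(), doctree)
# with open('searchindex.js', 'w') as f:
#     builder.dump(f, 'jsdump')   # freeze() and serialize in one step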
| mit |
fsimkovic/cptbx | conkit/applications/hhblits.py | 2 | 7994 | # coding=utf-8
#
# BSD 3-Clause License
#
# Copyright (c) 2016-19, University of Liverpool
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Command line object for HHblits Multiple Sequence Alignment application
"""
__author__ = "Felix Simkovic"
__date__ = "05 Aug 2016"
__version__ = "0.1"
from Bio.Application import _Option
from Bio.Application import _Switch
from Bio.Application import AbstractCommandline
class HHblitsCommandline(AbstractCommandline):
"""
Command line object for HHblits [#]_ [#]_ alignment generation
https://toolkit.tuebingen.mpg.de/hhblits
The HHblits program is a homology detection tool based on iterative HMM-HMM comparison.
.. [#] Alva V., Nam SZ., Söding J., Lupas AN. (2016). The MPI bioinformatics Toolkit as an
integrative platform for advanced protein sequence and structure analysis. Nucleic Acids Res. pii: gkw348.
.. [#] Remmert M., Biegert A., Hauser A., Söding J. (2011). HHblits: Lightning-fast iterative
protein sequence searching by HMM-HMM alignment. Nat Methods. 9(2):173-5.
Examples
--------
To generate a Multiple Sequence Alignment, use:
>>> from conkit.applications import HHblitsCommandline
>>> hhblits_cline = HHblitsCommandline(
... input="test.fasta", database="uniprot20_29Feb2012"
... )
>>> print(hhblits_cline)
hhblits -i test.fasta -d uniprot20_29Feb2012
You would typically run the command line with :func:`hhblits_cline` or via
the :mod:`~subprocess` module.
"""
def __init__(self, cmd="hhblits", **kwargs):
# TODO: Figure out how to express mutually exclusive option groups
if "local" in list(kwargs.keys()) and "global" in list(kwargs.keys()):
raise ValueError('Use only one of "global_aln/local_aln" alignment modes')
self.parameters = [
_Option(
["-i", "input"],
"single sequence or multiple sequence alignment in " "a3m, a2m, or FASTA format, or HMM in hmm format",
filename=True,
is_required=True,
equate=False,
),
# Options
_Option(["-d", "database"], "database name (e.g. uniprot20_29Feb2012)", is_required=True, equate=False),
_Option(["-n", "niterations"], "number of iterations [default: 2]", equate=False),
_Option(
["-e", "evalue"], "E-value cutoff for inclusion in result alignment [default: 0.001]", equate=False
),
# # Input alignment options
# _Option(['-M', 'a2m'],
# 'use A2M/A3M input alignment format',
# equate=False),
# _Option(['-M', 'fasta'],
# 'use FASTA input alignment format',
# equate=False),
# _Option(['-M', 'match_states'],
# 'use FASTA: columns with fewer than X% gaps are match states',
# equate=False),
# Output options
_Option(
["-o", "output"],
"write results in standard format to file [default: <infile.hhr>]",
filename=True,
equate=False,
),
_Option(
["-oa3m", "oa3m"],
"write result MSA with significant matches in a3m format",
filename=True,
equate=False,
),
_Option(
["-ohhm", "ohhm"],
"write result MSA with significant matches in hmm format",
filename=True,
equate=False,
),
_Option(
["-opsi", "opsi"],
"write result MSA with significant matches in psi format",
filename=True,
equate=False,
),
_Option(["-oalis", "oalis"], "write MSAs in A3M format after each iteration", filename=True, equate=False),
# Filter options applied to query MSA, database MSAs, and result MSA
_Switch(["-all", "show_all"], "show all sequences in result MSA; do not filter result MSA"),
_Option(["-id", "id"], "maximum pairwise sequence identity [default: 90]", equate=False),
_Option(
["-diff", "diff"],
"filter MSAs by selecting most diverse set of sequences, keeping "
"at least this many seqs in each MSA block of length 50 [default: 1000]",
equate=False,
),
_Option(["-cov", "cov"], "minimum coverage with master sequence (%) [default: 0]", equate=False),
_Option(["-qid", "qid"], "minimum sequence identity with master sequence (%) [default: 0]", equate=False),
_Option(["-qsc", "qsc"], "minimum score per column with master sequence [default: -20.0]", equate=False),
_Option(["-neff", "neff"], "target diversity of multiple sequence alignment [default: off]", equate=False),
# HMM-HMM alignment options
_Switch(["-norealign", "norealign"], "do NOT realign displayed hits with MAC algorithm [default: realign]"),
_Option(
["-mact", "mac_realignment_threshold"],
"posterior probability threshold for MAC re-alignment [default: 0.350], "
"Parameter controls alignment greediness: 0:global >0.1:local",
equate=False,
),
_Switch(["-glob", "global_aln"], "use global alignment mode for searching/ranking [default: local]"),
_Switch(["-loc", "loca_alnl"], "use local alignment mode for searching/ranking [default: local]"),
# Other options
_Option(
["-v", "verbose"],
"verbose mode: 0:no screen output 1:only warings 2: verbose [default: 2]",
equate=False,
),
_Option(
["-neffmax", "neffmax"],
"skip further search iterations when diversity Neff of query "
"MSA becomes larger than neffmax [default: 10.0]",
equate=False,
),
_Option(["-cpu", "cpu"], "number of CPUs to use (for shared memory SMPs) [default: 2]"),
# Extra options from `-h all`
_Option(
["-maxfilt", "maxfilt"],
"max number of hits allowed to pass 2nd prefilter (default=20000)",
equate=False,
),
]
AbstractCommandline.__init__(self, cmd, **kwargs)
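# Illustrative sketch (not part of conkit itself): executing the assembled
# command line through the subprocess module, as the class docstring
# suggests. It assumes an `hhblits` executable on PATH and a local copy of
# the named database; the file names are placeholders.
if __name__ == "__main__":
    import subprocess

    cline = HHblitsCommandline(
        input="test.fasta", database="uniprot20_29Feb2012", oa3m="test.a3m"
    )
    subprocess.check_call(str(cline), shell=True)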
| gpl-3.0 |
darrenbilby/grr | lib/flows/general/network.py | 8 | 1499 | #!/usr/bin/env python
"""These are network related flows."""
from grr.lib import aff4
from grr.lib import flow
# pylint: disable=unused-import
from grr.lib.aff4_objects import network
# pylint: enable=unused-import
class Netstat(flow.GRRFlow):
"""List running processes on a system."""
category = "/Network/"
behaviours = flow.GRRFlow.behaviours + "BASIC"
@flow.StateHandler(next_state=["StoreNetstat"])
def Start(self):
"""Start processing."""
self.CallClient("Netstat", next_state="StoreNetstat")
@flow.StateHandler()
def StoreNetstat(self, responses):
"""Collect the connections and store in the datastore.
Args:
responses: A list of sysinfo_pb2.NetworkConnection objects.
Raises:
flow.FlowError: On failure to retrieve the connections.
"""
self.state.Register("urn", self.client_id.Add("network"))
net_fd = aff4.FACTORY.Create(self.state.urn, "Network", token=self.token)
if responses.success:
conns = net_fd.Schema.CONNECTIONS()
for response in responses:
self.SendReply(response)
conns.Append(response)
else:
raise flow.FlowError("Failed to get connections. Err: {0}".format(
responses.status))
self.state.Register("conn_count", len(conns))
net_fd.Set(conns)
net_fd.Close()
@flow.StateHandler()
def End(self):
self.Log("Successfully wrote %d connections.", self.state.conn_count)
self.Notify("ViewObject", self.state.urn, "Listed Connections")
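# Illustrative sketch (not part of GRR itself): scheduling this flow from
# server-side code. `client_id` and `token` are placeholders, and the
# StartFlow classmethod is assumed from the GRR API of this era; check the
# local flow module before relying on it.
#
# flow.GRRFlow.StartFlow(client_id=client_id, flow_name="Netstat",
#                        token=token)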
| apache-2.0 |
agaurav/ansible | lib/ansible/inventory/vars_plugins/noop.py | 317 | 1632 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2014, Serge van Ginderachter <serge@vanginderachter.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class VarsModule(object):
"""
Loads variables for groups and/or hosts
"""
def __init__(self, inventory):
""" constructor """
self.inventory = inventory
self.inventory_basedir = inventory.basedir()
def run(self, host, vault_password=None):
""" For backwards compatibility, when only vars per host were retrieved
This method should return both host specific vars as well as vars
calculated from groups it is a member of """
return {}
def get_host_vars(self, host, vault_password=None):
""" Get host specific variables. """
return {}
def get_group_vars(self, group, vault_password=None):
""" Get group specific variables. """
return {}
| gpl-3.0 |
flyfei/python-for-android | python-modules/twisted/twisted/test/test_roots.py | 81 | 1823 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.trial import unittest
from twisted.python import roots
import types
class RootsTest(unittest.TestCase):
def testExceptions(self):
request = roots.Request()
try:
request.write("blah")
except NotImplementedError:
pass
else:
self.fail()
try:
request.finish()
except NotImplementedError:
pass
else:
self.fail()
def testCollection(self):
collection = roots.Collection()
collection.putEntity("x", 'test')
self.failUnlessEqual(collection.getStaticEntity("x"),
'test')
collection.delEntity("x")
self.failUnlessEqual(collection.getStaticEntity('x'),
None)
try:
collection.storeEntity("x", None)
except NotImplementedError:
pass
else:
self.fail()
try:
collection.removeEntity("x", None)
except NotImplementedError:
pass
else:
self.fail()
def testConstrained(self):
class const(roots.Constrained):
def nameConstraint(self, name):
return (name == 'x')
c = const()
self.failUnlessEqual(c.putEntity('x', 'test'), None)
self.failUnlessRaises(roots.ConstraintViolation,
c.putEntity, 'y', 'test')
def testHomogenous(self):
h = roots.Homogenous()
h.entityType = types.IntType
h.putEntity('a', 1)
self.failUnlessEqual(h.getStaticEntity('a'), 1)
self.failUnlessRaises(roots.ConstraintViolation,
h.putEntity, 'x', 'y')
| apache-2.0 |
omarkohl/pytest | _pytest/python.py | 1 | 89408 | """ Python test discovery, setup and run of test functions. """
import fnmatch
import functools
import inspect
import re
import types
import sys
import py
import pytest
from _pytest._code.code import TerminalRepr
from _pytest.mark import MarkDecorator, MarkerError
try:
import enum
except ImportError: # pragma: no cover
# Only available in Python 3.4+ or as a backport
enum = None
import _pytest
import _pytest._pluggy as pluggy
cutdir2 = py.path.local(_pytest.__file__).dirpath()
cutdir1 = py.path.local(pluggy.__file__.rstrip("oc"))
NoneType = type(None)
NOTSET = object()
isfunction = inspect.isfunction
isclass = inspect.isclass
callable = py.builtin.callable
# used to work around a python2 exception info leak
exc_clear = getattr(sys, 'exc_clear', lambda: None)
# The type of re.compile objects is not exposed in Python.
REGEX_TYPE = type(re.compile(''))
_PY3 = sys.version_info > (3, 0)
_PY2 = not _PY3
if hasattr(inspect, 'signature'):
def _format_args(func):
return str(inspect.signature(func))
else:
def _format_args(func):
return inspect.formatargspec(*inspect.getargspec(func))
if sys.version_info[:2] == (2, 6):
def isclass(object):
""" Return true if the object is a class. Overrides inspect.isclass for
python 2.6 because it will return True for objects which always return
something on __getattr__ calls (see #1035).
Backport of https://hg.python.org/cpython/rev/35bf8f7a8edc
"""
return isinstance(object, (type, types.ClassType))
def _has_positional_arg(func):
return func.__code__.co_argcount
def filter_traceback(entry):
# entry.path might sometimes return a str object when the entry
# points to dynamically generated code
# see https://bitbucket.org/pytest-dev/py/issues/71
raw_filename = entry.frame.code.raw.co_filename
is_generated = '<' in raw_filename and '>' in raw_filename
if is_generated:
return False
# entry.path might point to a nonexistent file, in which case it will
# also return a str object. see #1133
p = py.path.local(entry.path)
return p != cutdir1 and not p.relto(cutdir2)
def get_real_func(obj):
""" gets the real function object of the (possibly) wrapped object by
functools.wraps or functools.partial.
"""
while hasattr(obj, "__wrapped__"):
obj = obj.__wrapped__
if isinstance(obj, functools.partial):
obj = obj.func
return obj
def getfslineno(obj):
# xxx let decorators etc specify a sane ordering
obj = get_real_func(obj)
if hasattr(obj, 'place_as'):
obj = obj.place_as
fslineno = _pytest._code.getfslineno(obj)
assert isinstance(fslineno[1], int), obj
return fslineno
def getimfunc(func):
try:
return func.__func__
except AttributeError:
try:
return func.im_func
except AttributeError:
return func
def safe_getattr(object, name, default):
""" Like getattr but return default upon any Exception.
Attribute access can potentially fail for 'evil' Python objects.
See issue214
"""
try:
return getattr(object, name, default)
except Exception:
return default
class FixtureFunctionMarker:
def __init__(self, scope, params,
autouse=False, yieldctx=False, ids=None):
self.scope = scope
self.params = params
self.autouse = autouse
self.yieldctx = yieldctx
self.ids = ids
def __call__(self, function):
if isclass(function):
raise ValueError(
"class fixtures not supported (may be in the future)")
function._pytestfixturefunction = self
return function
def fixture(scope="function", params=None, autouse=False, ids=None):
""" (return a) decorator to mark a fixture factory function.
This decorator can be used (with or without parameters) to define
a fixture function. The name of the fixture function can later be
referenced to cause its invocation ahead of running tests: test
modules or classes can use the pytest.mark.usefixtures(fixturename)
marker. Test functions can directly use fixture names as input
arguments in which case the fixture instance returned from the fixture
function will be injected.
:arg scope: the scope for which this fixture is shared, one of
"function" (default), "class", "module", "session".
:arg params: an optional list of parameters which will cause multiple
invocations of the fixture function and all of the tests
using it.
:arg autouse: if True, the fixture func is activated for all tests that
can see it. If False (the default) then an explicit
reference is needed to activate the fixture.
:arg ids: list of string ids each corresponding to the params
so that they are part of the test id. If no ids are provided
they will be generated automatically from the params.
"""
if callable(scope) and params is None and autouse == False:
# direct decoration
return FixtureFunctionMarker(
"function", params, autouse)(scope)
if params is not None and not isinstance(params, (list, tuple)):
params = list(params)
return FixtureFunctionMarker(scope, params, autouse, ids=ids)
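# Illustrative usage sketch (kept as a comment; not part of this module).
# The fixture name "smtp_connection" and the host below are hypothetical.
#
#     import pytest
#     import smtplib
#
#     @pytest.fixture(scope="module")
#     def smtp_connection():
#         # created once per module and shared by all tests requesting it
#         return smtplib.SMTP("smtp.example.com", 587, timeout=5)
#
#     def test_ehlo(smtp_connection):
#         code, _ = smtp_connection.ehlo()
#         assert code == 250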
def yield_fixture(scope="function", params=None, autouse=False, ids=None):
""" (return a) decorator to mark a yield-fixture factory function
(EXPERIMENTAL).
This takes the same arguments as :py:func:`pytest.fixture` but
expects a fixture function to use a ``yield`` instead of a ``return``
statement to provide a fixture. See
http://pytest.org/en/latest/yieldfixture.html for more info.
"""
if callable(scope) and params is None and autouse == False:
# direct decoration
return FixtureFunctionMarker(
"function", params, autouse, yieldctx=True)(scope)
else:
return FixtureFunctionMarker(scope, params, autouse,
yieldctx=True, ids=ids)
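# Illustrative usage sketch (kept as a comment; the log file name is
# hypothetical): a yield-fixture runs code before and after the test.
#
#     import pytest
#
#     @pytest.yield_fixture
#     def logfile(tmpdir):
#         f = tmpdir.join("example.log").open("w")  # setup
#         yield f                                   # value given to the test
#         f.close()                                 # teardown after the test
#
#     def test_write(logfile):
#         logfile.write("hello")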
defaultfuncargprefixmarker = fixture()
def pyobj_property(name):
def get(self):
node = self.getparent(getattr(pytest, name))
if node is not None:
return node.obj
doc = "python %s object this node was collected from (can be None)." % (
name.lower(),)
return property(get, None, None, doc)
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption('--fixtures', '--funcargs',
action="store_true", dest="showfixtures", default=False,
help="show available fixtures, sorted by plugin appearance")
parser.addini("usefixtures", type="args", default=[],
help="list of default fixtures to be used with this project")
parser.addini("python_files", type="args",
default=['test_*.py', '*_test.py'],
help="glob-style file patterns for Python test module discovery")
parser.addini("python_classes", type="args", default=["Test",],
help="prefixes or glob names for Python test class discovery")
parser.addini("python_functions", type="args", default=["test",],
help="prefixes or glob names for Python test function and "
"method discovery")
group.addoption("--import-mode", default="prepend",
choices=["prepend", "append"], dest="importmode",
help="prepend/append to sys.path when importing test modules, "
"default is to prepend.")
def pytest_cmdline_main(config):
if config.option.showfixtures:
showfixtures(config)
return 0
def pytest_generate_tests(metafunc):
# those alternative spellings are common - raise a specific error to alert
# the user
alt_spellings = ['parameterize', 'parametrise', 'parameterise']
for attr in alt_spellings:
if hasattr(metafunc.function, attr):
msg = "{0} has '{1}', spelling should be 'parametrize'"
raise MarkerError(msg.format(metafunc.function.__name__, attr))
try:
markers = metafunc.function.parametrize
except AttributeError:
return
for marker in markers:
metafunc.parametrize(*marker.args, **marker.kwargs)
def pytest_configure(config):
config.addinivalue_line("markers",
"parametrize(argnames, argvalues): call a test function multiple "
"times passing in different arguments in turn. argvalues generally "
"needs to be a list of values if argnames specifies only one name "
"or a list of tuples of values if argnames specifies multiple names. "
"Example: @parametrize('arg1', [1,2]) would lead to two calls of the "
"decorated test function, one with arg1=1 and another with arg1=2."
"see http://pytest.org/latest/parametrize.html for more info and "
"examples."
)
config.addinivalue_line("markers",
"usefixtures(fixturename1, fixturename2, ...): mark tests as needing "
"all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures "
)
def pytest_sessionstart(session):
session._fixturemanager = FixtureManager(session)
@pytest.hookimpl(trylast=True)
def pytest_namespace():
raises.Exception = pytest.fail.Exception
return {
'fixture': fixture,
'yield_fixture': yield_fixture,
'raises' : raises,
'collect': {
'Module': Module, 'Class': Class, 'Instance': Instance,
'Function': Function, 'Generator': Generator,
'_fillfuncargs': fillfixtures}
}
@fixture(scope="session")
def pytestconfig(request):
""" the pytest config object with access to command line opts."""
return request.config
@pytest.hookimpl(trylast=True)
def pytest_pyfunc_call(pyfuncitem):
testfunction = pyfuncitem.obj
if pyfuncitem._isyieldedfunction():
testfunction(*pyfuncitem._args)
else:
funcargs = pyfuncitem.funcargs
testargs = {}
for arg in pyfuncitem._fixtureinfo.argnames:
testargs[arg] = funcargs[arg]
testfunction(**testargs)
return True
def pytest_collect_file(path, parent):
ext = path.ext
if ext == ".py":
if not parent.session.isinitpath(path):
for pat in parent.config.getini('python_files'):
if path.fnmatch(pat):
break
else:
return
ihook = parent.session.gethookproxy(path)
return ihook.pytest_pycollect_makemodule(path=path, parent=parent)
def pytest_pycollect_makemodule(path, parent):
return Module(path, parent)
@pytest.hookimpl(hookwrapper=True)
def pytest_pycollect_makeitem(collector, name, obj):
outcome = yield
res = outcome.get_result()
if res is not None:
raise StopIteration
# nothing was collected elsewhere, let's do it here
if isclass(obj):
if collector.istestclass(obj, name):
Class = collector._getcustomclass("Class")
outcome.force_result(Class(name, parent=collector))
elif collector.istestfunction(obj, name):
# mock seems to store unbound methods (issue473), normalize it
obj = getattr(obj, "__func__", obj)
# We need to try and unwrap the function if it's a functools.partial
# or a functools.wrapped.
# We mustn't if it's been wrapped with mock.patch (python 2 only)
if not (isfunction(obj) or isfunction(get_real_func(obj))):
collector.warn(code="C2", message=
"cannot collect %r because it is not a function."
% name, )
elif getattr(obj, "__test__", True):
if is_generator(obj):
res = Generator(name, parent=collector)
else:
res = list(collector._genfunctions(name, obj))
outcome.force_result(res)
def is_generator(func):
try:
return _pytest._code.getrawcode(func).co_flags & 32 # generator function
except AttributeError: # builtin functions have no bytecode
# assume them to not be generators
return False
class PyobjContext(object):
module = pyobj_property("Module")
cls = pyobj_property("Class")
instance = pyobj_property("Instance")
class PyobjMixin(PyobjContext):
def obj():
def fget(self):
try:
return self._obj
except AttributeError:
self._obj = obj = self._getobj()
return obj
def fset(self, value):
self._obj = value
return property(fget, fset, None, "underlying python object")
obj = obj()
def _getobj(self):
return getattr(self.parent.obj, self.name)
def getmodpath(self, stopatmodule=True, includemodule=False):
""" return python path relative to the containing module. """
chain = self.listchain()
chain.reverse()
parts = []
for node in chain:
if isinstance(node, Instance):
continue
name = node.name
if isinstance(node, Module):
assert name.endswith(".py")
name = name[:-3]
if stopatmodule:
if includemodule:
parts.append(name)
break
parts.append(name)
parts.reverse()
s = ".".join(parts)
return s.replace(".[", "[")
def _getfslineno(self):
return getfslineno(self.obj)
def reportinfo(self):
# XXX caching?
obj = self.obj
compat_co_firstlineno = getattr(obj, 'compat_co_firstlineno', None)
if isinstance(compat_co_firstlineno, int):
# nose compatibility
fspath = sys.modules[obj.__module__].__file__
if fspath.endswith(".pyc"):
fspath = fspath[:-1]
lineno = compat_co_firstlineno
else:
fspath, lineno = getfslineno(obj)
modpath = self.getmodpath()
assert isinstance(lineno, int)
return fspath, lineno, modpath
class PyCollector(PyobjMixin, pytest.Collector):
def funcnamefilter(self, name):
return self._matches_prefix_or_glob_option('python_functions', name)
def isnosetest(self, obj):
""" Look for the __test__ attribute, which is applied by the
@nose.tools.istest decorator
"""
# We explicitly check for "is True" here to not mistakenly treat
# classes with a custom __getattr__ returning something truthy (like a
# function) as test classes.
return safe_getattr(obj, '__test__', False) is True
def classnamefilter(self, name):
return self._matches_prefix_or_glob_option('python_classes', name)
def istestfunction(self, obj, name):
return (
(self.funcnamefilter(name) or self.isnosetest(obj)) and
safe_getattr(obj, "__call__", False) and getfixturemarker(obj) is None
)
def istestclass(self, obj, name):
return self.classnamefilter(name) or self.isnosetest(obj)
def _matches_prefix_or_glob_option(self, option_name, name):
"""
checks if the given name matches the prefix or glob-pattern defined
in ini configuration.
"""
for option in self.config.getini(option_name):
if name.startswith(option):
return True
# check that name looks like a glob-string before calling fnmatch
# because this is called for every name in each collected module,
# and fnmatch is somewhat expensive to call
elif ('*' in option or '?' in option or '[' in option) and \
fnmatch.fnmatch(name, option):
return True
return False
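# Illustrative sketch (comment only): given the ini configuration below,
# a class "CheckFoo" is collected via the "Check*" glob entry and a
# function "test_bar" via the "test" prefix entry matched by the
# method above.
#
#     [pytest]
#     python_classes = Test Check*
#     python_functions = test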
def collect(self):
if not getattr(self.obj, "__test__", True):
return []
# NB. we avoid random getattrs and peek in the __dict__ instead
# (XXX originally introduced from a PyPy need, still true?)
dicts = [getattr(self.obj, '__dict__', {})]
for basecls in inspect.getmro(self.obj.__class__):
dicts.append(basecls.__dict__)
seen = {}
l = []
for dic in dicts:
for name, obj in list(dic.items()):
if name in seen:
continue
seen[name] = True
res = self.makeitem(name, obj)
if res is None:
continue
if not isinstance(res, list):
res = [res]
l.extend(res)
l.sort(key=lambda item: item.reportinfo()[:2])
return l
def makeitem(self, name, obj):
#assert self.ihook.fspath == self.fspath, self
return self.ihook.pytest_pycollect_makeitem(
collector=self, name=name, obj=obj)
def _genfunctions(self, name, funcobj):
module = self.getparent(Module).obj
clscol = self.getparent(Class)
cls = clscol and clscol.obj or None
transfer_markers(funcobj, cls, module)
fm = self.session._fixturemanager
fixtureinfo = fm.getfixtureinfo(self, funcobj, cls)
metafunc = Metafunc(funcobj, fixtureinfo, self.config,
cls=cls, module=module)
methods = []
if hasattr(module, "pytest_generate_tests"):
methods.append(module.pytest_generate_tests)
if hasattr(cls, "pytest_generate_tests"):
methods.append(cls().pytest_generate_tests)
if methods:
self.ihook.pytest_generate_tests.call_extra(methods,
dict(metafunc=metafunc))
else:
self.ihook.pytest_generate_tests(metafunc=metafunc)
Function = self._getcustomclass("Function")
if not metafunc._calls:
yield Function(name, parent=self, fixtureinfo=fixtureinfo)
else:
# add funcargs() as fixturedefs to fixtureinfo.arg2fixturedefs
add_funcarg_pseudo_fixture_def(self, metafunc, fm)
for callspec in metafunc._calls:
subname = "%s[%s]" %(name, callspec.id)
yield Function(name=subname, parent=self,
callspec=callspec, callobj=funcobj,
fixtureinfo=fixtureinfo,
keywords={callspec.id:True})
def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager):
# this function will transform all collected calls to a function
# if they use direct funcargs (i.e. direct parametrization)
# because we want later test execution to be able to rely on
# an existing FixtureDef structure for all arguments.
# XXX we can probably avoid this algorithm if we modify CallSpec2
# to directly care for creating the fixturedefs within its methods.
if not metafunc._calls[0].funcargs:
return # this function call does not have direct parametrization
# collect funcargs of all callspecs into a list of values
arg2params = {}
arg2scope = {}
for callspec in metafunc._calls:
for argname, argvalue in callspec.funcargs.items():
assert argname not in callspec.params
callspec.params[argname] = argvalue
arg2params_list = arg2params.setdefault(argname, [])
callspec.indices[argname] = len(arg2params_list)
arg2params_list.append(argvalue)
if argname not in arg2scope:
scopenum = callspec._arg2scopenum.get(argname,
scopenum_function)
arg2scope[argname] = scopes[scopenum]
callspec.funcargs.clear()
# register artificial FixtureDef's so that later at test execution
# time we can rely on a proper FixtureDef to exist for fixture setup.
arg2fixturedefs = metafunc._arg2fixturedefs
for argname, valuelist in arg2params.items():
# if we have a scope that is higher than function we need
# to make sure we only ever create an according fixturedef on
# a per-scope basis. We thus store and cache the fixturedef on the
# node related to the scope.
scope = arg2scope[argname]
node = None
if scope != "function":
node = get_scope_node(collector, scope)
if node is None:
assert scope == "class" and isinstance(collector, Module)
# use module-level collector for class-scope (for now)
node = collector
if node and argname in node._name2pseudofixturedef:
arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]]
else:
fixturedef = FixtureDef(fixturemanager, '', argname,
get_direct_param_fixture_func,
arg2scope[argname],
valuelist, False, False)
arg2fixturedefs[argname] = [fixturedef]
if node is not None:
node._name2pseudofixturedef[argname] = fixturedef
def get_direct_param_fixture_func(request):
return request.param
class FuncFixtureInfo:
def __init__(self, argnames, names_closure, name2fixturedefs):
self.argnames = argnames
self.names_closure = names_closure
self.name2fixturedefs = name2fixturedefs
def _marked(func, mark):
""" Returns True if :func: is already marked with :mark:, False otherwise.
This can happen if the marker is applied to a class and the test file is
invoked more than once.
"""
try:
func_mark = getattr(func, mark.name)
except AttributeError:
return False
return mark.args == func_mark.args and mark.kwargs == func_mark.kwargs
def transfer_markers(funcobj, cls, mod):
# XXX this should rather be code in the mark plugin or the mark
# plugin should merge with the python plugin.
for holder in (cls, mod):
try:
pytestmark = holder.pytestmark
except AttributeError:
continue
if isinstance(pytestmark, list):
for mark in pytestmark:
if not _marked(funcobj, mark):
mark(funcobj)
else:
if not _marked(funcobj, pytestmark):
pytestmark(funcobj)
class Module(pytest.File, PyCollector):
""" Collector for test classes and functions. """
def _getobj(self):
return self._memoizedcall('_obj', self._importtestmodule)
def collect(self):
self.session._fixturemanager.parsefactories(self)
return super(Module, self).collect()
def _importtestmodule(self):
# we assume we are only called once per module
importmode = self.config.getoption("--import-mode")
try:
mod = self.fspath.pyimport(ensuresyspath=importmode)
except SyntaxError:
raise self.CollectError(
_pytest._code.ExceptionInfo().getrepr(style="short"))
except self.fspath.ImportMismatchError:
e = sys.exc_info()[1]
raise self.CollectError(
"import file mismatch:\n"
"imported module %r has this __file__ attribute:\n"
" %s\n"
"which is not the same as the test file we want to collect:\n"
" %s\n"
"HINT: remove __pycache__ / .pyc files and/or use a "
"unique basename for your test file modules"
% e.args
)
#print "imported test module", mod
self.config.pluginmanager.consider_module(mod)
return mod
def setup(self):
setup_module = xunitsetup(self.obj, "setUpModule")
if setup_module is None:
setup_module = xunitsetup(self.obj, "setup_module")
if setup_module is not None:
#XXX: nose compat hack, move to nose plugin
# if it takes a positional arg, it's probably a pytest style one
# so we pass the current module object
if _has_positional_arg(setup_module):
setup_module(self.obj)
else:
setup_module()
fin = getattr(self.obj, 'tearDownModule', None)
if fin is None:
fin = getattr(self.obj, 'teardown_module', None)
if fin is not None:
#XXX: nose compat hack, move to nose plugin
# if it takes a positional arg, it's probably a pytest style one
# so we pass the current module object
if _has_positional_arg(fin):
finalizer = lambda: fin(self.obj)
else:
finalizer = fin
self.addfinalizer(finalizer)
class Class(PyCollector):
""" Collector for test methods. """
def collect(self):
if hasinit(self.obj):
self.warn("C1", "cannot collect test class %r because it has a "
"__init__ constructor" % self.obj.__name__)
return []
return [self._getcustomclass("Instance")(name="()", parent=self)]
def setup(self):
setup_class = xunitsetup(self.obj, 'setup_class')
if setup_class is not None:
setup_class = getattr(setup_class, 'im_func', setup_class)
setup_class = getattr(setup_class, '__func__', setup_class)
setup_class(self.obj)
fin_class = getattr(self.obj, 'teardown_class', None)
if fin_class is not None:
fin_class = getattr(fin_class, 'im_func', fin_class)
fin_class = getattr(fin_class, '__func__', fin_class)
self.addfinalizer(lambda: fin_class(self.obj))
class Instance(PyCollector):
def _getobj(self):
obj = self.parent.obj()
return obj
def collect(self):
self.session._fixturemanager.parsefactories(self)
return super(Instance, self).collect()
def newinstance(self):
self.obj = self._getobj()
return self.obj
class FunctionMixin(PyobjMixin):
""" mixin for the code common to Function and Generator.
"""
def setup(self):
""" perform setup for this test function. """
if hasattr(self, '_preservedparent'):
obj = self._preservedparent
elif isinstance(self.parent, Instance):
obj = self.parent.newinstance()
self.obj = self._getobj()
else:
obj = self.parent.obj
if inspect.ismethod(self.obj):
setup_name = 'setup_method'
teardown_name = 'teardown_method'
else:
setup_name = 'setup_function'
teardown_name = 'teardown_function'
setup_func_or_method = xunitsetup(obj, setup_name)
if setup_func_or_method is not None:
setup_func_or_method(self.obj)
fin = getattr(obj, teardown_name, None)
if fin is not None:
self.addfinalizer(lambda: fin(self.obj))
def _prunetraceback(self, excinfo):
if hasattr(self, '_obj') and not self.config.option.fulltrace:
code = _pytest._code.Code(get_real_func(self.obj))
path, firstlineno = code.path, code.firstlineno
traceback = excinfo.traceback
ntraceback = traceback.cut(path=path, firstlineno=firstlineno)
if ntraceback == traceback:
ntraceback = ntraceback.cut(path=path)
if ntraceback == traceback:
#ntraceback = ntraceback.cut(excludepath=cutdir2)
ntraceback = ntraceback.filter(filter_traceback)
if not ntraceback:
ntraceback = traceback
excinfo.traceback = ntraceback.filter()
# issue364: mark all but first and last frames to
# only show a single-line message for each frame
if self.config.option.tbstyle == "auto":
if len(excinfo.traceback) > 2:
for entry in excinfo.traceback[1:-1]:
entry.set_repr_style('short')
def _repr_failure_py(self, excinfo, style="long"):
if excinfo.errisinstance(pytest.fail.Exception):
if not excinfo.value.pytrace:
return py._builtin._totext(excinfo.value)
return super(FunctionMixin, self)._repr_failure_py(excinfo,
style=style)
def repr_failure(self, excinfo, outerr=None):
assert outerr is None, "XXX outerr usage is deprecated"
style = self.config.option.tbstyle
if style == "auto":
style = "long"
return self._repr_failure_py(excinfo, style=style)
class Generator(FunctionMixin, PyCollector):
def collect(self):
# test generators are seen as collectors but they also
# invoke setup/teardown on popular request
# (induced by the common "test_*" naming shared with normal tests)
self.session._setupstate.prepare(self)
# see FunctionMixin.setup and test_setupstate_is_preserved_134
self._preservedparent = self.parent.obj
l = []
seen = {}
for i, x in enumerate(self.obj()):
name, call, args = self.getcallargs(x)
if not callable(call):
raise TypeError("%r yielded non callable test %r" %(self.obj, call,))
if name is None:
name = "[%d]" % i
else:
name = "['%s']" % name
if name in seen:
raise ValueError("%r generated tests with non-unique name %r" %(self, name))
seen[name] = True
l.append(self.Function(name, self, args=args, callobj=call))
return l
def getcallargs(self, obj):
if not isinstance(obj, (tuple, list)):
obj = (obj,)
# explicit naming
if isinstance(obj[0], py.builtin._basestring):
name = obj[0]
obj = obj[1:]
else:
name = None
call, args = obj[0], obj[1:]
return name, call, args
def hasinit(obj):
init = getattr(obj, '__init__', None)
if init:
if init != object.__init__:
return True
def fillfixtures(function):
""" fill missing funcargs for a test function. """
try:
request = function._request
except AttributeError:
# XXX this special code path is only expected to execute
# with the oejskit plugin. It uses classes with funcargs
# and we thus have to work a bit to allow this.
fm = function.session._fixturemanager
fi = fm.getfixtureinfo(function.parent, function.obj, None)
function._fixtureinfo = fi
request = function._request = FixtureRequest(function)
request._fillfixtures()
# prune out funcargs for jstests
newfuncargs = {}
for name in fi.argnames:
newfuncargs[name] = function.funcargs[name]
function.funcargs = newfuncargs
else:
request._fillfixtures()
_notexists = object()
class CallSpec2(object):
def __init__(self, metafunc):
self.metafunc = metafunc
self.funcargs = {}
self._idlist = []
self.params = {}
self._globalid = _notexists
self._globalid_args = set()
self._globalparam = _notexists
self._arg2scopenum = {} # used for sorting parametrized resources
self.keywords = {}
self.indices = {}
def copy(self, metafunc):
cs = CallSpec2(self.metafunc)
cs.funcargs.update(self.funcargs)
cs.params.update(self.params)
cs.keywords.update(self.keywords)
cs.indices.update(self.indices)
cs._arg2scopenum.update(self._arg2scopenum)
cs._idlist = list(self._idlist)
cs._globalid = self._globalid
cs._globalid_args = self._globalid_args
cs._globalparam = self._globalparam
return cs
def _checkargnotcontained(self, arg):
if arg in self.params or arg in self.funcargs:
raise ValueError("duplicate %r" %(arg,))
def getparam(self, name):
try:
return self.params[name]
except KeyError:
if self._globalparam is _notexists:
raise ValueError(name)
return self._globalparam
@property
def id(self):
return "-".join(map(str, filter(None, self._idlist)))
def setmulti(self, valtypes, argnames, valset, id, keywords, scopenum,
param_index):
for arg,val in zip(argnames, valset):
self._checkargnotcontained(arg)
valtype_for_arg = valtypes[arg]
getattr(self, valtype_for_arg)[arg] = val
self.indices[arg] = param_index
self._arg2scopenum[arg] = scopenum
if val is _notexists:
self._emptyparamspecified = True
self._idlist.append(id)
self.keywords.update(keywords)
def setall(self, funcargs, id, param):
for x in funcargs:
self._checkargnotcontained(x)
self.funcargs.update(funcargs)
if id is not _notexists:
self._idlist.append(id)
if param is not _notexists:
assert self._globalparam is _notexists
self._globalparam = param
for arg in funcargs:
self._arg2scopenum[arg] = scopenum_function
class FuncargnamesCompatAttr:
""" helper class so that Metafunc, Function and FixtureRequest
don't need to each define the "funcargnames" compatibility attribute.
"""
@property
def funcargnames(self):
""" alias attribute for ``fixturenames`` for pre-2.3 compatibility"""
return self.fixturenames
class Metafunc(FuncargnamesCompatAttr):
"""
Metafunc objects are passed to the ``pytest_generate_tests`` hook.
They help to inspect a test function and to generate tests according to
test configuration or values specified in the class or module where a
test function is defined.
:ivar fixturenames: set of fixture names required by the test function
:ivar function: underlying python test function
:ivar cls: class object in which the test function is defined, or ``None``.
:ivar module: the module object in which the test function is defined.
:ivar config: access to the :class:`_pytest.config.Config` object for the
test session.
:ivar funcargnames:
.. deprecated:: 2.3
Use ``fixturenames`` instead.
"""
def __init__(self, function, fixtureinfo, config, cls=None, module=None):
self.config = config
self.module = module
self.function = function
self.fixturenames = fixtureinfo.names_closure
self._arg2fixturedefs = fixtureinfo.name2fixturedefs
self.cls = cls
self._calls = []
self._ids = py.builtin.set()
def parametrize(self, argnames, argvalues, indirect=False, ids=None,
scope=None):
""" Add new invocations to the underlying test function using the list
of argvalues for the given argnames. Parametrization is performed
during the collection phase. If you need to set up expensive resources,
consider using ``indirect`` so the setup happens at test setup time instead.
:arg argnames: a comma-separated string denoting one or more argument
names, or a list/tuple of argument strings.
:arg argvalues: The list of argvalues determines how often a
test is invoked with different argument values. If only one
argname was specified argvalues is a list of values. If N
argnames were specified, argvalues must be a list of N-tuples,
where each tuple-element specifies a value for its respective
argname.
:arg indirect: a list of argument names (a subset of argnames), or a
boolean. If True, all argnames are treated as indirect. Each argvalue
corresponding to an argname in this list will
be passed as request.param to its respective argname fixture
function so that it can perform more expensive setups during the
setup phase of a test rather than at collection time.
:arg ids: list of string ids, or a callable.
If strings, each corresponds to an entry in argvalues and becomes
part of the test id.
If callable, it should take one argument (a single argvalue) and return
a string or return None. If None, the automatically generated id for that
argument will be used.
If no ids are provided they will be generated automatically from
the argvalues.
:arg scope: if specified it denotes the scope of the parameters.
The scope is used for grouping tests by parameter instances.
It will also override any fixture-function-defined scope, allowing
you to set a dynamic scope using test context or configuration.
"""
# individual parametrized argument sets can be wrapped in a series
# of markers in which case we unwrap the values and apply the mark
# at Function init
newkeywords = {}
unwrapped_argvalues = []
for i, argval in enumerate(argvalues):
while isinstance(argval, MarkDecorator):
newmark = MarkDecorator(argval.markname,
argval.args[:-1], argval.kwargs)
newmarks = newkeywords.setdefault(i, {})
newmarks[newmark.markname] = newmark
argval = argval.args[-1]
unwrapped_argvalues.append(argval)
argvalues = unwrapped_argvalues
if not isinstance(argnames, (tuple, list)):
argnames = [x.strip() for x in argnames.split(",") if x.strip()]
if len(argnames) == 1:
argvalues = [(val,) for val in argvalues]
if not argvalues:
argvalues = [(_notexists,) * len(argnames)]
if scope is None:
scope = "function"
scopenum = scopes.index(scope)
valtypes = {}
for arg in argnames:
if arg not in self.fixturenames:
raise ValueError("%r uses no fixture %r" %(self.function, arg))
if indirect is True:
valtypes = dict.fromkeys(argnames, "params")
elif indirect is False:
valtypes = dict.fromkeys(argnames, "funcargs")
elif isinstance(indirect, (tuple, list)):
valtypes = dict.fromkeys(argnames, "funcargs")
for arg in indirect:
if arg not in argnames:
raise ValueError("indirect given to %r: fixture %r doesn't exist" %(
self.function, arg))
valtypes[arg] = "params"
idfn = None
if callable(ids):
idfn = ids
ids = None
if ids and len(ids) != len(argvalues):
raise ValueError('%d tests specified with %d ids' %(
len(argvalues), len(ids)))
if not ids:
ids = idmaker(argnames, argvalues, idfn)
newcalls = []
for callspec in self._calls or [CallSpec2(self)]:
for param_index, valset in enumerate(argvalues):
assert len(valset) == len(argnames)
newcallspec = callspec.copy(self)
newcallspec.setmulti(valtypes, argnames, valset, ids[param_index],
newkeywords.get(param_index, {}), scopenum,
param_index)
newcalls.append(newcallspec)
self._calls = newcalls
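# Illustrative usage sketch (comment only) of the parametrization
# implemented above, via the pytest.mark.parametrize marker:
#
#     import pytest
#
#     @pytest.mark.parametrize("n, expected", [(1, 2), (3, 4)])
#     def test_increment(n, expected):
#         assert n + 1 == expected
#
# With indirect=True each value is delivered to the like-named fixture
# as request.param instead of directly to the test function.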
def addcall(self, funcargs=None, id=_notexists, param=_notexists):
""" (deprecated, use parametrize) Add a new call to the underlying
test function during the collection phase of a test run. Note that
request.addcall() is called during the test collection phase, prior to
and independently of actual test execution. You should only use addcall()
if you need to specify multiple arguments of a test function.
:arg funcargs: argument keyword dictionary used when invoking
the test function.
:arg id: used for reporting and identification purposes. If you
don't supply an `id` an automatic unique id will be generated.
:arg param: a parameter which will be exposed to a later fixture function
invocation through the ``request.param`` attribute.
"""
assert funcargs is None or isinstance(funcargs, dict)
if funcargs is not None:
for name in funcargs:
if name not in self.fixturenames:
pytest.fail("funcarg %r not used in this function." % name)
else:
funcargs = {}
if id is None:
raise ValueError("id=None not allowed")
if id is _notexists:
id = len(self._calls)
id = str(id)
if id in self._ids:
raise ValueError("duplicate id %r" % id)
self._ids.add(id)
cs = CallSpec2(self)
cs.setall(funcargs, id, param)
self._calls.append(cs)
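# Illustrative usage sketch (comment only) of the deprecated addcall()
# API from a pytest_generate_tests hook; parametrize() above is the
# modern replacement:
#
#     def pytest_generate_tests(metafunc):
#         if "arg1" in metafunc.fixturenames:
#             metafunc.addcall(funcargs=dict(arg1=1))
#             metafunc.addcall(funcargs=dict(arg1=2))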
if _PY3:
import codecs
def _escape_bytes(val):
"""
If val is pure ascii, returns it as a str(), otherwise escapes
into a sequence of escaped bytes:
b'\xc3\xb4\xc5\xd6' -> u'\\xc3\\xb4\\xc5\\xd6'
note:
the obvious "v.decode('unicode-escape')" will return
valid utf-8 unicode if it finds them in the string, but we
want to return escaped bytes for any byte, even if they match
a utf-8 string.
"""
if val:
# source: http://goo.gl/bGsnwC
encoded_bytes, _ = codecs.escape_encode(val)
return encoded_bytes.decode('ascii')
else:
# empty bytes crashes codecs.escape_encode (#1087)
return ''
else:
def _escape_bytes(val):
"""
In py2 bytes and str are the same type, so return it unchanged if it
is a full ascii string, otherwise escape it into its binary form.
"""
try:
return val.decode('ascii')
except UnicodeDecodeError:
return val.encode('string-escape')
def _idval(val, argname, idx, idfn):
if idfn:
try:
s = idfn(val)
if s:
return s
except Exception:
pass
if isinstance(val, bytes):
return _escape_bytes(val)
elif isinstance(val, (float, int, str, bool, NoneType)):
return str(val)
elif isinstance(val, REGEX_TYPE):
return _escape_bytes(val.pattern) if isinstance(val.pattern, bytes) else val.pattern
elif enum is not None and isinstance(val, enum.Enum):
return str(val)
elif isclass(val) and hasattr(val, '__name__'):
return val.__name__
elif _PY2 and isinstance(val, unicode):
# special case for python 2: if a unicode string is
# convertible to ascii, return it as an str() object instead
try:
return str(val)
except UnicodeError:
# fallthrough
pass
return str(argname)+str(idx)
def _idvalset(idx, valset, argnames, idfn):
this_id = [_idval(val, argname, idx, idfn)
for val, argname in zip(valset, argnames)]
return "-".join(this_id)
def idmaker(argnames, argvalues, idfn=None):
ids = [_idvalset(valindex, valset, argnames, idfn)
for valindex, valset in enumerate(argvalues)]
if len(set(ids)) < len(ids):
# user may have provided a bad idfn which means the ids are not unique
ids = [str(i) + testid for i, testid in enumerate(ids)]
return ids
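# Illustrative examples (comment only) of the id generation above:
#
#     idmaker(("a",), [(1,), ("x",)])   -> ["1", "x"]
#     idmaker(("a", "b"), [(1, 2)])     -> ["1-2"]
#     idmaker(("a",), [(1,), (1,)])     -> ["01", "11"]   (duplicates are
#                                          made unique by an index prefix)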
def showfixtures(config):
from _pytest.main import wrap_session
return wrap_session(config, _showfixtures_main)
def _showfixtures_main(config, session):
import _pytest.config
session.perform_collect()
curdir = py.path.local()
tw = _pytest.config.create_terminal_writer(config)
verbose = config.getvalue("verbose")
fm = session._fixturemanager
available = []
for argname, fixturedefs in fm._arg2fixturedefs.items():
assert fixturedefs is not None
if not fixturedefs:
continue
fixturedef = fixturedefs[-1]
loc = getlocation(fixturedef.func, curdir)
available.append((len(fixturedef.baseid),
fixturedef.func.__module__,
curdir.bestrelpath(loc),
fixturedef.argname, fixturedef))
available.sort()
currentmodule = None
for baseid, module, bestrel, argname, fixturedef in available:
if currentmodule != module:
if not module.startswith("_pytest."):
tw.line()
tw.sep("-", "fixtures defined from %s" %(module,))
currentmodule = module
if verbose <= 0 and argname[0] == "_":
continue
if verbose > 0:
funcargspec = "%s -- %s" %(argname, bestrel,)
else:
funcargspec = argname
tw.line(funcargspec, green=True)
loc = getlocation(fixturedef.func, curdir)
doc = fixturedef.func.__doc__ or ""
if doc:
for line in doc.strip().split("\n"):
tw.line(" " + line.strip())
else:
tw.line(" %s: no docstring available" %(loc,),
red=True)
def getlocation(function, curdir):
import inspect
fn = py.path.local(inspect.getfile(function))
lineno = py.builtin._getcode(function).co_firstlineno
if fn.relto(curdir):
fn = fn.relto(curdir)
return "%s:%d" %(fn, lineno+1)
# builtin pytest.raises helper
def raises(expected_exception, *args, **kwargs):
""" assert that a code block/function call raises ``expected_exception``
and raise a failure exception otherwise.
This helper produces an ``ExceptionInfo()`` object (see below).
If using Python 2.5 or above, you may use this function as a
context manager::
>>> with raises(ZeroDivisionError):
... 1/0
.. note::
When using ``pytest.raises`` as a context manager, it's worthwhile to
note that normal context manager rules apply and that the exception
raised *must* be the final line in the scope of the context manager.
Lines of code after that, within the scope of the context manager, will
not be executed. For example::
>>> with raises(OSError) as exc_info:
assert 1 == 1 # this will execute as expected
raise OSError(errno.EEXIST, 'directory exists')
assert exc_info.value.errno == errno.EEXIST # this will not execute
Instead, the following approach must be taken (note the difference in
scope)::
>>> with raises(OSError) as exc_info:
assert 1 == 1 # this will execute as expected
raise OSError(errno.EEXIST, 'directory exists')
assert exc_info.value.errno == errno.EEXIST # this will now execute
Or you can specify a callable by passing a to-be-called lambda::
>>> raises(ZeroDivisionError, lambda: 1/0)
<ExceptionInfo ...>
or you can specify an arbitrary callable with arguments::
>>> def f(x): return 1/x
...
>>> raises(ZeroDivisionError, f, 0)
<ExceptionInfo ...>
>>> raises(ZeroDivisionError, f, x=0)
<ExceptionInfo ...>
A third possibility is to use a string to be executed::
>>> raises(ZeroDivisionError, "f(0)")
<ExceptionInfo ...>
.. autoclass:: _pytest._code.ExceptionInfo
:members:
.. note::
Similar to caught exception objects in Python, explicitly clearing
local references to returned ``ExceptionInfo`` objects can
help the Python interpreter speed up its garbage collection.
Clearing those references breaks a reference cycle
(``ExceptionInfo`` --> caught exception --> frame stack raising
the exception --> current frame stack --> local variables -->
``ExceptionInfo``) which makes Python keep all objects referenced
from that cycle (including all local variables in the current
frame) alive until the next cyclic garbage collection run. See the
official Python ``try`` statement documentation for more detailed
information.
"""
__tracebackhide__ = True
if expected_exception is AssertionError:
# we want to catch an AssertionError
# replace our subclass with the builtin one
# see https://github.com/pytest-dev/pytest/issues/176
from _pytest.assertion.util import BuiltinAssertionError \
as expected_exception
msg = ("exceptions must be old-style classes or"
" derived from BaseException, not %s")
if isinstance(expected_exception, tuple):
for exc in expected_exception:
if not isclass(exc):
raise TypeError(msg % type(exc))
elif not isclass(expected_exception):
raise TypeError(msg % type(expected_exception))
if not args:
return RaisesContext(expected_exception)
elif isinstance(args[0], str):
code, = args
assert isinstance(code, str)
frame = sys._getframe(1)
loc = frame.f_locals.copy()
loc.update(kwargs)
#print "raises frame scope: %r" % frame.f_locals
try:
code = _pytest._code.Source(code).compile()
py.builtin.exec_(code, frame.f_globals, loc)
# XXX didn't f_globals == f_locals mean something special?
# this is destroyed here ...
except expected_exception:
return _pytest._code.ExceptionInfo()
else:
func = args[0]
try:
func(*args[1:], **kwargs)
except expected_exception:
return _pytest._code.ExceptionInfo()
pytest.fail("DID NOT RAISE {0}".format(expected_exception))
class RaisesContext(object):
def __init__(self, expected_exception):
self.expected_exception = expected_exception
self.excinfo = None
def __enter__(self):
self.excinfo = object.__new__(_pytest._code.ExceptionInfo)
return self.excinfo
def __exit__(self, *tp):
__tracebackhide__ = True
if tp[0] is None:
pytest.fail("DID NOT RAISE")
if sys.version_info < (2, 7):
# py26: on __exit__() exc_value often does not contain the
# exception value.
# http://bugs.python.org/issue7853
if not isinstance(tp[1], BaseException):
exc_type, value, traceback = tp
tp = exc_type, exc_type(value), traceback
self.excinfo.__init__(tp)
return issubclass(self.excinfo.type, self.expected_exception)
#
# the basic pytest Function item
#
class Function(FunctionMixin, pytest.Item, FuncargnamesCompatAttr):
""" a Function Item is responsible for setting up and executing a
Python test function.
"""
_genid = None
def __init__(self, name, parent, args=None, config=None,
callspec=None, callobj=NOTSET, keywords=None, session=None,
fixtureinfo=None):
super(Function, self).__init__(name, parent, config=config,
session=session)
self._args = args
if callobj is not NOTSET:
self.obj = callobj
self.keywords.update(self.obj.__dict__)
if callspec:
self.callspec = callspec
self.keywords.update(callspec.keywords)
if keywords:
self.keywords.update(keywords)
if fixtureinfo is None:
fixtureinfo = self.session._fixturemanager.getfixtureinfo(
self.parent, self.obj, self.cls,
funcargs=not self._isyieldedfunction())
self._fixtureinfo = fixtureinfo
self.fixturenames = fixtureinfo.names_closure
self._initrequest()
def _initrequest(self):
self.funcargs = {}
if self._isyieldedfunction():
assert not hasattr(self, "callspec"), (
"yielded functions (deprecated) cannot have funcargs")
else:
if hasattr(self, "callspec"):
callspec = self.callspec
assert not callspec.funcargs
self._genid = callspec.id
if hasattr(callspec, "param"):
self.param = callspec.param
self._request = FixtureRequest(self)
@property
def function(self):
"underlying python 'function' object"
return getattr(self.obj, 'im_func', self.obj)
def _getobj(self):
name = self.name
i = name.find("[") # parametrization
if i != -1:
name = name[:i]
return getattr(self.parent.obj, name)
@property
def _pyfuncitem(self):
"(compatonly) for code expecting pytest-2.2 style request objects"
return self
def _isyieldedfunction(self):
return getattr(self, "_args", None) is not None
def runtest(self):
""" execute the underlying test function. """
self.ihook.pytest_pyfunc_call(pyfuncitem=self)
def setup(self):
# check if parametrization happened with an empty list
try:
self.callspec._emptyparamspecified
except AttributeError:
pass
else:
fs, lineno = self._getfslineno()
pytest.skip("got empty parameter set, function %s at %s:%d" %(
self.function.__name__, fs, lineno))
super(Function, self).setup()
fillfixtures(self)
scope2props = dict(session=())
scope2props["module"] = ("fspath", "module")
scope2props["class"] = scope2props["module"] + ("cls",)
scope2props["instance"] = scope2props["class"] + ("instance", )
scope2props["function"] = scope2props["instance"] + ("function", "keywords")
def scopeproperty(name=None, doc=None):
def decoratescope(func):
scopename = name or func.__name__
def provide(self):
if func.__name__ in scope2props[self.scope]:
return func(self)
raise AttributeError("%s not available in %s-scoped context" % (
scopename, self.scope))
return property(provide, None, None, func.__doc__)
return decoratescope
class FixtureRequest(FuncargnamesCompatAttr):
""" A request for a fixture from a test or fixture function.
A request object gives access to the requesting test context
and has an optional ``param`` attribute in case
the fixture is parametrized indirectly.
"""
def __init__(self, pyfuncitem):
self._pyfuncitem = pyfuncitem
#: fixture for which this request is being performed
self.fixturename = None
#: Scope string, one of "function", "class", "module", "session"
self.scope = "function"
self._funcargs = {}
self._fixturedefs = {}
fixtureinfo = pyfuncitem._fixtureinfo
self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy()
self._arg2index = {}
self.fixturenames = fixtureinfo.names_closure
self._fixturemanager = pyfuncitem.session._fixturemanager
@property
def node(self):
""" underlying collection node (depends on current request scope)"""
return self._getscopeitem(self.scope)
def _getnextfixturedef(self, argname):
fixturedefs = self._arg2fixturedefs.get(argname, None)
if fixturedefs is None:
# we arrive here because of a dynamic call to
# getfuncargvalue(argname) usage which was naturally
# not known at parsing/collection time
fixturedefs = self._fixturemanager.getfixturedefs(
argname, self._pyfuncitem.parent.nodeid)
self._arg2fixturedefs[argname] = fixturedefs
# fixturedefs list is immutable so we maintain a decreasing index
index = self._arg2index.get(argname, 0) - 1
if fixturedefs is None or (-index > len(fixturedefs)):
raise FixtureLookupError(argname, self)
self._arg2index[argname] = index
return fixturedefs[index]
@property
def config(self):
""" the pytest config object associated with this request. """
return self._pyfuncitem.config
@scopeproperty()
def function(self):
""" test function object if the request has a per-function scope. """
return self._pyfuncitem.obj
@scopeproperty("class")
def cls(self):
""" class (can be None) where the test function was collected. """
clscol = self._pyfuncitem.getparent(pytest.Class)
if clscol:
return clscol.obj
@property
def instance(self):
""" instance (can be None) on which test function was collected. """
# unittest support hack, see _pytest.unittest.TestCaseFunction
try:
return self._pyfuncitem._testcase
except AttributeError:
function = getattr(self, "function", None)
if function is not None:
return py.builtin._getimself(function)
@scopeproperty()
def module(self):
""" python module object where the test function was collected. """
return self._pyfuncitem.getparent(pytest.Module).obj
@scopeproperty()
def fspath(self):
""" the file system path of the test module which collected this test. """
return self._pyfuncitem.fspath
@property
def keywords(self):
""" keywords/markers dictionary for the underlying node. """
return self.node.keywords
@property
def session(self):
""" pytest session object. """
return self._pyfuncitem.session
def addfinalizer(self, finalizer):
""" add finalizer/teardown function to be called after the
last test within the requesting test context finished
execution. """
# XXX usually this method is shadowed by fixturedef specific ones
self._addfinalizer(finalizer, scope=self.scope)
def _addfinalizer(self, finalizer, scope):
colitem = self._getscopeitem(scope)
self._pyfuncitem.session._setupstate.addfinalizer(
finalizer=finalizer, colitem=colitem)
def applymarker(self, marker):
""" Apply a marker to a single test function invocation.
This method is useful if you don't want to have a keyword/marker
on all function invocations.
:arg marker: a :py:class:`_pytest.mark.MarkDecorator` object
created by a call to ``pytest.mark.NAME(...)``.
"""
try:
self.node.keywords[marker.markname] = marker
except AttributeError:
raise ValueError(marker)
def raiseerror(self, msg):
""" raise a FixtureLookupError with the given message. """
raise self._fixturemanager.FixtureLookupError(None, self, msg)
def _fillfixtures(self):
item = self._pyfuncitem
fixturenames = getattr(item, "fixturenames", self.fixturenames)
for argname in fixturenames:
if argname not in item.funcargs:
item.funcargs[argname] = self.getfuncargvalue(argname)
def cached_setup(self, setup, teardown=None, scope="module", extrakey=None):
""" (deprecated) Return a testing resource managed by ``setup`` &
``teardown`` calls. ``scope`` and ``extrakey`` determine when the
``teardown`` function will be called so that subsequent calls to
``setup`` would recreate the resource. With pytest-2.3 you often
do not need ``cached_setup()`` as you can directly declare a scope
on a fixture function and register a finalizer through
``request.addfinalizer()``.
:arg teardown: function receiving a previously setup resource.
:arg setup: a no-argument function creating a resource.
:arg scope: a string value out of ``function``, ``class``, ``module``
or ``session`` indicating the caching lifecycle of the resource.
:arg extrakey: added to internal caching key of (funcargname, scope).
"""
if not hasattr(self.config, '_setupcache'):
self.config._setupcache = {} # XXX weakref?
cachekey = (self.fixturename, self._getscopeitem(scope), extrakey)
cache = self.config._setupcache
try:
val = cache[cachekey]
except KeyError:
self._check_scope(self.fixturename, self.scope, scope)
val = setup()
cache[cachekey] = val
if teardown is not None:
def finalizer():
del cache[cachekey]
teardown(val)
self._addfinalizer(finalizer, scope=scope)
return val
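# Illustrative usage sketch (comment only; connect_db is a hypothetical
# helper) of the deprecated cached_setup() API from an old-style
# pytest_funcarg__ factory:
#
#     def pytest_funcarg__db(request):
#         return request.cached_setup(
#             setup=lambda: connect_db(),
#             teardown=lambda db: db.close(),
#             scope="session")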
def getfuncargvalue(self, argname):
""" Dynamically retrieve a named fixture function argument.
As of pytest-2.3, it is easier and usually better to access other
fixture values by stating them as input arguments in the fixture
function. If you can only decide about using another fixture at test
setup time, you may use this function to retrieve it inside a fixture
function body.
"""
return self._get_active_fixturedef(argname).cached_result[0]
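# Illustrative usage sketch (comment only; the --use-tmpdir option is
# hypothetical): deciding about another fixture only at setup time, as
# described in the docstring above.
#
#     import pytest
#
#     @pytest.fixture
#     def storage(request):
#         if request.config.getoption("--use-tmpdir"):
#             return request.getfuncargvalue("tmpdir")
#         return "/tmp/storage"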
def _get_active_fixturedef(self, argname):
try:
return self._fixturedefs[argname]
except KeyError:
try:
fixturedef = self._getnextfixturedef(argname)
except FixtureLookupError:
if argname == "request":
class PseudoFixtureDef:
cached_result = (self, [0], None)
scope = "function"
return PseudoFixtureDef
raise
# remove indent to prevent the python3 exception
# from leaking into the call
result = self._getfuncargvalue(fixturedef)
self._funcargs[argname] = result
self._fixturedefs[argname] = fixturedef
return fixturedef
def _get_fixturestack(self):
current = self
l = []
while 1:
fixturedef = getattr(current, "_fixturedef", None)
if fixturedef is None:
l.reverse()
return l
l.append(fixturedef)
current = current._parent_request
def _getfuncargvalue(self, fixturedef):
# prepare a subrequest object before calling fixture function
# (latter managed by fixturedef)
argname = fixturedef.argname
funcitem = self._pyfuncitem
scope = fixturedef.scope
try:
param = funcitem.callspec.getparam(argname)
except (AttributeError, ValueError):
param = NOTSET
param_index = 0
else:
# indices might not be set if old-style metafunc.addcall() was used
param_index = funcitem.callspec.indices.get(argname, 0)
# if a parametrize invocation set a scope it will override
# the static scope defined with the fixture function
paramscopenum = funcitem.callspec._arg2scopenum.get(argname)
if paramscopenum is not None:
scope = scopes[paramscopenum]
subrequest = SubRequest(self, scope, param, param_index, fixturedef)
# check if a higher-level scoped fixture accesses a lower level one
subrequest._check_scope(argname, self.scope, scope)
# clear sys.exc_info before invoking the fixture (python bug?)
# if it's not explicitly cleared it will leak into the call
exc_clear()
try:
# call the fixture function
val = fixturedef.execute(request=subrequest)
finally:
# if fixture function failed it might have registered finalizers
self.session._setupstate.addfinalizer(fixturedef.finish,
subrequest.node)
return val
def _check_scope(self, argname, invoking_scope, requested_scope):
if argname == "request":
return
if scopemismatch(invoking_scope, requested_scope):
# try to report something helpful
lines = self._factorytraceback()
pytest.fail("ScopeMismatch: You tried to access the %r scoped "
"fixture %r with a %r scoped request object, "
"involved factories\n%s" %(
(requested_scope, argname, invoking_scope, "\n".join(lines))),
pytrace=False)
def _factorytraceback(self):
lines = []
for fixturedef in self._get_fixturestack():
factory = fixturedef.func
fs, lineno = getfslineno(factory)
p = self._pyfuncitem.session.fspath.bestrelpath(fs)
args = _format_args(factory)
lines.append("%s:%d: def %s%s" %(
p, lineno, factory.__name__, args))
return lines
def _getscopeitem(self, scope):
if scope == "function":
# this might also be a non-function Item despite its attribute name
return self._pyfuncitem
node = get_scope_node(self._pyfuncitem, scope)
if node is None and scope == "class":
# fallback to function item itself
node = self._pyfuncitem
assert node
return node
def __repr__(self):
return "<FixtureRequest for %r>" %(self.node)
class SubRequest(FixtureRequest):
""" a sub request for handling getting a fixture from a
test function/fixture. """
def __init__(self, request, scope, param, param_index, fixturedef):
self._parent_request = request
self.fixturename = fixturedef.argname
if param is not NOTSET:
self.param = param
self.param_index = param_index
self.scope = scope
self._fixturedef = fixturedef
self.addfinalizer = fixturedef.addfinalizer
self._pyfuncitem = request._pyfuncitem
self._funcargs = request._funcargs
self._fixturedefs = request._fixturedefs
self._arg2fixturedefs = request._arg2fixturedefs
self._arg2index = request._arg2index
self.fixturenames = request.fixturenames
self._fixturemanager = request._fixturemanager
def __repr__(self):
return "<SubRequest %r for %r>" % (self.fixturename, self._pyfuncitem)
class ScopeMismatchError(Exception):
""" A fixture function tries to use a different fixture function which
which has a lower scope (e.g. a Session one calls a function one)
"""
scopes = "session module class function".split()
scopenum_function = scopes.index("function")
def scopemismatch(currentscope, newscope):
return scopes.index(newscope) > scopes.index(currentscope)
class FixtureLookupError(LookupError):
""" could not return a requested Fixture (missing or invalid). """
def __init__(self, argname, request, msg=None):
self.argname = argname
self.request = request
self.fixturestack = request._get_fixturestack()
self.msg = msg
def formatrepr(self):
tblines = []
addline = tblines.append
stack = [self.request._pyfuncitem.obj]
stack.extend(map(lambda x: x.func, self.fixturestack))
msg = self.msg
if msg is not None:
            # the last fixture raised an error, let's present
            # it at the requesting side
stack = stack[:-1]
for function in stack:
fspath, lineno = getfslineno(function)
try:
lines, _ = inspect.getsourcelines(get_real_func(function))
except (IOError, IndexError):
error_msg = "file %s, line %s: source code not available"
addline(error_msg % (fspath, lineno+1))
else:
addline("file %s, line %s" % (fspath, lineno+1))
for i, line in enumerate(lines):
line = line.rstrip()
addline(" " + line)
if line.lstrip().startswith('def'):
break
if msg is None:
fm = self.request._fixturemanager
available = []
for name, fixturedef in fm._arg2fixturedefs.items():
parentid = self.request._pyfuncitem.parent.nodeid
faclist = list(fm._matchfactories(fixturedef, parentid))
if faclist:
available.append(name)
msg = "fixture %r not found" % (self.argname,)
msg += "\n available fixtures: %s" %(", ".join(available),)
msg += "\n use 'py.test --fixtures [testpath]' for help on them."
return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname)
class FixtureLookupErrorRepr(TerminalRepr):
def __init__(self, filename, firstlineno, tblines, errorstring, argname):
self.tblines = tblines
self.errorstring = errorstring
self.filename = filename
self.firstlineno = firstlineno
self.argname = argname
def toterminal(self, tw):
#tw.line("FixtureLookupError: %s" %(self.argname), red=True)
for tbline in self.tblines:
tw.line(tbline.rstrip())
for line in self.errorstring.split("\n"):
tw.line(" " + line.strip(), red=True)
tw.line()
tw.line("%s:%d" % (self.filename, self.firstlineno+1))
class FixtureManager:
"""
    pytest fixture definitions and information are stored and managed
    by this class.
During collection fm.parsefactories() is called multiple times to parse
fixture function definitions into FixtureDef objects and internal
data structures.
During collection of test functions, metafunc-mechanics instantiate
a FuncFixtureInfo object which is cached per node/func-name.
This FuncFixtureInfo object is later retrieved by Function nodes
which themselves offer a fixturenames attribute.
The FuncFixtureInfo object holds information about fixtures and FixtureDefs
relevant for a particular function. An initial list of fixtures is
assembled like this:
- ini-defined usefixtures
- autouse-marked fixtures along the collection chain up from the function
- usefixtures markers at module/class/function level
- test function funcargs
Subsequently the funcfixtureinfo.fixturenames attribute is computed
as the closure of the fixtures needed to setup the initial fixtures,
    i.e. fixtures needed by fixture functions themselves are appended
    to the fixturenames list.
    During the test setup phase all fixturenames are instantiated,
    retrieved via a lookup of their FuncFixtureInfo.
"""
_argprefix = "pytest_funcarg__"
FixtureLookupError = FixtureLookupError
FixtureLookupErrorRepr = FixtureLookupErrorRepr
def __init__(self, session):
self.session = session
self.config = session.config
self._arg2fixturedefs = {}
self._holderobjseen = set()
self._arg2finish = {}
self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))]
session.config.pluginmanager.register(self, "funcmanage")
def getfixtureinfo(self, node, func, cls, funcargs=True):
if funcargs and not hasattr(node, "nofuncargs"):
if cls is not None:
startindex = 1
else:
startindex = None
argnames = getfuncargnames(func, startindex)
else:
argnames = ()
usefixtures = getattr(func, "usefixtures", None)
initialnames = argnames
if usefixtures is not None:
initialnames = usefixtures.args + initialnames
fm = node.session._fixturemanager
names_closure, arg2fixturedefs = fm.getfixtureclosure(initialnames,
node)
return FuncFixtureInfo(argnames, names_closure, arg2fixturedefs)
def pytest_plugin_registered(self, plugin):
nodeid = None
try:
p = py.path.local(plugin.__file__)
except AttributeError:
pass
else:
# construct the base nodeid which is later used to check
# what fixtures are visible for particular tests (as denoted
# by their test id)
if p.basename.startswith("conftest.py"):
nodeid = p.dirpath().relto(self.config.rootdir)
if p.sep != "/":
nodeid = nodeid.replace(p.sep, "/")
self.parsefactories(plugin, nodeid)
def _getautousenames(self, nodeid):
""" return a tuple of fixture names to be used. """
autousenames = []
for baseid, basenames in self._nodeid_and_autousenames:
if nodeid.startswith(baseid):
if baseid:
i = len(baseid)
nextchar = nodeid[i:i+1]
if nextchar and nextchar not in ":/":
continue
autousenames.extend(basenames)
# make sure autousenames are sorted by scope, scopenum 0 is session
autousenames.sort(
key=lambda x: self._arg2fixturedefs[x][-1].scopenum)
return autousenames
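    # Example for the boundary check above (illustrative): names registered
    # for baseid "tests/sub" apply to nodeid "tests/sub/test_x.py::test_y"
    # but not to "tests/sub2/test_x.py", because the character following the
    # baseid must be ":" or "/".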
def getfixtureclosure(self, fixturenames, parentnode):
        # collect the closure of all fixtures, starting with the given
        # fixturenames as the initial set.  As we have to visit all
        # factory definitions anyway, we also return an arg2fixturedefs
# mapping so that the caller can reuse it and does not have
# to re-discover fixturedefs again for each fixturename
# (discovering matching fixtures for a given name/node is expensive)
parentid = parentnode.nodeid
fixturenames_closure = self._getautousenames(parentid)
def merge(otherlist):
for arg in otherlist:
if arg not in fixturenames_closure:
fixturenames_closure.append(arg)
merge(fixturenames)
arg2fixturedefs = {}
lastlen = -1
while lastlen != len(fixturenames_closure):
lastlen = len(fixturenames_closure)
for argname in fixturenames_closure:
if argname in arg2fixturedefs:
continue
fixturedefs = self.getfixturedefs(argname, parentid)
if fixturedefs:
arg2fixturedefs[argname] = fixturedefs
merge(fixturedefs[-1].argnames)
return fixturenames_closure, arg2fixturedefs
def pytest_generate_tests(self, metafunc):
for argname in metafunc.fixturenames:
faclist = metafunc._arg2fixturedefs.get(argname)
if faclist:
fixturedef = faclist[-1]
if fixturedef.params is not None:
func_params = getattr(getattr(metafunc.function, 'parametrize', None), 'args', [[None]])
# skip directly parametrized arguments
argnames = func_params[0]
if not isinstance(argnames, (tuple, list)):
argnames = [x.strip() for x in argnames.split(",") if x.strip()]
if argname not in func_params and argname not in argnames:
metafunc.parametrize(argname, fixturedef.params,
indirect=True, scope=fixturedef.scope,
ids=fixturedef.ids)
else:
continue # will raise FixtureLookupError at setup time
def pytest_collection_modifyitems(self, items):
# separate parametrized setups
items[:] = reorder_items(items)
def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False):
if nodeid is not NOTSET:
holderobj = node_or_obj
else:
holderobj = node_or_obj.obj
nodeid = node_or_obj.nodeid
if holderobj in self._holderobjseen:
return
self._holderobjseen.add(holderobj)
autousenames = []
for name in dir(holderobj):
obj = getattr(holderobj, name, None)
# fixture functions have a pytest_funcarg__ prefix (pre-2.3 style)
# or are "@pytest.fixture" marked
marker = getfixturemarker(obj)
if marker is None:
if not name.startswith(self._argprefix):
continue
if not callable(obj):
continue
marker = defaultfuncargprefixmarker
name = name[len(self._argprefix):]
elif not isinstance(marker, FixtureFunctionMarker):
# magic globals with __getattr__ might have got us a wrong
# fixture attribute
continue
else:
assert not name.startswith(self._argprefix)
fixturedef = FixtureDef(self, nodeid, name, obj,
marker.scope, marker.params,
yieldctx=marker.yieldctx,
unittest=unittest, ids=marker.ids)
faclist = self._arg2fixturedefs.setdefault(name, [])
if fixturedef.has_location:
faclist.append(fixturedef)
else:
# fixturedefs with no location are at the front
# so this inserts the current fixturedef after the
# existing fixturedefs from external plugins but
# before the fixturedefs provided in conftests.
i = len([f for f in faclist if not f.has_location])
faclist.insert(i, fixturedef)
if marker.autouse:
autousenames.append(name)
if autousenames:
self._nodeid_and_autousenames.append((nodeid or '', autousenames))
def getfixturedefs(self, argname, nodeid):
try:
fixturedefs = self._arg2fixturedefs[argname]
except KeyError:
return None
else:
return tuple(self._matchfactories(fixturedefs, nodeid))
def _matchfactories(self, fixturedefs, nodeid):
for fixturedef in fixturedefs:
if nodeid.startswith(fixturedef.baseid):
yield fixturedef
def fail_fixturefunc(fixturefunc, msg):
fs, lineno = getfslineno(fixturefunc)
location = "%s:%s" % (fs, lineno+1)
source = _pytest._code.Source(fixturefunc)
pytest.fail(msg + ":\n\n" + str(source.indent()) + "\n" + location,
pytrace=False)
def call_fixture_func(fixturefunc, request, kwargs, yieldctx):
if yieldctx:
if not is_generator(fixturefunc):
fail_fixturefunc(fixturefunc,
msg="yield_fixture requires yield statement in function")
iter = fixturefunc(**kwargs)
next = getattr(iter, "__next__", None)
if next is None:
next = getattr(iter, "next")
res = next()
def teardown():
try:
next()
except StopIteration:
pass
else:
fail_fixturefunc(fixturefunc,
"yield_fixture function has more than one 'yield'")
request.addfinalizer(teardown)
else:
if is_generator(fixturefunc):
fail_fixturefunc(fixturefunc,
msg="pytest.fixture functions cannot use ``yield``. "
"Instead write and return an inner function/generator "
"and let the consumer call and iterate over it.")
res = fixturefunc(**kwargs)
return res
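# Illustrative example of the yieldctx protocol handled above
# (acquire/release are placeholder names):
#
#     @pytest.yield_fixture
#     def resource():
#         handle = acquire()    # setup; the yielded value goes to the test
#         yield handle
#         handle.release()      # runs in the registered teardown finalizer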
class FixtureDef:
""" A container for a factory definition. """
def __init__(self, fixturemanager, baseid, argname, func, scope, params,
yieldctx, unittest=False, ids=None):
self._fixturemanager = fixturemanager
self.baseid = baseid or ''
self.has_location = baseid is not None
self.func = func
self.argname = argname
self.scope = scope
self.scopenum = scopes.index(scope or "function")
self.params = params
startindex = unittest and 1 or None
self.argnames = getfuncargnames(func, startindex=startindex)
self.yieldctx = yieldctx
self.unittest = unittest
self.ids = ids
self._finalizer = []
def addfinalizer(self, finalizer):
self._finalizer.append(finalizer)
def finish(self):
try:
while self._finalizer:
func = self._finalizer.pop()
func()
finally:
# even if finalization fails, we invalidate
# the cached fixture value
if hasattr(self, "cached_result"):
del self.cached_result
def execute(self, request):
# get required arguments and register our own finish()
# with their finalization
kwargs = {}
for argname in self.argnames:
fixturedef = request._get_active_fixturedef(argname)
result, arg_cache_key, exc = fixturedef.cached_result
request._check_scope(argname, request.scope, fixturedef.scope)
kwargs[argname] = result
if argname != "request":
fixturedef.addfinalizer(self.finish)
my_cache_key = request.param_index
cached_result = getattr(self, "cached_result", None)
if cached_result is not None:
result, cache_key, err = cached_result
if my_cache_key == cache_key:
if err is not None:
py.builtin._reraise(*err)
else:
return result
# we have a previous but differently parametrized fixture instance
# so we need to tear it down before creating a new one
self.finish()
assert not hasattr(self, "cached_result")
fixturefunc = self.func
if self.unittest:
if request.instance is not None:
# bind the unbound method to the TestCase instance
fixturefunc = self.func.__get__(request.instance)
else:
# the fixture function needs to be bound to the actual
# request.instance so that code working with "self" behaves
# as expected.
if request.instance is not None:
fixturefunc = getimfunc(self.func)
if fixturefunc != self.func:
fixturefunc = fixturefunc.__get__(request.instance)
try:
result = call_fixture_func(fixturefunc, request, kwargs,
self.yieldctx)
except Exception:
self.cached_result = (None, my_cache_key, sys.exc_info())
raise
self.cached_result = (result, my_cache_key, None)
return result
def __repr__(self):
return ("<FixtureDef name=%r scope=%r baseid=%r >" %
(self.argname, self.scope, self.baseid))
def num_mock_patch_args(function):
""" return number of arguments used up by mock arguments (if any) """
patchings = getattr(function, "patchings", None)
if not patchings:
return 0
mock = sys.modules.get("mock", sys.modules.get("unittest.mock", None))
if mock is not None:
return len([p for p in patchings
if not p.attribute_name and p.new is mock.DEFAULT])
return len(patchings)
def getfuncargnames(function, startindex=None):
# XXX merge with main.py's varnames
#assert not isclass(function)
realfunction = function
while hasattr(realfunction, "__wrapped__"):
realfunction = realfunction.__wrapped__
if startindex is None:
startindex = inspect.ismethod(function) and 1 or 0
if realfunction != function:
startindex += num_mock_patch_args(function)
function = realfunction
if isinstance(function, functools.partial):
argnames = inspect.getargs(_pytest._code.getrawcode(function.func))[0]
partial = function
argnames = argnames[len(partial.args):]
if partial.keywords:
for kw in partial.keywords:
argnames.remove(kw)
else:
argnames = inspect.getargs(_pytest._code.getrawcode(function))[0]
defaults = getattr(function, 'func_defaults',
getattr(function, '__defaults__', None)) or ()
numdefaults = len(defaults)
if numdefaults:
return tuple(argnames[startindex:-numdefaults])
return tuple(argnames[startindex:])
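# Example (illustrative): for ``def f(self, a, b, c=0)`` and startindex=1
# this returns ("a", "b"): "self" is skipped and trailing arguments with
# defaults are stripped.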
# algorithm for sorting on a per-parametrized resource setup basis
# it is called for scopenum==0 (session) first and performs sorting
# down to the lower scopes such as to minimize number of "high scope"
# setups and teardowns
def reorder_items(items):
argkeys_cache = {}
for scopenum in range(0, scopenum_function):
argkeys_cache[scopenum] = d = {}
for item in items:
keys = set(get_parametrized_fixture_keys(item, scopenum))
if keys:
d[item] = keys
return reorder_items_atscope(items, set(), argkeys_cache, 0)
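# Example (illustrative): with a session-scoped fixture parametrized over
# ("a", "b"), reordering groups the items so that all tests using param "a"
# run before any test using param "b", minimizing the number of
# session-scoped setups and teardowns.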
def reorder_items_atscope(items, ignore, argkeys_cache, scopenum):
if scopenum >= scopenum_function or len(items) < 3:
return items
items_done = []
while 1:
items_before, items_same, items_other, newignore = \
slice_items(items, ignore, argkeys_cache[scopenum])
items_before = reorder_items_atscope(
            items_before, ignore, argkeys_cache, scopenum+1)
if items_same is None:
# nothing to reorder in this scope
assert items_other is None
return items_done + items_before
items_done.extend(items_before)
items = items_same + items_other
ignore = newignore
def slice_items(items, ignore, scoped_argkeys_cache):
    # we pick the first item which uses a fixture instance in the
    # requested scope and which we haven't seen yet.  We slice the input
    # items list into a list of items_before, items_same and
    # items_other
if scoped_argkeys_cache: # do we need to do work at all?
it = iter(items)
# first find a slicing key
for i, item in enumerate(it):
argkeys = scoped_argkeys_cache.get(item)
if argkeys is not None:
argkeys = argkeys.difference(ignore)
if argkeys: # found a slicing key
slicing_argkey = argkeys.pop()
items_before = items[:i]
items_same = [item]
items_other = []
# now slice the remainder of the list
for item in it:
argkeys = scoped_argkeys_cache.get(item)
if argkeys and slicing_argkey in argkeys and \
slicing_argkey not in ignore:
items_same.append(item)
else:
items_other.append(item)
newignore = ignore.copy()
newignore.add(slicing_argkey)
return (items_before, items_same, items_other, newignore)
return items, None, None, None
def get_parametrized_fixture_keys(item, scopenum):
""" return list of keys for all parametrized arguments which match
the specified scope. """
assert scopenum < scopenum_function # function
try:
cs = item.callspec
except AttributeError:
pass
else:
        # cs.indices.items() yields argnames in arbitrary order, but
        # different functions (items) can change the order of arguments
        # anyway, so it doesn't matter much
for argname, param_index in cs.indices.items():
if cs._arg2scopenum[argname] != scopenum:
continue
if scopenum == 0: # session
key = (argname, param_index)
elif scopenum == 1: # module
key = (argname, param_index, item.fspath)
elif scopenum == 2: # class
key = (argname, param_index, item.fspath, item.cls)
yield key
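# Example (illustrative): a module-scoped parametrized argument "db" with
# param index 0 in test_foo.py yields the key ("db", 0, <fspath of test_foo.py>).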
def xunitsetup(obj, name):
meth = getattr(obj, name, None)
if getfixturemarker(meth) is None:
return meth
def getfixturemarker(obj):
""" return fixturemarker or None if it doesn't exist or raised
exceptions."""
try:
return getattr(obj, "_pytestfixturefunction", None)
except KeyboardInterrupt:
raise
except Exception:
# some objects raise errors like request (from flask import request)
# we don't expect them to be fixture functions
return None
scopename2class = {
'class': Class,
'module': Module,
'function': pytest.Item,
}
def get_scope_node(node, scope):
cls = scopename2class.get(scope)
if cls is None:
if scope == "session":
return node.session
raise ValueError("unknown scope")
return node.getparent(cls)
| mit |
rockyzhang/zhangyanhit-python-for-android-mips | python3-alpha/python3-src/Lib/test/test_wait3.py | 63 | 1057 | """This test checks for correct wait3() behavior.
"""
import os
import time
import unittest
from test.fork_wait import ForkWait
from test.support import run_unittest, reap_children
try:
os.fork
except AttributeError:
raise unittest.SkipTest("os.fork not defined -- skipping test_wait3")
try:
os.wait3
except AttributeError:
raise unittest.SkipTest("os.wait3 not defined -- skipping test_wait3")
class Wait3Test(ForkWait):
def wait_impl(self, cpid):
for i in range(10):
# wait3() shouldn't hang, but some of the buildbots seem to hang
# in the forking tests. This is an attempt to fix the problem.
spid, status, rusage = os.wait3(os.WNOHANG)
if spid == cpid:
break
time.sleep(1.0)
self.assertEqual(spid, cpid)
self.assertEqual(status, 0, "cause = %d, exit = %d" % (status&0xff, status>>8))
self.assertTrue(rusage)
def test_main():
run_unittest(Wait3Test)
reap_children()
if __name__ == "__main__":
test_main()
| apache-2.0 |
cryptovein/p2pool-mintcoin | SOAPpy/WSDL.py | 294 | 5100 | """Parse Web Services Description Language (WSDL) to get SOAP methods.
Rudimentary support."""
ident = '$Id: WSDL.py 1467 2008-05-16 23:32:51Z warnes $'
from version import __version__
import wstools
import xml
from Errors import Error
from Client import SOAPProxy, SOAPAddress
from Config import Config
import urllib
class Proxy:
"""WSDL Proxy.
SOAPProxy wrapper that parses method names, namespaces, soap actions from
the web service description language (WSDL) file passed into the
constructor. The WSDL reference can be passed in as a stream, an url, a
file name, or a string.
Loads info into self.methods, a dictionary with methodname keys and values
of WSDLTools.SOAPCallinfo.
For example,
url = 'http://www.xmethods.org/sd/2001/TemperatureService.wsdl'
wsdl = WSDL.Proxy(url)
print len(wsdl.methods) # 1
print wsdl.methods.keys() # getTemp
See WSDLTools.SOAPCallinfo for more info on each method's attributes.
"""
def __init__(self, wsdlsource, config=Config, **kw ):
reader = wstools.WSDLTools.WSDLReader()
self.wsdl = None
# From Mark Pilgrim's "Dive Into Python" toolkit.py--open anything.
if self.wsdl is None and hasattr(wsdlsource, "read"):
print 'stream:', wsdlsource
try:
self.wsdl = reader.loadFromStream(wsdlsource)
except xml.parsers.expat.ExpatError, e:
newstream = urllib.URLopener(key_file=config.SSL.key_file, cert_file=config.SSL.cert_file).open(wsdlsource)
buf = newstream.readlines()
raise Error, "Unable to parse WSDL file at %s: \n\t%s" % \
(wsdlsource, "\t".join(buf))
# NOT TESTED (as of April 17, 2003)
#if self.wsdl is None and wsdlsource == '-':
# import sys
# self.wsdl = reader.loadFromStream(sys.stdin)
# print 'stdin'
if self.wsdl is None:
try:
file(wsdlsource)
self.wsdl = reader.loadFromFile(wsdlsource)
#print 'file'
except (IOError, OSError): pass
except xml.parsers.expat.ExpatError, e:
newstream = urllib.urlopen(wsdlsource)
buf = newstream.readlines()
raise Error, "Unable to parse WSDL file at %s: \n\t%s" % \
(wsdlsource, "\t".join(buf))
if self.wsdl is None:
try:
stream = urllib.URLopener(key_file=config.SSL.key_file, cert_file=config.SSL.cert_file).open(wsdlsource)
self.wsdl = reader.loadFromStream(stream, wsdlsource)
except (IOError, OSError): pass
except xml.parsers.expat.ExpatError, e:
newstream = urllib.urlopen(wsdlsource)
buf = newstream.readlines()
raise Error, "Unable to parse WSDL file at %s: \n\t%s" % \
(wsdlsource, "\t".join(buf))
if self.wsdl is None:
import StringIO
self.wsdl = reader.loadFromString(str(wsdlsource))
#print 'string'
# Package wsdl info as a dictionary of remote methods, with method name
# as key (based on ServiceProxy.__init__ in ZSI library).
self.methods = {}
service = self.wsdl.services[0]
port = service.ports[0]
name = service.name
binding = port.getBinding()
portType = binding.getPortType()
for operation in portType.operations:
callinfo = wstools.WSDLTools.callInfoFromWSDL(port, operation.name)
self.methods[callinfo.methodName] = callinfo
self.soapproxy = SOAPProxy('http://localhost/dummy.webservice',
config=config, **kw)
def __str__(self):
s = ''
for method in self.methods.values():
s += str(method)
return s
def __getattr__(self, name):
"""Set up environment then let parent class handle call.
        Raises AttributeError if method name is not found."""
if not self.methods.has_key(name): raise AttributeError, name
callinfo = self.methods[name]
self.soapproxy.proxy = SOAPAddress(callinfo.location)
self.soapproxy.namespace = callinfo.namespace
self.soapproxy.soapaction = callinfo.soapAction
return self.soapproxy.__getattr__(name)
def show_methods(self):
for key in self.methods.keys():
method = self.methods[key]
print "Method Name:", key.ljust(15)
print
inps = method.inparams
for parm in range(len(inps)):
details = inps[parm]
print " In #%d: %s (%s)" % (parm, details.name, details.type)
print
outps = method.outparams
for parm in range(len(outps)):
details = outps[parm]
print " Out #%d: %s (%s)" % (parm, details.name, details.type)
print
| gpl-3.0 |
jevonearth/frappe | frappe/async.py | 6 | 6369 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import os
import time
import redis
from functools import wraps
from frappe.utils import get_site_path
from frappe import conf
END_LINE = '<!-- frappe: end-file -->'
TASK_LOG_MAX_AGE = 86400 # 1 day in seconds
redis_server = None
def handler(f):
cmd = f.__module__ + '.' + f.__name__
def run(args, set_in_response=True, hijack_std=False):
from frappe.tasks import run_async_task
from frappe.handler import execute_cmd
if frappe.conf.disable_async:
return execute_cmd(cmd, from_async=True)
args = frappe._dict(args)
task = run_async_task.delay(site=frappe.local.site,
user=(frappe.session and frappe.session.user) or 'Administrator', cmd=cmd,
form_dict=args, hijack_std=hijack_std)
if set_in_response:
frappe.local.response['task_id'] = task.id
return task.id
@wraps(f)
def queue(*args, **kwargs):
task_id = run(frappe.local.form_dict, set_in_response=True)
return {
"status": "queued",
"task_id": task_id
}
queue.async = True
queue.queue = f
queue.run = run
frappe.whitelisted.append(f)
frappe.whitelisted.append(queue)
return queue
@frappe.whitelist()
def get_pending_tasks_for_doc(doctype, docname):
return frappe.db.sql_list("select name from `tabAsync Task` where status in ('Queued', 'Running') and reference_doctype=%s and reference_name=%s", (doctype, docname))
@handler
def ping():
from time import sleep
sleep(1)
return "pong"
@frappe.whitelist()
def get_task_status(task_id):
from frappe.celery_app import get_celery
c = get_celery()
a = c.AsyncResult(task_id)
frappe.local.response['response'] = a.result
return {
"state": a.state,
"progress": 0
}
def set_task_status(task_id, status, response=None):
if not response:
response = {}
response.update({
"status": status,
"task_id": task_id
})
emit_via_redis("task_status_change", response, room="task:" + task_id)
def remove_old_task_logs():
logs_path = get_site_path('task-logs')
def full_path(_file):
return os.path.join(logs_path, _file)
files_to_remove = [full_path(_file) for _file in os.listdir(logs_path)]
files_to_remove = [_file for _file in files_to_remove if is_file_old(_file) and os.path.isfile(_file)]
for _file in files_to_remove:
os.remove(_file)
def is_file_old(file_path):
return ((time.time() - os.stat(file_path).st_mtime) > TASK_LOG_MAX_AGE)
def publish_realtime(event=None, message=None, room=None, user=None, doctype=None, docname=None, now=False):
"""Publish real-time updates
:param event: Event name, like `task_progress` etc. that will be handled by the client (default is `task_progress` if within task or `global`)
:param message: JSON message object. For async must contain `task_id`
:param room: Room in which to publish update (default entire site)
:param user: Transmit to user
:param doctype: Transmit to doctype, docname
:param docname: Transmit to doctype, docname"""
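	# Illustrative usage (hypothetical event name and values):
	#     publish_realtime("msgprint", {"message": "Saved"},
	#                      user="test@example.com")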
if message is None:
message = {}
if event is None:
if frappe.local.task_id:
event = "task_progress"
else:
event = "global"
if not room:
if frappe.local.task_id:
room = get_task_progress_room()
if not "task_id" in message:
message["task_id"] = frappe.local.task_id
now = True
elif user:
room = get_user_room(user)
elif doctype and docname:
room = get_doc_room(doctype, docname)
else:
room = get_site_room()
if now:
emit_via_redis(event, message, room)
else:
frappe.local.realtime_log.append([event, message, room])
def emit_via_redis(event, message, room):
"""Publish real-time updates via redis
:param event: Event name, like `task_progress` etc.
:param message: JSON message object. For async must contain `task_id`
:param room: name of the room"""
r = get_redis_server()
try:
r.publish('events', frappe.as_json({'event': event, 'message': message, 'room': room}))
except redis.exceptions.ConnectionError:
# print frappe.get_traceback()
pass
def put_log(line_no, line, task_id=None):
r = get_redis_server()
if not task_id:
task_id = frappe.local.task_id
task_progress_room = get_task_progress_room()
task_log_key = "task_log:" + task_id
publish_realtime('task_progress', {
"message": {
"lines": {line_no: line}
},
"task_id": task_id
}, room=task_progress_room, now=True)
r.hset(task_log_key, line_no, line)
r.expire(task_log_key, 3600)
def get_redis_server():
"""Returns memcache connection."""
global redis_server
if not redis_server:
from redis import Redis
redis_server = Redis.from_url(conf.get("async_redis_server") or "redis://localhost:12311")
return redis_server
class FileAndRedisStream(file):
def __init__(self, *args, **kwargs):
ret = super(FileAndRedisStream, self).__init__(*args, **kwargs)
self.count = 0
return ret
def write(self, data):
ret = super(FileAndRedisStream, self).write(data)
if frappe.local.task_id:
put_log(self.count, data, task_id=frappe.local.task_id)
self.count += 1
return ret
def get_std_streams(task_id):
stdout = FileAndRedisStream(get_task_log_file_path(task_id, 'stdout'), 'w')
# stderr = FileAndRedisStream(get_task_log_file_path(task_id, 'stderr'), 'w')
return stdout, stdout
def get_task_log_file_path(task_id, stream_type):
logs_dir = frappe.utils.get_site_path('task-logs')
return os.path.join(logs_dir, task_id + '.' + stream_type)
@frappe.whitelist(allow_guest=True)
def can_subscribe_doc(doctype, docname, sid):
from frappe.sessions import Session
from frappe.exceptions import PermissionError
session = Session(None, resume=True).get_session_data()
if not frappe.has_permission(user=session.user, doctype=doctype, doc=docname, ptype='read'):
raise PermissionError()
return True
@frappe.whitelist(allow_guest=True)
def get_user_info(sid):
from frappe.sessions import Session
session = Session(None, resume=True).get_session_data()
return {
'user': session.user,
}
def get_doc_room(doctype, docname):
return ''.join([frappe.local.site, ':doc:', doctype, '/', docname])
def get_user_room(user):
return ''.join([frappe.local.site, ':user:', user])
def get_site_room():
return ''.join([frappe.local.site, ':all'])
def get_task_progress_room():
return "task_progress:" + frappe.local.task_id
| mit |
darktears/chromium-crosswalk | tools/telemetry/third_party/typ/tools/cov.py | 35 | 4646 | #!/usr/bin/python
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import sys
import textwrap
is_python3 = bool(sys.version_info.major == 3)
ALL_PRAGMAS = ['no cover', 'no win32', 'python2', 'python3', 'untested',
'win32']
DEFAULT_PRAGMAS = ALL_PRAGMAS[:]
if is_python3:
DEFAULT_PRAGMAS.remove('python3')
else:
DEFAULT_PRAGMAS.remove('python2')
if sys.platform == 'win32':
DEFAULT_PRAGMAS.remove('win32')
else:
DEFAULT_PRAGMAS.remove('no win32')
def add_arguments(parser):
parser.add_argument('--no-pragmas', action='store_true', default=False,
help='Show all uncovered lines (no pragmas).')
parser.add_argument('--path', action='append', default=[],
help='Prepend given directories to sys.path.')
parser.add_argument('--pragma', action='append', default=[],
help=('The coverage pragmas to honor '
'(defaults to %s).' % DEFAULT_PRAGMAS))
parser.add_argument('--show', action='append', default=[],
help='Show code protected by the specified pragmas '
'(uses all pragmas *except* for the ones '
'specified).')
parser.add_argument('--show-missing', action='store_true',
default=False, help='Show missing lines.')
parser.add_argument('--source', action='append', default=[],
help='Limit coverage data to the given directories.')
parser.formatter_class = argparse.RawTextHelpFormatter
parser.epilog = textwrap.dedent("""
Valid pragma values are:
'no cover': The default coverage pragma, this now means we
truly cannot cover it.
'no win32': Code that only executes when not on Windows.
'python2': Code that only executes under Python2.
          'python3': Code that only executes under Python3.
'untested': Code that does not yet have tests.
'win32': Code that only executes on Windows.
In typ, we aim for 'no cover' to only apply to code that executes only
when coverage is not available (and hence can never be counted). Most
code, if annotated at all, should be 'untested', and we should strive
for 'untested' to not be used, either.
""")
def argv_from_args(args):
argv = []
if args.no_pragmas:
argv.append('--no-pragmas')
for arg in args.path:
argv.extend(['--path', arg])
for arg in args.show:
argv.extend(['--show', arg])
if args.show_missing:
argv.append('--show-missing')
for arg in args.source:
argv.extend(['--source', arg])
for arg in args.pragma:
argv.extend(['--pragma', arg])
return argv
def main(argv=None):
parser = argparse.ArgumentParser()
add_arguments(parser)
args, remaining_args = parser.parse_known_args(argv)
for path in args.path:
if path not in sys.path:
sys.path.append(path)
try:
import coverage
from coverage.execfile import run_python_module, run_python_file
except ImportError:
print("Error: coverage is not available.")
sys.exit(1)
cov = coverage.coverage(source=args.source)
cov.erase()
cov.clear_exclude()
if args.no_pragmas:
args.pragma = []
args.pragma = args.pragma or DEFAULT_PRAGMAS
if args.show:
args.show_missing = True
for pragma in args.show:
if pragma in args.pragma:
args.pragma.remove(pragma)
for pragma in args.pragma:
cov.exclude('pragma: %s' % pragma)
ret = 0
cov.start()
try:
if remaining_args[0] == '-m':
run_python_module(remaining_args[1], remaining_args[1:])
else:
run_python_file(remaining_args[0], remaining_args)
except SystemExit as e:
ret = e.code
cov.stop()
cov.save()
cov.report(show_missing=args.show_missing)
return ret
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
ahmedaljazzar/edx-platform | openedx/core/djangoapps/header_control/tests/test_decorators.py | 34 | 1095 | """Tests for remove_headers and force_header decorator. """
from django.http import HttpResponse, HttpRequest
from django.test import TestCase
from openedx.core.djangoapps.header_control.decorators import remove_headers, force_header
def fake_view(_request):
"""Fake view that returns an empty response."""
return HttpResponse()
class TestRemoveHeaders(TestCase):
"""Test the `remove_headers` decorator."""
def test_remove_headers(self):
request = HttpRequest()
wrapper = remove_headers('Vary', 'Accept-Encoding')
wrapped_view = wrapper(fake_view)
response = wrapped_view(request)
self.assertEqual(len(response.remove_headers), 2)
class TestForceHeader(TestCase):
"""Test the `force_header` decorator."""
def test_force_header(self):
request = HttpRequest()
wrapper = force_header('Vary', 'Origin')
wrapped_view = wrapper(fake_view)
response = wrapped_view(request)
self.assertEqual(len(response.force_headers), 1)
self.assertEqual(response.force_headers['Vary'], 'Origin')
| agpl-3.0 |
ilya-epifanov/ansible | lib/ansible/plugins/strategies/linear.py | 1 | 14293 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError
from ansible.executor.play_iterator import PlayIterator
from ansible.playbook.block import Block
from ansible.playbook.included_file import IncludedFile
from ansible.playbook.task import Task
from ansible.plugins import action_loader
from ansible.plugins.strategies import StrategyBase
from ansible.template import Templar
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class StrategyModule(StrategyBase):
def _get_next_task_lockstep(self, hosts, iterator):
'''
Returns a list of (host, task) tuples, where the task may
be a noop task to keep the iterator in lock step across
all hosts.
'''
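        # Example (illustrative): if host A is still inside a rescue block
        # while host B has already left it, B is handed the noop task built
        # below, so both hosts advance through the play in lock step.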
noop_task = Task()
noop_task.action = 'meta'
noop_task.args['_raw_params'] = 'noop'
noop_task.set_loader(iterator._play._loader)
host_tasks = {}
display.debug("building list of next tasks for hosts")
for host in hosts:
host_tasks[host.name] = iterator.get_next_task_for_host(host, peek=True)
display.debug("done building task lists")
num_setups = 0
num_tasks = 0
num_rescue = 0
num_always = 0
lowest_cur_block = len(iterator._blocks)
display.debug("counting tasks in each state of execution")
for (k, v) in host_tasks.iteritems():
if v is None:
continue
(s, t) = v
if t is None:
continue
if s.cur_block < lowest_cur_block and s.run_state != PlayIterator.ITERATING_COMPLETE:
lowest_cur_block = s.cur_block
if s.run_state == PlayIterator.ITERATING_SETUP:
num_setups += 1
elif s.run_state == PlayIterator.ITERATING_TASKS:
num_tasks += 1
elif s.run_state == PlayIterator.ITERATING_RESCUE:
num_rescue += 1
elif s.run_state == PlayIterator.ITERATING_ALWAYS:
num_always += 1
display.debug("done counting tasks in each state of execution")
def _advance_selected_hosts(hosts, cur_block, cur_state):
'''
This helper returns the task for all hosts in the requested
state, otherwise they get a noop dummy task. This also advances
the state of the host, since the given states are determined
while using peek=True.
'''
# we return the values in the order they were originally
# specified in the given hosts array
rvals = []
display.debug("starting to advance hosts")
for host in hosts:
host_state_task = host_tasks[host.name]
if host_state_task is None:
continue
(s, t) = host_state_task
if t is None:
continue
if s.run_state == cur_state and s.cur_block == cur_block:
new_t = iterator.get_next_task_for_host(host)
rvals.append((host, t))
else:
rvals.append((host, noop_task))
display.debug("done advancing hosts to next task")
return rvals
# if any hosts are in ITERATING_SETUP, return the setup task
# while all other hosts get a noop
if num_setups:
display.debug("advancing hosts in ITERATING_SETUP")
return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_SETUP)
# if any hosts are in ITERATING_TASKS, return the next normal
# task for these hosts, while all other hosts get a noop
if num_tasks:
display.debug("advancing hosts in ITERATING_TASKS")
return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_TASKS)
# if any hosts are in ITERATING_RESCUE, return the next rescue
# task for these hosts, while all other hosts get a noop
if num_rescue:
display.debug("advancing hosts in ITERATING_RESCUE")
return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_RESCUE)
# if any hosts are in ITERATING_ALWAYS, return the next always
# task for these hosts, while all other hosts get a noop
if num_always:
display.debug("advancing hosts in ITERATING_ALWAYS")
return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_ALWAYS)
# at this point, everything must be ITERATING_COMPLETE, so we
# return None for all hosts in the list
display.debug("all hosts are done, so returning None's for all hosts")
return [(host, None) for host in hosts]
def run(self, iterator, play_context):
'''
The linear strategy is simple - get the next task and queue
it for all hosts, then wait for the queue to drain before
moving on to the next task
'''
        # iterate over each task, while there is one left to run
result = True
work_to_do = True
while work_to_do and not self._tqm._terminated:
try:
self._display.debug("getting the remaining hosts for this loop")
hosts_left = self._inventory.get_hosts(iterator._play.hosts)
self._display.debug("done getting the remaining hosts for this loop")
# queue up this task for each host in the inventory
callback_sent = False
work_to_do = False
host_results = []
host_tasks = self._get_next_task_lockstep(hosts_left, iterator)
# skip control
skip_rest = False
choose_step = True
for (host, task) in host_tasks:
if not task:
continue
run_once = False
work_to_do = True
# test to see if the task across all hosts points to an action plugin which
# sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
# will only send this task to the first host in the list.
try:
action = action_loader.get(task.action, class_only=True)
if task.run_once or getattr(action, 'BYPASS_HOST_LOOP', False):
run_once = True
except KeyError:
# we don't care here, because the action may simply not have a
# corresponding action plugin
pass
# check to see if this task should be skipped, due to it being a member of a
# role which has already run (and whether that role allows duplicate execution)
if task._role and task._role.has_run(host):
                        # If there is no metadata, the default behavior is to not allow duplicates;
                        # if there is metadata, check to see if the allow_duplicates flag was set to true
if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
self._display.debug("'%s' skipped because role has already run" % task)
continue
if task.action == 'meta':
self._execute_meta(task, play_context, iterator)
else:
# handle step if needed, skip meta actions as they are used internally
if self._step and choose_step:
if self._take_step(task):
choose_step = False
else:
skip_rest = True
break
self._display.debug("getting variables")
task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
task_vars = self.add_tqm_variables(task_vars, play=iterator._play)
templar = Templar(loader=self._loader, variables=task_vars)
self._display.debug("done getting variables")
if not callback_sent:
display.debug("sending task start callback, copying the task so we can template it temporarily")
saved_name = task.name
display.debug("done copying, going to template now")
try:
task.name = unicode(templar.template(task.name, fail_on_undefined=False))
display.debug("done templating")
except:
# just ignore any errors during task name templating,
# we don't care if it just shows the raw name
display.debug("templating failed for some reason")
pass
display.debug("here goes the callback...")
self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
task.name = saved_name
callback_sent = True
display.debug("sending task start callback")
self._blocked_hosts[host.get_name()] = True
self._queue_task(host, task, task_vars, play_context)
results = self._process_pending_results(iterator)
host_results.extend(results)
# if we're bypassing the host loop, break out now
if run_once:
break
# go to next host/task group
if skip_rest:
continue
self._display.debug("done queuing things up, now waiting for results queue to drain")
results = self._wait_on_pending_results(iterator)
host_results.extend(results)
if not work_to_do and len(iterator.get_failed_hosts()) > 0:
self._display.debug("out of hosts to run on")
self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
result = False
break
try:
included_files = IncludedFile.process_include_results(host_results, self._tqm, iterator=iterator, loader=self._loader, variable_manager=self._variable_manager)
except AnsibleError, e:
return False
if len(included_files) > 0:
noop_task = Task()
noop_task.action = 'meta'
noop_task.args['_raw_params'] = 'noop'
noop_task.set_loader(iterator._play._loader)
all_blocks = dict((host, []) for host in hosts_left)
for included_file in included_files:
# included hosts get the task list while those excluded get an equal-length
# list of noop tasks, to make sure that they continue running in lock-step
try:
new_blocks = self._load_included_file(included_file, iterator=iterator)
except AnsibleError, e:
for host in included_file._hosts:
iterator.mark_host_failed(host)
self._display.warning(str(e))
continue
for new_block in new_blocks:
noop_block = Block(parent_block=task._block)
noop_block.block = [noop_task for t in new_block.block]
noop_block.always = [noop_task for t in new_block.always]
noop_block.rescue = [noop_task for t in new_block.rescue]
for host in hosts_left:
if host in included_file._hosts:
task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=included_file._task)
final_block = new_block.filter_tagged_tasks(play_context, task_vars)
all_blocks[host].append(final_block)
else:
all_blocks[host].append(noop_block)
for host in hosts_left:
iterator.add_tasks(host, all_blocks[host])
self._display.debug("results queue empty")
except (IOError, EOFError), e:
self._display.debug("got IOError/EOFError in task loop: %s" % e)
# most likely an abort, return failed
return False
# run the base class run() method, which executes the cleanup function
# and runs any outstanding handlers which have been triggered
return super(StrategyModule, self).run(iterator, play_context, result)
| gpl-3.0 |
ceejatec/git-repo | subcmds/version.py | 90 | 1376 | #
# Copyright (C) 2009 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
from command import Command, MirrorSafeCommand
from git_command import git
from git_refs import HEAD
class Version(Command, MirrorSafeCommand):
wrapper_version = None
wrapper_path = None
common = False
helpSummary = "Display the version of repo"
helpUsage = """
%prog
"""
def Execute(self, opt, args):
rp = self.manifest.repoProject
rem = rp.GetRemote(rp.remote.name)
print('repo version %s' % rp.work_git.describe(HEAD))
print(' (from %s)' % rem.url)
if Version.wrapper_path is not None:
print('repo launcher version %s' % Version.wrapper_version)
print(' (from %s)' % Version.wrapper_path)
print(git.version().strip())
print('Python %s' % sys.version)
| apache-2.0 |
programadorjc/django | tests/user_commands/tests.py | 205 | 7165 | import os
from django.apps import apps
from django.core import management
from django.core.management import BaseCommand, CommandError, find_commands
from django.core.management.utils import find_command, popen_wrapper
from django.db import connection
from django.test import SimpleTestCase, ignore_warnings, override_settings
from django.test.utils import captured_stderr, captured_stdout, extend_sys_path
from django.utils import translation
from django.utils._os import upath
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.six import StringIO
# A minimal set of apps to avoid system checks running on all apps.
@override_settings(
INSTALLED_APPS=[
'django.contrib.auth',
'django.contrib.contenttypes',
'user_commands',
],
)
class CommandTests(SimpleTestCase):
def test_command(self):
out = StringIO()
management.call_command('dance', stdout=out)
self.assertIn("I don't feel like dancing Rock'n'Roll.\n", out.getvalue())
def test_command_style(self):
out = StringIO()
management.call_command('dance', style='Jive', stdout=out)
self.assertIn("I don't feel like dancing Jive.\n", out.getvalue())
# Passing options as arguments also works (thanks argparse)
management.call_command('dance', '--style', 'Jive', stdout=out)
self.assertIn("I don't feel like dancing Jive.\n", out.getvalue())
def test_language_preserved(self):
out = StringIO()
with translation.override('fr'):
management.call_command('dance', stdout=out)
self.assertEqual(translation.get_language(), 'fr')
def test_explode(self):
""" Test that an unknown command raises CommandError """
self.assertRaises(CommandError, management.call_command, ('explode',))
def test_system_exit(self):
""" Exception raised in a command should raise CommandError with
call_command, but SystemExit when run from command line
"""
with self.assertRaises(CommandError):
management.call_command('dance', example="raise")
with captured_stderr() as stderr, self.assertRaises(SystemExit):
management.ManagementUtility(['manage.py', 'dance', '--example=raise']).execute()
self.assertIn("CommandError", stderr.getvalue())
def test_deactivate_locale_set(self):
# Deactivate translation when set to true
out = StringIO()
with translation.override('pl'):
management.call_command('leave_locale_alone_false', stdout=out)
self.assertEqual(out.getvalue(), "")
def test_configured_locale_preserved(self):
# Leaves locale from settings when set to false
out = StringIO()
with translation.override('pl'):
management.call_command('leave_locale_alone_true', stdout=out)
self.assertEqual(out.getvalue(), "pl\n")
def test_find_command_without_PATH(self):
"""
find_command should still work when the PATH environment variable
doesn't exist (#22256).
"""
current_path = os.environ.pop('PATH', None)
try:
self.assertIsNone(find_command('_missing_'))
finally:
if current_path is not None:
os.environ['PATH'] = current_path
def test_discover_commands_in_eggs(self):
"""
Test that management commands can also be loaded from Python eggs.
"""
egg_dir = '%s/eggs' % os.path.dirname(upath(__file__))
egg_name = '%s/basic.egg' % egg_dir
with extend_sys_path(egg_name):
with self.settings(INSTALLED_APPS=['commandegg']):
cmds = find_commands(os.path.join(apps.get_app_config('commandegg').path, 'management'))
self.assertEqual(cmds, ['eggcommand'])
def test_call_command_option_parsing(self):
"""
When passing the long option name to call_command, the available option
key is the option dest name (#22985).
"""
out = StringIO()
management.call_command('dance', stdout=out, opt_3=True)
self.assertIn("option3", out.getvalue())
self.assertNotIn("opt_3", out.getvalue())
self.assertNotIn("opt-3", out.getvalue())
@ignore_warnings(category=RemovedInDjango110Warning)
def test_optparse_compatibility(self):
"""
optparse should be supported during Django 1.8/1.9 releases.
"""
out = StringIO()
management.call_command('optparse_cmd', stdout=out)
self.assertEqual(out.getvalue(), "All right, let's dance Rock'n'Roll.\n")
# Simulate command line execution
with captured_stdout() as stdout, captured_stderr():
management.execute_from_command_line(['django-admin', 'optparse_cmd'])
self.assertEqual(stdout.getvalue(), "All right, let's dance Rock'n'Roll.\n")
def test_calling_a_command_with_only_empty_parameter_should_ends_gracefully(self):
out = StringIO()
management.call_command('hal', "--empty", stdout=out)
self.assertIn("Dave, I can't do that.\n", out.getvalue())
def test_calling_command_with_app_labels_and_parameters_should_be_ok(self):
out = StringIO()
management.call_command('hal', 'myapp', "--verbosity", "3", stdout=out)
self.assertIn("Dave, my mind is going. I can feel it. I can feel it.\n", out.getvalue())
def test_calling_command_with_parameters_and_app_labels_at_the_end_should_be_ok(self):
out = StringIO()
management.call_command('hal', "--verbosity", "3", "myapp", stdout=out)
self.assertIn("Dave, my mind is going. I can feel it. I can feel it.\n", out.getvalue())
def test_calling_a_command_with_no_app_labels_and_parameters_should_raise_a_command_error(self):
out = StringIO()
with self.assertRaises(CommandError):
management.call_command('hal', stdout=out)
def test_output_transaction(self):
out = StringIO()
management.call_command('transaction', stdout=out, no_color=True)
output = out.getvalue().strip()
self.assertTrue(output.startswith(connection.ops.start_transaction_sql()))
self.assertTrue(output.endswith(connection.ops.end_transaction_sql()))
def test_call_command_no_checks(self):
"""
By default, call_command should not trigger the check framework, unless
specifically asked.
"""
self.counter = 0
def patched_check(self_, **kwargs):
self.counter = self.counter + 1
saved_check = BaseCommand.check
BaseCommand.check = patched_check
try:
management.call_command("dance", verbosity=0)
self.assertEqual(self.counter, 0)
management.call_command("dance", verbosity=0, skip_checks=False)
self.assertEqual(self.counter, 1)
finally:
BaseCommand.check = saved_check
class UtilsTests(SimpleTestCase):
def test_no_existent_external_program(self):
self.assertRaises(CommandError, popen_wrapper, ['a_42_command_that_doesnt_exist_42'])
| bsd-3-clause |
befair/gasistafelice | gasistafelice/rest/views/contextmenu.py | 3 | 3240 | from django.utils.translation import ugettext as _, ugettext_lazy as _lazy
from django.conf import settings
from django.core.urlresolvers import reverse
import copy, time
from socket import gethostname
from lib.shortcuts import render_to_response
#---------------------------------------------------------------------#
# #
#---------------------------------------------------------------------#
DEFAULT_CONTEXTMENU_ENTRIES = [
# This entry points to the resource page
{
'id' : 'page',
'icon' : 'info.png',
'descr': _('Resource page'),
'type' : 'url',
'data' : '#rest/%(resource_type)s/%(resource_id)s/',
},
]
#---------------------------------------------------------------------#
# #
#---------------------------------------------------------------------#
def timestamp_to_str(t):
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
def get_default_menu_entries(resource, user=None):
entries = copy.deepcopy(DEFAULT_CONTEXTMENU_ENTRIES)
for entry in entries:
entry['data'] = entry['data'] % {
'resource_type': resource.resource_type,
'resource_id' : resource.pk
}
#
# Mail TO
#
to = resource.preferred_contact_email
cc = ''
bcc = ''
subj = '[%s]' % (resource.site.name)
body_lines = []
#FUTURE TODO: mail template management
body = 'body=%s' % ('%0A'.join(body_lines))
#
# Return entry
#
mailto_entry = {
'id' : 'mailto',
'icon' : 'info.png',
'descr': 'Mail',
'type' : 'url',
'data' : 'mailto:%s?cc=%s&bcc=%s&subject=%s&%s' % (to, cc, bcc, subj, body) ,
}
entries.append(mailto_entry)
return entries
def __calculate_node_url(node):
try:
vname = "state.views.node_by_path"
kwargs = { 'path': node.name }
url = reverse(vname, args=[], kwargs=kwargs)
return url
except Exception, e:
return ''
#---------------------------------------------------------------------#
# #
#---------------------------------------------------------------------#
def get_external_menu_entries(resource, user=None):
#FUTURE TODO: provide hooks for external scripts
entries = []
return entries
#---------------------------------------------------------------------#
# #
#---------------------------------------------------------------------#
def get_context_menu(request):
resource = request.resource
user = request.user
entries = []
entries += get_default_menu_entries(resource, user)
entries += get_external_menu_entries(resource, user)
context = {
'static_url': settings.STATIC_URL
, 'resource': request.resource
, 'menu_entries': entries
}
pt = ['blocks/menu.xml']
return render_to_response(pt, context)
| agpl-3.0 |
bowang/tensorflow | tensorflow/python/estimator/canned/metric_keys.py | 48 | 1578 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Enum for model prediction keys."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.estimator import model_fn
class MetricKeys(object):
"""Metric key strings."""
LOSS = model_fn.LOSS_METRIC_KEY
LOSS_MEAN = model_fn.AVERAGE_LOSS_METRIC_KEY
ACCURACY = 'accuracy'
# This is the best the model could do by always predicting one class.
# Should be < ACCURACY in a trained model.
ACCURACY_BASELINE = 'accuracy_baseline'
AUC = 'auc'
AUC_PR = 'auc_precision_recall'
LABEL_MEAN = 'label/mean'
PREDICTION_MEAN = 'prediction/mean'
# The following require a threshold applied, should be float in range (0, 1).
ACCURACY_AT_THRESHOLD = 'accuracy/positive_threshold_%g'
PRECISION_AT_THRESHOLD = 'precision/positive_threshold_%g'
RECALL_AT_THRESHOLD = 'recall/positive_threshold_%g'
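  # Example (illustrative): ACCURACY_AT_THRESHOLD % 0.5 yields
  # 'accuracy/positive_threshold_0.5'.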
| apache-2.0 |
jcpyun/Django-facebook | django_facebook/api.py | 14 | 26484 | try:
from django.forms.utils import ValidationError
except ImportError:
from django.forms.util import ValidationError
from django_facebook import settings as facebook_settings, signals
from django_facebook.exceptions import FacebookException
from django_facebook.utils import get_user_model, mass_get_or_create, \
cleanup_oauth_url, get_profile_model, parse_signed_request, hash_key, \
try_get_profile, get_user_attribute
from open_facebook import exceptions as open_facebook_exceptions
from open_facebook.exceptions import OpenFacebookException
from open_facebook.utils import send_warning, validate_is_instance
import datetime
import json
import logging
try:
from dateutil.parser import parse as parse_date
except ImportError:
from django_facebook.utils import parse_like_datetime as parse_date
logger = logging.getLogger(__name__)
def require_persistent_graph(request, *args, **kwargs):
'''
    Just like get_persistent_graph, but instead of returning None
raise an OpenFacebookException if we can't access facebook
'''
kwargs['raise_'] = True
graph = get_persistent_graph(request, *args, **kwargs)
if not graph:
raise OpenFacebookException('please authenticate')
return graph
def require_facebook_graph(request, *args, **kwargs):
'''
    Just like get_facebook_graph, but instead of returning None
raise an OpenFacebookException if we can't access facebook
'''
kwargs['raise_'] = True
graph = get_facebook_graph(request, *args, **kwargs)
if not graph:
raise OpenFacebookException('please authenticate')
return graph
def get_persistent_graph(request, *args, **kwargs):
'''
    Wraps itself around get_facebook_graph,
    but stores the graph in the session, allowing usage across multiple
    pageviews.
    Note that Facebook sessions expire at some point; you can't store this
    for permanent usage,
    at least not without asking for the offline_access permission
'''
from open_facebook.api import OpenFacebook
if not request:
raise ValidationError('Request is required if you want to use persistent tokens')
graph = None
# some situations like an expired access token require us to refresh our
# graph
require_refresh = False
code = request.REQUEST.get('code')
if code:
require_refresh = True
local_graph = getattr(request, 'facebook', None)
if local_graph:
# gets the graph from the local memory if available
graph = local_graph
if not graph:
# search for the graph in the session
cached_graph_dict = request.session.get('graph_dict')
if cached_graph_dict:
graph = OpenFacebook()
graph.__setstate__(cached_graph_dict)
graph._me = None
if not graph or require_refresh:
# gets the new graph, note this might do token conversions (slow)
graph = get_facebook_graph(request, *args, **kwargs)
# if it's valid replace the old cache
if graph is not None and graph.access_token:
request.session['graph_dict'] = graph.__getstate__()
# add the current user id and cache the graph at the request level
_add_current_user_id(graph, request.user)
request.facebook = graph
return graph
def get_facebook_graph(request=None, access_token=None, redirect_uri=None, raise_=False):
'''
given a request from one of these
- js authentication flow (signed cookie)
- facebook app authentication flow (signed cookie)
- facebook oauth redirect (code param in url)
- mobile authentication flow (direct access_token)
- offline access token stored in user profile
returns a graph object
redirect_uri is the path from which you requested the token;
for some reason facebook needs exactly this uri when converting the code
to a token.
Falls back to the current page without code in the request params.
Specify redirect_uri if you are not posting and receiving the code
on the same page.
'''
# this is not a production flow, but very handy for testing
if not access_token and request is not None and request.REQUEST.get('access_token'):
access_token = request.REQUEST['access_token']
# should dropping query params be part of the open facebook api?
# maybe; this is a bit weird...
from open_facebook import OpenFacebook, FacebookAuthorization
from django.core.cache import cache
expires = None
if hasattr(request, 'facebook') and request.facebook:
graph = request.facebook
_add_current_user_id(graph, request.user)
return graph
# parse the signed request if we have it
signed_data = None
if request:
signed_request_string = request.REQUEST.get('signed_data')
if signed_request_string:
logger.info('Got signed data from facebook')
signed_data = parse_signed_request(signed_request_string)
if signed_data:
logger.info('We were able to parse the signed data')
# the easy case, we have an access token in the signed data
if signed_data and 'oauth_token' in signed_data:
access_token = signed_data['oauth_token']
if not access_token:
# easy case, code is in the get
code = request.REQUEST.get('code')
if code:
logger.info('Got code from the request data')
if not code:
# signed request or cookie leading, base 64 decoding needed
cookie_name = 'fbsr_%s' % facebook_settings.FACEBOOK_APP_ID
cookie_data = request.COOKIES.get(cookie_name)
if cookie_data:
signed_request_string = cookie_data
if signed_request_string:
logger.info('Got signed data from cookie')
signed_data = parse_signed_request(signed_request_string)
if signed_data:
logger.info('Parsed the cookie data')
# the javascript api assumes a redirect uri of ''
redirect_uri = ''
if signed_data:
# parsed data can fail because of signing issues
if 'oauth_token' in signed_data:
logger.info('Got access_token from parsed data')
# we already have an active access token in the data
access_token = signed_data['oauth_token']
else:
logger.info('Got code from parsed data')
# no access token, need to use this code to get one
code = signed_data.get('code', None)
if not access_token:
if code:
cache_key = hash_key('convert_code_%s' % code)
access_token = cache.get(cache_key)
if not access_token:
# exchange the code for an access token
# based on the php api
# https://github.com/facebook/php-sdk/blob/master/src/base_facebook.php
# create a default for the redirect_uri
# when using the javascript sdk the default
# should be '' an empty string
# for other pages it should be the url
if not redirect_uri:
redirect_uri = ''
# we need to drop signed_data, code and state
redirect_uri = cleanup_oauth_url(redirect_uri)
try:
logger.info(
'trying to convert the code with redirect uri: %s',
redirect_uri)
# This is really slow, that's why it's cached
token_response = FacebookAuthorization.convert_code(
code, redirect_uri=redirect_uri)
expires = token_response.get('expires')
access_token = token_response['access_token']
# would use cookies instead, but django's cookie setting
# is a bit of a mess
cache.set(cache_key, access_token, 60 * 60 * 2)
except (open_facebook_exceptions.OAuthException, open_facebook_exceptions.ParameterException) as e:
# this sometimes fails, but it shouldn't raise because
# it happens when users remove your
# permissions and then try to reauthenticate
logger.warn('Error when trying to convert code %s',
unicode(e))
if raise_:
raise
else:
return None
elif request.user.is_authenticated():
# support for offline access tokens stored in the users profile
profile = try_get_profile(request.user)
access_token = get_user_attribute(
request.user, profile, 'access_token')
if not access_token:
if raise_:
message = "Couldn't find an access token in the request or the user's profile"
raise open_facebook_exceptions.OAuthException(message)
else:
return None
else:
if raise_:
message = "Couldn't find an access token in the request or cookies"
raise open_facebook_exceptions.OAuthException(message)
else:
return None
graph = OpenFacebook(access_token, signed_data, expires=expires)
# add user specific identifiers
if request:
_add_current_user_id(graph, request.user)
return graph
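# Illustrative usage (a minimal sketch, not from the original module): inside a
# Django view; the view name and template are hypothetical.
#
# def facebook_profile(request):
#     graph = require_facebook_graph(request)  # raises OpenFacebookException if unauthenticated
#     me = graph.me()
#     return render(request, 'profile.html', {'facebook_name': me['name']})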
def _add_current_user_id(graph, user):
'''
Set the current user id; convenient if you want to make sure your
fb session and user belong together
'''
if graph:
graph.current_user_id = None
if user.is_authenticated():
profile = try_get_profile(user)
facebook_id = get_user_attribute(user, profile, 'facebook_id')
if facebook_id:
graph.current_user_id = facebook_id
class FacebookUserConverter(object):
'''
This conversion class helps you to convert Facebook users to Django users
Helps with
- extracting and prepopulating full profile data
- invite flows
- importing and storing likes
'''
def __init__(self, open_facebook):
from open_facebook.api import OpenFacebook
self.open_facebook = open_facebook
validate_is_instance(open_facebook, OpenFacebook)
self._profile = None
def is_authenticated(self):
return self.open_facebook.is_authenticated()
def facebook_registration_data(self, username=True):
'''
Gets all registration data
and ensures it is correct input for a django registration
'''
facebook_profile_data = self.facebook_profile_data()
user_data = {}
try:
user_data = self._convert_facebook_data(
facebook_profile_data, username=username)
except OpenFacebookException as e:
self._report_broken_facebook_data(
user_data, facebook_profile_data, e)
raise
return user_data
def facebook_profile_data(self):
'''
Returns the facebook profile data, together with the image locations
'''
if self._profile is None:
profile = self.open_facebook.me()
profile['image'] = self.open_facebook.my_image_url('large')
profile['image_thumb'] = self.open_facebook.my_image_url()
self._profile = profile
return self._profile
@classmethod
def _convert_facebook_data(cls, facebook_profile_data, username=True):
'''
Takes facebook user data and converts it to a format for
usage with Django
'''
user_data = facebook_profile_data.copy()
profile = facebook_profile_data.copy()
website = profile.get('website')
if website:
user_data['website_url'] = cls._extract_url(website)
user_data['facebook_profile_url'] = profile.get('link')
user_data['facebook_name'] = profile.get('name')
if len(user_data.get('email', '')) > 75:
# drop over-long (fake) facebook email addresses; they won't fit django's 75 char field
del user_data['email']
gender = profile.get('gender', None)
if gender == 'male':
user_data['gender'] = 'm'
elif gender == 'female':
user_data['gender'] = 'f'
user_data['username'] = cls._retrieve_facebook_username(user_data)
user_data['password2'], user_data['password1'] = (
cls._generate_fake_password(),) * 2  # both fields get the same password
facebook_map = dict(birthday='date_of_birth',
about='about_me', id='facebook_id')
for k, v in facebook_map.items():
user_data[v] = user_data.get(k)
user_data['facebook_id'] = int(user_data['facebook_id'])
if not user_data['about_me'] and user_data.get('quotes'):
user_data['about_me'] = user_data.get('quotes')
user_data['date_of_birth'] = cls._parse_data_of_birth(
user_data['date_of_birth'])
if username:
user_data['username'] = cls._create_unique_username(
user_data['username'])
# make sure the first and last name are not too long
if 'first_name' in user_data:
user_data['first_name'] = user_data['first_name'][:30]
if 'last_name' in user_data:
user_data['last_name'] = user_data['last_name'][:30]
return user_data
@classmethod
def _extract_url(cls, text_url_field):
'''
>>> from django_facebook.api import FacebookUserConverter
>>> url_text = 'http://www.google.com blabla'
>>> FacebookUserConverter._extract_url(url_text)
u'http://www.google.com/'
>>> url_text = 'http://www.google.com/'
>>> FacebookUserConverter._extract_url(url_text)
u'http://www.google.com/'
>>> url_text = 'google.com/'
>>> FacebookUserConverter._extract_url(url_text)
u'http://google.com/'
>>> url_text = 'http://www.fahiolista.com/www.myspace.com/www.google.com'
>>> FacebookUserConverter._extract_url(url_text)
u'http://www.fahiolista.com/www.myspace.com/www.google.com'
'''
import re
text_url_field = text_url_field.encode('utf8')
separation = re.compile('[ ,;\n\r]+')
try:
parts = separation.split(text_url_field)
except TypeError:
parts = separation.split(text_url_field.decode())
for part in parts:
from django_facebook.utils import get_url_field
url_check = get_url_field()
try:
clean_url = url_check.clean(part)
return clean_url
except ValidationError:
continue
@classmethod
def _generate_fake_password(cls):
'''
Returns a random fake password
'''
import string
from random import choice
size = 9
try:
string.letters
except AttributeError:
string.letters = string.ascii_letters
password = ''.join([choice(string.letters + string.digits)
for i in range(size)])
return password.lower()
@classmethod
def _parse_data_of_birth(cls, data_of_birth_string):
if data_of_birth_string:
format = '%m/%d/%Y'
try:
parsed_date = datetime.datetime.strptime(
data_of_birth_string, format)
return parsed_date
except ValueError:
# Facebook sometimes provides a partial date format
# ie 04/07 (ignore those)
if data_of_birth_string.count('/') != 1:
raise
@classmethod
def _report_broken_facebook_data(cls, facebook_data,
original_facebook_data, e):
'''
Sends a nice error email with the
- facebook data
- exception
- stacktrace
'''
from pprint import pformat
data_dump = json.dumps(original_facebook_data)
data_dump_python = pformat(original_facebook_data)
message_format = 'The following facebook data failed with error %s' \
'\n\n json %s \n\n python %s \n'
data_tuple = (unicode(e), data_dump, data_dump_python)
message = message_format % data_tuple
extra_data = {
'data_dump': data_dump,
'data_dump_python': data_dump_python,
'facebook_data': facebook_data,
}
send_warning(message, **extra_data)
@classmethod
def _create_unique_username(cls, base_username):
'''
Check the database and add numbers to the username to ensure it's unique
'''
usernames = list(
get_user_model().objects.filter(
username__istartswith=base_username
).values_list('username', flat=True))
usernames_lower = [str(u).lower() for u in usernames]
username = str(base_username)
i = 1
while base_username.lower() in usernames_lower:
base_username = username + str(i)
i += 1
return base_username
@classmethod
def _retrieve_facebook_username(cls, facebook_data):
'''
Search for the username in 3 places
- public profile
- email
- name
'''
username = None
# start by checking the public profile link (your facebook username)
link = facebook_data.get('link')
if link:
username = link.split('/')[-1]
username = cls._make_username(username)
if username and 'profilephp' in username:
username = None
# try the email address next
if not username and 'email' in facebook_data:
username = cls._make_username(facebook_data.get(
'email').split('@')[0])
# lastly, try the name of the user
if not username or len(username) < 4:
username = cls._make_username(facebook_data.get('name'))
if not username:
raise FacebookException("couldn't figure out a username")
return username
@classmethod
def _make_username(cls, username):
'''
Slugify the username and replace - with _ to meet username requirements
'''
from django.template.defaultfilters import slugify
from unidecode import unidecode
slugified_name = slugify(unidecode(username)).replace('-', '_')
# consider the username min and max constraints
slugified_name = slugified_name[:30]
if len(username) < 4:
slugified_name = None
return slugified_name
def get_and_store_likes(self, user):
'''
Gets and stores your facebook likes to DB
Both the get and the store run in an async task when
FACEBOOK_CELERY_STORE = True
'''
if facebook_settings.FACEBOOK_CELERY_STORE:
from django_facebook.tasks import get_and_store_likes
get_and_store_likes.delay(user, self)
else:
self._get_and_store_likes(user)
def _get_and_store_likes(self, user):
likes = self.get_likes()
stored_likes = self._store_likes(user, likes)
return stored_likes
def get_likes(self, limit=5000):
'''
Parses the facebook response and returns the likes
'''
likes_response = self.open_facebook.get('me/likes', limit=limit)
likes = (likes_response and likes_response.get('data')) or []
logger.info('found %s likes', len(likes))
return likes
def store_likes(self, user, likes):
'''
Given a user and likes store these in the db
Note this can be a heavy operation, best to do it
in the background using celery
'''
if facebook_settings.FACEBOOK_CELERY_STORE:
from django_facebook.tasks import store_likes
store_likes.delay(user, likes)
else:
self._store_likes(user, likes)
@classmethod
def _store_likes(cls, user, likes):
current_likes = inserted_likes = None
if likes:
from django_facebook.models import FacebookLike
base_queryset = FacebookLike.objects.filter(user_id=user.id)
global_defaults = dict(user_id=user.id)
id_field = 'facebook_id'
default_dict = {}
for like in likes:
name = like.get('name')
created_time_string = like.get('created_time')
created_time = None
if created_time_string:
created_time = parse_date(like['created_time'])
default_dict[like['id']] = dict(
created_time=created_time,
category=like.get('category'),
name=name
)
current_likes, inserted_likes = mass_get_or_create(
FacebookLike, base_queryset, id_field, default_dict,
global_defaults)
logger.debug('found %s likes and inserted %s new likes',
len(current_likes), len(inserted_likes))
# fire an event, so you can do things like personalizing the user's account
# based on the likes
signals.facebook_post_store_likes.send(sender=get_profile_model(),
user=user, likes=likes, current_likes=current_likes,
inserted_likes=inserted_likes,
)
return likes
def get_and_store_friends(self, user):
'''
Gets and stores your facebook friends to DB
Both the get and the store run in an async task when
FACEBOOK_CELERY_STORE = True
'''
if facebook_settings.FACEBOOK_CELERY_STORE:
from django_facebook.tasks import get_and_store_friends
get_and_store_friends.delay(user, self)
else:
self._get_and_store_friends(user)
def _get_and_store_friends(self, user):
'''
Getting the friends via fb and storing them
'''
friends = self.get_friends()
stored_friends = self._store_friends(user, friends)
return stored_friends
def get_friends(self, limit=5000):
'''
Connects to the facebook api and gets the user's friends
'''
friends = getattr(self, '_friends', None)
if friends is None:
friends_response = self.open_facebook.get('me/friends', limit=limit, fields='gender,name')
friends = friends_response.get('data') or []
# cache on the instance so repeated calls don't hit the api again
self._friends = friends
logger.info('found %s friends', len(friends))
return friends
def store_friends(self, user, friends):
'''
Stores the given friends locally for this user
Quite slow, better do this using celery on a secondary db
'''
if facebook_settings.FACEBOOK_CELERY_STORE:
from django_facebook.tasks import store_friends
store_friends.delay(user, friends)
else:
self._store_friends(user, friends)
@classmethod
def _store_friends(cls, user, friends):
from django_facebook.models import FacebookUser
current_friends = inserted_friends = None
# store the users for later retrieval
if friends:
# see which ids this user already stored
base_queryset = FacebookUser.objects.filter(user_id=user.id)
# if none of your friends have a gender, clean the old data
genders = FacebookUser.objects.filter(
user_id=user.id, gender__in=('M', 'F')).count()
if not genders:
FacebookUser.objects.filter(user_id=user.id).delete()
global_defaults = dict(user_id=user.id)
default_dict = {}
gender_map = dict(female='F', male='M')
gender_map['male (hidden)'] = 'M'
gender_map['female (hidden)'] = 'F'
for f in friends:
name = f.get('name')
gender = None
if f.get('sex'):
gender = gender_map[f.get('sex')]
default_dict[str(f['id'])] = dict(name=name, gender=gender)
id_field = 'facebook_id'
current_friends, inserted_friends = mass_get_or_create(
FacebookUser, base_queryset, id_field, default_dict,
global_defaults)
logger.debug('found %s friends and inserted %s new ones',
len(current_friends), len(inserted_friends))
# fire an event, so you can do things like personalizing suggested users
# to follow
signals.facebook_post_store_friends.send(sender=get_profile_model(),
user=user, friends=friends, current_friends=current_friends,
inserted_friends=inserted_friends,
)
return friends
def registered_friends(self, user):
'''
Returns all profile models which are already registered on your site
and a list of friends which are not on your site
'''
profile_class = get_profile_model()
friends = self.get_friends(limit=1000)
if friends:
friend_ids = [f['id'] for f in friends]
friend_objects = profile_class.objects.filter(
facebook_id__in=friend_ids).select_related('user')
registered_ids = set(str(f.facebook_id) for f in friend_objects)
new_friends = [f for f in friends if str(f['id']) not in registered_ids]
else:
new_friends = []
friend_objects = profile_class.objects.none()
return friend_objects, new_friends
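# Illustrative usage (a minimal sketch, not from the original module): wiring
# the converter to a graph and reacting to the stored-likes signal; the
# receiver below is hypothetical.
#
# graph = require_facebook_graph(request)
# converter = FacebookUserConverter(graph)
# registration_data = converter.facebook_registration_data()
# converter.get_and_store_likes(request.user)
#
# from django.dispatch import receiver
# @receiver(signals.facebook_post_store_likes)
# def personalize_account(sender, user, likes, **kwargs):
#     pass  # e.g. tag the user's account based on their likes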
| bsd-3-clause |
mramanathan/pydiary_notes | decorators/starter.py | 1 | 1078 | # _*_ coding: utf-8 _*_
#!/usr/bin/env/python
def jewel(crown):
''' Novice decorator function '''
def necklace(*args, **kwargs):
print("necklace is always below the crown {}".format(jewel.__name__))
return crown(*args, **kwargs)
return necklace
def funcLog(crowns):
''' Real world application '''
import logging
logging.basicConfig(filename='{}.log'.format(crowns.__name__), level=logging.INFO)
def necklace(*args, **kwargs):
logging.info("Ran with args: {}, and kwargs: {}".format(args, kwargs))
print("necklace is always below the crown {}".format(jewel.__name__))
return crowns(*args, **kwargs)
return necklace
@jewel
def king():
print("King was crowned without necklace")
@jewel
def queen(orn1, orn2):
print("Queen was crowned with ({}, {})".format(orn1, orn2))
@funcLog
def king():
print("King was crowned without necklace")
@funcLog
def queen(orn1, orn2):
print("Queen was crowned with ({}, {})".format(orn1, orn2))
king()
queen('pearl-chain', 'gold-chain')
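# Expected behaviour (a sketch; the funcLog-decorated definitions above shadow
# the earlier jewel-decorated ones): each call prints the "necklace" line
# followed by the decorated function's own print. Note that
# logging.basicConfig is a no-op after the first call, so both functions'
# INFO records end up in the first file configured (king.log here).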
| gpl-3.0 |
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247972723/PyQt4/QtGui/__init__/QWorkspace.py | 2 | 4443 | # encoding: utf-8
# module PyQt4.QtGui
# from /usr/lib/python2.7/dist-packages/PyQt4/QtGui.so
# by generator 1.135
# no doc
# imports
import PyQt4.QtCore as __PyQt4_QtCore
from QWidget import QWidget
class QWorkspace(QWidget):
""" QWorkspace(QWidget parent=None) """
def activateNextWindow(self): # real signature unknown; restored from __doc__
""" QWorkspace.activateNextWindow() """
pass
def activatePreviousWindow(self): # real signature unknown; restored from __doc__
""" QWorkspace.activatePreviousWindow() """
pass
def activeWindow(self): # real signature unknown; restored from __doc__
""" QWorkspace.activeWindow() -> QWidget """
return QWidget
def addWindow(self, QWidget, Qt_WindowFlags_flags=0): # real signature unknown; restored from __doc__
""" QWorkspace.addWindow(QWidget, Qt.WindowFlags flags=0) -> QWidget """
return QWidget
def arrangeIcons(self): # real signature unknown; restored from __doc__
""" QWorkspace.arrangeIcons() """
pass
def background(self): # real signature unknown; restored from __doc__
""" QWorkspace.background() -> QBrush """
return QBrush
def cascade(self): # real signature unknown; restored from __doc__
""" QWorkspace.cascade() """
pass
def changeEvent(self, QEvent): # real signature unknown; restored from __doc__
""" QWorkspace.changeEvent(QEvent) """
pass
def childEvent(self, QChildEvent): # real signature unknown; restored from __doc__
""" QWorkspace.childEvent(QChildEvent) """
pass
def closeActiveWindow(self): # real signature unknown; restored from __doc__
""" QWorkspace.closeActiveWindow() """
pass
def closeAllWindows(self): # real signature unknown; restored from __doc__
""" QWorkspace.closeAllWindows() """
pass
def event(self, QEvent): # real signature unknown; restored from __doc__
""" QWorkspace.event(QEvent) -> bool """
return False
def eventFilter(self, QObject, QEvent): # real signature unknown; restored from __doc__
""" QWorkspace.eventFilter(QObject, QEvent) -> bool """
return False
def hideEvent(self, QHideEvent): # real signature unknown; restored from __doc__
""" QWorkspace.hideEvent(QHideEvent) """
pass
def paintEvent(self, QPaintEvent): # real signature unknown; restored from __doc__
""" QWorkspace.paintEvent(QPaintEvent) """
pass
def resizeEvent(self, QResizeEvent): # real signature unknown; restored from __doc__
""" QWorkspace.resizeEvent(QResizeEvent) """
pass
def scrollBarsEnabled(self): # real signature unknown; restored from __doc__
""" QWorkspace.scrollBarsEnabled() -> bool """
return False
def setActiveWindow(self, QWidget): # real signature unknown; restored from __doc__
""" QWorkspace.setActiveWindow(QWidget) """
pass
def setBackground(self, QBrush): # real signature unknown; restored from __doc__
""" QWorkspace.setBackground(QBrush) """
pass
def setScrollBarsEnabled(self, bool): # real signature unknown; restored from __doc__
""" QWorkspace.setScrollBarsEnabled(bool) """
pass
def showEvent(self, QShowEvent): # real signature unknown; restored from __doc__
""" QWorkspace.showEvent(QShowEvent) """
pass
def sizeHint(self): # real signature unknown; restored from __doc__
""" QWorkspace.sizeHint() -> QSize """
pass
def tile(self): # real signature unknown; restored from __doc__
""" QWorkspace.tile() """
pass
def wheelEvent(self, QWheelEvent): # real signature unknown; restored from __doc__
""" QWorkspace.wheelEvent(QWheelEvent) """
pass
def windowActivated(self, *args, **kwargs): # real signature unknown
""" QWorkspace.windowActivated[QWidget] [signal] """
pass
def windowList(self, QWorkspace_WindowOrder_order=None): # real signature unknown; restored from __doc__
""" QWorkspace.windowList(QWorkspace.WindowOrder order=QWorkspace.CreationOrder) -> list-of-QWidget """
pass
def __init__(self, QWidget_parent=None): # real signature unknown; restored from __doc__
pass
CreationOrder = 0
StackingOrder = 1
WindowOrder = None # (!) real value is ''
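# Illustrative usage (a minimal sketch, not part of the generated stub):
#
# from PyQt4.QtGui import QApplication, QTextEdit
# app = QApplication([])
# workspace = QWorkspace()
# workspace.addWindow(QTextEdit())
# workspace.show()
# workspace.tile()
# app.exec_()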
| gpl-2.0 |
ryangallen/django | tests/bash_completion/tests.py | 327 | 3888 | """
A series of tests to establish that the command-line bash completion works.
"""
import os
import sys
import unittest
from django.apps import apps
from django.core.management import ManagementUtility
from django.test.utils import captured_stdout
class BashCompletionTests(unittest.TestCase):
"""
Testing the Python level bash completion code.
This requires setting up the environment as if we got passed data
from bash.
"""
def setUp(self):
self.old_DJANGO_AUTO_COMPLETE = os.environ.get('DJANGO_AUTO_COMPLETE')
os.environ['DJANGO_AUTO_COMPLETE'] = '1'
def tearDown(self):
if self.old_DJANGO_AUTO_COMPLETE:
os.environ['DJANGO_AUTO_COMPLETE'] = self.old_DJANGO_AUTO_COMPLETE
else:
del os.environ['DJANGO_AUTO_COMPLETE']
def _user_input(self, input_str):
"""
Set the environment and the list of command line arguments.
This sets the bash variables $COMP_WORDS and $COMP_CWORD. The former is
an array consisting of the individual words in the current command
line, the latter is the index of the current cursor position, so in
case a word is completed and the cursor is placed after a whitespace,
$COMP_CWORD must be incremented by 1:
* 'django-admin start' -> COMP_CWORD=1
* 'django-admin startproject' -> COMP_CWORD=1
* 'django-admin startproject ' -> COMP_CWORD=2
"""
os.environ['COMP_WORDS'] = input_str
idx = len(input_str.split(' ')) - 1 # Index of the last word
comp_cword = idx + 1 if input_str.endswith(' ') else idx
os.environ['COMP_CWORD'] = str(comp_cword)
sys.argv = input_str.split()
def _run_autocomplete(self):
util = ManagementUtility(argv=sys.argv)
with captured_stdout() as stdout:
try:
util.autocomplete()
except SystemExit:
pass
return stdout.getvalue().strip().split('\n')
def test_django_admin_py(self):
"django_admin.py will autocomplete option flags"
self._user_input('django-admin sqlmigrate --verb')
output = self._run_autocomplete()
self.assertEqual(output, ['--verbosity='])
def test_manage_py(self):
"manage.py will autocomplete option flags"
self._user_input('manage.py sqlmigrate --verb')
output = self._run_autocomplete()
self.assertEqual(output, ['--verbosity='])
def test_custom_command(self):
"A custom command can autocomplete option flags"
self._user_input('django-admin test_command --l')
output = self._run_autocomplete()
self.assertEqual(output, ['--list'])
def test_subcommands(self):
"Subcommands can be autocompleted"
self._user_input('django-admin sql')
output = self._run_autocomplete()
self.assertEqual(output, ['sqlflush sqlmigrate sqlsequencereset'])
def test_completed_subcommand(self):
"Show option flags in case a subcommand is completed"
self._user_input('django-admin startproject ') # Trailing whitespace
output = self._run_autocomplete()
for item in output:
self.assertTrue(item.startswith('--'))
def test_help(self):
"No errors, just an empty list if there are no autocomplete options"
self._user_input('django-admin help --')
output = self._run_autocomplete()
self.assertEqual(output, [''])
def test_app_completion(self):
"Application names will be autocompleted for an AppCommand"
self._user_input('django-admin sqlmigrate a')
output = self._run_autocomplete()
a_labels = sorted(app_config.label
for app_config in apps.get_app_configs()
if app_config.label.startswith('a'))
self.assertEqual(output, a_labels)
| bsd-3-clause |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/django/core/files/move.py | 554 | 2794 | """
Move a file in the safest way possible::
>>> from django.core.files.move import file_move_safe
>>> file_move_safe("/tmp/old_file", "/tmp/new_file")
"""
import os
from shutil import copystat
from django.core.files import locks
__all__ = ['file_move_safe']
def _samefile(src, dst):
# Macintosh, Unix.
if hasattr(os.path, 'samefile'):
try:
return os.path.samefile(src, dst)
except OSError:
return False
# All other platforms: check for same pathname.
return (os.path.normcase(os.path.abspath(src)) ==
os.path.normcase(os.path.abspath(dst)))
def file_move_safe(old_file_name, new_file_name, chunk_size=1024 * 64, allow_overwrite=False):
"""
Moves a file from one location to another in the safest way possible.
First, tries ``os.rename``, which is simple but will break across filesystems.
If that fails, streams manually from one file to another in pure Python.
If the destination file exists and ``allow_overwrite`` is ``False``, this
function will raise an ``IOError``.
"""
# There's no reason to move if we don't have to.
if _samefile(old_file_name, new_file_name):
return
try:
# If the destination file exists and allow_overwrite is False then raise an IOError
if not allow_overwrite and os.access(new_file_name, os.F_OK):
raise IOError("Destination file %s exists and allow_overwrite is False" % new_file_name)
os.rename(old_file_name, new_file_name)
return
except OSError:
# This will happen with os.rename if moving to another filesystem
# or when moving opened files on certain operating systems
pass
# first open the old file, so that it won't go away
with open(old_file_name, 'rb') as old_file:
# now open the new file, not forgetting allow_overwrite
fd = os.open(new_file_name, (os.O_WRONLY | os.O_CREAT | getattr(os, 'O_BINARY', 0) |
(os.O_EXCL if not allow_overwrite else 0)))
try:
locks.lock(fd, locks.LOCK_EX)
current_chunk = None
while current_chunk != b'':
current_chunk = old_file.read(chunk_size)
os.write(fd, current_chunk)
finally:
locks.unlock(fd)
os.close(fd)
copystat(old_file_name, new_file_name)
try:
os.remove(old_file_name)
except OSError as e:
# Certain operating systems (Cygwin and Windows)
# fail when deleting opened files, ignore it. (For the
# systems where this happens, temporary files will be auto-deleted
# on close anyway.)
if getattr(e, 'winerror', 0) != 32 and getattr(e, 'errno', 0) != 13:
raise
| apache-2.0 |
sindrig/django-guardian | guardian/management/__init__.py | 13 | 1719 | from __future__ import unicode_literals
import django
from django.db.models import signals
from guardian import models as guardian_app
from guardian.conf import settings as guardian_settings
from guardian.compat import get_user_model
from guardian.compat import import_string
def get_init_anonymous_user(User):
"""
Returns User model instance that would be referenced by guardian when
permissions are checked against users that haven't signed into the system.
:param User: User model - result of ``django.contrib.auth.get_user_model``.
"""
kwargs = {
User.USERNAME_FIELD: guardian_settings.ANONYMOUS_DEFAULT_USERNAME_VALUE
}
return User(**kwargs)
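# Illustrative configuration (a sketch; the dotted path is hypothetical, and
# the settings name assumes guardian's usual GUARDIAN_ prefix): a project can
# swap in its own initializer via settings.py:
#
# GUARDIAN_GET_INIT_ANONYMOUS_USER = 'myapp.models.get_init_anonymous_user'
#
# The referenced callable receives the User model class and must return an
# unsaved instance, mirroring get_init_anonymous_user above.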
def create_anonymous_user(sender, **kwargs):
"""
Creates anonymous User instance with id and username from settings.
"""
User = get_user_model()
try:
User.objects.get(pk=guardian_settings.ANONYMOUS_USER_ID)
except User.DoesNotExist:
if django.VERSION >= (1, 5):
retrieve_anonymous_function = import_string(
guardian_settings.GET_INIT_ANONYMOUS_USER)
user = retrieve_anonymous_function(User)
# Always set pk to the one pointed at settings
user.pk = guardian_settings.ANONYMOUS_USER_ID
user.save()
else:
User.objects.create(pk=guardian_settings.ANONYMOUS_USER_ID,
username=guardian_settings.ANONYMOUS_DEFAULT_USERNAME_VALUE)
# Only create an anonymous user if support is enabled.
if guardian_settings.ANONYMOUS_USER_ID is not None:
signals.post_syncdb.connect(create_anonymous_user, sender=guardian_app,
dispatch_uid="guardian.management.create_anonymous_user")
| bsd-2-clause |
dhananjay92/servo | tests/wpt/css-tests/tools/webdriver/webdriver/keys.py | 263 | 1481 | """Constants for special keys."""
class Keys:
"""Constants for special keys."""
NULL = '\uE000'
CANCEL = '\uE001'
HELP = '\uE002'
BACK_SPACE = '\uE003'
TAB = '\uE004'
CLEAR = '\uE005'
RETURN = '\uE006'
ENTER = '\uE007'
SHIFT = '\uE008'
LEFT_SHIFT = '\uE008'
CONTROL = '\uE009'
LEFT_CONTROL = '\uE009'
ALT = '\uE00A'
LEFT_ALT = '\uE00A'
PAUSE = '\uE00B'
ESCAPE = '\uE00C'
SPACE = '\uE00D'
PAGE_UP = '\uE00E'
PAGE_DOWN = '\uE00F'
END = '\uE010'
HOME = '\uE011'
LEFT = '\uE012'
ARROW_LEFT = '\uE012'
UP = '\uE013'
ARROW_UP = '\uE013'
RIGHT = '\uE014'
ARROW_RIGHT = '\uE014'
DOWN = '\uE015'
ARROW_DOWN = '\uE015'
INSERT = '\uE016'
DELETE = '\uE017'
SEMICOLON = '\uE018'
EQUALS = '\uE019'
NUMPAD0 = '\uE01A'
NUMPAD1 = '\uE01B'
NUMPAD2 = '\uE01C'
NUMPAD3 = '\uE01D'
NUMPAD4 = '\uE01E'
NUMPAD5 = '\uE01F'
NUMPAD6 = '\uE020'
NUMPAD7 = '\uE021'
NUMPAD8 = '\uE022'
NUMPAD9 = '\uE023'
MULTIPLY = '\uE024'
ADD = '\uE025'
SEPARATOR = '\uE026'
SUBTRACT = '\uE027'
DECIMAL = '\uE028'
DIVIDE = '\uE029'
F1 = '\uE031'
F2 = '\uE032'
F3 = '\uE033'
F4 = '\uE034'
F5 = '\uE035'
F6 = '\uE036'
F7 = '\uE037'
F8 = '\uE038'
F9 = '\uE039'
F10 = '\uE03A'
F11 = '\uE03B'
F12 = '\uE03C'
META = '\uE03D'
COMMAND = '\uE03D'
ZENKAKU_HANKAKU = '\uE040'
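# Illustrative usage (a minimal sketch): the constants are single code points
# in the Unicode Private Use Area, so they concatenate with ordinary text to
# form key sequences, e.g.:
#
# sequence = 'search term' + Keys.ENTER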
| mpl-2.0 |
mammique/django | tests/regressiontests/extra_regress/tests.py | 2 | 14184 | from __future__ import absolute_import, unicode_literals
import datetime
from django.contrib.auth.models import User
from django.test import TestCase
from django.utils.datastructures import SortedDict
from .models import TestObject, Order, RevisionableModel
class ExtraRegressTests(TestCase):
def setUp(self):
self.u = User.objects.create_user(
username="fred",
password="secret",
email="fred@example.com"
)
def test_regression_7314_7372(self):
"""
Regression tests for #7314 and #7372
"""
rm = RevisionableModel.objects.create(
title='First Revision',
when=datetime.datetime(2008, 9, 28, 10, 30, 0)
)
self.assertEqual(rm.pk, rm.base.pk)
rm2 = rm.new_revision()
rm2.title = "Second Revision"
rm.when = datetime.datetime(2008, 9, 28, 14, 25, 0)
rm2.save()
self.assertEqual(rm2.title, 'Second Revision')
self.assertEqual(rm2.base.title, 'First Revision')
self.assertNotEqual(rm2.pk, rm.pk)
self.assertEqual(rm2.base.pk, rm.pk)
# Queryset to match most recent revision:
qs = RevisionableModel.objects.extra(
where=["%(table)s.id IN (SELECT MAX(rev.id) FROM %(table)s rev GROUP BY rev.base_id)" % {
'table': RevisionableModel._meta.db_table,
}]
)
self.assertQuerysetEqual(qs,
[('Second Revision', 'First Revision')],
transform=lambda r: (r.title, r.base.title)
)
# Queryset to search for string in title:
qs2 = RevisionableModel.objects.filter(title__contains="Revision")
self.assertQuerysetEqual(qs2,
[
('First Revision', 'First Revision'),
('Second Revision', 'First Revision'),
],
transform=lambda r: (r.title, r.base.title),
ordered=False
)
# Following queryset should return the most recent revision:
self.assertQuerysetEqual(qs & qs2,
[('Second Revision', 'First Revision')],
transform=lambda r: (r.title, r.base.title),
ordered=False
)
def test_extra_stay_tied(self):
# Extra select parameters should stay tied to their corresponding
# select portions. Applies when portions are updated or otherwise
# moved around.
qs = User.objects.extra(
select=SortedDict((("alpha", "%s"), ("beta", "2"), ("gamma", "%s"))),
select_params=(1, 3)
)
qs = qs.extra(select={"beta": 4})
qs = qs.extra(select={"alpha": "%s"}, select_params=[5])
self.assertEqual(
list(qs.filter(id=self.u.id).values('alpha', 'beta', 'gamma')),
[{'alpha': 5, 'beta': 4, 'gamma': 3}]
)
def test_regression_7957(self):
"""
Regression test for #7957: Combining extra() calls should leave the
corresponding parameters associated with the right extra() bit. I.e.
internal dictionary must remain sorted.
"""
self.assertEqual(
User.objects.extra(select={"alpha": "%s"}, select_params=(1,)
).extra(select={"beta": "%s"}, select_params=(2,))[0].alpha,
1)
self.assertEqual(
User.objects.extra(select={"beta": "%s"}, select_params=(1,)
).extra(select={"alpha": "%s"}, select_params=(2,))[0].alpha,
2)
def test_regression_7961(self):
"""
Regression test for #7961: When not using a portion of an
extra(...) in a query, remove any corresponding parameters from the
query as well.
"""
self.assertEqual(
list(User.objects.extra(select={"alpha": "%s"}, select_params=(-6,)
).filter(id=self.u.id).values_list('id', flat=True)),
[self.u.id]
)
def test_regression_8063(self):
"""
Regression test for #8063: limiting a query shouldn't discard any
extra() bits.
"""
qs = User.objects.all().extra(where=['id=%s'], params=[self.u.id])
self.assertQuerysetEqual(qs, ['<User: fred>'])
self.assertQuerysetEqual(qs[:1], ['<User: fred>'])
def test_regression_8039(self):
"""
Regression test for #8039: Ordering sometimes removed relevant tables
from extra(). This test is the critical case: ordering uses a table,
but then removes the reference because of an optimisation. The table
should still be present because of the extra() call.
"""
self.assertQuerysetEqual(
Order.objects.extra(where=["username=%s"],
params=["fred"],
tables=["auth_user"]
).order_by('created_by'),
[]
)
def test_regression_8819(self):
"""
Regression test for #8819: Fields in the extra(select=...) list
should be available to extra(order_by=...).
"""
self.assertQuerysetEqual(
User.objects.filter(pk=self.u.id).extra(select={'extra_field': 1}).distinct(),
['<User: fred>']
)
self.assertQuerysetEqual(
User.objects.filter(pk=self.u.id).extra(select={'extra_field': 1}, order_by=['extra_field']),
['<User: fred>']
)
self.assertQuerysetEqual(
User.objects.filter(pk=self.u.id).extra(select={'extra_field': 1}, order_by=['extra_field']).distinct(),
['<User: fred>']
)
def test_dates_query(self):
"""
When calling the dates() method on a queryset with extra selection
columns, we can (and should) ignore those columns. They don't change
the result and cause incorrect SQL to be produced otherwise.
"""
rm = RevisionableModel.objects.create(
title='First Revision',
when=datetime.datetime(2008, 9, 28, 10, 30, 0)
)
self.assertQuerysetEqual(
RevisionableModel.objects.extra(select={"the_answer": 'id'}).dates('when', 'month'),
['datetime.datetime(2008, 9, 1, 0, 0)']
)
def test_values_with_extra(self):
"""
Regression test for #10256... If there is a values() clause, Extra
columns are only returned if they are explicitly mentioned.
"""
obj = TestObject(first='first', second='second', third='third')
obj.save()
self.assertEqual(
list(TestObject.objects.extra(select=SortedDict((('foo','first'), ('bar','second'), ('whiz','third')))).values()),
[{'bar': 'second', 'third': 'third', 'second': 'second', 'whiz': 'third', 'foo': 'first', 'id': obj.pk, 'first': 'first'}]
)
# Extra clauses after an empty values clause are still included
self.assertEqual(
list(TestObject.objects.values().extra(select=SortedDict((('foo','first'), ('bar','second'), ('whiz','third'))))),
[{'bar': 'second', 'third': 'third', 'second': 'second', 'whiz': 'third', 'foo': 'first', 'id': obj.pk, 'first': 'first'}]
)
# Extra columns are ignored if not mentioned in the values() clause
self.assertEqual(
list(TestObject.objects.extra(select=SortedDict((('foo','first'), ('bar','second'), ('whiz','third')))).values('first', 'second')),
[{'second': 'second', 'first': 'first'}]
)
# Extra columns after a non-empty values() clause are ignored
self.assertEqual(
list(TestObject.objects.values('first', 'second').extra(select=SortedDict((('foo','first'), ('bar','second'), ('whiz','third'))))),
[{'second': 'second', 'first': 'first'}]
)
# Extra columns can be partially returned
self.assertEqual(
list(TestObject.objects.extra(select=SortedDict((('foo','first'), ('bar','second'), ('whiz','third')))).values('first', 'second', 'foo')),
[{'second': 'second', 'foo': 'first', 'first': 'first'}]
)
# Also works if only extra columns are included
self.assertEqual(
list(TestObject.objects.extra(select=SortedDict((('foo','first'), ('bar','second'), ('whiz','third')))).values('foo', 'whiz')),
[{'foo': 'first', 'whiz': 'third'}]
)
# Values list works the same way
# All columns are returned for an empty values_list()
self.assertEqual(
list(TestObject.objects.extra(select=SortedDict((('foo','first'), ('bar','second'), ('whiz','third')))).values_list()),
[('first', 'second', 'third', obj.pk, 'first', 'second', 'third')]
)
# Extra columns after an empty values_list() are still included
self.assertEqual(
list(TestObject.objects.values_list().extra(select=SortedDict((('foo','first'), ('bar','second'), ('whiz','third'))))),
[('first', 'second', 'third', obj.pk, 'first', 'second', 'third')]
)
# Extra columns ignored completely if not mentioned in values_list()
self.assertEqual(
list(TestObject.objects.extra(select=SortedDict((('foo','first'), ('bar','second'), ('whiz','third')))).values_list('first', 'second')),
[('first', 'second')]
)
# Extra columns after a non-empty values_list() clause are ignored completely
self.assertEqual(
list(TestObject.objects.values_list('first', 'second').extra(select=SortedDict((('foo','first'), ('bar','second'), ('whiz','third'))))),
[('first', 'second')]
)
self.assertEqual(
list(TestObject.objects.extra(select=SortedDict((('foo','first'), ('bar','second'), ('whiz','third')))).values_list('second', flat=True)),
['second']
)
# Only the extra columns specified in the values_list() are returned
self.assertEqual(
list(TestObject.objects.extra(select=SortedDict((('foo','first'), ('bar','second'), ('whiz','third')))).values_list('first', 'second', 'whiz')),
[('first', 'second', 'third')]
)
# ...also works if only extra columns are included
self.assertEqual(
list(TestObject.objects.extra(select=SortedDict((('foo','first'), ('bar','second'), ('whiz','third')))).values_list('foo','whiz')),
[('first', 'third')]
)
self.assertEqual(
list(TestObject.objects.extra(select=SortedDict((('foo','first'), ('bar','second'), ('whiz','third')))).values_list('whiz', flat=True)),
['third']
)
# ... and values are returned in the order they are specified
self.assertEqual(
list(TestObject.objects.extra(select=SortedDict((('foo','first'), ('bar','second'), ('whiz','third')))).values_list('whiz','foo')),
[('third', 'first')]
)
self.assertEqual(
list(TestObject.objects.extra(select=SortedDict((('foo','first'), ('bar','second'), ('whiz','third')))).values_list('first','id')),
[('first', obj.pk)]
)
self.assertEqual(
list(TestObject.objects.extra(select=SortedDict((('foo','first'), ('bar','second'), ('whiz','third')))).values_list('whiz', 'first', 'bar', 'id')),
[('third', 'first', 'second', obj.pk)]
)
def test_regression_10847(self):
"""
Regression for #10847: the list of extra columns can always be
accurately evaluated. Using an inner query ensures that as_sql() is
producing correct output without requiring full evaluation and
execution of the inner query.
"""
obj = TestObject(first='first', second='second', third='third')
obj.save()
self.assertEqual(
list(TestObject.objects.extra(select={'extra': 1}).values('pk')),
[{'pk': obj.pk}]
)
self.assertQuerysetEqual(
TestObject.objects.filter(
pk__in=TestObject.objects.extra(select={'extra': 1}).values('pk')
),
['<TestObject: TestObject: first,second,third>']
)
self.assertEqual(
list(TestObject.objects.values('pk').extra(select={'extra': 1})),
[{'pk': obj.pk}]
)
self.assertQuerysetEqual(
TestObject.objects.filter(
pk__in=TestObject.objects.values('pk').extra(select={'extra': 1})
),
['<TestObject: TestObject: first,second,third>']
)
self.assertQuerysetEqual(
TestObject.objects.filter(pk=obj.pk) |
TestObject.objects.extra(where=["id > %s"], params=[obj.pk]),
['<TestObject: TestObject: first,second,third>']
)
def test_regression_17877(self):
"""
Ensure that extra WHERE clauses get correctly ANDed, even when they
contain OR operations.
"""
# Test Case 1: should appear in queryset.
t = TestObject(first='a', second='a', third='a')
t.save()
# Test Case 2: should appear in queryset.
t = TestObject(first='b', second='a', third='a')
t.save()
# Test Case 3: should not appear in queryset, bug case.
t = TestObject(first='a', second='a', third='b')
t.save()
# Test Case 4: should not appear in queryset.
t = TestObject(first='b', second='a', third='b')
t.save()
# Test Case 5: should not appear in queryset.
t = TestObject(first='b', second='b', third='a')
t.save()
# Test Case 6: should not appear in queryset, bug case.
t = TestObject(first='a', second='b', third='b')
t.save()
self.assertQuerysetEqual(
TestObject.objects.extra(
where=["first = 'a' OR second = 'a'", "third = 'a'"],
),
['<TestObject: TestObject: a,a,a>', '<TestObject: TestObject: b,a,a>'],
ordered=False
)
| bsd-3-clause |
Huyuwei/tvm | topi/python/topi/image/resize.py | 1 | 7184 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""TVM operator input resize compute."""
from __future__ import absolute_import
import tvm
from .. import tag
def resize(data, size, layout="NCHW", method="bilinear", align_corners=True, out_dtype=None):
"""Perform resize operation on the data.
Parameters
----------
inputs : tvm.Tensor
inputs is a 4-D tensor with shape
[batch, channel, in_height, in_width]
or [batch, in_height, in_width, channel]
size: Tuple
Output spatial size (height, width) to resize to
layout: string, optional
"NCHW", "NHWC", or "NCHWc".
method: {"bilinear", "nearest_neighbor", "bicubic"}
Method to be used for resizing.
align_corners: Boolean, optional
To preserve the values at the corner pixels.
out_dtype: string, optional
Type to return. If left None will be same as input type.
Returns
-------
output : tvm.Tensor
4-D with shape [batch, channel, in_height*scale, in_width*scale]
or [batch, in_height*scale, in_width*scale, channel]
or 5-D with shape [batch, channel-major, in_height*scale, in_width*scale, channel-minor]
"""
method = method.lower()
if layout == 'NHWC':
in_n, in_h, in_w, in_c = data.shape
output_shape = [in_n, size[0], size[1], in_c]
elif layout == 'NCHW':
in_n, in_c, in_h, in_w = data.shape
output_shape = [in_n, in_c, size[0], size[1]]
# Otherwise layout must be NCHWxc
else:
in_n, in_c, in_h, in_w, in_cc = data.shape
output_shape = [in_n, in_c, size[0], size[1], in_cc]
if align_corners:
y_ratio = (in_h - 1).astype('float') / (size[0] - 1)
x_ratio = (in_w - 1).astype('float') / (size[1] - 1)
else:
y_ratio = (in_h).astype('float') / (size[0])
x_ratio = (in_w).astype('float') / (size[1])
def _get_pixel(n, c, y, x, cc):
y = tvm.max(tvm.min(y, in_h - 1), 0)
x = tvm.max(tvm.min(x, in_w - 1), 0)
if layout == 'NHWC':
return data(n, y, x, c).astype('float')
if layout == 'NCHW':
return data(n, c, y, x).astype('float')
# else must be NCHWxc
return data(n, c, y, x, cc).astype('float')
def _get_indices(*indices):
if layout == 'NHWC':
n, y, x, c = indices
cc = None
elif layout == 'NCHW':
n, c, y, x = indices
cc = None
else:
n, c, y, x, cc = indices
return n, c, y, x, cc
def _cast_output(value):
if out_dtype:
dtype = out_dtype
else:
dtype = data.dtype
return value.astype(dtype)
# Nearest neighbor computation
def _nearest_neighbor(*indices):
n, c, y, x, cc = _get_indices(*indices)
in_y = y_ratio * y
in_x = x_ratio * x
if align_corners:
yint = tvm.round(in_y).astype('int32')
xint = tvm.round(in_x).astype('int32')
else:
# Add epsilon to floor to prevent gpu rounding errors.
epsilon = 1e-5
yint = tvm.floor(in_y + epsilon).astype('int32')
xint = tvm.floor(in_x + epsilon).astype('int32')
return _cast_output(_get_pixel(n, c, yint, xint, cc))
# Bilinear helper functions and computation.
def _lerp(A, B, t):
return A * (1.0 - t) + B * t
def _bilinear(*indices):
n, c, y, x, cc = _get_indices(*indices)
in_y = y_ratio * y
in_x = x_ratio * x
xint = tvm.floor(in_x).astype('int32')
xfract = in_x - tvm.floor(in_x)
yint = tvm.floor(in_y).astype('int32')
yfract = in_y - tvm.floor(in_y)
p00 = _get_pixel(n, c, yint, xint, cc)
p10 = _get_pixel(n, c, yint, xint + 1, cc)
p01 = _get_pixel(n, c, yint + 1, xint, cc)
p11 = _get_pixel(n, c, yint + 1, xint + 1, cc)
col0 = _lerp(p00, p10, xfract)
col1 = _lerp(p01, p11, xfract)
value = _lerp(col0, col1, yfract)
return _cast_output(value)
# Bicubic helper function and computation.
def _cubic_kernel(A, B, C, D, t):
a = -A / 2.0 + (3.0*B) / 2.0 - (3.0*C) / 2.0 + D / 2.0
b = A - (5.0*B) / 2.0 + 2.0*C - D / 2.0
c = -A / 2.0 + C / 2.0
d = B
return a*t*t*t + b*t*t + c*t + d
def _bicubic(*indices):
n, c, y, x, cc = _get_indices(*indices)
in_y = y_ratio * y
in_x = x_ratio * x
xint = tvm.floor(in_x).astype('int32')
xfract = in_x - tvm.floor(in_x)
yint = tvm.floor(in_y).astype('int32')
yfract = in_y - tvm.floor(in_y)
# 1st row
p00 = _get_pixel(n, c, yint - 1, xint - 1, cc)
p10 = _get_pixel(n, c, yint - 1, xint + 0, cc)
p20 = _get_pixel(n, c, yint - 1, xint + 1, cc)
p30 = _get_pixel(n, c, yint - 1, xint + 2, cc)
# 2nd row
p01 = _get_pixel(n, c, yint + 0, xint - 1, cc)
p11 = _get_pixel(n, c, yint + 0, xint + 0, cc)
p21 = _get_pixel(n, c, yint + 0, xint + 1, cc)
p31 = _get_pixel(n, c, yint + 0, xint + 2, cc)
# 3rd row
p02 = _get_pixel(n, c, yint + 1, xint - 1, cc)
p12 = _get_pixel(n, c, yint + 1, xint + 0, cc)
p22 = _get_pixel(n, c, yint + 1, xint + 1, cc)
p32 = _get_pixel(n, c, yint + 1, xint + 2, cc)
# 4th row
p03 = _get_pixel(n, c, yint + 2, xint - 1, cc)
p13 = _get_pixel(n, c, yint + 2, xint + 0, cc)
p23 = _get_pixel(n, c, yint + 2, xint + 1, cc)
p33 = _get_pixel(n, c, yint + 2, xint + 2, cc)
# Interpolate bicubically
col0 = _cubic_kernel(p00, p10, p20, p30, xfract)
col1 = _cubic_kernel(p01, p11, p21, p31, xfract)
col2 = _cubic_kernel(p02, p12, p22, p32, xfract)
col3 = _cubic_kernel(p03, p13, p23, p33, xfract)
value = _cubic_kernel(col0, col1, col2, col3, yfract)
return _cast_output(value)
# Determine which interpolation method to use then run it.
if method == "nearest_neighbor":
compute_func = _nearest_neighbor
elif method == "bilinear":
compute_func = _bilinear
elif method == "bicubic":
compute_func = _bicubic
else:
raise ValueError('%s method is not supported.' % method)
return tvm.compute(output_shape, compute_func, name='resize', tag=tag.INJECTIVE)
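# Illustrative usage (a minimal sketch, assuming a standard TVM setup):
#
# import tvm
# data = tvm.placeholder((1, 3, 32, 32), name='data')  # NCHW input
# out = resize(data, (64, 64), layout='NCHW', method='bilinear')
# s = tvm.create_schedule(out.op)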
| apache-2.0 |
dulems/hue | desktop/core/ext-py/Django-1.6.10/django/db/models/fields/__init__.py | 64 | 53213 | from __future__ import unicode_literals
import copy
import datetime
import decimal
import math
import warnings
from base64 import b64decode, b64encode
from itertools import tee
from django.db import connection
from django.db.models.loading import get_model
from django.db.models.query_utils import QueryWrapper
from django.conf import settings
from django import forms
from django.core import exceptions, validators
from django.utils.datastructures import DictWrapper
from django.utils.dateparse import parse_date, parse_datetime, parse_time
from django.utils.functional import curry, total_ordering
from django.utils.itercompat import is_iterator
from django.utils.text import capfirst
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_text, force_text, force_bytes
from django.utils.ipv6 import clean_ipv6_address
from django.utils import six
class Empty(object):
pass
class NOT_PROVIDED:
pass
# The values to use for "blank" in SelectFields. Will be appended to the start
# of most "choices" lists.
BLANK_CHOICE_DASH = [("", "---------")]
def _load_field(app_label, model_name, field_name):
return get_model(app_label, model_name)._meta.get_field_by_name(field_name)[0]
class FieldDoesNotExist(Exception):
pass
# A guide to Field parameters:
#
# * name: The name of the field specified in the model.
# * attname: The attribute to use on the model object. This is the same as
# "name", except in the case of ForeignKeys, where "_id" is
# appended.
# * db_column: The db_column specified in the model (or None).
# * column: The database column for this field. This is the same as
# "attname", except if db_column is specified.
#
# Code that introspects values, or does other dynamic things, should use
# attname. For example, this gets the primary key value of object "obj":
#
# getattr(obj, opts.pk.attname)
def _empty(of_cls):
new = Empty()
new.__class__ = of_cls
return new
@total_ordering
class Field(object):
"""Base class for all field types"""
# Designates whether empty strings fundamentally are allowed at the
# database level.
empty_strings_allowed = True
empty_values = list(validators.EMPTY_VALUES)
# These track each time a Field instance is created. Used to retain order.
# The auto_creation_counter is used for fields that Django implicitly
# creates, creation_counter is used for all user-specified fields.
creation_counter = 0
auto_creation_counter = -1
default_validators = [] # Default set of validators
default_error_messages = {
'invalid_choice': _('Value %(value)r is not a valid choice.'),
'null': _('This field cannot be null.'),
'blank': _('This field cannot be blank.'),
'unique': _('%(model_name)s with this %(field_label)s '
'already exists.'),
}
# Generic field type description, usually overridden by subclasses
def _description(self):
return _('Field of type: %(field_type)s') % {
'field_type': self.__class__.__name__
}
description = property(_description)
def __init__(self, verbose_name=None, name=None, primary_key=False,
max_length=None, unique=False, blank=False, null=False,
db_index=False, rel=None, default=NOT_PROVIDED, editable=True,
serialize=True, unique_for_date=None, unique_for_month=None,
unique_for_year=None, choices=None, help_text='', db_column=None,
db_tablespace=None, auto_created=False, validators=[],
error_messages=None):
self.name = name
self.verbose_name = verbose_name
self.primary_key = primary_key
self.max_length, self._unique = max_length, unique
self.blank, self.null = blank, null
self.rel = rel
self.default = default
self.editable = editable
self.serialize = serialize
self.unique_for_date, self.unique_for_month = (unique_for_date,
unique_for_month)
self.unique_for_year = unique_for_year
self._choices = choices or []
self.help_text = help_text
self.db_column = db_column
self.db_tablespace = db_tablespace or settings.DEFAULT_INDEX_TABLESPACE
self.auto_created = auto_created
# Set db_index to True if the field has a relationship and doesn't
# explicitly set db_index.
self.db_index = db_index
# Adjust the appropriate creation counter, and save our local copy.
if auto_created:
self.creation_counter = Field.auto_creation_counter
Field.auto_creation_counter -= 1
else:
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
self.validators = self.default_validators + validators
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self.error_messages = messages
def __eq__(self, other):
# Needed for @total_ordering
if isinstance(other, Field):
return self.creation_counter == other.creation_counter
return NotImplemented
def __lt__(self, other):
# This is needed because bisect does not take a comparison function.
if isinstance(other, Field):
return self.creation_counter < other.creation_counter
return NotImplemented
def __hash__(self):
return hash(self.creation_counter)
def __deepcopy__(self, memodict):
# We don't have to deepcopy very much here, since most things are not
# intended to be altered after initial creation.
obj = copy.copy(self)
if self.rel:
obj.rel = copy.copy(self.rel)
if hasattr(self.rel, 'field') and self.rel.field is self:
obj.rel.field = obj
memodict[id(self)] = obj
return obj
def __copy__(self):
# We need to avoid hitting __reduce__, so define this
# slightly weird copy construct.
obj = Empty()
obj.__class__ = self.__class__
obj.__dict__ = self.__dict__.copy()
return obj
def __reduce__(self):
"""
Pickling should return the model._meta.fields instance of the field,
not a new copy of that field. So, we use the app cache to load the
model and then the field back.
"""
if not hasattr(self, 'model'):
# Fields are sometimes used without attaching them to models (for
# example in aggregation). In this case give back a plain field
# instance. The code below will create a new empty instance of
# class self.__class__, then update its dict with self.__dict__
# values - so, this is very close to normal pickle.
return _empty, (self.__class__,), self.__dict__
if self.model._deferred:
# Deferred model will not be found from the app cache. This could
# be fixed by reconstructing the deferred model on unpickle.
raise RuntimeError("Fields of deferred models can't be reduced")
return _load_field, (self.model._meta.app_label, self.model._meta.object_name,
self.name)
def to_python(self, value):
"""
Converts the input value into the expected Python data type, raising
django.core.exceptions.ValidationError if the data can't be converted.
Returns the converted value. Subclasses should override this.
"""
return value
def run_validators(self, value):
if value in self.empty_values:
return
errors = []
for v in self.validators:
try:
v(value)
except exceptions.ValidationError as e:
if hasattr(e, 'code') and e.code in self.error_messages:
e.message = self.error_messages[e.code]
errors.extend(e.error_list)
if errors:
raise exceptions.ValidationError(errors)
def validate(self, value, model_instance):
"""
Validates value and throws ValidationError. Subclasses should override
this to provide validation logic.
"""
if not self.editable:
# Skip validation for non-editable fields.
return
if self._choices and value not in self.empty_values:
for option_key, option_value in self.choices:
if isinstance(option_value, (list, tuple)):
# This is an optgroup, so look inside the group for
# options.
for optgroup_key, optgroup_value in option_value:
if value == optgroup_key:
return
elif value == option_key:
return
raise exceptions.ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': value},
)
if value is None and not self.null:
raise exceptions.ValidationError(self.error_messages['null'], code='null')
if not self.blank and value in self.empty_values:
raise exceptions.ValidationError(self.error_messages['blank'], code='blank')
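    # Illustrative only -- MEDIA_CHOICES below is an assumption, not part of
    # this module. The optgroup scan above expects nested choices shaped like:
    #
    #   MEDIA_CHOICES = (
    #       ('Audio', (('vinyl', 'Vinyl'), ('cd', 'CD'))),
    #       ('unknown', 'Unknown'),
    #   )
    #
    # Here 'vinyl', 'cd' and 'unknown' all validate; anything else raises
    # 'invalid_choice'.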
def clean(self, value, model_instance):
"""
Convert the value's type and run validation. Validation errors
from to_python and validate are propagated. The correct value is
returned if no error is raised.
"""
value = self.to_python(value)
self.validate(value, model_instance)
self.run_validators(value)
return value
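    # Minimal usage sketch (field, values and instance are illustrative):
    #
    #   f = IntegerField(null=False)
    #   f.clean('42', instance)   # -> 42, via to_python/validate/run_validators
    #   f.clean(None, instance)   # -> ValidationError with code 'null'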
def db_type(self, connection):
"""
Returns the database column data type for this field, for the provided
connection.
"""
# The default implementation of this method looks at the
# backend-specific DATA_TYPES dictionary, looking up the field by its
# "internal type".
#
# A Field class can implement the get_internal_type() method to specify
# which *preexisting* Django Field class it's most similar to -- i.e.,
# a custom field might be represented by a TEXT column type, which is
# the same as the TextField Django field type, which means the custom
# field's get_internal_type() returns 'TextField'.
#
# But the limitation of the get_internal_type() / data_types approach
# is that it cannot handle database column types that aren't already
# mapped to one of the built-in Django field types. In this case, you
# can implement db_type() instead of get_internal_type() to specify
# exactly which wacky database column type you want to use.
data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
try:
return (connection.creation.data_types[self.get_internal_type()]
% data)
except KeyError:
return None
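    # Sketch of the db_type() escape hatch described above; the field name and
    # column type are illustrative assumptions:
    #
    #   class HandField(Field):
    #       def db_type(self, connection):
    #           return 'char(104)'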
@property
def unique(self):
return self._unique or self.primary_key
def set_attributes_from_name(self, name):
if not self.name:
self.name = name
self.attname, self.column = self.get_attname_column()
if self.verbose_name is None and self.name:
self.verbose_name = self.name.replace('_', ' ')
def contribute_to_class(self, cls, name, virtual_only=False):
self.set_attributes_from_name(name)
self.model = cls
if virtual_only:
cls._meta.add_virtual_field(self)
else:
cls._meta.add_field(self)
if self.choices:
setattr(cls, 'get_%s_display' % self.name,
curry(cls._get_FIELD_display, field=self))
def get_attname(self):
return self.name
def get_attname_column(self):
attname = self.get_attname()
column = self.db_column or attname
return attname, column
def get_cache_name(self):
return '_%s_cache' % self.name
def get_internal_type(self):
return self.__class__.__name__
def pre_save(self, model_instance, add):
"""
Returns field's value just before saving.
"""
return getattr(model_instance, self.attname)
def get_prep_value(self, value):
"""
Perform preliminary non-db specific value checks and conversions.
"""
return value
def get_db_prep_value(self, value, connection, prepared=False):
"""Returns field's value prepared for interacting with the database
backend.
        Used by the default implementations of ``get_db_prep_save`` and
        ``get_db_prep_lookup``.
"""
if not prepared:
value = self.get_prep_value(value)
return value
def get_db_prep_save(self, value, connection):
"""
Returns field's value prepared for saving into a database.
"""
return self.get_db_prep_value(value, connection=connection,
prepared=False)
def get_prep_lookup(self, lookup_type, value):
"""
Perform preliminary non-db specific lookup checks and conversions
"""
if hasattr(value, 'prepare'):
return value.prepare()
if hasattr(value, '_prepare'):
return value._prepare()
if lookup_type in (
'iexact', 'contains', 'icontains',
'startswith', 'istartswith', 'endswith', 'iendswith',
'month', 'day', 'week_day', 'hour', 'minute', 'second',
'isnull', 'search', 'regex', 'iregex',
):
return value
elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
return self.get_prep_value(value)
elif lookup_type in ('range', 'in'):
return [self.get_prep_value(v) for v in value]
elif lookup_type == 'year':
try:
return int(value)
except ValueError:
raise ValueError("The __year lookup type requires an integer "
"argument")
raise TypeError("Field has invalid lookup: %s" % lookup_type)
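    # For example (illustrative values, on an IntegerField-like subclass):
    #   get_prep_lookup('in', ['1', '2'])   # -> [1, 2] via get_prep_value()
    #   get_prep_lookup('year', '2013')     # -> 2013
    #   get_prep_lookup('fuzzy', 'x')       # -> TypeError: invalid lookup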
def get_db_prep_lookup(self, lookup_type, value, connection,
prepared=False):
"""
Returns field's value prepared for database lookup.
"""
if not prepared:
value = self.get_prep_lookup(lookup_type, value)
prepared = True
if hasattr(value, 'get_compiler'):
value = value.get_compiler(connection=connection)
if hasattr(value, 'as_sql') or hasattr(value, '_as_sql'):
# If the value has a relabeled_clone method it means the
# value will be handled later on.
if hasattr(value, 'relabeled_clone'):
return value
if hasattr(value, 'as_sql'):
sql, params = value.as_sql()
else:
sql, params = value._as_sql(connection=connection)
return QueryWrapper(('(%s)' % sql), params)
if lookup_type in ('month', 'day', 'week_day', 'hour', 'minute',
'second', 'search', 'regex', 'iregex'):
return [value]
elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
return [self.get_db_prep_value(value, connection=connection,
prepared=prepared)]
elif lookup_type in ('range', 'in'):
return [self.get_db_prep_value(v, connection=connection,
prepared=prepared) for v in value]
elif lookup_type in ('contains', 'icontains'):
return ["%%%s%%" % connection.ops.prep_for_like_query(value)]
elif lookup_type == 'iexact':
return [connection.ops.prep_for_iexact_query(value)]
elif lookup_type in ('startswith', 'istartswith'):
return ["%s%%" % connection.ops.prep_for_like_query(value)]
elif lookup_type in ('endswith', 'iendswith'):
return ["%%%s" % connection.ops.prep_for_like_query(value)]
elif lookup_type == 'isnull':
return []
elif lookup_type == 'year':
if isinstance(self, DateTimeField):
return connection.ops.year_lookup_bounds_for_datetime_field(value)
elif isinstance(self, DateField):
return connection.ops.year_lookup_bounds_for_date_field(value)
else:
return [value] # this isn't supposed to happen
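    # Sketch: a 'contains' lookup for 'foo' yields the single SQL parameter
    # '%foo%' (prep_for_like_query() having escaped %, _ and \), while
    # 'isnull' contributes no parameters at all.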
def has_default(self):
"""
Returns a boolean of whether this field has a default value.
"""
return self.default is not NOT_PROVIDED
def get_default(self):
"""
Returns the default value for this field.
"""
if self.has_default():
if callable(self.default):
return self.default()
return force_text(self.default, strings_only=True)
if (not self.empty_strings_allowed or (self.null and
not connection.features.interprets_empty_strings_as_nulls)):
return None
return ""
def get_validator_unique_lookup_type(self):
return '%s__exact' % self.name
def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH):
"""Returns choices with a default blank choices included, for use
as SelectField choices for this field."""
first_choice = blank_choice if include_blank else []
if self.choices:
return first_choice + list(self.choices)
rel_model = self.rel.to
if hasattr(self.rel, 'get_related_field'):
lst = [(getattr(x, self.rel.get_related_field().attname),
smart_text(x))
for x in rel_model._default_manager.complex_filter(
self.rel.limit_choices_to)]
else:
lst = [(x._get_pk_val(), smart_text(x))
for x in rel_model._default_manager.complex_filter(
self.rel.limit_choices_to)]
return first_choice + lst
def get_choices_default(self):
return self.get_choices()
def get_flatchoices(self, include_blank=True,
blank_choice=BLANK_CHOICE_DASH):
"""
Returns flattened choices with a default blank choice included.
"""
first_choice = blank_choice if include_blank else []
return first_choice + list(self.flatchoices)
def _get_val_from_obj(self, obj):
if obj is not None:
return getattr(obj, self.attname)
else:
return self.get_default()
def value_to_string(self, obj):
"""
Returns a string value of this field from the passed obj.
This is used by the serialization framework.
"""
return smart_text(self._get_val_from_obj(obj))
def bind(self, fieldmapping, original, bound_field_class):
return bound_field_class(self, fieldmapping, original)
def _get_choices(self):
if is_iterator(self._choices):
choices, self._choices = tee(self._choices)
return choices
else:
return self._choices
choices = property(_get_choices)
def _get_flatchoices(self):
"""Flattened version of choices tuple."""
flat = []
for choice, value in self.choices:
if isinstance(value, (list, tuple)):
flat.extend(value)
else:
                flat.append((choice, value))
return flat
flatchoices = property(_get_flatchoices)
def save_form_data(self, instance, data):
setattr(instance, self.name, data)
def formfield(self, form_class=None, choices_form_class=None, **kwargs):
"""
Returns a django.forms.Field instance for this database Field.
"""
defaults = {'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text}
if self.has_default():
if callable(self.default):
defaults['initial'] = self.default
defaults['show_hidden_initial'] = True
else:
defaults['initial'] = self.get_default()
if self.choices:
# Fields with choices get special treatment.
include_blank = (self.blank or
not (self.has_default() or 'initial' in kwargs))
defaults['choices'] = self.get_choices(include_blank=include_blank)
defaults['coerce'] = self.to_python
if self.null:
defaults['empty_value'] = None
if choices_form_class is not None:
form_class = choices_form_class
else:
form_class = forms.TypedChoiceField
# Many of the subclass-specific formfield arguments (min_value,
# max_value) don't apply for choice fields, so be sure to only pass
# the values that TypedChoiceField will understand.
for k in list(kwargs):
if k not in ('coerce', 'empty_value', 'choices', 'required',
'widget', 'label', 'initial', 'help_text',
'error_messages', 'show_hidden_initial'):
del kwargs[k]
defaults.update(kwargs)
if form_class is None:
form_class = forms.CharField
return form_class(**defaults)
def value_from_object(self, obj):
"""
Returns the value of this field in the given model instance.
"""
return getattr(obj, self.attname)
def __repr__(self):
"""
Displays the module, class and name of the field.
"""
path = '%s.%s' % (self.__class__.__module__, self.__class__.__name__)
name = getattr(self, 'name', None)
if name is not None:
return '<%s: %s>' % (path, name)
return '<%s>' % path
class AutoField(Field):
description = _("Integer")
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be an integer."),
}
def __init__(self, *args, **kwargs):
assert kwargs.get('primary_key', False) is True, \
"%ss must have primary_key=True." % self.__class__.__name__
kwargs['blank'] = True
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "AutoField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def validate(self, value, model_instance):
pass
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
value = connection.ops.validate_autopk_value(value)
return value
def get_prep_value(self, value):
if value is None:
return None
return int(value)
def contribute_to_class(self, cls, name):
assert not cls._meta.has_auto_field, \
"A model can't have more than one AutoField."
super(AutoField, self).contribute_to_class(cls, name)
cls._meta.has_auto_field = True
cls._meta.auto_field = self
def formfield(self, **kwargs):
return None
class BooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be either True or False."),
}
description = _("Boolean (Either True or False)")
def __init__(self, *args, **kwargs):
kwargs['blank'] = True
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "BooleanField"
def to_python(self, value):
if value in (True, False):
            # if value is 1 or 0, then it's equal to True or False, but we want
            # to return a true bool for semantic reasons.
return bool(value)
if value in ('t', 'True', '1'):
return True
if value in ('f', 'False', '0'):
return False
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
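    # Sketch: to_python(1) -> True, to_python('f') -> False, and
    # to_python('maybe') raises ValidationError with code 'invalid'.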
def get_prep_lookup(self, lookup_type, value):
# Special-case handling for filters coming from a Web request (e.g. the
# admin interface). Only works for scalar values (not lists). If you're
# passing in a list, you might as well make things the right type when
# constructing the list.
if value in ('1', '0'):
value = bool(int(value))
return super(BooleanField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
if value is None:
return None
return bool(value)
def formfield(self, **kwargs):
# Unlike most fields, BooleanField figures out include_blank from
# self.null instead of self.blank.
if self.choices:
include_blank = (self.null or
not (self.has_default() or 'initial' in kwargs))
defaults = {'choices': self.get_choices(
include_blank=include_blank)}
else:
defaults = {'form_class': forms.BooleanField}
defaults.update(kwargs)
return super(BooleanField, self).formfield(**defaults)
class CharField(Field):
description = _("String (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
super(CharField, self).__init__(*args, **kwargs)
self.validators.append(validators.MaxLengthValidator(self.max_length))
def get_internal_type(self):
return "CharField"
def to_python(self, value):
if isinstance(value, six.string_types) or value is None:
return value
return smart_text(value)
def get_prep_value(self, value):
return self.to_python(value)
def formfield(self, **kwargs):
        # Passing max_length to forms.CharField means that the value's length
        # will be validated twice. This is considered acceptable since we want
        # the value in the form field (to pass it into the widget, for example).
defaults = {'max_length': self.max_length}
defaults.update(kwargs)
return super(CharField, self).formfield(**defaults)
# TODO: Maybe move this into contrib, because it's specialized.
class CommaSeparatedIntegerField(CharField):
default_validators = [validators.validate_comma_separated_integer_list]
description = _("Comma-separated integers")
def formfield(self, **kwargs):
defaults = {
'error_messages': {
'invalid': _('Enter only digits separated by commas.'),
}
}
defaults.update(kwargs)
return super(CommaSeparatedIntegerField, self).formfield(**defaults)
class DateField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value has an invalid date format. It must be "
"in YYYY-MM-DD format."),
'invalid_date': _("'%(value)s' value has the correct format (YYYY-MM-DD) "
"but it is an invalid date."),
}
description = _("Date (without time)")
def __init__(self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
Field.__init__(self, verbose_name, name, **kwargs)
def get_internal_type(self):
return "DateField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
if settings.USE_TZ and timezone.is_aware(value):
# Convert aware datetimes to the default time zone
# before casting them to dates (#17742).
default_timezone = timezone.get_default_timezone()
value = timezone.make_naive(value, default_timezone)
return value.date()
if isinstance(value, datetime.date):
return value
try:
parsed = parse_date(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_date'],
code='invalid_date',
params={'value': value},
)
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.date.today()
setattr(model_instance, self.attname, value)
return value
else:
return super(DateField, self).pre_save(model_instance, add)
def contribute_to_class(self, cls, name):
        super(DateField, self).contribute_to_class(cls, name)
if not self.null:
setattr(cls, 'get_next_by_%s' % self.name,
curry(cls._get_next_or_previous_by_FIELD, field=self,
is_next=True))
setattr(cls, 'get_previous_by_%s' % self.name,
curry(cls._get_next_or_previous_by_FIELD, field=self,
is_next=False))
def get_prep_lookup(self, lookup_type, value):
        # For date lookups, convert the value to an int
# so the database backend always sees a consistent type.
if lookup_type in ('month', 'day', 'week_day', 'hour', 'minute', 'second'):
return int(value)
return super(DateField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts dates into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_date(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.DateField}
defaults.update(kwargs)
return super(DateField, self).formfield(**defaults)
class DateTimeField(DateField):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value has an invalid format. It must be in "
"YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format."),
'invalid_date': _("'%(value)s' value has the correct format "
"(YYYY-MM-DD) but it is an invalid date."),
'invalid_datetime': _("'%(value)s' value has the correct format "
"(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) "
"but it is an invalid date/time."),
}
description = _("Date (with time)")
# __init__ is inherited from DateField
def get_internal_type(self):
return "DateTimeField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
return value
if isinstance(value, datetime.date):
value = datetime.datetime(value.year, value.month, value.day)
if settings.USE_TZ:
# For backwards compatibility, interpret naive datetimes in
# local time. This won't work during DST change, but we can't
# do much about it, so we let the exceptions percolate up the
# call stack.
warnings.warn("DateTimeField %s.%s received a naive datetime "
"(%s) while time zone support is active." %
(self.model.__name__, self.name, value),
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
try:
parsed = parse_datetime(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_datetime'],
code='invalid_datetime',
params={'value': value},
)
try:
parsed = parse_date(value)
if parsed is not None:
return datetime.datetime(parsed.year, parsed.month, parsed.day)
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_date'],
code='invalid_date',
params={'value': value},
)
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
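    # Sketch of the fallback chain above (USE_TZ=False assumed):
    #   to_python('2013-05-01 12:30:00')  # -> datetime(2013, 5, 1, 12, 30)
    #   to_python('2013-05-01')           # -> datetime(2013, 5, 1, 0, 0)
    #   to_python('not-a-date')           # -> ValidationError, code 'invalid'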
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = timezone.now()
setattr(model_instance, self.attname, value)
return value
else:
return super(DateTimeField, self).pre_save(model_instance, add)
    # contribute_to_class is inherited from DateField; it registers
    # get_next_by_FOO and get_previous_by_FOO
# get_prep_lookup is inherited from DateField
def get_prep_value(self, value):
value = self.to_python(value)
if value is not None and settings.USE_TZ and timezone.is_naive(value):
# For backwards compatibility, interpret naive datetimes in local
# time. This won't work during DST change, but we can't do much
# about it, so we let the exceptions percolate up the call stack.
warnings.warn("DateTimeField %s.%s received a naive datetime (%s)"
" while time zone support is active." %
(self.model.__name__, self.name, value),
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
def get_db_prep_value(self, value, connection, prepared=False):
# Casts datetimes into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_datetime(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.DateTimeField}
defaults.update(kwargs)
return super(DateTimeField, self).formfield(**defaults)
class DecimalField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be a decimal number."),
}
description = _("Decimal number")
def __init__(self, verbose_name=None, name=None, max_digits=None,
decimal_places=None, **kwargs):
self.max_digits, self.decimal_places = max_digits, decimal_places
Field.__init__(self, verbose_name, name, **kwargs)
def get_internal_type(self):
return "DecimalField"
def to_python(self, value):
if value is None:
return value
try:
return decimal.Decimal(value)
except decimal.InvalidOperation:
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def _format(self, value):
if isinstance(value, six.string_types) or value is None:
return value
else:
return self.format_number(value)
def format_number(self, value):
"""
Formats a number into a string with the requisite number of digits and
decimal places.
"""
# Method moved to django.db.backends.util.
#
# It is preserved because it is used by the oracle backend
# (django.db.backends.oracle.query), and also for
# backwards-compatibility with any external code which may have used
# this method.
from django.db.backends import util
return util.format_number(value, self.max_digits, self.decimal_places)
def get_db_prep_save(self, value, connection):
return connection.ops.value_to_db_decimal(self.to_python(value),
self.max_digits, self.decimal_places)
def get_prep_value(self, value):
return self.to_python(value)
def formfield(self, **kwargs):
defaults = {
'max_digits': self.max_digits,
'decimal_places': self.decimal_places,
'form_class': forms.DecimalField,
}
defaults.update(kwargs)
return super(DecimalField, self).formfield(**defaults)
class EmailField(CharField):
default_validators = [validators.validate_email]
description = _("Email address")
def __init__(self, *args, **kwargs):
# max_length should be overridden to 254 characters to be fully
# compliant with RFCs 3696 and 5321
kwargs['max_length'] = kwargs.get('max_length', 75)
CharField.__init__(self, *args, **kwargs)
def formfield(self, **kwargs):
# As with CharField, this will cause email validation to be performed
# twice.
defaults = {
'form_class': forms.EmailField,
}
defaults.update(kwargs)
return super(EmailField, self).formfield(**defaults)
class FilePathField(Field):
description = _("File path")
def __init__(self, verbose_name=None, name=None, path='', match=None,
recursive=False, allow_files=True, allow_folders=False, **kwargs):
self.path, self.match, self.recursive = path, match, recursive
self.allow_files, self.allow_folders = allow_files, allow_folders
kwargs['max_length'] = kwargs.get('max_length', 100)
Field.__init__(self, verbose_name, name, **kwargs)
def get_prep_value(self, value):
value = super(FilePathField, self).get_prep_value(value)
if value is None:
return None
return six.text_type(value)
def formfield(self, **kwargs):
defaults = {
'path': self.path,
'match': self.match,
'recursive': self.recursive,
'form_class': forms.FilePathField,
'allow_files': self.allow_files,
'allow_folders': self.allow_folders,
}
defaults.update(kwargs)
return super(FilePathField, self).formfield(**defaults)
def get_internal_type(self):
return "FilePathField"
class FloatField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be a float."),
}
description = _("Floating point number")
def get_prep_value(self, value):
if value is None:
return None
return float(value)
def get_internal_type(self):
return "FloatField"
def to_python(self, value):
if value is None:
return value
try:
return float(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def formfield(self, **kwargs):
defaults = {'form_class': forms.FloatField}
defaults.update(kwargs)
return super(FloatField, self).formfield(**defaults)
class IntegerField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be an integer."),
}
description = _("Integer")
def get_prep_value(self, value):
if value is None:
return None
return int(value)
def get_prep_lookup(self, lookup_type, value):
if ((lookup_type == 'gte' or lookup_type == 'lt')
and isinstance(value, float)):
value = math.ceil(value)
return super(IntegerField, self).get_prep_lookup(lookup_type, value)
def get_internal_type(self):
return "IntegerField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def formfield(self, **kwargs):
defaults = {'form_class': forms.IntegerField}
defaults.update(kwargs)
return super(IntegerField, self).formfield(**defaults)
class BigIntegerField(IntegerField):
empty_strings_allowed = False
description = _("Big (8 byte) integer")
MAX_BIGINT = 9223372036854775807
def get_internal_type(self):
return "BigIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': -BigIntegerField.MAX_BIGINT - 1,
'max_value': BigIntegerField.MAX_BIGINT}
defaults.update(kwargs)
return super(BigIntegerField, self).formfield(**defaults)
class IPAddressField(Field):
empty_strings_allowed = False
description = _("IPv4 address")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 15
Field.__init__(self, *args, **kwargs)
def get_prep_value(self, value):
value = super(IPAddressField, self).get_prep_value(value)
if value is None:
return None
return six.text_type(value)
def get_internal_type(self):
return "IPAddressField"
def formfield(self, **kwargs):
defaults = {'form_class': forms.IPAddressField}
defaults.update(kwargs)
return super(IPAddressField, self).formfield(**defaults)
class GenericIPAddressField(Field):
empty_strings_allowed = True
description = _("IP address")
default_error_messages = {}
def __init__(self, verbose_name=None, name=None, protocol='both',
unpack_ipv4=False, *args, **kwargs):
self.unpack_ipv4 = unpack_ipv4
self.protocol = protocol
self.default_validators, invalid_error_message = \
validators.ip_address_validators(protocol, unpack_ipv4)
self.default_error_messages['invalid'] = invalid_error_message
kwargs['max_length'] = 39
Field.__init__(self, verbose_name, name, *args, **kwargs)
def get_internal_type(self):
return "GenericIPAddressField"
def to_python(self, value):
if value and ':' in value:
return clean_ipv6_address(value,
self.unpack_ipv4, self.error_messages['invalid'])
return value
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
return value or None
def get_prep_value(self, value):
if value is None:
return value
if value and ':' in value:
try:
return clean_ipv6_address(value, self.unpack_ipv4)
except exceptions.ValidationError:
pass
return six.text_type(value)
def formfield(self, **kwargs):
defaults = {
'protocol': self.protocol,
'form_class': forms.GenericIPAddressField,
}
defaults.update(kwargs)
return super(GenericIPAddressField, self).formfield(**defaults)
class NullBooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be either None, True or False."),
}
description = _("Boolean (Either True, False or None)")
def __init__(self, *args, **kwargs):
kwargs['null'] = True
kwargs['blank'] = True
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "NullBooleanField"
def to_python(self, value):
if value is None:
return None
if value in (True, False):
return bool(value)
if value in ('None',):
return None
if value in ('t', 'True', '1'):
return True
if value in ('f', 'False', '0'):
return False
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def get_prep_lookup(self, lookup_type, value):
# Special-case handling for filters coming from a Web request (e.g. the
# admin interface). Only works for scalar values (not lists). If you're
# passing in a list, you might as well make things the right type when
# constructing the list.
if value in ('1', '0'):
value = bool(int(value))
return super(NullBooleanField, self).get_prep_lookup(lookup_type,
value)
def get_prep_value(self, value):
if value is None:
return None
return bool(value)
def formfield(self, **kwargs):
defaults = {
'form_class': forms.NullBooleanField,
'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text}
defaults.update(kwargs)
return super(NullBooleanField, self).formfield(**defaults)
class PositiveIntegerField(IntegerField):
description = _("Positive integer")
def get_internal_type(self):
return "PositiveIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': 0}
defaults.update(kwargs)
return super(PositiveIntegerField, self).formfield(**defaults)
class PositiveSmallIntegerField(IntegerField):
description = _("Positive small integer")
def get_internal_type(self):
return "PositiveSmallIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': 0}
defaults.update(kwargs)
return super(PositiveSmallIntegerField, self).formfield(**defaults)
class SlugField(CharField):
default_validators = [validators.validate_slug]
description = _("Slug (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 50)
# Set db_index=True unless it's been set manually.
if 'db_index' not in kwargs:
kwargs['db_index'] = True
super(SlugField, self).__init__(*args, **kwargs)
def get_internal_type(self):
return "SlugField"
def formfield(self, **kwargs):
defaults = {'form_class': forms.SlugField}
defaults.update(kwargs)
return super(SlugField, self).formfield(**defaults)
class SmallIntegerField(IntegerField):
description = _("Small integer")
def get_internal_type(self):
return "SmallIntegerField"
class TextField(Field):
description = _("Text")
def get_internal_type(self):
return "TextField"
def get_prep_value(self, value):
if isinstance(value, six.string_types) or value is None:
return value
return smart_text(value)
def formfield(self, **kwargs):
defaults = {'widget': forms.Textarea}
defaults.update(kwargs)
return super(TextField, self).formfield(**defaults)
class TimeField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value has an invalid format. It must be in "
"HH:MM[:ss[.uuuuuu]] format."),
'invalid_time': _("'%(value)s' value has the correct format "
"(HH:MM[:ss[.uuuuuu]]) but it is an invalid time."),
}
description = _("Time")
def __init__(self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
Field.__init__(self, verbose_name, name, **kwargs)
def get_internal_type(self):
return "TimeField"
def to_python(self, value):
if value is None:
return None
if isinstance(value, datetime.time):
return value
if isinstance(value, datetime.datetime):
# Not usually a good idea to pass in a datetime here (it loses
# information), but this can be a side-effect of interacting with a
# database backend (e.g. Oracle), so we'll be accommodating.
return value.time()
try:
parsed = parse_time(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_time'],
code='invalid_time',
params={'value': value},
)
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.datetime.now().time()
setattr(model_instance, self.attname, value)
return value
else:
return super(TimeField, self).pre_save(model_instance, add)
def get_prep_value(self, value):
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts times into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_time(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.TimeField}
defaults.update(kwargs)
return super(TimeField, self).formfield(**defaults)
class URLField(CharField):
default_validators = [validators.URLValidator()]
description = _("URL")
def __init__(self, verbose_name=None, name=None, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 200)
CharField.__init__(self, verbose_name, name, **kwargs)
def formfield(self, **kwargs):
# As with CharField, this will cause URL validation to be performed
# twice.
defaults = {
'form_class': forms.URLField,
}
defaults.update(kwargs)
return super(URLField, self).formfield(**defaults)
class BinaryField(Field):
description = _("Raw binary data")
empty_values = [None, b'']
def __init__(self, *args, **kwargs):
kwargs['editable'] = False
super(BinaryField, self).__init__(*args, **kwargs)
if self.max_length is not None:
self.validators.append(validators.MaxLengthValidator(self.max_length))
def get_internal_type(self):
return "BinaryField"
def get_default(self):
if self.has_default() and not callable(self.default):
return self.default
default = super(BinaryField, self).get_default()
if default == '':
return b''
return default
def get_db_prep_value(self, value, connection, prepared=False):
        value = super(BinaryField, self).get_db_prep_value(
            value, connection, prepared)
if value is not None:
return connection.Database.Binary(value)
return value
def value_to_string(self, obj):
"""Binary data is serialized as base64"""
return b64encode(force_bytes(self._get_val_from_obj(obj))).decode('ascii')
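    # e.g. (illustrative) b'\x00\xff' serializes to the base64 text 'AP8='.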
def to_python(self, value):
# If it's a string, it should be base64-encoded data
if isinstance(value, six.text_type):
return six.memoryview(b64decode(force_bytes(value)))
return value
| apache-2.0 |
jandersson/website | lib/flask/config.py | 781 | 6234 | # -*- coding: utf-8 -*-
"""
flask.config
~~~~~~~~~~~~
Implements the configuration related objects.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import imp
import os
import errno
from werkzeug.utils import import_string
from ._compat import string_types
class ConfigAttribute(object):
"""Makes an attribute forward to the config"""
def __init__(self, name, get_converter=None):
self.__name__ = name
self.get_converter = get_converter
def __get__(self, obj, type=None):
if obj is None:
return self
rv = obj.config[self.__name__]
if self.get_converter is not None:
rv = self.get_converter(rv)
return rv
def __set__(self, obj, value):
obj.config[self.__name__] = value
class Config(dict):
"""Works exactly like a dict but provides ways to fill it from files
or special dictionaries. There are two common patterns to populate the
config.
Either you can fill the config from a config file::
app.config.from_pyfile('yourconfig.cfg')
Or alternatively you can define the configuration options in the
module that calls :meth:`from_object` or provide an import path to
a module that should be loaded. It is also possible to tell it to
use the same module and with that provide the configuration values
just before the call::
DEBUG = True
SECRET_KEY = 'development key'
app.config.from_object(__name__)
In both cases (loading from any Python file or loading from modules),
only uppercase keys are added to the config. This makes it possible to use
lowercase values in the config file for temporary values that are not added
to the config or to define the config keys in the same file that implements
the application.
Probably the most interesting way to load configurations is from an
environment variable pointing to a file::
app.config.from_envvar('YOURAPPLICATION_SETTINGS')
In this case before launching the application you have to set this
environment variable to the file you want to use. On Linux and OS X
use the export statement::
export YOURAPPLICATION_SETTINGS='/path/to/config/file'
    On Windows use `set` instead.
:param root_path: path to which files are read relative from. When the
config object is created by the application, this is
the application's :attr:`~flask.Flask.root_path`.
:param defaults: an optional dictionary of default values
"""
def __init__(self, root_path, defaults=None):
dict.__init__(self, defaults or {})
self.root_path = root_path
def from_envvar(self, variable_name, silent=False):
"""Loads a configuration from an environment variable pointing to
a configuration file. This is basically just a shortcut with nicer
error messages for this line of code::
app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS'])
:param variable_name: name of the environment variable
:param silent: set to `True` if you want silent failure for missing
files.
:return: bool. `True` if able to load config, `False` otherwise.
"""
rv = os.environ.get(variable_name)
if not rv:
if silent:
return False
raise RuntimeError('The environment variable %r is not set '
'and as such configuration could not be '
'loaded. Set this variable and make it '
'point to a configuration file' %
variable_name)
return self.from_pyfile(rv, silent=silent)
def from_pyfile(self, filename, silent=False):
"""Updates the values in the config from a Python file. This function
behaves as if the file was imported as module with the
:meth:`from_object` function.
:param filename: the filename of the config. This can either be an
absolute filename or a filename relative to the
root path.
:param silent: set to `True` if you want silent failure for missing
files.
.. versionadded:: 0.7
`silent` parameter.
"""
filename = os.path.join(self.root_path, filename)
d = imp.new_module('config')
d.__file__ = filename
try:
with open(filename) as config_file:
exec(compile(config_file.read(), filename, 'exec'), d.__dict__)
except IOError as e:
if silent and e.errno in (errno.ENOENT, errno.EISDIR):
return False
e.strerror = 'Unable to load configuration file (%s)' % e.strerror
raise
self.from_object(d)
return True
def from_object(self, obj):
"""Updates the values from the given object. An object can be of one
of the following two types:
- a string: in this case the object with that name will be imported
- an actual object reference: that object is used directly
Objects are usually either modules or classes.
Just the uppercase variables in that object are stored in the config.
Example usage::
app.config.from_object('yourapplication.default_config')
from yourapplication import default_config
app.config.from_object(default_config)
You should not use this function to load the actual configuration but
rather configuration defaults. The actual config should be loaded
with :meth:`from_pyfile` and ideally from a location not within the
package because the package might be installed system wide.
:param obj: an import name or object
"""
if isinstance(obj, string_types):
obj = import_string(obj)
for key in dir(obj):
if key.isupper():
self[key] = getattr(obj, key)
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, dict.__repr__(self))
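# Minimal usage sketch -- the root path, module name and env var below are
# assumptions, not part of this module:
#
#   cfg = Config(root_path='/srv/app')
#   cfg.from_object('yourapplication.default_config')         # defaults first
#   cfg.from_envvar('YOURAPPLICATION_SETTINGS', silent=True)  # site overrides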
| apache-2.0 |
kenshay/ImageScript | ProgramData/Android/ADB/platform-tools/systrace/catapult/telemetry/telemetry/internal/results/csv_pivot_table_output_formatter.py | 5 | 2408 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import csv
from telemetry.internal.results import output_formatter
from telemetry.value import scalar
from telemetry.value import trace
class CsvPivotTableOutputFormatter(output_formatter.OutputFormatter):
"""Output the results as CSV suitable for reading into a spreadsheet.
This will write a header row, and one row for each value. Each value row
contains the value and unit, identifies the value (story_set, page, name), and
(optionally) data from --output-trace-tag. This format matches what
spreadsheet programs expect as input for a "pivot table".
A trace tag (--output-trace-tag) can be used to tag each value, to allow
easy combination of the resulting CSVs from several runs.
If the trace_tag contains a comma, it will be written as several
comma-separated values.
This class only processes scalar values.
"""
FIELDS = ['story_set', 'page', 'name', 'value', 'units', 'run_index']
def __init__(self, output_stream, trace_tag=''):
super(CsvPivotTableOutputFormatter, self).__init__(output_stream)
self._trace_tag = trace_tag
def Format(self, page_test_results):
csv_writer = csv.writer(self.output_stream)
    # Honor the trace tag: split it on commas, one extra column per piece.
tag_values = self._trace_tag.split(',')
tag_values = [x for x in tag_values if x] # filter empty list entries
tag_headers = ['trace_tag_%d' % i for i in range(len(tag_values))]
# Write header.
csv_writer.writerow(self.FIELDS + tag_headers)
# Write all values. Each row contains a value + page-level metadata.
for run in page_test_results.all_page_runs:
run_index = page_test_results.all_page_runs.index(run)
page_dict = {
'page': run.story.display_name,
'story_set': run.story.page_set.Name(),
'run_index': run_index,
}
for value in run.values:
if (isinstance(value, scalar.ScalarValue) or
isinstance(value, trace.TraceValue)):
value_dict = {
'name': value.name,
'value': value.value,
'units': value.units,
}
value_dict.update(page_dict.items())
csv_writer.writerow(
[value_dict[field] for field in self.FIELDS] + tag_values)
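# Sketch of the emitted CSV for a single scalar value with trace tag 'run_a'
# (all row values are illustrative):
#
#   story_set,page,name,value,units,run_index,trace_tag_0
#   my_set,page1,load_time,123,ms,0,run_a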
| gpl-3.0 |
cloudbase/lis-tempest | tempest/services/volume/xml/availability_zone_client.py | 3 | 1525 | # Copyright 2014 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
from tempest.common import rest_client
from tempest.common import xml_utils
from tempest import config
CONF = config.CONF
class BaseVolumeAvailabilityZoneClientXML(rest_client.RestClient):
TYPE = "xml"
def __init__(self, auth_provider):
super(BaseVolumeAvailabilityZoneClientXML, self).__init__(
auth_provider)
self.service = CONF.volume.catalog_type
def _parse_array(self, node):
return [xml_utils.xml_to_json(x) for x in node]
def get_availability_zone_list(self):
resp, body = self.get('os-availability-zone')
availability_zone = self._parse_array(etree.fromstring(body))
self.expected_success(200, resp.status)
return resp, availability_zone
class VolumeAvailabilityZoneClientXML(BaseVolumeAvailabilityZoneClientXML):
"""
Volume V1 availability zone client.
"""
| apache-2.0 |
yelizariev/addons-yelizariev | web_debranding/__manifest__.py | 1 | 1144 | # Copyright 2015-2020 Ivan Yelizariev <https://it-projects.info/team/yelizariev>
# Copyright 2017 Ilmir Karamov <https://it-projects.info/team/ilmir-k>
# Copyright 2018-2019 Kolushov Alexandr <https://it-projects.info/team/KolushovAlexandr>
# Copyright 2018 Ildar Nasyrov <https://it-projects.info/team/iledarn>
# Copyright 2018 WohthaN <https://github.com/WohthaN>
# Copyright 2019 Eugene Molotov <https://github.com/em230418>
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html).
{
"name": "Backend debranding",
"version": "12.0.1.0.29",
"author": "IT-Projects LLC, Ivan Yelizariev",
"license": "LGPL-3",
"category": "Debranding",
"images": ["images/web_debranding.png"],
"website": "https://twitter.com/yelizariev",
"price": 250.00,
"currency": "EUR",
"depends": ["web", "mail", "access_settings_menu"],
"data": ["data.xml", "views.xml", "js.xml", "pre_install.xml"],
"qweb": ["static/src/xml/web.xml"],
"post_load": "post_load",
"auto_install": False,
"uninstall_hook": "uninstall_hook",
"installable": True,
"saas_demo_title": "Backend debranding demo",
}
| lgpl-3.0 |
beomyeol/models | slim/nets/resnet_v2_test.py | 15 | 19147 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.nets.resnet_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from nets import resnet_utils
from nets import resnet_v2
slim = tf.contrib.slim
def create_test_input(batch_size, height, width, channels):
"""Create test input tensor.
Args:
batch_size: The number of images per batch or `None` if unknown.
height: The height of each image or `None` if unknown.
width: The width of each image or `None` if unknown.
channels: The number of channels per image or `None` if unknown.
Returns:
Either a placeholder `Tensor` of dimension
[batch_size, height, width, channels] if any of the inputs are `None` or a
constant `Tensor` with the mesh grid values along the spatial dimensions.
"""
if None in [batch_size, height, width, channels]:
return tf.placeholder(tf.float32, (batch_size, height, width, channels))
else:
return tf.to_float(
np.tile(
np.reshape(
np.reshape(np.arange(height), [height, 1]) +
np.reshape(np.arange(width), [1, width]),
[1, height, width, 1]),
[batch_size, 1, 1, channels]))
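# For example (illustrative), create_test_input(1, 2, 3, 1) fills the spatial
# grid with h + w, i.e. [[0, 1, 2], [1, 2, 3]], tiled over batch and channels.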
class ResnetUtilsTest(tf.test.TestCase):
def testSubsampleThreeByThree(self):
x = tf.reshape(tf.to_float(tf.range(9)), [1, 3, 3, 1])
x = resnet_utils.subsample(x, 2)
expected = tf.reshape(tf.constant([0, 2, 6, 8]), [1, 2, 2, 1])
with self.test_session():
self.assertAllClose(x.eval(), expected.eval())
def testSubsampleFourByFour(self):
x = tf.reshape(tf.to_float(tf.range(16)), [1, 4, 4, 1])
x = resnet_utils.subsample(x, 2)
expected = tf.reshape(tf.constant([0, 2, 8, 10]), [1, 2, 2, 1])
with self.test_session():
self.assertAllClose(x.eval(), expected.eval())
def testConv2DSameEven(self):
n, n2 = 4, 2
# Input image.
x = create_test_input(1, n, n, 1)
# Convolution kernel.
w = create_test_input(1, 3, 3, 1)
w = tf.reshape(w, [3, 3, 1, 1])
tf.get_variable('Conv/weights', initializer=w)
tf.get_variable('Conv/biases', initializer=tf.zeros([1]))
tf.get_variable_scope().reuse_variables()
y1 = slim.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
y1_expected = tf.to_float([[14, 28, 43, 26],
[28, 48, 66, 37],
[43, 66, 84, 46],
[26, 37, 46, 22]])
y1_expected = tf.reshape(y1_expected, [1, n, n, 1])
y2 = resnet_utils.subsample(y1, 2)
y2_expected = tf.to_float([[14, 43],
[43, 84]])
y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1])
y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
y3_expected = y2_expected
y4 = slim.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
y4_expected = tf.to_float([[48, 37],
[37, 22]])
y4_expected = tf.reshape(y4_expected, [1, n2, n2, 1])
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertAllClose(y1.eval(), y1_expected.eval())
self.assertAllClose(y2.eval(), y2_expected.eval())
self.assertAllClose(y3.eval(), y3_expected.eval())
self.assertAllClose(y4.eval(), y4_expected.eval())
def testConv2DSameOdd(self):
n, n2 = 5, 3
# Input image.
x = create_test_input(1, n, n, 1)
# Convolution kernel.
w = create_test_input(1, 3, 3, 1)
w = tf.reshape(w, [3, 3, 1, 1])
tf.get_variable('Conv/weights', initializer=w)
tf.get_variable('Conv/biases', initializer=tf.zeros([1]))
tf.get_variable_scope().reuse_variables()
y1 = slim.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
y1_expected = tf.to_float([[14, 28, 43, 58, 34],
[28, 48, 66, 84, 46],
[43, 66, 84, 102, 55],
[58, 84, 102, 120, 64],
[34, 46, 55, 64, 30]])
y1_expected = tf.reshape(y1_expected, [1, n, n, 1])
y2 = resnet_utils.subsample(y1, 2)
y2_expected = tf.to_float([[14, 43, 34],
[43, 84, 55],
[34, 55, 30]])
y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1])
y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
y3_expected = y2_expected
y4 = slim.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
y4_expected = y2_expected
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
self.assertAllClose(y1.eval(), y1_expected.eval())
self.assertAllClose(y2.eval(), y2_expected.eval())
self.assertAllClose(y3.eval(), y3_expected.eval())
self.assertAllClose(y4.eval(), y4_expected.eval())
def _resnet_plain(self, inputs, blocks, output_stride=None, scope=None):
"""A plain ResNet without extra layers before or after the ResNet blocks."""
with tf.variable_scope(scope, values=[inputs]):
with slim.arg_scope([slim.conv2d], outputs_collections='end_points'):
net = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride)
end_points = dict(tf.get_collection('end_points'))
return net, end_points
def testEndPointsV2(self):
"""Test the end points of a tiny v2 bottleneck network."""
bottleneck = resnet_v2.bottleneck
blocks = [resnet_utils.Block('block1', bottleneck, [(4, 1, 1), (4, 1, 2)]),
resnet_utils.Block('block2', bottleneck, [(8, 2, 1), (8, 2, 1)])]
inputs = create_test_input(2, 32, 16, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_plain(inputs, blocks, scope='tiny')
expected = [
'tiny/block1/unit_1/bottleneck_v2/shortcut',
'tiny/block1/unit_1/bottleneck_v2/conv1',
'tiny/block1/unit_1/bottleneck_v2/conv2',
'tiny/block1/unit_1/bottleneck_v2/conv3',
'tiny/block1/unit_2/bottleneck_v2/conv1',
'tiny/block1/unit_2/bottleneck_v2/conv2',
'tiny/block1/unit_2/bottleneck_v2/conv3',
'tiny/block2/unit_1/bottleneck_v2/shortcut',
'tiny/block2/unit_1/bottleneck_v2/conv1',
'tiny/block2/unit_1/bottleneck_v2/conv2',
'tiny/block2/unit_1/bottleneck_v2/conv3',
'tiny/block2/unit_2/bottleneck_v2/conv1',
'tiny/block2/unit_2/bottleneck_v2/conv2',
'tiny/block2/unit_2/bottleneck_v2/conv3']
self.assertItemsEqual(expected, end_points)
def _stack_blocks_nondense(self, net, blocks):
"""A simplified ResNet Block stacker without output stride control."""
for block in blocks:
with tf.variable_scope(block.scope, 'block', [net]):
for i, unit in enumerate(block.args):
depth, depth_bottleneck, stride = unit
with tf.variable_scope('unit_%d' % (i + 1), values=[net]):
net = block.unit_fn(net,
depth=depth,
depth_bottleneck=depth_bottleneck,
stride=stride,
rate=1)
return net
def _atrousValues(self, bottleneck):
"""Verify the values of dense feature extraction by atrous convolution.
Make sure that dense feature extraction by stack_blocks_dense() followed by
subsampling gives identical results to feature extraction at the nominal
network output stride using the simple self._stack_blocks_nondense() above.
Args:
bottleneck: The bottleneck function.
"""
blocks = [
resnet_utils.Block('block1', bottleneck, [(4, 1, 1), (4, 1, 2)]),
resnet_utils.Block('block2', bottleneck, [(8, 2, 1), (8, 2, 2)]),
resnet_utils.Block('block3', bottleneck, [(16, 4, 1), (16, 4, 2)]),
resnet_utils.Block('block4', bottleneck, [(32, 8, 1), (32, 8, 1)])
]
nominal_stride = 8
# Test both odd and even input dimensions.
height = 30
width = 31
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
with slim.arg_scope([slim.batch_norm], is_training=False):
for output_stride in [1, 2, 4, 8, None]:
with tf.Graph().as_default():
with self.test_session() as sess:
tf.set_random_seed(0)
inputs = create_test_input(1, height, width, 3)
# Dense feature extraction followed by subsampling.
output = resnet_utils.stack_blocks_dense(inputs,
blocks,
output_stride)
if output_stride is None:
factor = 1
else:
factor = nominal_stride // output_stride
output = resnet_utils.subsample(output, factor)
# Make the two networks use the same weights.
tf.get_variable_scope().reuse_variables()
# Feature extraction at the nominal network rate.
expected = self._stack_blocks_nondense(inputs, blocks)
sess.run(tf.global_variables_initializer())
output, expected = sess.run([output, expected])
self.assertAllClose(output, expected, atol=1e-4, rtol=1e-4)
def testAtrousValuesBottleneck(self):
self._atrousValues(resnet_v2.bottleneck)
class ResnetCompleteNetworkTest(tf.test.TestCase):
"""Tests with complete small ResNet v2 networks."""
def _resnet_small(self,
inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
include_root_block=True,
reuse=None,
scope='resnet_v2_small'):
"""A shallow and thin ResNet v2 for faster tests."""
bottleneck = resnet_v2.bottleneck
blocks = [
resnet_utils.Block(
'block1', bottleneck, [(4, 1, 1)] * 2 + [(4, 1, 2)]),
resnet_utils.Block(
'block2', bottleneck, [(8, 2, 1)] * 2 + [(8, 2, 2)]),
resnet_utils.Block(
'block3', bottleneck, [(16, 4, 1)] * 2 + [(16, 4, 2)]),
resnet_utils.Block(
'block4', bottleneck, [(32, 8, 1)] * 2)]
return resnet_v2.resnet_v2(inputs, blocks, num_classes,
is_training=is_training,
global_pool=global_pool,
output_stride=output_stride,
include_root_block=include_root_block,
reuse=reuse,
scope=scope)
def testClassificationEndPoints(self):
global_pool = True
num_classes = 10
inputs = create_test_input(2, 224, 224, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
logits, end_points = self._resnet_small(inputs, num_classes,
global_pool=global_pool,
scope='resnet')
self.assertTrue(logits.op.name.startswith('resnet/logits'))
self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes])
self.assertTrue('predictions' in end_points)
self.assertListEqual(end_points['predictions'].get_shape().as_list(),
[2, 1, 1, num_classes])
def testClassificationShapes(self):
global_pool = True
num_classes = 10
inputs = create_test_input(2, 224, 224, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(inputs, num_classes,
global_pool=global_pool,
scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 28, 28, 4],
'resnet/block2': [2, 14, 14, 8],
'resnet/block3': [2, 7, 7, 16],
'resnet/block4': [2, 7, 7, 32]}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testFullyConvolutionalEndpointShapes(self):
global_pool = False
num_classes = 10
inputs = create_test_input(2, 321, 321, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(inputs, num_classes,
global_pool=global_pool,
scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 41, 41, 4],
'resnet/block2': [2, 21, 21, 8],
'resnet/block3': [2, 11, 11, 16],
'resnet/block4': [2, 11, 11, 32]}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testRootlessFullyConvolutionalEndpointShapes(self):
global_pool = False
num_classes = 10
inputs = create_test_input(2, 128, 128, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(inputs, num_classes,
global_pool=global_pool,
include_root_block=False,
scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 64, 64, 4],
'resnet/block2': [2, 32, 32, 8],
'resnet/block3': [2, 16, 16, 16],
'resnet/block4': [2, 16, 16, 32]}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testAtrousFullyConvolutionalEndpointShapes(self):
global_pool = False
num_classes = 10
output_stride = 8
inputs = create_test_input(2, 321, 321, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(inputs,
num_classes,
global_pool=global_pool,
output_stride=output_stride,
scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 41, 41, 4],
'resnet/block2': [2, 41, 41, 8],
'resnet/block3': [2, 41, 41, 16],
'resnet/block4': [2, 41, 41, 32]}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testAtrousFullyConvolutionalValues(self):
"""Verify dense feature extraction with atrous convolution."""
nominal_stride = 32
for output_stride in [4, 8, 16, 32, None]:
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
with tf.Graph().as_default():
with self.test_session() as sess:
tf.set_random_seed(0)
inputs = create_test_input(2, 81, 81, 3)
# Dense feature extraction followed by subsampling.
output, _ = self._resnet_small(inputs, None,
is_training=False,
global_pool=False,
output_stride=output_stride)
if output_stride is None:
factor = 1
else:
factor = nominal_stride // output_stride
output = resnet_utils.subsample(output, factor)
# Make the two networks use the same weights.
tf.get_variable_scope().reuse_variables()
# Feature extraction at the nominal network rate.
expected, _ = self._resnet_small(inputs, None,
is_training=False,
global_pool=False)
sess.run(tf.global_variables_initializer())
self.assertAllClose(output.eval(), expected.eval(),
atol=1e-4, rtol=1e-4)
def testUnknownBatchSize(self):
batch = 2
height, width = 65, 65
global_pool = True
num_classes = 10
inputs = create_test_input(None, height, width, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
logits, _ = self._resnet_small(inputs, num_classes,
global_pool=global_pool,
scope='resnet')
self.assertTrue(logits.op.name.startswith('resnet/logits'))
self.assertListEqual(logits.get_shape().as_list(),
[None, 1, 1, num_classes])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 1, 1, num_classes))
def testFullyConvolutionalUnknownHeightWidth(self):
batch = 2
height, width = 65, 65
global_pool = False
inputs = create_test_input(batch, None, None, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
output, _ = self._resnet_small(inputs, None,
global_pool=global_pool)
self.assertListEqual(output.get_shape().as_list(),
[batch, None, None, 32])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 3, 3, 32))
def testAtrousFullyConvolutionalUnknownHeightWidth(self):
batch = 2
height, width = 65, 65
global_pool = False
output_stride = 8
inputs = create_test_input(batch, None, None, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
output, _ = self._resnet_small(inputs,
None,
global_pool=global_pool,
output_stride=output_stride)
self.assertListEqual(output.get_shape().as_list(),
[batch, None, None, 32])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 9, 9, 32))
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
byt3bl33d3r/Empire | lib/modules/powershell/management/mailraider/mail_search.py | 10 | 4295 | from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Invoke-MailSearch',
'Author': ['@xorrior'],
'Description': ("Searches the given Outlook folder for items (Emails, Contacts, Tasks, Notes, etc. *Depending on the folder*) and returns any matches found."),
'Background' : True,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : True,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
'Comments': [
'https://github.com/xorrior/EmailRaider',
'http://www.xorrior.com/phishing-on-the-inside/'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'DefaultFolder' : {
'Description' : 'Folder to search in.',
'Required' : True,
'Value' : 'Inbox'
},
'Keywords' : {
'Description' : 'Keyword/s to search for.',
'Required' : True,
'Value' : ''
},
'MaxResults' : {
'Description' : 'Maximum number of results to return.',
'Required' : False,
'Value' : '100'
},
'MaxSearch' : {
'Description' : 'Maximum number of emails to search through.',
'Required' : False,
'Value' : ''
},
'MaxThreads' : {
'Description' : 'Maximum number of threads to use when searching.',
'Required' : True,
'Value' : '15'
},
'File' : {
'Description' : 'Path to results file (instead of stdout).',
'Required' : False,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
moduleName = self.info["Name"]
# read in the common powerview.ps1 module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/management/MailRaider.ps1"
if obfuscate:
helpers.obfuscate_module(moduleSource=moduleSource, obfuscationCommand=obfuscationCommand)
moduleSource = moduleSource.replace("module_source", "obfuscated_module_source")
try:
f = open(moduleSource, 'r')
except:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
script = moduleCode + "\n"
scriptEnd = moduleName + " "
for option,values in self.options.iteritems():
if option.lower() != "agent":
if values['Value'] and values['Value'] != '':
if values['Value'].lower() == "true":
# if we're just adding a switch
scriptEnd += " -" + str(option)
else:
scriptEnd += " -" + str(option) + " " + str(values['Value'])
scriptEnd += ' | Out-String | %{$_ + \"`n\"};"`n'+str(moduleName)+' completed!"'
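# Illustrative result with the default options (<kw> stands for the
# user-supplied keyword; exact output depends on the options set):
# Invoke-MailSearch -DefaultFolder Inbox -Keywords <kw> -MaxResults 100 -MaxThreads 15 | Out-String | %{$_ + "`n"};"Invoke-MailSearch completed!"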
if obfuscate:
scriptEnd = helpers.obfuscate(self.mainMenu.installPath, psScript=scriptEnd, obfuscationCommand=obfuscationCommand)
script += scriptEnd
return script
| bsd-3-clause |
msabramo/kallithea | kallithea/lib/caching_query.py | 2 | 10467 | """caching_query.py
Represent persistence structures which allow the usage of
Beaker caching with SQLAlchemy.
The three new concepts introduced here are:
* CachingQuery - a Query subclass that caches and
retrieves results in/from Beaker.
* FromCache - a query option that establishes caching
parameters on a Query
* RelationshipCache - a variant of FromCache which is specific
to a query invoked during a lazy load.
* _params_from_query - extracts value parameters from
a Query.
The rest of what's here are standard SQLAlchemy and
Beaker constructs.
"""
import beaker
from beaker.exceptions import BeakerException
from sqlalchemy.orm.interfaces import MapperOption
from sqlalchemy.orm.query import Query
from sqlalchemy.sql import visitors
from kallithea.lib.utils2 import safe_str
class CachingQuery(Query):
"""A Query subclass which optionally loads full results from a Beaker
cache region.
The CachingQuery stores additional state that allows it to consult
a Beaker cache before accessing the database:
* A "region", which is a cache region argument passed to a
Beaker CacheManager, specifies a particular cache configuration
(including backend implementation, expiration times, etc.)
* A "namespace", which is a qualifying name that identifies a
group of keys within the cache. A query that filters on a name
might use the name "by_name", a query that filters on a date range
to a joined table might use the name "related_date_range".
When the above state is present, a Beaker cache is retrieved.
The "namespace" name is first concatenated with
a string composed of the individual entities and columns the Query
requests, i.e. such as ``Query(User.id, User.name)``.
The Beaker cache is then loaded from the cache manager based
on the region and composed namespace. The key within the cache
itself is then constructed against the bind parameters specified
by this query, which are usually literals defined in the
WHERE clause.
The FromCache and RelationshipCache mapper options below represent
the "public" method of configuring this state upon the CachingQuery.
"""
def __init__(self, manager, *args, **kw):
self.cache_manager = manager
Query.__init__(self, *args, **kw)
def __iter__(self):
"""override __iter__ to pull results from Beaker
if particular attributes have been configured.
Note that this approach does *not* detach the loaded objects from
the current session. If the cache backend is an in-process cache
(like "memory") and lives beyond the scope of the current session's
transaction, those objects may be expired. The method here can be
modified to first expunge() each loaded item from the current
session before returning the list of items, so that the items
in the cache are not the same ones in the current Session.
"""
if hasattr(self, '_cache_parameters'):
return self.get_value(createfunc=lambda:
list(Query.__iter__(self)))
else:
return Query.__iter__(self)
def invalidate(self):
"""Invalidate the value represented by this Query."""
cache, cache_key = _get_cache_parameters(self)
cache.remove(cache_key)
def get_value(self, merge=True, createfunc=None):
"""Return the value from the cache for this query.
Raise KeyError if no value present and no
createfunc specified.
"""
cache, cache_key = _get_cache_parameters(self)
ret = cache.get_value(cache_key, createfunc=createfunc)
if merge:
ret = self.merge_result(ret, load=False)
return ret
def set_value(self, value):
"""Set the value in the cache for this query."""
cache, cache_key = _get_cache_parameters(self)
cache.put(cache_key, value)
def query_callable(manager, query_cls=CachingQuery):
def query(*arg, **kw):
return query_cls(manager, *arg, **kw)
return query
def get_cache_region(name, region):
if region not in beaker.cache.cache_regions:
raise BeakerException('Cache region `%s` not configured. '
'Check that proper cache settings are in the .ini files' % region)
kw = beaker.cache.cache_regions[region]
return beaker.cache.Cache._get_cache(name, kw)
def _get_cache_parameters(query):
"""For a query with cache_region and cache_namespace configured,
return the corresponding Cache instance and cache key, based
on this query's current criterion and parameter values.
"""
if not hasattr(query, '_cache_parameters'):
raise ValueError("This Query does not have caching "
"parameters configured.")
region, namespace, cache_key = query._cache_parameters
namespace = _namespace_from_query(namespace, query)
if cache_key is None:
# cache key - the value arguments from this query's parameters.
args = [safe_str(x) for x in _params_from_query(query)]
args.extend(filter(lambda k: k not in ['None', None, u'None'],
[str(query._limit), str(query._offset)]))
cache_key = " ".join(args)
if cache_key is None:
raise Exception('Cache key cannot be None')
# get cache
#cache = query.cache_manager.get_cache_region(namespace, region)
cache = get_cache_region(namespace, region)
# optional - hash the cache_key too for consistent length
# import uuid
# cache_key= str(uuid.uuid5(uuid.NAMESPACE_DNS, cache_key))
return cache, cache_key
def _namespace_from_query(namespace, query):
# cache namespace - the token handed in by the
# option + class we're querying against
namespace = " ".join([namespace] + [str(x) for x in query._entities])
# memcached wants this
namespace = namespace.replace(' ', '_')
return namespace
def _set_cache_parameters(query, region, namespace, cache_key):
if hasattr(query, '_cache_parameters'):
region, namespace, cache_key = query._cache_parameters
raise ValueError("This query is already configured "
"for region %r namespace %r" %
(region, namespace)
)
query._cache_parameters = region, safe_str(namespace), cache_key
class FromCache(MapperOption):
"""Specifies that a Query should load results from a cache."""
propagate_to_loaders = False
def __init__(self, region, namespace, cache_key=None):
"""Construct a new FromCache.
:param region: the cache region. Should be a
region configured in the Beaker CacheManager.
:param namespace: the cache namespace. Should
be a name uniquely describing the target Query's
lexical structure.
:param cache_key: optional. A string cache key
that will serve as the key to the query. Use this
if your query has a huge amount of parameters (such
as when using in_()) which correspond more simply to
some other identifier.
"""
self.region = region
self.namespace = namespace
self.cache_key = cache_key
def process_query(self, query):
"""Process a Query during normal loading operation."""
_set_cache_parameters(query, self.region, self.namespace,
self.cache_key)
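# Minimal usage sketch (not part of this module). Assuming the Session was
# created with query_cls=query_callable(manager), a mapped class User, and
# a Beaker region named "default_term" configured in the .ini files:
#
# q = Session.query(User).filter(User.name == 'ed')
# q = q.options(FromCache("default_term", "by_name"))
# user = q.one() # first call runs SQL and fills the cache
# user = q.one() # identical queries are then served from Beaker
# q.invalidate() # drop the cached rows after User changes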
class RelationshipCache(MapperOption):
"""Specifies that a Query as called within a "lazy load"
should load results from a cache."""
propagate_to_loaders = True
def __init__(self, region, namespace, attribute):
"""Construct a new RelationshipCache.
:param region: the cache region. Should be a
region configured in the Beaker CacheManager.
:param namespace: the cache namespace. Should
be a name uniquely describing the target Query's
lexical structure.
:param attribute: A Class.attribute which
indicates a particular class relationship() whose
lazy loader should be pulled from the cache.
"""
self.region = region
self.namespace = namespace
self._relationship_options = {
(attribute.property.parent.class_, attribute.property.key): self
}
def process_query_conditionally(self, query):
"""Process a Query that is used within a lazy loader.
(the process_query_conditionally() method is a SQLAlchemy
hook invoked only within lazyload.)
"""
if query._current_path:
mapper, key = query._current_path[-2:]
for cls in mapper.class_.__mro__:
if (cls, key) in self._relationship_options:
relationship_option = \
self._relationship_options[(cls, key)]
_set_cache_parameters(
query,
relationship_option.region,
relationship_option.namespace,
None)
def and_(self, option):
"""Chain another RelationshipCache option to this one.
While many RelationshipCache objects can be specified on a single
Query separately, chaining them together allows for a more efficient
lookup during load.
"""
self._relationship_options.update(option._relationship_options)
return self
def _params_from_query(query):
"""Pull the bind parameter values from a query.
This takes into account any scalar attribute bindparam set up.
E.g. params_from_query(query.filter(Cls.foo==5).filter(Cls.bar==7)))
would return [5, 7].
"""
v = []
def visit_bindparam(bind):
if bind.key in query._params:
value = query._params[bind.key]
elif bind.callable:
# lazyloader may dig a callable in here, intended
# to late-evaluate params after autoflush is called.
# convert to a scalar value.
value = bind.callable()
else:
value = bind.value
v.append(value)
if query._criterion is not None:
visitors.traverse(query._criterion, {}, {'bindparam':visit_bindparam})
for f in query._from_obj:
visitors.traverse(f, {}, {'bindparam':visit_bindparam})
return v
| gpl-3.0 |
FreeScienceCommunity/or-tools | tools/setup.py | 5 | 2452 | from setuptools import setup, Extension
from os.path import join as pjoin
from os.path import dirname
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(pjoin(dirname(__file__), fname)).read()
dummy_module = Extension('dummy_ortools_dependency',
sources = ['dummy/dummy_ortools_dependency.cc'],
DELETEUNIX extra_link_args=['/MANIFEST'],
)
setup(
name='ortools',
version='2.VVVV',
packages=[
'ortools',
'ortools.algorithms',
'ortools.constraint_solver',
'ortools.graph',
'ortools.linear_solver',],
ext_modules = [dummy_module],
install_requires = [
'google-apputils >= 0.4',
'protobuf >= 2.5.0'],
dependency_links = ['http://google-apputils-python.googlecode.com/files/'],
package_data = {
'ortools.constraint_solver' : ['_pywrapcp.dll'],
'ortools.linear_solver' : ['_pywraplp.dll'],
'ortools.graph' : ['_pywrapgraph.dll'],
'ortools.algorithms' : ['_pywrapknapsack_solver.dll'],
DELETEWIN 'ortools' : ['libortools.DLL']
},
license='Apache 2.0',
author = 'Google Inc',
author_email = 'lperron@google.com',
description = 'Google OR-Tools python libraries and modules',
keywords = ('operations research, constraint programming, ' +
'linear programming, ' + 'flow algorithms, ' +
'python'),
url = 'https://developers.google.com/optimization/',
download_url = 'https://github.com/google/or-tools/releases',
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Office/Business :: Scheduling',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development :: Libraries :: Python Modules'],
long_description = read('README.txt'),
)
| apache-2.0 |
AxelDelmas/ansible | lib/ansible/plugins/action/package.py | 50 | 2340 | # (c) 2015, Ansible Inc,
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
TRANSFERS_FILES = False
def run(self, tmp=None, task_vars=dict()):
''' handler for package operations '''
name = self._task.args.get('name', None)
state = self._task.args.get('state', None)
module = self._task.args.get('use', 'auto')
if module == 'auto':
try:
module = self._templar.template('{{ansible_pkg_mgr}}')
except:
pass # could not get it from template!
if module == 'auto':
facts = self._execute_module(module_name='setup', module_args=dict(filter='ansible_pkg_mgr'), task_vars=task_vars)
self._display.debug("Facts %s" % facts)
if not 'failed' in facts:
# ansible_facts is a dict, so look the key up with .get() (getattr()
# on a dict would always return the default)
module = facts['ansible_facts'].get('ansible_pkg_mgr', 'auto')
if module != 'auto':
if module not in self._shared_loader_obj.module_loader:
return {'failed': True, 'msg': 'Could not find a module for %s.' % module}
# run the 'package' module
new_module_args = self._task.args.copy()
if 'use' in new_module_args:
del new_module_args['use']
self._display.vvvv("Running %s" % module)
return self._execute_module(module_name=module, module_args=new_module_args, task_vars=task_vars)
else:
return {'failed': True, 'msg': 'Could not detect which package manager to use. Try gathering facts or setting the "use" option.'}
| gpl-3.0 |
nekulin/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/test/test_import.py | 48 | 14273 | import unittest
import os
import random
import shutil
import sys
import py_compile
import warnings
import marshal
from test.test_support import unlink, TESTFN, unload, run_unittest, check_warnings
def remove_files(name):
for f in (name + os.extsep + "py",
name + os.extsep + "pyc",
name + os.extsep + "pyo",
name + os.extsep + "pyw",
name + "$py.class"):
if os.path.exists(f):
os.remove(f)
class ImportTest(unittest.TestCase):
def testCaseSensitivity(self):
# Brief digression to test that import is case-sensitive: if we got this
# far, we know for sure that "random" exists.
try:
import RAnDoM
except ImportError:
pass
else:
self.fail("import of RAnDoM should have failed (case mismatch)")
def testDoubleConst(self):
# Another brief digression to test the accuracy of manifest float constants.
from test import double_const # don't blink -- that *was* the test
def testImport(self):
def test_with_extension(ext):
# ext normally ".py"; perhaps ".pyw"
source = TESTFN + ext
pyo = TESTFN + os.extsep + "pyo"
if sys.platform.startswith('java'):
pyc = TESTFN + "$py.class"
else:
pyc = TESTFN + os.extsep + "pyc"
f = open(source, "w")
print >> f, "# This tests Python's ability to import a", ext, "file."
a = random.randrange(1000)
b = random.randrange(1000)
print >> f, "a =", a
print >> f, "b =", b
f.close()
try:
try:
mod = __import__(TESTFN)
except ImportError, err:
self.fail("import from %s failed: %s" % (ext, err))
self.assertEquals(mod.a, a,
"module loaded (%s) but contents invalid" % mod)
self.assertEquals(mod.b, b,
"module loaded (%s) but contents invalid" % mod)
finally:
os.unlink(source)
try:
try:
reload(mod)
except ImportError, err:
self.fail("import from .pyc/.pyo failed: %s" % err)
finally:
try:
os.unlink(pyc)
except OSError:
pass
try:
os.unlink(pyo)
except OSError:
pass
del sys.modules[TESTFN]
sys.path.insert(0, os.curdir)
try:
test_with_extension(os.extsep + "py")
if sys.platform.startswith("win"):
for ext in ".PY", ".Py", ".pY", ".pyw", ".PYW", ".pYw":
test_with_extension(ext)
finally:
del sys.path[0]
def testImpModule(self):
# Verify that the imp module can correctly load and find .py files
import imp
x = imp.find_module("os")
os = imp.load_module("os", *x)
def test_module_with_large_stack(self, module='longlist'):
# create module w/list of 65000 elements to test bug #561858
filename = module + os.extsep + 'py'
# create a file with a list of 65000 elements
f = open(filename, 'w+')
f.write('d = [\n')
for i in range(65000):
f.write('"",\n')
f.write(']')
f.close()
# compile & remove .py file, we only need .pyc (or .pyo)
f = open(filename, 'r')
py_compile.compile(filename)
f.close()
os.unlink(filename)
# need to be able to load from current dir
sys.path.append('')
# this used to crash
exec 'import ' + module
# cleanup
del sys.path[-1]
for ext in 'pyc', 'pyo':
fname = module + os.extsep + ext
if os.path.exists(fname):
os.unlink(fname)
def test_failing_import_sticks(self):
source = TESTFN + os.extsep + "py"
f = open(source, "w")
print >> f, "a = 1/0"
f.close()
# New in 2.4, we shouldn't be able to import that no matter how often
# we try.
sys.path.insert(0, os.curdir)
try:
for i in 1, 2, 3:
try:
mod = __import__(TESTFN)
except ZeroDivisionError:
if TESTFN in sys.modules:
self.fail("damaged module in sys.modules on %i. try" % i)
else:
self.fail("was able to import a damaged module on %i. try" % i)
finally:
sys.path.pop(0)
remove_files(TESTFN)
def test_failing_reload(self):
# A failing reload should leave the module object in sys.modules.
source = TESTFN + os.extsep + "py"
f = open(source, "w")
print >> f, "a = 1"
print >> f, "b = 2"
f.close()
sys.path.insert(0, os.curdir)
try:
mod = __import__(TESTFN)
self.assert_(TESTFN in sys.modules, "expected module in sys.modules")
self.assertEquals(mod.a, 1, "module has wrong attribute values")
self.assertEquals(mod.b, 2, "module has wrong attribute values")
# On WinXP, just replacing the .py file wasn't enough to
# convince reload() to reparse it. Maybe the timestamp didn't
# move enough. We force it to get reparsed by removing the
# compiled file too.
remove_files(TESTFN)
# Now damage the module.
f = open(source, "w")
print >> f, "a = 10"
print >> f, "b = 20//0"
f.close()
self.assertRaises(ZeroDivisionError, reload, mod)
# But we still expect the module to be in sys.modules.
mod = sys.modules.get(TESTFN)
self.failIf(mod is None, "expected module to still be in sys.modules")
# We should have replaced a w/ 10, but the old b value should
# stick.
self.assertEquals(mod.a, 10, "module has wrong attribute values")
self.assertEquals(mod.b, 2, "module has wrong attribute values")
finally:
sys.path.pop(0)
remove_files(TESTFN)
if TESTFN in sys.modules:
del sys.modules[TESTFN]
def test_infinite_reload(self):
# Bug #742342 reports that Python segfaults (infinite recursion in C)
# when faced with self-recursive reload()ing.
sys.path.insert(0, os.path.dirname(__file__))
try:
import infinite_reload
finally:
sys.path.pop(0)
def test_import_name_binding(self):
# import x.y.z binds x in the current namespace
import test as x
import test.test_support
self.assert_(x is test, x.__name__)
self.assert_(hasattr(test.test_support, "__file__"))
# import x.y.z as w binds z as w
import test.test_support as y
self.assert_(y is test.test_support, y.__name__)
def test_import_initless_directory_warning(self):
with warnings.catch_warnings():
# Just a random non-package directory we always expect to be
# somewhere in sys.path...
warnings.simplefilter('error', ImportWarning)
self.assertRaises(ImportWarning, __import__, "site-packages")
def test_importbyfilename(self):
path = os.path.abspath(TESTFN)
try:
__import__(path)
except ImportError, err:
self.assertEqual("Import by filename is not supported.",
err.args[0])
else:
self.fail("import by path didn't raise an exception")
class TestPycRewriting(unittest.TestCase):
# Test that the `co_filename` attribute on code objects always points
# to the right file, even when various things happen (e.g. both the .py
# and the .pyc file are renamed).
module_name = "unlikely_module_name"
module_source = """
import sys
code_filename = sys._getframe().f_code.co_filename
module_filename = __file__
constant = 1
def func():
pass
func_filename = func.func_code.co_filename
"""
dir_name = os.path.abspath(TESTFN)
file_name = os.path.join(dir_name, module_name) + os.extsep + "py"
compiled_name = file_name + ("c" if __debug__ else "o")
def setUp(self):
self.sys_path = sys.path[:]
self.orig_module = sys.modules.pop(self.module_name, None)
os.mkdir(self.dir_name)
with open(self.file_name, "w") as f:
f.write(self.module_source)
sys.path.insert(0, self.dir_name)
def tearDown(self):
sys.path[:] = self.sys_path
if self.orig_module is not None:
sys.modules[self.module_name] = self.orig_module
else:
del sys.modules[self.module_name]
for file_name in self.file_name, self.compiled_name:
if os.path.exists(file_name):
os.remove(file_name)
if os.path.exists(self.dir_name):
shutil.rmtree(self.dir_name)
def import_module(self):
ns = globals()
__import__(self.module_name, ns, ns)
return sys.modules[self.module_name]
def test_basics(self):
mod = self.import_module()
self.assertEqual(mod.module_filename, self.file_name)
self.assertEqual(mod.code_filename, self.file_name)
self.assertEqual(mod.func_filename, self.file_name)
del sys.modules[self.module_name]
mod = self.import_module()
self.assertEqual(mod.module_filename, self.compiled_name)
self.assertEqual(mod.code_filename, self.file_name)
self.assertEqual(mod.func_filename, self.file_name)
def test_incorrect_code_name(self):
py_compile.compile(self.file_name, dfile="another_module.py")
mod = self.import_module()
self.assertEqual(mod.module_filename, self.compiled_name)
self.assertEqual(mod.code_filename, self.file_name)
self.assertEqual(mod.func_filename, self.file_name)
def test_module_without_source(self):
target = "another_module.py"
py_compile.compile(self.file_name, dfile=target)
os.remove(self.file_name)
mod = self.import_module()
self.assertEqual(mod.module_filename, self.compiled_name)
self.assertEqual(mod.code_filename, target)
self.assertEqual(mod.func_filename, target)
def test_foreign_code(self):
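# Swap a constant inside the freshly compiled .pyc for a foreign code
# object, rewrite the file, and check that importing it leaves the
# foreign code object's co_filename untouched.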
py_compile.compile(self.file_name)
with open(self.compiled_name, "rb") as f:
header = f.read(8)
code = marshal.load(f)
constants = list(code.co_consts)
foreign_code = test_main.func_code
pos = constants.index(1)
constants[pos] = foreign_code
code = type(code)(code.co_argcount, code.co_nlocals, code.co_stacksize,
code.co_flags, code.co_code, tuple(constants),
code.co_names, code.co_varnames, code.co_filename,
code.co_name, code.co_firstlineno, code.co_lnotab,
code.co_freevars, code.co_cellvars)
with open(self.compiled_name, "wb") as f:
f.write(header)
marshal.dump(code, f)
mod = self.import_module()
self.assertEqual(mod.constant.co_filename, foreign_code.co_filename)
class PathsTests(unittest.TestCase):
path = TESTFN
def setUp(self):
os.mkdir(self.path)
self.syspath = sys.path[:]
def tearDown(self):
shutil.rmtree(self.path)
sys.path = self.syspath
# http://bugs.python.org/issue1293
def test_trailing_slash(self):
f = open(os.path.join(self.path, 'test_trailing_slash.py'), 'w')
f.write("testdata = 'test_trailing_slash'")
f.close()
sys.path.append(self.path+'/')
mod = __import__("test_trailing_slash")
self.assertEqual(mod.testdata, 'test_trailing_slash')
unload("test_trailing_slash")
class RelativeImport(unittest.TestCase):
def tearDown(self):
try:
del sys.modules["test.relimport"]
except:
pass
def test_relimport_star(self):
# This will import * from .test_import.
from . import relimport
self.assertTrue(hasattr(relimport, "RelativeImport"))
def test_issue3221(self):
def check_absolute():
exec "from os import path" in ns
def check_relative():
exec "from . import relimport" in ns
# Check both OK with __package__ and __name__ correct
ns = dict(__package__='test', __name__='test.notarealmodule')
check_absolute()
check_relative()
# Check both OK with only __name__ wrong
ns = dict(__package__='test', __name__='notarealpkg.notarealmodule')
check_absolute()
check_relative()
# Check relative fails with only __package__ wrong
ns = dict(__package__='foo', __name__='test.notarealmodule')
with check_warnings() as w:
check_absolute()
self.assert_('foo' in str(w.message))
self.assertEqual(w.category, RuntimeWarning)
self.assertRaises(SystemError, check_relative)
# Check relative fails with __package__ and __name__ wrong
ns = dict(__package__='foo', __name__='notarealpkg.notarealmodule')
with check_warnings() as w:
check_absolute()
self.assert_('foo' in str(w.message))
self.assertEqual(w.category, RuntimeWarning)
self.assertRaises(SystemError, check_relative)
# Check both fail with package set to a non-string
ns = dict(__package__=object())
self.assertRaises(ValueError, check_absolute)
self.assertRaises(ValueError, check_relative)
def test_main(verbose=None):
run_unittest(ImportTest, TestPycRewriting, PathsTests, RelativeImport)
if __name__ == '__main__':
# test needs to be a package, so we can do relative import
from test.test_import import test_main
test_main()
| apache-2.0 |
geerlingguy/ansible | test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/doc_fragments/vyos.py | 47 | 2643 | # -*- coding: utf-8 -*-
# Copyright: (c) 2015, Peter Sprygada <psprygada@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = r"""options:
provider:
description:
- B(Deprecated)
- 'Starting with Ansible 2.5 we recommend using C(connection: network_cli).'
- For more information please see the L(Network Guide, ../network/getting_started/network_differences.html#multiple-communication-protocols).
- HORIZONTALLINE
- A dict object containing connection details.
type: dict
suboptions:
host:
description:
- Specifies the DNS host name or address for connecting to the remote device
over the specified transport. The value of host is used as the destination
address for the transport.
type: str
required: true
port:
description:
- Specifies the port to use when building the connection to the remote device.
type: int
default: 22
username:
description:
- Configures the username to use to authenticate the connection to the remote
device. This value is used to authenticate the SSH session. If the value
is not specified in the task, the value of environment variable C(ANSIBLE_NET_USERNAME)
will be used instead.
type: str
password:
description:
- Specifies the password to use to authenticate the connection to the remote
device. This value is used to authenticate the SSH session. If the value
is not specified in the task, the value of environment variable C(ANSIBLE_NET_PASSWORD)
will be used instead.
type: str
timeout:
description:
- Specifies the timeout in seconds for communicating with the network device
for either connecting or sending commands. If the timeout is exceeded before
the operation is completed, the module will error.
type: int
default: 10
ssh_keyfile:
description:
- Specifies the SSH key to use to authenticate the connection to the remote
device. This value is the path to the key used to authenticate the SSH
session. If the value is not specified in the task, the value of environment
variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
type: path
notes:
- For more information on using Ansible to manage network devices see the :ref:`Ansible
Network Guide <network_guide>`
"""
| gpl-3.0 |
ictofnwi/coach | dashboard/views.py | 1 | 19639 | import random
import re
import json
import pytz
import dateutil.parser
from datetime import datetime, timedelta
from pprint import pformat
from hashlib import md5
from django.http import HttpResponse
from django.core.exceptions import ObjectDoesNotExist
from django.shortcuts import render, redirect
from django.conf import settings
from django.template import RequestContext, loader
from django.db.models import Q
from models import Activity, Recommendation, LogEvent, GroupAssignment
from recommendation import recommend
from tincan_api import TinCan
from helpers import *
# Fetch TinCan credentials from settings
USERNAME = settings.TINCAN['username']
PASSWORD = settings.TINCAN['password']
ENDPOINT = settings.TINCAN['endpoint']
# Reference to TinCan verbs
COMPLETED = TinCan.VERBS['completed']['id']
PROGRESSED = TinCan.VERBS['progressed']['id']
# Reference to TinCan activity types
ASSESSMENT = TinCan.ACTIVITY_TYPES['assessment']
MEDIA = TinCan.ACTIVITY_TYPES['media']
QUESTION = TinCan.ACTIVITY_TYPES['question']
# Reference to progress URI in result/extension
PROGRESS_T = "http://uva.nl/coach/progress"
# Default barcode height
BARCODE_HEIGHT = 35
## Decorators
def identity_required(func):
def inner(request, *args, **kwargs):
# Fetch email from GET paramaters if present and store in session.
paramlist = request.GET.get('paramlist', None)
email = request.GET.get('email', None)
param_hash = request.GET.get('hash', None)
if paramlist is not None:
hash_contents = []
for param in paramlist.split(","):
if param == "pw":
hash_contents.append(settings.AUTHENTICATION_SECRET)
else:
hash_contents.append(request.GET.get(param, ""))
hash_string = md5(",".join(hash_contents)).hexdigest().upper()
if hash_string == param_hash and email is not None and email != "":
request.session['user'] = "mailto:%s" % (email, )
# Fetch user from session
user = request.session.get('user', None)
# If no user is specified, show information on how to login
if user is None:
return render(request, 'dashboard/loginfirst.html', {})
else:
return func(request, *args, **kwargs)
return inner
def check_group(func):
"""Decorator to check the group for A/B testing.
Users in group A see the dashboard and users in group B do not.
Users that are in no group will be assigned one, so that both groups differ
at most 1 in size. If both groups are the same size, the group will be
assigned pseudorandomly.
"""
def inner(request, *args, **kwargs):
# Fetch user from session
user = request.session.get('user', None)
# Case 1: Existing user
try:
assignment = GroupAssignment.objects.get(user=user)
if assignment.group == 'A':
return func(request, *args, **kwargs)
else:
return HttpResponse()
# Case 2: New user
except ObjectDoesNotExist:
# Case 2a: First half of new pair,
# randomly pick A or B for this user.
if GroupAssignment.objects.count() % 2 == 0:
group = random.choice(['A', 'B'])
if group == 'A':
assignment = GroupAssignment(user=user, group='A')
assignment.save()
return func(request, *args, **kwargs)
else:
assignment = GroupAssignment(user=user, group='B')
assignment.save()
return HttpResponse()
# Case 2b: Second half of new pair,
# choose the group that was not previously chosen.
else:
try:
last_group = GroupAssignment.objects.order_by('-id')[0].group
except:
last_group = random.choice(['A', 'B'])
if last_group == 'A':
assignment = GroupAssignment(user=user, group='B')
assignment.save()
return HttpResponse()
else:
assignment = GroupAssignment(user=user, group='A')
assignment.save()
return func(request, *args, **kwargs)
return inner
## Bootstrap
def bootstrap(request):
width = request.GET.get('width',0)
template = loader.get_template('dashboard/bootstrap.js')
return HttpResponse(
template.render(RequestContext(
request,
{ 'host': request.get_host(), 'width': width }
)),
content_type="application/javascript"
)
def bootstrap_recommend(request, milestones):
width = request.GET.get('width',0)
max_recs = int(request.GET.get('max', False))
return render(request, 'dashboard/bootstrap_recommend.js',
{'milestones': milestones,
'max_recs': max_recs,
'width': width,
'host': request.get_host()})
## Debug interface
def log(request):
logs = LogEvent.objects.order_by('-timestamp')[:100]
data = request.GET.get('data',"0") == "1"
return render(request, 'dashboard/log.html',
{ 'logs': logs, 'data': data, 'host': request.get_host()})
## Interface
@identity_required
@check_group
def barcode(request, default_width=170):
"""Return an svg representing progress of an individual vs the group."""
# Fetch user from session
user = request.session.get('user', None)
width = int(request.GET.get('width', default_width))
data = {'width': width, 'height': BARCODE_HEIGHT}
# Add values
markers = {}
activities = Activity.objects.filter(type=ASSESSMENT)
for activity in activities:
if activity.user in markers:
markers[activity.user] += min(80, activity.value)
else:
markers[activity.user] = min(80, activity.value)
if user in markers:
data['user'] = markers[user]
del markers[user]
else:
data['user'] = 0
data['people'] = markers.values()
# Normalise
if len(markers) > 0:
maximum = max(max(data['people']), data['user'])
data['user'] /= maximum
data['user'] *= width
data['user'] = int(data['user'])
for i in range(len(data['people'])):
data['people'][i] /= maximum
data['people'][i] *= width
data['people'][i] = int(data['people'][i])
else:
# if no other persons have been active
# then user is assumed to be in the lead.
# This is regardless if the user has done anything at all.
data['user'] = width
return render(request, 'dashboard/barcode.svg', data)
@identity_required
@check_group
def index(request):
# Fetch user from session
user = request.session.get('user', None)
# Fetch desired width of the dashboard
width = request.GET.get("width", 300)
activities = Activity.objects.filter(user=user).order_by('time')
statements = map(lambda x: x._dict(), activities)
statements = aggregate_statements(statements)
for statement in statements:
statement['activity'] = fix_url(statement['activity'], request)
statements = split_statements(statements)
assignments = statements['assignments']
assignments.sort(key = lambda x: x['time'], reverse=True)
exercises = statements['exercises']
exercises.sort(key = lambda x: x['value'])
video = statements['video']
video.sort(key = lambda x: x['time'], reverse=True)
template = loader.get_template('dashboard/index.html')
context = RequestContext(request, {
'width': width,
'barcode_height': BARCODE_HEIGHT,
'assignments': assignments,
'exercises': exercises,
'video': video,
'host': request.get_host()
})
response = HttpResponse(template.render(context))
response['Access-Control-Allow-Origin'] = "*"
event = LogEvent(type='D', user=user, data="{}")
event.save()
return response
@identity_required
@check_group
def get_recommendations(request, milestones, max_recommendations=False):
# Fetch user from session
user = request.session.get('user', None)
# Fetch desired width of the recommendations dashboard
width = request.GET.get("width", 300)
# Get maximum recommendations to be showed
max_recommendations = int(request.GET.get('max', max_recommendations))
# Fetch activities that can be perceived as seen by the user
seen = Activity.objects.filter(
Q(verb=COMPLETED) | Q(verb=PROGRESSED),
value__gte=30,
user=user
)
# Futher filter that list to narrow it down to activities that can be
# perceived as being done by the user.
done = seen.filter(value__gte=80)
# Preprocess the seen and done sets to be used later
seen = set(map(lambda x: hash(x.activity), seen))
done = set(map(lambda x: x.activity, done))
# Init dict containing final recommendations
recommendations = {}
# For every milestone we want to make recommendations for:
for milestone in milestones.split(','):
# Alas this is necessary on some servers
milestone = re.sub(r'http(s?):/([^/])',r'http\1://\2',milestone)
# Make sure the milestone is not already passed
if milestone not in done:
# Fetch list of rules from the context of this milestone.
# Rules contain antecedent => consequent associations with a
# certain amount of confidence and support. The antecedent is
# stored as a hash of the activities in the antecedent. The
# consequent is the activity that is recommended if you did the
# activities in the consequent. At the moment only the trail
# recommendation algorithm is used, which has antecedents of only
# one activity. If this was different, the antecedent hash check
# would have to include creating powersets of certain length.
rules = Recommendation.objects.filter(milestone=milestone)
# For each recommendation rule
for rule in rules:
# If the LHS applies and the RHS is not already done
if rule.antecedent_hash in seen and \
rule.consequent not in done:
# If the consequent was already recommended earlier
if rule.consequent in recommendations:
# Fetch earlier recommendation
earlier_rule = recommendations[rule.consequent]
# Calculate the original total by with the support was
# divided in order to get the confidence of the
# the earlier recommendation
earlier_total = earlier_rule['support']
earlier_total /= float(earlier_rule['confidence'])
total = earlier_total + rule.support/rule.confidence
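# support / confidence recovers each rule's denominator (the number of
# times its antecedent was seen), so summing the two gives the
# denominator of the merged rule.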
# Calculate combined values
support = earlier_rule['support'] + rule.support
confidence = support / float(total)
score = f_score(confidence, support, beta=1.5)
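# f_score (from helpers) is assumed to be the weighted F-measure
# (1 + beta**2) * (c * s) / (beta**2 * c + s); with beta = 1.5 the
# support term is weighted somewhat heavier than confidence.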
# Update the earlier recommendation to combine both
earlier_rule['support'] = support
earlier_rule['confidence'] = confidence
earlier_rule['score'] = score
# If the consequent is recommended for the first time
else:
# Calculate F-score
score = f_score(rule.confidence, rule.support, beta=1.5)
# Store recommendation for this consequent
recommendations[rule.consequent] = {
'milestone': milestone,
'url': rule.consequent,
'id': rand_id(),
'name': rule.name,
'desc': rule.description,
'm_name': rule.m_name,
'confidence': rule.confidence,
'support': rule.support,
'score': score
}
# Convert to a list of recommendations.
# The lookup per consequent is no longer necessary
recommendations = recommendations.values()
# If recommendations were found
if len(recommendations) > 0:
# Normalise score
max_score = max(map(lambda x: x['score'], recommendations))
for recommendation in recommendations:
recommendation['score'] /= max_score
# Sort the recommendations using their f-scores
recommendations.sort(key = lambda x: x['score'], reverse=True)
# Cap the number of recommendations if applicable.
if max_recommendations:
recommendations = recommendations[:max_recommendations]
# Log Recommendations viewed
data = json.dumps({
"recs": map(lambda x: x['url'], recommendations),
"path": request.path,
"milestone_n": len(milestones.split(',')),
"milestones": milestones})
event = LogEvent(type='V', user=user, data=data)
event.save()
# Render the result
return render(request, 'dashboard/recommend.html',
{'recommendations': recommendations,
'context': event.id,
'width' : width,
'host': request.get_host()})
else:
return HttpResponse()
## Background processes
def cache_activities(request):
"""Create a cache of the Learning Record Store by getting all items since
the most recent one in the cache.
"""
# Dynamic interval retrieval settings
INTERVAL = timedelta(days=1)
EPOCH = datetime(2013, 9, 3, 0, 0, 0, 0, pytz.utc)
# Set aggregate to True if events concerning the same activity-person
# should be aggregated into one row. This has impact for recommendations.
aggregate = False
# Find most recent date
try:
# Selecting the the datetime of the latest stored item minus a margin
# of 6 hours. The margin is there to be slightly more resillient to
# variation (read mistakes) in timezone handling and also to cope with
# the situation that an event was stored later than it occured. The
# latter situation is one of the use cases of the Experience API.
# TODO: The 6 hour margin is arbitrary and a hack.
# We should find a better solution for this.
t1 = Activity.objects.latest('time').time - timedelta(hours=6)
except:
t1 = EPOCH
# Get new data
tincan = TinCan(USERNAME, PASSWORD, ENDPOINT)
statements = tincan.dynamicIntervalStatementRetrieval(t1, INTERVAL)
created_statement_count = 0
for statement in statements:
statement_type = statement['object']['definition']['type']
user = statement['actor']['mbox']
activity = statement['object']['id']
verb = statement['verb']['id']
name = statement['object']['definition']['name']['en-US']
description = statement['object']['definition']['description']['en-US']
time = dateutil.parser.parse(statement['timestamp'])
try:
raw_score = statement['result']['score']['raw']
min_score = statement['result']['score']['min']
max_score = statement['result']['score']['max']
value = 100 * (raw_score - min_score) / max_score
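# Note: this divides by max_score rather than (max_score - min_score);
# the two agree for the common case min_score == 0.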
except KeyError:
try:
value = 100 * float(statement['result']['extensions'][PROGRESS_T])
except KeyError:
# If no information is given about the end result then assume a
# perfect score was acquired when the activity was completed,
# and no score otherwise.
if verb == COMPLETED:
value = 100
else:
value = 0
if aggregate:
a, created = Activity.objects.get_or_create(user=user,
activity=activity)
# Don't overwrite completed except with other completed events
# and only overwite with more recent timestamp
if created or (time > a.time and
(verb == COMPLETED or a.verb != COMPLETED)):
a.verb = verb
a.type = statement_type
a.value = value
a.name = name
a.description = description
a.time = time
a.save()
created_statement_count += 1
else:
a, created = Activity.objects.get_or_create(user=user,
verb=verb,
activity=activity,
time=time)
if created:
a.verb = verb
a.type = statement_type
a.value = value
a.name = name
a.description = description
a.time = time
a.save()
created_statement_count += 1
data = json.dumps({'t1': t1.isoformat(), 'created': created_statement_count})
event = LogEvent(type='C', user='all', data=data)
event.save()
return HttpResponse()
def generate_recommendations(request):
minsup = int(request.GET.get('minsup', 2))
minconf = int(request.GET.get('minconf', .3))
gamma = int(request.GET.get('gamma', .8))
# Mine recommendations
recommendations, names = recommend(
minsup=minsup,
minconf=minconf,
gamma=gamma
)
# Add recommendations to database
Recommendation.objects.all().delete()
for recommendation in recommendations:
model = Recommendation(
antecedent_hash = hash(recommendation['antecedent']),
confidence = recommendation['confidence'],
support = recommendation['support'],
milestone = recommendation['milestone'],
m_name = names[recommendation['milestone']][0],
name = names[recommendation['consequent']][0],
consequent = recommendation['consequent'],
description = names[recommendation['consequent']][1])
model.save()
event = LogEvent(type='G', user='all', data=json.dumps(recommendations))
event.save()
return HttpResponse(pformat(recommendations))
@identity_required
def track(request, defaulttarget='index.html'):
"""Track user clicks so that we may be able to improve recommendation
relevance in the future.
"""
# Fetch user from session
user = request.session.get('user', None)
# Fetch target URL from GET parameters
target = request.GET.get('target', defaulttarget)
# Fetch context log id from GET paramaters
context = request.GET.get('context', None)
if context is not None:
try:
context = LogEvent.objects.get(pk=int(context))
except LogEvent.DoesNotExist:
context = None
event = LogEvent(type='T', user=user, data=target, context=context)
event.save()
return redirect(fix_url(target, request))
| agpl-3.0 |
mikacousin/olc | src/ascii_load.py | 1 | 25915 | """ASCII file: Load functions"""
import array
from olc.channel_time import ChannelTime
from olc.cue import Cue
from olc.define import MAX_CHANNELS, NB_UNIVERSES, App
from olc.device import Device, Parameter, Template
from olc.group import Group
from olc.independent import Independent
from olc.master import Master
from olc.sequence import Sequence
from olc.step import Step
def get_time(string):
"""String format : [[hours:]minutes:]seconds[.tenths]
Return time in seconds
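Examples (illustrative): "2.5" -> 2.5, "1:30" -> 90.0,
"1:02:03" -> 3723.0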
"""
if ":" in string:
tsplit = string.split(":")
if len(tsplit) == 2:
time = int(tsplit[0]) * 60 + float(tsplit[1])
elif len(tsplit) == 3:
time = int(tsplit[0]) * 3600 + int(tsplit[1]) * 60 + float(tsplit[2])
else:
print("Time format Error")
time = 0
else:
time = float(string)
return time
class AsciiParser:
"""Parse ASCII files"""
def __init__(self):
self.default_time = App().settings.get_double("default-time")
def parse(self, readlines):
"""Parse stream"""
flag_seq = False
in_cue = False
flag_patch = False
flag_master = False
flag_group = False
flag_preset = False
flag_inde = False
flag_template = False
flag_parameter = False
type_seq = "MainPlayback"
playback = False
txt = False
t_in = False
t_out = False
d_in = False
d_out = False
wait = False
channels = False
mem = False
channel_time = {}
template = None
devices = {}
parameters = {}
console = ""
item = ""
for line in readlines:
# Remove not needed endline
line = line.replace("\r", "")
line = line.replace("\n", "")
# Marker for end of file
if line[:7].upper() == "ENDDATA":
break
# Console type
if line[:7].upper() == "CONSOLE":
console = line[8:]
# Clear all
if line[:9].upper() == "CLEAR ALL":
del App().memories[:]
del App().chasers[:]
del App().groups[:]
del App().masters[:]
for page in range(2):
for i in range(20):
App().masters.append(Master(page + 1, i + 1, 0, 0))
App().patch.patch_empty()
App().sequence.__init__(1, text="Main Playback")
del App().sequence.steps[1:]
App().independents.__init__()
# Sequence
if line[:9].upper() == "$SEQUENCE":
p = line[10:].split(" ")
if int(p[0]) < 2 and not playback:
playback = True
type_seq = "MainPlayback"
else:
type_seq = "Chaser"
index_seq = int(p[0])
App().chasers.append(Sequence(index_seq, type_seq=type_seq))
del App().chasers[-1].steps[1:]
flag_seq = True
flag_patch = False
flag_master = False
flag_group = False
flag_inde = False
flag_preset = False
flag_template = False
# Chasers
if flag_seq and type_seq == "Chaser":
if line[:4].upper() == "TEXT":
App().chasers[-1].text = line[5:]
if line[:4].upper() == "$CUE":
in_cue = True
channels = array.array("B", [0] * MAX_CHANNELS)
p = line[5:].split(" ")
seq = p[0]
mem = float(p[1])
if in_cue:
if line[:4].upper() == "DOWN":
p = line[5:]
time = p.split(" ")[0]
delay = p.split(" ")[1]
t_out = get_time(time)
if t_out == 0:
t_out = self.default_time
d_out = get_time(delay)
if line[:2].upper() == "UP":
p = line[3:]
time = p.split(" ")[0]
delay = p.split(" ")[1]
t_in = get_time(time)
if t_in == 0:
t_in = self.default_time
d_in = get_time(delay)
if line[:4].upper() == "CHAN":
p = line[5:].split(" ")
for q in p:
r = q.split("/")
if r[0] != "":
channel = int(r[0])
level = int(r[1][1:], 16)
channels[channel - 1] = level
if line == "":
if not wait:
wait = 0.0
if not txt:
txt = ""
if not t_out:
t_out = 5.0
if not t_in:
t_in = 5.0
cue = Cue(seq, mem, channels, text=txt)
step = Step(
seq,
cue,
time_in=t_in,
time_out=t_out,
delay_out=d_out,
delay_in=d_in,
wait=wait,
text=txt,
)
App().chasers[-1].add_step(step)
in_cue = False
t_out = False
t_in = False
channels = False
# Main Playback
if flag_seq and type_seq == "MainPlayback":
if line[:0] == "!":
flag_seq = False
if line[:3].upper() == "CUE":
in_cue = True
channels = array.array("B", [0] * MAX_CHANNELS)
mem = float(line[4:])
if line[:4].upper() == "$CUE":
in_cue = True
channels = array.array("B", [0] * MAX_CHANNELS)
mem = float(line[5:])
if in_cue:
if line[:4].upper() == "TEXT":
txt = line[5:]
if line[:6].upper() == "$$TEXT" and not txt:
txt = line[7:]
if line[:12].upper() == "$$PRESETTEXT":
txt = line[13:]
if line[:4].upper() == "DOWN":
p = line[5:]
time = p.split(" ")[0]
delay = p.split(" ")[1] if len(p.split(" ")) == 2 else "0"
t_out = get_time(time)
if t_out == 0:
t_out = self.default_time
d_out = get_time(delay)
if line[:2].upper() == "UP":
p = line[3:]
time = p.split(" ")[0]
delay = p.split(" ")[1] if len(p.split(" ")) == 2 else "0"
t_in = get_time(time)
if t_in == 0:
t_in = self.default_time
d_in = get_time(delay)
if line[:6].upper() == "$$WAIT":
time = line[7:].split(" ")[0]
wait = get_time(time)
if line[:11].upper() == "$$PARTTIME ":
p = line[11:]
d = p.split(" ")[0]
if d == ".":
d = 0
delay = float(d)
time_str = p.split(" ")[1]
time = get_time(time_str)
if line[:14].upper() == "$$PARTTIMECHAN":
p = line[15:].split(" ")
# We could have several channels
for chan in p:
if chan.isdigit():
channel_time[int(chan)] = ChannelTime(delay, time)
if line[:4].upper() == "CHAN":
p = line[5:].split(" ")
for q in p:
r = q.split("/")
if r[0] != "":
channel = int(r[0])
# Ignore channels greater than MAX_CHANNELS
if channel < MAX_CHANNELS:
level = int(r[1][1:], 16)
channels[channel - 1] = level
if line[:5].upper() == "$$AL ":
items = line[5:].split(" ")
channel = int(items[0])
if line[:4].upper() == "$$A ":
items = line[4:].split(" ")
channel = int(items[0])
param_number = int(items[1])
value = int(items[2])
if channel < MAX_CHANNELS:
device_number = abs(App().patch.channels[channel - 1][0][0])
device = App().patch.devices[device_number]
param = device.template.parameters.get(param_number)
high_byte = param.offset.get("High Byte")
low_byte = param.offset.get("Low Byte")
parameters[param_number] = {
"high byte": high_byte,
"low byte": low_byte,
"value": value,
}
devices[channel] = parameters
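# Resulting shape (illustrative):
# devices[channel] = {param_number: {"high byte": .., "low byte": ..,
# "value": ..}}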
if line == "":
if not wait:
wait = 0.0
if not txt:
txt = ""
if not t_out:
t_out = 5.0
if not t_in:
t_in = 5.0
if not d_in:
d_in = 0.0
if not d_out:
d_out = 0.0
# Create Cue
cue = Cue(0, mem, channels, text=txt, devices=devices)
# Add cue to the list
App().memories.append(cue)
# Create Step
step = Step(
1,
cue,
time_in=t_in,
time_out=t_out,
delay_in=d_in,
delay_out=d_out,
wait=wait,
channel_time=channel_time,
text=txt,
)
# Add Step to the Sequence
App().sequence.add_step(step)
in_cue = False
txt = False
t_out = False
t_in = False
wait = False
mem = False
channels = False
channel_time = {}
devices = {}
parameters = {}
# Dimmers Patch
if line[:11].upper() == "CLEAR PATCH":
flag_seq = False
flag_patch = True
flag_master = False
flag_group = False
flag_inde = False
flag_preset = False
flag_template = False
App().patch.patch_empty() # Empty patch
App().window.channels_view.flowbox.invalidate_filter()
if flag_patch and line[:1] == "!":
flag_patch = False
if line[:7].upper() == "PATCH 1":
for p in line[8:].split(" "):
q = p.split("<")
if q[0]:
r = q[1].split("@")
channel = int(q[0])
output = int(r[0])
univ = int((output - 1) / 512)
level = int(r[1])
if univ < NB_UNIVERSES:
if channel < MAX_CHANNELS:
out = output - (512 * univ)
App().patch.add_output(channel, out, univ, level)
App().window.channels_view.flowbox.invalidate_filter()
else:
print("More than", MAX_CHANNELS, "channels")
else:
print("More than", NB_UNIVERSES, "universes")
# Parameter Definitions
if line[:9].upper() == "$PARAMDEF":
item = line[10:].split(" ")
number = int(item[0])
group = int(item[1])
# Parameter names may contain spaces, so rejoin the remaining fields
name = ""
for i in range(2, len(item)):
name += item[i] + " "
name = name[:-1]
App().parameters[number] = [group, name]
# Device Template
if flag_template:
if line[:0] == "!":
flag_template = False
if line[:14].upper() == "$$MANUFACTURER":
template.manufacturer = line[15:]
if line[:11].upper() == "$$MODELNAME":
template.model_name = line[12:]
if line[:10].upper() == "$$MODENAME":
template.mode_name = line[11:]
if line[:10].upper() == "$$COLORCAL":
pass
if line[:11].upper() == "$$FOOTPRINT":
template.footprint = int(line[12:])
if line[:11].upper() == "$$PARAMETER":
item = line[12:].split(" ")
param_number = int(item[0])
# param_type = int(item[1])
# param_xfade = int(item[2])
parameter = Parameter(param_number)
flag_parameter = True
if flag_parameter:
if line[:8].upper() == "$$OFFSET":
item = line[9:].split(" ")
parameter.offset = {
"High Byte": int(item[0]),
"Low Byte": int(item[1]),
"Step": int(item[2]),
}
if line[:9].upper() == "$$DEFAULT":
parameter.default = int(line[10:])
if line[:11].upper() == "$$HIGHLIGHT":
parameter.highlight = int(line[12:])
if line[:7].upper() == "$$TABLE":
item = line[8:].split(" ")
start = int(item[0])
stop = int(item[1])
flags = int(item[2])
range_name = ""
for i in range(3, len(item)):
range_name += item[i] + " "
range_name = range_name[:-1]
parameter.table.append([start, stop, flags, range_name])
if line[:8].upper() == "$$RANGE ":
item = line[8:].split(" ")
percent = int(item[2]) == 1
parameter.range = {
"Minimum": int(item[0]),
"Maximum": int(item[1]),
"Percent": percent,
}
if line[:12].upper() == "$$RANGEGROUP":
pass
if line == "":
template.parameters[parameter.number] = parameter
flag_parameter = False
if line[:9].upper() == "$TEMPLATE":
flag_seq = False
flag_patch = False
flag_master = False
flag_group = False
flag_inde = False
flag_preset = False
flag_template = True
name = line[10:]
template = Template(name)
App().templates.append(template)
# Devices
if line[:8].upper() == "$DEVICE ":
item = line[8:].split(" ")
channel = int(item[0])
output = int(item[1])
universe = int((output - 1) / 512)
output = output - (512 * universe)
template = ""
for i in range(6, len(item)):
template += item[i] + " "
template = template[:-1]
if channel < MAX_CHANNELS and universe < NB_UNIVERSES:
device = Device(channel, output, universe, template)
App().patch.add_device(device)
# Presets not in sequence
if line[:5].upper() == "GROUP" and console == "CONGO":
# On Congo, Preset not in sequence
flag_seq = False
flag_patch = False
flag_master = False
flag_group = False
flag_inde = False
flag_preset = True
flag_template = False
channels = array.array("B", [0] * MAX_CHANNELS)
preset_nb = float(line[6:])
if line[:7].upper() == "$PRESET" and (console in ("DLIGHT", "VLC")):
# On DLight, Preset not in sequence
flag_seq = False
flag_patch = False
flag_master = False
flag_group = False
flag_inde = False
flag_template = False
flag_preset = True
channels = array.array("B", [0] * MAX_CHANNELS)
preset_nb = float(line[8:])
if flag_preset:
if line[:1] == "!":
flag_preset = False
if line[:4].upper() == "TEXT":
txt = line[5:]
if line[:6].upper() == "$$TEXT":
txt = line[7:]
if line[:4].upper() == "CHAN":
p = line[5:].split(" ")
for q in p:
r = q.split("/")
if r[0] != "":
channel = int(r[0])
level = int(r[1][1:], 16)
if channel <= MAX_CHANNELS:
channels[channel - 1] = level
if line == "":
# Find Preset's position
found = False
i = 0
for i, _ in enumerate(App().memories):
if App().memories[i].memory > preset_nb:
found = True
break
if not found:
# Preset is at the end
i += 1
if not txt:
txt = ""
# Create Preset
cue = Cue(0, preset_nb, channels, text=txt)
# Add preset to the list
App().memories.insert(i, cue)
flag_preset = False
txt = ""
# Groups
if line[:5].upper() == "GROUP" and console != "CONGO":
flag_seq = False
flag_patch = False
flag_master = False
flag_preset = False
flag_inde = False
flag_template = False
flag_group = True
channels = array.array("B", [0] * MAX_CHANNELS)
group_nb = float(line[6:])
if line[:6].upper() == "$GROUP":
flag_seq = False
flag_patch = False
flag_master = False
flag_preset = False
flag_inde = False
flag_template = False
flag_group = True
channels = array.array("B", [0] * MAX_CHANNELS)
group_nb = float(line[7:])
if flag_group:
if line[:1] == "!":
flag_group = False
if line[:4].upper() == "TEXT":
txt = line[5:]
if line[:6].upper() == "$$TEXT":
txt = line[7:]
if line[:4].upper() == "CHAN":
p = line[5:].split(" ")
for q in p:
r = q.split("/")
if r[0] != "":
channel = int(r[0])
level = int(r[1][1:], 16)
if channel <= MAX_CHANNELS:
channels[channel - 1] = level
if line == "":
if not txt:
txt = ""
# Don't create a group that already exists
group_exist = False
for grp in App().groups:
if group_nb == grp.index:
group_exist = True
if not group_exist:
App().groups.append(Group(group_nb, channels, txt))
flag_group = False
txt = ""
# Masters
if flag_master:
if line[:1] == "!":
flag_master = False
if line[:4].upper() == "CHAN":
p = line[5:].split(" ")
for q in p:
r = q.split("/")
if r[0] != "":
channel = int(r[0])
level = int(r[1][1:], 16)
if channel <= MAX_CHANNELS:
channels[channel - 1] = level
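# 'item' below still holds the fields of the "$MASTPAGEITEM" line that
# opened this master block (parsed in the branch further down)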
if (line == "" or line[:13].upper() == "$MASTPAGEITEM") and int(
item[1]
) <= 20:
index = int(item[1]) - 1 + ((int(item[0]) - 1) * 20)
App().masters[index] = Master(
int(item[0]), int(item[1]), item[2], channels
)
flag_master = False
if line[:13].upper() == "$MASTPAGEITEM":
item = line[14:].split(" ")
# DLight uses Type "2" for Groups
if console == "DLIGHT" and item[2] == "2":
item[2] = "13"
if item[2] == "2":
flag_seq = False
flag_patch = False
flag_group = False
flag_preset = False
flag_inde = False
flag_template = False
flag_master = True
channels = array.array("B", [0] * MAX_CHANNELS)
# Only 20 Masters per page
elif int(item[1]) <= 20:
index = int(item[1]) - 1 + ((int(item[0]) - 1) * 20)
App().masters[index] = Master(
int(item[0]), int(item[1]), item[2], item[3]
)
# Independents
if line[:16].upper() == "$SPECIALFUNCTION":
flag_seq = False
flag_patch = False
flag_master = False
flag_preset = False
flag_group = False
flag_template = False
flag_inde = True
channels = array.array("B", [0] * MAX_CHANNELS)
text = ""
items = line[17:].split(" ")
number = int(items[0])
# Parameters not implemented:
# ftype = items[1] # 0: inclusive, 1: Inhibit, 2: Exclusive
# button_mode = items[2] # 0: Momentary, 1: Toggling
if flag_inde:
if line[:1] == "!":
flag_inde = False
if line[:4].upper() == "TEXT":
text = line[5:]
if line[:6].upper() == "$$TEXT" and not text:
text = line[7:]
if line[:4].upper() == "CHAN":
chan_list = line[5:].split(" ")
for channel in chan_list:
item = channel.split("/")
if item[0]:
chan = int(item[0])
level = int(item[1][1:], 16)
if chan <= MAX_CHANNELS:
channels[chan - 1] = level
if line == "":
inde = Independent(number, text=text, levels=channels)
App().independents.update(inde)
flag_inde = False
# MIDI mapping
if line[:10].upper() == "$$MIDINOTE":
item = line[11:].split(" ")
App().midi.midi_notes.update({item[0]: [int(item[1]), int(item[2])]})
if line[:8].upper() == "$$MIDICC":
item = line[9:].split(" ")
App().midi.midi_cc.update({item[0]: [int(item[1]), int(item[2])]})
| gpl-3.0 |
agconti/njode | env/lib/python2.7/site-packages/allauth/socialaccount/providers/paypal/provider.py | 68 | 1192 | from allauth.socialaccount import providers
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class PaypalAccount(ProviderAccount):
def get_avatar_url(self):
return self.account.extra_data.get('picture')
def to_str(self):
return self.account.extra_data.get('name',
super(PaypalAccount, self).to_str())
class PaypalProvider(OAuth2Provider):
id = 'paypal'
name = 'Paypal'
package = 'allauth.socialaccount.providers.paypal'
account_class = PaypalAccount
def get_default_scope(self):
# See: https://developer.paypal.com/docs/integration/direct/identity/attributes/ # noqa
return ['openid', 'email']
def extract_uid(self, data):
return str(data['user_id'])
def extract_common_fields(self, data):
# See: https://developer.paypal.com/docs/api/#get-user-information
return dict(first_name=data.get('given_name', ''),
last_name=data.get('family_name', ''),
email=data.get('email'))
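# Example (sketch, made-up payload): extract_common_fields on
# data = {'given_name': 'Ada', 'family_name': 'Lovelace', 'email': 'ada@example.com'}
# returns dict(first_name='Ada', last_name='Lovelace', email='ada@example.com')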
providers.registry.register(PaypalProvider)
| bsd-3-clause |
wisechengyi/pants | tests/python/pants_test/backend/docgen/tasks/test_markdown_to_html_integration.py | 2 | 2372 | # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
from pants.base.build_environment import get_buildroot
from pants.testutil.pants_run_integration_test import PantsRunIntegrationTest
from pants.util.dirutil import safe_open
class MarkdownIntegrationTest(PantsRunIntegrationTest):
def test_markdown_normal(self):
pants_run = self.run_pants(
["markdown", "testprojects/src/java/org/pantsbuild/testproject/page:readme"]
)
self.assert_success(pants_run)
out_path = os.path.join(
get_buildroot(),
"dist",
"markdown/html",
"testprojects/src/java/org/pantsbuild/testproject/page",
"README.html",
)
with safe_open(out_path, "r") as outfile:
page_html = outfile.read()
self.assertIn(
"../../../../../../../examples/src/java/org/pantsbuild/"
"example/hello/main/README.html",
page_html,
"Failed to resolve [[wiki-like]] pants link.",
)
def test_rst_normal(self):
pants_run = self.run_pants(
["markdown", "testprojects/src/java/org/pantsbuild/testproject/page:senserst"]
)
self.assert_success(pants_run)
out_path = os.path.join(
get_buildroot(),
"dist",
"markdown/html",
"testprojects/src/java/org/pantsbuild/testproject/page",
"sense.html",
)
with safe_open(out_path, "r") as outfile:
page_html = outfile.read()
# should get Sense and Sensibility in title (or TITLE, sheesh):
self.assertRegex(page_html, r"(?i).*<title[^>]*>\s*Sense\s+and\s+Sensibility\s*</title")
# should get formatted with h1:
self.assertRegex(
page_html, r"(?i).*<h1[^>]*>\s*They\s+Heard\s+Her\s+With\s+Surprise\s*</h1>"
)
# should get formatted with _something_
self.assertRegex(page_html, r".*>\s*inhabiting\s*</")
self.assertRegex(page_html, r".*>\s*civilly\s*</")
# there should be a link that has href="http://www.calderdale.gov.uk/"
self.assertRegex(page_html, r'.*<a [^>]*href\s*=\s*[\'"]http://www.calderdale')
| apache-2.0 |
mementum/backtrader | samples/vctest/vctest.py | 1 | 15011 | #!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015-2020 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import argparse
import datetime
# The above could be sent to an independent module
import backtrader as bt
from backtrader.utils import flushfile # win32 quick stdout flushing
from backtrader.utils.py3 import string_types
class TestStrategy(bt.Strategy):
params = dict(
smaperiod=5,
trade=False,
stake=10,
exectype=bt.Order.Market,
stopafter=0,
valid=None,
cancel=0,
donotsell=False,
price=None,
pstoplimit=None,
)
def __init__(self):
# To control operation entries
self.orderid = list()
self.order = None
self.counttostop = 0
self.datastatus = 0
# Create SMA on 2nd data
self.sma = bt.indicators.MovAv.SMA(self.data, period=self.p.smaperiod)
print('--------------------------------------------------')
print('Strategy Created')
print('--------------------------------------------------')
def notify_data(self, data, status, *args, **kwargs):
print('*' * 5, 'DATA NOTIF:', data._getstatusname(status), *args)
if status == data.LIVE:
self.counttostop = self.p.stopafter
self.datastatus = 1
def notify_store(self, msg, *args, **kwargs):
print('*' * 5, 'STORE NOTIF:', msg)
def notify_order(self, order):
if order.status in [order.Completed, order.Cancelled, order.Rejected]:
self.order = None
print('-' * 50, 'ORDER BEGIN', datetime.datetime.now())
print(order)
print('-' * 50, 'ORDER END')
def notify_trade(self, trade):
print('-' * 50, 'TRADE BEGIN', datetime.datetime.now())
print(trade)
print('-' * 50, 'TRADE END')
def prenext(self):
self.next(frompre=True)
def next(self, frompre=False):
txt = list()
txt.append('%04d' % len(self))
dtfmt = '%Y-%m-%dT%H:%M:%S.%f'
txt.append('%s' % self.data.datetime.datetime(0).strftime(dtfmt))
txt.append('{}'.format(self.data.open[0]))
txt.append('{}'.format(self.data.high[0]))
txt.append('{}'.format(self.data.low[0]))
txt.append('{}'.format(self.data.close[0]))
txt.append('{}'.format(self.data.volume[0]))
txt.append('{}'.format(self.data.openinterest[0]))
txt.append('{}'.format(self.sma[0]))
print(', '.join(txt))
if len(self.datas) > 1:
txt = list()
txt.append('%04d' % len(self))
dtfmt = '%Y-%m-%dT%H:%M:%S.%f'
txt.append('%s' % self.data1.datetime.datetime(0).strftime(dtfmt))
txt.append('{}'.format(self.data1.open[0]))
txt.append('{}'.format(self.data1.high[0]))
txt.append('{}'.format(self.data1.low[0]))
txt.append('{}'.format(self.data1.close[0]))
txt.append('{}'.format(self.data1.volume[0]))
txt.append('{}'.format(self.data1.openinterest[0]))
txt.append('{}'.format(float('NaN')))
print(', '.join(txt))
if self.counttostop: # stop after x live lines
self.counttostop -= 1
if not self.counttostop:
self.env.runstop()
return
if not self.p.trade:
return
# if True and len(self.orderid) < 1:
if self.datastatus and not self.position and len(self.orderid) < 1:
self.order = self.buy(size=self.p.stake,
exectype=self.p.exectype,
price=self.p.price,
plimit=self.p.pstoplimit,
valid=self.p.valid)
self.orderid.append(self.order)
elif self.position.size > 0 and not self.p.donotsell:
if self.order is None:
size = self.p.stake // 2
if not size:
size = self.position.size # use the remaining
self.order = self.sell(size=size, exectype=bt.Order.Market)
elif self.order is not None and self.p.cancel:
if self.datastatus > self.p.cancel:
self.cancel(self.order)
if self.datastatus:
self.datastatus += 1
def start(self):
header = ['Datetime', 'Open', 'High', 'Low', 'Close', 'Volume',
'OpenInterest', 'SMA']
print(', '.join(header))
self.done = False
def runstrategy():
args = parse_args()
# Create a cerebro
cerebro = bt.Cerebro()
storekwargs = dict()
if not args.nostore:
vcstore = bt.stores.VCStore(**storekwargs)
if args.broker:
brokerargs = dict(account=args.account, **storekwargs)
if not args.nostore:
broker = vcstore.getbroker(**brokerargs)
else:
broker = bt.brokers.VCBroker(**brokerargs)
cerebro.setbroker(broker)
timeframe = bt.TimeFrame.TFrame(args.timeframe)
if args.resample or args.replay:
datatf = bt.TimeFrame.Ticks
datacomp = 1
else:
datatf = timeframe
datacomp = args.compression
fromdate = None
if args.fromdate:
dtformat = '%Y-%m-%d' + ('T%H:%M:%S' * ('T' in args.fromdate))
fromdate = datetime.datetime.strptime(args.fromdate, dtformat)
todate = None
if args.todate:
dtformat = '%Y-%m-%d' + ('T%H:%M:%S' * ('T' in args.todate))
todate = datetime.datetime.strptime(args.todate, dtformat)
VCDataFactory = vcstore.getdata if not args.nostore else bt.feeds.VCData
datakwargs = dict(
timeframe=datatf, compression=datacomp,
fromdate=fromdate, todate=todate,
historical=args.historical,
qcheck=args.qcheck,
tz=args.timezone
)
if args.nostore and not args.broker: # neither store nor broker
datakwargs.update(storekwargs) # pass the store args over the data
data0 = VCDataFactory(dataname=args.data0, tradename=args.tradename,
**datakwargs)
data1 = None
if args.data1 is not None:
data1 = VCDataFactory(dataname=args.data1, **datakwargs)
rekwargs = dict(
timeframe=timeframe, compression=args.compression,
bar2edge=not args.no_bar2edge,
adjbartime=not args.no_adjbartime,
rightedge=not args.no_rightedge,
)
if args.replay:
cerebro.replaydata(data0, **rekwargs)
if data1 is not None:
cerebro.replaydata(data1, **rekwargs)
elif args.resample:
cerebro.resampledata(data0, **rekwargs)
if data1 is not None:
cerebro.resampledata(data1, **rekwargs)
else:
cerebro.adddata(data0)
if data1 is not None:
cerebro.adddata(data1)
if args.valid is None:
valid = None
else:
try:
valid = float(args.valid)
except ValueError:
dtformat = '%Y-%m-%d' + ('T%H:%M:%S' * ('T' in args.valid))
valid = datetime.datetime.strptime(args.valid, dtformat)
else:
# use the parsed float, not the raw string, when building the timedelta
valid = datetime.timedelta(seconds=valid)
# Add the strategy
cerebro.addstrategy(TestStrategy,
smaperiod=args.smaperiod,
trade=args.trade,
exectype=bt.Order.ExecType(args.exectype),
stake=args.stake,
stopafter=args.stopafter,
valid=valid,
cancel=args.cancel,
donotsell=args.donotsell,
price=args.price,
pstoplimit=args.pstoplimit)
# Live data ... avoid long data accumulation by switching to "exactbars"
cerebro.run(exactbars=args.exactbars)
if args.plot and args.exactbars < 1: # plot if possible
cerebro.plot()
def parse_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Test Visual Chart 6 integration')
parser.add_argument('--exactbars', default=1, type=int,
required=False, action='store',
help='exactbars level, use 0/-1/-2 to enable plotting')
parser.add_argument('--plot',
required=False, action='store_true',
help='Plot if possible')
parser.add_argument('--stopafter', default=0, type=int,
required=False, action='store',
help='Stop after x lines of LIVE data')
parser.add_argument('--nostore',
required=False, action='store_true',
help='Do not Use the store pattern')
parser.add_argument('--qcheck', default=0.5, type=float,
required=False, action='store',
help=('Timeout for periodic '
'notification/resampling/replaying check'))
parser.add_argument('--no-timeoffset',
required=False, action='store_true',
help=('Do not Use TWS/System time offset for non '
'timestamped prices and to align resampling'))
parser.add_argument('--data0', default=None,
required=True, action='store',
help='data 0 into the system')
parser.add_argument('--tradename', default=None,
required=False, action='store',
help='Actual Trading Name of the asset')
parser.add_argument('--data1', default=None,
required=False, action='store',
help='data 1 into the system')
parser.add_argument('--timezone', default=None,
required=False, action='store',
help='timezone to get time output into (pytz names)')
parser.add_argument('--historical',
required=False, action='store_true',
help='do only historical download')
parser.add_argument('--fromdate',
required=False, action='store',
help=('Starting date for historical download '
'with format: YYYY-MM-DD[THH:MM:SS]'))
parser.add_argument('--todate',
required=False, action='store',
help=('End date for historical download '
'with format: YYYY-MM-DD[THH:MM:SS]'))
parser.add_argument('--smaperiod', default=5, type=int,
required=False, action='store',
help='Period to apply to the Simple Moving Average')
pgroup = parser.add_mutually_exclusive_group(required=False)
pgroup.add_argument('--replay',
required=False, action='store_true',
help='replay to chosen timeframe')
pgroup.add_argument('--resample',
required=False, action='store_true',
help='resample to chosen timeframe')
parser.add_argument('--timeframe', default=bt.TimeFrame.Names[0],
choices=bt.TimeFrame.Names,
required=False, action='store',
help='TimeFrame for Resample/Replay')
parser.add_argument('--compression', default=1, type=int,
required=False, action='store',
help='Compression for Resample/Replay')
parser.add_argument('--no-bar2edge',
required=False, action='store_true',
help='no bar2edge for resample/replay')
parser.add_argument('--no-adjbartime',
required=False, action='store_true',
help='no adjbartime for resample/replay')
parser.add_argument('--no-rightedge',
required=False, action='store_true',
help='no rightedge for resample/replay')
parser.add_argument('--broker',
required=False, action='store_true',
help='Use VisualChart as broker')
parser.add_argument('--account', default=None,
required=False, action='store',
help='Choose broker account (else first)')
parser.add_argument('--trade',
required=False, action='store_true',
help='Do Sample Buy/Sell operations')
parser.add_argument('--donotsell',
required=False, action='store_true',
help='Do not sell after a buy')
parser.add_argument('--exectype', default=bt.Order.ExecTypes[0],
choices=bt.Order.ExecTypes,
required=False, action='store',
help='Execution to Use when opening position')
parser.add_argument('--price', default=None, type=float,
required=False, action='store',
help='Price in Limit orders or Stop Trigger Price')
parser.add_argument('--pstoplimit', default=None, type=float,
required=False, action='store',
help='Price for the limit in StopLimit')
parser.add_argument('--stake', default=10, type=int,
required=False, action='store',
help='Stake to use in buy operations')
parser.add_argument('--valid', default=None,
required=False, action='store',
help='Seconds or YYYY-MM-DD')
parser.add_argument('--cancel', default=0, type=int,
required=False, action='store',
help=('Cancel a buy order after n bars in operation,'
' to be combined with orders like Limit'))
return parser.parse_args()
if __name__ == '__main__':
runstrategy()
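# Example invocation (a sketch; '010ES' stands in for a real Visual Chart
# symbol and is not taken from this file):
# python vctest.py --data0 010ES --resample --timeframe Minutes --compression 5 --exactbars 0 --plot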
| gpl-3.0 |
sxhao/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/checksvnconfigfile.py | 135 | 2718 | # Copyright (C) 2012 Balazs Ankes (bank@inf.u-szeged.hu) University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This file is used by:
# webkitpy/tool/steps/addsvnmimetypeforpng.py
# webkitpy/style/checkers/png.py
import os
import re
def check(host, fs):
"""
check the svn config file
returns three boolean values:
(is the svn config file missing, is auto-props missing, is the svn:mime-type for png missing)
"""
cfg_file_path = config_file_path(host, fs)
try:
config_file = fs.read_text_file(cfg_file_path)
except IOError:
return (True, True, True)
errorcode_autoprop = not re.search(r"^\s*enable-auto-props\s*=\s*yes", config_file, re.MULTILINE)
errorcode_png = not re.search(r"^\s*\*\.png\s*=\s*svn:mime-type=image/png", config_file, re.MULTILINE)
return (False, errorcode_autoprop, errorcode_png)
def config_file_path(host, fs):
if host.platform.is_win():
config_file_path = fs.join(os.environ['APPDATA'], "Subversion", "config")
else:
config_file_path = fs.join(fs.expanduser("~"), ".subversion", "config")
return config_file_path
def errorstr_autoprop(config_file_path):
return 'Have to enable auto props in the subversion config file (%s "enable-auto-props = yes"). ' % config_file_path
def errorstr_png(config_file_path):
return 'Have to set the svn:mime-type in the subversion config file (%s "*.png = svn:mime-type=image/png").' % config_file_path
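# Minimal usage sketch (assumes a webkitpy Host exposing a 'filesystem'
# attribute, as the callers listed above provide):
# missing_cfg, missing_autoprop, missing_png = check(host, host.filesystem)
# if missing_autoprop:
#     print errorstr_autoprop(config_file_path(host, host.filesystem))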
| bsd-3-clause |
GrimDerp/thrust | performance/build/testsuite.py | 28 | 2505 | """functions that generate reports and figures using the .xml output from the performance tests"""
__all__ = ['TestSuite', 'parse_testsuite_xml']
class TestSuite:
def __init__(self, name, platform, tests):
self.name = name
self.platform = platform
self.tests = tests
def __repr__(self):
import pprint
return 'TestSuite' + pprint.pformat( (self.name, self.platform, self.tests) )
class Test:
def __init__(self, name, variables, results):
self.name = name
self.variables = variables
self.results = results
def __repr__(self):
return 'Test' + repr( (self.name, self.variables, self.results) )
def scalar_element(element):
value = element.get('value')
try:
return int(value)
except (TypeError, ValueError):
try:
return float(value)
except (TypeError, ValueError):
return value
def parse_testsuite_platform(et):
testsuite_platform = {}
platform_element = et.find('platform')
device_element = platform_element.find('device')
device = {}
device['name'] = device_element.get('name')
for property_element in device_element.findall('property'):
device[property_element.get('name')] = scalar_element(property_element)
testsuite_platform['device'] = device
return testsuite_platform
def parse_testsuite_tests(et):
testsuite_tests = {}
for test_element in et.findall('test'):
# test name
test_name = test_element.get('name')
# test variables: name -> value
test_variables = {}
for variable_element in test_element.findall('variable'):
test_variables[variable_element.get('name')] = scalar_element(variable_element)
# test results: name -> (value, units)
test_results = {}
for result_element in test_element.findall('result'):
# TODO make this a thing that can be converted to its first element when treated like a number
test_results[result_element.get('name')] = scalar_element(result_element)
testsuite_tests[test_name] = Test(test_name, test_variables, test_results)
return testsuite_tests
def parse_testsuite_xml(filename):
import xml.etree.ElementTree as ET
et = ET.parse(filename)
testsuite_name = et.getroot().get('name')
testsuite_platform = parse_testsuite_platform(et)
testsuite_tests = parse_testsuite_tests(et)
return TestSuite(testsuite_name, testsuite_platform, testsuite_tests)
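# Minimal usage sketch (the filename is illustrative):
# suite = parse_testsuite_xml('results.xml')
# print suite.platform['device']['name']
# for name, test in suite.tests.items():
#     print name, test.variables, test.results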
| apache-2.0 |
potatolondon/potato-mapreduce | test/mapreduce/combiner_test.py | 15 | 5268 | #!/usr/bin/env python
# Copyright 2011 Google Inc. All Rights Reserved.
# Using opensource naming conventions, pylint: disable=g-bad-name
import unittest
import pipeline
import cloudstorage
from google.appengine.ext import db
# pylint: disable=g-direct-third-party-import
from mapreduce import input_readers
from mapreduce import mapreduce_pipeline
from mapreduce import operation
from mapreduce import output_writers
from mapreduce import shuffler
from mapreduce import test_support
from testlib import testutil
class TestEntity(db.Model):
"""Test entity class."""
data = db.TextProperty()
def test_combiner_map(entity):
"""Tests map handler for use with the Combiner test."""
yield str(int(entity.data) % 4), entity.data
class TestCombiner(object):
"""Test combine handler."""
invocations = []
def __call__(self, key, values, combiner_values):
self.invocations.append((key, values, combiner_values))
value_ints = [int(x) for x in values]
combiner_values_int = [int(x) for x in combiner_values]
yield sum(value_ints + combiner_values_int)
yield operation.counters.Increment("combiner-call")
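# Example (sketch): a call with key='1', values=['5'], combiner_values=[9]
# yields 14 (the new partial value for key '1') and then the counter
# increment operation.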
@classmethod
def reset(cls):
cls.invocations = []
def test_combiner_reduce(key, values):
yield repr((key, sum([int(x) for x in values]))) + "\n"
class CombinerTest(testutil.HandlerTestBase):
"""Tests for combiners."""
def setUp(self):
testutil.HandlerTestBase.setUp(self)
pipeline.Pipeline._send_mail = self._send_mail
self.emails = []
self.old_max_values_count = shuffler._MergePipeline._MAX_VALUES_COUNT
shuffler._MergePipeline._MAX_VALUES_COUNT = 1
TestCombiner.reset()
def tearDown(self):
shuffler._MergePipeline._MAX_VALUES_COUNT = self.old_max_values_count
testutil.HandlerTestBase.tearDown(self)
def _send_mail(self, sender, subject, body, html=None):
"""Callback function for sending mail."""
self.emails.append((sender, subject, body, html))
def testNoCombiner(self):
"""Test running with low values count but without combiner."""
# Even though this test doesn't have combiner specified, it's still
# interesting to run. It forces MergePipeline to produce partial
# key values and we verify that they are combined correctly in reader.
# Prepare test data
entity_count = 200
for i in range(entity_count):
TestEntity(data=str(i)).put()
TestEntity(data=str(i)).put()
p = mapreduce_pipeline.MapreducePipeline(
"test",
__name__ + ".test_combiner_map",
__name__ + ".test_combiner_reduce",
input_reader_spec=input_readers.__name__ + ".DatastoreInputReader",
output_writer_spec=
output_writers.__name__ + ".GoogleCloudStorageOutputWriter",
mapper_params={
"entity_kind": __name__ + ".TestEntity",
},
reducer_params={
"output_writer": {
"bucket_name": "testbucket"
},
},
shards=4)
p.start()
test_support.execute_until_empty(self.taskqueue)
p = mapreduce_pipeline.MapreducePipeline.from_id(p.pipeline_id)
self.assertEquals(4, len(p.outputs.default.value))
file_content = []
for input_file in p.outputs.default.value:
with cloudstorage.open(input_file) as infile:
for line in infile:
file_content.append(line.strip())
file_content = sorted(file_content)
self.assertEquals(
["('0', 9800)", "('1', 9900)", "('2', 10000)", "('3', 10100)"],
file_content)
def testCombiner(self):
"""Test running with low values count but with combiner."""
# Prepare test data
entity_count = 200
for i in range(entity_count):
TestEntity(data=str(i)).put()
TestEntity(data=str(i)).put()
p = mapreduce_pipeline.MapreducePipeline(
"test",
__name__ + ".test_combiner_map",
__name__ + ".test_combiner_reduce",
combiner_spec=__name__ + ".TestCombiner",
input_reader_spec=input_readers.__name__ + ".DatastoreInputReader",
output_writer_spec=
output_writers.__name__ + ".GoogleCloudStorageOutputWriter",
mapper_params={
"entity_kind": __name__ + ".TestEntity",
},
reducer_params={
"output_writer": {
"bucket_name": "testbucket"
},
},
shards=4)
p.start()
test_support.execute_until_empty(self.taskqueue)
p = mapreduce_pipeline.MapreducePipeline.from_id(p.pipeline_id)
self.assertEquals(4, len(p.outputs.default.value))
file_content = []
for input_file in p.outputs.default.value:
with cloudstorage.open(input_file) as infile:
for line in infile:
file_content.append(line.strip())
file_content = sorted(file_content)
self.assertEquals(
["('0', 9800)", "('1', 9900)", "('2', 10000)", "('3', 10100)"],
file_content)
self.assertTrue(TestCombiner.invocations)
for invocation in TestCombiner.invocations:
key = invocation[0]
values = invocation[1]
self.assertTrue(key)
self.assertTrue(values)
self.assertEquals(1, len(values))
self.assertTrue(int(values[0]) % 4 == int(key))
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
dansanderson/picotool | tests/pico8/music/music_test.py | 1 | 3789 | #!/usr/bin/env python3
import unittest
from unittest.mock import Mock
from unittest.mock import patch
from pico8.music import music
VALID_MUSIC_LINES = [b'00 41424344\n'] * 64
class TestMusic(unittest.TestCase):
def testFromLines(self):
m = music.Music.from_lines(VALID_MUSIC_LINES, 4)
self.assertEqual(b'\x41\x42\x43\x44' * 64, m._data)
self.assertEqual(4, m._version)
def testToLines(self):
m = music.Music.from_lines(VALID_MUSIC_LINES, 4)
self.assertEqual(list(m.to_lines()), VALID_MUSIC_LINES)
def testSetChannel(self):
m = music.Music.empty(version=4)
m.set_channel(0, 0, 0)
self.assertEqual(b'\x00\x42\x43\x44', m._data[0:4])
m.set_channel(0, 1, 1)
self.assertEqual(b'\x00\x01\x43\x44', m._data[0:4])
m.set_channel(0, 2, 2)
self.assertEqual(b'\x00\x01\x02\x44', m._data[0:4])
m.set_channel(0, 3, 3)
self.assertEqual(b'\x00\x01\x02\x03', m._data[0:4])
m.set_channel(1, 0, 0)
self.assertEqual(b'\x00\x42\x43\x44', m._data[4:8])
m.set_channel(0, 0, None)
m.set_channel(0, 1, None)
m.set_channel(0, 2, None)
m.set_channel(0, 3, None)
self.assertEqual(b'\x41\x42\x43\x44', m._data[0:4])
def testGetChannel(self):
m = music.Music.empty(version=4)
self.assertIsNone(m.get_channel(0, 0))
m.set_channel(0, 0, 0)
self.assertEqual(0, m.get_channel(0, 0))
self.assertIsNone(m.get_channel(0, 1))
m.set_channel(0, 1, 1)
self.assertEqual(1, m.get_channel(0, 1))
self.assertIsNone(m.get_channel(0, 2))
m.set_channel(0, 2, 2)
self.assertEqual(2, m.get_channel(0, 2))
self.assertIsNone(m.get_channel(0, 3))
m.set_channel(0, 3, 3)
self.assertEqual(3, m.get_channel(0, 3))
self.assertIsNone(m.get_channel(1, 0))
m.set_channel(1, 0, 0)
self.assertEqual(0, m.get_channel(1, 0))
def testSetProperties(self):
m = music.Music.empty(version=4)
m.set_channel(0, 0, 0)
m.set_channel(0, 1, 1)
m.set_channel(0, 2, 2)
m.set_channel(0, 3, 3)
self.assertEqual(b'\x00\x01\x02\x03', m._data[0:4])
m.set_properties(0)
self.assertEqual(b'\x00\x01\x02\x03', m._data[0:4])
m.set_properties(0, begin=True)
self.assertEqual(b'\x80\x01\x02\x03', m._data[0:4])
m.set_properties(0, end=True)
self.assertEqual(b'\x80\x81\x02\x03', m._data[0:4])
m.set_properties(0, stop=True)
self.assertEqual(b'\x80\x81\x82\x03', m._data[0:4])
m.set_properties(0, begin=False, stop=False)
self.assertEqual(b'\x00\x81\x02\x03', m._data[0:4])
m.set_properties(0, begin=True, end=False)
self.assertEqual(b'\x80\x01\x02\x03', m._data[0:4])
m.set_channel(1, 0, 0)
m.set_channel(1, 1, 1)
m.set_channel(1, 2, 2)
m.set_channel(1, 3, 3)
self.assertEqual(b'\x00\x01\x02\x03', m._data[4:8])
m.set_properties(1, begin=True)
self.assertEqual(b'\x80\x01\x02\x03', m._data[4:8])
def testGetProperties(self):
m = music.Music.empty(version=4)
self.assertEqual((False, False, False), m.get_properties(0))
m.set_properties(0, begin=True)
self.assertEqual((True, False, False), m.get_properties(0))
m.set_properties(0, end=True)
self.assertEqual((True, True, False), m.get_properties(0))
m.set_properties(0, stop=True)
self.assertEqual((True, True, True), m.get_properties(0))
m.set_properties(0, begin=False, stop=False)
self.assertEqual((False, True, False), m.get_properties(0))
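# The expected bytes above encode begin/end/stop as the high bit (0x80) of
# the pattern's first three channel bytes, e.g. b'\x80\x01\x02\x03' has
# only the begin flag set (inferred from these assertions).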
if __name__ == '__main__':
unittest.main()
| mit |
vivisect/synapse | synapse/lib/trigger.py | 1 | 1918 | import logging
import synapse.lib.cache as s_cache
logger = logging.getLogger(__name__)
class Triggers:
def __init__(self):
self._trig_list = []
self._trig_match = s_cache.MatchCache()
self._trig_byname = s_cache.Cache(onmiss=self._onTrigNameMiss)
def clear(self):
'''
Clear all previously registered triggers
'''
self._trig_list = []
self._trig_byname.clear()
def add(self, func, perm):
'''
Add a new callback to the triggers.
Args:
func (function): The function to call
perm (str,dict): The permission tufo
Returns:
(None)
'''
self._trig_list.append((perm, func))
self._trig_byname.clear()
def _onTrigNameMiss(self, name):
retn = []
for perm, func in self._trig_list:
if self._trig_match.match(name, perm[0]):
retn.append((perm, func))
return retn
def _cmpperm(self, perm, must):
for prop, match in must[1].items():
valu = perm[1].get(prop)
if valu is None:
return False
if not self._trig_match.match(valu, match):
return False
return True
def trigger(self, perm, *args, **kwargs):
'''
Fire any matching trigger functions for the given perm.
Args:
perm ((str,dict)): The perm tufo to trigger
*args (list): args list to use calling the trigger function
**kwargs (dict): kwargs dict to use calling the trigger function
Returns:
(None)
'''
for must, func in self._trig_byname.get(perm[0]):
if self._cmpperm(perm, must):
try:
func(*args, **kwargs)
except Exception as e:
logger.exception(e)
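# Minimal usage sketch (the perm names are illustrative):
# trig = Triggers()
# trig.add(lambda node: logger.info(node), ('node:add', {'form': 'inet:fqdn'}))
# trig.trigger(('node:add', {'form': 'inet:fqdn'}), 'woot.com')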
| apache-2.0 |
joelddiaz/openshift-tools | openshift/installer/vendored/openshift-ansible-3.2.13/filter_plugins/oo_filters.py | 9 | 38540 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
"""
Custom filters for use in openshift-ansible
"""
from ansible import errors
from collections import Mapping
from distutils.version import LooseVersion
from operator import itemgetter
import OpenSSL.crypto
import os
import pdb
import pkg_resources
import re
import json
import yaml
from ansible.utils.unicode import to_unicode
# Disabling too-many-public-methods, since filter methods are necessarily
# public
# pylint: disable=too-many-public-methods
class FilterModule(object):
""" Custom ansible filters """
@staticmethod
def oo_pdb(arg):
""" This pops you into a pdb instance where arg is the data passed in
from the filter.
Ex: "{{ hostvars | oo_pdb }}"
"""
pdb.set_trace()
return arg
@staticmethod
def get_attr(data, attribute=None):
""" This looks up dictionary attributes of the form a.b.c and returns
the value.
Ex: data = {'a': {'b': {'c': 5}}}
attribute = "a.b.c"
returns 5
"""
if not attribute:
raise errors.AnsibleFilterError("|failed expects attribute to be set")
ptr = data
for attr in attribute.split('.'):
ptr = ptr[attr]
return ptr
@staticmethod
def oo_flatten(data):
""" This filter plugin will flatten a list of lists
"""
if not isinstance(data, list):
raise errors.AnsibleFilterError("|failed expects to flatten a List")
return [item for sublist in data for item in sublist]
@staticmethod
def oo_merge_dicts(first_dict, second_dict):
""" Merge two dictionaries where second_dict values take precedence.
Ex: first_dict={'a': 1, 'b': 2}
second_dict={'b': 3, 'c': 4}
returns {'a': 1, 'b': 3, 'c': 4}
"""
if not isinstance(first_dict, dict) or not isinstance(second_dict, dict):
raise errors.AnsibleFilterError("|failed expects to merge two dicts")
merged = first_dict.copy()
merged.update(second_dict)
return merged
@staticmethod
def oo_merge_hostvars(hostvars, variables, inventory_hostname):
""" Merge host and play variables.
When ansible version is greater than or equal to 2.0.0,
merge hostvars[inventory_hostname] with variables (ansible vars)
otherwise merge hostvars with hostvars['inventory_hostname'].
Ex: hostvars={'master1.example.com': {'openshift_variable': '3'},
'openshift_other_variable': '7'}
variables={'openshift_other_variable': '6'}
inventory_hostname='master1.example.com'
returns {'openshift_variable': '3', 'openshift_other_variable': '7'}
hostvars=<ansible.vars.hostvars.HostVars object> (Mapping)
variables={'openshift_other_variable': '6'}
inventory_hostname='master1.example.com'
returns {'openshift_variable': '3', 'openshift_other_variable': '6'}
"""
if not isinstance(hostvars, Mapping):
raise errors.AnsibleFilterError("|failed expects hostvars is dictionary or object")
if not isinstance(variables, dict):
raise errors.AnsibleFilterError("|failed expects variables is a dictionary")
if not isinstance(inventory_hostname, basestring):
raise errors.AnsibleFilterError("|failed expects inventory_hostname is a string")
# pylint: disable=no-member
ansible_version = pkg_resources.get_distribution("ansible").version
merged_hostvars = {}
if LooseVersion(ansible_version) >= LooseVersion('2.0.0'):
merged_hostvars = FilterModule.oo_merge_dicts(hostvars[inventory_hostname],
variables)
else:
merged_hostvars = FilterModule.oo_merge_dicts(hostvars[inventory_hostname],
hostvars)
return merged_hostvars
@staticmethod
def oo_collect(data, attribute=None, filters=None):
""" This takes a list of dict and collects all attributes specified into a
list. If filter is specified then we will include all items that
match _ALL_ of filters. If a dict entry is missing the key in a
filter it will be excluded from the match.
Ex: data = [ {'a':1, 'b':5, 'z': 'z'}, # True, return
{'a':2, 'z': 'z'}, # True, return
{'a':3, 'z': 'z'}, # True, return
{'a':4, 'z': 'b'}, # False, excluded: obj['z'] != filters['z']
]
attribute = 'a'
filters = {'z': 'z'}
returns [1, 2, 3]
"""
if not isinstance(data, list):
raise errors.AnsibleFilterError("|failed expects to filter on a List")
if not attribute:
raise errors.AnsibleFilterError("|failed expects attribute to be set")
if filters is not None:
if not isinstance(filters, dict):
raise errors.AnsibleFilterError("|failed expects filter to be a"
" dict")
retval = [FilterModule.get_attr(d, attribute) for d in data if (
all([d.get(key, None) == filters[key] for key in filters]))]
else:
retval = [FilterModule.get_attr(d, attribute) for d in data]
return retval
@staticmethod
def oo_select_keys_from_list(data, keys):
""" This returns a list, which contains the value portions for the keys
Ex: data = { 'a':1, 'b':2, 'c':3 }
keys = ['a', 'c']
returns [1, 3]
"""
if not isinstance(data, list):
raise errors.AnsibleFilterError("|failed expects to filter on a list")
if not isinstance(keys, list):
raise errors.AnsibleFilterError("|failed expects first param is a list")
# Gather up the values for the list of keys passed in
retval = [FilterModule.oo_select_keys(item, keys) for item in data]
return FilterModule.oo_flatten(retval)
@staticmethod
def oo_select_keys(data, keys):
""" This returns a list, which contains the value portions for the keys
Ex: data = { 'a':1, 'b':2, 'c':3 }
keys = ['a', 'c']
returns [1, 3]
"""
if not isinstance(data, Mapping):
raise errors.AnsibleFilterError("|failed expects to filter on a dict or object")
if not isinstance(keys, list):
raise errors.AnsibleFilterError("|failed expects first param is a list")
# Gather up the values for the list of keys passed in
retval = [data[key] for key in keys if key in data]
return retval
@staticmethod
def oo_prepend_strings_in_list(data, prepend):
""" This takes a list of strings and prepends a string to each item in the
list
Ex: data = ['cart', 'tree']
prepend = 'apple-'
returns ['apple-cart', 'apple-tree']
"""
if not isinstance(data, list):
raise errors.AnsibleFilterError("|failed expects first param is a list")
if not all(isinstance(x, basestring) for x in data):
raise errors.AnsibleFilterError("|failed expects first param is a list"
" of strings")
retval = [prepend + s for s in data]
return retval
@staticmethod
def oo_combine_key_value(data, joiner='='):
"""Take a list of dict in the form of { 'key': 'value'} and
arrange them as a list of strings ['key=value']
"""
if not isinstance(data, list):
raise errors.AnsibleFilterError("|failed expects first param is a list")
rval = []
for item in data:
rval.append("%s%s%s" % (item['key'], joiner, item['value']))
return rval
@staticmethod
def oo_combine_dict(data, in_joiner='=', out_joiner=' '):
"""Take a dict in the form of { 'key': 'value', 'key': 'value' } and
arrange them as a string 'key=value key=value'
"""
if not isinstance(data, dict):
raise errors.AnsibleFilterError("|failed expects first param is a dict")
return out_joiner.join([in_joiner.join([k, v]) for k, v in data.items()])
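# Example: oo_combine_dict({'a': '1', 'b': '2'}) returns 'a=1 b=2' (pair
# order follows dict iteration order, which is not guaranteed here)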
@staticmethod
def oo_ami_selector(data, image_name):
""" This takes a list of amis and an image name and attempts to return
the latest ami.
"""
if not isinstance(data, list):
raise errors.AnsibleFilterError("|failed expects first param is a list")
if not data:
return None
else:
if image_name is None or not image_name.endswith('_*'):
ami = sorted(data, key=itemgetter('name'), reverse=True)[0]
return ami['ami_id']
else:
ami_info = [(ami, ami['name'].split('_')[-1]) for ami in data]
ami = sorted(ami_info, key=itemgetter(1), reverse=True)[0][0]
return ami['ami_id']
@staticmethod
def oo_ec2_volume_definition(data, host_type, docker_ephemeral=False):
""" This takes a dictionary of volume definitions and returns a valid ec2
volume definition based on the host_type and the values in the
dictionary.
The dictionary should look similar to this:
{ 'master':
{ 'root':
{ 'volume_size': 10, 'device_type': 'gp2',
'iops': 500
},
'docker':
{ 'volume_size': 40, 'device_type': 'gp2',
'iops': 500, 'ephemeral': 'true'
}
},
'node':
{ 'root':
{ 'volume_size': 10, 'device_type': 'io1',
'iops': 1000
},
'docker':
{ 'volume_size': 40, 'device_type': 'gp2',
'iops': 500, 'ephemeral': 'true'
}
}
}
"""
if not isinstance(data, dict):
raise errors.AnsibleFilterError("|failed expects first param is a dict")
if host_type not in ['master', 'node', 'etcd']:
raise errors.AnsibleFilterError("|failed expects etcd, master or node"
" as the host type")
root_vol = data[host_type]['root']
root_vol['device_name'] = '/dev/sda1'
root_vol['delete_on_termination'] = True
if root_vol['device_type'] != 'io1':
root_vol.pop('iops', None)
if host_type in ['master', 'node'] and 'docker' in data[host_type]:
docker_vol = data[host_type]['docker']
docker_vol['device_name'] = '/dev/xvdb'
docker_vol['delete_on_termination'] = True
if docker_vol['device_type'] != 'io1':
docker_vol.pop('iops', None)
if docker_ephemeral:
docker_vol.pop('device_type', None)
docker_vol.pop('delete_on_termination', None)
docker_vol['ephemeral'] = 'ephemeral0'
return [root_vol, docker_vol]
elif host_type == 'etcd' and 'etcd' in data[host_type]:
etcd_vol = data[host_type]['etcd']
etcd_vol['device_name'] = '/dev/xvdb'
etcd_vol['delete_on_termination'] = True
if etcd_vol['device_type'] != 'io1':
etcd_vol.pop('iops', None)
return [root_vol, etcd_vol]
return [root_vol]
@staticmethod
def oo_split(string, separator=','):
""" This splits the input string into a list. If the input string is
already a list we will return it as is.
"""
if isinstance(string, list):
return string
return string.split(separator)
@staticmethod
def oo_haproxy_backend_masters(hosts, port):
""" This takes an array of dicts and returns an array of dicts
to be used as a backend for the haproxy role
"""
servers = []
for idx, host_info in enumerate(hosts):
server = dict(name="master%s" % idx)
server_ip = host_info['openshift']['common']['ip']
server['address'] = "%s:%s" % (server_ip, port)
server['opts'] = 'check'
servers.append(server)
return servers
@staticmethod
def oo_filter_list(data, filter_attr=None):
""" This returns a list, which contains all items where filter_attr
evaluates to true
Ex: data = [ { a: 1, b: True },
{ a: 3, b: False },
{ a: 5, b: True } ]
filter_attr = 'b'
returns [ { a: 1, b: True },
{ a: 5, b: True } ]
"""
if not isinstance(data, list):
raise errors.AnsibleFilterError("|failed expects to filter on a list")
if not isinstance(filter_attr, basestring):
raise errors.AnsibleFilterError("|failed expects filter_attr is a str or unicode")
# Gather up the values for the list of keys passed in
return [x for x in data if filter_attr in x and x[filter_attr]]
@staticmethod
def oo_nodes_with_label(nodes, label, value=None):
""" Filters a list of nodes by label and value (if provided)
It handles labels that are in the following variables by priority:
openshift_node_labels, cli_openshift_node_labels, openshift['node']['labels']
Examples:
data = ['a': {'openshift_node_labels': {'color': 'blue', 'size': 'M'}},
'b': {'openshift_node_labels': {'color': 'green', 'size': 'L'}},
'c': {'openshift_node_labels': {'size': 'S'}}]
label = 'color'
returns = ['a': {'openshift_node_labels': {'color': 'blue', 'size': 'M'}},
'b': {'openshift_node_labels': {'color': 'green', 'size': 'L'}}]
data = ['a': {'openshift_node_labels': {'color': 'blue', 'size': 'M'}},
'b': {'openshift_node_labels': {'color': 'green', 'size': 'L'}},
'c': {'openshift_node_labels': {'size': 'S'}}]
label = 'color'
value = 'green'
returns = ['b': {'openshift_node_labels': {'color': 'green', 'size': 'L'}}]
Args:
nodes (list[dict]): list of node to node variables
label (str): label to filter `nodes` by
value (Optional[str]): value of `label` to filter by Defaults
to None.
Returns:
list[dict]: nodes filtered by label and value (if provided)
"""
if not isinstance(nodes, list):
raise errors.AnsibleFilterError("failed expects to filter on a list")
if not isinstance(label, basestring):
raise errors.AnsibleFilterError("failed expects label to be a string")
if value is not None and not isinstance(value, basestring):
raise errors.AnsibleFilterError("failed expects value to be a string")
def label_filter(node):
""" filter function for testing if node should be returned """
if not isinstance(node, dict):
raise errors.AnsibleFilterError("failed expects to filter on a list of dicts")
if 'openshift_node_labels' in node:
labels = node['openshift_node_labels']
elif 'cli_openshift_node_labels' in node:
labels = node['cli_openshift_node_labels']
elif 'openshift' in node and 'node' in node['openshift'] and 'labels' in node['openshift']['node']:
labels = node['openshift']['node']['labels']
else:
return False
if isinstance(labels, basestring):
labels = yaml.safe_load(labels)
if not isinstance(labels, dict):
raise errors.AnsibleFilterError(
"failed expected node labels to be a dict or serializable to a dict"
)
return label in labels and (value is None or labels[label] == value)
return [n for n in nodes if label_filter(n)]
@staticmethod
def oo_parse_heat_stack_outputs(data):
""" Formats the HEAT stack output into a usable form
The goal is to transform something like this:
+---------------+-------------------------------------------------+
| Property | Value |
+---------------+-------------------------------------------------+
| capabilities | [] | |
| creation_time | 2015-06-26T12:26:26Z | |
| description | OpenShift cluster | |
| … | … |
| outputs | [ |
| | { |
| | "output_value": "value_A" |
| | "description": "This is the value of Key_A" |
| | "output_key": "Key_A" |
| | }, |
| | { |
| | "output_value": [ |
| | "value_B1", |
| | "value_B2" |
| | ], |
| | "description": "This is the value of Key_B" |
| | "output_key": "Key_B" |
| | }, |
| | ] |
| parameters | { |
| … | … |
+---------------+-------------------------------------------------+
into something like this:
{
"Key_A": "value_A",
"Key_B": [
"value_B1",
"value_B2"
]
}
"""
# Extract the “outputs” JSON snippet from the pretty-printed array
in_outputs = False
outputs = ''
line_regex = re.compile(r'\|\s*(.*?)\s*\|\s*(.*?)\s*\|')
for line in data['stdout_lines']:
match = line_regex.match(line)
if match:
if match.group(1) == 'outputs':
in_outputs = True
elif match.group(1) != '':
in_outputs = False
if in_outputs:
outputs += match.group(2)
outputs = json.loads(outputs)
# Revamp the “outputs” to put it in the form of a “Key: value” map
revamped_outputs = {}
for output in outputs:
revamped_outputs[output['output_key']] = output['output_value']
return revamped_outputs
@staticmethod
# pylint: disable=too-many-branches
def oo_parse_named_certificates(certificates, named_certs_dir, internal_hostnames):
""" Parses names from list of certificate hashes.
Ex: certificates = [{ "certfile": "/root/custom1.crt",
"keyfile": "/root/custom1.key" },
{ "certfile": "custom2.crt",
"keyfile": "custom2.key" }]
returns [{ "certfile": "/etc/origin/master/named_certificates/custom1.crt",
"keyfile": "/etc/origin/master/named_certificates/custom1.key",
"names": [ "public-master-host.com",
"other-master-host.com" ] },
{ "certfile": "/etc/origin/master/named_certificates/custom2.crt",
"keyfile": "/etc/origin/master/named_certificates/custom2.key",
"names": [ "some-hostname.com" ] }]
"""
if not isinstance(named_certs_dir, basestring):
raise errors.AnsibleFilterError("|failed expects named_certs_dir is str or unicode")
if not isinstance(internal_hostnames, list):
raise errors.AnsibleFilterError("|failed expects internal_hostnames is list")
for certificate in certificates:
if 'names' in certificate.keys():
continue
else:
certificate['names'] = []
if not os.path.isfile(certificate['certfile']) or not os.path.isfile(certificate['keyfile']):
raise errors.AnsibleFilterError("|certificate and/or key does not exist '%s', '%s'" %
(certificate['certfile'], certificate['keyfile']))
try:
st_cert = open(certificate['certfile'], 'rt').read()
cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, st_cert)
certificate['names'].append(str(cert.get_subject().commonName.decode()))
for i in range(cert.get_extension_count()):
if cert.get_extension(i).get_short_name() == 'subjectAltName':
for name in str(cert.get_extension(i)).replace('DNS:', '').split(', '):
certificate['names'].append(name)
except:
raise errors.AnsibleFilterError(("|failed to parse certificate '%s', " % certificate['certfile'] +
"please specify certificate names in host inventory"))
certificate['names'] = [name for name in certificate['names'] if name not in internal_hostnames]
certificate['names'] = list(set(certificate['names']))
if not certificate['names']:
raise errors.AnsibleFilterError(("|failed to parse certificate '%s' or " % certificate['certfile'] +
"detected a collision with internal hostname, please specify " +
"certificate names in host inventory"))
for certificate in certificates:
# Update paths for configuration
certificate['certfile'] = os.path.join(named_certs_dir, os.path.basename(certificate['certfile']))
certificate['keyfile'] = os.path.join(named_certs_dir, os.path.basename(certificate['keyfile']))
return certificates
@staticmethod
def oo_pretty_print_cluster(data):
""" Read a subset of hostvars and build a summary of the cluster
in the following layout:
"c_id": {
"master": {
"default": [
{ "name": "c_id-master-12345", "public IP": "172.16.0.1", "private IP": "192.168.0.1" }
]
"node": {
"infra": [
{ "name": "c_id-node-infra-23456", "public IP": "172.16.0.2", "private IP": "192.168.0.2" }
],
"compute": [
{ "name": "c_id-node-compute-23456", "public IP": "172.16.0.3", "private IP": "192.168.0.3" },
...
]
}
"""
def _get_tag_value(tags, key):
""" Extract values of a map implemented as a set.
Ex: tags = { 'tag_foo_value1', 'tag_bar_value2', 'tag_baz_value3' }
key = 'bar'
returns 'value2'
"""
for tag in tags:
if tag[:len(key)+4] == 'tag_' + key:
return tag[len(key)+5:]
raise KeyError(key)
def _add_host(clusters,
clusterid,
host_type,
sub_host_type,
host):
""" Add a new host in the clusters data structure """
if clusterid not in clusters:
clusters[clusterid] = {}
if host_type not in clusters[clusterid]:
clusters[clusterid][host_type] = {}
if sub_host_type not in clusters[clusterid][host_type]:
clusters[clusterid][host_type][sub_host_type] = []
clusters[clusterid][host_type][sub_host_type].append(host)
clusters = {}
for host in data:
try:
_add_host(clusters=clusters,
clusterid=_get_tag_value(host['group_names'], 'clusterid'),
host_type=_get_tag_value(host['group_names'], 'host-type'),
sub_host_type=_get_tag_value(host['group_names'], 'sub-host-type'),
host={'name': host['inventory_hostname'],
'public IP': host['ansible_ssh_host'],
'private IP': host['ansible_default_ipv4']['address']})
except KeyError:
pass
return clusters
@staticmethod
def oo_generate_secret(num_bytes):
""" generate a session secret """
if not isinstance(num_bytes, int):
raise errors.AnsibleFilterError("|failed expects num_bytes is int")
secret = os.urandom(num_bytes)
return secret.encode('base-64').strip()
@staticmethod
def to_padded_yaml(data, level=0, indent=2, **kw):
""" returns a yaml snippet padded to match the indent level you specify """
if data in [None, ""]:
return ""
try:
transformed = yaml.safe_dump(data, indent=indent, allow_unicode=True, default_flow_style=False, **kw)
padded = "\n".join([" " * level * indent + line for line in transformed.splitlines()])
return to_unicode("\n{0}".format(padded))
except Exception as my_e:
raise errors.AnsibleFilterError('Failed to convert: %s', my_e)
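# Example (sketch): to_padded_yaml({'a': 1}, level=2) returns "\n    a: 1",
# i.e. the dumped snippet indented by level * indent = 4 spaces after a
# leading newline.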
@staticmethod
def oo_openshift_env(hostvars):
''' Return facts which begin with "openshift_" and translate
legacy facts to their openshift_env counterparts.
Ex: hostvars = {'openshift_fact': 42,
'theyre_taking_the_hobbits_to': 'isengard'}
returns = {'openshift_fact': 42}
'''
if not issubclass(type(hostvars), dict):
raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
facts = {}
regex = re.compile('^openshift_.*')
for key in hostvars:
if regex.match(key):
facts[key] = hostvars[key]
migrations = {'openshift_router_selector': 'openshift_hosted_router_selector',
'openshift_registry_selector': 'openshift_hosted_registry_selector'}
for old_fact, new_fact in migrations.iteritems():
if old_fact in facts and new_fact not in facts:
facts[new_fact] = facts[old_fact]
return facts
@staticmethod
# pylint: disable=too-many-branches
def oo_persistent_volumes(hostvars, groups, persistent_volumes=None):
""" Generate list of persistent volumes based on oo_openshift_env
storage options set in host variables.
"""
if not issubclass(type(hostvars), dict):
raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
if not issubclass(type(groups), dict):
raise errors.AnsibleFilterError("|failed expects groups is a dict")
if persistent_volumes != None and not issubclass(type(persistent_volumes), list):
raise errors.AnsibleFilterError("|failed expects persistent_volumes is a list")
if persistent_volumes == None:
persistent_volumes = []
if 'hosted' in hostvars['openshift']:
for component in hostvars['openshift']['hosted']:
if 'storage' in hostvars['openshift']['hosted'][component]:
params = hostvars['openshift']['hosted'][component]['storage']
kind = params['kind']
create_pv = params['create_pv']
if kind != None and create_pv:
if kind == 'nfs':
host = params['host']
if host == None:
if len(groups['oo_nfs_to_config']) > 0:
host = groups['oo_nfs_to_config'][0]
else:
raise errors.AnsibleFilterError("|failed no storage host detected")
directory = params['nfs']['directory']
volume = params['volume']['name']
path = directory + '/' + volume
size = params['volume']['size']
access_modes = params['access_modes']
persistent_volume = dict(
name="{0}-volume".format(volume),
capacity=size,
access_modes=access_modes,
storage=dict(
nfs=dict(
server=host,
path=path)))
persistent_volumes.append(persistent_volume)
elif kind == 'openstack':
volume = params['volume']['name']
size = params['volume']['size']
access_modes = params['access_modes']
filesystem = params['openstack']['filesystem']
volume_id = params['openstack']['volumeID']
persistent_volume = dict(
name="{0}-volume".format(volume),
capacity=size,
access_modes=access_modes,
storage=dict(
cinder=dict(
fsType=filesystem,
volumeID=volume_id)))
persistent_volumes.append(persistent_volume)
elif kind != 'object':
msg = "|failed invalid storage kind '{0}' for component '{1}'".format(
kind,
component)
raise errors.AnsibleFilterError(msg)
return persistent_volumes
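    # Illustrative sketch (assumed values, not part of the original file): with
    # hostvars['openshift']['hosted']['registry']['storage'] set to
    # {'kind': 'nfs', 'create_pv': True, 'host': 'nfs1.example.com',
    #  'nfs': {'directory': '/exports'},
    #  'volume': {'name': 'registry', 'size': '10Gi'},
    #  'access_modes': ['ReadWriteMany']},
    # oo_persistent_volumes would append:
    # {'name': 'registry-volume', 'capacity': '10Gi',
    #  'access_modes': ['ReadWriteMany'],
    #  'storage': {'nfs': {'server': 'nfs1.example.com', 'path': '/exports/registry'}}}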
@staticmethod
def oo_persistent_volume_claims(hostvars, persistent_volume_claims=None):
""" Generate list of persistent volume claims based on oo_openshift_env
storage options set in host variables.
"""
if not issubclass(type(hostvars), dict):
raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
        if persistent_volume_claims is not None and not issubclass(type(persistent_volume_claims), list):
            raise errors.AnsibleFilterError("|failed expects persistent_volume_claims is a list")
        if persistent_volume_claims is None:
            persistent_volume_claims = []
if 'hosted' in hostvars['openshift']:
for component in hostvars['openshift']['hosted']:
if 'storage' in hostvars['openshift']['hosted'][component]:
kind = hostvars['openshift']['hosted'][component]['storage']['kind']
create_pv = hostvars['openshift']['hosted'][component]['storage']['create_pv']
                    if kind is not None and create_pv:
volume = hostvars['openshift']['hosted'][component]['storage']['volume']['name']
size = hostvars['openshift']['hosted'][component]['storage']['volume']['size']
access_modes = hostvars['openshift']['hosted'][component]['storage']['access_modes']
persistent_volume_claim = dict(
name="{0}-claim".format(volume),
capacity=size,
access_modes=access_modes)
persistent_volume_claims.append(persistent_volume_claim)
return persistent_volume_claims
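    # Illustrative sketch (assumed values, not part of the original file): the
    # same registry storage settings shown above would also yield the claim
    # {'name': 'registry-claim', 'capacity': '10Gi',
    #  'access_modes': ['ReadWriteMany']}.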
@staticmethod
def oo_31_rpm_rename_conversion(rpms, openshift_version=None):
""" Filters a list of 3.0 rpms and return the corresponding 3.1 rpms
names with proper version (if provided)
If 3.1 rpms are passed in they will only be augmented with the
correct version. This is important for hosts that are running both
Masters and Nodes.
"""
if not isinstance(rpms, list):
raise errors.AnsibleFilterError("failed expects to filter on a list")
if openshift_version is not None and not isinstance(openshift_version, basestring):
raise errors.AnsibleFilterError("failed expects openshift_version to be a string")
rpms_31 = []
for rpm in rpms:
            if 'atomic' not in rpm:
rpm = rpm.replace("openshift", "atomic-openshift")
if openshift_version:
rpm = rpm + openshift_version
rpms_31.append(rpm)
return rpms_31
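    # Illustrative sketch (assumed inputs, not part of the original file):
    # oo_31_rpm_rename_conversion(['openshift', 'openshift-node'], '-3.1.1.6')
    # would return ['atomic-openshift-3.1.1.6', 'atomic-openshift-node-3.1.1.6'];
    # rpms that already contain 'atomic' only gain the version suffix.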
@staticmethod
def oo_pods_match_component(pods, deployment_type, component):
""" Filters a list of Pods and returns the ones matching the deployment_type and component
"""
if not isinstance(pods, list):
raise errors.AnsibleFilterError("failed expects to filter on a list")
if not isinstance(deployment_type, basestring):
raise errors.AnsibleFilterError("failed expects deployment_type to be a string")
if not isinstance(component, basestring):
raise errors.AnsibleFilterError("failed expects component to be a string")
image_prefix = 'openshift/origin-'
if deployment_type in ['enterprise', 'online', 'openshift-enterprise']:
image_prefix = 'openshift3/ose-'
elif deployment_type == 'atomic-enterprise':
image_prefix = 'aep3_beta/aep-'
matching_pods = []
image_regex = image_prefix + component + r'.*'
for pod in pods:
for container in pod['spec']['containers']:
if re.search(image_regex, container['image']):
matching_pods.append(pod)
break # stop here, don't add a pod more than once
return matching_pods
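    # Illustrative sketch (assumed pod shape, not part of the original file):
    # with deployment_type='origin' the default prefix 'openshift/origin-' is
    # kept, so a pod whose spec lists the container image
    # 'openshift/origin-haproxy-router:v1.1' matches component='haproxy-router'
    # and is appended exactly once, even if several containers match.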
@staticmethod
def oo_get_hosts_from_hostvars(hostvars, hosts):
""" Return a list of hosts from hostvars """
retval = []
for host in hosts:
try:
retval.append(hostvars[host])
except errors.AnsibleError as _:
# host does not exist
pass
return retval
@staticmethod
def oo_image_tag_to_rpm_version(version, include_dash=False):
""" Convert an image tag string to an RPM version if necessary
Empty strings and strings that are already in rpm version format
are ignored. Also remove non semantic version components.
Ex. v3.2.0.10 -> -3.2.0.10
v1.2.0-rc1 -> -1.2.0
"""
if not isinstance(version, basestring):
raise errors.AnsibleFilterError("|failed expects a string or unicode")
if version.startswith("v"):
version = version[1:]
        # Strip the release from the requested version; we no longer support this.
version = version.split('-')[0]
if include_dash and version and not version.startswith("-"):
version = "-" + version
return version
def filters(self):
""" returns a mapping of filters to methods """
return {
"oo_select_keys": self.oo_select_keys,
"oo_select_keys_from_list": self.oo_select_keys_from_list,
"oo_collect": self.oo_collect,
"oo_flatten": self.oo_flatten,
"oo_pdb": self.oo_pdb,
"oo_prepend_strings_in_list": self.oo_prepend_strings_in_list,
"oo_ami_selector": self.oo_ami_selector,
"oo_ec2_volume_definition": self.oo_ec2_volume_definition,
"oo_combine_key_value": self.oo_combine_key_value,
"oo_combine_dict": self.oo_combine_dict,
"oo_split": self.oo_split,
"oo_filter_list": self.oo_filter_list,
"oo_parse_heat_stack_outputs": self.oo_parse_heat_stack_outputs,
"oo_parse_named_certificates": self.oo_parse_named_certificates,
"oo_haproxy_backend_masters": self.oo_haproxy_backend_masters,
"oo_pretty_print_cluster": self.oo_pretty_print_cluster,
"oo_generate_secret": self.oo_generate_secret,
"to_padded_yaml": self.to_padded_yaml,
"oo_nodes_with_label": self.oo_nodes_with_label,
"oo_openshift_env": self.oo_openshift_env,
"oo_persistent_volumes": self.oo_persistent_volumes,
"oo_persistent_volume_claims": self.oo_persistent_volume_claims,
"oo_31_rpm_rename_conversion": self.oo_31_rpm_rename_conversion,
"oo_pods_match_component": self.oo_pods_match_component,
"oo_get_hosts_from_hostvars": self.oo_get_hosts_from_hostvars,
"oo_image_tag_to_rpm_version": self.oo_image_tag_to_rpm_version,
"oo_merge_dicts": self.oo_merge_dicts,
"oo_merge_hostvars": self.oo_merge_hostvars,
}
| apache-2.0 |
vlegoff/tsunami | src/secondaires/navigation/equipage/postes/chirurgien.py | 1 | 1796 | # -*- coding: utf-8 -*-
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le poste chirurgien."""
from . import Poste
class Chirurgien(Poste):
"""Classe définissant le poste chirurgien."""
nom = "chirurgien"
autorite = 15
points = 4
nom_parent = "officier"
| bsd-3-clause |
allenai/allennlp | tests/training/metrics/covariance_test.py | 1 | 7843 | import numpy as np
import torch
from torch.testing import assert_allclose
from allennlp.common.testing import (
AllenNlpTestCase,
multi_device,
# global_distributed_metric,
# run_distributed_test,
)
from allennlp.training.metrics import Covariance
class CovarianceTest(AllenNlpTestCase):
@multi_device
def test_covariance_unmasked_computation(self, device: str):
covariance = Covariance()
batch_size = 100
num_labels = 10
predictions = torch.randn(batch_size, num_labels, device=device)
labels = 0.5 * predictions + torch.randn(batch_size, num_labels, device=device)
stride = 10
for i in range(batch_size // stride):
timestep_predictions = predictions[stride * i : stride * (i + 1), :]
timestep_labels = labels[stride * i : stride * (i + 1), :]
# Flatten the predictions and labels thus far, so numpy treats them as
# independent observations.
expected_covariance = np.cov(
predictions[: stride * (i + 1), :].view(-1).cpu().numpy(),
labels[: stride * (i + 1), :].view(-1).cpu().numpy(),
)[0, 1]
covariance(timestep_predictions, timestep_labels)
assert_allclose(expected_covariance, covariance.get_metric())
# Test reset
covariance.reset()
covariance(predictions, labels)
assert_allclose(
np.cov(predictions.view(-1).cpu().numpy(), labels.view(-1).cpu().numpy())[0, 1],
covariance.get_metric(),
)
@multi_device
def test_covariance_masked_computation(self, device: str):
covariance = Covariance()
batch_size = 100
num_labels = 10
predictions = torch.randn(batch_size, num_labels, device=device)
labels = 0.5 * predictions + torch.randn(batch_size, num_labels, device=device)
# Random binary mask
mask = torch.randint(0, 2, size=(batch_size, num_labels), device=device).bool()
stride = 10
for i in range(batch_size // stride):
timestep_predictions = predictions[stride * i : stride * (i + 1), :]
timestep_labels = labels[stride * i : stride * (i + 1), :]
timestep_mask = mask[stride * i : stride * (i + 1), :]
# Flatten the predictions, labels, and mask thus far, so numpy treats them as
# independent observations.
expected_covariance = np.cov(
predictions[: stride * (i + 1), :].view(-1).cpu().numpy(),
labels[: stride * (i + 1), :].view(-1).cpu().numpy(),
fweights=mask[: stride * (i + 1), :].view(-1).cpu().numpy(),
)[0, 1]
covariance(timestep_predictions, timestep_labels, timestep_mask)
assert_allclose(expected_covariance, covariance.get_metric())
# Test reset
covariance.reset()
covariance(predictions, labels, mask)
assert_allclose(
np.cov(
predictions.view(-1).cpu().numpy(),
labels.view(-1).cpu().numpy(),
fweights=mask.view(-1).cpu().numpy(),
)[0, 1],
covariance.get_metric(),
)
# Commenting in order to revisit distributed covariance later.
# def test_distributed_covariance(self):
# batch_size = 10
# num_labels = 10
# predictions = torch.randn(batch_size, num_labels)
# labels = 0.5 * predictions + torch.randn(batch_size, num_labels)
# # Random binary mask
# mask = torch.randint(0, 2, size=(batch_size, num_labels)).bool()
# expected_covariance = np.cov(
# predictions.view(-1).cpu().numpy(),
# labels.view(-1).cpu().numpy(),
# fweights=mask.view(-1).cpu().numpy(),
# )[0, 1]
# predictions = [predictions[:5], predictions[5:]]
# labels = [labels[:5], labels[5:]]
# mask = [mask[:5], mask[5:]]
# metric_kwargs = {"predictions": predictions, "gold_labels": labels, "mask": mask}
# run_distributed_test(
# [-1, -1],
# global_distributed_metric,
# Covariance(),
# metric_kwargs,
# expected_covariance,
# exact=(0.0001, 1e-01),
# )
# def test_distributed_covariance_unequal_batches(self):
# batch_size = 10
# num_labels = 10
# predictions = torch.randn(batch_size, num_labels)
# labels = 0.5 * predictions + torch.randn(batch_size, num_labels)
# # Random binary mask
# mask = torch.randint(0, 2, size=(batch_size, num_labels)).bool()
# expected_covariance = np.cov(
# predictions.view(-1).cpu().numpy(),
# labels.view(-1).cpu().numpy(),
# fweights=mask.view(-1).cpu().numpy(),
# )[0, 1]
# predictions = [predictions[:6], predictions[6:]]
# labels = [labels[:6], labels[6:]]
# mask = [mask[:6], mask[6:]]
# metric_kwargs = {"predictions": predictions, "gold_labels": labels, "mask": mask}
# run_distributed_test(
# [-1, -1],
# global_distributed_metric,
# Covariance(),
# metric_kwargs,
# expected_covariance,
# exact=(0.0001, 1e-01),
# )
# def test_multiple_runs(self):
# batch_size = 12
# num_labels = 10
# predictions = torch.randn(batch_size, num_labels)
# labels = 0.5 * predictions + torch.randn(batch_size, num_labels)
# stride = 1
# expected_covariances = []
# for i in range(batch_size // stride):
# timestep_predictions = predictions[stride * i : stride * (i + 1), :]
# timestep_labels = labels[stride * i : stride * (i + 1), :]
# # Flatten the predictions and labels thus far, so numpy treats them as
# # independent observations.
# expected_covariance = np.cov(
# predictions[: stride * (i + 1), :].view(-1).cpu().numpy(),
# labels[: stride * (i + 1), :].view(-1).cpu().numpy(),
# )[0, 1]
# expected_covariances.append(expected_covariance)
# predictions = [predictions[:6], predictions[6:]]
# labels = [labels[:6], labels[6:]]
# metric_kwargs = {"predictions": predictions, "gold_labels": labels}
# run_distributed_test(
# [-1, -1],
# multiple_runs,
# Covariance(),
# batch_size,
# stride,
# metric_kwargs,
# expected_covariances,
# exact=(0.0001, 1e-01),
# )
# def multiple_runs(
# global_rank: int,
# world_size: int,
# gpu_id: Union[int, torch.device],
# covariance: Covariance,
# batch_size: int,
# stride: int,
# metric_kwargs: Dict[str, List[Any]],
# expected_covariances: List[float],
# exact: Union[bool, Tuple[float, float]] = True,
# ):
# kwargs = {}
# # Use the arguments meant for the process with rank `global_rank`.
# for argname in metric_kwargs:
# kwargs[argname] = metric_kwargs[argname][global_rank]
# predictions = kwargs["predictions"]
# labels = kwargs["gold_labels"]
# batch_size = predictions.shape[0]
# stride = stride // world_size
# for i in range(batch_size // stride):
# timestep_predictions = predictions[stride * i : stride * (i + 1), :]
# timestep_labels = labels[stride * i : stride * (i + 1), :]
# # Flatten the predictions and labels thus far, so numpy treats them as
# # independent observations.
# covariance(timestep_predictions, timestep_labels)
# assert_allclose(expected_covariances[i], covariance.get_metric(), rtol=exact[0], atol=exact[1])
| apache-2.0 |
orionzhou/robin | utils/counter.py | 1 | 6732 | """
Counter class for py2.6 back compat.
<http://code.activestate.com/recipes/576611/>
"""
from operator import itemgetter
from heapq import nlargest
from itertools import repeat, ifilter
class Counter(dict):
'''Dict subclass for counting hashable objects. Sometimes called a bag
or multiset. Elements are stored as dictionary keys and their counts
are stored as dictionary values.
>>> Counter('zyzygy')
Counter({'y': 3, 'z': 2, 'g': 1})
'''
def __init__(self, iterable=None, **kwds):
'''Create a new, empty Counter object. And if given, count elements
from an input iterable. Or, initialize the count from another mapping
of elements to their counts.
>>> c = Counter() # a new, empty counter
>>> c = Counter('gallahad') # a new counter from an iterable
>>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
>>> c = Counter(a=4, b=2) # a new counter from keyword args
'''
self.update(iterable, **kwds)
def __missing__(self, key):
return 0
def most_common(self, n=None):
'''List the n most common elements and their counts from the most
common to the least. If n is None, then list all element counts.
>>> Counter('abracadabra').most_common(3)
[('a', 5), ('r', 2), ('b', 2)]
'''
if n is None:
return sorted(self.iteritems(), key=itemgetter(1), reverse=True)
return nlargest(n, self.iteritems(), key=itemgetter(1))
def elements(self):
'''Iterator over elements repeating each as many times as its count.
>>> c = Counter('ABCABC')
>>> sorted(c.elements())
['A', 'A', 'B', 'B', 'C', 'C']
If an element's count has been set to zero or is a negative number,
elements() will ignore it.
'''
for elem, count in self.iteritems():
for _ in repeat(None, count):
yield elem
# Override dict methods where the meaning changes for Counter objects.
@classmethod
def fromkeys(cls, iterable, v=None):
raise NotImplementedError(
'Counter.fromkeys() is undefined. Use Counter(iterable) instead.')
def update(self, iterable=None, **kwds):
'''Like dict.update() but add counts instead of replacing them.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.update('witch') # add elements from another iterable
>>> d = Counter('watch')
>>> c.update(d) # add elements from another counter
>>> c['h'] # four 'h' in which, witch, and watch
4
'''
if iterable is not None:
if hasattr(iterable, 'iteritems'):
if self:
self_get = self.get
for elem, count in iterable.iteritems():
self[elem] = self_get(elem, 0) + count
else:
dict.update(self, iterable) # fast path when counter is empty
else:
self_get = self.get
for elem in iterable:
self[elem] = self_get(elem, 0) + 1
if kwds:
self.update(kwds)
def copy(self):
'Like dict.copy() but returns a Counter instance instead of a dict.'
return Counter(self)
def __delitem__(self, elem):
'Like dict.__delitem__() but does not raise KeyError for missing values.'
if elem in self:
dict.__delitem__(self, elem)
def __repr__(self):
if not self:
return '%s()' % self.__class__.__name__
items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
return '%s({%s})' % (self.__class__.__name__, items)
# Multiset-style mathematical operations discussed in:
# Knuth TAOCP Volume II section 4.6.3 exercise 19
# and at http://en.wikipedia.org/wiki/Multiset
#
# Outputs guaranteed to only include positive counts.
#
# To strip negative and zero counts, add-in an empty counter:
# c += Counter()
def __add__(self, other):
'''Add counts from two counters.
>>> Counter('abbb') + Counter('bcc')
Counter({'b': 4, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem in set(self) | set(other):
newcount = self[elem] + other[elem]
if newcount > 0:
result[elem] = newcount
return result
def __sub__(self, other):
''' Subtract count, but keep only results with positive counts.
>>> Counter('abbbc') - Counter('bccd')
Counter({'b': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem in set(self) | set(other):
newcount = self[elem] - other[elem]
if newcount > 0:
result[elem] = newcount
return result
def __or__(self, other):
'''Union is the maximum of value in either of the input counters.
>>> Counter('abbb') | Counter('bcc')
Counter({'b': 3, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
_max = max
result = Counter()
for elem in set(self) | set(other):
newcount = _max(self[elem], other[elem])
if newcount > 0:
result[elem] = newcount
return result
def __and__(self, other):
''' Intersection is the minimum of corresponding counts.
>>> Counter('abbb') & Counter('bcc')
Counter({'b': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
_min = min
result = Counter()
if len(self) < len(other):
self, other = other, self
for elem in ifilter(self.__contains__, other):
newcount = _min(self[elem], other[elem])
if newcount > 0:
result[elem] = newcount
return result
def report(self, sep=", ", percentage=False):
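        '''Return a "key:count" summary string, sorted by descending count.

        >>> Counter('aab').report()
        'a:2, b:1'
        '''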
total = sum(self.values())
items = []
for k, v in sorted(self.items(), key=lambda x: -x[-1]):
item = "{0}:{1}".format(k, v)
if percentage:
item += " ({0:.1f}%)".format(v * 100. / total)
items.append(item)
return sep.join(items)
if __name__ == '__main__':
import doctest
print(doctest.testmod())
| gpl-2.0 |