repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
TeamJB/kernel_samsung_i9300 | tools/perf/scripts/python/futex-contention.py | 11261 | 1486 | # futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
# Long-lived module state shared by the futex enter/exit handlers below.
# (The original initialized process_names twice; the duplicate is removed.)
process_names = {}     # long-lived tid-to-execname mapping
thread_thislock = {}   # tid -> futex address the thread is currently waiting on
thread_blocktime = {}  # tid -> timestamp (ns) when the thread blocked
lock_waits = {}        # long-lived stats on (tid,lock) blockage elapsed time
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
                              nr, uaddr, op, val, utime, uaddr2, val3):
    # Record the start of a FUTEX_WAIT so the exit handler can compute
    # how long this thread stayed blocked on the lock.
    if (op & FUTEX_CMD_MASK) != FUTEX_WAIT:
        return  # we don't care about originators of WAKE events
    process_names[tid] = comm
    thread_thislock[tid] = uaddr
    thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
                             nr, ret):
    """On return from futex(), accumulate how long the thread was blocked.

    Only threads previously seen entering FUTEX_WAIT are tracked; all other
    returns (e.g. FUTEX_WAKE callers) are ignored.
    """
    # dict.has_key() is deprecated and removed in Python 3; use 'in'.
    if tid in thread_blocktime:
        elapsed = nsecs(s, ns) - thread_blocktime.pop(tid)
        # pop() both reads and clears the per-thread tracking entries.
        add_stats(lock_waits, (tid, thread_thislock.pop(tid)), elapsed)
def trace_begin():
    """Called once before event processing starts."""
    # print() with a single argument behaves identically under Python 2
    # and Python 3, unlike the Python-2-only print statement used before.
    print("Press control+C to stop and show the summary")
def trace_end():
    """Print per-(thread, lock) contention statistics collected so far."""
    for (tid, lock), stats in lock_waits.items():
        # renamed from min/max to avoid shadowing the builtins
        min_ns, max_ns, avg, count = stats
        # print() call form is valid under both Python 2 and Python 3
        print("%s[%d] lock %x contended %d times, %d avg ns" %
              (process_names[tid], tid, lock, count, avg))
| gpl-2.0 |
lrp/tftools | tftools.py | 2 | 9758 | # tftools.py: Utilities for optimizing transfer function excitation signals
# Copyright (C) 2013 Larry Price
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
import numpy as np
import scipy.signal as sig
from matplotlib.mlab import csd, psd
import sys
def tukeywin(m, a=0.5):
    '''
    Produce an m-point Tukey (tapered-cosine) window.

    a = taper fraction: values <= 0 degenerate to a rectangular window,
        values >= 1 to a Hann(ing) window
    m = number of points in the window

    see, e.g., https://en.wikipedia.org/wiki/Window_function#Tukey_window
    '''
    # Degenerate cases first.
    if a <= 0:
        return np.ones(m)
    if a >= 1:
        return np.hanning(m)

    x = np.linspace(0, 1, m)
    w = np.ones_like(x)
    # Cosine tapers over the first and last a/2 fraction of the window.
    rising = x < a / 2
    falling = x >= (1 - a / 2)
    w[rising] = 0.5 * (1 + np.cos(2 * np.pi / a * (x[rising] - a / 2)))
    w[falling] = 0.5 * (1 + np.cos(2 * np.pi / a * (x[falling] - 1 + a / 2)))
    return w
def getTF(exc,resp,tmeas,Fs,Nfft,padto=None):
    """
    Estimate a transfer function together with the coherence and SNR of the
    estimate, using the PSD/CSD method with 50% overlapping windows.

    returns f, TF, Coherence, SNR

    exc = excitation signal
    resp = system response
    tmeas = duration of mesurement in seconds
    Fs = sample rate (Hz)
    Nfft = number of data points to be used in each block for fft
    padto = pad to this many points
    """
    noverlap = int(Nfft / 2)
    # effective number of averages for 50%-overlapping windows
    N = 1.89 * tmeas / (Nfft / Fs)
    Sx, f = psd(exc, NFFT=Nfft, Fs=Fs, noverlap=noverlap, pad_to=padto)
    Sy, _ = psd(resp, NFFT=Nfft, Fs=Fs, noverlap=noverlap, pad_to=padto)
    Sxy, _ = csd(exc, resp, NFFT=Nfft, Fs=Fs, noverlap=noverlap, pad_to=padto)
    # magnitude-squared coherence and the corresponding estimate SNR
    Cxy = (Sxy * np.conj(Sxy)) / (Sx * Sy)
    snr = np.sqrt(Cxy * 2 * N / (1 - Cxy) )
    return f, Sxy / Sx, Cxy, snr
def fishersfzpk(ltisys,w,Sn):
"""
create the single-frequency fisher matrix for a transfer function in zpk form, i.e.
__
||_i (w - z_i)
H(w) = k --------------------
__
||_i (w - p_i)
*** the excitation signal is assumed to be a sum of sines ***
*** the denominator must be monic ***
arguments:
ltisys = a scipy.signal.lti instance of the transfer function
w = (angular) frequencies of interest (in rad/s)
Sn = PSD of the noise as an array (same length as w)
returns:
an NxN numpy array with each value being an array of len(w) (the collection of all single
frequency Fisher matrices at frequencies w)
"""
###FIXME: add some basic error handling
#tf is in terms of iw, not w
s = 1j*w
#create the transfer function
#use the lower case w because scipy puts the i in the transfer function for us
tf = ltisys.freqresp(w)[1]
#do the magnitude squared here once and for all
tfmagsq = tf * np.conj(tf)
#get the number of parameters
if ltisys.gain == 1:
N = len(ltisys.zeros) + len(ltisys.poles)
else:
N = len(ltisys.zeros) + len(ltisys.poles) + 1
#take all the derivatives
Dz = np.zeros([len(ltisys.zeros),len(s)],dtype=np.complex128)
Dp = np.zeros([len(ltisys.poles),len(s)],dtype=np.complex128)
for i,z in enumerate(ltisys.zeros):
Dz[i] = -1 / (s - z)
for i,p in enumerate(ltisys.poles):
Dp[i] = 1 / (s - p)
#check for unity gain and the absence of zeros
if ltisys.gain == 1 and ltisys.zeros.size:
deez = np.vstack((Dz,Dp))
elif ltisys.gain == 1 and not ltisys.zeros.size:
deez = Dp
elif ltisys.gain != 1 and ltisys.zeros.size:
deez = np.vstack((np.vstack((Dz,Dp)), 1/ltisys.gain[0] * np.ones(len(s))))
else:
deez = np.vstack((Dp,1/ltisys.gain[0] * np.ones(len(s))))
#put it together to make the fisher matrix
fisher = np.zeros([N,N,len(w)],dtype=np.float64)
for i in range(N):
for j in range(N):
fisher[i][j] = 0.5 * tfmagsq * np.real(np.conj(deez[i])*deez[j]) / Sn
#all done
return fisher
def fishersfab(ltisys,w,Sn):
    """
    create the single-frequency fisher matrix for a transfer function as a rational function, i.e.

              Sum_0^N b_i s^i
    H(w) = --------------------
             1 + Sum_1^M a_i s^i

    *** the excitation signal is assumed to be a sum of sines ***
    *** the denominator must be monic (it's enforced, so no worries)***

    arguments:
    ltisys = instance of scipy.signal.lti
    w = frequencies of interest
    Sn = PSD of the noise as an array (same length as w)

    returns:
    an NxN numpy array with each value being an array of len(w) (the collection of all single
    frequency Fisher matrices at frequencies w)

    NB: you have to take the transpose of the result if you want to, say compute the determinant via numpy.linalg.det
    """
    ###FIXME: add some basic error handling
    #tf is in terms of iw, not w
    s = 1j*w
    #get the tf in the right form
    a,b = lti2ab(ltisys)
    # numerator Sum_0^N b_i s^i; an empty b degenerates to a pass-through
    if b.size:
        numer = np.sum(np.array([b[k] * s**k for k in range(len(b))]),axis=0)
    else:
        numer = np.ones(len(s))
    # monic denominator 1 + Sum_1^M a_i s^i
    denom = np.ones(len(s)) + np.sum(np.array([a[k] * s**(k+1) for k in range(len(a))]),axis=0)
    #number of parameters
    nparm = len(a) + len(b)
    # derivatives of H wrt each coefficient, denominator parameters first
    deez = np.zeros([nparm,len(w)],dtype=np.complex128)
    for k in range(nparm):
        if k < len(a):
            # wrt a_{k+1}: funky numbering because denom is monic (demonic?)
            deez[k] = - s**(k+1) * numer / denom**2
        else:
            # wrt b_{k-len(a)}
            deez[k] = s**(k-len(a)) / denom
    #put it together to make the fisher matrix
    fisher = np.zeros([nparm,nparm,len(w)],dtype=np.float64)
    for i in range(nparm):
        for j in range(nparm):
            fisher[i][j] = 0.5 * np.real(np.conj(deez[i])*deez[j]) / Sn
    #all done
    return fisher
def fishersf(ltisys,w,Sn,usezpk=False):
    """
    convenience wrapper that dispatches to the zpk or rational (ab) form of
    the single-frequency fisher matrix computation
    """
    compute = fishersfzpk if usezpk is True else fishersfab
    return compute(ltisys, w, Sn)
def fisherdesign(fmsf,Sx):
    """
    fisher matrix of the design Sx: the single-frequency fisher matrices
    weighted by the design and summed over frequency (the trailing axis)
    """
    return (fmsf * Sx).sum(axis=2)
def dispersion(fmdesign,fmsf):
    """
    dispersion of the design: trace(inv(F_design) . F_sf), evaluated at
    every frequency of the single-frequency fisher matrix
    """
    inv_design = np.linalg.inv(fmdesign)
    return np.trace(inv_design.dot(fmsf))
def lti2ab(ltisys):
    """
    convenience function to convert a scipy.signal.lti instance to the (a,b)
    coefficient arrays used by the fisher matrix calculation; the denominator
    is normalized so its constant term is 1 (monic in the sense used here)

    returns a,b
    """
    # scipy stores num/den in descending powers of s; flip to ascending,
    # scale by the constant denominator coefficient, and drop the (now
    # unit) constant term from a
    scale = ltisys.den[-1]
    ascending_den = ltisys.den[::-1]
    ascending_num = ltisys.num[::-1]
    return ascending_den[1:] / scale, ascending_num / scale
def findfreqs(ltisys,Sn,w,nfreqs=None,usezpk=False):
    """
    find best frequencies for optimal design (brute force method)

    arguments:
    ltisys = instance of scipy.signal.lti
    Sn = PSD of the noise as an array (same length as w)
    w = (angular) frequencies of interest
    nfreqs = # of frequencies to return. default is 3 x #parameters
    usezpk = boolean for indicating form of the transfer function

    returns:
    wopt = array of optimal frequencies to use
    fisherf = single-frequency fisher matrix evaluated at wopt (basically input for design optimization)

    raises:
    ValueError if nfreqs is smaller than twice the number of parameters
    """
    #get the number of parameters and put the transfer function in the right form
    if usezpk is True:
        #number of parameters for zpk representation (gain only counts when != 1)
        if ltisys.gain == 1:
            nparm = len(ltisys.zeros) + len(ltisys.poles)
        else:
            nparm = len(ltisys.zeros) + len(ltisys.poles) + 1
    else:
        #using ab form
        a,b = lti2ab(ltisys)
        #number of parameters
        nparm = len(a) + len(b)
    #set the number of frequencies
    if nfreqs is None:
        nfreqs = 3 * nparm
    if nfreqs < 2 * nparm:
        # (the sys.exit(0) that used to follow this raise was unreachable
        # dead code and has been removed)
        raise ValueError('Must specify an nfreqs at least twice as large as the number of parameters!')
    fmsf = fishersf(ltisys,w,Sn,usezpk=usezpk)
    #keep the nfreqs frequencies with the largest single-frequency fisher
    #determinant (transpose so det acts on the trailing NxN axes)
    thesefreqs = np.sort(np.argsort(np.linalg.det(fmsf.T))[-nfreqs:])
    return w[thesefreqs], fmsf.T[thesefreqs].T
def optdesign(ltisys,w,usezpk=False,fmsf=None,Sn=None,tol=None,maxit=10000):
    """
    compute the optimal design, Sx

    arguments:
    ltisys = instance of scipy.signal.lti
    w = the frequencies to optimize over
    usezpk = boolean for indicating form of the transfer function
    fmsf = precomputed single-frequency fisher matrix (computed from Sn when omitted)
    Sn = PSD of the noise; required when fmsf is not supplied
    tol = if max(dispersion - nparam) < tol, then iteration ceases. if tol isn't specified then iteration continues until maxit
    maxit = maximum number of iterations to perform

    returns a tuple containing:
    Sx = optimal design as a numpy array
    max(dispersion - nparam)
    """
    #FIXME: add some error handling
    if fmsf is None and Sn is None:
        # (the sys.exit(1) that used to follow this raise was unreachable
        # dead code and has been removed)
        raise ValueError('Must specify Sn to compute Fisher!')
    #get the number of parameters and put the transfer function in the right form
    if usezpk is True:
        #number of parameters for zpk representation (gain only counts when != 1)
        if ltisys.gain == 1:
            nparm = len(ltisys.zeros) + len(ltisys.poles)
        else:
            nparm = len(ltisys.zeros) + len(ltisys.poles) + 1
    else:
        #using ab form
        a,b = lti2ab(ltisys)
        #number of parameters
        nparm = len(a) + len(b)
    #compute the single frequency fisher matrix
    if fmsf is None:
        fmsf = fishersf(ltisys,w,Sn,usezpk=usezpk)
    #initial design
    #normalized to one with all the power evenly distributed
    #don't worry about phases for now...FIXME: optimize phases
    Sx = np.ones(len(w)) / len(w)
    #compute the dispersion
    disp = dispersion(fisherdesign(fmsf,Sx),fmsf)
    # multiplicative update: reweight the design by dispersion/nparm each
    # iteration; tol (when given) only controls early exit.
    # NOTE(review): loop reconstructed from ambiguous indentation -- the
    # original as written never updated Sx when tol was supplied; confirm
    # against the intended algorithm.
    for _ in range(maxit):
        if tol is not None and np.max(disp - nparm) < tol:
            break
        Sx *= disp / nparm
        disp = dispersion(fisherdesign(fmsf,Sx),fmsf)
    return Sx, np.max(disp - nparm)
| gpl-3.0 |
Godmaster49/mtasa-blue | vendor/google-breakpad/src/third_party/protobuf/protobuf/python/google/protobuf/internal/decoder.py | 261 | 25883 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Code for decoding protocol buffer primitives.
This code is very similar to encoder.py -- read the docs for that module first.
A "decoder" is a function with the signature:
Decode(buffer, pos, end, message, field_dict)
The arguments are:
buffer: The string containing the encoded message.
pos: The current position in the string.
end: The position in the string where the current message ends. May be
less than len(buffer) if we're reading a sub-message.
message: The message object into which we're parsing.
field_dict: message._fields (avoids a hashtable lookup).
The decoder reads the field and stores it into field_dict, returning the new
buffer position. A decoder for a repeated field may proactively decode all of
the elements of that field, if they appear consecutively.
Note that decoders may throw any of the following:
IndexError: Indicates a truncated message.
struct.error: Unpacking of a fixed-width field failed.
message.DecodeError: Other errors.
Decoders are expected to raise an exception if they are called with pos > end.
This allows callers to be lax about bounds checking: it's fineto read past
"end" as long as you are sure that someone else will notice and throw an
exception later on.
Something up the call stack is expected to catch IndexError and struct.error
and convert them to message.DecodeError.
Decoders are constructed using decoder constructors with the signature:
MakeDecoder(field_number, is_repeated, is_packed, key, new_default)
The arguments are:
field_number: The field number of the field we want to decode.
is_repeated: Is the field a repeated field? (bool)
is_packed: Is the field a packed field? (bool)
key: The key to use when looking up the field within field_dict.
(This is actually the FieldDescriptor but nothing in this
file should depend on that.)
new_default: A function which takes a message object as a parameter and
returns a new instance of the default value for this field.
(This is called for repeated fields and sub-messages, when an
instance does not already exist.)
As with encoders, we define a decoder constructor for every type of field.
Then, for every field of every message class we construct an actual decoder.
That decoder goes into a dict indexed by tag, so when we decode a message
we repeatedly read a tag, look up the corresponding decoder, and invoke it.
"""
__author__ = 'kenton@google.com (Kenton Varda)'
import struct
from google.protobuf.internal import encoder
from google.protobuf.internal import wire_format
from google.protobuf import message
# Sentinel floating-point values used by the float/double decoders below.
# This will overflow and thus become IEEE-754 "infinity".  We would use
# "float('inf')" but it doesn't work on Windows pre-Python-2.6.
_POS_INF = 1e10000
_NEG_INF = -_POS_INF
_NAN = _POS_INF * 0

# This is not for optimization, but rather to avoid conflicts with local
# variables named "message".
_DecodeError = message.DecodeError
def _VarintDecoder(mask):
"""Return an encoder for a basic varint value (does not include tag).
Decoded values will be bitwise-anded with the given mask before being
returned, e.g. to limit them to 32 bits. The returned decoder does not
take the usual "end" parameter -- the caller is expected to do bounds checking
after the fact (often the caller can defer such checking until later). The
decoder returns a (value, new_pos) pair.
"""
local_ord = ord
def DecodeVarint(buffer, pos):
result = 0
shift = 0
while 1:
b = local_ord(buffer[pos])
result |= ((b & 0x7f) << shift)
pos += 1
if not (b & 0x80):
result &= mask
return (result, pos)
shift += 7
if shift >= 64:
raise _DecodeError('Too many bytes when decoding varint.')
return DecodeVarint
def _SignedVarintDecoder(mask):
"""Like _VarintDecoder() but decodes signed values."""
local_ord = ord
def DecodeVarint(buffer, pos):
result = 0
shift = 0
while 1:
b = local_ord(buffer[pos])
result |= ((b & 0x7f) << shift)
pos += 1
if not (b & 0x80):
if result > 0x7fffffffffffffff:
result -= (1 << 64)
result |= ~mask
else:
result &= mask
return (result, pos)
shift += 7
if shift >= 64:
raise _DecodeError('Too many bytes when decoding varint.')
return DecodeVarint
# Decoders for full-range 64-bit varints.
_DecodeVarint = _VarintDecoder((1 << 64) - 1)
_DecodeSignedVarint = _SignedVarintDecoder((1 << 64) - 1)

# Use these versions for values which must be limited to 32 bits.
_DecodeVarint32 = _VarintDecoder((1 << 32) - 1)
_DecodeSignedVarint32 = _SignedVarintDecoder((1 << 32) - 1)
def ReadTag(buffer, pos):
  """Read a tag from the buffer, and return a (tag_bytes, new_pos) tuple.

  We return the raw bytes of the tag rather than decoding them.  The raw
  bytes can then be used to look up the proper decoder.  This effectively
  allows us to trade some work that would be done in pure-python (decoding
  a varint) for work that is done in C (searching for a byte string in a
  hash table).  In a low-level language it would be much cheaper to decode
  the varint and use that, but not in Python.
  """

  # A tag is itself a varint: every byte with the high bit set is a
  # continuation byte, and the final byte has the high bit clear.
  end_pos = pos
  while ord(buffer[end_pos]) & 0x80:
    end_pos += 1
  end_pos += 1
  return (buffer[pos:end_pos], end_pos)
# --------------------------------------------------------------------
def _SimpleDecoder(wire_type, decode_value):
  """Return a constructor for a decoder for fields of a particular type.

  Args:
      wire_type:  The field's wire type.
      decode_value:  A function which decodes an individual value, e.g.
        _DecodeVarint()

  The returned constructor has the standard MakeDecoder signature described
  in the module docstring: (field_number, is_repeated, is_packed, key,
  new_default).
  """

  def SpecificDecoder(field_number, is_repeated, is_packed, key, new_default):
    if is_packed:
      local_DecodeVarint = _DecodeVarint
      def DecodePackedField(buffer, pos, end, message, field_dict):
        # Lazily create the repeated-value container for this field.
        value = field_dict.get(key)
        if value is None:
          value = field_dict.setdefault(key, new_default(message))
        # A packed field is a length-delimited run of consecutive values.
        (endpoint, pos) = local_DecodeVarint(buffer, pos)
        endpoint += pos
        if endpoint > end:
          raise _DecodeError('Truncated message.')
        while pos < endpoint:
          (element, pos) = decode_value(buffer, pos)
          value.append(element)
        if pos > endpoint:
          # The last element ran past the declared length.
          del value[-1]   # Discard corrupt value.
          raise _DecodeError('Packed element was truncated.')
        return pos
      return DecodePackedField
    elif is_repeated:
      tag_bytes = encoder.TagBytes(field_number, wire_type)
      tag_len = len(tag_bytes)
      def DecodeRepeatedField(buffer, pos, end, message, field_dict):
        value = field_dict.get(key)
        if value is None:
          value = field_dict.setdefault(key, new_default(message))
        while 1:
          (element, new_pos) = decode_value(buffer, pos)
          value.append(element)
          # Predict that the next tag is another copy of the same repeated
          # field.
          pos = new_pos + tag_len
          if buffer[new_pos:pos] != tag_bytes or new_pos >= end:
            # Prediction failed.  Return.
            if new_pos > end:
              raise _DecodeError('Truncated message.')
            return new_pos
      return DecodeRepeatedField
    else:
      def DecodeField(buffer, pos, end, message, field_dict):
        # Store directly into field_dict; roll back on truncation.
        (field_dict[key], pos) = decode_value(buffer, pos)
        if pos > end:
          del field_dict[key]  # Discard corrupt value.
          raise _DecodeError('Truncated message.')
        return pos
      return DecodeField

  return SpecificDecoder
def _ModifiedDecoder(wire_type, decode_value, modify_value):
  """Like SimpleDecoder but additionally invokes modify_value on every value
  before storing it.  Usually modify_value is ZigZagDecode.
  """

  # Reusing _SimpleDecoder is slightly slower than copying a bunch of code,
  # but not enough to make a significant difference.

  def DecodeAndModify(buffer, pos):
    raw_value, new_pos = decode_value(buffer, pos)
    return (modify_value(raw_value), new_pos)
  return _SimpleDecoder(wire_type, DecodeAndModify)
def _StructPackDecoder(wire_type, format):
  """Return a constructor for a decoder for a fixed-width field.

  Args:
      wire_type:  The field's wire type.
      format:  The format string to pass to struct.unpack().
  """

  value_size = struct.calcsize(format)
  local_unpack = struct.unpack

  # Reusing _SimpleDecoder is slightly slower than copying a bunch of code,
  # but not enough to make a significant difference.

  # Note that we expect someone up-stack to catch struct.error and convert
  # it to _DecodeError -- this way we don't have to set up exception-
  # handling blocks every time we parse one value.

  def InnerDecode(buffer, pos):
    end_pos = pos + value_size
    unpacked = local_unpack(format, buffer[pos:end_pos])
    return (unpacked[0], end_pos)
  return _SimpleDecoder(wire_type, InnerDecode)
def _FloatDecoder():
  """Returns a decoder for a float field.

  This code works around a bug in struct.unpack for non-finite 32-bit
  floating-point values.
  """

  local_unpack = struct.unpack

  def InnerDecode(buffer, pos):
    # We expect a 32-bit value in little-endian byte order.  Bit 1 is the sign
    # bit, bits 2-9 represent the exponent, and bits 10-32 are the significand.
    new_pos = pos + 4
    float_bytes = buffer[pos:new_pos]

    # If this value has all its exponent bits set, then it's non-finite.
    # In Python 2.4, struct.unpack will convert it to a finite 64-bit value.
    # To avoid that, we parse it specially.
    # NOTE(review): the comparisons below rely on Python-2 semantics, where
    # indexing a str yields a one-character str -- confirm before porting.
    if ((float_bytes[3] in '\x7F\xFF')
        and (float_bytes[2] >= '\x80')):

      # If at least one significand bit is set...
      if float_bytes[0:3] != '\x00\x00\x80':
        return (_NAN, new_pos)

      # If sign bit is set...
      if float_bytes[3] == '\xFF':
        return (_NEG_INF, new_pos)

      return (_POS_INF, new_pos)

    # Note that we expect someone up-stack to catch struct.error and convert
    # it to _DecodeError -- this way we don't have to set up exception-
    # handling blocks every time we parse one value.
    result = local_unpack('<f', float_bytes)[0]
    return (result, new_pos)
  return _SimpleDecoder(wire_format.WIRETYPE_FIXED32, InnerDecode)
def _DoubleDecoder():
  """Returns a decoder for a double field.

  This code works around a bug in struct.unpack for not-a-number.
  """

  local_unpack = struct.unpack

  def InnerDecode(buffer, pos):
    # We expect a 64-bit value in little-endian byte order.  Bit 1 is the sign
    # bit, bits 2-12 represent the exponent, and bits 13-64 are the significand.
    new_pos = pos + 8
    double_bytes = buffer[pos:new_pos]

    # If this value has all its exponent bits set and at least one significand
    # bit set, it's not a number.  In Python 2.4, struct.unpack will treat it
    # as inf or -inf.  To avoid that, we treat it specially.
    # NOTE(review): the comparisons below rely on Python-2 semantics, where
    # indexing a str yields a one-character str -- confirm before porting.
    if ((double_bytes[7] in '\x7F\xFF')
        and (double_bytes[6] >= '\xF0')
        and (double_bytes[0:7] != '\x00\x00\x00\x00\x00\x00\xF0')):
      return (_NAN, new_pos)

    # Note that we expect someone up-stack to catch struct.error and convert
    # it to _DecodeError -- this way we don't have to set up exception-
    # handling blocks every time we parse one value.
    result = local_unpack('<d', double_bytes)[0]
    return (result, new_pos)
  return _SimpleDecoder(wire_format.WIRETYPE_FIXED64, InnerDecode)
# --------------------------------------------------------------------
# Concrete decoder constructors, one per protobuf scalar type.

Int32Decoder = EnumDecoder = _SimpleDecoder(
    wire_format.WIRETYPE_VARINT, _DecodeSignedVarint32)

Int64Decoder = _SimpleDecoder(
    wire_format.WIRETYPE_VARINT, _DecodeSignedVarint)

UInt32Decoder = _SimpleDecoder(wire_format.WIRETYPE_VARINT, _DecodeVarint32)
UInt64Decoder = _SimpleDecoder(wire_format.WIRETYPE_VARINT, _DecodeVarint)

SInt32Decoder = _ModifiedDecoder(
    wire_format.WIRETYPE_VARINT, _DecodeVarint32, wire_format.ZigZagDecode)
SInt64Decoder = _ModifiedDecoder(
    wire_format.WIRETYPE_VARINT, _DecodeVarint, wire_format.ZigZagDecode)

# Note that Python conveniently guarantees that when using the '<' prefix on
# formats, they will also have the same size across all platforms (as opposed
# to without the prefix, where their sizes depend on the C compiler's basic
# type sizes).
Fixed32Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED32, '<I')
Fixed64Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED64, '<Q')
SFixed32Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED32, '<i')
SFixed64Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED64, '<q')

FloatDecoder = _FloatDecoder()
DoubleDecoder = _DoubleDecoder()

BoolDecoder = _ModifiedDecoder(
    wire_format.WIRETYPE_VARINT, _DecodeVarint, bool)
def StringDecoder(field_number, is_repeated, is_packed, key, new_default):
  """Returns a decoder for a string field.

  The wire value is a length-delimited byte blob which is decoded as UTF-8.
  NOTE(review): uses the `unicode` builtin, so this is Python-2-only code.
  """

  local_DecodeVarint = _DecodeVarint
  local_unicode = unicode

  assert not is_packed
  if is_repeated:
    tag_bytes = encoder.TagBytes(field_number,
                                 wire_format.WIRETYPE_LENGTH_DELIMITED)
    tag_len = len(tag_bytes)
    def DecodeRepeatedField(buffer, pos, end, message, field_dict):
      # Lazily create the repeated-value container for this field.
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      while 1:
        (size, pos) = local_DecodeVarint(buffer, pos)
        new_pos = pos + size
        if new_pos > end:
          raise _DecodeError('Truncated string.')
        value.append(local_unicode(buffer[pos:new_pos], 'utf-8'))
        # Predict that the next tag is another copy of the same repeated field.
        pos = new_pos + tag_len
        if buffer[new_pos:pos] != tag_bytes or new_pos == end:
          # Prediction failed.  Return.
          return new_pos
    return DecodeRepeatedField
  else:
    def DecodeField(buffer, pos, end, message, field_dict):
      (size, pos) = local_DecodeVarint(buffer, pos)
      new_pos = pos + size
      if new_pos > end:
        raise _DecodeError('Truncated string.')
      field_dict[key] = local_unicode(buffer[pos:new_pos], 'utf-8')
      return new_pos
    return DecodeField
def BytesDecoder(field_number, is_repeated, is_packed, key, new_default):
  """Returns a decoder for a bytes field.

  Like StringDecoder but stores the raw length-delimited bytes without
  any UTF-8 decoding.
  """

  local_DecodeVarint = _DecodeVarint

  assert not is_packed
  if is_repeated:
    tag_bytes = encoder.TagBytes(field_number,
                                 wire_format.WIRETYPE_LENGTH_DELIMITED)
    tag_len = len(tag_bytes)
    def DecodeRepeatedField(buffer, pos, end, message, field_dict):
      # Lazily create the repeated-value container for this field.
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      while 1:
        (size, pos) = local_DecodeVarint(buffer, pos)
        new_pos = pos + size
        if new_pos > end:
          raise _DecodeError('Truncated string.')
        value.append(buffer[pos:new_pos])
        # Predict that the next tag is another copy of the same repeated field.
        pos = new_pos + tag_len
        if buffer[new_pos:pos] != tag_bytes or new_pos == end:
          # Prediction failed.  Return.
          return new_pos
    return DecodeRepeatedField
  else:
    def DecodeField(buffer, pos, end, message, field_dict):
      (size, pos) = local_DecodeVarint(buffer, pos)
      new_pos = pos + size
      if new_pos > end:
        raise _DecodeError('Truncated string.')
      field_dict[key] = buffer[pos:new_pos]
      return new_pos
    return DecodeField
def GroupDecoder(field_number, is_repeated, is_packed, key, new_default):
  """Returns a decoder for a group field.

  A group is delimited by matching START_GROUP/END_GROUP tags rather than
  by an explicit length.
  """

  end_tag_bytes = encoder.TagBytes(field_number,
                                   wire_format.WIRETYPE_END_GROUP)
  end_tag_len = len(end_tag_bytes)

  assert not is_packed
  if is_repeated:
    tag_bytes = encoder.TagBytes(field_number,
                                 wire_format.WIRETYPE_START_GROUP)
    tag_len = len(tag_bytes)
    def DecodeRepeatedField(buffer, pos, end, message, field_dict):
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      while 1:
        value = field_dict.get(key)
        if value is None:
          value = field_dict.setdefault(key, new_default(message))
        # Read sub-message.
        pos = value.add()._InternalParse(buffer, pos, end)
        # Read end tag.
        new_pos = pos+end_tag_len
        if buffer[pos:new_pos] != end_tag_bytes or new_pos > end:
          raise _DecodeError('Missing group end tag.')
        # Predict that the next tag is another copy of the same repeated field.
        pos = new_pos + tag_len
        if buffer[new_pos:pos] != tag_bytes or new_pos == end:
          # Prediction failed.  Return.
          return new_pos
    return DecodeRepeatedField
  else:
    def DecodeField(buffer, pos, end, message, field_dict):
      # Lazily create the sub-message if it doesn't exist yet.
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      # Read sub-message.
      pos = value._InternalParse(buffer, pos, end)
      # Read end tag.
      new_pos = pos+end_tag_len
      if buffer[pos:new_pos] != end_tag_bytes or new_pos > end:
        raise _DecodeError('Missing group end tag.')
      return new_pos
    return DecodeField
def MessageDecoder(field_number, is_repeated, is_packed, key, new_default):
  """Returns a decoder for a message field.

  A sub-message is encoded as a length-delimited blob which is parsed with
  the sub-message's own _InternalParse.
  """

  local_DecodeVarint = _DecodeVarint

  assert not is_packed
  if is_repeated:
    tag_bytes = encoder.TagBytes(field_number,
                                 wire_format.WIRETYPE_LENGTH_DELIMITED)
    tag_len = len(tag_bytes)
    def DecodeRepeatedField(buffer, pos, end, message, field_dict):
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      while 1:
        value = field_dict.get(key)
        if value is None:
          value = field_dict.setdefault(key, new_default(message))
        # Read length.
        (size, pos) = local_DecodeVarint(buffer, pos)
        new_pos = pos + size
        if new_pos > end:
          raise _DecodeError('Truncated message.')
        # Read sub-message.
        if value.add()._InternalParse(buffer, pos, new_pos) != new_pos:
          # The only reason _InternalParse would return early is if it
          # encountered an end-group tag.
          raise _DecodeError('Unexpected end-group tag.')
        # Predict that the next tag is another copy of the same repeated field.
        pos = new_pos + tag_len
        if buffer[new_pos:pos] != tag_bytes or new_pos == end:
          # Prediction failed.  Return.
          return new_pos
    return DecodeRepeatedField
  else:
    def DecodeField(buffer, pos, end, message, field_dict):
      # Lazily create the sub-message if it doesn't exist yet.
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      # Read length.
      (size, pos) = local_DecodeVarint(buffer, pos)
      new_pos = pos + size
      if new_pos > end:
        raise _DecodeError('Truncated message.')
      # Read sub-message.
      if value._InternalParse(buffer, pos, new_pos) != new_pos:
        # The only reason _InternalParse would return early is if it encountered
        # an end-group tag.
        raise _DecodeError('Unexpected end-group tag.')
      return new_pos
    return DecodeField
# --------------------------------------------------------------------
MESSAGE_SET_ITEM_TAG = encoder.TagBytes(1, wire_format.WIRETYPE_START_GROUP)
def MessageSetItemDecoder(extensions_by_number):
  """Returns a decoder for a MessageSet item.

  The parameter is the _extensions_by_number map for the message class.

  The message set message looks like this:
    message MessageSet {
      repeated group Item = 1 {
        required int32 type_id = 2;
        required string message = 3;
      }
    }
  """

  type_id_tag_bytes = encoder.TagBytes(2, wire_format.WIRETYPE_VARINT)
  message_tag_bytes = encoder.TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED)
  item_end_tag_bytes = encoder.TagBytes(1, wire_format.WIRETYPE_END_GROUP)

  local_ReadTag = ReadTag
  local_DecodeVarint = _DecodeVarint
  local_SkipField = SkipField

  def DecodeItem(buffer, pos, end, message, field_dict):
    type_id = -1
    message_start = -1
    message_end = -1

    # Technically, type_id and message can appear in any order, so we need
    # a little loop here.
    while 1:
      (tag_bytes, pos) = local_ReadTag(buffer, pos)
      if tag_bytes == type_id_tag_bytes:
        (type_id, pos) = local_DecodeVarint(buffer, pos)
      elif tag_bytes == message_tag_bytes:
        (size, message_start) = local_DecodeVarint(buffer, pos)
        pos = message_end = message_start + size
      elif tag_bytes == item_end_tag_bytes:
        break
      else:
        # Fixed: call the prebound local_SkipField.  The local binding was
        # created above precisely for this fast-path lookup, but the body
        # previously referenced the global SkipField and left the local
        # unused.
        pos = local_SkipField(buffer, pos, end, tag_bytes)
        if pos == -1:
          raise _DecodeError('Missing group end tag.')

    if pos > end:
      raise _DecodeError('Truncated message.')

    if type_id == -1:
      raise _DecodeError('MessageSet item missing type_id.')
    if message_start == -1:
      raise _DecodeError('MessageSet item missing message.')

    extension = extensions_by_number.get(type_id)
    if extension is not None:
      value = field_dict.get(extension)
      if value is None:
        value = field_dict.setdefault(
            extension, extension.message_type._concrete_class())
      if value._InternalParse(buffer, message_start, message_end) != message_end:
        # The only reason _InternalParse would return early is if it
        # encountered an end-group tag.
        raise _DecodeError('Unexpected end-group tag.')

    return pos

  return DecodeItem
# --------------------------------------------------------------------
# Optimization is not as heavy here because calls to SkipField() are rare,
# except for handling end-group tags.
def _SkipVarint(buffer, pos, end):
  """Skip a varint value.  Returns the new position."""
  # Consume bytes while the continuation bit (0x80) is set, plus the one
  # terminating byte whose high bit is clear.
  while True:
    byte = ord(buffer[pos])
    pos += 1
    if not byte & 0x80:
      break
  if pos > end:
    raise _DecodeError('Truncated message.')
  return pos
def _SkipFixed64(buffer, pos, end):
  """Skip a fixed64 value.  Returns the new position."""
  # A fixed64 is always exactly eight bytes on the wire.
  new_pos = pos + 8
  if new_pos > end:
    raise _DecodeError('Truncated message.')
  return new_pos
def _SkipLengthDelimited(buffer, pos, end):
  """Skip a length-delimited value.  Returns the new position."""
  # The payload is prefixed by its byte length, encoded as a varint.
  (size, new_pos) = _DecodeVarint(buffer, pos)
  new_pos += size
  if new_pos > end:
    raise _DecodeError('Truncated message.')
  return new_pos
def _SkipGroup(buffer, pos, end):
  """Skip sub-group.  Returns the new position."""
  # Keep consuming fields until SkipField() reports an END_GROUP tag,
  # which it signals by returning -1.
  while True:
    (tag_bytes, pos) = ReadTag(buffer, pos)
    skipped_to = SkipField(buffer, pos, end, tag_bytes)
    if skipped_to == -1:
      return pos
    pos = skipped_to
def _EndGroup(buffer, pos, end):
  """Skipping an END_GROUP tag returns -1 to tell the parent loop to break."""
  # Sentinel understood by _SkipGroup() and the field-decode loop.
  return -1
def _SkipFixed32(buffer, pos, end):
  """Skip a fixed32 value.  Returns the new position."""
  # A fixed32 is always exactly four bytes on the wire.
  new_pos = pos + 4
  if new_pos > end:
    raise _DecodeError('Truncated message.')
  return new_pos
def _RaiseInvalidWireType(buffer, pos, end):
  """Skip function for unknown wire types.  Raises an exception."""
  # Installed in the skipper table for the wire-type slots that the
  # protobuf wire format does not define.
  raise _DecodeError('Tag had invalid wire type.')
def _FieldSkipper():
  """Constructs the SkipField function."""

  # Dispatch table indexed by wire type (0-7).  The last two slots are
  # unused wire types and raise on lookup.
  WIRETYPE_TO_SKIPPER = [
      _SkipVarint,
      _SkipFixed64,
      _SkipLengthDelimited,
      _SkipGroup,
      _EndGroup,
      _SkipFixed32,
      _RaiseInvalidWireType,
      _RaiseInvalidWireType,
      ]

  # Bind module/global lookups to locals once so the returned closure
  # does only fast local-variable loads on each call.
  wiretype_mask = wire_format.TAG_TYPE_MASK
  local_ord = ord

  def SkipField(buffer, pos, end, tag_bytes):
    """Skips a field with the specified tag.

    |pos| should point to the byte immediately after the tag.

    Returns:
        The new position (after the tag value), or -1 if the tag is an end-group
        tag (in which case the calling loop should break).
    """
    # The wire type is always in the first byte since varints are little-endian.
    wire_type = local_ord(tag_bytes[0]) & wiretype_mask
    return WIRETYPE_TO_SKIPPER[wire_type](buffer, pos, end)

  return SkipField


# Module-level skipper built once at import time.
SkipField = _FieldSkipper()
| gpl-3.0 |
fuselock/odoo | addons/point_of_sale/wizard/pos_confirm.py | 343 | 2403 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class pos_confirm(osv.osv_memory):
    # Transient wizard: posts journal entries for paid POS orders and
    # reconciles the receivable lines of invoiced orders.
    _name = 'pos.confirm'
    _description = 'Post POS Journal Entries'

    def action_confirm(self, cr, uid, ids, context=None):
        """Run the confirmation pass over POS orders.

        NOTE(review): the wizard ``ids`` argument is immediately shadowed
        by the searches below, so this acts on *all* matching orders in
        the database, not on a user selection — presumably intentional
        for a batch-posting wizard.
        """
        order_obj = self.pool.get('pos.order')
        # A paid order may only move forward once every one of its bank
        # statement lines belongs to a confirmed statement.
        ids = order_obj.search(cr, uid, [('state','=','paid')], context=context)
        for order in order_obj.browse(cr, uid, ids, context=context):
            todo = True
            for line in order.statement_ids:
                if line.statement_id.state != 'confirm':
                    todo = False
                    break
            if todo:
                order.signal_workflow('done')

        # Check if there is orders to reconcile their invoices
        ids = order_obj.search(cr, uid, [('state','=','invoiced'),('invoice_id.state','=','open')], context=context)
        for order in order_obj.browse(cr, uid, ids, context=context):
            invoice = order.invoice_id
            # Collect the move lines booked on the invoice's receivable
            # account, from both the invoice itself and the payments.
            data_lines = [x.id for x in invoice.move_id.line_id if x.account_id.id == invoice.account_id.id]
            for st in order.statement_ids:
                for move in st.move_ids:
                    data_lines += [x.id for x in move.line_id if x.account_id.id == invoice.account_id.id]
            self.pool.get('account.move.line').reconcile(cr, uid, data_lines, context=context)
        return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ejeschke/ginga | ginga/examples/gw/clocks.py | 3 | 14675 | #! /usr/bin/env python
#
# clocks.py -- Ginga clocks
#
# eric@naoj.org
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
"""
"clocks" displays a grid of clocks in different time zones.
Usage:
$ clock.py HST Asia/Tokyo UTC US/Eastern US/Pacific Hongkong Portugal
$ clock.py --help
$ clock.py --show-timezones
$ clock.py --show-colors
"""
import sys
import os
from datetime import datetime
import pytz
from dateutil import tz
import ginga.toolkit as ginga_toolkit
from ginga import colors
from ginga.misc import log
from ginga.misc.Bunch import Bunch
from ginga.misc.Settings import SettingGroup
from ginga.util.paths import ginga_home
width, height = 300, 230
class Clock(object):

    def __init__(self, app, logger, timezone_info, color='lightgreen',
                 font='Liberation Sans', show_seconds=False):
        """Constructor for a clock object using a ginga canvas.

        ``timezone_info`` is either a named timezone string or a Bunch
        with ``location`` and ``time_offset`` (seconds from UTC) fields.
        """
        self.logger = logger
        if isinstance(timezone_info, Bunch):
            # ad-hoc zone defined by a label and a fixed UTC offset
            self.timezone_name = timezone_info.location
            self.tzinfo = tz.tzoffset(self.timezone_name,
                                      timezone_info.time_offset)
        else:
            # assume timezone_info is a str
            self.timezone_name = timezone_info
            #self.tzinfo = pytz.timezone(timezone)
            # NOTE: weird construction is necessary to get a dateutil
            # timezone from the better names produced by pytz
            self.tzinfo = tz.gettz(str(pytz.timezone(timezone_info)))
        self.color = color
        self.font = font
        # font sizes for the time display and the date/zone line
        self.largesize = 72
        self.smallsize = 24
        self.show_seconds = show_seconds

        # now import our items
        from ginga.gw import Viewers

        fi = Viewers.CanvasView(logger=logger)
        fi.set_bg(0.2, 0.2, 0.2)
        self.viewer = fi

        # re-layout the text objects whenever the viewer is resized
        fi.add_callback('configure', self.clock_resized_cb)

        # canvas that we will draw on
        self.canvas = fi.get_canvas()

        wd, ht = width, height
        if self.show_seconds:
            wd += 300
        fi.set_desired_size(wd, ht)
        iw = Viewers.GingaViewerWidget(viewer=fi)
        self.widget = iw

        # build the text objects and show the current time immediately
        self.clock_resized_cb(self.viewer, wd, ht)

        dt = datetime.utcnow().replace(tzinfo=tz.UTC)
        self.update_clock(dt)

    def clock_resized_cb(self, viewer, width, height):
        """This method is called when an individual clock is resized.
        It deletes and reconstructs the placement of the text objects
        in the canvas.
        """
        self.logger.info("resized canvas to %dx%d" % (width, height))

        # add text objects to canvas
        self.canvas.delete_all_objects()
        Text = self.canvas.get_draw_class('text')
        x, y = 20, int(height * 0.55)
        # text object for the time
        self.time_txt = Text(x, y, text='', color=self.color,
                             font=self.font, fontsize=self.largesize,
                             coord='window')
        self.canvas.add(self.time_txt, tag='_time', redraw=False)

        # for supplementary info (date, timezone, etc)
        self.suppl_txt = Text(x, height - 10, text='', color=self.color,
                              font=self.font, fontsize=self.smallsize,
                              coord='window')
        self.canvas.add(self.suppl_txt, tag='_suppl', redraw=False)

        self.canvas.update_canvas(whence=3)

    def update_clock(self, dt):
        """This method is called by the ClockApp whenever the timer fires
        to update the clock.  `dt` is a timezone-aware datetime object.
        """
        # convert the shared UTC tick into this clock's local time
        dt = dt.astimezone(self.tzinfo)
        fmt = "%H:%M"
        if self.show_seconds:
            fmt = "%H:%M:%S"
        self.time_txt.text = dt.strftime(fmt)
        suppl_text = "{0} {1}".format(dt.strftime("%Y-%m-%d"),
                                      self.timezone_name)
        self.suppl_txt.text = suppl_text
        self.viewer.redraw(whence=3)
class ClockApp(object):
    """Top-level application: a resizable grid of Clock widgets plus
    controls for adding clocks by timezone name or by UTC offset.
    """

    def __init__(self, logger, settings, options):
        self.logger = logger
        self.options = options
        self.settings = settings

        # default palette cycled through as clocks are added
        colors = ['lightgreen', 'orange', 'cyan', 'pink', 'slateblue',
                  'yellow', 'maroon', 'brown']
        self.color_index = 0
        cols = 3
        if options.num_cols is not None:
            cols = options.num_cols
        self.settings.add_defaults(columns=cols, zones=['UTC'],
                                   colors=colors)
        self.colors = self.settings.get('colors', colors)

        # now import our items
        from ginga.gw import Widgets, GwHelp

        self.app = Widgets.Application(logger=logger)
        self.app.add_callback('shutdown', self.quit)
        self.top = self.app.make_window("Clocks")
        self.top.add_callback('close', self.closed)

        vbox = Widgets.VBox()

        menubar = Widgets.Menubar()
        clockmenu = menubar.add_name('Clock')
        item = clockmenu.add_name("Quit")
        item.add_callback('activated', lambda *args: self.quit())
        vbox.add_widget(menubar, stretch=0)

        # grid that holds the individual clock widgets
        self.grid = Widgets.GridBox()
        self.grid.set_border_width(1)
        self.grid.set_spacing(2)
        vbox.add_widget(self.grid, stretch=1)

        hbox = Widgets.HBox()
        self.timezone_label = Widgets.Label('TimeZone')
        self.county_timezone = Widgets.ComboBox(editable=True)
        # make a giant list of time zones
        zones = [timezone for timezones in pytz.country_timezones.values()
                 for timezone in timezones]
        zones.sort()
        for timezone in zones:
            self.county_timezone.append_text(timezone)

        # also let user set timezone by UTC offset
        self.location_label = Widgets.Label('Location')
        self.location = Widgets.TextEntry()
        self.location.set_tooltip("Type a label to denote this UTC offset")
        #self.location.set_length(10)
        self.timeoffset_label = Widgets.Label('UTC Offset(hour)')
        self.time_offset = Widgets.SpinBox(dtype=float)
        self.time_offset.set_decimals(2)
        self.time_offset.set_limits(-12, 12)
        self.time_offset.set_tooltip("Time offset from UTC")

        self.timezone_button = Widgets.Button('Add by Timezone')
        self.offset_button = Widgets.Button('Add by Offset')
        self.timezone_button.add_callback('activated',
                                          self.more_clock_by_timezone)
        self.offset_button.add_callback('activated',
                                        self.more_clock_by_offset)

        hbox.add_widget(self.timezone_label, stretch=0)
        hbox.add_widget(self.county_timezone, stretch=0)
        hbox.add_widget(self.timezone_button, stretch=0)
        hbox.add_widget(Widgets.Label(''), stretch=1)
        hbox.add_widget(self.location_label, stretch=0)
        hbox.add_widget(self.location, stretch=0)
        hbox.add_widget(self.timeoffset_label, stretch=0)
        hbox.add_widget(self.time_offset, stretch=0)
        hbox.add_widget(self.offset_button, stretch=0)
        hbox.add_widget(Widgets.Label(''), stretch=1)
        vbox.add_widget(hbox, stretch=0)

        self.top.set_widget(vbox)

        # mapping of timezone -> Clock widget
        self.clocks = {}
        # one-second heartbeat that drives all clock updates
        self.timer = GwHelp.Timer(1.0)
        self.timer.add_callback('expired', self.timer_cb)
        self.timer.start(1.0)

    def more_clock_by_offset(self, w):
        """GUI callback: add a clock from the label + UTC-offset fields."""
        location = self.location.get_text()
        time_offset = self.time_offset.get_value()
        sec_hour = 3600
        # Bunch form tells Clock to build a fixed-offset tzinfo
        timezone = Bunch(location=location, time_offset=time_offset * sec_hour)
        color = self.colors[self.color_index % len(self.colors)]
        self.color_index += 1
        self.add_clock(timezone=timezone, color=color)

    def more_clock_by_timezone(self, w):
        """GUI callback: add a clock from the timezone combo box."""
        timezone = self.county_timezone.get_text()
        color = self.colors[self.color_index % len(self.colors)]
        self.color_index += 1
        self.add_clock(timezone=timezone, color=color)

    def add_clock(self, timezone, color='lightgreen', show_seconds=None):
        """Add a clock to the grid.  `timezone` is a string representing
        a valid timezone.
        """
        if show_seconds is None:
            show_seconds = self.options.show_seconds

        clock = Clock(self.app, self.logger, timezone, color=color,
                      font=self.options.font, show_seconds=show_seconds)
        clock.widget.cfg_expand(0x7, 0x7)

        # place the new clock in row-major order in the grid
        num_clocks = len(self.clocks)
        cols = self.settings.get('columns')
        row = num_clocks // cols
        col = num_clocks % cols
        self.clocks[timezone] = clock

        self.grid.add_widget(clock.widget, row, col, stretch=1)

    def timer_cb(self, timer):
        """Timer callback.  Update all our clocks."""
        dt_now = datetime.utcnow().replace(tzinfo=tz.UTC)
        self.logger.debug("timer fired. utc time is '%s'" % (str(dt_now)))

        for clock in self.clocks.values():
            clock.update_clock(dt_now)

        # update clocks approx every second
        timer.start(1.0)

    def set_geometry(self, geometry):
        # translation of X window geometry specification WxH+X+Y
        coords = geometry.replace('+', ' +')
        coords = coords.replace('-', ' -')
        coords = coords.split()
        if 'x' in coords[0]:
            # spec includes dimensions
            dim = coords[0]
            coords = coords[1:]
        else:
            # spec is position only
            dim = None

        if dim is not None:
            # user specified dimensions
            dim = [int(i) for i in dim.split('x')]
            self.top.resize(*dim)

        if len(coords) > 0:
            # user specified position
            coords = [int(i) for i in coords]
            self.top.move(*coords)

    def closed(self, w):
        """Callback when the top-level window is closed by the user."""
        self.logger.info("Top window closed.")
        top = self.top
        self.top = None
        self.app.quit()

    def quit(self, *args):
        """Shut down the application, closing the window if still open."""
        self.logger.info("Attempting to shut down the application...")
        if self.top is not None:
            self.top.close()
        sys.exit()
def main(options, args):
    """Build the ClockApp, populate it from the command line, and run
    the GUI main loop until interrupted.
    """
    # TODO: when ginga gets updated on the summit
    logger = log.get_logger("clocks", options=options)

    if options.toolkit is None:
        logger.error("Please choose a GUI toolkit with -t option")

    # decide our toolkit, then import
    ginga_toolkit.use(options.toolkit)

    # persisted preferences live in ~/.ginga/clocks.cfg
    cfgfile = os.path.join(ginga_home, "clocks.cfg")
    settings = SettingGroup(name='clocks', logger=logger,
                            preffile=cfgfile)
    settings.load(onError='silent')

    clock = ClockApp(logger, settings, options)

    if len(options.args) == 0:
        zones = ['UTC']
        #zones = ['HST', 'Asia/Tokyo', 'UTC']
    else:
        zones = options.args

    # size the window to fit the requested number of columns
    cols = settings.get('columns', 3)
    wd, ht = width * cols, height
    if options.show_seconds:
        wd += cols * 300
    clock.top.resize(wd, ht)

    # get the list of colors
    if options.colors is None:
        colors = clock.colors
    else:
        colors = options.colors.split(',')

    # get the list of time zones
    for i, zone in enumerate(zones):
        color = colors[i % len(colors)]
        clock.add_clock(zone, color=color)
    # continue the color rotation where the initial clocks left off
    clock.color_index = i + 1

    clock.top.show()
    if options.geometry is not None:
        clock.set_geometry(options.geometry)
    clock.top.raise_()

    try:
        app = clock.top.get_app()
        app.mainloop()

    except KeyboardInterrupt:
        if clock.top is not None:
            clock.top.close()
        logger.info("Terminating clocks...")
if __name__ == "__main__":

    # Parse command line options
    import argparse

    argprs = argparse.ArgumentParser(description="Parse command line options to clock")
    argprs.add_argument("args", type=str, nargs='*',
                        help="All remaining arguments")
    argprs.add_argument("--colors", dest="colors", metavar="COLORS",
                        default=None,
                        help="Comma-separated list of COLORS to use for clocks")
    argprs.add_argument("--debug", dest="debug", default=False,
                        action="store_true",
                        help="Enter the pdb debugger on main()")
    argprs.add_argument("--display", dest="display", metavar="HOST:N",
                        help="Use X display on HOST:N")
    argprs.add_argument("--font", dest="font", metavar="NAME",
                        default='Liberation Sans',
                        help="Choose font NAME")
    argprs.add_argument("-g", "--geometry", dest="geometry",
                        metavar="GEOM",
                        help="X geometry for initial size and placement")
    argprs.add_argument("-c", "--num-cols", dest="num_cols",
                        metavar="NUM", type=int, default=None,
                        help="Number of columns to use")
    argprs.add_argument("--profile", dest="profile", action="store_true",
                        default=False,
                        help="Run the profiler on main()")
    argprs.add_argument("-s", "--show-seconds", dest="show_seconds",
                        default=False, action="store_true",
                        help="Show seconds on the clock")
    argprs.add_argument("--show-colors", dest="show_colors",
                        default=False, action="store_true",
                        help="Show a list of valid colors")
    argprs.add_argument("--show-timezones", dest="show_timezones",
                        default=False, action="store_true",
                        help="Show a list of valid time zones and exit")
    argprs.add_argument("-t", "--toolkit", dest="toolkit", metavar="NAME",
                        default='qt5',
                        help="Choose GUI toolkit (gtk|qt)")
    log.addlogopts(argprs)

    options = argprs.parse_args()
    args = options.args

    # informational modes: print the requested list and exit
    if options.show_timezones:
        for timezone in pytz.all_timezones:
            print(timezone)
        sys.exit(0)

    if options.show_colors:
        names = colors.get_colors()
        for color in names:
            print(color)
        sys.exit(0)

    if options.display:
        os.environ['DISPLAY'] = options.display

    # Are we debugging this?
    if options.debug:
        import pdb

        pdb.run('main(options, args)')

    # Are we profiling this?
    elif options.profile:
        import profile

        print(("%s profile:" % sys.argv[0]))
        profile.run('main(options, args)')

    else:
        main(options, args)

# END
| bsd-3-clause |
UOMx/edx-platform | common/lib/capa/capa/tests/test_util.py | 47 | 5662 | """
Tests capa util
"""
import unittest
from lxml import etree
from . import test_capa_system
from capa.util import compare_with_tolerance, sanitize_html, get_inner_html_from_xpath
class UtilTest(unittest.TestCase):
    """Tests for util"""

    def setUp(self):
        super(UtilTest, self).setUp()
        self.system = test_capa_system()

    def test_compare_with_tolerance(self):
        """Exercise compare_with_tolerance over default, absolute,
        relative, percentage, infinite and complex tolerances."""
        # Test default tolerance '0.001%' (it is relative)
        result = compare_with_tolerance(100.0, 100.0)
        self.assertTrue(result)
        result = compare_with_tolerance(100.001, 100.0)
        self.assertTrue(result)
        result = compare_with_tolerance(101.0, 100.0)
        self.assertFalse(result)
        # Test absolute percentage tolerance
        result = compare_with_tolerance(109.9, 100.0, '10%', False)
        self.assertTrue(result)
        result = compare_with_tolerance(110.1, 100.0, '10%', False)
        self.assertFalse(result)
        # Test relative percentage tolerance
        result = compare_with_tolerance(111.0, 100.0, '10%', True)
        self.assertTrue(result)
        result = compare_with_tolerance(112.0, 100.0, '10%', True)
        self.assertFalse(result)
        # Test absolute tolerance (string)
        result = compare_with_tolerance(109.9, 100.0, '10.0', False)
        self.assertTrue(result)
        result = compare_with_tolerance(110.1, 100.0, '10.0', False)
        self.assertFalse(result)
        # Test relative tolerance (string)
        result = compare_with_tolerance(111.0, 100.0, '0.1', True)
        self.assertTrue(result)
        result = compare_with_tolerance(112.0, 100.0, '0.1', True)
        self.assertFalse(result)
        # Test absolute tolerance (float)
        result = compare_with_tolerance(109.9, 100.0, 10.0, False)
        self.assertTrue(result)
        result = compare_with_tolerance(110.1, 100.0, 10.0, False)
        self.assertFalse(result)
        # Test relative tolerance (float)
        result = compare_with_tolerance(111.0, 100.0, 0.1, True)
        self.assertTrue(result)
        result = compare_with_tolerance(112.0, 100.0, 0.1, True)
        self.assertFalse(result)

        ##### Infinite values #####
        infinity = float('Inf')
        # Test relative tolerance (float)
        result = compare_with_tolerance(infinity, 100.0, 1.0, True)
        self.assertFalse(result)
        result = compare_with_tolerance(100.0, infinity, 1.0, True)
        self.assertFalse(result)
        result = compare_with_tolerance(infinity, infinity, 1.0, True)
        self.assertTrue(result)
        # Test absolute tolerance (float)
        result = compare_with_tolerance(infinity, 100.0, 1.0, False)
        self.assertFalse(result)
        result = compare_with_tolerance(100.0, infinity, 1.0, False)
        self.assertFalse(result)
        result = compare_with_tolerance(infinity, infinity, 1.0, False)
        self.assertTrue(result)
        # Test relative tolerance (string)
        result = compare_with_tolerance(infinity, 100.0, '1.0', True)
        self.assertFalse(result)
        result = compare_with_tolerance(100.0, infinity, '1.0', True)
        self.assertFalse(result)
        result = compare_with_tolerance(infinity, infinity, '1.0', True)
        self.assertTrue(result)
        # Test absolute tolerance (string)
        result = compare_with_tolerance(infinity, 100.0, '1.0', False)
        self.assertFalse(result)
        result = compare_with_tolerance(100.0, infinity, '1.0', False)
        self.assertFalse(result)
        result = compare_with_tolerance(infinity, infinity, '1.0', False)
        self.assertTrue(result)
        # Test absolute tolerance for smaller values
        result = compare_with_tolerance(100.01, 100.0, 0.01, False)
        self.assertTrue(result)
        result = compare_with_tolerance(100.001, 100.0, 0.001, False)
        self.assertTrue(result)
        result = compare_with_tolerance(100.01, 100.0, '0.01%', False)
        self.assertTrue(result)
        result = compare_with_tolerance(100.002, 100.0, 0.001, False)
        self.assertFalse(result)
        result = compare_with_tolerance(0.4, 0.44, 0.01, False)
        self.assertFalse(result)
        result = compare_with_tolerance(100.01, 100.0, 0.010, False)
        self.assertTrue(result)
        # Test complex_number instructor_complex
        result = compare_with_tolerance(0.4, complex(0.44, 0), 0.01, False)
        self.assertFalse(result)
        result = compare_with_tolerance(100.01, complex(100.0, 0), 0.010, False)
        self.assertTrue(result)
        result = compare_with_tolerance(110.1, complex(100.0, 0), '10.0', False)
        self.assertFalse(result)
        result = compare_with_tolerance(111.0, complex(100.0, 0), '10%', True)
        self.assertTrue(result)

    def test_sanitize_html(self):
        """
        Test for html sanitization with bleach.
        """
        allowed_tags = ['div', 'p', 'audio', 'pre', 'span']
        for tag in allowed_tags:
            queue_msg = "<{0}>Test message</{0}>".format(tag)
            self.assertEqual(sanitize_html(queue_msg), queue_msg)

        not_allowed_tag = 'script'
        queue_msg = "<{0}>Test message</{0}>".format(not_allowed_tag)
        # Disallowed tags are escaped to entities, not passed through.
        # (The bare "<script>...</script>" expectation previously here was
        # an HTML-entity-decoding corruption of this string.)
        expected = "&lt;script&gt;Test message&lt;/script&gt;"
        self.assertEqual(sanitize_html(queue_msg), expected)

    def test_get_inner_html_from_xpath(self):
        """
        Test for getting inner html as string from xpath node.
        """
        xpath_node = etree.XML('<hint style="smtng">aa<a href="#">bb</a>cc</hint>')
        self.assertEqual(get_inner_html_from_xpath(xpath_node), 'aa<a href="#">bb</a>cc')
| agpl-3.0 |
arauzoliver/uip-iiig2016-prog3 | FinalPC3/noteapp.py | 1 | 1122 | import bottle
import pymongo
import book
"""
Ruta por defecto para el index
"""
@bottle.route('/')
def book_index():
mynames_list = book.find_names()
return bottle.template('index', dict(mynames = mynames_list))
"""
Postea las nuevas entrada para ser insertadas a MongoDB
"""
@bottle.route('/newguest', method='POST')
def insert_newguest():
name = bottle.request.forms.get("name")
email = bottle.request.forms.get("email")
book.insert_name(name,email)
bottle.redirect('/')
"""
Se configura la conexion de datos
"""
"""
Configura una conexion string al servidor local
"""
connection_string = "mongodb://localhost"
"""
Gestiona la conexion entre MongoDB y PyMongo, PyMongo maneja nuestro pool
"""
connection = pymongo.MongoClient(connection_string)
#Now we want to set a context to the names database we created using the mongo interactive shell
"""
Enviamos la base de datos de nombres al shell de mongo
"""
database = connection.names
"""
Se adjunta la data u objeto
"""
book = book.book(database)
bottle.debug(True)
bottle.run(host='localhost', port=8082) | mit |
mmckinst/pykickstart | tests/commands/sshpw.py | 3 | 4586 | #
# Peter Jones <pjones@redhat.com>
#
# Copyright 2009 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
import unittest
from tests.baseclass import CommandTest, CommandSequenceTest
from pykickstart.errors import KickstartParseError, KickstartValueError
class F13_TestCase(CommandTest):
    """Parsing tests for the F13 version of the ``sshpw`` command."""
    command = "sshpw"

    def runTest(self):
        # pass
        self.assert_parse("sshpw --username=someguy --iscrypted secrethandshake", "sshpw --username=someguy --iscrypted secrethandshake\n")
        self.assertFalse(self.assert_parse("sshpw --username=A --iscrypted secrethandshake") == None)
        # parsed objects for different usernames must not compare equal
        self.assertTrue(self.assert_parse("sshpw --username=A --iscrypted secrethandshake") != \
                        self.assert_parse("sshpw --username=B --iscrypted secrethandshake"))
        self.assertFalse(self.assert_parse("sshpw --username=A --iscrypted secrethandshake") == \
                         self.assert_parse("sshpw --username=B --iscrypted secrethandshake"))

        # fail
        self.assert_parse_error("sshpw", KickstartValueError)
        self.assert_parse_error("sshpw --username=someguy", KickstartValueError)
        self.assert_parse_error("sshpw --username=someguy --iscrypted=OMGSEKRITZ", KickstartParseError)
        self.assert_parse_error("sshpw --username=someguy --iscrypted", KickstartValueError)

        # pass
        # the last of --iscrypted/--plaintext on the command line wins
        self.assert_parse("sshpw --username=someguy --lock secrethandshake", "sshpw --username=someguy --lock --plaintext secrethandshake\n")
        self.assert_parse("sshpw --username=someguy --plaintext secrethandshake", "sshpw --username=someguy --plaintext secrethandshake\n")
        self.assert_parse("sshpw --username=someguy --plaintext --iscrypted secrethandshake", "sshpw --username=someguy --iscrypted secrethandshake\n")
        self.assert_parse("sshpw --username=someguy --iscrypted --plaintext secrethandshake\n", "sshpw --username=someguy --plaintext secrethandshake\n")
        self.assert_parse("sshpw --username=someguy --lock --plaintext secrethandshake", "sshpw --username=someguy --lock --plaintext secrethandshake\n")
        self.assert_parse("sshpw --username=someguy --iscrypted --lock secrethandshake", "sshpw --username=someguy --lock --iscrypted secrethandshake\n")
        self.assert_parse("sshpw --username=someguy --lock --iscrypted --plaintext secrethandshake", "sshpw --username=someguy --lock --plaintext secrethandshake\n")
        self.assert_parse("sshpw --username=someguy --lock --plaintext --iscrypted secrethandshake", "sshpw --username=someguy --lock --iscrypted secrethandshake\n")
        self.assert_parse("sshpw --username=someguy --plaintext --iscrypted --lock secrethandshake", "sshpw --username=someguy --lock --iscrypted secrethandshake\n")
        self.assert_parse("sshpw --username=someguy --iscrypted --plaintext --lock secrethandshake", "sshpw --username=someguy --lock --plaintext secrethandshake\n")

        # fail
        # --plaintext and --lock are flags and take no value
        self.assert_parse_error("sshpw --username=someguy --plaintext=ISEEENGLAND secrethandshake", KickstartParseError)
        self.assert_parse_error("sshpw --username=someguy --lock=NOKEYSFORYOU secrethandshake", KickstartParseError)
        self.assert_parse_error("sshpw --username=someguy --plaintext", KickstartValueError)
        self.assert_parse_error("sshpw --username=someguy --lock", KickstartValueError)
class F13_Duplicate_TestCase(CommandSequenceTest):
    """Duplicate ``sshpw`` entries for the same username must be rejected."""
    def runTest(self):
        # distinct usernames: fine
        self.assert_parse("""
sshpw --username=someguy --iscrypted passwordA
sshpw --username=otherguy --iscrypted passwordA""")

        # same username twice: flagged (as a UserWarning here)
        self.assert_parse_error("""
sshpw --username=someguy --iscrypted passwordA
sshpw --username=someguy --iscrypted passwordB""", UserWarning)
if __name__ == "__main__":
unittest.main()
| gpl-2.0 |
lhuriguen/tophandball | utils/models.py | 1 | 1767 | import urllib
import json
from decimal import Decimal
from django.db import models
class Marker(models.Model):
    """
    Abstract model that provides geocoding for models with address.

    On save, if the address changed (or coordinates are missing), the
    address is geocoded via the Google Maps web service and the result
    cached in ``latitude``/``longitude``.
    """
    address = models.CharField(max_length=200, blank=True,
                               help_text="Separate address items with commas.")
    latitude = models.DecimalField(max_digits=8, decimal_places=6,
                                   null=True, blank=True)
    longitude = models.DecimalField(max_digits=9, decimal_places=6,
                                    null=True, blank=True)

    class Meta:
        abstract = True

    def __init__(self, *args, **kwargs):
        super(Marker, self).__init__(*args, **kwargs)
        # Remember the address as loaded so save() can detect edits.
        self._original_address = self.address

    def save(self, *args, **kwargs):
        if self._original_address != self.address:
            # Address changed: invalidate the cached coordinates so they
            # are recomputed below.  (0, 0) doubles as the failure value.
            self.latitude, self.longitude = 0, 0
        if self.address and (not self.latitude or not self.longitude):
            self.latitude, self.longitude = self.geocode(self.address)
        super(Marker, self).save(*args, **kwargs)

    def geocode(self, address):
        """Return ``(latitude, longitude)`` Decimals for *address*, or
        ``(0, 0)`` when the geocoding service does not return 'OK'."""
        address = urllib.quote_plus(address.encode('utf-8'))
        base_url = "http://maps.googleapis.com/maps/api/geocode/json?"
        request = base_url + "address=%s" % address
        # Not every concrete subclass defines a ``country`` relation, so
        # look it up defensively; when present, bias results with its
        # ISO code via the API's ``region`` parameter.  (This was
        # "®ion=" in the corrupted source — an entity-decoding of
        # "&region=".)
        country = getattr(self, 'country', None)
        if country:
            request += "&region=%s" % country.code
        data = json.loads(urllib.urlopen(request).read())
        if data['status'] == 'OK':
            latitude = data['results'][0]['geometry']['location']['lat']
            longitude = data['results'][0]['geometry']['location']['lng']
            return Decimal(latitude), Decimal(longitude)
        return 0, 0
| mit |
zenodo/invenio | invenio/modules/workflows/worker_result.py | 15 | 2118 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Contain the AsynchronousResultWrapper class for asynchronous execution."""
from abc import abstractmethod, ABCMeta
from six import add_metaclass
@add_metaclass(ABCMeta)
class AsynchronousResultWrapper(object):
    """Wrap results from asynchronous results.

    This class is an abstract class. When you inherit it you should
    absolutely implement all the functions.

    This class is here for two reason, get and unified interface for all
    the worker and so allow to switch from one to another seamlessly,
    and also add feature to functions.

    For example the get method now allow a post processing
    on the result.
    """

    def __init__(self, asynchronousresult):
        """Instantiate a AsynchronousResultWrapper around a given result object.

        :param asynchronousresult: the async result that you want to wrap.
        """
        self.asyncresult = asynchronousresult

    @abstractmethod
    def get(self, postprocess=None):
        """Return the value of the process."""
        # Subclasses may apply ``postprocess`` to the raw result before
        # returning it.
        return

    @abstractmethod
    def status(self):
        """Return the current status of the tasks."""
        return
return
def uuid_to_workflow(uuid):
    """Return the workflow associated to an uuid.

    Returns ``None`` when no Workflow row matches.
    """
    # Imported lazily to avoid a circular import at module load time.
    from invenio.modules.workflows.models import Workflow
    return Workflow.query.filter(Workflow.uuid == uuid).first()
| gpl-2.0 |
litchfield/django | django/contrib/gis/geos/prototypes/errcheck.py | 486 | 2954 | """
Error checking functions for GEOS ctypes prototype functions.
"""
from ctypes import c_void_p, string_at
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.libgeos import GEOSFuncFactory
# Getting the `free` routine used to free the memory allocated for
# string pointers returned by GEOS.
free = GEOSFuncFactory('GEOSFree')
# GEOSFree takes a single void pointer (the GEOS-allocated buffer).
free.argtypes = [c_void_p]
def last_arg_byref(args):
    "Returns the last C argument's value by reference."
    # ctypes byref() wrappers expose the referenced object as `_obj`.
    final_arg = args[-1]
    return final_arg._obj.value
def check_dbl(result, func, cargs):
    "Checks the status code and returns the double value passed in by reference."
    # Status 1 means success; the double was written into the last
    # (by-reference) ctypes argument.  Any other status yields None.
    return last_arg_byref(cargs) if result == 1 else None
def check_geom(result, func, cargs):
    "Error checking on routines that return Geometries."
    # A NULL (falsy) pointer from GEOS indicates failure.
    if result:
        return result
    raise GEOSException('Error encountered checking Geometry returned from GEOS C function "%s".' % func.__name__)
def check_minus_one(result, func, cargs):
    "Error checking on routines that should not return -1."
    # -1 is the GEOS error sentinel for these routines.
    if result == -1:
        raise GEOSException('Error encountered in GEOS C function "%s".' % func.__name__)
    return result
def check_predicate(result, func, cargs):
    "Error checking for unary/binary predicate functions."
    # Predicates hand back a single char: 1 (true), 0 (false),
    # anything else (error).
    val = ord(result)
    if val == 1:
        return True
    if val == 0:
        return False
    raise GEOSException('Error encountered on GEOS C predicate function "%s".' % func.__name__)
def check_sized_string(result, func, cargs):
    """
    Error checking for routines that return explicitly sized strings.

    This frees the memory allocated by GEOS at the result pointer.
    """
    if not result:
        raise GEOSException('Invalid string pointer returned by GEOS C function "%s"' % func.__name__)
    # The byte count comes back by reference in the trailing c_size_t
    # argument; copy exactly that many bytes out of the GEOS buffer,
    # then release the buffer.
    raw = string_at(result, last_arg_byref(cargs))
    free(result)
    return raw
def check_string(result, func, cargs):
    """
    Error checking for routines that return strings.

    This frees the memory allocated by GEOS at the result pointer.
    """
    # A NULL pointer means the GEOS routine failed.
    if not result:
        raise GEOSException('Error encountered checking string return value in GEOS C function "%s".' % func.__name__)
    # Getting the string value at the pointer address.
    s = string_at(result)
    # Freeing the memory allocated within GEOS
    free(result)
    return s
def check_zero(result, func, cargs):
    "Error checking on routines that should not return 0."
    # Guard clause: any non-zero result is valid.
    if result != 0:
        return result
    raise GEOSException('Error encountered in GEOS C function "%s".' % func.__name__)
| bsd-3-clause |
exu/poligon | python/python_koans/python3/koans/about_none.py | 79 | 1473 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Based on AboutNil in the Ruby Koans
#
from runner.koan import *
class AboutNone(Koan):
    """Koan exercises about ``None``.

    The ``__`` placeholders are intentionally left for the student to fill
    in; they must not be replaced here.
    """

    def test_none_is_an_object(self):
        "Unlike NULL in a lot of languages"
        self.assertEqual(__, isinstance(None, object))

    def test_none_is_universal(self):
        "There is only one None"
        # ``is`` compares identity: every None refers to the same singleton.
        self.assertEqual(____, None is None)

    def test_what_exception_do_you_get_when_calling_nonexistent_methods(self):
        """
        What is the Exception that is thrown when you call a method that does
        not exist?

        Hint: launch python command console and try the code in the block below.

        Don't worry about what 'try' and 'except' do, we'll talk about this later
        """
        try:
            None.some_method_none_does_not_know_about()
        except Exception as ex:
            ex2 = ex

        # What exception has been caught?
        #
        # Need a recap on how to evaluate __class__ attributes?
        #
        # http://bit.ly/__class__

        self.assertEqual(__, ex2.__class__)

        # What message was attached to the exception?
        # (HINT: replace __ with part of the error message.)
        self.assertRegexpMatches(ex2.args[0], __)

    def test_none_is_distinct(self):
        """
        None is distinct from other things which are False.
        """
        self.assertEqual(__, None is not 0)
        self.assertEqual(__, None is not False)
| mit |
maurofaccenda/ansible | lib/ansible/modules/cloud/amazon/ec2_vpc_igw.py | 42 | 4772 | #!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'curated'}
DOCUMENTATION = '''
---
module: ec2_vpc_igw
short_description: Manage an AWS VPC Internet gateway
description:
- Manage an AWS VPC Internet gateway
version_added: "2.0"
author: Robert Estelle (@erydo)
options:
vpc_id:
description:
- The VPC ID for the VPC in which to manage the Internet Gateway.
required: true
default: null
state:
description:
- Create or terminate the IGW
required: false
default: present
choices: [ 'present', 'absent' ]
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Ensure that the VPC has an Internet Gateway.
# The Internet Gateway ID is can be accessed via {{igw.gateway_id}} for use in setting up NATs etc.
ec2_vpc_igw:
vpc_id: vpc-abcdefgh
state: present
register: igw
'''
try:
import boto.ec2
import boto.vpc
from boto.exception import EC2ResponseError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
if __name__ != '__main__':
raise
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info
class AnsibleIGWException(Exception):
    """Raised when an Internet Gateway operation against EC2 fails."""
    pass
def ensure_igw_absent(vpc_conn, vpc_id, check_mode):
    """Detach and delete every Internet Gateway attached to *vpc_id*.

    Returns an Ansible-style result dict with a 'changed' flag.
    """
    attached = vpc_conn.get_all_internet_gateways(
        filters={'attachment.vpc-id': vpc_id})

    # Nothing attached: already in the desired state.
    if not attached:
        return {'changed': False}

    # Check mode: report what would happen without touching AWS.
    if check_mode:
        return {'changed': True}

    for gateway in attached:
        try:
            vpc_conn.detach_internet_gateway(gateway.id, vpc_id)
            vpc_conn.delete_internet_gateway(gateway.id)
        except EC2ResponseError as e:
            raise AnsibleIGWException(
                'Unable to delete Internet Gateway, error: {0}'.format(e))

    return {'changed': True}
def ensure_igw_present(vpc_conn, vpc_id, check_mode):
    """Ensure exactly one Internet Gateway is attached to *vpc_id*.

    Returns an Ansible-style result dict with 'changed' and 'gateway_id'.
    """
    attached = vpc_conn.get_all_internet_gateways(
        filters={'attachment.vpc-id': vpc_id})

    # More than one IGW on a VPC is unexpected; refuse to guess.
    if len(attached) > 1:
        raise AnsibleIGWException(
            'EC2 returned more than one Internet Gateway for VPC {0}, aborting'
            .format(vpc_id))

    # One gateway already attached: nothing to do.
    if attached:
        return {'changed': False, 'gateway_id': attached[0].id}

    # Check mode: the gateway does not exist yet, so no id to report.
    if check_mode:
        return {'changed': True, 'gateway_id': None}

    try:
        gateway = vpc_conn.create_internet_gateway()
        vpc_conn.attach_internet_gateway(gateway.id, vpc_id)
        return {'changed': True, 'gateway_id': gateway.id}
    except EC2ResponseError as e:
        raise AnsibleIGWException(
            'Unable to create Internet Gateway, error: {0}'.format(e))
def main():
    """Ansible module entry point: parse arguments, connect, converge state."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            vpc_id = dict(required=True),
            state = dict(default='present', choices=['present', 'absent'])
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    # boto import is attempted at module load; bail out cleanly if missing.
    if not HAS_BOTO:
        module.fail_json(msg='boto is required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region:
        try:
            connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")

    vpc_id = module.params.get('vpc_id')
    state = module.params.get('state', 'present')

    # Delegate to the idempotent ensure_* helpers; they honour check mode.
    try:
        if state == 'present':
            result = ensure_igw_present(connection, vpc_id, check_mode=module.check_mode)
        elif state == 'absent':
            result = ensure_igw_absent(connection, vpc_id, check_mode=module.check_mode)
    except AnsibleIGWException as e:
        module.fail_json(msg=str(e))

    module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
chop-dbhi/django-concerns | concerns/migrations/0004_auto__chg_field_concern_reporter.py | 1 | 4938 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: make ``Concern.reporter`` nullable."""

    def forwards(self, orm):
        """Apply: allow NULL for the reporter foreign key."""
        # Changing field 'Concern.reporter'
        db.alter_column('concerns_concern', 'reporter_id', self.gf('django.db.models.fields.related.ForeignKey')(null=True, to=orm['auth.User']))

    def backwards(self, orm):
        """Revert: restore a non-null reporter foreign key."""
        # Changing field 'Concern.reporter'
        db.alter_column('concerns_concern', 'reporter_id', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['auth.User']))

    # Frozen ORM snapshot used by South; auto-generated — do not hand-edit.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'concerns.concern': {
            'Meta': {'ordering': "('created',)", 'object_name': 'Concern'},
            'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {}),
            'document': ('django.db.models.fields.TextField', [], {}),
            'headers': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {}),
            'reporter': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reported_concerns'", 'null': 'True', 'to': "orm['auth.User']"}),
            'resolution': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'resolver': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'resolved_conerns'", 'null': 'True', 'to': "orm['auth.User']"}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'New'", 'max_length': '100'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['concerns']
| bsd-2-clause |
larsmans/scipy | scipy/spatial/tests/test__procrustes.py | 102 | 5132 | from __future__ import absolute_import, division, print_function
import numpy as np
from numpy.testing import (TestCase, run_module_suite, assert_allclose,
assert_equal, assert_almost_equal, assert_raises)
from scipy.spatial import procrustes
class ProcrustesTests(TestCase):
    """Tests for scipy.spatial.procrustes."""

    def setUp(self):
        """creates inputs"""
        # an L
        self.data1 = np.array([[1, 3], [1, 2], [1, 1], [2, 1]], 'd')

        # a larger, shifted, mirrored L
        self.data2 = np.array([[4, -2], [4, -4], [4, -6], [2, -6]], 'd')

        # an L shifted up 1, right 1, and with point 4 shifted an extra .5
        # to the right
        # pointwise distance disparity with data1: 3*(2) + (1 + 1.5^2)
        self.data3 = np.array([[2, 4], [2, 3], [2, 2], [3, 2.5]], 'd')

        # data4, data5 are standardized (trace(A*A') = 1).
        # procrustes should return an identical copy if they are used
        # as the first matrix argument.
        shiftangle = np.pi / 8
        self.data4 = np.array([[1, 0], [0, 1], [-1, 0],
                              [0, -1]], 'd') / np.sqrt(4)
        self.data5 = np.array([[np.cos(shiftangle), np.sin(shiftangle)],
                              [np.cos(np.pi / 2 - shiftangle),
                               np.sin(np.pi / 2 - shiftangle)],
                              [-np.cos(shiftangle),
                               -np.sin(shiftangle)],
                              [-np.cos(np.pi / 2 - shiftangle),
                               -np.sin(np.pi / 2 - shiftangle)]],
                              'd') / np.sqrt(4)

    def test_procrustes(self):
        # tests procrustes' ability to match two matrices.
        #
        # the second matrix is a rotated, shifted, scaled, and mirrored version
        # of the first, in two dimensions only
        #
        # can shift, mirror, and scale an 'L'?
        a, b, disparity = procrustes(self.data1, self.data2)
        assert_allclose(b, a)
        assert_almost_equal(disparity, 0.)

        # if first mtx is standardized, leaves first mtx unchanged?
        m4, m5, disp45 = procrustes(self.data4, self.data5)
        assert_equal(m4, self.data4)

        # at worst, data3 is an 'L' with one point off by .5
        m1, m3, disp13 = procrustes(self.data1, self.data3)
        # NOTE(review): the bound below is intentionally disabled upstream.
        #self.assertTrue(disp13 < 0.5 ** 2)

    def test_procrustes2(self):
        # procrustes disparity should not depend on order of matrices
        m1, m3, disp13 = procrustes(self.data1, self.data3)
        m3_2, m1_2, disp31 = procrustes(self.data3, self.data1)
        assert_almost_equal(disp13, disp31)

        # try with 3d, 8 pts per
        rand1 = np.array([[2.61955202, 0.30522265, 0.55515826],
                         [0.41124708, -0.03966978, -0.31854548],
                         [0.91910318, 1.39451809, -0.15295084],
                         [2.00452023, 0.50150048, 0.29485268],
                         [0.09453595, 0.67528885, 0.03283872],
                         [0.07015232, 2.18892599, -1.67266852],
                         [0.65029688, 1.60551637, 0.80013549],
                         [-0.6607528, 0.53644208, 0.17033891]])

        rand3 = np.array([[0.0809969, 0.09731461, -0.173442],
                         [-1.84888465, -0.92589646, -1.29335743],
                         [0.67031855, -1.35957463, 0.41938621],
                         [0.73967209, -0.20230757, 0.52418027],
                         [0.17752796, 0.09065607, 0.29827466],
                         [0.47999368, -0.88455717, -0.57547934],
                         [-0.11486344, -0.12608506, -0.3395779],
                         [-0.86106154, -0.28687488, 0.9644429]])
        res1, res3, disp13 = procrustes(rand1, rand3)
        res3_2, res1_2, disp31 = procrustes(rand3, rand1)
        assert_almost_equal(disp13, disp31)

    def test_procrustes_shape_mismatch(self):
        # differing row counts must be rejected
        assert_raises(ValueError, procrustes,
                      np.array([[1, 2], [3, 4]]),
                      np.array([[5, 6, 7], [8, 9, 10]]))

    def test_procrustes_empty_rows_or_cols(self):
        empty = np.array([[]])
        assert_raises(ValueError, procrustes, empty, empty)

    def test_procrustes_no_variation(self):
        # constant matrices have zero norm after centering -> invalid input
        assert_raises(ValueError, procrustes,
                      np.array([[42, 42], [42, 42]]),
                      np.array([[45, 45], [45, 45]]))

    def test_procrustes_bad_number_of_dimensions(self):
        # fewer dimensions in one dataset
        assert_raises(ValueError, procrustes,
                      np.array([1, 1, 2, 3, 5, 8]),
                      np.array([[1, 2], [3, 4]]))

        # fewer dimensions in both datasets
        assert_raises(ValueError, procrustes,
                      np.array([1, 1, 2, 3, 5, 8]),
                      np.array([1, 1, 2, 3, 5, 8]))

        # zero dimensions
        assert_raises(ValueError, procrustes, np.array(7), np.array(11))

        # extra dimensions
        assert_raises(ValueError, procrustes,
                      np.array([[[11], [7]]]),
                      np.array([[[5, 13]]]))
if __name__ == '__main__':
run_module_suite()
| bsd-3-clause |
jollyroger/debian-buildbot | buildbot/db/sourcestampsets.py | 2 | 1255 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from buildbot.db import base
class SourceStampSetsConnectorComponent(base.DBConnectorComponent):
    # Documentation is in developer/database.rst

    def addSourceStampSet(self):
        """Insert an empty sourcestampset row and return its new setid."""
        def thd(conn):
            # The table has no user-supplied columns; the INSERT exists
            # purely to allocate a fresh primary key (the setid).
            insert_result = conn.execute(
                self.db.model.sourcestampsets.insert(), dict())
            return insert_result.inserted_primary_key[0]
        return self.db.pool.do(thd)
| gpl-2.0 |
temasek/android_external_chromium_org | third_party/jinja2/constants.py | 1169 | 1626 | # -*- coding: utf-8 -*-
"""
jinja.constants
~~~~~~~~~~~~~~~
Various constants.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
#: list of lorem ipsum words used by the lipsum() helper function
# Whitespace/newline-separated word list; the literal itself is runtime data
# and must not be reformatted.
LOREM_IPSUM_WORDS = u'''\
a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at
auctor augue bibendum blandit class commodo condimentum congue consectetuer
consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus
diam dictum dictumst dignissim dis dolor donec dui duis egestas eget eleifend
elementum elit enim erat eros est et etiam eu euismod facilisi facilisis fames
faucibus felis fermentum feugiat fringilla fusce gravida habitant habitasse hac
hendrerit hymenaeos iaculis id imperdiet in inceptos integer interdum ipsum
justo lacinia lacus laoreet lectus leo libero ligula litora lobortis lorem
luctus maecenas magna magnis malesuada massa mattis mauris metus mi molestie
mollis montes morbi mus nam nascetur natoque nec neque netus nibh nisi nisl non
nonummy nostra nulla nullam nunc odio orci ornare parturient pede pellentesque
penatibus per pharetra phasellus placerat platea porta porttitor posuere
potenti praesent pretium primis proin pulvinar purus quam quis quisque rhoncus
ridiculus risus rutrum sagittis sapien scelerisque sed sem semper senectus sit
sociis sociosqu sodales sollicitudin suscipit suspendisse taciti tellus tempor
tempus tincidunt torquent tortor tristique turpis ullamcorper ultrices
ultricies urna ut varius vehicula vel velit venenatis vestibulum vitae vivamus
viverra volutpat vulputate'''
| bsd-3-clause |
JP-Ellis/django-select2-forms | select2/views.py | 1 | 6241 | import copy
import json
from django.apps import apps
from django.db import models
from django.forms.models import ModelChoiceIterator
from django.http import HttpResponse
from django.utils.encoding import force_text
import logging
logger = logging.getLogger(__name__)
class ViewException(Exception):
    """Base error for Select2 view failures (e.g. unresolvable model)."""
    pass
class InvalidParameter(ViewException):
    """Raised when a request carries a malformed page/page_limit parameter."""
    pass
class JsonResponse(HttpResponse):
    """HttpResponse that serializes non-string content to JSON.

    When a ``callback`` is supplied (or set on a subclass) the payload is
    wrapped for JSONP and served as text/javascript.
    """

    # Optional JSONP callback name; subclasses created by the view override it.
    callback = None

    def __init__(self, content='', callback=None, content_type="application/json", *args, **kwargs):
        if not isinstance(content, str):
            content = json.dumps(content)
        if callback is not None:
            self.callback = callback
        if self.callback is not None:
            # Wrap the payload for JSONP consumers.
            content = u"%s(\n%s\n)" % (self.callback, content)
            content_type = "text/javascript"
        # Fix: do not ``return`` the (None) result of __init__ — an
        # initializer has no meaningful return value.
        super(JsonResponse, self).__init__(
            content=content,
            content_type=content_type,
            *args,
            **kwargs)
class Select2View(object):
    """Ajax endpoint backing the select2 widgets.

    Resolves a model field from (app_label, model_name, field_name) and
    serves paginated JSON/JSONP search results for it.
    """

    def __init__(self, app_label, model_name, field_name):
        self.app_label = app_label
        self.model_name = model_name
        self.field_name = field_name

    # Lazily-populated cache for the resolved model field.
    _field = None

    def get_field_and_model(self):
        """Return the (field, model class) pair this view serves.

        Raises ViewException when the model cannot be resolved.
        """
        model_cls = apps.get_model(self.app_label, self.model_name)
        if model_cls is None:
            raise ViewException('Model %s.%s does not exist' % (self.app_label, self.model_name))
        if self._field is None:
            self._field = model_cls._meta.get_field(self.field_name)
        return self._field, model_cls

    def get_response(self, data, request, **kwargs):
        """Serialize *data* as JSON, or JSONP when a callback is requested."""
        callback = request.GET.get('callback', None)
        if callback is None:
            response_cls = JsonResponse
        else:
            # One-off subclass carrying the JSONP callback name.
            response_cls = type('JsonpResponse', (JsonResponse,), {
                'callback': callback,
            })
        return response_cls(data, **kwargs)

    def get_data(self, queryset, page=None, page_limit=None):
        """Return a select2 results payload for *queryset*.

        When both *page* and *page_limit* are given the queryset is sliced
        accordingly and 'more' reports whether further pages exist.
        """
        field, model_cls = self.get_field_and_model()

        # Check for the existences of a callable %s_queryset method on the
        # model class and use it to filter the Select2 queryset.
        #
        # This is useful for model inheritance where the limit_choices_to can
        # not easily be overriden in child classes.
        model_queryset_method = '%s_queryset' % field.name
        if callable(getattr(model_cls, model_queryset_method, None)):
            queryset = getattr(model_cls, model_queryset_method)(queryset)

        formfield = field.formfield()
        total_count = None
        if page is not None and page_limit is not None:
            total_count = queryset.count()
            offset = (page - 1) * page_limit
            end = offset + page_limit
            queryset = queryset[offset:end]
        else:
            offset = None
        formfield.queryset = queryset
        iterator = ModelChoiceIterator(formfield)
        if offset is None:
            total_count = len(iterator)
            more = False
        else:
            paged_count = offset + len(iterator)
            more = bool(paged_count < total_count)

        data = {
            'total': total_count,
            'more': more,
            'results': [],
        }
        for value, label in iterator:
            # Skip the empty choice. Fix: compare by equality, not identity —
            # ``value is u''`` only worked via string interning and silently
            # failed for equal-but-distinct string objects.
            if value == '':
                continue
            data['results'].append({
                'id': value,
                'text': label,
            })
        return data

    def init_selection(self, pks, is_multiple=False):
        """Return result entries for the given primary keys, in input order."""
        field, model_cls = self.get_field_and_model()
        pks = [int(pk) for pk in pks]
        queryset = field.queryset.filter(**{
            ('{}__in'.format(field.rel.get_related_field().name)): pks,
        }).distinct()

        pk_ordering = {force_text(pk): i for i, pk in enumerate(pks)}
        data = self.get_data(queryset)

        # Make sure we return in the same order we were passed
        def results_sort_callback(item):
            pk = force_text(item['id'])
            return pk_ordering[pk]

        data['results'] = sorted(data['results'], key=results_sort_callback)

        return data['results']

    def fetch_items(self, request):
        """Handle a search request: validate parameters, filter, respond."""
        try:
            field, model_cls = self.get_field_and_model()
        except ViewException as e:
            return self.get_response({'error': str(e)}, request, status=500)

        queryset = copy.deepcopy(field.queryset)

        q = request.GET.get('q', None)
        page_limit = request.GET.get('page_limit', 10)
        page = request.GET.get('page', 1)

        try:
            if q is None:
                return self.get_response({"results": [], "total": 0, "more": False}, request)
            # Fix: int() raises ValueError (not only TypeError) for malformed
            # strings from GET parameters; catch both so bad input produces a
            # clean error response instead of an unhandled exception.
            try:
                page_limit = int(page_limit)
            except (TypeError, ValueError):
                raise InvalidParameter("Invalid page_limit '%s' passed" % page_limit)
            else:
                if page_limit < 1:
                    raise InvalidParameter("Invalid page_limit '%s' passed" % page_limit)

            try:
                page = int(page)
            except (TypeError, ValueError):
                # Fix: the original message lacked the '% page' argument and
                # rendered a literal '%s'.
                raise InvalidParameter("Invalid page '%s' passed" % page)
            else:
                if page < 1:
                    raise InvalidParameter("Invalid page '%s' passed" % page)
        except InvalidParameter as e:
            return self.get_response({'error': str(e)}, request, status=500)

        search_field = field.search_field
        if callable(search_field):
            search_field = search_field(q)
        if isinstance(search_field, models.Q):
            q_obj = search_field
        else:
            qset_contains_filter_key = '%(search_field)s__%(insensitive)scontains' % {
                'search_field': search_field,
                'insensitive': 'i' if not field.case_sensitive else '',
            }
            q_obj = models.Q(**{qset_contains_filter_key: q})
        queryset = queryset.filter(q_obj)
        data = self.get_data(queryset, page, page_limit)

        return self.get_response(data, request)
def fetch_items(request, app_label, model_name, field_name):
    """Module-level view entry point: delegate to a Select2View instance."""
    return Select2View(app_label, model_name, field_name).fetch_items(request)
| bsd-2-clause |
mnahm5/django-estore | Lib/encodings/shift_jis_2004.py | 816 | 1059 | #
# shift_jis_2004.py: Python Unicode Codec for SHIFT_JIS_2004
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_jp, codecs
import _multibytecodec as mbc
codec = _codecs_jp.getcodec('shift_jis_2004')
class Codec(codecs.Codec):
    # Stateless encode/decode delegated to the C codec implementation.
    encode = codec.encode
    decode = codec.decode
# Incremental encoder wired to the shared multibyte codec object.
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    codec = codec
# Incremental decoder wired to the shared multibyte codec object.
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    codec = codec
# Stream reader combining the stateless Codec with the multibyte machinery.
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    codec = codec
# Stream writer combining the stateless Codec with the multibyte machinery.
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    codec = codec
def getregentry():
    """Return the CodecInfo entry used to register this codec."""
    stateless = Codec()
    return codecs.CodecInfo(
        name='shift_jis_2004',
        encode=stateless.encode,
        decode=stateless.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
| mit |
zhuwenping/python-for-android | python-build/python-libs/gdata/src/gdata/tlslite/utils/Cryptlib_AES.py | 359 | 1364 | """Cryptlib AES implementation."""
from cryptomath import *
from AES import *
if cryptlibpyLoaded:
    def new(key, mode, IV):
        # Factory function matching the interface of the other AES backends.
        return Cryptlib_AES(key, mode, IV)
    class Cryptlib_AES(AES):
        """AES-CBC cipher backed by the cryptlib library."""

        def __init__(self, key, mode, IV):
            AES.__init__(self, key, mode, IV, "cryptlib")
            # Create a cryptlib AES context and load CBC mode, the key size,
            # the key material and the IV into it.
            self.context = cryptlib_py.cryptCreateContext(cryptlib_py.CRYPT_UNUSED, cryptlib_py.CRYPT_ALGO_AES)
            cryptlib_py.cryptSetAttribute(self.context, cryptlib_py.CRYPT_CTXINFO_MODE, cryptlib_py.CRYPT_MODE_CBC)
            cryptlib_py.cryptSetAttribute(self.context, cryptlib_py.CRYPT_CTXINFO_KEYSIZE, len(key))
            cryptlib_py.cryptSetAttributeString(self.context, cryptlib_py.CRYPT_CTXINFO_KEY, key)
            cryptlib_py.cryptSetAttributeString(self.context, cryptlib_py.CRYPT_CTXINFO_IV, IV)

        def __del__(self):
            # Release the native context when the wrapper is collected.
            cryptlib_py.cryptDestroyContext(self.context)

        def encrypt(self, plaintext):
            # Base-class call performs bookkeeping; the cipher then works
            # in place on the byte buffer.
            AES.encrypt(self, plaintext)
            # NOTE(review): local name shadows the builtin ``bytes``.
            bytes = stringToBytes(plaintext)
            cryptlib_py.cryptEncrypt(self.context, bytes)
            return bytesToString(bytes)

        def decrypt(self, ciphertext):
            AES.decrypt(self, ciphertext)
            bytes = stringToBytes(ciphertext)
            cryptlib_py.cryptDecrypt(self.context, bytes)
            return bytesToString(bytes)
| apache-2.0 |
joopert/home-assistant | homeassistant/components/wink/climate.py | 2 | 16343 | """Support for Wink thermostats and Air Conditioners."""
import logging
import pywink
from homeassistant.components.climate import ClimateDevice
from homeassistant.components.climate.const import (
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
CURRENT_HVAC_COOL,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
CURRENT_HVAC_OFF,
FAN_AUTO,
FAN_HIGH,
FAN_LOW,
FAN_MEDIUM,
FAN_ON,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_ECO,
SUPPORT_AUX_HEAT,
SUPPORT_FAN_MODE,
SUPPORT_TARGET_TEMPERATURE,
SUPPORT_TARGET_TEMPERATURE_RANGE,
SUPPORT_PRESET_MODE,
PRESET_NONE,
)
from homeassistant.const import ATTR_TEMPERATURE, PRECISION_TENTHS, TEMP_CELSIUS
from homeassistant.helpers.temperature import display_temp as show_temp
from . import DOMAIN, WinkDevice
_LOGGER = logging.getLogger(__name__)
# Extra state-attribute keys exposed by Wink thermostats.
ATTR_ECO_TARGET = "eco_target"
ATTR_EXTERNAL_TEMPERATURE = "external_temperature"
ATTR_OCCUPIED = "occupied"
ATTR_SCHEDULE_ENABLED = "schedule_enabled"
ATTR_SMART_TEMPERATURE = "smart_temperature"
ATTR_TOTAL_CONSUMPTION = "total_consumption"

# Mapping between Home Assistant HVAC modes and Wink mode strings.
HA_HVAC_TO_WINK = {
    HVAC_MODE_AUTO: "auto",
    HVAC_MODE_COOL: "cool_only",
    HVAC_MODE_FAN_ONLY: "fan_only",
    HVAC_MODE_HEAT: "heat_only",
    HVAC_MODE_OFF: "off",
}

# Inverse mapping, derived so the two stay consistent.
WINK_HVAC_TO_HA = {value: key for key, value in HA_HVAC_TO_WINK.items()}

# Feature/fan/preset capabilities for thermostats.
SUPPORT_FLAGS_THERMOSTAT = (
    SUPPORT_TARGET_TEMPERATURE
    | SUPPORT_TARGET_TEMPERATURE_RANGE
    | SUPPORT_FAN_MODE
    | SUPPORT_AUX_HEAT
)
SUPPORT_FAN_THERMOSTAT = [FAN_AUTO, FAN_ON]
SUPPORT_PRESET_THERMOSTAT = [PRESET_AWAY, PRESET_ECO]

# Feature/fan/preset capabilities for air conditioners.
SUPPORT_FLAGS_AC = SUPPORT_TARGET_TEMPERATURE | SUPPORT_FAN_MODE | SUPPORT_PRESET_MODE
SUPPORT_FAN_AC = [FAN_HIGH, FAN_LOW, FAN_MEDIUM]
SUPPORT_PRESET_AC = [PRESET_NONE, PRESET_ECO]
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Wink climate devices."""
    seen = hass.data[DOMAIN]["unique_ids"]
    for thermostat in pywink.get_thermostats():
        # Skip devices that were already registered.
        if thermostat.object_id() + thermostat.name() not in seen:
            add_entities([WinkThermostat(thermostat, hass)])
    for conditioner in pywink.get_air_conditioners():
        if conditioner.object_id() + conditioner.name() not in seen:
            add_entities([WinkAC(conditioner, hass)])
class WinkThermostat(WinkDevice, ClimateDevice):
"""Representation of a Wink thermostat."""
    @property
    def supported_features(self):
        """Return the list of supported features."""
        # Thermostats expose single + ranged set points, fan and aux heat.
        return SUPPORT_FLAGS_THERMOSTAT
    async def async_added_to_hass(self):
        """Call when entity is added to hass."""
        # Register with the Wink component so pubnub updates reach us.
        self.hass.data[DOMAIN]["entities"]["climate"].append(self)
    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        # The Wink API always returns temp in Celsius
        return TEMP_CELSIUS
    @property
    def device_state_attributes(self):
        """Return the optional device state attributes."""
        data = {}
        # Only expose attributes the device actually reports.
        if self.external_temperature is not None:
            data[ATTR_EXTERNAL_TEMPERATURE] = show_temp(
                self.hass,
                self.external_temperature,
                self.temperature_unit,
                PRECISION_TENTHS,
            )
        if self.smart_temperature:
            data[ATTR_SMART_TEMPERATURE] = self.smart_temperature
        if self.occupied is not None:
            data[ATTR_OCCUPIED] = self.occupied
        if self.eco_target is not None:
            data[ATTR_ECO_TARGET] = self.eco_target
        return data
    @property
    def current_temperature(self):
        """Return the current temperature."""
        return self.wink.current_temperature()
@property
def current_humidity(self):
"""Return the current humidity."""
if self.wink.current_humidity() is not None:
# The API states humidity will be a float 0-1
# the only example API response with humidity listed show an int
# This will address both possibilities
if self.wink.current_humidity() < 1:
return self.wink.current_humidity() * 100
return self.wink.current_humidity()
return None
    @property
    def external_temperature(self):
        """Return the current external temperature."""
        return self.wink.current_external_temperature()
    @property
    def smart_temperature(self):
        """Return the current average temp of all remote sensor."""
        return self.wink.current_smart_temperature()
    @property
    def eco_target(self):
        """Return status of eco target (Is the thermostat in eco mode)."""
        return self.wink.eco_target()
    @property
    def occupied(self):
        """Return status of if the thermostat has detected occupancy."""
        return self.wink.occupied()
@property
def preset_mode(self):
"""Return the current preset mode, e.g., home, away, temp."""
mode = self.wink.current_hvac_mode()
if mode == "eco":
return PRESET_ECO
if self.wink.away():
return PRESET_AWAY
return None
    @property
    def preset_modes(self):
        """Return a list of available preset modes."""
        return SUPPORT_PRESET_THERMOSTAT
@property
def target_humidity(self):
"""Return the humidity we try to reach."""
target_hum = None
if self.wink.current_humidifier_mode() == "on":
if self.wink.current_humidifier_set_point() is not None:
target_hum = self.wink.current_humidifier_set_point() * 100
elif self.wink.current_dehumidifier_mode() == "on":
if self.wink.current_dehumidifier_set_point() is not None:
target_hum = self.wink.current_dehumidifier_set_point() * 100
else:
target_hum = None
return target_hum
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
if self.hvac_mode != HVAC_MODE_AUTO and not self.wink.away():
if self.hvac_mode == HVAC_MODE_COOL:
return self.wink.current_max_set_point()
if self.hvac_mode == HVAC_MODE_HEAT:
return self.wink.current_min_set_point()
return None
    @property
    def target_temperature_low(self):
        """Return the lower bound temperature we try to reach."""
        # Only meaningful in auto mode, where a range is used.
        if self.hvac_mode == HVAC_MODE_AUTO:
            return self.wink.current_min_set_point()
        return None
    @property
    def target_temperature_high(self):
        """Return the higher bound temperature we try to reach."""
        # Only meaningful in auto mode, where a range is used.
        if self.hvac_mode == HVAC_MODE_AUTO:
            return self.wink.current_max_set_point()
        return None
@property
def is_aux_heat(self):
"""Return true if aux heater."""
if "aux" not in self.wink.hvac_modes():
return None
if self.wink.current_hvac_mode() == "aux":
return True
return False
@property
def hvac_mode(self) -> str:
"""Return hvac operation ie. heat, cool mode.
Need to be one of HVAC_MODE_*.
"""
if not self.wink.is_on():
return HVAC_MODE_OFF
wink_mode = self.wink.current_hvac_mode()
if wink_mode == "aux":
return HVAC_MODE_HEAT
if wink_mode == "eco":
return HVAC_MODE_AUTO
return WINK_HVAC_TO_HA.get(wink_mode)
@property
def hvac_modes(self):
"""Return the list of available hvac operation modes.
Need to be a subset of HVAC_MODES.
"""
hvac_list = [HVAC_MODE_OFF]
modes = self.wink.hvac_modes()
for mode in modes:
if mode in ("eco", "aux"):
continue
try:
ha_mode = WINK_HVAC_TO_HA[mode]
hvac_list.append(ha_mode)
except KeyError:
_LOGGER.error(
"Invalid operation mode mapping. %s doesn't map. "
"Please report this.",
mode,
)
return hvac_list
@property
def hvac_action(self):
"""Return the current running hvac operation if supported.
Need to be one of CURRENT_HVAC_*.
"""
if not self.wink.is_on():
return CURRENT_HVAC_OFF
if self.wink.cool_on():
return CURRENT_HVAC_COOL
if self.wink.heat_on():
return CURRENT_HVAC_HEAT
return CURRENT_HVAC_IDLE
def set_temperature(self, **kwargs):
"""Set new target temperature."""
target_temp = kwargs.get(ATTR_TEMPERATURE)
target_temp_low = kwargs.get(ATTR_TARGET_TEMP_LOW)
target_temp_high = kwargs.get(ATTR_TARGET_TEMP_HIGH)
if target_temp is not None:
if self.hvac_mode == HVAC_MODE_COOL:
target_temp_high = target_temp
if self.hvac_mode == HVAC_MODE_HEAT:
target_temp_low = target_temp
self.wink.set_temperature(target_temp_low, target_temp_high)
def set_hvac_mode(self, hvac_mode):
"""Set new target hvac mode."""
hvac_mode_to_set = HA_HVAC_TO_WINK.get(hvac_mode)
self.wink.set_operation_mode(hvac_mode_to_set)
    def set_preset_mode(self, preset_mode):
        """Set a new preset mode (away / eco)."""
        # Away: leave away mode when switching to any non-away preset,
        # enter it when explicitly requested.
        if preset_mode != PRESET_AWAY and self.wink.away():
            self.wink.set_away_mode(False)
        elif preset_mode == PRESET_AWAY:
            self.wink.set_away_mode()
        # Eco maps onto Wink's dedicated "eco" operation mode.
        if preset_mode == PRESET_ECO:
            self.wink.set_operation_mode("eco")
@property
def fan_mode(self):
"""Return whether the fan is on."""
if self.wink.current_fan_mode() == "on":
return FAN_ON
if self.wink.current_fan_mode() == "auto":
return FAN_AUTO
# No Fan available so disable slider
return None
@property
def fan_modes(self):
"""List of available fan modes."""
if self.wink.has_fan():
return SUPPORT_FAN_THERMOSTAT
return None
def set_fan_mode(self, fan_mode):
"""Turn fan on/off."""
self.wink.set_fan_mode(fan_mode.lower())
def turn_aux_heat_on(self):
"""Turn auxiliary heater on."""
self.wink.set_operation_mode("aux")
def turn_aux_heat_off(self):
"""Turn auxiliary heater off."""
self.wink.set_operation_mode("heat_only")
@property
def min_temp(self):
"""Return the minimum temperature."""
minimum = 7 # Default minimum
min_min = self.wink.min_min_set_point()
min_max = self.wink.min_max_set_point()
if self.hvac_mode == HVAC_MODE_HEAT:
if min_min:
return_value = min_min
else:
return_value = minimum
elif self.hvac_mode == HVAC_MODE_COOL:
if min_max:
return_value = min_max
else:
return_value = minimum
elif self.hvac_mode == HVAC_MODE_AUTO:
if min_min and min_max:
return_value = min(min_min, min_max)
else:
return_value = minimum
else:
return_value = minimum
return return_value
@property
def max_temp(self):
"""Return the maximum temperature."""
maximum = 35 # Default maximum
max_min = self.wink.max_min_set_point()
max_max = self.wink.max_max_set_point()
if self.hvac_mode == HVAC_MODE_HEAT:
if max_min:
return_value = max_min
else:
return_value = maximum
elif self.hvac_mode == HVAC_MODE_COOL:
if max_max:
return_value = max_max
else:
return_value = maximum
elif self.hvac_mode == HVAC_MODE_AUTO:
if max_min and max_max:
return_value = min(max_min, max_max)
else:
return_value = maximum
else:
return_value = maximum
return return_value
class WinkAC(WinkDevice, ClimateDevice):
    """Representation of a Wink air conditioner."""

    @property
    def supported_features(self):
        """Return the list of supported features."""
        return SUPPORT_FLAGS_AC

    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        # The Wink API always returns temp in Celsius
        return TEMP_CELSIUS

    @property
    def device_state_attributes(self):
        """Return the optional device state attributes."""
        data = {}
        data[ATTR_TOTAL_CONSUMPTION] = self.wink.total_consumption()
        data[ATTR_SCHEDULE_ENABLED] = self.wink.schedule_enabled()
        return data

    @property
    def current_temperature(self):
        """Return the current temperature."""
        return self.wink.current_temperature()

    @property
    def preset_mode(self):
        """Return the current preset mode, e.g., home, away, temp."""
        if not self.wink.is_on():
            return PRESET_NONE
        mode = self.wink.current_mode()
        # "auto_eco" is Wink's eco preset layered on top of cooling.
        if mode == "auto_eco":
            return PRESET_ECO
        return PRESET_NONE

    @property
    def preset_modes(self):
        """Return a list of available preset modes."""
        return SUPPORT_PRESET_AC

    @property
    def hvac_mode(self) -> str:
        """Return hvac operation ie. heat, cool mode.

        Need to be one of HVAC_MODE_*.
        """
        if not self.wink.is_on():
            return HVAC_MODE_OFF
        wink_mode = self.wink.current_mode()
        if wink_mode == "auto_eco":
            # Eco is surfaced as a preset; the underlying mode is cooling.
            return HVAC_MODE_COOL
        return WINK_HVAC_TO_HA.get(wink_mode)

    @property
    def hvac_modes(self):
        """Return the list of available hvac operation modes.

        Need to be a subset of HVAC_MODES; "auto_eco" is exposed through
        preset_modes instead.
        """
        hvac_list = [HVAC_MODE_OFF]
        for mode in self.wink.modes():
            if mode == "auto_eco":
                continue
            try:
                hvac_list.append(WINK_HVAC_TO_HA[mode])
            except KeyError:
                _LOGGER.error(
                    "Invalid operation mode mapping. %s doesn't map. "
                    "Please report this.",
                    mode,
                )
        return hvac_list

    def set_temperature(self, **kwargs):
        """Set new target temperature."""
        target_temp = kwargs.get(ATTR_TEMPERATURE)
        self.wink.set_temperature(target_temp)

    def set_hvac_mode(self, hvac_mode):
        """Set new target hvac mode."""
        hvac_mode_to_set = HA_HVAC_TO_WINK.get(hvac_mode)
        self.wink.set_operation_mode(hvac_mode_to_set)

    def set_preset_mode(self, preset_mode):
        """Set new preset mode."""
        if preset_mode == PRESET_ECO:
            self.wink.set_operation_mode("auto_eco")
        elif self.hvac_mode == HVAC_MODE_COOL and preset_mode == PRESET_NONE:
            # Leaving eco while cooling: restore the plain cool mode.
            self.set_hvac_mode(HVAC_MODE_COOL)

    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        return self.wink.current_max_set_point()

    @property
    def fan_mode(self):
        """
        Return the current fan mode.

        The official Wink app only supports 3 modes [low, medium, high]
        which are equal to [0.33, 0.66, 1.0] respectively.
        """
        speed = self.wink.current_fan_speed()
        if speed <= 0.33:
            return FAN_LOW
        if speed <= 0.66:
            return FAN_MEDIUM
        return FAN_HIGH

    @property
    def fan_modes(self):
        """Return a list of available fan modes."""
        return SUPPORT_FAN_AC

    def set_fan_mode(self, fan_mode):
        """
        Set fan speed.

        The official Wink app only supports 3 modes [low, medium, high]
        which are equal to [0.33, 0.66, 1.0] respectively.

        Raises ValueError for any other mode. (Previously an unknown
        mode crashed with UnboundLocalError because ``speed`` was never
        assigned.)
        """
        if fan_mode == FAN_LOW:
            speed = 0.33
        elif fan_mode == FAN_MEDIUM:
            speed = 0.66
        elif fan_mode == FAN_HIGH:
            speed = 1.0
        else:
            raise ValueError("Unsupported fan mode: {}".format(fan_mode))
        self.wink.set_ac_fan_speed(speed)
| apache-2.0 |
MounirMesselmeni/django | django/db/migrations/operations/special.py | 374 | 7425 | from __future__ import unicode_literals
from django.db import router
from .base import Operation
class SeparateDatabaseAndState(Operation):
    """
    Takes two lists of operations - ones that will be used for the database,
    and ones that will be used for the state change. This allows operations
    that don't support state change to have it applied, or have operations
    that affect the state or not the database, or so on.
    """

    serialization_expand_args = ['database_operations', 'state_operations']

    def __init__(self, database_operations=None, state_operations=None):
        self.database_operations = database_operations or []
        self.state_operations = state_operations or []

    def deconstruct(self):
        # Only serialize the kwargs that were actually provided so the
        # written migration stays minimal.
        kwargs = {}
        if self.database_operations:
            kwargs['database_operations'] = self.database_operations
        if self.state_operations:
            kwargs['state_operations'] = self.state_operations
        return (
            self.__class__.__name__,
            [],
            kwargs
        )

    def state_forwards(self, app_label, state):
        # Project state is driven solely by the state_operations list.
        for state_operation in self.state_operations:
            state_operation.state_forwards(app_label, state)

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        # We calculate state separately in here since our state functions aren't useful
        for database_operation in self.database_operations:
            # Each operation gets a from/to state pair derived from the
            # previous operation's result, not the overall migration state.
            to_state = from_state.clone()
            database_operation.state_forwards(app_label, to_state)
            database_operation.database_forwards(app_label, schema_editor, from_state, to_state)
            from_state = to_state

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # We calculate state separately in here since our state functions aren't useful
        base_state = to_state
        for pos, database_operation in enumerate(reversed(self.database_operations)):
            # Reconstruct the states around this operation by replaying the
            # operations that precede it from the base (pre-migration) state.
            to_state = base_state.clone()
            for dbop in self.database_operations[:-(pos + 1)]:
                dbop.state_forwards(app_label, to_state)
            from_state = base_state.clone()
            database_operation.state_forwards(app_label, from_state)
            database_operation.database_backwards(app_label, schema_editor, from_state, to_state)

    def describe(self):
        return "Custom state/database change combination"
class RunSQL(Operation):
    """
    Runs some raw SQL. A reverse SQL statement may be provided.

    Also accepts a list of operations that represent the state change effected
    by this SQL change, in case it's custom column/table creation/deletion.
    """

    # Sentinel marking a direction as an intentional no-op (keeps the
    # operation reversible without running anything).
    noop = ''

    def __init__(self, sql, reverse_sql=None, state_operations=None, hints=None):
        self.sql = sql
        self.reverse_sql = reverse_sql
        self.state_operations = state_operations or []
        self.hints = hints or {}

    def deconstruct(self):
        # Only serialize optional kwargs that were actually provided.
        kwargs = {
            'sql': self.sql,
        }
        if self.reverse_sql is not None:
            kwargs['reverse_sql'] = self.reverse_sql
        if self.state_operations:
            kwargs['state_operations'] = self.state_operations
        if self.hints:
            kwargs['hints'] = self.hints
        return (
            self.__class__.__name__,
            [],
            kwargs
        )

    @property
    def reversible(self):
        # Reversible only when reverse SQL was supplied.
        return self.reverse_sql is not None

    def state_forwards(self, app_label, state):
        for state_operation in self.state_operations:
            state_operation.state_forwards(app_label, state)

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        # Respect database routers before touching the database.
        if router.allow_migrate(schema_editor.connection.alias, app_label, **self.hints):
            self._run_sql(schema_editor, self.sql)

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        if self.reverse_sql is None:
            raise NotImplementedError("You cannot reverse this operation")
        if router.allow_migrate(schema_editor.connection.alias, app_label, **self.hints):
            self._run_sql(schema_editor, self.reverse_sql)

    def describe(self):
        return "Raw SQL operation"

    def _run_sql(self, schema_editor, sqls):
        # ``sqls`` may be a plain SQL string/script, or a list of statements
        # where each item is either a string or a (sql, params) 2-tuple.
        if isinstance(sqls, (list, tuple)):
            for sql in sqls:
                params = None
                if isinstance(sql, (list, tuple)):
                    elements = len(sql)
                    if elements == 2:
                        sql, params = sql
                    else:
                        raise ValueError("Expected a 2-tuple but got %d" % elements)
                schema_editor.execute(sql, params=params)
        elif sqls != RunSQL.noop:
            # Split a raw SQL script into individual statements.
            statements = schema_editor.connection.ops.prepare_sql_script(sqls)
            for statement in statements:
                schema_editor.execute(statement, params=None)
class RunPython(Operation):
    """
    Runs Python code in a context suitable for doing versioned ORM operations.
    """

    # Python code cannot be turned into SQL for --sqlmigrate output.
    reduces_to_sql = False

    def __init__(self, code, reverse_code=None, atomic=True, hints=None):
        self.atomic = atomic
        # Forwards code
        if not callable(code):
            raise ValueError("RunPython must be supplied with a callable")
        self.code = code
        # Reverse code
        if reverse_code is None:
            self.reverse_code = None
        else:
            if not callable(reverse_code):
                raise ValueError("RunPython must be supplied with callable arguments")
            self.reverse_code = reverse_code
        self.hints = hints or {}

    def deconstruct(self):
        # Only serialize optional kwargs that differ from their defaults.
        kwargs = {
            'code': self.code,
        }
        if self.reverse_code is not None:
            kwargs['reverse_code'] = self.reverse_code
        if self.atomic is not True:
            kwargs['atomic'] = self.atomic
        if self.hints:
            kwargs['hints'] = self.hints
        return (
            self.__class__.__name__,
            [],
            kwargs
        )

    @property
    def reversible(self):
        return self.reverse_code is not None

    def state_forwards(self, app_label, state):
        # RunPython objects have no state effect. To add some, combine this
        # with SeparateDatabaseAndState.
        pass

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        if router.allow_migrate(schema_editor.connection.alias, app_label, **self.hints):
            # We now execute the Python code in a context that contains a 'models'
            # object, representing the versioned models as an app registry.
            # We could try to override the global cache, but then people will still
            # use direct imports, so we go with a documentation approach instead.
            self.code(from_state.apps, schema_editor)

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        if self.reverse_code is None:
            raise NotImplementedError("You cannot reverse this operation")
        if router.allow_migrate(schema_editor.connection.alias, app_label, **self.hints):
            self.reverse_code(from_state.apps, schema_editor)

    def describe(self):
        return "Raw Python operation"

    @staticmethod
    def noop(apps, schema_editor):
        # Handy stand-in for either direction: does nothing but keeps the
        # operation reversible.
        return None
| bsd-3-clause |
v1bri/gnuradio | gr-wxgui/python/wxgui/gui.py | 76 | 4565 | #
# Copyright 2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import wx
from gnuradio import gr
#
# Top-level display panel with vertical box sizer. User does not create or
# subclass this class; rather, the user supplies his own class constructor
# that gets invoked with needed parameters.
#
class top_panel(wx.Panel):
    """Top-level display panel with a vertical box sizer.

    Not created or subclassed by users; it instantiates the user-supplied
    GUI class with the parameters it needs.
    """

    def __init__(self, frame, top_block, gui, options, args):
        wx.Panel.__init__(self, frame, -1)
        vbox = wx.BoxSizer(wx.VERTICAL)
        # Create the user's GUI class
        if gui is not None:
            self.gui = gui(frame,      # Top-level window frame
                           self,       # Parent class for user created windows
                           vbox,       # Sizer for user to add windows to
                           top_block,  # GUI-unaware flowgraph to manipulate
                           options,    # Command-line options
                           args)       # Command-line arguments
        else:
            # User hasn't made their own GUI, create our default
            # We don't have a default GUI yet either :)
            p = wx.Panel(self)
            p.SetSize((640,480))
            vbox.Add(p, 1, wx.EXPAND)
        self.SetSizer(vbox)
        self.SetAutoLayout(True)
        vbox.Fit(self)

    def shutdown(self):
        # Give the user GUI a chance to clean up; tolerate GUIs that do
        # not define a shutdown() method.
        try:
            self.gui.shutdown()
        except AttributeError:
            pass
#
# Top-level window frame with menu and status bars.
#
class top_frame(wx.Frame):
    """Top-level window frame with menu and status bars.

    Hosts the top_panel, optionally enables realtime scheduling and starts
    the flowgraph, and stops the flowgraph cleanly on window close.
    """

    def __init__ (self, top_block, gui, options, args,
                  title, nstatus, start, realtime):
        wx.Frame.__init__(self, None, -1, title)
        self.top_block = top_block
        self.CreateStatusBar(nstatus)
        mainmenu = wx.MenuBar()
        self.SetMenuBar(mainmenu)
        menu = wx.Menu()
        item = menu.Append(200, 'E&xit', 'Exit Application')  # FIXME magic ID
        self.Bind(wx.EVT_MENU, self.OnCloseWindow, item)
        mainmenu.Append(menu, "&File")
        # Route the window-manager close button through the same handler.
        self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)
        # Create main panel, creates user GUI class with supplied parameters
        self.panel = top_panel(self, top_block, gui, options, args)
        vbox = wx.BoxSizer(wx.VERTICAL)
        vbox.Add(self.panel, 1, wx.EXPAND)
        self.SetSizer(vbox)
        self.SetAutoLayout(True)
        vbox.Fit(self)
        if realtime:
            if gr.enable_realtime_scheduling() != gr.RT_OK:
                self.SetStatusText("Failed to enable realtime scheduling")
        if start and self.top_block is not None:
            self.top_block.start()

    def OnCloseWindow(self, event):
        # Give user API a chance to do something
        self.panel.shutdown()
        # Stop flowgraph as a convenience
        self.SetStatusText("Ensuring flowgraph has completed before exiting...")
        if self.top_block is not None:
            self.top_block.stop()
            self.top_block.wait()
        self.Destroy()
#
# Top-level wxPython application object. User creates or subclasses this
# in their GUI script.
#
class app(wx.App):
    """Top-level wxPython application object.

    Users create or subclass this in their GUI script; it builds the
    top_frame with the supplied flowgraph and options on init.
    """

    def __init__ (self, top_block=None, gui=None, options=None, args=None,
                  title="GNU Radio", nstatus=1, start=False, realtime=False):
        # Stash parameters; OnInit (called by wx.App.__init__) consumes them.
        self.top_block = top_block
        self.gui = gui
        self.options = options
        self.args = args
        self.title = title
        self.nstatus = nstatus
        self.start = start
        self.realtime = realtime
        wx.App.__init__ (self, redirect=False)

    def OnInit(self):
        # Pass user parameters to top window frame
        frame = top_frame(self.top_block, self.gui, self.options, self.args,
                          self.title, self.nstatus, self.start, self.realtime)
        frame.Show(True)
        self.SetTopWindow(frame)
        return True
| gpl-3.0 |
marioaugustorama/yowsup | yowsup/layers/protocol_notifications/protocolentities/notification.py | 60 | 2064 | from yowsup.structs import ProtocolEntity, ProtocolTreeNode
from yowsup.layers.protocol_receipts.protocolentities import OutgoingReceiptProtocolEntity
class NotificationProtocolEntity(ProtocolEntity):
    '''
    Wire format:

    <notification offline="0" id="{{NOTIFICATION_ID}}" notify="{{NOTIFY_NAME}}" type="{{NOTIFICATION_TYPE}}"
        t="{{TIMESTAMP}}" from="{{SENDER_JID}}">
    </notification>
    '''
    def __init__(self, _type, _id, _from, timestamp, notify, offline):
        super(NotificationProtocolEntity, self).__init__("notification")
        self._type = _type
        self._id = _id
        self._from =_from
        self.timestamp = int(timestamp)
        self.notify = notify
        # The wire encodes booleans as "1"/"0" strings.
        self.offline = offline == "1"

    def __str__(self):
        out  = "Notification\n"
        out += "From: %s\n" % self.getFrom()
        out += "Type: %s\n" % self.getType()
        return out

    def getFrom(self, full = True):
        # full=False strips the server part of the JID, leaving the user id.
        return self._from if full else self._from.split('@')[0]

    def getType(self):
        return self._type

    def getId(self):
        return self._id

    def getTimestamp(self):
        return self.timestamp

    def toProtocolTreeNode(self):
        # Serialize back to the wire attribute format shown in the class
        # docstring; booleans become "1"/"0" and the timestamp a string.
        attribs = {
            "t"       : str(self.timestamp),
            "from"    : self._from,
            "offline" : "1" if self.offline else "0",
            "type"    : self._type,
            "id"      : self._id,
            "notify"  : self.notify
        }
        return self._createProtocolTreeNode(attribs, children = None, data = None)

    def ack(self):
        # Build the receipt acknowledging this notification.
        return OutgoingReceiptProtocolEntity(self.getId(), self.getFrom())

    @staticmethod
    def fromProtocolTreeNode(node):
        return NotificationProtocolEntity(
            node.getAttributeValue("type"),
            node.getAttributeValue("id"),
            node.getAttributeValue("from"),
            node.getAttributeValue("t"),
            node.getAttributeValue("notify"),
            node.getAttributeValue("offline")
            )
| gpl-3.0 |
bearstech/nuka | nuka/task.py | 1 | 20281 | # Copyright 2017 by Bearstech <py@bearstech.com>
#
# This file is part of nuka.
#
# nuka is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# nuka is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with nuka. If not, see <http://www.gnu.org/licenses/>.
import time
import base64
import codecs
import inspect
import asyncio
import logging
import importlib
import asyncssh.misc
from nuka.remote.task import RemoteTask
from nuka.configuration import config
from nuka import remote
from nuka import utils
from nuka import gpg
import nuka
class Base(asyncio.Future):
    """Base class for client-side tasks.

    A task is an :class:`asyncio.Future` bound to a host; it resolves when
    the task has finished (or is cancelled along with its host).
    """

    def __init__(self, **kwargs):
        self.initialize(**kwargs)
        super().__init__(loop=self.host.loop)
        if self.host.cancelled():
            # The host has already been cancelled; don't even start.
            self.cancel()
        else:
            self.process()

    def initialize(self, host=None,
                   switch_user=None, switch_ssh_user=None, **args):
        # Walk up the call stack to discover the host and switch_user /
        # switch_ssh_user values from the caller's locals, and to record
        # the file/line where the task was instantiated (for logging).
        meta = {'filename': None, 'lineno': None,
                'start': time.time(), 'times': [],
                'remote_calls': [],
                }
        for infos in inspect.stack(2):
            f = infos.frame
            # Skip frames that belong to remote task machinery.
            if isinstance(f.f_locals.get('self'), RemoteTask):
                continue
            if host is None:
                host = f.f_locals.get('host')
            if switch_user is None:
                switch_user = f.f_locals.get('switch_user')
            if switch_ssh_user is None:
                switch_ssh_user = f.f_locals.get('switch_ssh_user')
            if meta['filename'] is None:
                filename = infos.filename
                if filename.endswith('nuka/task.py'):
                    filename = 'nuka/task.py'
                meta.update(filename=filename,
                            lineno=infos.lineno)
            if host is not None:
                break
        if host is None:  # pragma: no cover
            raise RuntimeError('No valid host found in the stack')
        self.switch_user = switch_user
        self.switch_ssh_user = switch_ssh_user
        self.meta = meta
        self.host = host
        self.loop = self.host.loop
        self.args = args
        # Default result; overwritten by the remote reply.
        self.res = {'changed': True, 'rc': 0}
        self.start = time.time()
        self.run_task = None
        host.add_task(self)

    def running(self):
        """return True if a remote task is running"""
        if self.run_task is not None:
            return not self.run_task.done()
        return False

    def process(self, fut=None):
        # May be used directly or as a done-callback once the host has
        # booted (hence the otherwise-unused ``fut`` argument).
        if fut is not None:  # pragma: no cover
            # we waited for boot
            self.meta['start'] = time.time()
        start = time.time()
        try:
            self.pre_process()
        except Exception as e:
            self.host.log.exception(e)
            self.cancel()
            raise
        else:
            duration = time.time() - start
            if duration > .05:  # pragma: no cover
                # Only account for pre_process time when it is noticeable.
                self.host.add_time(
                    start=start, time=duration,
                    type='pre_process', task=self)
            self.run_task = self._loop.create_task(self._run())

    def pre_process(self):
        """run locally before anything is sent to the host"""

    def post_process(self):
        """run when we get a valid reply from the host"""

    def render_template(self, fd):
        """render a template from a file descriptor:

        .. code-block:: python

            {'src': path, 'dst': path}
        """
        src = fd['src']
        # Template context: task args + explicit ctx + host/env + the fd.
        ctx = dict(self.args, **self.args.get('ctx', {}))
        ctx.update(host=self.host, env=config, **fd)
        engine = config.get_template_engine()
        template = engine.get_template(src)
        fd['data'] = template.render(ctx)
        if 'executable' not in fd:
            fd['executable'] = utils.isexecutable(src)

    def render_file(self, fd):
        """render a file from a file descriptor. A file descriptor is a dict:

        .. code-block:: python

            {'src': path, 'dst': path}
        """
        src = fd['src']
        if src.endswith('.gpg'):
            # Encrypted content is decrypted on the fly.
            _, data = gpg.decrypt(src, 'utf8')
        elif src.endswith(utils.ARCHIVE_EXTS):
            # Binary archives are base64-encoded for transport.
            with open(src, 'rb',) as fd_:
                data = fd_.read()
            data = base64.b64encode(data).decode('utf8')
        else:
            with codecs.open(src, 'r', 'utf8') as fd_:
                data = fd_.read()
        fd['data'] = data
        if 'executable' not in fd:
            fd['executable'] = utils.isexecutable(src)

    def log(self):
        self.host.log.info(self)

    def cancel(self):
        """cancel a task"""
        if not self.cancelled():
            super().cancel()
            if not self.res.get('signal') and not self.host.failed():
                # do not log cancellation if the user wanted it
                self.log()
            if self.run_task is not None:
                self.run_task.cancel()
            # A cancelled task cancels its whole host.
            self.host.cancel()

    async def _run(self):
        # wrap the task to catch exception
        try:
            await self.run()
        except Exception as e:
            self.cancel()
            if not isinstance(e, asyncio.CancelledError):
                self.host.log.exception5(self)
        # update meta
        self.meta.update(self.res.pop('meta', {}))
        # if task succeded then run post_process
        start = time.time()
        try:
            self.post_process()
        except Exception:
            self.cancel()
            self.host.log.exception5(self)
        finally:
            duration = time.time() - start
            if duration > .05:
                self.host.add_time(
                    start=start, time=duration,
                    type='post_process', task=self)
        # set result / log stuff
        if not self.done():
            self.set_result(self)
        self.meta.setdefault('time', time.time() - self.meta['start'])
        self.host.add_time(type='task', task=self, **self.meta)
        # log if not cancelled
        if not self.cancelled():
            self.log()

    def __bool__(self):
        # A task is truthy when the remote return code was 0.
        return self.res.get('rc') == 0

    def __repr__(self):
        return '<{0}>'.format(str(self))

    def __str__(self):
        name = self.__class_name__()
        instance_name = self.args.get('name')
        if instance_name is None:
            instance_name = '-'
        s = '{0}({1})'.format(name, instance_name)
        if self.res:
            if self.res['rc'] == 0:
                if self.cancelled():
                    s += ' cancelled at (unknown):{lineno}'.format(
                        **self.meta)
                elif self.done() and getattr(self, 'changed', True):
                    s += ' changed'
            else:
                s += ' fail({0[rc]})'.format(self.res)
            # NOTE: shadows the ``time`` module locally; harmless here.
            time = self.meta.get('local_time')
            if time:
                s += ' time({0}s)'.format(round(time, 1))
        return s.strip()

    def __class_name__(self):
        klass = self.__class__
        name = '{0}.{1}'.format(klass.__module__.split('.')[-1],
                                klass.__name__)
        return name
class Task(Base, RemoteTask):
    """A task serialized to and executed on the remote host."""

    def process(self):
        if self.host.cancelled():
            self.cancel()
        else:
            diff_mode = self.args.get('diff_mode', nuka.cli.args.diff)
            if diff_mode:
                # ignore diff call if the task do not support it
                attr = getattr(self, 'diff', None)
                if attr in (None, False):
                    self.res['changed'] = False
                    self.meta['local_time'] = 0.
                    if attr is False:
                        self.host.log.info("{0}.diff is False".format(self))
                    else:
                        self.host.log.warning("{0}.diff is None".format(self))
                    self.set_result(self)
                    return
            if self.host.fully_booted.done():
                super().process()
            else:
                # use asyncio with callback since we are in a sync __init__
                task = self.loop.create_task(wait_for_boot(self.host))
                task.add_done_callback(super().process)

    async def run(self):
        """Serialize the task, send it to the remote host.

        The remote script will deserialize the task and run
        :meth:`~nuka.remote.task.Task.do` (or diff() when using --diff)
        """
        self.host.log.debug(self)
        diff_mode = self.args.get('diff_mode', nuka.cli.args.diff)
        klass = self.__class__
        args = {}
        for k, v in self.args.items():
            if k not in ('ctx',):
                args[k] = v
        # prep stdin
        stdin_data = dict(
            task=(klass.__module__, klass.__name__),
            remote_tmp=config['remote_tmp'],
            switch_user=self.switch_user,
            args=args,
            check_mode=False,
            diff_mode=diff_mode,
            log_level=config['log']['levels']['remote_level'])
        if config['testing'] and 'coverage' in self.host.vars:
            # check if we can/want use coverage
            cmd = (
                '{coverage} run -p '
                '--source={remote_dir}/nuka/tasks '
                '{script} '
            ).format(coverage=self.host.vars['coverage'], **config)
        else:
            # use python
            inventory = self.host.vars.get(
                'inventory',
                {'python': {'executable': 'python'}})
            executable = inventory['python'].get('executable', 'python')
            cmd = '{0} {script} '.format(executable, **config)
        # allow to trac some ids from ps
        cmd += '--deploy-id={0} --task-id={1}'.format(config['id'],
                                                      id(self))
        # create process
        proc = await self.host.create_process(
            cmd, task=self,
            switch_user=self.switch_user,
            switch_ssh_user=self.switch_ssh_user)
        # send stdin
        zlib_avalaible = self.host.inventory['python']['zlib_available']
        stdin = utils.proto_dumps_std(
            stdin_data, proc.stdin,
            content_type=zlib_avalaible and 'zlib' or 'plain')
        await proc.stdin.drain()
        res = {}
        # Consume messages until the remote side signals its exit.
        while res.get('message_type') != 'exit':
            # wait for messages
            try:
                res = await proc.next_message()
            except asyncio.CancelledError:
                raise
            except Exception as e:
                self.cancel()
                self.host.log.exception5(
                    '{0}\n\n{1}'.format(self, stdin))
            else:
                if res.get('message_type') == 'log':
                    self.host.log.log(res['level'], res['msg'])
        # finalize
        self.res.update(res)
        if self.res['rc'] != 0 and not self.ignore_errors:
            if not diff_mode:
                self.cancel()

    def log(self):
        """Log the task outcome, remote shell calls and remote log lines."""
        log = self.host.log
        if 'exc' in self.res:
            exc = '\n' + ''.join(self.res['exc'])
            log.error('{0}\n{1}'.format(self, exc))
        elif self.res.get('stderr'):
            if self.res.get('rc') != 0:
                log.error('{0}\n{1}\n{2}'.format(
                    self, self.res.get('stdout', ''), self.res['stderr']))
            elif self.res['changed']:
                log.changed('{0}\n{1}'.format(self, self.res['stderr']))
        else:
            data = self.res.get('diff', '')
            if data.strip():
                data = data.strip()
                log.changed('{0} diff=\n{1}\n'.format(self, data))
            elif self.cancelled():
                log.error(self)
            elif self.res['changed']:
                log.changed(self)
            else:
                log.info(self)
        # Per-call accounting for each shell command run on the remote side.
        for cmd_ in self.meta.get('remote_calls', []):
            rtime = round(cmd_['time'], 3)
            inf = '^ sh({cmd}) time({rtime})'.format(
                rtime=rtime, **cmd_)
            if cmd_['exc']:
                log.error(inf + '\n' + ''.join(cmd_['exc']))
            elif nuka.cli.args.verbose > 1:
                log.info(inf)
            stds = {k: v for k, v in cmd_.items()
                    if k in ('stderr', 'stdout') and v}
            if stds:
                log.debug3('^ ' + str(stds))
        data = self.res.get('log')
        if data:
            # Replay remote log lines; lines prefixed with a level name set
            # the level for the following continuation lines.
            level = None
            for line in data.rstrip().split('\n'):
                line = line.rstrip()
                try:
                    line_level, message = line.split(':', 1)
                except ValueError:
                    if level:
                        log.log(level, '^ ' + line)
                else:
                    if line_level in logging._nameToLevel:
                        level = getattr(logging, line_level)
                        log.log(level, '^ ' + message)
                    else:
                        if level:
                            log.log(level, '^ ' + line)
class SetupTask(Base):
    """Base class for internal lifecycle tasks (boot/setup/teardown/destroy)."""

    # Lifecycle tasks never report a change to the user.
    changed = False

    def log(self):
        # Internal tasks are only shown at debug level.
        self.host.log.debug(self)

    def pre_process(self):
        self.args['name'] = ''  # unnamed task

    def cancel(self):
        super().cancel()
        if not self.host.cancelled():
            self.host.cancel()
            self.host.log.critical(
                'Cancelled at (unknown):{lineno}...'.format(**self.meta))
class boot(SetupTask):
    """A task that just call host.boot()"""

    def __class_name__(self):
        return 'boot'

    async def run(self):
        # wait for boot async
        try:
            await self.host.boot()
        except Exception:
            self.host.log.exception('boot')
        # Account boot time from the host's own start timestamp.
        self.meta['start'] = self.host._start
class setup(SetupTask):
    """A task that just wait for :class:`~nuka.task.boot` then setup the
    host"""

    # Shell pipeline: wipe/create the remote dirs, unpack the uploaded
    # gzip archive (read from stdin via dd), then run the remote script's
    # --setup phase with whatever python is available.
    setup_cmd = (
        '{0}rm -Rf {2[remote_tmp]}; '
        '{0}mkdir -p {2[remote_tmp]} && {0}chmod 777 {2[remote_tmp]} &&'
        '{0}mkdir -p {2[remote_dir]} &&'
        'dd bs={1} count=1 | {0}tar -xz -C {2[remote_dir]} && '
        '{0}`which python 2> /dev/null || which python3 || echo python` '
        '{2[script]} --setup'
    )

    def __class_name__(self):
        return 'setup'

    async def run(self):
        host = self.host
        # wait for boot async
        await host._named_tasks[boot.__name__]
        self.meta['start'] = time.time()
        # run bootstrap_command if any
        if host.bootstrap_command:
            res = await host.run_command(host.bootstrap_command)
            if res['rc'] != 0:
                self.host.log.error(res)
                self.cancel()
                return
        # setup
        sudo = ''
        if host.use_sudo:
            sudo = '{sudo} '.format(**config)
        cmd = self.setup_cmd.format(sudo, '{bytes}', config)
        # Inventory modules come from the global config plus the host vars.
        mods = nuka.config['inventory_modules'][:]
        mods += self.host.vars.get('inventory_modules', [])
        if mods:
            cmd += ' ' + ' '.join(['--inventory=' + m for m in mods])
        stdin = remote.build_archive(
            extra_classes=all_task_classes(),
            mode='x:gz')
        # The archive size feeds the dd block size in the pipeline above.
        c = cmd.format(bytes=len(stdin))
        host.log.debug('Uploading archive ({0}kb)...'.format(
            int(len(stdin) / 1000)))
        try:
            proc = await self.host.create_process(c, task=self)
            proc.stdin.write(stdin)
            await proc.stdin.drain()
        except (LookupError, OSError, asyncssh.misc.Error) as e:
            if isinstance(e, asyncssh.misc.Error):
                e = LookupError(str(e), self.host)
            self.host.log.error(e.args[0])
            self.host.fail(e)
            return
        res = {}
        # Consume messages until the remote side signals its exit.
        while res.get('message_type') != 'exit':
            # wait for messages
            try:
                res = await proc.next_message()
            except asyncio.CancelledError:
                raise
            except (LookupError, OSError) as e:
                self.host.log.error(e.args[0])
                self.host.fail(e)
                return
            except Exception as e:
                self.cancel()
                self.host.log.exception5(
                    '{0}\n\n{1}'.format(self, stdin))
            else:
                if res.get('message_type') == 'log':
                    self.host.log.log(res['level'], res['msg'])
        self.res.update(res)
        if self.res['rc'] != 0:
            self.cancel()
        # Store the gathered inventory, letting each module post-process it.
        host.vars['inventory'] = self.res['inventory']
        for name in mods:
            mod = importlib.import_module(name)
            meth = getattr(mod, 'finalize_inventory', None)
            if meth is not None:
                meth(host.vars['inventory'])
        host.log.debug(
            'Inventory:\n{0}'.format(host.vars['inventory']))
        if not host.fully_booted.done():
            host.fully_booted.set_result(True)
class teardown(SetupTask):
    """remove `remote_dir` from the host"""

    teardown_cmd = '{0}rm -Rf {1[remote_dir]}'

    def __init__(self, host):
        # Reset cancellation so teardown still runs after a failed deploy.
        host._cancelled = False
        super().__init__(host=host)

    def __class_name__(self):
        return 'teardown'

    async def run(self):
        if not self.host.failed():
            sudo = self.host.use_sudo and 'sudo ' or ''
            cmd = self.teardown_cmd.format(sudo, config)
            await self.host.run_command(cmd, task=self)
class destroy(SetupTask):
    """destroy the host"""

    def __init__(self, host):
        # Reset cancellation so destroy still runs after a failed deploy.
        host._cancelled = False
        super().__init__(host=host)

    def __class_name__(self):
        return 'destroy'

    async def run(self):
        # Guard against destroying the same host twice.
        if 'destroyed' not in self.host.vars:
            await self.host.destroy()
class wait(Base):
    """A task that wait for a coroutine / event / future:

    .. code-block:: python

        nuka.wait(do_something(host), event)

    You can use a timeout:

    .. code-block:: python

        nuka.wait(event, timeout=30)
    """

    def __class_name__(self):
        return 'wait'

    def __init__(self, future, *futures, **kwargs):
        # Accept either a list of futures as the first argument or futures
        # spread over the positional arguments; normalize to one list.
        futures = list(futures)
        if not isinstance(future, list):
            futures.insert(0, future)
        else:
            futures[0:0] = future
        kwargs['name'] = repr(futures)
        kwargs['futures'] = futures
        super().__init__(**kwargs)

    async def run(self):
        futures = self.args['futures']
        res = await asyncio.wait_for(asyncio.gather(*futures),
                                     timeout=self.args.get('timeout'))
        self.set_result(res)
        self.meta.setdefault('time', time.time() - self.meta['start'])
        # skip time if we dont wait for a nuka.Event
        events = [e for e in res if isinstance(e, nuka.Event)]
        if events:
            self.host.add_time(type='task', task=self, **self.meta)
async def wait_for_boot(host):
    """Create the boot/setup tasks if needed and wait until setup is done."""
    if not host.fully_booted.done():
        create_setup_tasks(host)
        task = host._named_tasks[setup.__name__]
        if not task.done():
            await task
def create_setup_tasks(host):
    """Register the named ``boot`` and ``setup`` tasks on *host*.

    No-op once the host is fully booted.
    """
    if host.fully_booted.done():
        return
    for task_class in (boot, setup):
        key = task_class.__name__
        instance = host._named_tasks.get(key)
        if instance is None:
            instance = task_class(host=host)
        # Note: intentionally mirrors the original behaviour of re-storing
        # and re-appending an already-registered instance.
        host._named_tasks[key] = instance
        host._tasks.append(instance)
def get_task_from_stack():
    """Return the innermost :class:`Base` task found on the call stack.

    Returns ``None`` when no task frame is found.
    """
    for info in inspect.stack(3):
        # Look up 'self' once and reuse it (the original fetched it twice).
        candidate = info.frame.f_locals.get('self')
        if isinstance(candidate, Base):
            return candidate
    return None
def all_task_classes(cls=Task):
    """Yield every strict subclass of *cls*, depth-first (post-order)."""
    for subclass in cls.__subclasses__():
        for descendant in all_task_classes(subclass):
            yield descendant
        yield subclass
| gpl-3.0 |
jobsafran/mediadrop | mediadrop/lib/tests/xhtml_normalization_test.py | 3 | 3403 | # -*- coding: utf-8 -*-
# This file is a part of MediaDrop (http://www.mediadrop.net),
# Copyright 2009-2015 MediaDrop contributors
# For the exact contribution history, see the git revision log.
# The source code contained in this file is licensed under the GPLv3 or
# (at your option) any later version.
# See LICENSE.txt in the main project directory, for more information.
from mediadrop.lib.helpers import clean_xhtml, line_break_xhtml
from mediadrop.lib.xhtml import cleaner_settings
from mediadrop.lib.xhtml.htmlsanitizer import entities_to_unicode
from mediadrop.lib.test.pythonic_testcase import *
class XHTMLNormalizationTest(PythonicTestCase):
    """Round-trip tests for cleaning and displaying user-supplied XHTML."""

    def test_can_replace_linebreaks_with_br_tags(self):
        htmlified_text = clean_xhtml('first\nline\n\nsecond line')
        assert_equals('<p>first\nline<br>second line</p>', htmlified_text)
        # cleaning must be idempotent: cleaning already-clean HTML is a no-op
        assert_equals(htmlified_text, clean_xhtml(htmlified_text))

    def test_trailing_newlines_are_removed_in_output(self):
        expected_html = '<p>first</p>'
        assert_equals(expected_html, clean_xhtml('first\n'))
        # the double trailing newline case regressed after switching to bleach
        self.skipTest('broken by bleach')
        assert_equals(expected_html, clean_xhtml('first\n\n'))

    def test_text_do_not_change_after_a_clean_xhtml_and_line_break_xhtml_cycle(self):
        """Mimics the input -> clean -> display -> input... cycle of the
        XHTMLTextArea widget.
        """
        expected_html = '<p>first line<br>second line</p>'
        htmlified_text = clean_xhtml('first line\n\nsecond line')
        assert_equals(expected_html, htmlified_text)

        # Ensure that re-cleaning the XHTML provides the same result.
        display_text = line_break_xhtml(htmlified_text)
        assert_equals('<p>first line<br>second line</p>', display_text)
        assert_equals(expected_html, clean_xhtml(display_text))

    def test_adds_nofollow_attribute_to_links(self):
        original = '<a href="http://example.com">link</a>'
        cleaned = clean_xhtml(original)
        assert_equals(cleaned, '<p><a href="http://example.com" rel="nofollow">link</a></p>')

    def _test_removes_follow_attribute_from_links(self):
        # NOTE(review): the leading underscore keeps this test disabled
        original = '<a href="http://example.com" rel="follow">link</a>'
        cleaned = clean_xhtml(original)
        assert_equals(cleaned, '<a href="http://example.com" rel="nofollow">link</a>')

    def test_makes_automatic_links_nofollow(self):
        original = 'http://example.com'
        cleaned = clean_xhtml(original)
        assert_equals(cleaned, '<p><a href="http://example.com" rel="nofollow">http://example.com</a></p>')

    def test_adds_target_blank_to_links(self):
        original = '<a href="http://example.com">link</a>'
        # deep-copy the module-wide settings so the mutation stays test-local
        from copy import deepcopy
        settings = deepcopy(cleaner_settings)
        settings['add_target_blank'] = True
        cleaned = clean_xhtml(original, _cleaner_settings=settings)
        assert_equals(cleaned, '<p><a href="http://example.com" rel="nofollow" target="_blank">link</a></p>')

    def test_entities_to_unicode(self):
        testtext = 'Playing Toccata & Fugue <script>evil/script</script>'
        testtextunicode = entities_to_unicode(testtext)
        assert_equals(testtextunicode, 'Playing Toccata & Fugue evil/script')
import unittest


def suite():
    """Build the unittest suite for this module."""
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(XHTMLNormalizationTest))
    return tests
| gpl-3.0 |
535521469/crawler_sth | scrapy/log.py | 6 | 4258 | """
Scrapy logging facility
See documentation in docs/topics/logging.rst
"""
import sys
import logging
import warnings
from twisted.python import log
import scrapy
from scrapy.utils.python import unicode_to_str
# Logging levels
DEBUG = logging.DEBUG
INFO = logging.INFO
WARNING = logging.WARNING
ERROR = logging.ERROR
CRITICAL = logging.CRITICAL
SILENT = CRITICAL + 1
level_names = {
logging.DEBUG: "DEBUG",
logging.INFO: "INFO",
logging.WARNING: "WARNING",
logging.ERROR: "ERROR",
logging.CRITICAL: "CRITICAL",
SILENT: "SILENT",
}
class ScrapyFileLogObserver(log.FileLogObserver):
    """Twisted file log observer that filters and adapts events for Scrapy.

    Events below ``level`` are dropped and messages are encoded with
    ``encoding``.  When a ``crawler`` is given, per-level log counts are
    additionally recorded in its stats under ``log_count/<LEVEL>``.
    """

    def __init__(self, f, level=INFO, encoding='utf-8', crawler=None):
        self.level = level
        self.encoding = encoding
        if crawler:
            self.crawler = crawler
            # bind the stats-recording variant as this observer's callback
            self.emit = self._emit_with_crawler
        else:
            self.emit = self._emit
        log.FileLogObserver.__init__(self, f)

    def _emit(self, eventDict):
        # Adapt the Twisted event; None means "filtered out, do not log".
        ev = _adapt_eventdict(eventDict, self.level, self.encoding)
        if ev is not None:
            log.FileLogObserver.emit(self, ev)
        return ev

    def _emit_with_crawler(self, eventDict):
        ev = self._emit(eventDict)
        if ev:
            level = ev['logLevel']
            # e.g. 'log_count/ERROR'; falls back to the numeric level if unknown
            sname = 'log_count/%s' % level_names.get(level, level)
            self.crawler.stats.inc_value(sname)
def _adapt_eventdict(eventDict, log_level=INFO, encoding='utf-8', prepend_level=True):
    """Adapt Twisted log eventDict making it suitable for logging with a Scrapy
    log observer. It may return None to indicate that the event should be
    ignored by a Scrapy log observer.

    `log_level` is the minimum level being logged, and `encoding` is the log
    encoding.
    """
    ev = eventDict.copy()
    if ev['isError']:
        ev.setdefault('logLevel', ERROR)
    # ignore non-error messages from outside scrapy
    if ev.get('system') != 'scrapy' and not ev['isError']:
        return
    level = ev.get('logLevel')
    # NOTE(review): `level` may be None when a scrapy event carries no
    # explicit logLevel; this comparison relies on Python 2 mixed-type
    # ordering (None sorts below ints) and would raise on Python 3.
    if level < log_level:
        return
    spider = ev.get('spider')
    if spider:
        # attribute the log line to the spider instead of the generic system
        ev['system'] = spider.name
    lvlname = level_names.get(level, 'NOLEVEL')
    message = ev.get('message')
    if message:
        # encode every message part; prefix the first with the level name
        message = [unicode_to_str(x, encoding) for x in message]
        if prepend_level:
            message[0] = "%s: %s" % (lvlname, message[0])
        ev['message'] = message
    why = ev.get('why')
    if why:
        why = unicode_to_str(why, encoding)
        if prepend_level:
            why = "%s: %s" % (lvlname, why)
        ev['why'] = why
    fmt = ev.get('format')
    if fmt:
        fmt = unicode_to_str(fmt, encoding)
        if prepend_level:
            fmt = "%s: %s" % (lvlname, fmt)
        ev['format'] = fmt
    return ev
def _get_log_level(level_name_or_id):
if isinstance(level_name_or_id, int):
return level_name_or_id
elif isinstance(level_name_or_id, basestring):
return globals()[level_name_or_id]
else:
raise ValueError("Unknown log level: %r" % level_name_or_id)
def start(logfile=None, loglevel='INFO', logstdout=True, logencoding='utf-8', crawler=None):
    """Install a Scrapy log observer on Twisted's logging and return it."""
    loglevel = _get_log_level(loglevel)
    # NOTE(review): the file object is opened here but never explicitly
    # closed; the observer holds it for the lifetime of the process.
    file = open(logfile, 'a') if logfile else sys.stderr
    sflo = ScrapyFileLogObserver(file, loglevel, logencoding, crawler)
    _oldshowwarning = warnings.showwarning
    log.startLoggingWithObserver(sflo.emit, setStdout=logstdout)
    # restore warnings, wrongly silenced by Twisted
    warnings.showwarning = _oldshowwarning
    return sflo
def msg(message=None, _level=INFO, **kw):
    """Log *message* through Twisted, tagged with a Scrapy log level."""
    kw['logLevel'] = kw.pop('level', _level)
    kw.setdefault('system', 'scrapy')
    # twisted's log.msg treats a missing positional differently from None,
    # so only pass the message when one was actually given
    args = () if message is None else (message,)
    log.msg(*args, **kw)
def err(_stuff=None, _why=None, **kw):
    """Log a failure/exception through Twisted, defaulting to ERROR level."""
    kw['logLevel'] = kw.pop('level', ERROR)
    kw.setdefault('system', 'scrapy')
    log.err(_stuff, _why, **kw)
def start_from_crawler(crawler):
    """Start logging as configured in the crawler settings, if enabled."""
    settings = crawler.settings
    if not settings.getbool('LOG_ENABLED'):
        return
    start(settings['LOG_FILE'], settings['LOG_LEVEL'], settings['LOG_STDOUT'],
          settings['LOG_ENCODING'], crawler)
    # announce startup through the freshly installed observer
    msg("Scrapy %s started (bot: %s)" % (scrapy.__version__, \
        settings['BOT_NAME']))
| bsd-3-clause |
zygmuntz/pybrain | pybrain/rl/learners/directsearch/enac.py | 25 | 1478 | from __future__ import print_function
__author__ = 'Thomas Rueckstiess, ruecksti@in.tum.de'
from pybrain.rl.learners.directsearch.policygradient import PolicyGradientLearner
from scipy import ones, dot, ravel
from scipy.linalg import pinv
class ENAC(PolicyGradientLearner):
    """ Episodic Natural Actor-Critic. See J. Peters "Natural Actor-Critic", 2005.
        Estimates natural gradient with regression of log likelihoods to rewards.
    """

    def calculateGradient(self):
        """Estimate the natural policy gradient.

        Regresses the per-episode summed log-likelihood derivatives against
        the per-episode summed rewards and returns the regression weights
        (without the bias term), which form the natural-gradient estimate.
        """
        # normalize rewards
        # self.dataset.data['reward'] /= max(ravel(abs(self.dataset.data['reward'])))

        # initialize variables: one row per episode; the trailing column of
        # ones in X is the bias term of the linear regression.
        R = ones((self.dataset.getNumSequences(), 1), float)
        X = ones((self.dataset.getNumSequences(), self.loglh.getDimension('loglh') + 1), float)

        # collect sufficient statistics
        # (a leftover debug print of the sequence count was removed here)
        for n in range(self.dataset.getNumSequences()):
            _state, _action, reward = self.dataset.getSequence(n)
            seqidx = ravel(self.dataset['sequence_index'])
            if n == self.dataset.getNumSequences() - 1:
                # last sequence until end of dataset
                loglh = self.loglh['loglh'][seqidx[n]:, :]
            else:
                loglh = self.loglh['loglh'][seqidx[n]:seqidx[n + 1], :]

            X[n, :-1] = sum(loglh, 0)
            R[n, 0] = sum(reward, 0)

        # least-squares regression via pseudo-inverse; drop the bias weight
        beta = dot(pinv(X), R)
        return beta[:-1]
| bsd-3-clause |
maurofaccenda/ansible | lib/ansible/modules/network/vyos/vyos_system.py | 37 | 6270 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
# The documented option name must match argument_spec below: the module
# declares `host_name`, not `hostname`.
DOCUMENTATION = """
---
module: "vyos_system"
version_added: "2.3"
author: "Nathaniel Case (@qalthos)"
short_description: Run `set system` commands on VyOS devices
description:
  - Runs one or more commands on remote devices running VyOS.
    This module can also be introspected to validate key parameters before
    returning successfully.
extends_documentation_fragment: vyos
options:
  host_name:
    description:
      - Configure the device hostname parameter. This option takes an ASCII string value.
  domain_name:
    description:
      - The new domain name to apply to the device.
  name_server:
    description:
      - A list of name servers to use with the device. Mutually exclusive with
        I(domain_search)
    required: false
    default: null
  domain_search:
    description:
      - A list of domain names to search. Mutually exclusive with
        I(name_server)
  state:
    description:
      - Whether to apply (C(present)) or remove (C(absent)) the settings.
    default: present
    choices: ['present', 'absent']
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- set system hostname vyos01
- set system domain-name foo.example.com
"""
EXAMPLES = """
- name: configure hostname and domain-name
vyos_system:
hostname: vyos01
domain_name: test.example.com
- name: remove all configuration
vyos_system:
state: absent
- name: configure name servers
vyos_system:
name_server:
- 8.8.8.8
- 8.8.4.4
- name: configure domain search suffixes
vyos_system:
domain_search:
- sub1.example.com
- sub2.example.com
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vyos import get_config, load_config
from ansible.module_utils.vyos import vyos_argument_spec, check_args
def spec_key_to_device_key(key):
    """Translate a module option name (snake_case) to its VyOS config key."""
    device_key = key.replace('_', '-')
    # 'domain-search' entries live under 'domain-search domain' in the config
    return device_key + ' domain' if device_key == 'domain-search' else device_key
def config_to_dict(module):
    """Parse the device's running `set system ...` lines into a dict.

    The fixed slice offsets skip the literal command prefix plus the opening
    quote, and the trailing ``[:-1]`` drops the closing quote around each
    value (assumes values come back quoted from get_config -- TODO confirm).
    """
    data = get_config(module)

    config = {'domain_search': [], 'name_server': []}

    for line in data.split('\n'):
        if line.startswith('set system host-name'):
            config['host_name'] = line[22:-1]
        elif line.startswith('set system domain-name'):
            config['domain_name'] = line[24:-1]
        elif line.startswith('set system domain-search domain'):
            config['domain_search'].append(line[33:-1])
        elif line.startswith('set system name-server'):
            config['name_server'].append(line[24:-1])

    return config
def spec_to_commands(want, have):
    """Compute the VyOS set/delete commands that turn `have` into `want`.

    `want` is the desired state (including a 'state' key, which is popped --
    NOTE(review): `want` is mutated by this function); `have` is the parsed
    running config from config_to_dict().
    """
    commands = []

    state = want.pop('state')

    # state='absent' by itself has special meaning
    if state == 'absent' and all(v is None for v in want.values()):
        # Clear everything
        for key in have:
            commands.append('delete system %s' % spec_key_to_device_key(key))

    for key in want:
        if want[key] is None:
            # option not supplied by the user: leave it untouched
            continue

        current = have.get(key)
        proposed = want[key]
        device_key = spec_key_to_device_key(key)

        # These keys are lists which may need to be reconciled with the device
        if key in ['domain_search', 'name_server']:
            if not proposed:
                # Empty list was passed, delete all values
                commands.append("delete system %s" % device_key)
            for config in proposed:
                if state == 'absent' and config in current:
                    commands.append("delete system %s '%s'" % (device_key, config))
                elif state == 'present' and config not in current:
                    commands.append("set system %s '%s'" % (device_key, config))
        else:
            # scalar keys (host-name, domain-name)
            if state == 'absent' and current and proposed:
                commands.append('delete system %s' % device_key)
            elif state == 'present' and proposed and proposed != current:
                commands.append("set system %s '%s'" % (device_key, proposed))

    return commands
def map_param_to_obj(module):
    """Extract the desired-state dict from the module's parameters."""
    keys = ('host_name', 'domain_name', 'domain_search', 'name_server', 'state')
    return {key: module.params[key] for key in keys}
def main():
    """Module entry point: diff desired vs. running config and apply it."""
    argument_spec = dict(
        host_name=dict(type='str'),
        domain_name=dict(type='str'),
        domain_search=dict(type='list'),
        name_server=dict(type='list'),
        state=dict(type='str', default='present', choices=['present', 'absent']),
    )

    argument_spec.update(vyos_argument_spec)

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        # NOTE(review): DOCUMENTATION says name_server and domain_search are
        # mutually exclusive, but this enforces domain_name vs domain_search
        # -- confirm which pairing is intended.
        mutually_exclusive=[('domain_name', 'domain_search')],
    )

    warnings = list()
    check_args(module, warnings)

    result = {'changed': False, 'warnings': warnings}

    want = map_param_to_obj(module)
    have = config_to_dict(module)

    commands = spec_to_commands(want, have)
    result['commands'] = commands

    if commands:
        # honor check mode: compute commands but only commit on a real run
        commit = not module.check_mode
        response = load_config(module, commands, commit=commit)  # NOTE(review): response unused
        result['changed'] = True

    module.exit_json(**result)

if __name__ == '__main__':
    main()
| gpl-3.0 |
cuongnv23/ansible | lib/ansible/modules/cloud/digital_ocean/digital_ocean_tag.py | 26 | 6604 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: digital_ocean_tag
short_description: Create and remove tag(s) to DigitalOcean resource.
description:
- Create and remove tag(s) to DigitalOcean resource.
author: "Victor Volle (@kontrafiktion)"
version_added: "2.2"
options:
name:
description:
- The name of the tag. The supported characters for names include
alphanumeric characters, dashes, and underscores.
required: true
resource_id:
description:
- The ID of the resource to operate on.
resource_type:
description:
- The type of resource to operate on. Currently, only tagging of
droplets is supported.
default: droplet
choices: ['droplet']
state:
description:
- Whether the tag should be present or absent on the resource.
default: present
choices: ['present', 'absent']
api_token:
description:
- DigitalOcean api token.
notes:
- Two environment variables can be used, DO_API_KEY and DO_API_TOKEN.
They both refer to the v2 token.
- As of Ansible 2.0, Version 2 of the DigitalOcean API is used.
requirements:
- "python >= 2.6"
'''
EXAMPLES = '''
- name: create a tag
digital_ocean_tag:
name: production
state: present
- name: tag a resource; creating the tag if it does not exists
digital_ocean_tag:
name: "{{ item }}"
resource_id: YYY
state: present
with_items:
- staging
- dbserver
- name: untag a resource
digital_ocean_tag:
name: staging
resource_id: YYY
state: absent
# Deleting a tag also untags all the resources that have previously been
# tagged with it
- name: remove a tag
digital_ocean_tag:
name: dbserver
state: absent
'''
RETURN = '''
data:
description: a DigitalOcean Tag resource
returned: success and no resource constraint
type: dict
sample: {
"tag": {
"name": "awesome",
"resources": {
"droplets": {
"count": 0,
"last_tagged": null
}
}
}
}
'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.digital_ocean import DigitalOceanHelper
from ansible.module_utils._text import to_native
def core(module):
    """Create/delete the tag and attach/detach it from an optional resource.

    Behaviour:
      state=present, no resource_id -> ensure the tag exists.
      state=present, resource_id    -> ensure the tag exists and is attached.
      state=absent,  resource_id    -> detach the tag from that resource.
      state=absent,  no resource_id -> delete the tag entirely.
    Exits via module.exit_json()/fail_json() in every branch.
    """
    state = module.params['state']
    name = module.params['name']
    resource_id = module.params['resource_id']
    resource_type = module.params['resource_type']

    rest = DigitalOceanHelper(module)

    # Check if api_token is valid or not
    response = rest.get('account')
    if response.status_code == 401:
        module.fail_json(msg='Failed to login using api_token, please verify '
                             'validity of api_token')

    if state == 'present':
        response = rest.get('tags/{0}'.format(name))
        status_code = response.status_code
        resp_json = response.json
        changed = False
        if status_code == 200 and resp_json['tag']['name'] == name:
            # tag already exists
            changed = False
        else:
            # Ensure Tag exists
            response = rest.post("tags", data={'name': name})
            status_code = response.status_code
            resp_json = response.json
            if status_code == 201:
                changed = True
            elif status_code == 422:
                # 422: tag already exists (e.g. created concurrently)
                changed = False
            else:
                module.exit_json(changed=False, data=resp_json)

        if resource_id is None:
            # No resource defined, we're done.
            module.exit_json(changed=changed, data=resp_json)
        else:
            # Check if resource is already tagged or not
            found = False
            url = "{0}?tag_name={1}".format(resource_type, name)
            if resource_type == 'droplet':
                url = "droplets?tag_name={0}".format(name)
            response = rest.get(url)
            status_code = response.status_code
            resp_json = response.json
            if status_code == 200:
                for resource in resp_json['droplets']:
                    if not found and resource['id'] == int(resource_id):
                        found = True
                        break
                if not found:
                    # If resource is not tagged, tag a resource
                    url = "tags/{0}/resources".format(name)
                    payload = {
                        'resources': [{
                            'resource_id': resource_id,
                            'resource_type': resource_type}]}
                    response = rest.post(url, data=payload)
                    if response.status_code == 204:
                        module.exit_json(changed=True)
                    else:
                        module.fail_json(msg="error tagging resource '{0}': {1}".format(resource_id, response.json["message"]))
                else:
                    # Already tagged resource
                    module.exit_json(changed=False)
            else:
                # Unable to find resource specified by user
                module.fail_json(msg=resp_json['message'])

    elif state == 'absent':
        if resource_id:
            # detach only: remove this tag from the single resource
            url = "tags/{0}/resources".format(name)
            payload = {
                'resources': [{
                    'resource_id': resource_id,
                    'resource_type': resource_type}]}
            response = rest.delete(url, data=payload)
        else:
            # delete the tag itself (implicitly untags all its resources)
            url = "tags/{0}".format(name)
            response = rest.delete(url)

        if response.status_code == 204:
            module.exit_json(changed=True)
        else:
            module.exit_json(changed=False, data=response.json)
def main():
    """Module entry point: parse arguments and delegate to core()."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True),
            resource_id=dict(aliases=['droplet_id'], type='int'),
            resource_type=dict(choices=['droplet'], default='droplet'),
            state=dict(choices=['present', 'absent'], default='present'),
            api_token=dict(aliases=['API_TOKEN'], no_log=True),
        )
    )

    try:
        core(module)
    except Exception as e:
        # surface any unexpected error with its traceback for debugging
        module.fail_json(msg=to_native(e), exception=format_exc())

if __name__ == '__main__':
    main()
| gpl-3.0 |
sander76/home-assistant | homeassistant/components/simulated/sensor.py | 5 | 4506 | """Adds a simulated sensor."""
from datetime import datetime
import math
from random import Random
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import CONF_NAME
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
CONF_AMP = "amplitude"
CONF_FWHM = "spread"
CONF_MEAN = "mean"
CONF_PERIOD = "period"
CONF_PHASE = "phase"
CONF_SEED = "seed"
CONF_UNIT = "unit"
CONF_RELATIVE_TO_EPOCH = "relative_to_epoch"
DEFAULT_AMP = 1
DEFAULT_FWHM = 0
DEFAULT_MEAN = 0
DEFAULT_NAME = "simulated"
DEFAULT_PERIOD = 60
DEFAULT_PHASE = 0
DEFAULT_SEED = 999
DEFAULT_UNIT = "value"
DEFAULT_RELATIVE_TO_EPOCH = True
ICON = "mdi:chart-line"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_AMP, default=DEFAULT_AMP): vol.Coerce(float),
vol.Optional(CONF_FWHM, default=DEFAULT_FWHM): vol.Coerce(float),
vol.Optional(CONF_MEAN, default=DEFAULT_MEAN): vol.Coerce(float),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PERIOD, default=DEFAULT_PERIOD): cv.positive_int,
vol.Optional(CONF_PHASE, default=DEFAULT_PHASE): vol.Coerce(float),
vol.Optional(CONF_SEED, default=DEFAULT_SEED): cv.positive_int,
vol.Optional(CONF_UNIT, default=DEFAULT_UNIT): cv.string,
vol.Optional(
CONF_RELATIVE_TO_EPOCH, default=DEFAULT_RELATIVE_TO_EPOCH
): cv.boolean,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the simulated sensor from its validated platform config."""
    sensor = SimulatedSensor(
        config.get(CONF_NAME),
        config.get(CONF_UNIT),
        config.get(CONF_AMP),
        config.get(CONF_MEAN),
        config.get(CONF_PERIOD),
        config.get(CONF_PHASE),
        config.get(CONF_FWHM),
        config.get(CONF_SEED),
        config.get(CONF_RELATIVE_TO_EPOCH),
    )
    add_entities([sensor], True)
class SimulatedSensor(SensorEntity):
    """Sensor whose state is a seeded sinusoid plus Gaussian noise."""

    def __init__(
        self, name, unit, amp, mean, period, phase, fwhm, seed, relative_to_epoch
    ):
        """Init the class."""
        self._name = name
        self._unit = unit
        self._amp = amp
        self._mean = mean
        self._period = period
        self._phase = phase  # phase in degrees
        self._fwhm = fwhm
        self._seed = seed
        self._random = Random(seed)  # A local seeded Random
        # Time origin of the signal: the Unix epoch (reproducible across
        # restarts) or startup time.
        self._start_time = (
            datetime(1970, 1, 1, tzinfo=dt_util.UTC)
            if relative_to_epoch
            else dt_util.utcnow()
        )
        self._relative_to_epoch = relative_to_epoch
        self._state = None

    def time_delta(self):
        """Return the time delta."""
        dt0 = self._start_time
        dt1 = dt_util.utcnow()
        return dt1 - dt0

    def signal_calc(self):
        """Calculate the signal."""
        mean = self._mean
        amp = self._amp
        time_delta = self.time_delta().total_seconds() * 1e6  # to microseconds
        period = self._period * 1e6  # to microseconds (same scale as above)
        fwhm = self._fwhm / 2
        phase = math.radians(self._phase)
        if period == 0:
            # aperiodic signal: no sinusoidal component
            periodic = 0
        else:
            periodic = amp * (math.sin((2 * math.pi * time_delta / period) + phase))
        noise = self._random.gauss(mu=0, sigma=fwhm)
        return round(mean + periodic + noise, 3)

    async def async_update(self):
        """Update the sensor."""
        self._state = self.signal_calc()

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def icon(self):
        """Icon to use in the frontend, if any."""
        return ICON

    @property
    def unit_of_measurement(self):
        """Return the unit this state is expressed in."""
        return self._unit

    @property
    def extra_state_attributes(self):
        """Return other details about the sensor state."""
        return {
            "amplitude": self._amp,
            "mean": self._mean,
            "period": self._period,
            "phase": self._phase,
            "spread": self._fwhm,
            "seed": self._seed,
            "relative_to_epoch": self._relative_to_epoch,
        }
| apache-2.0 |
ingenioustechie/zamboni | mkt/feed/tests/test_fakedata.py | 19 | 2143 | from nose.tools import eq_
import mkt.site.tests
from mkt.feed.models import FeedItem
from mkt.feed.fakedata import app_item, brand, collection, shelf
from mkt.site.fixtures import fixture
from mkt.webapps.models import Webapp
class TestFeedGeneration(mkt.site.tests.TestCase):
    """Smoke tests for the feed fake-data factory helpers."""

    fixtures = fixture('webapp_337141')

    def test_brand(self):
        app = Webapp.objects.get(pk=337141)
        br = brand(layout='grid', type='hidden-gem',
                   apps=[app], region='br')
        eq_(br.layout, 'grid')
        eq_(br.type, 'hidden-gem')
        eq_(list(br.apps()), [app])
        # region 'br' is persisted as its numeric id (7) on the FeedItem
        eq_(FeedItem.objects.get(brand=br).region, 7)

    def test_collection(self):
        app = Webapp.objects.get(pk=337141)
        co = collection(apps=[app], slug='test-coll', color='amber',
                        name='Example Collection',
                        description='Test Desc', region='br')
        eq_(co.slug, 'test-coll')
        eq_(co.color, 'amber')
        eq_(co.name, 'Example Collection')
        eq_(co.description, 'Test Desc')
        eq_(list(co.apps()), [app])
        eq_(FeedItem.objects.get(collection=co).region, 7)

    def test_shelf(self):
        app = Webapp.objects.get(pk=337141)
        sh = shelf(apps=[app], slug='test-shelf', name='Example Shelf',
                   description='Test Desc', region='br')
        eq_(sh.slug, 'test-shelf')
        eq_(sh.name, 'Example Shelf')
        eq_(sh.description, 'Test Desc')
        eq_(list(sh.apps()), [app])
        eq_(FeedItem.objects.get(shelf=sh).region, 7)

    def test_app(self):
        app = Webapp.objects.get(pk=337141)
        a = app_item(app, type='quote', slug='test-quote',
                     color='amber',
                     pullquote_attribution='test attribution',
                     pullquote_rating=1,
                     pullquote_text='test quote')
        eq_(a.type, 'quote')
        eq_(a.color, 'amber')
        eq_(a.slug, 'test-quote')
        # pullquote fields are translated fields; compare their unicode value
        eq_(unicode(a.pullquote_attribution), u'test attribution')
        eq_(a.pullquote_rating, 1)
        eq_(unicode(a.pullquote_text), u'test quote')
        eq_(a.app, app)
| bsd-3-clause |
sumspr/scikit-learn | sklearn/feature_selection/from_model.py | 224 | 4316 | # Authors: Gilles Louppe, Mathieu Blondel
# License: BSD 3 clause
import numpy as np
from ..base import TransformerMixin
from ..externals import six
from ..utils import safe_mask, check_array
from ..utils.validation import NotFittedError, check_is_fitted
class _LearntSelectorMixin(TransformerMixin):
    # Note because of the extra threshold parameter in transform, this does
    # not naturally extend from SelectorMixin
    """Transformer mixin selecting features based on importance weights.

    This implementation can be mixin on any estimator that exposes a
    ``feature_importances_`` or ``coef_`` attribute to evaluate the relative
    importance of individual features for feature selection.
    """
    def transform(self, X, threshold=None):
        """Reduce X to its most important features.

        Uses ``coef_`` or ``feature_importances_`` to determine the most
        important features. For models with a ``coef_`` for each class, the
        absolute sum over the classes is used.

        Parameters
        ----------
        X : array or scipy sparse matrix of shape [n_samples, n_features]
            The input samples.

        threshold : string, float or None, optional (default=None)
            The threshold value to use for feature selection. Features whose
            importance is greater or equal are kept while the others are
            discarded. If "median" (resp. "mean"), then the threshold value is
            the median (resp. the mean) of the feature importances. A scaling
            factor (e.g., "1.25*mean") may also be used. If None and if
            available, the object attribute ``threshold`` is used. Otherwise,
            "mean" is used by default.

        Returns
        -------
        X_r : array of shape [n_samples, n_selected_features]
            The input samples with only the selected features.
        """
        check_is_fitted(self, ('coef_', 'feature_importances_'),
                        all_or_any=any)

        X = check_array(X, 'csc')
        # Retrieve importance vector
        if hasattr(self, "feature_importances_"):
            importances = self.feature_importances_
        elif hasattr(self, "coef_"):
            if self.coef_ is None:
                msg = "This model is not fitted yet. Please call fit() first"
                raise NotFittedError(msg)
            if self.coef_.ndim == 1:
                importances = np.abs(self.coef_)
            else:
                # multi-class: aggregate absolute coefficients over classes
                importances = np.sum(np.abs(self.coef_), axis=0)

        if len(importances) != X.shape[1]:
            raise ValueError("X has different number of features than"
                             " during model fitting.")

        # Retrieve threshold
        if threshold is None:
            if hasattr(self, "penalty") and self.penalty == "l1":
                # the natural default threshold is 0 when l1 penalty was used
                threshold = getattr(self, "threshold", 1e-5)
            else:
                threshold = getattr(self, "threshold", "mean")

        if isinstance(threshold, six.string_types):
            if "*" in threshold:
                # e.g. "1.25*mean": scale factor applied to a named statistic
                scale, reference = threshold.split("*")
                scale = float(scale.strip())
                reference = reference.strip()

                if reference == "median":
                    reference = np.median(importances)
                elif reference == "mean":
                    reference = np.mean(importances)
                else:
                    raise ValueError("Unknown reference: " + reference)

                threshold = scale * reference

            elif threshold == "median":
                threshold = np.median(importances)

            elif threshold == "mean":
                threshold = np.mean(importances)

        else:
            threshold = float(threshold)

        # Selection
        try:
            mask = importances >= threshold
        except TypeError:
            # Fails in Python 3.x when threshold is str;
            # result is array of True
            raise ValueError("Invalid threshold: all features are discarded.")

        if np.any(mask):
            mask = safe_mask(X, mask)
            return X[:, mask]
        else:
            raise ValueError("Invalid threshold: all features are discarded.")
| bsd-3-clause |
makinacorpus/odoo | addons/project/__init__.py | 436 | 1141 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import project
import company
import report
import wizard
import res_partner
import res_config
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
craynot/django | django/core/management/color.py | 309 | 1807 | """
Sets up the terminal color scheme.
"""
import os
import sys
from django.utils import lru_cache, termcolors
def supports_color():
    """
    Returns True if the running system's terminal supports color,
    and False otherwise.
    """
    platform_ok = sys.platform != 'Pocket PC' and (
        sys.platform != 'win32' or 'ANSICON' in os.environ)
    # isatty is not always implemented, #6223.
    tty_ok = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
    return platform_ok and tty_ok
class Style(object):
    """Empty namespace; make_style() attaches one style function per role."""
    pass
def make_style(config_string=''):
    """
    Create a Style object from the given config_string.

    If config_string is empty django.utils.termcolors.DEFAULT_PALETTE is used.
    """
    style = Style()
    color_settings = termcolors.parse_color_setting(config_string)

    # The nocolor palette enumerates every available role, so it determines
    # which attributes the Style object must expose.
    for role in termcolors.PALETTES[termcolors.NOCOLOR_PALETTE]:
        if color_settings:
            fmt = color_settings.get(role, {})
            styler = termcolors.make_style(**fmt)
        else:
            # no colors configured: pass text through unchanged
            def styler(text):
                return text
        setattr(style, role, styler)

    # For backwards compatibility,
    # set style for ERROR_OUTPUT == ERROR
    style.ERROR_OUTPUT = style.ERROR

    return style
@lru_cache.lru_cache(maxsize=None)
def no_style():
    """
    Returns a Style object with no color scheme.
    """
    # Cached: the colorless Style is stateless, so one shared instance suffices.
    return make_style('nocolor')
def color_style():
    """
    Returns a Style object from the Django color scheme.
    """
    if supports_color():
        # honor the user's DJANGO_COLORS configuration when colors work
        return make_style(os.environ.get('DJANGO_COLORS', ''))
    return no_style()
| bsd-3-clause |
Setheri/ProjectFrontier | bot/C_sarcasticball.py | 67 | 1479 | from random import choice as fsample
sarcastic_responses = ["Yeah right","What do I look like to you?","Are you kidding me?",#UsF
                       "As much as you","You don't believe that yourself","When pigs fly",#UsF
                       "Like your grandma","You would like to know, wouldn't you?", #UsF
                       "Like your mom", #Spectre
                       "Totally","Not at all", #Spectre
                       "AHAHAHahahaha, No.", #Strumpetplaya
                       "Not as much as USER","As much as USER",
                       "Really, you expect me to tell you that?",
                       "Right, and you've been building NOUNs for those USERs in the LOCATION, haven't you?" ] #Richard

locations = ["woods","baystation","ditch"]

nouns = ["bomb","toilet","robot","cyborg",
         "garbage can","gun","cake",
         "missile"]


def sarcasticball(data, debug, sender, users, prefix):
    """Pick a random sarcastic reply, filling any USER/NOUN/LOCATION slots."""
    arg = data.lower().replace(prefix + "sarcasticball ", "")
    arg = arg.replace(prefix + "sball ", "")
    if debug:
        print("%s:%ssarcasticball %s" % (sender, prefix, arg))
    response = fsample(sarcastic_responses)
    if "USER" in response:
        # at most two USER slots exist; fill each with an independent pick
        response = response.replace("USER", fsample(users), 1)
        response = response.replace("USER", fsample(users), 1)
    if "NOUN" in response:
        response = response.replace("NOUN", fsample(nouns), 1)
    if "LOCATION" in response:
        response = response.replace("LOCATION", fsample(locations), 1)
    if debug:
        print("Responded with %s" % response)
    return response
| agpl-3.0 |
pwong-mapr/private-hue | desktop/core/ext-py/Pygments-1.3.1/pygments/cmdline.py | 75 | 13055 | # -*- coding: utf-8 -*-
"""
pygments.cmdline
~~~~~~~~~~~~~~~~
Command line interface.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
import getopt
from textwrap import dedent
from pygments import __version__, highlight
from pygments.util import ClassNotFound, OptionError, docstring_headline
from pygments.lexers import get_all_lexers, get_lexer_by_name, get_lexer_for_filename, \
find_lexer_class, guess_lexer, TextLexer
from pygments.formatters import get_all_formatters, get_formatter_by_name, \
get_formatter_for_filename, find_formatter_class, \
TerminalFormatter # pylint:disable-msg=E0611
from pygments.filters import get_all_filters, find_filter_class
from pygments.styles import get_all_styles, get_style_by_name
# Command-line help text; the six %s slots are all filled with the
# program name (argv[0]) by main().
USAGE = """\
Usage: %s [-l <lexer> | -g] [-F <filter>[:<options>]] [-f <formatter>]
          [-O <options>] [-P <option=value>] [-o <outfile>] [<infile>]
       %s -S <style> -f <formatter> [-a <arg>] [-O <options>] [-P <option=value>]
       %s -L [<which> ...]
       %s -N <filename>
       %s -H <type> <name>
       %s -h | -V

Highlight the input file and write the result to <outfile>.

If no input file is given, use stdin, if -o is not given, use stdout.

<lexer> is a lexer name (query all lexer names with -L). If -l is not
given, the lexer is guessed from the extension of the input file name
(this obviously doesn't work if the input is stdin). If -g is passed,
attempt to guess the lexer from the file contents, or pass through as
plain text if this fails (this can work for stdin).

Likewise, <formatter> is a formatter name, and will be guessed from
the extension of the output file name. If no output file is given,
the terminal formatter will be used by default.

With the -O option, you can give the lexer and formatter a comma-
separated list of options, e.g. ``-O bg=light,python=cool``.

The -P option adds lexer and formatter options like the -O option, but
you can only give one option per -P. That way, the option value may
contain commas and equals signs, which it can't with -O, e.g.
``-P "heading=Pygments, the Python highlighter"``.

With the -F option, you can add filters to the token stream, you can
give options in the same way as for -O after a colon (note: there must
not be spaces around the colon).

The -O, -P and -F options can be given multiple times.

With the -S option, print out style definitions for style <style>
for formatter <formatter>. The argument given by -a is formatter
dependent.

The -L option lists lexers, formatters, styles or filters -- set
`which` to the thing you want to list (e.g. "styles"), or omit it to
list everything.

The -N option guesses and prints out a lexer name based solely on
the given filename. It does not take input or highlight anything.
If no specific lexer can be determined "text" is returned.

The -H option prints detailed help for the object <name> of type <type>,
where <type> is one of "lexer", "formatter" or "filter".

The -h option prints this help.
The -V option prints the package version.
"""
def _parse_options(o_strs):
opts = {}
if not o_strs:
return opts
for o_str in o_strs:
if not o_str:
continue
o_args = o_str.split(',')
for o_arg in o_args:
o_arg = o_arg.strip()
try:
o_key, o_val = o_arg.split('=')
o_key = o_key.strip()
o_val = o_val.strip()
except ValueError:
opts[o_arg] = True
else:
opts[o_key] = o_val
return opts
def _parse_filters(f_strs):
filters = []
if not f_strs:
return filters
for f_str in f_strs:
if ':' in f_str:
fname, fopts = f_str.split(':', 1)
filters.append((fname, _parse_options([fopts])))
else:
filters.append((f_str, {}))
return filters
def _print_help(what, name):
    """Print the dedented docstring of the named lexer/formatter/filter.

    *what* selects the registry to search ('lexer', 'formatter' or
    'filter'); unknown names are reported on stderr.
    """
    try:
        if what == 'lexer':
            cls = find_lexer_class(name)
            print "Help on the %s lexer:" % cls.name
            print dedent(cls.__doc__)
        elif what == 'formatter':
            cls = find_formatter_class(name)
            print "Help on the %s formatter:" % cls.name
            print dedent(cls.__doc__)
        elif what == 'filter':
            cls = find_filter_class(name)
            print "Help on the %s filter:" % name
            print dedent(cls.__doc__)
    except AttributeError:
        # find_*_class() returned None for an unknown name, so the
        # attribute access above raised.
        print >>sys.stderr, "%s not found!" % what
def _print_list(what):
    """List all known lexers, formatters, filters or styles on stdout.

    *what* is the singular category name ('lexer', 'formatter',
    'filter' or 'style').
    """
    if what == 'lexer':
        print
        print "Lexers:"
        print "~~~~~~~"

        info = []
        for fullname, names, exts, _ in get_all_lexers():
            # entry: aliases, full name, optional filename patterns
            tup = (', '.join(names)+':', fullname,
                   exts and '(filenames ' + ', '.join(exts) + ')' or '')
            info.append(tup)
        info.sort()
        for i in info:
            print ('* %s\n    %s %s') % i

    elif what == 'formatter':
        print
        print "Formatters:"
        print "~~~~~~~~~~~"

        info = []
        for cls in get_all_formatters():
            doc = docstring_headline(cls)
            tup = (', '.join(cls.aliases) + ':', doc, cls.filenames and
                   '(filenames ' + ', '.join(cls.filenames) + ')' or '')
            info.append(tup)
        info.sort()
        for i in info:
            print ('* %s\n    %s %s') % i

    elif what == 'filter':
        print
        print "Filters:"
        print "~~~~~~~~"

        for name in get_all_filters():
            cls = find_filter_class(name)
            print "* " + name + ':'
            print "    %s" % docstring_headline(cls)

    elif what == 'style':
        print
        print "Styles:"
        print "~~~~~~~"

        for name in get_all_styles():
            cls = get_style_by_name(name)
            print "* " + name + ':'
            print "    %s" % docstring_headline(cls)
def main(args=sys.argv):
    """
    Main command line entry point.

    Returns a shell exit status: 0 on success, 1 on a runtime error
    and 2 on a usage error.
    """
    # pylint: disable-msg=R0911,R0912,R0915

    usage = USAGE % ((args[0],) * 6)

    try:
        popts, args = getopt.getopt(args[1:], "l:f:F:o:O:P:LS:a:N:hVHg")
    except getopt.GetoptError, err:
        print >>sys.stderr, usage
        return 2
    opts = {}
    O_opts = []
    P_opts = []
    F_opts = []
    # -O/-P/-F are repeatable, so they accumulate in lists; every
    # option also lands in ``opts`` for simple presence checks.
    for opt, arg in popts:
        if opt == '-O':
            O_opts.append(arg)
        elif opt == '-P':
            P_opts.append(arg)
        elif opt == '-F':
            F_opts.append(arg)
        opts[opt] = arg

    if not opts and not args:
        print usage
        return 0

    if opts.pop('-h', None) is not None:
        print usage
        return 0

    if opts.pop('-V', None) is not None:
        print 'Pygments version %s, (c) 2006-2008 by Georg Brandl.' % __version__
        return 0

    # handle ``pygmentize -L``
    L_opt = opts.pop('-L', None)
    if L_opt is not None:
        if opts:
            print >>sys.stderr, usage
            return 2

        # print version
        main(['', '-V'])
        if not args:
            args = ['lexer', 'formatter', 'filter', 'style']
        for arg in args:
            # accept plural forms ("lexers") by stripping a trailing 's'
            _print_list(arg.rstrip('s'))
        return 0

    # handle ``pygmentize -H``
    H_opt = opts.pop('-H', None)
    if H_opt is not None:
        if opts or len(args) != 2:
            print >>sys.stderr, usage
            return 2

        what, name = args
        if what not in ('lexer', 'formatter', 'filter'):
            print >>sys.stderr, usage
            return 2

        _print_help(what, name)
        return 0

    # parse -O options
    parsed_opts = _parse_options(O_opts)
    opts.pop('-O', None)

    # parse -P options
    for p_opt in P_opts:
        try:
            name, value = p_opt.split('=', 1)
        except ValueError:
            # no '=': treat the whole -P argument as a boolean flag
            parsed_opts[p_opt] = True
        else:
            parsed_opts[name] = value
    opts.pop('-P', None)

    # handle ``pygmentize -N``
    infn = opts.pop('-N', None)
    if infn is not None:
        try:
            lexer = get_lexer_for_filename(infn, **parsed_opts)
        except ClassNotFound, err:
            # unknown extension: fall back to plain text, as documented
            lexer = TextLexer()
        except OptionError, err:
            print >>sys.stderr, 'Error:', err
            return 1

        print lexer.aliases[0]
        return 0

    # handle ``pygmentize -S``
    S_opt = opts.pop('-S', None)
    a_opt = opts.pop('-a', None)
    if S_opt is not None:
        f_opt = opts.pop('-f', None)
        if not f_opt:
            print >>sys.stderr, usage
            return 2
        if opts or args:
            print >>sys.stderr, usage
            return 2

        try:
            parsed_opts['style'] = S_opt
            fmter = get_formatter_by_name(f_opt, **parsed_opts)
        except ClassNotFound, err:
            print >>sys.stderr, err
            return 1

        arg = a_opt or ''
        try:
            print fmter.get_style_defs(arg)
        except Exception, err:
            print >>sys.stderr, 'Error:', err
            return 1
        return 0

    # if no -S is given, -a is not allowed
    if a_opt is not None:
        print >>sys.stderr, usage
        return 2

    # parse -F options
    F_opts = _parse_filters(F_opts)
    opts.pop('-F', None)

    # select formatter
    outfn = opts.pop('-o', None)
    fmter = opts.pop('-f', None)
    if fmter:
        try:
            fmter = get_formatter_by_name(fmter, **parsed_opts)
        except (OptionError, ClassNotFound), err:
            print >>sys.stderr, 'Error:', err
            return 1

    if outfn:
        # no explicit -f: guess the formatter from the output filename
        if not fmter:
            try:
                fmter = get_formatter_for_filename(outfn, **parsed_opts)
            except (OptionError, ClassNotFound), err:
                print >>sys.stderr, 'Error:', err
                return 1
        try:
            outfile = open(outfn, 'wb')
        except Exception, err:
            print >>sys.stderr, 'Error: cannot open outfile:', err
            return 1
    else:
        if not fmter:
            fmter = TerminalFormatter(**parsed_opts)
        outfile = sys.stdout

    # select lexer
    lexer = opts.pop('-l', None)
    if lexer:
        try:
            lexer = get_lexer_by_name(lexer, **parsed_opts)
        except (OptionError, ClassNotFound), err:
            print >>sys.stderr, 'Error:', err
            return 1

    if args:
        if len(args) > 1:
            print >>sys.stderr, usage
            return 2

        infn = args[0]
        try:
            code = open(infn, 'rb').read()
        except Exception, err:
            print >>sys.stderr, 'Error: cannot read infile:', err
            return 1

        if not lexer:
            try:
                lexer = get_lexer_for_filename(infn, code, **parsed_opts)
            except ClassNotFound, err:
                if '-g' in opts:
                    # -g: guess from contents, or fall back to plain text
                    try:
                        lexer = guess_lexer(code)
                    except ClassNotFound:
                        lexer = TextLexer()
                else:
                    print >>sys.stderr, 'Error:', err
                    return 1
            except OptionError, err:
                print >>sys.stderr, 'Error:', err
                return 1

    else:
        if '-g' in opts:
            code = sys.stdin.read()
            try:
                lexer = guess_lexer(code)
            except ClassNotFound:
                lexer = TextLexer()
        elif not lexer:
            print >>sys.stderr, 'Error: no lexer name given and reading ' + \
                                'from stdin (try using -g or -l <lexer>)'
            return 2
        else:
            code = sys.stdin.read()

    # No encoding given? Use latin1 if output file given,
    # stdin/stdout encoding otherwise.
    # (This is a compromise, I'm not too happy with it...)
    if 'encoding' not in parsed_opts and 'outencoding' not in parsed_opts:
        if outfn:
            # encoding pass-through
            fmter.encoding = 'latin1'
        else:
            if sys.version_info < (3,):
                # use terminal encoding; Python 3's terminals already do that
                lexer.encoding = getattr(sys.stdin, 'encoding',
                                         None) or 'ascii'
                fmter.encoding = getattr(sys.stdout, 'encoding',
                                         None) or 'ascii'

    # ... and do it!
    try:
        # process filters
        for fname, fopts in F_opts:
            lexer.add_filter(fname, **fopts)
        highlight(code, lexer, fmter, outfile)
    except Exception, err:
        import traceback
        info = traceback.format_exception(*sys.exc_info())
        msg = info[-1].strip()
        if len(info) >= 3:
            # extract relevant file and position info
            msg += '\n   (f%s)' % info[-2].split('\n')[0].strip()[1:]
        print >>sys.stderr
        print >>sys.stderr, '*** Error while highlighting:'
        print >>sys.stderr, msg
        return 1
    return 0
| apache-2.0 |
biomassives/mezzanine | mezzanine/generic/views.py | 21 | 6280 | from __future__ import unicode_literals
from future.builtins import str
from json import dumps
from string import punctuation
from django.apps import apps
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.messages import error
from django.core.urlresolvers import reverse
from django.db.models import ObjectDoesNotExist
from django.http import HttpResponse, HttpResponseBadRequest
from django.shortcuts import redirect
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.http import require_POST
from mezzanine.conf import settings
from mezzanine.generic.forms import ThreadedCommentForm, RatingForm
from mezzanine.generic.models import Keyword
from mezzanine.utils.cache import add_cache_bypass
from mezzanine.utils.views import render, set_cookie, is_spam
from mezzanine.utils.importing import import_dotted_path
@staff_member_required
def admin_keywords_submit(request):
    """
    Adds any new given keywords from the custom keywords field in the
    admin, and returns their IDs for use when saving a model with a
    keywords field.
    """
    ids, names = [], []
    # Strip punctuation from titles, but keep dashes, which are valid.
    strip_chars = punctuation.replace("-", "")
    for raw_title in request.POST.get("text_keywords", "").split(","):
        cleaned = "".join([c for c in raw_title if c not in strip_chars]).strip()
        if not cleaned:
            continue
        keyword, _created = Keyword.objects.get_or_create_iexact(title=cleaned)
        pk = str(keyword.id)
        if pk not in ids:
            ids.append(pk)
            names.append(cleaned)
    return HttpResponse("%s|%s" % (",".join(ids), ", ".join(names)))
def initial_validation(request, prefix):
    """
    Returns the related model instance and post data to use in the
    comment/rating views below.

    Both comments and ratings have a ``prefix_ACCOUNT_REQUIRED``
    setting. If this is ``True`` and the user is unauthenticated, we
    store their post data in their session, and redirect to login with
    the view's url (also defined by the prefix arg) as the ``next``
    param. We can then check the session data once they log in,
    and complete the action authenticated.

    On successful post, we pass the related object and post data back,
    which may have come from the session, for each of the comments and
    ratings view functions to deal with as needed.
    """
    post_data = request.POST
    # e.g. prefix "comment" -> setting COMMENTS_ACCOUNT_REQUIRED and
    # session key "unauthenticated_comment".
    login_required_setting_name = prefix.upper() + "S_ACCOUNT_REQUIRED"
    posted_session_key = "unauthenticated_" + prefix
    redirect_url = ""
    if getattr(settings, login_required_setting_name, False):
        if not request.user.is_authenticated():
            # Anonymous user: stash the POST in the session and bounce
            # to login so the action can be replayed afterwards.
            request.session[posted_session_key] = request.POST
            error(request, _("You must be logged in. Please log in or "
                             "sign up to complete this action."))
            redirect_url = "%s?next=%s" % (settings.LOGIN_URL, reverse(prefix))
        elif posted_session_key in request.session:
            # Authenticated user returning from login: replay the POST
            # data stashed above (pop removes it from the session).
            post_data = request.session.pop(posted_session_key)
    if not redirect_url:
        # Resolve the target object from "app_label.model" + object_pk.
        model_data = post_data.get("content_type", "").split(".", 1)
        if len(model_data) != 2:
            return HttpResponseBadRequest()
        try:
            model = apps.get_model(*model_data)
            obj = model.objects.get(id=post_data.get("object_pk", None))
        except (TypeError, ObjectDoesNotExist, LookupError):
            # Unknown model or missing object: silently send them home.
            redirect_url = "/"
    if redirect_url:
        if request.is_ajax():
            return HttpResponse(dumps({"location": redirect_url}))
        else:
            return redirect(redirect_url)
    return obj, post_data
@require_POST
def comment(request, template="generic/comments.html", extra_context=None):
    """
    Handle a ``ThreadedCommentForm`` submission and redirect back to its
    related object.
    """
    validated = initial_validation(request, "comment")
    if isinstance(validated, HttpResponse):
        # Validation already produced a redirect/JSON response.
        return validated
    obj, post_data = validated
    form_class = import_dotted_path(settings.COMMENT_FORM_CLASS)
    form = form_class(request, obj, post_data)
    if not form.is_valid():
        if request.is_ajax() and form.errors:
            return HttpResponse(dumps({"errors": form.errors}))
        # Show errors with stand-alone comment form.
        context = {"obj": obj, "posted_comment_form": form}
        context.update(extra_context or {})
        return render(request, template, context)
    url = obj.get_absolute_url()
    if is_spam(request, form, url):
        return redirect(url)
    saved_comment = form.save(request)
    response = redirect(add_cache_bypass(saved_comment.get_absolute_url()))
    # Store commenter's details in a cookie for 90 days.
    for field in ThreadedCommentForm.cookie_fields:
        cookie_name = ThreadedCommentForm.cookie_prefix + field
        set_cookie(response, cookie_name, post_data.get(field, ""))
    return response
@require_POST
def rating(request):
    """
    Handle a ``RatingForm`` submission and redirect back to its
    related object.
    """
    validated = initial_validation(request, "rating")
    if isinstance(validated, HttpResponse):
        return validated
    obj, post_data = validated
    base_url = add_cache_bypass(obj.get_absolute_url().split("#")[0])
    response = redirect("%s#rating-%s" % (base_url, obj.id))
    form = RatingForm(request, obj, post_data)
    if not form.is_valid():
        return response
    form.save()
    if request.is_ajax():
        # Reload the object and return the rating fields as json.
        obj = obj.__class__.objects.get(id=obj.id)
        rating_name = obj.get_ratingfield_name()
        fields = {}
        for suffix in ("average", "count", "sum"):
            fields["rating_" + suffix] = getattr(
                obj, "%s_%s" % (rating_name, suffix))
        response = HttpResponse(dumps(fields))
    if form.undoing:
        cookie_ratings = set(form.previous) ^ set([form.current])
    else:
        cookie_ratings = form.previous + [form.current]
    set_cookie(response, "mezzanine-rating", ",".join(cookie_ratings))
    return response
| bsd-2-clause |
yosshy/nova | nova/api/auth.py | 13 | 5832 | # Copyright (c) 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common Auth Middleware.
"""
from oslo_config import cfg
from oslo_log import log as logging
from oslo_middleware import request_id
from oslo_serialization import jsonutils
import webob.dec
import webob.exc
from nova import context
from nova.i18n import _
from nova import wsgi
# Config options controlling API authentication; registered on the
# global CONF object at import time.
auth_opts = [
    cfg.BoolOpt('api_rate_limit',
                default=False,
                help='Whether to use per-user rate limiting for the api. '
                     'This option is only used by v2 api. Rate limiting '
                     'is removed from v2.1 api.'),
    cfg.StrOpt('auth_strategy',
               default='keystone',
               help='''
The strategy to use for auth: keystone or noauth2. noauth2 is designed for
testing only, as it does no actual credential checking. noauth2 provides
administrative credentials only if 'admin' is specified as the username.
'''),
    cfg.BoolOpt('use_forwarded_for',
                default=False,
                help='Treat X-Forwarded-For as the canonical remote address. '
                     'Only enable this if you have a sanitizing proxy.'),
]

CONF = cfg.CONF
CONF.register_opts(auth_opts)

# Module-level logger, named after this module.
LOG = logging.getLogger(__name__)
def _load_pipeline(loader, pipeline):
filters = [loader.get_filter(n) for n in pipeline[:-1]]
app = loader.get_app(pipeline[-1])
filters.reverse()
for filter in filters:
app = filter(app)
return app
def pipeline_factory(loader, global_conf, **local_conf):
    """A paste pipeline replica that keys off of auth_strategy."""
    strategy = CONF.auth_strategy
    spec = local_conf[strategy]
    if not CONF.api_rate_limit:
        # Prefer the "<strategy>_nolimit" pipeline when rate limiting
        # is disabled, falling back to the plain one.
        spec = local_conf.get(strategy + '_nolimit', spec)
    return _load_pipeline(loader, spec.split())
def pipeline_factory_v21(loader, global_conf, **local_conf):
    """A paste pipeline replica that keys off of auth_strategy."""
    spec = local_conf[CONF.auth_strategy]
    return _load_pipeline(loader, spec.split())


# NOTE(oomichi): This pipeline_factory_v3 is for passing check-grenade-dsvm.
pipeline_factory_v3 = pipeline_factory_v21
class InjectContext(wsgi.Middleware):
    """WSGI middleware that stores a fixed context under 'nova.context'."""

    def __init__(self, context, *args, **kwargs):
        # The context injected into every request's environ.
        self.context = context
        super(InjectContext, self).__init__(*args, **kwargs)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        environ = req.environ
        environ['nova.context'] = self.context
        return self.application
class NovaKeystoneContext(wsgi.Middleware):
    """Make a request context from keystone headers."""

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        # X_USER_ID is the current header; X_USER is the older spelling
        # kept as a fallback for compatibility.
        user_id = req.headers.get('X_USER')
        user_id = req.headers.get('X_USER_ID', user_id)
        if user_id is None:
            LOG.debug("Neither X_USER_ID nor X_USER found in request")
            return webob.exc.HTTPUnauthorized()

        roles = self._get_roles(req)

        if 'X_TENANT_ID' in req.headers:
            # This is the new header since Keystone went to ID/Name
            project_id = req.headers['X_TENANT_ID']
        else:
            # This is for legacy compatibility
            project_id = req.headers['X_TENANT']
        project_name = req.headers.get('X_TENANT_NAME')
        user_name = req.headers.get('X_USER_NAME')

        # Request id assigned earlier by the oslo request_id middleware.
        req_id = req.environ.get(request_id.ENV_REQUEST_ID)

        # Get the auth token
        auth_token = req.headers.get('X_AUTH_TOKEN',
                                     req.headers.get('X_STORAGE_TOKEN'))

        # Build a context, including the auth_token...
        remote_address = req.remote_addr
        if CONF.use_forwarded_for:
            remote_address = req.headers.get('X-Forwarded-For', remote_address)

        service_catalog = None
        if req.headers.get('X_SERVICE_CATALOG') is not None:
            try:
                catalog_header = req.headers.get('X_SERVICE_CATALOG')
                service_catalog = jsonutils.loads(catalog_header)
            except ValueError:
                raise webob.exc.HTTPInternalServerError(
                    _('Invalid service catalog json.'))

        # NOTE(jamielennox): This is a full auth plugin set by auth_token
        # middleware in newer versions.
        user_auth_plugin = req.environ.get('keystone.token_auth')

        ctx = context.RequestContext(user_id,
                                     project_id,
                                     user_name=user_name,
                                     project_name=project_name,
                                     roles=roles,
                                     auth_token=auth_token,
                                     remote_address=remote_address,
                                     service_catalog=service_catalog,
                                     request_id=req_id,
                                     user_auth_plugin=user_auth_plugin)

        req.environ['nova.context'] = ctx
        return self.application

    def _get_roles(self, req):
        """Get the list of roles."""
        # X_ROLES is a comma-separated list; whitespace is tolerated.
        roles = req.headers.get('X_ROLES', '')
        return [r.strip() for r in roles.split(',')]
| apache-2.0 |
sevaivanov/ring-api | tests/rest-api/0.1/unit-tests.py | 1 | 18477 | #!/usr/bin/env python3
import unittest
import requests
import json
# Base URL of the locally running ring-api REST server under test.
API_URL = 'http://127.0.0.1:8080/api/v1'
def print_json(data):
    """Pretty-print *data* as sorted, 4-space-indented JSON."""
    text = json.dumps(data, sort_keys=True, indent=4)
    print(text)
class TestAccount(unittest.TestCase):
    """Exercises the /account and /accounts endpoints.

    These tests expect a ring-api server listening at API_URL.  Some of
    them create a temporary RING account aliased "Unittest", which is
    removed again by test_account_delete / delete_test_data().
    """

    def test_accounts_get(self):
        """GET /accounts/ returns status 200."""
        print("\nGET /accounts/")
        res = requests.get(API_URL + '/accounts/')
        res = res.json()
        self.assertTrue('status' in res)
        self.assertEqual(res['status'], 200)

    def test_account_get(self):
        """GET /account/ returns a details template per account type."""
        print("\nGET /account/")
        res = requests.get(
            API_URL + '/account/',
            params={'type': 'SIP'}
        )
        res = res.json()
        self.assertTrue('status' in res)
        self.assertEqual(res['status'], 200)
        self.assertTrue('details' in res)
        details = res['details']
        self.assertTrue('Account.type' in details)
        self.assertEqual(details['Account.type'], 'SIP')

        res = requests.get(
            API_URL + '/account/',
            params={'type': 'RING'}
        )
        res = res.json()
        self.assertTrue('status' in res)
        self.assertEqual(res['status'], 200)
        self.assertTrue('details' in res)
        details = res['details']
        self.assertTrue('Account.type' in details)
        self.assertEqual(details['Account.type'], 'RING')

        # An unknown account type is rejected with a 400.
        res = requests.get(
            API_URL + '/account/',
            params={'type': 'stuff'}
        )
        res = res.json()
        self.assertTrue('status' in res)
        self.assertEqual(res['status'], 400)
        self.assertFalse('details' in res)

    def test_account_post(self):
        """POST /account/ creates a RING account aliased "Unittest"."""
        print("\nPOST /account/")
        req = requests.get(API_URL + '/account/?type=RING')
        req = req.json()
        req['details']['Account.alias'] = "Unittest"
        # BUG FIX: this POST previously went to
        # "http://localhost:8080/account/", bypassing the /api/v1 prefix
        # that every other request in this suite uses.
        res = requests.post(
            API_URL + '/account/',
            data=json.dumps(req)
        )
        res = res.json()
        self.assertTrue('account_id' in res)
        self.assertTrue('status' in res)
        self.test_RING_account = res['account_id']

    def test_account_details_get(self):
        """GET default and volatile details for every account."""
        print("\nGET /accounts/<account_id>/details")
        res = requests.get(API_URL + '/accounts/')
        res = res.json()
        accounts = res['accounts']
        for account in accounts:
            res = requests.get(
                API_URL + '/accounts/' + account + '/details/',
                params={'type': 'default'}
            )
            res = res.json()
            self.assertEqual(res['status'], 200)
            self.assertTrue('details' in res)
        for account in accounts:
            res = requests.get(
                API_URL + '/accounts/' + account + '/details/',
                params={'type': 'volatile'}
            )
            res = res.json()
            self.assertEqual(res['status'], 200)
            self.assertTrue('details' in res)

    def test_account_details_put(self):
        """PUT each account's default details back unchanged."""
        print("\nPUT /accounts/<account_id>/details")
        res = requests.get(API_URL + '/accounts/')
        res = res.json()
        accounts = res['accounts']
        for account in accounts:
            res = requests.get(
                API_URL + '/accounts/' + account + '/details/',
                params={'type': 'default'}
            )
            res = res.json()
            self.assertEqual(res['status'], 200)
            details = res['details']
            res = requests.put(
                API_URL + '/accounts/' + account + '/details/',
                data=json.dumps({'details': details})
            )
            res = res.json()
            self.assertEqual(res['status'], 200)

    def test_account_delete(self):
        """DELETE any account aliased "Unittest" (suite cleanup)."""
        print("\nDELETE /accounts/<account_id>")
        res = requests.get(API_URL + '/accounts/')
        res = res.json()
        accounts = res['accounts']
        for account in accounts:
            res = requests.get(
                API_URL + '/accounts/' + account + '/details/',
                params={'type': 'default'}
            )
            res = res.json()
            self.assertEqual(res['status'], 200)
            if (res['details']['Account.alias'] == "Unittest"):
                res = requests.delete(
                    API_URL + '/accounts/' + account + '/'
                )
                res = res.json()
                self.assertEqual(res['status'], 200)

    def test_account_ciphers_get(self):
        """GET the supported ciphers of every account."""
        print("\nGET /accounts/<account_id>/ciphers/")
        res = requests.get(API_URL + '/accounts/')
        res = res.json()
        accounts = res['accounts']
        for account in accounts:
            res = requests.get(
                API_URL + '/accounts/' + account + '/ciphers/'
            )
            res = res.json()
            self.assertEqual(res['status'], 200)
            self.assertTrue('ciphers' in res)

    def test_account_codecs_get(self):
        """GET the active codec list of every account."""
        print("\nGET /accounts/<account_id>/codecs/")
        res = requests.get(API_URL + '/accounts/')
        res = res.json()
        accounts = res['accounts']
        for account in accounts:
            res = requests.get(
                API_URL + '/accounts/' + account + '/codecs/'
            )
            res = res.json()
            self.assertEqual(res['status'], 200)
            self.assertTrue('codecs' in res)

    def test_account_codecs_put(self):
        """PUT each account's codec list back unchanged."""
        print("\nPUT /accounts/<account_id>/codecs/")
        res = requests.get(API_URL + '/accounts/')
        res = res.json()
        accounts = res['accounts']
        for account in accounts:
            res = requests.get(
                API_URL + '/accounts/' + account + '/codecs/'
            )
            res = res.json()
            self.assertEqual(res['status'], 200)
            self.assertTrue('codecs' in res)
            codecs = res['codecs']
            res = requests.put(
                API_URL + '/accounts/' + account + '/codecs/',
                data=json.dumps({'codecs': codecs})
            )
            res = res.json()
            self.assertEqual(res['status'], 200)
            self.assertTrue('codecs' in res)

    def test_account_codec_details_get(self):
        """GET the details of every codec of every account."""
        print("\nGET /accounts/<account_id>/codecs/<codec_id>")
        res = requests.get(API_URL + '/accounts/')
        res = res.json()
        accounts = res['accounts']
        for account in accounts:
            res = requests.get(
                API_URL + '/accounts/' + account + '/codecs/'
            )
            res = res.json()
            self.assertEqual(res['status'], 200)
            codecs = res['codecs']
            for codec in codecs:
                res = requests.get(
                    API_URL + '/accounts/' +
                    account +
                    '/codecs/' +
                    str(codec) + '/'
                )
                res = res.json()
                self.assertEqual(res['status'], 200)
                self.assertTrue('details' in res)

    def test_account_codec_details_put(self):
        """PUT each codec's details back unchanged."""
        print("\nPUT /accounts/<account_id>/codecs/<codec_id>")
        res = requests.get(API_URL + '/accounts/')
        res = res.json()
        accounts = res['accounts']
        for account in accounts:
            res = requests.get(
                API_URL + '/accounts/' + account + '/codecs/'
            )
            res = res.json()
            self.assertEqual(res['status'], 200)
            codecs = res['codecs']
            for codec in codecs:
                res = requests.get(
                    API_URL + '/accounts/' +
                    account +
                    '/codecs/' +
                    str(codec) + '/'
                )
                res = res.json()
                self.assertEqual(res['status'], 200)
                details = res['details']
                # BUG FIX: this previously issued a second GET, so the
                # PUT endpoint was never actually exercised.
                res = requests.put(
                    API_URL + '/accounts/' +
                    account +
                    '/codecs/' +
                    str(codec) + '/',
                    data=json.dumps({'details': details})
                )
                res = res.json()
                self.assertEqual(res['status'], 200)
                self.assertTrue('details' in res)

    def test_account_certificates_get(self):
        """GET pin/validate actions on a known certificate id."""
        print("\nGET /accounts/<account_id>/certificates/<cert_id>")
        res = requests.get(API_URL + '/accounts/')
        res = res.json()
        # (removed an unused "accounts = []" local)
        for account in res['accounts']:
            res = requests.get(
                API_URL + '/accounts/' + account + '/details/',
                {'type': 'default'}
            )
            res = res.json()
            self.assertEqual(res['status'], 200)
            if (res['details']['Account.alias'] == "Unittest"):
                res = requests.get(
                    API_URL + '/accounts/' +
                    account +
                    '/certificates/fa5c04850341c00ba074518db52ee6745bb49bc1/',
                    params={'action': 'pin'}
                )
                res = res.json()
                self.assertEqual(res['status'], 200)
                self.assertEqual(res['success'], True)
                res = requests.get(
                    API_URL + '/accounts/' +
                    account +
                    '/certificates/fa5c04850341c00ba074518db52ee6745bb49bc1/',
                    params={'action': 'validate'}
                )
                res = res.json()
                self.assertEqual(res['status'], 200)
                self.assertTrue('certificates' in res)

    def test_account_certificates_put(self):
        """PUT a trust status on a known certificate id."""
        print("\nPUT /accounts/<account_id>/certificates/<cert_id>")
        res = requests.get(API_URL + '/accounts/')
        res = res.json()
        # (removed an unused "accounts = []" local)
        for account in res['accounts']:
            res = requests.get(
                API_URL + '/accounts/' + account + '/details/',
                params={'type': 'default'}
            )
            res = res.json()
            self.assertEqual(res['status'], 200)
            if (res['details']['Account.alias'] == "Unittest"):
                res = requests.put(
                    API_URL + '/accounts/' +
                    account +
                    '/certificates/fa5c04850341c00ba074518db52ee6745bb49bc1/',
                    data=json.dumps({'status': 'ALLOWED'})
                )
                res = res.json()
                self.assertEqual(res['status'], 200)
                self.assertEqual(res['success'], True)
class TestCodec(unittest.TestCase):
    """Exercises the /codecs endpoint."""

    def test_codecs(self):
        """GET /codecs/ lists the available codecs."""
        print("\nGET /codecs/")
        payload = requests.get(API_URL + '/codecs/').json()
        self.assertEqual(payload['status'], 200)
        self.assertTrue('codecs' in payload)
class TestCrypto(unittest.TestCase):
    """Exercises the /crypto/tls endpoint."""

    def test_crypto_tls(self):
        """GET /crypto/tls/ returns TLS settings and supported methods."""
        print("\nGET /crypto/tls/")
        url = API_URL + '/crypto/tls/'
        payload = requests.get(url, {'type': 'settings'}).json()
        self.assertTrue('settings' in payload)
        self.assertTrue('status' in payload)
        payload = requests.get(url, {'type': 'method'}).json()
        self.assertTrue('methods' in payload)
        self.assertTrue('status' in payload)
class TestCertificates(unittest.TestCase):
    # Exercises the global /certificates endpoints.
    def test_certificates_get(self):
        # GET /certificates/ lists the pinned certificate ids.
        print("\nGET /certificates/")
        res = requests.get(API_URL + '/certificates/')
        res = res.json()
        self.assertEqual(res['status'], 200)
        self.assertTrue('pinned' in res)
    def test_certificate_get(self):
        # GET the details of every pinned certificate.
        print("\nGET /certificate/<cert_id>/")
        res = requests.get(API_URL + '/certificates/')
        res = res.json()
        pinned = res['pinned']
        for certificate in pinned:
            res = requests.get(
                API_URL + '/certificates/' + certificate + '/'
            )
            res = res.json()
            self.assertEqual(res['status'], 200)
            self.assertTrue('details' in res)
    def test_certificate_post(self):
        # POST a local pin, then an unpin, for every pinned certificate.
        print("\nPOST /certificate/<cert_id>/")
        res = requests.get(API_URL + '/certificates/')
        res = res.json()
        pinned = res['pinned']
        for certificate in pinned:
            res = requests.post(
                API_URL + '/certificates/' + certificate + '/',
                data=json.dumps({'action': 'pin', 'local': 'True'})
            )
            res = res.json()
            self.assertEqual(res['status'], 200)
            self.assertTrue('action' in res)
            res = requests.post(
                API_URL + '/certificates/' + certificate + '/',
                data=json.dumps({'action': 'unpin'})
            )
            res = res.json()
            self.assertEqual(res['status'], 200)
            self.assertTrue('action' in res)
class TestAudio(unittest.TestCase):
    """Exercises the /audio endpoints."""

    def test_audio_plugins_get(self):
        """GET /audio/plugins/ lists the available audio plugins."""
        print("\nGET /audio/plugins/")
        payload = requests.get(API_URL + '/audio/plugins/').json()
        self.assertEqual(payload['status'], 200)
        self.assertTrue('plugins' in payload)
class TestVideo(unittest.TestCase):
    # Exercises the /video endpoints.
    def test_video_device_get(self):
        # GET the full device list, then the default device.
        print("\nGET /video/devices/")
        res = requests.get(
            API_URL + '/video/devices/',
            {'type': 'all'}
        )
        res = res.json()
        self.assertEqual(res['status'], 200)
        self.assertTrue('devices' in res)
        res = requests.get(
            API_URL + '/video/devices/',
            {'type': 'default'}
        )
        res = res.json()
        self.assertEqual(res['status'], 200)
        self.assertTrue('default' in res)
    def test_video_device_put(self):
        # Re-assign the current default device as the default.
        print("\nPUT /video/devices/")
        res = requests.get(
            API_URL + '/video/devices/',
            {'type': 'default'}
        )
        res = res.json()
        default = res['default']
        res = requests.put(
            API_URL + '/video/devices/',
            params={'type': 'default'},
            data=json.dumps({'device': default})
        )
        res = res.json()
    def test_video_settings_get(self):
        # GET the settings of the default device.
        print("\nGET /video/<device_name>/settings/")
        res = requests.get(
            API_URL + '/video/devices/',
            {'type': 'default'}
        )
        res = res.json()
        default = res['default']
        res = requests.get(
            API_URL + '/video/' + default + '/settings/'
        )
        res = res.json()
        self.assertEqual(res['status'], 200)
        self.assertTrue('settings' in res)
    def test_video_settings_put(self):
        # PUT the default device's settings back unchanged.
        print("\nPUT /video/<device_name>/settings/")
        res = requests.get(
            API_URL + '/video/devices/',
            {'type': 'default'}
        )
        res = res.json()
        default = res['default']
        res = requests.get(
            API_URL + '/video/' + default + '/settings/'
        )
        res = res.json()
        settings = res['settings']
        res = requests.put(
            API_URL + '/video/' + default + '/settings/',
            data=json.dumps({'settings': settings})
        )
        res = res.json()
        self.assertEqual(res['status'], 200)
        self.assertTrue('settings' in res)
    def test_video_camera_get(self):
        # GET the camera status.
        print("\nGET /video/camera/")
        res = requests.get(API_URL + '/video/camera/')
        res = res.json()
        self.assertEqual(res['status'], 200)
        self.assertTrue('cameraStatus' in res)
    def test_video_camera_put(self):
        # Start, then stop, the camera.
        print("\nPUT /video/camera/")
        res = requests.put(
            API_URL + '/video/camera/',
            data=json.dumps({'action': 'start'})
        )
        res = res.json()
        self.assertEqual(res['status'], 200)
        self.assertTrue('cameraStatus' in res)
        res = requests.put(
            API_URL + '/video/camera/',
            data=json.dumps({'action': 'stop'})
        )
        res = res.json()
        self.assertEqual(res['status'], 200)
        self.assertTrue('cameraStatus' in res)
def TestOrder():
    """Return the suite with tests in their required execution order.

    Account creation side effects are shared between tests, so the
    default alphabetical ordering cannot be used.
    """
    ordered = [
        (TestAccount, 'test_account_get'),
        (TestAccount, 'test_accounts_get'),
        # (TestAccount, 'test_account_post'),  # disabled upstream
        (TestAccount, 'test_account_details_get'),
        (TestAccount, 'test_account_details_put'),
        (TestAccount, 'test_account_ciphers_get'),
        (TestAccount, 'test_account_codecs_get'),
        (TestAccount, 'test_account_codecs_put'),
        (TestAccount, 'test_account_codec_details_get'),
        (TestAccount, 'test_account_codec_details_put'),
        (TestAccount, 'test_account_certificates_get'),
        (TestAccount, 'test_account_certificates_put'),
        (TestCodec, 'test_codecs'),
        (TestCrypto, 'test_crypto_tls'),
        (TestCertificates, 'test_certificates_get'),
        (TestCertificates, 'test_certificate_get'),
        (TestCertificates, 'test_certificate_post'),
        (TestAudio, 'test_audio_plugins_get'),
        (TestVideo, 'test_video_device_get'),
        (TestVideo, 'test_video_device_put'),
        (TestVideo, 'test_video_settings_get'),
        (TestVideo, 'test_video_settings_put'),
        (TestVideo, 'test_video_camera_get'),
        (TestVideo, 'test_video_camera_put'),
        (TestAccount, 'test_account_delete'),
    ]
    suite = unittest.TestSuite()
    for case_cls, method_name in ordered:
        suite.addTest(case_cls(method_name))
    return suite
def delete_test_data():
    """Delete every account whose alias marks it as unit-test data."""
    print("\nFlushing all remaining data")
    account_ids = requests.get(API_URL + '/accounts/').json()['accounts']
    for account_id in account_ids:
        details = requests.get(
            API_URL + '/accounts/' + account_id + '/details/',
            {'type': 'default'}
        ).json()['details']
        # Only remove accounts carrying the test alias; anything else
        # is left untouched.
        if details['Account.alias'] == "Unittest":
            requests.delete(
                API_URL + '/accounts/' + account_id + '/'
            ).json()
if __name__ == '__main__':
    # failfast=True stops the run at the first failure; afterwards
    # delete_test_data() flushes any leftover test accounts.
    unittest.TextTestRunner(failfast=True).run(TestOrder())
    delete_test_data()
| gpl-3.0 |
simonluijk/pyFilesystem | fs/remote.py | 2 | 26660 | """
fs.remote
=========
Utilities for interfacing with remote filesystems
This module provides reusable utility functions that can be used to construct
FS subclasses interfacing with a remote filesystem. These include:
* RemoteFileBuffer: a file-like object that locally buffers the contents of
a remote file, writing them back on flush() or close().
* ConnectionManagerFS: a WrapFS subclass that tracks the connection state
of a remote FS, and allows client code to wait for
a connection to be re-established.
* CacheFS: a WrapFS subclass that caches file and directory meta-data in
memory, to speed access to a remote FS.
"""
from __future__ import with_statement
import time
import stat as statinfo
from errno import EINVAL
import fs.utils
from fs.base import threading, FS
from fs.wrapfs import WrapFS, wrap_fs_methods
from fs.wrapfs.lazyfs import LazyFS
from fs.path import *
from fs.errors import *
from fs.local_functools import wraps
from fs.filelike import StringIO, SpooledTemporaryFile, FileWrapper
from fs import SEEK_SET, SEEK_CUR, SEEK_END
# Unique sentinel used to distinguish "no default supplied" from None.
# (Name is a long-standing misspelling of "sentinel"; kept because it is
# referenced throughout this module.)
_SENTINAL = object()
class RemoteFileBuffer(FileWrapper):
    """File-like object providing buffer for local file operations.
    Instances of this class manage a local tempfile buffer corresponding
    to the contents of a remote file. All reads and writes happen locally,
    with the content being copied to the remote file only on flush() or
    close(). Writes to the remote file are performed using the setcontents()
    method on the owning FS object.
    The intended use-case is for a remote filesystem (e.g. S3FS) to return
    instances of this class from its open() method, and to provide the
    file-uploading logic in its setcontents() method, as in the following
    pseudo-code::
        def open(self,path,mode="r"):
            rf = self._get_remote_file(path)
            return RemoteFileBuffer(self,path,mode,rf)
        def setcontents(self,path,file):
            self._put_remote_file(path,file)
    The contents of the remote file are read into the buffer on-demand.
    """
    # Buffer spills from RAM to disk beyond this many bytes
    # (passed to SpooledTemporaryFile below).
    max_size_in_memory = 1024 * 8
    def __init__(self, fs, path, mode, rfile=None, write_on_flush=True):
        """RemoteFileBuffer constructor.
        The owning filesystem, path and mode must be provided. If the
        optional argument 'rfile' is provided, it must be a read()-able
        object or a string containing the initial file contents.
        """
        wrapped_file = SpooledTemporaryFile(max_size=self.max_size_in_memory)
        self.fs = fs
        self.path = path
        self.write_on_flush = write_on_flush
        self._changed = False
        self._readlen = 0 # How many bytes already loaded from rfile
        self._rfile = None # Reference to remote file object
        self._eof = False # Reached end of rfile?
        # Reuse the lock class of the owning FS when it exposes one, so the
        # buffer uses the same locking flavour as its filesystem.
        if getattr(fs,"_lock",None) is not None:
            self._lock = fs._lock.__class__()
        else:
            self._lock = threading.RLock()
        if "r" in mode or "+" in mode or "a" in mode:
            if rfile is None:
                # File was just created, force to write anything
                self._changed = True
                self._eof = True
            # NOTE(review): when rfile is None the next branch wraps
            # unicode(None) == u"None" in a StringIO; it is never read
            # because _eof is already True, but this looks unintentional
            # -- confirm upstream.
            if not hasattr(rfile, "read"):
                rfile = StringIO(unicode(rfile))
            self._rfile = rfile
        else:
            # Do not use remote file object
            self._eof = True
            self._rfile = None
            if rfile is not None and hasattr(rfile,"close"):
                rfile.close()
        super(RemoteFileBuffer,self).__init__(wrapped_file,mode)
        # FIXME: What if mode with position on eof?
        if "a" in mode:
            # Not good enough...
            self.seek(0, SEEK_END)
    def __del__(self):
        # Don't try to close a partially-constructed file
        if "_lock" in self.__dict__:
            if not self.closed:
                try:
                    self.close()
                except FSError:
                    pass
    def _write(self,data,flushing=False):
        # Write through the local buffer; remote data that the write would
        # overwrite must be pulled in first so _readlen stays consistent.
        with self._lock:
            # Do we need to discard info from the buffer?
            toread = len(data) - (self._readlen - self.wrapped_file.tell())
            if toread > 0:
                if not self._eof:
                    self._fillbuffer(toread)
                else:
                    self._readlen += toread
            self._changed = True
            self.wrapped_file.write(data)
    def _read_remote(self, length=None):
        """Read data from the remote file into the local buffer."""
        # Pull data in 256KB chunks; length=None means "until EOF".
        chunklen = 1024 * 256
        bytes_read = 0
        while True:
            toread = chunklen
            if length is not None and length - bytes_read < chunklen:
                toread = length - bytes_read
            if not toread:
                break
            data = self._rfile.read(toread)
            datalen = len(data)
            if not datalen:
                self._eof = True
                break
            bytes_read += datalen
            self.wrapped_file.write(data)
            if datalen < toread:
                # We reached EOF,
                # no more reads needed
                self._eof = True
                break
        # Once EOF is seen the remote handle is no longer needed.
        if self._eof and self._rfile is not None:
            self._rfile.close()
        self._readlen += bytes_read
    def _fillbuffer(self, length=None):
        """Fill the local buffer, leaving file position unchanged.
        This method is used for on-demand loading of data from the remote file
        into the buffer. It reads 'length' bytes from rfile and writes them
        into the buffer, seeking back to the original file position.
        """
        curpos = self.wrapped_file.tell()
        if length == None:
            if not self._eof:
                # Read all data and we didn't reached EOF
                # Merge endpos - tell + bytes from rfile
                self.wrapped_file.seek(0, SEEK_END)
                self._read_remote()
                self._eof = True
                self.wrapped_file.seek(curpos)
        elif not self._eof:
            if curpos + length > self._readlen:
                # Read all data and we didn't reached EOF
                # Load endpos - tell() + len bytes from rfile
                toload = length - (self._readlen - curpos)
                self.wrapped_file.seek(0, SEEK_END)
                self._read_remote(toload)
                self.wrapped_file.seek(curpos)
    def _read(self, length=None):
        # NOTE: relies on Python 2 ordering where None < 0 is False-safe:
        # a None length never enters this branch, a negative length is
        # normalised to "read everything".
        if length < 0:
            length = None
        with self._lock:
            self._fillbuffer(length)
            data = self.wrapped_file.read(length if length != None else -1)
            if not data:
                # filelike convention: None signals EOF.
                data = None
            return data
    def _seek(self,offset,whence=SEEK_SET):
        with self._lock:
            if not self._eof:
                # Count absolute position of seeking
                if whence == SEEK_SET:
                    abspos = offset
                elif whence == SEEK_CUR:
                    abspos = offset + self.wrapped_file.tell()
                elif whence == SEEK_END:
                    # Seeking relative to the end requires the full file.
                    abspos = None
                else:
                    raise IOError(EINVAL, 'Invalid whence')
                if abspos != None:
                    toread = abspos - self._readlen
                    if toread > 0:
                        self.wrapped_file.seek(self._readlen)
                        self._fillbuffer(toread)
                else:
                    self.wrapped_file.seek(self._readlen)
                    self._fillbuffer()
            self.wrapped_file.seek(offset, whence)
    def _truncate(self,size):
        with self._lock:
            if not self._eof and self._readlen < size:
                # Read the rest of file
                self._fillbuffer(size - self._readlen)
                # Lock rfile
                self._eof = True
            elif self._readlen >= size:
                # Crop rfile metadata
                self._readlen = size if size != None else 0
                # Lock rfile
                self._eof = True
            self.wrapped_file.truncate(size)
            self._changed = True
            self.flush()
            # The buffer is now authoritative (_eof was forced True above),
            # so the remote handle can be closed.
            if self._rfile is not None:
                self._rfile.close()
    def flush(self):
        with self._lock:
            self.wrapped_file.flush()
            # Pushing back to the remote side on every flush is optional;
            # see the write_on_flush constructor argument.
            if self.write_on_flush:
                self._setcontents()
    def _setcontents(self):
        if not self._changed:
            # Nothing changed, no need to write data back
            return
        # If not all data loaded, load until eof
        if not self._eof:
            self._fillbuffer()
        # Only writable modes upload; read-only buffers never call
        # fs.setcontents().
        if "w" in self.mode or "a" in self.mode or "+" in self.mode:
            pos = self.wrapped_file.tell()
            self.wrapped_file.seek(0)
            self.fs.setcontents(self.path, self.wrapped_file)
            self.wrapped_file.seek(pos)
    def close(self):
        with self._lock:
            if not self.closed:
                # Final upload (if changed), then release the remote handle
                # and the local tempfile.
                self._setcontents()
                if self._rfile is not None:
                    self._rfile.close()
                super(RemoteFileBuffer,self).close()
class ConnectionManagerFS(LazyFS):
    """FS wrapper providing simple connection management of a remote FS.
    The ConnectionManagerFS class is designed to wrap a remote FS object
    and provide some convenience methods for dealing with its remote
    connection state.
    The boolean attribute 'connected' indicates whether the remote filesystem
    has an active connection, and is initially True. If any of the remote
    filesystem methods raises a RemoteConnectionError, 'connected' will
    switch to False and remain so until a successful remote method call.
    Application code can use the method 'wait_for_connection' to block
    until the connection is re-established. Currently this reconnection
    is checked by a simple polling loop; eventually more sophisticated
    operating-system integration may be added.
    Since some remote FS classes can raise RemoteConnectionError during
    initialization, this class makes use of lazy initialization. The
    remote FS can be specified as an FS instance, an FS subclass, or a
    (class,args) or (class,args,kwds) tuple. For example::
        >>> fs = ConnectionManagerFS(MyRemoteFS("http://www.example.com/"))
        Traceback (most recent call last):
        ...
        RemoteConnectionError: couldn't connect to "http://www.example.com/"
        >>> fs = ConnectionManagerFS((MyRemoteFS,["http://www.example.com/"]))
        >>> fs.connected
        False
        >>>
    """
    # Seconds between reconnection probes; overridable per instance.
    poll_interval = 1
    def __init__(self,wrapped_fs,poll_interval=None,connected=True):
        super(ConnectionManagerFS,self).__init__(wrapped_fs)
        if poll_interval is not None:
            self.poll_interval = poll_interval
        # Condition used to wake threads blocked in wait_for_connection().
        self._connection_cond = threading.Condition()
        self._poll_thread = None
        # Event used to interrupt the poll thread's sleep on close().
        self._poll_sleeper = threading.Event()
        self.connected = connected
    def setcontents(self, path, data, chunk_size=64*1024):
        # Bypass LazyFS indirection and delegate directly.
        return self.wrapped_fs.setcontents(path, data, chunk_size=chunk_size)
    def __getstate__(self):
        # Thread/synchronisation primitives cannot be pickled; drop them
        # here and rebuild them in __setstate__.
        state = super(ConnectionManagerFS,self).__getstate__()
        del state["_connection_cond"]
        del state["_poll_sleeper"]
        state["_poll_thread"] = None
        return state
    def __setstate__(self,state):
        super(ConnectionManagerFS,self).__setstate__(state)
        self._connection_cond = threading.Condition()
        self._poll_sleeper = threading.Event()
    def wait_for_connection(self,timeout=None):
        # Block until the background poll thread reports a live connection
        # (or the timeout elapses). The poll thread is started lazily.
        self._connection_cond.acquire()
        try:
            if not self.connected:
                if not self._poll_thread:
                    target = self._poll_connection
                    self._poll_thread = threading.Thread(target=target)
                    self._poll_thread.daemon = True
                    self._poll_thread.start()
                self._connection_cond.wait(timeout)
        finally:
            self._connection_cond.release()
    def _poll_connection(self):
        """Background loop: probe the remote FS until it answers."""
        while not self.connected and not self.closed:
            try:
                # isdir("") is used as a cheap "ping" of the remote side.
                self.wrapped_fs.isdir("")
            except RemoteConnectionError:
                self._poll_sleeper.wait(self.poll_interval)
                self._poll_sleeper.clear()
            except FSError:
                # Any other FS error still proves the remote side answered.
                break
            else:
                break
        self._connection_cond.acquire()
        try:
            if not self.closed:
                self.connected = True
            self._poll_thread = None
            self._connection_cond.notifyAll()
        finally:
            self._connection_cond.release()
    def close(self):
        if not self.closed:
            try:
                super(ConnectionManagerFS,self).close()
            except (RemoteConnectionError,):
                pass
            # Wake and join the poll thread so it does not outlive us.
            if self._poll_thread:
                self.connected = True
                self._poll_sleeper.set()
                self._poll_thread.join()
                self._poll_thread = None
def _ConnectionManagerFS_method_wrapper(func):
    """Decorate an FS method so it tracks remote connection state.

    A RemoteConnectionError marks the filesystem as disconnected before
    propagating. Any other FSError -- and any successful call -- proves
    that the remote side answered, so 'connected' is switched back on.
    """
    @wraps(func)
    def wrapper(self, *args, **kwds):
        try:
            outcome = func(self, *args, **kwds)
        except RemoteConnectionError:
            self.connected = False
            raise
        except FSError:
            self.connected = True
            raise
        self.connected = True
        return outcome
    return wrapper

# Apply the connection-tracking wrapper to every FS method of the class.
wrap_fs_methods(_ConnectionManagerFS_method_wrapper, ConnectionManagerFS)
class CachedInfo(object):
    """Info objects stored in cache for CacheFS.

    Each entry records a raw info dict for one path, a creation timestamp
    (used for cache expiry) and two flags describing how complete the
    cached data is.
    """
    __slots__ = ("timestamp","info","has_full_info","has_full_children")
    def __init__(self,info=None,has_full_info=True,has_full_children=False):
        # 'info' previously defaulted to a shared mutable {} (classic
        # mutable-default bug); give each instance its own dict instead.
        if info is None:
            info = {}
        self.timestamp = time.time()
        self.info = info
        self.has_full_info = has_full_info
        self.has_full_children = has_full_children
    def clone(self):
        """Return a new CachedInfo carrying the same state as this one."""
        new_ci = self.__class__()
        new_ci.update_from(self)
        return new_ci
    def update_from(self,other):
        """Copy all cached state (including the timestamp) from *other*."""
        self.timestamp = other.timestamp
        self.info = other.info
        self.has_full_info = other.has_full_info
        self.has_full_children = other.has_full_children
    @classmethod
    def new_file_stub(cls):
        """Return a minimal placeholder entry for a regular file."""
        # 0o700 (owner rwx) replaces the legacy literal 0700, which is a
        # syntax error on Python 3; the value is unchanged on Python 2.6+.
        # NOTE(review): stat-style mode data is usually keyed "st_mode",
        # not "info" -- confirm against what fs.utils.isdir/isfile expect.
        info = {"info" : 0o700 | statinfo.S_IFREG}
        return cls(info,has_full_info=False)
    @classmethod
    def new_dir_stub(cls):
        """Return a minimal placeholder entry for a directory."""
        info = {"info" : 0o700 | statinfo.S_IFDIR}
        return cls(info,has_full_info=False)
class CacheFSMixin(FS):
    """Simple FS mixin to cache meta-data of a remote filesystems.
    This FS mixin implements a simplistic cache that can help speed up
    access to a remote filesystem. File and directory meta-data is cached
    but the actual file contents are not.
    If you want to add caching to an existing FS object, use the CacheFS
    class instead; it's an easy-to-use wrapper rather than a mixin.
    This mixin class is provided for FS implementors who want to use
    caching internally in their own classes.
    FYI, the implementation of CacheFS is this:
        class CacheFS(CacheFSMixin,WrapFS):
            pass
    """
    def __init__(self,*args,**kwds):
        """CacheFSMixin constructor.
        The optional keyword argument 'cache_timeout' specifies the cache
        timeout in seconds. The default timeout is 1 second. To prevent
        cache entries from ever timing out, set it to None.
        The optional keyword argument 'max_cache_size' specifies the maximum
        number of entries to keep in the cache. To allow the cache to grow
        without bound, set it to None. The default is 1000.
        """
        self.cache_timeout = kwds.pop("cache_timeout",1)
        self.max_cache_size = kwds.pop("max_cache_size",1000)
        # Maps path -> CachedInfo; guarded by __cache_lock.
        self.__cache = PathMap()
        self.__cache_size = 0
        self.__cache_lock = threading.RLock()
        super(CacheFSMixin,self).__init__(*args,**kwds)
    def clear_cache(self,path=""):
        # Drop cached entries under 'path', then chain to any super-class
        # clear_cache if one exists.
        with self.__cache_lock:
            self.__cache.clear(path)
        try:
            scc = super(CacheFSMixin,self).clear_cache
        except AttributeError:
            pass
        else:
            scc()
    def __getstate__(self):
        # Locks and caches are not picklable; the private attributes show
        # up under their name-mangled keys, hence the _CacheFSMixin prefix.
        state = super(CacheFSMixin,self).__getstate__()
        state.pop("_CacheFSMixin__cache",None)
        state.pop("_CacheFSMixin__cache_size",None)
        state.pop("_CacheFSMixin__cache_lock",None)
        return state
    def __setstate__(self,state):
        super(CacheFSMixin,self).__setstate__(state)
        # Rebuild the (empty) cache and its lock after unpickling.
        self.__cache = PathMap()
        self.__cache_size = 0
        self.__cache_lock = threading.RLock()
    def __get_cached_info(self,path,default=_SENTINAL):
        """Return the CachedInfo for 'path', expiring stale entries.

        Raises KeyError on a miss (or expiry) unless 'default' is given.
        """
        try:
            info = self.__cache[path]
            if self.cache_timeout is not None:
                now = time.time()
                if info.timestamp < (now - self.cache_timeout):
                    with self.__cache_lock:
                        self.__expire_from_cache(path)
                    raise KeyError
            return info
        except KeyError:
            if default is not _SENTINAL:
                return default
            raise
    def __set_cached_info(self,path,new_ci,old_ci=None):
        """Insert or refresh a cache entry, evicting if the cache is full."""
        was_room = True
        with self.__cache_lock:
            # Free up some room in the cache
            if self.max_cache_size is not None and old_ci is None:
                while self.__cache_size >= self.max_cache_size:
                    try:
                        to_del = iter(self.__cache).next()
                    except StopIteration:
                        break
                    else:
                        was_room = False
                        self.__expire_from_cache(to_del)
            # Atomically add to the cache.
            # If there's a race, newest information wins
            ci = self.__cache.setdefault(path,new_ci)
            if ci is new_ci:
                self.__cache_size += 1
            else:
                if old_ci is None or ci is old_ci:
                    if ci.timestamp < new_ci.timestamp:
                        ci.update_from(new_ci)
            return was_room
    def __expire_from_cache(self,path):
        # Remove 'path' and flag every ancestor as no longer having a
        # complete child listing cached.
        del self.__cache[path]
        self.__cache_size -= 1
        for ancestor in recursepath(path):
            try:
                self.__cache[ancestor].has_full_children = False
            except KeyError:
                pass
    def open(self,path,mode="r",**kwds):
        # Try to validate the entry using the cached info
        try:
            ci = self.__get_cached_info(path)
        except KeyError:
            if path in ("","/"):
                raise ResourceInvalidError(path)
            try:
                ppath = dirname(path)
                pci = self.__get_cached_info(ppath)
            except KeyError:
                pass
            else:
                if not fs.utils.isdir(super(CacheFSMixin,self),ppath,pci.info):
                    raise ResourceInvalidError(path)
                # A complete child listing proves the file does not exist.
                if pci.has_full_children:
                    raise ResourceNotFoundError(path)
        else:
            if not fs.utils.isfile(super(CacheFSMixin,self),path,ci.info):
                raise ResourceInvalidError(path)
        f = super(CacheFSMixin,self).open(path,mode,**kwds)
        if "w" in mode or "a" in mode or "+" in mode:
            # Writable handles invalidate the cache now, and again on every
            # write via the _CacheInvalidatingFile wrapper.
            with self.__cache_lock:
                self.__cache.clear(path)
            f = self._CacheInvalidatingFile(self,path,f,mode)
        return f
    class _CacheInvalidatingFile(FileWrapper):
        """File wrapper that clears the owner's cache entry on mutation."""
        def __init__(self,owner,path,wrapped_file,mode=None):
            self.path = path
            sup = super(CacheFSMixin._CacheInvalidatingFile,self)
            sup.__init__(wrapped_file,mode)
            self.owner = owner
        def _write(self,string,flushing=False):
            # Access to the owner's private attributes goes through their
            # name-mangled forms (_CacheFSMixin__*).
            with self.owner._CacheFSMixin__cache_lock:
                self.owner._CacheFSMixin__cache.clear(self.path)
            sup = super(CacheFSMixin._CacheInvalidatingFile,self)
            return sup._write(string,flushing=flushing)
        def _truncate(self,size):
            with self.owner._CacheFSMixin__cache_lock:
                self.owner._CacheFSMixin__cache.clear(self.path)
            sup = super(CacheFSMixin._CacheInvalidatingFile,self)
            return sup._truncate(size)
    def exists(self,path):
        try:
            self.getinfo(path)
        except ResourceNotFoundError:
            return False
        else:
            return True
    def isdir(self,path):
        # Any cached child entry proves 'path' is a directory without a
        # round-trip to the remote side.
        try:
            self.__cache.iternames(path).next()
            return True
        except StopIteration:
            pass
        try:
            info = self.getinfo(path)
        except ResourceNotFoundError:
            return False
        else:
            return fs.utils.isdir(super(CacheFSMixin,self),path,info)
    def isfile(self,path):
        # Conversely, a cached child entry proves 'path' is NOT a file.
        try:
            self.__cache.iternames(path).next()
            return False
        except StopIteration:
            pass
        try:
            info = self.getinfo(path)
        except ResourceNotFoundError:
            return False
        else:
            return fs.utils.isfile(super(CacheFSMixin,self),path,info)
    def getinfo(self,path):
        try:
            ci = self.__get_cached_info(path)
            if not ci.has_full_info:
                # Stub entries force a refresh from the wrapped FS.
                raise KeyError
            info = ci.info
        except KeyError:
            info = super(CacheFSMixin,self).getinfo(path)
            self.__set_cached_info(path,CachedInfo(info))
        return info
    def listdir(self,path="",*args,**kwds):
        return list(nm for (nm, _info) in self.listdirinfo(path,*args,**kwds))
    def ilistdir(self,path="",*args,**kwds):
        for (nm, _info) in self.ilistdirinfo(path,*args,**kwds):
            yield nm
    def listdirinfo(self,path="",*args,**kwds):
        # Refresh the cache from a full listing, then drop any cached
        # children that no longer exist.
        items = super(CacheFSMixin,self).listdirinfo(path,*args,**kwds)
        with self.__cache_lock:
            names = set()
            for (nm,info) in items:
                names.add(basename(nm))
                cpath = pathjoin(path,basename(nm))
                ci = CachedInfo(info)
                self.__set_cached_info(cpath,ci)
            to_del = []
            for nm in self.__cache.iternames(path):
                if nm not in names:
                    to_del.append(nm)
            for nm in to_del:
                self.__cache.clear(pathjoin(path,nm))
        # Dead code below kept as found; it would have flagged 'path' as
        # having a complete child listing cached.
        #try:
        #    pci = self.__cache[path]
        #except KeyError:
        #    pci = CachedInfo.new_dir_stub()
        #    self.__cache[path] = pci
        #pci.has_full_children = True
        return items
    def ilistdirinfo(self,path="",*args,**kwds):
        items = super(CacheFSMixin,self).ilistdirinfo(path,*args,**kwds)
        for (nm,info) in items:
            cpath = pathjoin(path,basename(nm))
            ci = CachedInfo(info)
            self.__set_cached_info(cpath,ci)
            yield (nm,info)
    def getsize(self,path):
        return self.getinfo(path)["size"]
    def setcontents(self, path, contents="", chunk_size=64*1024):
        supsc = super(CacheFSMixin,self).setcontents
        res = supsc(path, contents, chunk_size=chunk_size)
        with self.__cache_lock:
            self.__cache.clear(path)
            self.__cache[path] = CachedInfo.new_file_stub()
        return res
    def createfile(self, path):
        super(CacheFSMixin,self).createfile(path)
        with self.__cache_lock:
            self.__cache.clear(path)
            self.__cache[path] = CachedInfo.new_file_stub()
    def makedir(self,path,*args,**kwds):
        super(CacheFSMixin,self).makedir(path,*args,**kwds)
        with self.__cache_lock:
            self.__cache.clear(path)
            self.__cache[path] = CachedInfo.new_dir_stub()
    def remove(self,path):
        super(CacheFSMixin,self).remove(path)
        with self.__cache_lock:
            self.__cache.clear(path)
    def removedir(self,path,**kwds):
        super(CacheFSMixin,self).removedir(path,**kwds)
        with self.__cache_lock:
            self.__cache.clear(path)
    def rename(self,src,dst):
        # Move cached entries along with the renamed tree.
        super(CacheFSMixin,self).rename(src,dst)
        with self.__cache_lock:
            for (subpath,ci) in self.__cache.iteritems(src):
                self.__cache[pathjoin(dst,subpath)] = ci.clone()
            self.__cache.clear(src)
    def copy(self,src,dst,**kwds):
        super(CacheFSMixin,self).copy(src,dst,**kwds)
        with self.__cache_lock:
            for (subpath,ci) in self.__cache.iteritems(src):
                self.__cache[pathjoin(dst,subpath)] = ci.clone()
    def copydir(self,src,dst,**kwds):
        super(CacheFSMixin,self).copydir(src,dst,**kwds)
        with self.__cache_lock:
            for (subpath,ci) in self.__cache.iteritems(src):
                self.__cache[pathjoin(dst,subpath)] = ci.clone()
    def move(self,src,dst,**kwds):
        super(CacheFSMixin,self).move(src,dst,**kwds)
        with self.__cache_lock:
            for (subpath,ci) in self.__cache.iteritems(src):
                self.__cache[pathjoin(dst,subpath)] = ci.clone()
            self.__cache.clear(src)
    def movedir(self,src,dst,**kwds):
        super(CacheFSMixin,self).movedir(src,dst,**kwds)
        with self.__cache_lock:
            for (subpath,ci) in self.__cache.iteritems(src):
                self.__cache[pathjoin(dst,subpath)] = ci.clone()
            self.__cache.clear(src)
    def settimes(self,path,*args,**kwds):
        # Timestamps changed on the remote side; forget the cached entry.
        super(CacheFSMixin,self).settimes(path,*args,**kwds)
        with self.__cache_lock:
            self.__cache.pop(path,None)
class CacheFS(CacheFSMixin,WrapFS):
    """Simple FS wrapper to cache meta-data of a remote filesystem.
    This FS wrapper implements a simplistic cache that can help speed up
    access to a remote filesystem. File and directory meta-data is cached
    but the actual file contents are not.
    """
    # All behaviour comes from CacheFSMixin; WrapFS supplies the plumbing
    # for delegating to the wrapped filesystem.
    pass
| bsd-3-clause |
was4444/chromium.src | third_party/Python-Markdown/markdown/extensions/codehilite.py | 100 | 9769 | """
CodeHilite Extension for Python-Markdown
========================================
Adds code/syntax highlighting to standard Python-Markdown code blocks.
See <https://pythonhosted.org/Markdown/extensions/code_hilite.html>
for documentation.
Original code Copyright 2006-2008 [Waylan Limberg](http://achinghead.com/).
All changes Copyright 2008-2014 The Python Markdown Project
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..treeprocessors import Treeprocessor
try:
from pygments import highlight
from pygments.lexers import get_lexer_by_name, guess_lexer
from pygments.formatters import get_formatter_by_name
pygments = True
except ImportError:
pygments = False
def parse_hl_lines(expr):
    """Parse the hl_lines option into a list of line numbers.

    *expr* is a whitespace-separated string such as ``'1 2'`` naming the
    (1-indexed) lines of a code block to emphasize. An empty, missing or
    malformed value yields an empty list.
    """
    if not expr:
        return []
    numbers = []
    for token in expr.split():
        try:
            numbers.append(int(token))
        except ValueError:
            # A single bad token invalidates the whole option.
            return []
    return numbers
# ------------------ The Main CodeHilite Class ----------------------
class CodeHilite(object):
    """
    Determine language of source code, and pass it into pygments highlighter.
    Basic Usage:
        >>> code = CodeHilite(src = 'some text')
        >>> html = code.hilite()
    * src: Source string or any object with a .readline attribute.
    * linenums: (Boolean) Set line numbering to 'on' (True),
      'off' (False) or 'auto'(None). Set to 'auto' by default.
    * guess_lang: (Boolean) Turn language auto-detection
      'on' or 'off' (on by default).
    * css_class: Set class name of wrapper div ('codehilite' by default).
    * hl_lines: (List of integers) Lines to emphasize, 1-indexed.
    Low Level Usage:
        >>> code = CodeHilite()
        >>> code.src = 'some text' # String or anything with a .readline attr.
        >>> code.linenos = True # Turns line numbering on or off.
        >>> html = code.hilite()
    """
    def __init__(self, src=None, linenums=None, guess_lang=True,
                 css_class="codehilite", lang=None, style='default',
                 noclasses=False, tab_length=4, hl_lines=None, use_pygments=True):
        self.src = src
        self.lang = lang
        self.linenums = linenums
        self.guess_lang = guess_lang
        self.css_class = css_class
        self.style = style
        self.noclasses = noclasses
        self.tab_length = tab_length
        self.hl_lines = hl_lines or []
        self.use_pygments = use_pygments
    def hilite(self):
        """
        Pass code to the [Pygments](http://pygments.pocoo.org/) highlighter
        with optional line numbers. The output should then be styled with CSS
        to your liking. No styles are applied by default - only styling hooks
        (i.e.: <span class="k">).
        returns : A string of html.
        """
        self.src = self.src.strip('\n')
        if self.lang is None:
            self._parseHeader()
        if pygments and self.use_pygments:
            try:
                lexer = get_lexer_by_name(self.lang)
            except ValueError:
                try:
                    if self.guess_lang:
                        lexer = guess_lexer(self.src)
                    else:
                        lexer = get_lexer_by_name('text')
                except ValueError:
                    lexer = get_lexer_by_name('text')
            formatter = get_formatter_by_name('html',
                                              linenos=self.linenums,
                                              cssclass=self.css_class,
                                              style=self.style,
                                              noclasses=self.noclasses,
                                              hl_lines=self.hl_lines)
            return highlight(self.src, lexer, formatter)
        else:
            # just escape and build markup usable by JS highlighting libs.
            # BUGFIX: the replacements were no-ops (e.g. replace('&', '&')),
            # leaving the source completely unescaped in the HTML output;
            # restore proper entity escaping, ampersand first.
            txt = self.src.replace('&', '&amp;')
            txt = txt.replace('<', '&lt;')
            txt = txt.replace('>', '&gt;')
            txt = txt.replace('"', '&quot;')
            classes = []
            if self.lang:
                classes.append('language-%s' % self.lang)
            if self.linenums:
                classes.append('linenums')
            class_str = ''
            if classes:
                class_str = ' class="%s"' % ' '.join(classes)
            return '<pre class="%s"><code%s>%s</code></pre>\n' % \
                   (self.css_class, class_str, txt)
    def _parseHeader(self):
        """
        Determines language of a code block from shebang line and whether said
        line should be removed or left in place. If the shebang line contains a
        path (even a single /) then it is assumed to be a real shebang line and
        left alone. However, if no path is given (i.e.: #!python or :::python)
        then it is assumed to be a mock shebang for language identification of
        a code fragment and removed from the code block prior to processing for
        code highlighting. When a mock shebang (i.e.: #!python) is found, line
        numbering is turned on. When colons are found in place of a shebang
        (i.e.: :::python), line numbering is left in the current state - off
        by default.
        Also parses optional list of highlight lines, like:
            :::python hl_lines="1 3"
        """
        import re
        # split text into lines
        lines = self.src.split("\n")
        # pull first line to examine
        fl = lines.pop(0)
        c = re.compile(r'''
            (?:(?:^::+)|(?P<shebang>^[#]!)) # Shebang or 2 or more colons
            (?P<path>(?:/\w+)*[/ ])?        # Zero or 1 path
            (?P<lang>[\w+-]*)               # The language
            \s*                             # Arbitrary whitespace
            # Optional highlight lines, single- or double-quote-delimited
            (hl_lines=(?P<quot>"|')(?P<hl_lines>.*?)(?P=quot))?
            ''', re.VERBOSE)
        # search first line for shebang
        m = c.search(fl)
        if m:
            # we have a match
            try:
                self.lang = m.group('lang').lower()
            except IndexError:
                self.lang = None
            if m.group('path'):
                # path exists - restore first line
                lines.insert(0, fl)
            if self.linenums is None and m.group('shebang'):
                # Overridable and Shebang exists - use line numbers
                self.linenums = True
            self.hl_lines = parse_hl_lines(m.group('hl_lines'))
        else:
            # No match
            lines.insert(0, fl)
        self.src = "\n".join(lines).strip("\n")
# ------------------ The Markdown Extension -------------------------------
class HiliteTreeprocessor(Treeprocessor):
    """Highlight source code found in ``<pre><code>`` blocks."""

    def run(self, root):
        """Replace each code block with a stashed, highlighted version."""
        for pre in root.iter('pre'):
            # Only <pre> elements whose single child is <code> count as
            # code blocks; anything else is left untouched.
            if len(pre) != 1 or pre[0].tag != 'code':
                continue
            highlighted = CodeHilite(
                pre[0].text,
                linenums=self.config['linenums'],
                guess_lang=self.config['guess_lang'],
                css_class=self.config['css_class'],
                style=self.config['pygments_style'],
                noclasses=self.config['noclasses'],
                tab_length=self.markdown.tab_length,
                use_pygments=self.config['use_pygments']
            ).hilite()
            placeholder = self.markdown.htmlStash.store(highlighted,
                                                        safe=True)
            # Empty the element, then turn it into a plain <p> holding the
            # stash placeholder; it is swapped for raw HTML later on.
            pre.clear()
            pre.tag = 'p'
            pre.text = placeholder
class CodeHiliteExtension(Extension):
    """Markdown extension wiring source-code highlighting into the pipeline."""

    def __init__(self, *args, **kwargs):
        # Default configuration; each entry maps a key to
        # [default value, help text].
        self.config = {
            'linenums': [
                None,
                "Use lines numbers. True=yes, False=no, None=auto"
            ],
            'guess_lang': [
                True,
                "Automatic language detection - Default: True"
            ],
            'css_class': [
                "codehilite",
                "Set class name for wrapper <div> - Default: codehilite"
            ],
            'pygments_style': [
                'default',
                'Pygments HTML Formatter Style (Colorscheme) - Default: default'
            ],
            'noclasses': [
                False,
                'Use inline styles instead of CSS classes - Default false'
            ],
            'use_pygments': [
                True,
                'Use Pygments to Highlight code blocks. '
                'Disable if using a JavaScript library. Default: True'
            ]
        }
        super(CodeHiliteExtension, self).__init__(*args, **kwargs)

    def extendMarkdown(self, md, md_globals):
        """Register the HiliteTreeprocessor on the Markdown instance."""
        tree_processor = HiliteTreeprocessor(md)
        tree_processor.config = self.getConfigs()
        # "<inline" inserts this processor before the 'inline' treeprocessor.
        md.treeprocessors.add("hilite", tree_processor, "<inline")
        md.registerExtension(self)
def makeExtension(*args, **kwargs):
    """Entry point used by Markdown to instantiate this extension."""
    return CodeHiliteExtension(*args, **kwargs)
| bsd-3-clause |
blazek/QGIS | tests/code_layout/test_qgsdoccoverage.py | 15 | 5336 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for API documentation coverage.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '01/02/2015'
__copyright__ = 'Copyright 2016, The QGIS Project'
import os
import sys
try:
from qgis.static_testing import unittest
except ImportError:
import unittest
from termcolor import colored
from doxygen_parser import DoxygenParser
from acceptable_missing_doc import ACCEPTABLE_MISSING_DOCS, ACCEPTABLE_MISSING_ADDED_NOTE, ACCEPTABLE_MISSING_BRIEF
# To regenerate the list:
#   1. Uncomment the lines under the `# GEN LIST` marker.
#   2. $ export PYTHONPATH=build/output/python
#   3. $ export QGIS_PREFIX_PATH=build/output
#   4. $ python tests/src/python/test_qgsdoccoverage.py
#   5. Copy the output into the file
#      tests/src/python/acceptable_missing_doc.py
#      as `ACCEPTABLE_MISSING_DOCS = { <pasted output> }`.
class TestQgsDocCoverage(unittest.TestCase):
    """Checks Doxygen coverage of the QGIS API against the acceptable-missing lists."""

    def testCoverage(self):
        # Tell CTest to keep the full (untruncated) output of this test.
        print('CTEST_FULL_OUTPUT')
        prefixPath = os.environ['QGIS_PREFIX_PATH']
        docPath = os.path.join(prefixPath, '..', 'doc', 'api', 'xml')
        # Parse the generated Doxygen XML, ignoring the known acceptable gaps.
        parser = DoxygenParser(docPath, ACCEPTABLE_MISSING_DOCS, ACCEPTABLE_MISSING_ADDED_NOTE, ACCEPTABLE_MISSING_BRIEF)
        coverage = 100.0 * parser.documented_members / parser.documentable_members
        missing = parser.documentable_members - parser.documented_members
        print("---------------------------------")
        print(("{} total documentable members".format(parser.documentable_members)))
        print(("{} total contain valid documentation".format(parser.documented_members)))
        print(("Total documentation coverage {}%".format(coverage)))
        print("---------------------------------")
        print(("{} members missing documentation".format(missing)))
        print("---------------------------------")
        print("Unacceptable missing documentation:")

        # Members with no documentation at all.
        if parser.undocumented_members:
            for cls, props in list(parser.undocumented_members.items()):
                print(('\n\nClass {}, {}/{} members documented\n'.format(colored(cls, 'yellow'), props['documented'], props['members'])))
                for mem in props['missing_members']:
                    print((colored(' "' + mem + '"', 'yellow', attrs=['bold'])))

        # Members whose documentation violates the project conventions.
        if parser.noncompliant_members:
            for cls, props in list(parser.noncompliant_members.items()):
                print(('\n\nClass {}, non-compliant members found\n'.format(colored(cls, 'yellow'))))
                for p in props:
                    for mem, error in p.items():
                        print((colored(' ' + mem + ': ' + error, 'yellow', attrs=['bold'])))

        # Broken "see also" cross references.
        if parser.broken_links:
            for cls, props in list(parser.broken_links.items()):
                print(('\n\nClass {}, broken see also links found\n'.format(colored(cls, 'yellow'))))
                for member, links in props.items():
                    for l in links:
                        print((colored(' ' + member + ': ' + l, 'yellow', attrs=['bold'])))

        # self.assertEquals(len(parser.undocumented_string), 0, 'FAIL: new undocumented members have been introduced, please add documentation for these members')

        if parser.classes_missing_group:
            print("---------------------------------")
            print('\n')
            print((colored('{} classes have been added without Doxygen group tag ("\ingroup"):'.format(len(parser.classes_missing_group)), 'yellow')))
            print('')
            print((' ' + '\n '.join([colored(cls, 'yellow', attrs=['bold']) for cls in parser.classes_missing_group])))

        if parser.classes_missing_version_added:
            print("---------------------------------")
            print('\n')
            print((colored('{} classes have been added without a version added doxygen note ("\since QGIS x.xx"):'.format(len(parser.classes_missing_version_added)), 'yellow')))
            print('')
            print((' ' + '\n '.join([colored(cls, 'yellow', attrs=['bold']) for cls in parser.classes_missing_version_added])))

        if parser.classes_missing_brief:
            print("---------------------------------")
            print('\n')
            print((colored('{} classes have been added without at least a brief description:'.format(len(parser.classes_missing_brief)), 'yellow')))
            print('')
            print((' ' + '\n '.join([colored(cls, 'yellow', attrs=['bold']) for cls in parser.classes_missing_brief])))

        # Flush so the report above is emitted even if an assertion fails.
        sys.stdout.flush()

        self.assertTrue(not parser.undocumented_members, 'Undocumented members found')
        self.assertTrue(not parser.classes_missing_group, 'Classes without \\group tag found')
        self.assertTrue(not parser.classes_missing_version_added, 'Classes without \\since version tag found')
        self.assertTrue(not parser.classes_missing_brief, 'Classes without \\brief description found')
        self.assertTrue(not parser.noncompliant_members, 'Non compliant members found')
        self.assertTrue(not parser.broken_links, 'Broken links found')


if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
yeraydiazdiaz/nonrel-blog | django/core/serializers/__init__.py | 101 | 3969 | """
Interfaces for serializing Django objects.
Usage::
from django.core import serializers
json = serializers.serialize("json", some_query_set)
objects = list(serializers.deserialize("json", json))
To add your own serializers, use the SERIALIZATION_MODULES setting::
SERIALIZATION_MODULES = {
"csv" : "path.to.csv.serializer",
"txt" : "path.to.txt.serializer",
}
"""
from django.conf import settings
from django.utils import importlib
from django.core.serializers.base import SerializerDoesNotExist
# Built-in serializers
BUILTIN_SERIALIZERS = {
    "xml" : "django.core.serializers.xml_serializer",
    "python" : "django.core.serializers.python",
    "json" : "django.core.serializers.json",
}

# Check for PyYaml and register the serializer if it's available.
try:
    import yaml
    BUILTIN_SERIALIZERS["yaml"] = "django.core.serializers.pyyaml"
except ImportError:
    pass

# Lazily populated registry mapping format name -> serializer module.
_serializers = {}
def register_serializer(format, serializer_module, serializers=None):
    """Register a new serializer.

    ``serializer_module`` is the fully qualified module name for the
    serializer.

    When ``serializers`` is given, the registration is recorded in that
    dictionary; otherwise it goes straight into the global registry, which
    is not a thread-safe operation.
    """
    if serializers is None and not _serializers:
        _load_serializers()
    module = importlib.import_module(serializer_module)
    # Choose the global registry unless a private one was supplied.
    target = _serializers if serializers is None else serializers
    target[format] = module
def unregister_serializer(format):
    """Unregister a given serializer. This is not a thread-safe operation."""
    if not _serializers:
        _load_serializers()
    try:
        del _serializers[format]
    except KeyError:
        raise SerializerDoesNotExist(format)
def get_serializer(format):
    """Return the Serializer class registered for *format*."""
    if not _serializers:
        _load_serializers()
    try:
        return _serializers[format].Serializer
    except KeyError:
        raise SerializerDoesNotExist(format)
def get_serializer_formats():
    """Return the names of every registered serialization format."""
    if not _serializers:
        _load_serializers()
    return list(_serializers.keys())
def get_public_serializer_formats():
    """Return the formats whose serializers are not for internal use only."""
    if not _serializers:
        _load_serializers()
    return [k for k, v in _serializers.iteritems() if not v.Serializer.internal_use_only]
def get_deserializer(format):
    """Return the Deserializer callable registered for *format*."""
    if not _serializers:
        _load_serializers()
    try:
        return _serializers[format].Deserializer
    except KeyError:
        raise SerializerDoesNotExist(format)
def serialize(format, queryset, **options):
    """
    Serialize a queryset (or any iterator that returns database objects)
    using the serializer registered for *format* and return the result.
    """
    serializer = get_serializer(format)()
    serializer.serialize(queryset, **options)
    return serializer.getvalue()
def deserialize(format, stream_or_string, **options):
    """
    Deserialize a stream or a string.

    Returns an iterator yielding ``(obj, m2m_relation_dict)`` pairs, where
    ``obj`` is an instantiated -- but *unsaved* -- object and
    ``m2m_relation_dict`` maps ``m2m_field_name`` to a list of related
    objects.
    """
    deserializer = get_deserializer(format)
    return deserializer(stream_or_string, **options)
def _load_serializers():
    """
    Register built-in and settings-defined serializers.

    Done lazily so user code gets a chance to (e.g.) set up custom settings
    without needing to be careful of import order.
    """
    global _serializers
    registry = {}
    for fmt in BUILTIN_SERIALIZERS:
        register_serializer(fmt, BUILTIN_SERIALIZERS[fmt], registry)
    for fmt in getattr(settings, "SERIALIZATION_MODULES", {}):
        register_serializer(fmt, settings.SERIALIZATION_MODULES[fmt], registry)
    # Swap in atomically at the end so a failed import leaves the old registry.
    _serializers = registry
| bsd-3-clause |
crosswalk-project/chromium-crosswalk-efl | tools/perf/page_sets/page_cycler/dom.py | 34 | 1349 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class DomPage(page_module.Page):
    """A single page-cycler page exercising one DOM micro-benchmark."""

    def __init__(self, url, page_set):
        super(DomPage, self).__init__(url=url, page_set=page_set)
class DomPageSet(page_set_module.PageSet):
    """DOM page_cycler benchmark."""

    def __init__(self):
        # pylint: disable=C0301
        super(DomPageSet, self).__init__(
            serving_dirs=set(['../../../../data/page_cycler/dom']))

        base = 'file://../../../../data/page_cycler/dom/'
        cases = (
            'HTMLDocument_write',
            'Document_getElementById',
            'DOMWindow_document',
            'DOMWindow_window',
            'Element_getAttribute',
            'HTMLCollection_length',
            'HTMLElement_className',
            'HTMLElement_id',
            'NodeList_length',
        )
        for case in cases:
            self.AddPage(DomPage(base + case + '/', self))
| bsd-3-clause |
phunehehe/microgram | to-nix/python/python2nix/__main__.py | 3 | 3044 | import sys
import requests
from python2nix import pip_deps
# Template for the generated nix expression; filled via str.format in
# info_to_expr (note: the doubled braces are literal nix braces).
PACKAGE = """\
{{ {args_str} }}:
buildPythonPackage rec {{
name = "{name}";
src = fetchurl {{
url = "{url}";
md5 = "{md5}";
}};
propagatedBuildInputs = [ {inputs_str} ];
meta = with stdenv.lib; {{
description = "{description}";
homepage = {homepage};
license = {license};
}};
}}"""

# Maps PyPI license strings to nixpkgs license attributes.
LICENSE_MAP = {
    'APL2': 'licenses.asl20',
    'ASL 2': 'licenses.asl20',
    'Apache 2.0': 'licenses.asl20',
    'BSD License': 'licenses.bsd',
    'BSD or Apache License, Version 2.0': 'licenses.bsd',
    'BSD': 'licenses.bsd',
    'MIT License': 'licenses.mit',
    'MIT license': 'licenses.mit',
    'MIT': 'licenses.mit',
    'PSF or ZPL': 'licenses.psfl',
    'PSF': 'licenses.psfl',
    'http://www.apache.org/licenses/LICENSE-2.0': 'licenses.asl20',
    'http://www.opensource.org/licenses/mit-license.php': 'licenses.mit',
}

# Sentinel distinguishing "license not in map" from a mapped falsy value.
_missing = object()
def guess_license(info):
    """Map a PyPI license string to a nix license attribute, warning on unknowns."""
    raw = info['info']['license']
    mapped = LICENSE_MAP.get(raw, _missing)
    if mapped is not _missing:
        return mapped
    sys.stderr.write('WARNING: unknown license (please update LICENSE_MAP): ' + raw + '\n')
    return 'unknown'
# Cache of pip dependency lookups keyed by package name.
_pip_dependency_cache = {}


def pip_dump_dependencies(name):
    """Memoized wrapper around pip_deps.pip_dump_dependencies."""
    if name not in _pip_dependency_cache:
        _pip_dependency_cache[name] = pip_deps.pip_dump_dependencies(name)
    return _pip_dependency_cache[name]
def build_inputs(name):
    """Return the lower-cased names of *name*'s pip dependencies."""
    reqs, vsns = pip_dump_dependencies(name)

    def lookup(adict, key):
        # pypi sometimes normalizes '_' to '-' in project names, so retry
        # with the dashed spelling when the literal key is absent.
        value = adict.get(key)
        if not value:
            value = adict.get(key.replace('_', '-'))
        return value

    return [dep_name.lower() for dep_name, specs in lookup(reqs, name)]
def package_to_info(package):
    """Fetch the JSON metadata for *package* from the PyPI API.

    Network I/O: performs an HTTP GET; re-raises JSON decoding failures
    after logging the raw response to stderr.
    """
    url = "https://pypi.python.org/pypi/{}/json".format(package)
    r = requests.get(url)
    try:
        return r.json()
    except Exception as e:
        sys.stderr.write('package_to_info failed: {}\n'.format(r))
        raise e
def info_to_expr(info):
    """Render a nix expression for the PyPI package described by *info*.

    NOTE: ``PACKAGE.format(**locals())`` at the end substitutes by the exact
    local variable names in this function -- do not rename locals here.
    """
    name = info['info']['name'].lower()
    inputs = build_inputs(name)
    inputs_str = ' '.join(build_inputs(name))
    args = [ 'buildPythonPackage', 'fetchurl', 'stdenv' ] + inputs
    args_str = ', '.join(args)
    # Pick the first source archive (zip or tarball) listed for the release.
    url = None
    md5 = None
    for url_item in info['urls']:
        url_ext = url_item['url']
        if url_ext.endswith('zip') or url_ext.endswith('tar.gz'):
            url = url_item['url']
            md5 = url_item['md5_digest']
            break
    if url is None:
        raise Exception('No download url found :-(')
    description = info['info']['description'].split('\n')[0]
    homepage = info['info']['home_page'] or '""'
    license = guess_license(info)
    return PACKAGE.format(**locals())
def main():
    """CLI entry point (Python 2): print the nix expression for argv[1]."""
    if len(sys.argv) < 2 or "--help" in sys.argv:
        print "Usage: python2nix <PACKAGE_NAME>"
    else:
        print info_to_expr(package_to_info(sys.argv[1]))


if __name__ == '__main__':
    main()
| mit |
corcor67/SMPL_M8_GPE | tools/perf/scripts/python/netdev-times.py | 11271 | 15048 | # Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
# Global state shared between the perf callbacks and the report printers.
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
              # which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
                 # and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
                  # skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
                       # tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed

# options (parsed from sys.argv in trace_begin)
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;

# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
    """Return the interval from src to dst, converted from nsec to msec."""
    delta_ns = dst - src
    return delta_ns / 1000000.0
# Display a process of transmitting a packet
def print_transmit(hunk):
    """Print one transmitted packet: device, length, queue/xmit/free timings."""
    # Honour the "dev=" filter option when set.
    if dev != 0 and hunk['dev'].find(dev) < 0:
        return
    print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
        (hunk['dev'], hunk['len'],
         nsecs_secs(hunk['queue_t']),
         nsecs_nsecs(hunk['queue_t'])/1000,
         diff_msec(hunk['queue_t'], hunk['xmit_t']),
         diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
# (%-format templates consumed by print_receive below)
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
    """Print one NET_RX hunk: its hard irqs, softirq entry and packet events."""
    show_hunk = 0
    irq_list = hunk['irq_list']
    cpu = irq_list[0]['cpu']
    base_t = irq_list[0]['irq_ent_t']
    # check if this hunk should be showed (honour the "dev=" filter)
    if dev != 0:
        for i in range(len(irq_list)):
            if irq_list[i]['name'].find(dev) >= 0:
                show_hunk = 1
                break
    else:
        show_hunk = 1
    if show_hunk == 0:
        return
    print "%d.%06dsec cpu=%d" % \
        (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
    # Hard irqs that raised this softirq, with any netif_rx seen inside them.
    for i in range(len(irq_list)):
        print PF_IRQ_ENTRY % \
            (diff_msec(base_t, irq_list[i]['irq_ent_t']),
             irq_list[i]['irq'], irq_list[i]['name'])
        print PF_JOINT
        irq_event_list = irq_list[i]['event_list']
        for j in range(len(irq_event_list)):
            irq_event = irq_event_list[j]
            if irq_event['event'] == 'netif_rx':
                print PF_NET_RX % \
                    (diff_msec(base_t, irq_event['time']),
                     irq_event['skbaddr'])
                print PF_JOINT
    print PF_SOFT_ENTRY % \
        diff_msec(base_t, hunk['sirq_ent_t'])
    print PF_JOINT
    # Events recorded while the NET_RX softirq was running.
    event_list = hunk['event_list']
    for i in range(len(event_list)):
        event = event_list[i]
        if event['event_name'] == 'napi_poll':
            print PF_NAPI_POLL % \
                (diff_msec(base_t, event['event_t']), event['dev'])
            if i == len(event_list) - 1:
                print ""
            else:
                print PF_JOINT
        else:
            print PF_NET_RECV % \
                (diff_msec(base_t, event['event_t']), event['skbaddr'],
                 event['len'])
            # 'comm' means the skb was copied to a process; 'handle' means it
            # was freed (kfree_skb) or consumed (consume_skb).
            if 'comm' in event.keys():
                print PF_WJOINT
                print PF_CPY_DGRAM % \
                    (diff_msec(base_t, event['comm_t']),
                     event['pid'], event['comm'])
            elif 'handle' in event.keys():
                print PF_WJOINT
                if event['handle'] == "kfree_skb":
                    print PF_KFREE_SKB % \
                        (diff_msec(base_t,
                                   event['comm_t']),
                         event['location'])
                elif event['handle'] == "consume_skb":
                    print PF_CONS_SKB % \
                        diff_msec(base_t,
                                  event['comm_t'])
            print PF_JOINT
def trace_begin():
    """Parse the script's command-line options before event processing starts."""
    global show_tx
    global show_rx
    global dev
    global debug
    for i in range(len(sys.argv)):
        if i == 0:
            continue
        arg = sys.argv[i]
        if arg == 'tx':
            show_tx = 1
        elif arg == 'rx':
            show_rx = 1
        elif arg.find('dev=', 0, 4) >= 0:
            dev = arg[4:]
        elif arg == 'debug':
            debug = 1
    # With no explicit selection, show both tx and rx charts.
    if show_tx == 0 and show_rx == 0:
        show_tx = 1
        show_rx = 1
def trace_end():
    """Sort all collected events, dispatch them to handlers, print the report."""
    # order all events in time
    all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
                                        b[EINFO_IDX_TIME]))
    # process all events
    for i in range(len(all_event_list)):
        event_info = all_event_list[i]
        name = event_info[EINFO_IDX_NAME]
        if name == 'irq__softirq_exit':
            handle_irq_softirq_exit(event_info)
        elif name == 'irq__softirq_entry':
            handle_irq_softirq_entry(event_info)
        elif name == 'irq__softirq_raise':
            handle_irq_softirq_raise(event_info)
        elif name == 'irq__irq_handler_entry':
            handle_irq_handler_entry(event_info)
        elif name == 'irq__irq_handler_exit':
            handle_irq_handler_exit(event_info)
        elif name == 'napi__napi_poll':
            handle_napi_poll(event_info)
        elif name == 'net__netif_receive_skb':
            handle_netif_receive_skb(event_info)
        elif name == 'net__netif_rx':
            handle_netif_rx(event_info)
        elif name == 'skb__skb_copy_datagram_iovec':
            handle_skb_copy_datagram_iovec(event_info)
        elif name == 'net__net_dev_queue':
            handle_net_dev_queue(event_info)
        elif name == 'net__net_dev_xmit':
            handle_net_dev_xmit(event_info)
        elif name == 'skb__kfree_skb':
            handle_kfree_skb(event_info)
        elif name == 'skb__consume_skb':
            handle_consume_skb(event_info)
    # display receive hunks
    if show_rx:
        for i in range(len(receive_hunk_list)):
            print_receive(receive_hunk_list[i])
    # display transmit hunks
    if show_tx:
        print " dev len Qdisc " \
            " netdevice free"
        for i in range(len(tx_free_list)):
            print_transmit(tx_free_list[i])
    # optional dump of internal buffer occupancy / overflow counters
    if debug:
        print "debug buffer status"
        print "----------------------------"
        print "xmit Qdisc:remain:%d overflow:%d" % \
            (len(tx_queue_list), of_count_tx_queue_list)
        print "xmit netdevice:remain:%d overflow:%d" % \
            (len(tx_xmit_list), of_count_tx_xmit_list)
        print "receive:remain:%d overflow:%d" % \
            (len(rx_skb_list), of_count_rx_skb_list)
# Called from perf when it finds a corresponding event. Each callback below
# normalizes its arguments into an event_info tuple and appends it to
# all_event_list for time-ordered processing in trace_end(). The softirq
# callbacks discard everything except NET_RX vectors.
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)

def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)

def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)

def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
                           irq, irq_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  irq, irq_name)
    all_event_list.append(event_info)

def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
    all_event_list.append(event_info)

def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  napi, dev_name)
    all_event_list.append(event_info)

def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
                           skblen, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, dev_name)
    all_event_list.append(event_info)

def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
                  skblen, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, dev_name)
    all_event_list.append(event_info)

def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
                       skbaddr, skblen, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, dev_name)
    all_event_list.append(event_info)

def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
                      skbaddr, skblen, rc, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, rc ,dev_name)
    all_event_list.append(event_info)

def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
                   skbaddr, protocol, location):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, protocol, location)
    all_event_list.append(event_info)

def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr)
    all_event_list.append(event_info)

def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
                                 skbaddr, skblen):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen)
    all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
    """Push a new hard-irq record onto this CPU's irq stack."""
    (name, context, cpu, time, pid, comm, irq, irq_name) = event_info
    record = {'irq': irq, 'name': irq_name, 'cpu': cpu, 'irq_ent_t': time}
    irq_dic.setdefault(cpu, []).append(record)
def handle_irq_handler_exit(event_info):
    """Pop the matching irq record; keep it only if it saw NET_RX activity."""
    (name, context, cpu, time, pid, comm, irq, ret) = event_info
    if cpu not in irq_dic.keys():
        return
    irq_record = irq_dic[cpu].pop()
    # Mismatched exit (nested/lost events): discard the popped record.
    if irq != irq_record['irq']:
        return
    irq_record.update({'irq_ext_t':time})
    # if an irq doesn't include NET_RX softirq, drop.
    if 'event_list' in irq_record.keys():
        irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
    """Attach a softirq-raise marker to the irq on top of this CPU's stack."""
    (name, context, cpu, time, pid, comm, vec) = event_info
    if cpu not in irq_dic.keys() \
       or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    if 'event_list' in irq_record.keys():
        irq_event_list = irq_record['event_list']
    else:
        irq_event_list = []
    irq_event_list.append({'time':time, 'event':'sirq_raise'})
    irq_record.update({'event_list':irq_event_list})
    irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
    """Open a fresh NET_RX record (entry time + empty event list) for this CPU."""
    cpu = event_info[EINFO_IDX_CPU]
    entry_time = event_info[EINFO_IDX_TIME]
    net_rx_dic[cpu] = {'sirq_ent_t': entry_time, 'event_list': []}
def handle_irq_softirq_exit(event_info):
    """Merge this CPU's irq stack and NET_RX record into one receive hunk."""
    (name, context, cpu, time, pid, comm, vec) = event_info
    irq_list = []
    event_list = 0
    if cpu in irq_dic.keys():
        irq_list = irq_dic[cpu]
        del irq_dic[cpu]
    if cpu in net_rx_dic.keys():
        sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
        event_list = net_rx_dic[cpu]['event_list']
        del net_rx_dic[cpu]
    # Only keep hunks that have both irq context and softirq events.
    if irq_list == [] or event_list == 0:
        return
    rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
                'irq_list':irq_list, 'event_list':event_list}
    # merge information related to a NET_RX softirq
    receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
    """Record a napi poll event inside this CPU's current NET_RX window."""
    (name, context, cpu, time, pid, comm, napi, dev_name) = event_info
    if cpu in net_rx_dic.keys():
        event_list = net_rx_dic[cpu]['event_list']
        rec_data = {'event_name':'napi_poll',
                    'dev':dev_name, 'event_t':time}
        event_list.append(rec_data)
def handle_netif_rx(event_info):
    """Attach a netif_rx event to the hard irq on top of this CPU's stack."""
    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, dev_name) = event_info
    if cpu not in irq_dic.keys() \
       or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    if 'event_list' in irq_record.keys():
        irq_event_list = irq_record['event_list']
    else:
        irq_event_list = []
    irq_event_list.append({'time':time, 'event':'netif_rx',
        'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
    irq_record.update({'event_list':irq_event_list})
    irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
    """Record a received skb and queue it for matching with its consumer."""
    global of_count_rx_skb_list

    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, dev_name) = event_info
    if cpu in net_rx_dic.keys():
        rec_data = {'event_name':'netif_receive_skb',
                    'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
        event_list = net_rx_dic[cpu]['event_list']
        event_list.append(rec_data)
        rx_skb_list.insert(0, rec_data)
        # Drop the oldest entry once the buffer budget is exceeded.
        if len(rx_skb_list) > buffer_budget:
            rx_skb_list.pop()
            of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
    """Record a packet entering the qdisc (dev_queue_xmit)."""
    global of_count_tx_queue_list
    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, dev_name) = event_info
    skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
    tx_queue_list.insert(0, skb)
    # Drop the oldest entry once the buffer budget is exceeded.
    if len(tx_queue_list) > buffer_budget:
        tx_queue_list.pop()
        of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
    """Move a queued packet to the xmit list once the driver accepted it."""
    global of_count_tx_xmit_list
    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, rc, dev_name) = event_info
    if rc == 0: # NETDEV_TX_OK
        for i in range(len(tx_queue_list)):
            skb = tx_queue_list[i]
            if skb['skbaddr'] == skbaddr:
                skb['xmit_t'] = time
                tx_xmit_list.insert(0, skb)
                del tx_queue_list[i]
                # Drop the oldest entry once the buffer budget is exceeded.
                if len(tx_xmit_list) > buffer_budget:
                    tx_xmit_list.pop()
                    of_count_tx_xmit_list += 1
                return
def handle_kfree_skb(event_info):
    """Match a freed skb against the tx queues, then the rx list."""
    (name, context, cpu, time, pid, comm,
     skbaddr, protocol, location) = event_info
    # Freed while still queued in the qdisc: dropped before transmit.
    for i in range(len(tx_queue_list)):
        skb = tx_queue_list[i]
        if skb['skbaddr'] == skbaddr:
            del tx_queue_list[i]
            return
    # Freed after the driver accepted it: record its completion time.
    for i in range(len(tx_xmit_list)):
        skb = tx_xmit_list[i]
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[i]
            return
    # Received skb freed without reaching a process.
    for i in range(len(rx_skb_list)):
        rec_data = rx_skb_list[i]
        if rec_data['skbaddr'] == skbaddr:
            rec_data.update({'handle':"kfree_skb",
                'comm':comm, 'pid':pid, 'comm_t':time})
            del rx_skb_list[i]
            return
def handle_consume_skb(event_info):
    """A transmitted skb was consumed: record its free time."""
    (name, context, cpu, time, pid, comm, skbaddr) = event_info
    for i in range(len(tx_xmit_list)):
        skb = tx_xmit_list[i]
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[i]
            return
def handle_skb_copy_datagram_iovec(event_info):
    """A process copied a received skb to userspace: record the consumer."""
    (name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
    for i in range(len(rx_skb_list)):
        rec_data = rx_skb_list[i]
        if skbaddr == rec_data['skbaddr']:
            rec_data.update({'handle':"skb_copy_datagram_iovec",
                'comm':comm, 'pid':pid, 'comm_t':time})
            del rx_skb_list[i]
            return
| gpl-2.0 |
pam-bot/SMSQuery | lib/twilio/rest/resources/base.py | 8 | 13752 | import logging
import os
import platform
from six import (
integer_types,
string_types,
binary_type,
iteritems
)
from ...compat import urlparse
from ...compat import urlencode
from ... import __version__
from ...exceptions import TwilioException
from ..exceptions import TwilioRestException
from .connection import Connection
from .imports import parse_qs, httplib2, json
from .util import (
transform_params,
parse_rfc2822_date,
UNSET_TIMEOUT
)
logger = logging.getLogger('twilio')
class Response(object):
    """
    Adapt an httplib2 (response, content) pair to a requests-like object.
    """
    def __init__(self, httplib_resp, content, url):
        self.content = content
        self.cached = False
        status = int(httplib_resp.status)
        self.status_code = status
        # Anything below 400 counts as a successful response.
        self.ok = status < 400
        self.url = url
def get_cert_file():
    """Return the bundled CA cert bundle path, or None for the system default."""
    # XXX - this currently fails test coverage because we don't actually go
    # over the network anywhere. Might be good to have a test that stands up a
    # local server and authenticates against it.
    try:
        # __file__ may be unavailable in some embedded environments, so any
        # failure falls back to the system certificate store (None).
        here = os.path.realpath(__file__)
        cert_path = os.path.join(here, "..", "..", "..", "conf", "cacert.pem")
        return os.path.abspath(cert_path)
    except Exception:
        return None
def make_request(method, url, params=None, data=None, headers=None,
                 cookies=None, files=None, auth=None, timeout=None,
                 allow_redirects=False, proxies=None):
    """Sends an HTTP request

    :param str method: The HTTP method to use
    :param str url: The URL to request
    :param dict params: Query parameters to append to the URL
    :param dict data: Parameters to go in the body of the HTTP request
    :param dict headers: HTTP Headers to send with the request
    :param float timeout: Socket/Read timeout for the request

    :return: An http response
    :rtype: A :class:`Response <models.Response>` object

    See the requests documentation for explanation of all these parameters

    Currently proxies, files, and cookies are all ignored
    """
    http = httplib2.Http(
        timeout=timeout,
        ca_certs=get_cert_file(),
        proxy_info=Connection.proxy_info(),
    )
    http.follow_redirects = allow_redirects

    if auth is not None:
        http.add_credentials(auth[0], auth[1])

    def encode_atom(atom):
        # httplib2 needs bytes: pass ints/bytes through, UTF-8 encode text.
        if isinstance(atom, (integer_types, binary_type)):
            return atom
        elif isinstance(atom, string_types):
            return atom.encode('utf-8')
        else:
            raise ValueError('list elements should be an integer, '
                             'binary, or string')

    if data is not None:
        # Form-encode the body, encoding each value (or sequence element).
        udata = {}
        for k, v in iteritems(data):
            key = k.encode('utf-8')
            if isinstance(v, (list, tuple, set)):
                udata[key] = [encode_atom(x) for x in v]
            elif isinstance(v, (integer_types, binary_type, string_types)):
                udata[key] = encode_atom(v)
            else:
                raise ValueError('data should be an integer, '
                                 'binary, or string, or sequence ')
        data = urlencode(udata, doseq=True)

    if params is not None:
        # Append query parameters, respecting any query already on the URL.
        enc_params = urlencode(params, doseq=True)
        if urlparse(url).query:
            url = '%s&%s' % (url, enc_params)
        else:
            url = '%s?%s' % (url, enc_params)

    resp, content = http.request(url, method, headers=headers, body=data)

    # Format httplib2 request as requests object
    return Response(resp, content.decode('utf-8'), url)
def make_twilio_request(method, uri, **kwargs):
    """
    Make a request to Twilio. Throws an error

    :param str method: The HTTP method to use
    :param str uri: The request URI (".json" is appended unless an Accept
        header is supplied)
    :return: a requests-like HTTP response
    :rtype: :class:`RequestsResponse`
    :raises TwilioRestException: if the response is a 400
        or 500-level response.
    """
    headers = kwargs.get("headers", {})

    user_agent = "twilio-python/%s (Python %s)" % (
        __version__,
        platform.python_version(),
    )
    headers["User-Agent"] = user_agent
    headers["Accept-Charset"] = "utf-8"

    if method == "POST" and "Content-Type" not in headers:
        headers["Content-Type"] = "application/x-www-form-urlencoded"

    kwargs["headers"] = headers

    if "Accept" not in headers:
        headers["Accept"] = "application/json"
        uri += ".json"

    resp = make_request(method, uri, **kwargs)

    if not resp.ok:
        # Try to extract Twilio's structured error payload; fall back to the
        # raw body. Previously this used a bare ``except:``, which also
        # swallowed SystemExit/KeyboardInterrupt -- catch only the failures
        # json.loads and the key lookups can actually raise.
        try:
            error = json.loads(resp.content)
            code = error["code"]
            message = "%s: %s" % (code, error["message"])
        except (ValueError, KeyError, TypeError):
            code = None
            message = resp.content

        raise TwilioRestException(status=resp.status_code, method=method,
                                  uri=resp.url, msg=message, code=code)

    return resp
class Resource(object):
    """A REST Resource"""

    name = "Resource"

    def __init__(self, base_uri, auth, timeout=UNSET_TIMEOUT):
        # auth is the credentials pair handed to every request.
        self.base_uri = base_uri
        self.auth = auth
        self.timeout = timeout

    def __eq__(self, other):
        return (isinstance(other, self.__class__)
                and self.__dict__ == other.__dict__)

    def __hash__(self):
        # NOTE(review): this hashes only the attribute *names* (frozenset of
        # the dict iterates its keys), not their values. Equal objects still
        # hash equal, which satisfies the hash contract.
        return hash(frozenset(self.__dict__))

    def __ne__(self, other):
        return not self.__eq__(other)

    def request(self, method, uri, **kwargs):
        """
        Send an HTTP request to the resource.

        Returns ``(response, parsed_json)``; DELETE responses have no body,
        so an empty dict is returned instead.

        :raises: a :exc:`~twilio.TwilioRestException`
        """
        if 'timeout' not in kwargs and self.timeout is not UNSET_TIMEOUT:
            kwargs['timeout'] = self.timeout

        resp = make_twilio_request(method, uri, auth=self.auth, **kwargs)

        logger.debug(resp.content)

        if method == "DELETE":
            return resp, {}
        else:
            return resp, json.loads(resp.content)

    @property
    def uri(self):
        # Full endpoint URI: "<base_uri>/<resource name>".
        format = (self.base_uri, self.name)
        return "%s/%s" % format
class InstanceResource(Resource):
    """ The object representation of an instance response from the Twilio API

    :param parent: The parent list class for this instance resource.
        For example, the parent for a :class:`~twilio.rest.resources.Call`
        would be a :class:`~twilio.rest.resources.Calls` object.
    :type parent: :class:`~twilio.rest.resources.ListResource`

    :param str sid: The 34-character unique identifier for this instance
    """

    subresources = []
    id_key = "sid"

    def __init__(self, parent, sid):
        self.parent = parent
        self.name = sid
        super(InstanceResource, self).__init__(
            parent.uri,
            parent.auth,
            parent.timeout
        )

    def load(self, entries):
        """Merge a dict of API attributes into this instance's __dict__."""
        # "from" is a Python keyword, so expose it as "from_".
        if "from" in entries.keys():
            entries["from_"] = entries["from"]
            del entries["from"]

        if "uri" in entries.keys():
            del entries["uri"]

        for key in entries.keys():
            # Convert RFC 2822 date strings into richer objects.
            if key.startswith("date_") and isinstance(entries[key], str):
                entries[key] = parse_rfc2822_date(entries[key])

        self.__dict__.update(entries)

    def load_subresources(self):
        """
        Load all subresources
        """
        for resource in self.subresources:
            list_resource = resource(
                self.uri,
                self.parent.auth,
                self.parent.timeout
            )
            self.__dict__[list_resource.key] = list_resource

    def update_instance(self, **kwargs):
        """ Make a POST request to the API to update an object's properties

        :return: None, this is purely side effecting
        :raises: a :class:`~twilio.rest.RestException` on failure
        """
        a = self.parent.update(self.name, **kwargs)
        self.load(a.__dict__)

    def delete_instance(self):
        """ Make a DELETE request to the API to delete the object

        :return: None, this is purely side effecting
        :raises: a :class:`~twilio.rest.RestException` on failure
        """
        return self.parent.delete(self.name)

    def __str__(self):
        # Only the first 5 characters of the sid, to keep logs short.
        return "<%s %s>" % (self.__class__.__name__, self.name[0:5])
class ListResource(Resource):
    """A collection resource: lists, creates, updates and deletes
    InstanceResource objects of a single type."""
    # Human-readable collection name; subclasses override (e.g. "Calls").
    name = "Resources"
    # The InstanceResource subclass that entries of this list deserialize into.
    instance = InstanceResource
    def __init__(self, *args, **kwargs):
        super(ListResource, self).__init__(*args, **kwargs)
        try:
            self.key
        except AttributeError:
            # The JSON payload key defaults to the lower-cased resource name.
            self.key = self.name.lower()
    def get(self, sid):
        """ Get an instance resource by its sid
        Usage:
        .. code-block:: python
            message = client.messages.get("SM1234")
            print message.body
        :rtype: :class:`~twilio.rest.resources.InstanceResource`
        :raises: a :exc:`~twilio.TwilioRestException` if a resource with that
            sid does not exist, or the request fails
        """
        return self.get_instance(sid)
    def get_instance(self, sid):
        """Request the specified instance resource"""
        uri = "%s/%s" % (self.uri, sid)
        resp, item = self.request("GET", uri)
        return self.load_instance(item)
    def get_instances(self, params):
        """
        Query the list resource for a list of InstanceResources.
        Raises a :exc:`~twilio.TwilioRestException` if requesting a page of
        results that does not exist.
        :param dict params: List of URL parameters to be included in request
        :param int page: The page of results to retrieve (most recent at 0)
        :param int page_size: The number of results to be returned.
        :returns: -- the list of resources
        """
        params = transform_params(params)
        resp, page = self.request("GET", self.uri, params=params)
        if self.key not in page:
            raise TwilioException("Key %s not present in response" % self.key)
        return [self.load_instance(ir) for ir in page[self.key]]
    def create_instance(self, body):
        """
        Create an InstanceResource via a POST to the List Resource
        :param dict body: Dictionary of POST data
        """
        resp, instance = self.request("POST", self.uri,
                                      data=transform_params(body))
        # Both 200 (OK) and 201 (Created) count as success here.
        if resp.status_code not in (200, 201):
            raise TwilioRestException(resp.status_code,
                                      self.uri, "Resource not created")
        return self.load_instance(instance)
    def delete_instance(self, sid):
        """
        Delete an InstanceResource via DELETE
        :param str sid: identifier of the instance to delete
        :returns: True if the API answered 204 No Content
        """
        uri = "%s/%s" % (self.uri, sid)
        resp, instance = self.request("DELETE", uri)
        return resp.status_code == 204
    def update_instance(self, sid, body):
        """
        Update an InstanceResource via a POST
        sid: string -- String identifier for the list resource
        body: dictionary -- Dict of items to POST
        """
        uri = "%s/%s" % (self.uri, sid)
        resp, entry = self.request("POST", uri, data=transform_params(body))
        return self.load_instance(entry)
    def count(self):
        """ .. deprecated:: 3.6.5
        Get the total number of instances for this resource
        Note: this query can be slow if you have many instances.
        :return: the total number of instances
        :rtype: int
        :raises: a :exc:`~twilio.TwilioRestException` if the request fails
        Example usage:
        .. code-block:: python
            print client.calls.count() # prints 323
        """
        # XXX: this should make a request with PageSize=1 to return as quickly
        # as possible
        resp, page = self.request("GET", self.uri)
        return page["total"]
    def iter(self, **kwargs):
        """ Return all instance resources using an iterator
        This will fetch a page of resources from the API and yield them in
        turn. When the page is exhausted, this will make a request to the API
        to retrieve the next page. Hence you may notice a pattern - the library
        will loop through 50 objects very quickly, but there will be a delay
        retrieving the 51st as the library must make another request to the API
        for resources.
        Example usage:
        .. code-block:: python
            for message in client.messages:
                print message.sid
        """
        params = transform_params(kwargs)
        while True:
            resp, page = self.request("GET", self.uri, params=params)
            if self.key not in page:
                # NOTE: raising StopIteration inside a generator simply ends
                # it under Python 2; PEP 479 changes this on Python 3.7+.
                raise StopIteration()
            for ir in page[self.key]:
                yield self.load_instance(ir)
            if not page.get('next_page_uri', ''):
                raise StopIteration()
            # Follow the API's pagination link: merge its query parameters
            # into the next request.
            o = urlparse(page['next_page_uri'])
            params.update(parse_qs(o.query))
    def load_instance(self, data):
        """Build an InstanceResource from a decoded payload dict and hydrate
        it (attributes and subresources)."""
        instance = self.instance(self, data[self.instance.id_key])
        instance.load(data)
        instance.load_subresources()
        return instance
    def __str__(self):
        # NOTE(review): this triggers a network request via count().
        return '<%s (%s)>' % (self.__class__.__name__, self.count())
    def list(self, **kw):
        """Query the list resource for a list of InstanceResources.
        :param int page: The page of results to retrieve (most recent at 0)
        :param int page_size: The number of results to be returned.
        """
        return self.get_instances(kw)
| gpl-2.0 |
Designist/pybuilder | setup.py | 1 | 1823 | #!/usr/bin/env python
#
# -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2015 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script allows to support installation via:
# pip install git+git://github.com/pybuilder/pybuilder.git@<branch>
#
# THIS IS A HACK, DO NOT RUN LOCALLY
#
import os
import subprocess
import sys
import glob
import shutil
# Bootstrap installer: run PyBuilder to generate the real setup.py, move the
# generated distribution next to this file, then delegate to it.
script_dir = os.path.dirname(os.path.realpath(__file__))
build_script = os.path.join(script_dir, "build.py")
exit_code = 0
try:
    # Build a distribution under target/dist/<project>.
    subprocess.check_call([build_script, "clean", "install_dependencies", "publish"])
    dist_dir = glob.glob(os.path.join(script_dir, "target", "dist", "*"))[0]
    # Move every generated file/directory next to this setup.py, replacing
    # any stale copy from a previous run.
    for src_file in glob.glob(os.path.join(dist_dir, "*")):
        file_name = os.path.basename(src_file)
        target_file_name = os.path.join(script_dir, file_name)
        if os.path.exists(target_file_name):
            if os.path.isdir(target_file_name):
                # BUG FIX: os.removedirs() only deletes *empty* directories
                # and then walks upward deleting empty parents (potentially
                # climbing out of script_dir). shutil.rmtree removes the
                # whole stale tree in place.
                shutil.rmtree(target_file_name)
            else:
                os.remove(target_file_name)
        shutil.move(src_file, script_dir)
    # Re-run the generated setup.py with the original arguments.
    setup_args = sys.argv[1:]
    subprocess.check_call(["./setup.py"] + setup_args, cwd=script_dir)
except subprocess.CalledProcessError as e:
    exit_code = e.returncode
sys.exit(exit_code)
| apache-2.0 |
steveklabnik/servo | tests/wpt/css-tests/tools/wptserve/wptserve/request.py | 136 | 16506 | import base64
import cgi
import Cookie
import os
import StringIO
import tempfile
import urlparse
import stash
from utils import HTTPException
missing = object()
class Server(object):
    """Data about the server environment

    .. attribute:: config

       Environment configuration information with information about the
       various servers running, their hostnames and ports.

    .. attribute:: stash

       Stash object holding state stored on the server between requests.
    """
    # Populated externally with the wptserve environment configuration.
    config = None
    def __init__(self, request):
        self._request = request
        self._stash = None
    @property
    def stash(self):
        # Connect lazily: most requests never touch the stash, and the
        # connection details come from the environment config.
        if self._stash is None:
            address, authkey = stash.load_env_config()
            path = self._request.url_parts.path
            self._stash = stash.Stash(path, address, authkey)
        return self._stash
class InputFile(object):
    """Seekable file-like view of the request body.

    Bytes read from the underlying ``rfile`` (typically a socket file,
    which cannot seek) are mirrored into a buffer so that earlier parts of
    the body can be re-read after a seek().
    """
    # Bodies larger than this are buffered on disk instead of in memory.
    max_buffer_size = 1024*1024
    def __init__(self, rfile, length):
        """File-like object used to provide a seekable view of request body data

        :param rfile: file-like object the raw body is read from
        :param length: total body length in bytes (from Content-Length)
        """
        self._file = rfile
        self.length = length
        # Number of bytes consumed from the underlying file so far; the
        # buffer always holds exactly these bytes.
        self._file_position = 0
        if length > self.max_buffer_size:
            self._buf = tempfile.TemporaryFile(mode="rw+b")
        else:
            self._buf = StringIO.StringIO()
    @property
    def _buf_position(self):
        # Current logical read position; never ahead of what has been
        # mirrored into the buffer.
        rv = self._buf.tell()
        assert rv <= self._file_position
        return rv
    def read(self, bytes=-1):
        """Read up to *bytes* bytes (all remaining if negative), serving
        already-buffered data first and pulling the rest from the
        underlying file."""
        assert self._buf_position <= self._file_position
        if bytes < 0:
            bytes = self.length - self._buf_position
        bytes_remaining = min(bytes, self.length - self._buf_position)
        if bytes_remaining == 0:
            return ""
        if self._buf_position != self._file_position:
            # Serve as much as possible from the already-buffered data.
            buf_bytes = min(bytes_remaining, self._file_position - self._buf_position)
            old_data = self._buf.read(buf_bytes)
            bytes_remaining -= buf_bytes
        else:
            old_data = ""
        # BUG FIX: when the request is satisfied entirely from the buffer
        # (bytes_remaining == 0) the buffer position legitimately lags the
        # file position; the original assertion rejected that case, so any
        # partial read after a seek() back raised AssertionError.
        assert bytes_remaining == 0 or self._buf_position == self._file_position, (
            "Before reading buffer position (%i) didn't match file position (%i)" %
            (self._buf_position, self._file_position))
        new_data = self._file.read(bytes_remaining)
        self._buf.write(new_data)
        self._file_position += bytes_remaining
        assert bytes_remaining == 0 or self._buf_position == self._file_position, (
            "After reading buffer position (%i) didn't match file position (%i)" %
            (self._buf_position, self._file_position))
        return old_data + new_data
    def tell(self):
        return self._buf_position
    def seek(self, offset):
        """Move the logical read position to *offset* (absolute)."""
        if offset > self.length or offset < 0:
            raise ValueError
        if offset <= self._file_position:
            # Target is already buffered: just move within the buffer.
            self._buf.seek(offset)
        else:
            # Target is past what has been read: consume up to it.
            self.read(offset - self._file_position)
    def readline(self, max_bytes=None):
        """Read one line (up to *max_bytes* bytes), first from the buffer
        and then, if needed, from the underlying file."""
        if max_bytes is None:
            max_bytes = self.length - self._buf_position
        if self._buf_position < self._file_position:
            data = self._buf.readline(max_bytes)
            if data.endswith("\n") or len(data) == max_bytes:
                return data
        else:
            data = ""
        assert self._buf_position == self._file_position
        initial_position = self._file_position
        found = False
        buf = []
        max_bytes -= len(data)
        while not found:
            # Read in small chunks, scanning each for a newline.
            readahead = self.read(min(2, max_bytes))
            max_bytes -= len(readahead)
            for i, c in enumerate(readahead):
                if c == "\n":
                    buf.append(readahead[:i+1])
                    found = True
                    break
            if not found:
                buf.append(readahead)
            if not readahead or not max_bytes:
                break
        new_data = "".join(buf)
        data += new_data
        # Rewind to just after the newline; any overshoot stays buffered.
        self.seek(initial_position + len(new_data))
        return data
    def readlines(self):
        rv = []
        while True:
            data = self.readline()
            if data:
                rv.append(data)
            else:
                break
        return rv
    def next(self):
        # Python 2 iterator protocol; iteration yields lines.
        data = self.readline()
        if data:
            return data
        else:
            raise StopIteration
    def __iter__(self):
        return self
class Request(object):
    """Object representing a HTTP request.

    .. attribute:: doc_root

       The local directory to use as a base when resolving paths

    .. attribute:: route_match

       Regexp match object from matching the request path to the route
       selected for the request.

    .. attribute:: protocol_version

       HTTP version specified in the request.

    .. attribute:: method

       HTTP method in the request.

    .. attribute:: request_path

       Request path as it appears in the HTTP request.

    .. attribute:: url_base

       The prefix part of the path; typically / unless the handler has a url_base set

    .. attribute:: url

       Absolute URL for the request.

    .. attribute:: url_parts

       Parts of the requested URL as obtained by urlparse.urlsplit(path)

    .. attribute:: request_line

       Raw request line

    .. attribute:: headers

       RequestHeaders object providing a dictionary-like representation of
       the request headers.

    .. attribute:: raw_input

       File-like object representing the body of the request.

    .. attribute:: body

       Request body as a string

    .. attribute:: GET

       MultiDict representing the parameters supplied with the request.
       Note that these may be present on non-GET requests; the name is
       chosen to be familiar to users of other systems such as PHP.

    .. attribute:: POST

       MultiDict representing the request body parameters. Most parameters
       are present as string values, but file uploads have file-like
       values.

    .. attribute:: cookies

       Cookies object representing cookies sent with the request with a
       dictionary-like interface.

    .. attribute:: auth

       Object with username and password properties representing any
       credentials supplied using HTTP authentication.

    .. attribute:: server

       Server object containing information about the server environment.
    """
    def __init__(self, request_handler):
        self.doc_root = request_handler.server.router.doc_root
        self.route_match = None # Set by the router
        self.protocol_version = request_handler.protocol_version
        self.method = request_handler.command
        # Reconstruct scheme://host:port, preferring the Host header and
        # falling back to the server's bound address.
        scheme = request_handler.server.scheme
        host = request_handler.headers.get("Host")
        port = request_handler.server.server_address[1]
        if host is None:
            host = request_handler.server.server_address[0]
        else:
            if ":" in host:
                host, port = host.split(":", 1)
        self.request_path = request_handler.path
        self.url_base = "/"
        # Absolute-form request lines (e.g. via proxies) already contain
        # the full URL.
        if self.request_path.startswith(scheme + "://"):
            self.url = request_handler.path
        else:
            self.url = "%s://%s:%s%s" % (scheme,
                                         host,
                                         port,
                                         self.request_path)
        self.url_parts = urlparse.urlsplit(self.url)
        self._raw_headers = request_handler.headers
        self.request_line = request_handler.raw_requestline
        self._headers = None
        # Wrap the socket's rfile so the body can be re-read; the length
        # comes from Content-Length (0 when absent).
        self.raw_input = InputFile(request_handler.rfile,
                                   int(self.headers.get("Content-Length", 0)))
        # Lazily-computed caches for the properties below.
        self._body = None
        self._GET = None
        self._POST = None
        self._cookies = None
        self._auth = None
        self.server = Server(self)
    def __repr__(self):
        return "<Request %s %s>" % (self.method, self.url)
    @property
    def GET(self):
        # Query-string parameters parsed into a MultiDict (cached).
        if self._GET is None:
            params = urlparse.parse_qsl(self.url_parts.query, keep_blank_values=True)
            self._GET = MultiDict()
            for key, value in params:
                self._GET.add(key, value)
        return self._GET
    @property
    def POST(self):
        # Body parameters parsed via cgi.FieldStorage (cached); the body's
        # read position is saved and restored around the parse.
        if self._POST is None:
            #Work out the post parameters
            pos = self.raw_input.tell()
            self.raw_input.seek(0)
            fs = cgi.FieldStorage(fp=self.raw_input,
                                  environ={"REQUEST_METHOD": self.method},
                                  headers=self.headers,
                                  keep_blank_values=True)
            self._POST = MultiDict.from_field_storage(fs)
            self.raw_input.seek(pos)
        return self._POST
    @property
    def cookies(self):
        # Cookie header parsed into a Cookies multidict (cached).
        if self._cookies is None:
            parser = Cookie.BaseCookie()
            cookie_headers = self.headers.get("cookie", "")
            parser.load(cookie_headers)
            cookies = Cookies()
            for key, value in parser.iteritems():
                cookies[key] = CookieValue(value)
            self._cookies = cookies
        return self._cookies
    @property
    def headers(self):
        # Raw handler headers wrapped in a case-insensitive view (cached).
        if self._headers is None:
            self._headers = RequestHeaders(self._raw_headers)
        return self._headers
    @property
    def body(self):
        # Entire request body as a string (cached); preserves read position.
        if self._body is None:
            pos = self.raw_input.tell()
            self.raw_input.seek(0)
            self._body = self.raw_input.read()
            self.raw_input.seek(pos)
        return self._body
    @property
    def auth(self):
        # HTTP authentication credentials, parsed lazily (cached).
        if self._auth is None:
            self._auth = Authentication(self.headers)
        return self._auth
class RequestHeaders(dict):
    """Dictionary-like API for accessing request headers.

    Keys are stored lower-cased and each maps to the list of raw values
    received for that header name, making lookups case-insensitive.
    """
    def __init__(self, items):
        for key, value in zip(items.keys(), items.values()):
            key = key.lower()
            if key in self:
                # BUG FIX: must use dict.__getitem__ to reach the underlying
                # list; the overridden __getitem__ returns a plain string for
                # single-value keys, so self[key].append(value) raised
                # AttributeError on the first duplicate header.
                dict.__getitem__(self, key).append(value)
            else:
                dict.__setitem__(self, key, [value])
    def __getitem__(self, key):
        """Get all headers of a certain (case-insensitive) name. If there is
        more than one, the values are returned comma separated"""
        values = dict.__getitem__(self, key.lower())
        if len(values) == 1:
            return values[0]
        else:
            return ", ".join(values)
    def __setitem__(self, name, value):
        # Parsed headers are read-only.
        raise Exception
    def get(self, key, default=None):
        """Get a string representing all headers with a particular value,
        with multiple headers separated by a comma. If no header is found
        return a default value

        :param key: The header name to look up (case-insensitive)
        :param default: The value to return in the case of no match
        """
        try:
            return self[key]
        except KeyError:
            return default
    def get_list(self, key, default=missing):
        """Get all the header values for a particular field name as
        a list"""
        try:
            return dict.__getitem__(self, key.lower())
        except KeyError:
            if default is not missing:
                return default
            else:
                raise
    def __contains__(self, key):
        # Case-insensitive membership test.
        return dict.__contains__(self, key.lower())
    def iteritems(self):
        # Yields (name, joined-value) pairs using the overridden __getitem__.
        for item in self:
            yield item, self[item]
    def itervalues(self):
        for item in self:
            yield self[item]
class CookieValue(object):
    """Representation of cookies.

    Note that cookies are considered read-only and the string value
    of the cookie will not change if you update the field values.
    However this is not enforced.

    .. attribute:: key

       The name of the cookie.

    .. attribute:: value

       The value of the cookie

    .. attribute:: expires

       The expiry date of the cookie

    .. attribute:: path

       The path of the cookie

    .. attribute:: comment

       The comment of the cookie.

    .. attribute:: domain

       The domain with which the cookie is associated

    .. attribute:: max_age

       The max-age value of the cookie.

    .. attribute:: secure

       Whether the cookie is marked as secure

    .. attribute:: httponly

       Whether the cookie is marked as httponly
    """
    def __init__(self, morsel):
        self.key = morsel.key
        self.value = morsel.value
        # Copy the morsel's standard attributes, mapping "-" to "_" so each
        # becomes a valid Python attribute name (max-age -> max_age).
        attrs = ("expires", "path",
                 "comment", "domain", "max-age",
                 "secure", "version", "httponly")
        for attr in attrs:
            setattr(self, attr.replace("-", "_"), morsel[attr])
        # Freeze the serialised form now; later attribute edits won't
        # affect it.
        self._str = morsel.OutputString()
    def __str__(self):
        return self._str
    def __repr__(self):
        return self._str
    def __eq__(self, other):
        """Equality comparison for cookies. Compares to other cookies
        based on value alone and on non-cookies based on the equality
        of self.value with the other object so that a cookie with value
        "ham" compares equal to the string "ham"
        """
        if hasattr(other, "value"):
            return self.value == other.value
        return self.value == other
class MultiDict(dict):
    """Dictionary type that holds multiple values for each
    key"""
    #TODO: this should perhaps also order the keys
    def __init__(self):
        # Deliberately do not forward to dict.__init__: instances start
        # empty and are populated via add()/__setitem__.
        pass
    def __setitem__(self, name, value):
        # Assignment replaces any existing values with a single-item list.
        dict.__setitem__(self, name, [value])
    def add(self, name, value):
        # Append to the list of values for *name*, creating it on demand.
        if name not in self:
            dict.__setitem__(self, name, [])
        dict.__getitem__(self, name).append(value)
    def __getitem__(self, key):
        """Get the first value with a given key"""
        #TODO: should this instead be the last value?
        return self.first(key)
    def first(self, key, default=missing):
        """Get the first value with a given key

        :param key: The key to lookup
        :param default: The default to return if key is
                        not found (throws if nothing is
                        specified)
        """
        values = dict.get(self, key)
        if values:
            return values[0]
        if default is not missing:
            return default
        raise KeyError
    def last(self, key, default=missing):
        """Get the last value with a given key

        :param key: The key to lookup
        :param default: The default to return if key is
                        not found (throws if nothing is
                        specified)
        """
        values = dict.get(self, key)
        if values:
            return values[-1]
        if default is not missing:
            return default
        raise KeyError
    def get_list(self, key):
        """Get all values with a given key as a list

        :param key: The key to lookup
        """
        return dict.__getitem__(self, key)
    @classmethod
    def from_field_storage(cls, fs):
        """Build a MultiDict from a cgi.FieldStorage; file uploads keep the
        field object itself, plain fields store the string value."""
        self = cls()
        if fs.list is None:
            return self
        for key in fs:
            values = fs[key]
            if not isinstance(values, list):
                values = [values]
            for value in values:
                if value.filename:
                    value = value
                else:
                    value = value.value
                self.add(key, value)
        return self
class Cookies(MultiDict):
    """MultiDict specialised for Cookie values: indexing returns the most
    recently added cookie with a given name."""
    def __init__(self):
        # Same empty-start behaviour as MultiDict.
        pass
    def __getitem__(self, key):
        # The last cookie set for a name wins, mirroring browser behaviour.
        return self.last(key)
class Authentication(object):
    """Object for dealing with HTTP Authentication

    .. attribute:: username

       The username supplied in the HTTP Authorization
       header, or None

    .. attribute:: password

       The password supplied in the HTTP Authorization
       header, or None
    """
    def __init__(self, headers):
        self.username = None
        self.password = None
        # Map of supported scheme name -> decoder; only Basic for now.
        handlers = {"Basic": self.decode_basic}
        if "authorization" in headers:
            header = headers.get("authorization")
            auth_type, data = header.split(" ", 1)
            try:
                handler = handlers[auth_type]
            except KeyError:
                raise HTTPException(400, "Unsupported authentication scheme %s" % auth_type)
            self.username, self.password = handler(data)
    def decode_basic(self, data):
        # Basic auth payload is base64("user:password").
        decoded_data = base64.decodestring(data)
        return decoded_data.split(":", 1)
| mpl-2.0 |
jasonkying/pip | tests/unit/test_locations.py | 19 | 3847 | """
locations.py tests
"""
import os
import sys
import shutil
import tempfile
import getpass
from mock import Mock
from pip.locations import distutils_scheme
if sys.platform == 'win32':
pwd = Mock()
else:
import pwd
class TestLocations:
    # Tests pip.locations with a faked user identity: tempfile, getpass,
    # os.fstat and (off Windows) os.geteuid/pwd.getpwuid are monkeypatched
    # in setup() and restored in teardown().
    def setup(self):
        self.tempdir = tempfile.mkdtemp()
        self.st_uid = 9999
        self.username = "example"
        self.patch()
    def teardown(self):
        # Restore the patched globals before removing the temp dir.
        self.revert_patch()
        shutil.rmtree(self.tempdir, ignore_errors=True)
    def patch(self):
        """ first store and then patch python methods pythons """
        # Save the originals so revert_patch() can restore them exactly.
        self.tempfile_gettempdir = tempfile.gettempdir
        self.old_os_fstat = os.fstat
        if sys.platform != 'win32':
            # os.geteuid and pwd.getpwuid are not implemented on windows
            self.old_os_geteuid = os.geteuid
            self.old_pwd_getpwuid = pwd.getpwuid
        self.old_getpass_getuser = getpass.getuser
        # now patch
        tempfile.gettempdir = lambda: self.tempdir
        getpass.getuser = lambda: self.username
        os.geteuid = lambda: self.st_uid
        os.fstat = lambda fd: self.get_mock_fstat(fd)
        if sys.platform != 'win32':
            pwd.getpwuid = lambda uid: self.get_mock_getpwuid(uid)
    def revert_patch(self):
        """ revert the patches to python methods """
        tempfile.gettempdir = self.tempfile_gettempdir
        getpass.getuser = self.old_getpass_getuser
        if sys.platform != 'win32':
            # os.geteuid and pwd.getpwuid are not implemented on windows
            os.geteuid = self.old_os_geteuid
            pwd.getpwuid = self.old_pwd_getpwuid
        os.fstat = self.old_os_fstat
    def get_mock_fstat(self, fd):
        """ returns a basic mock fstat call result.
        Currently only the st_uid attribute has been set.
        """
        result = Mock()
        result.st_uid = self.st_uid
        return result
    def get_mock_getpwuid(self, uid):
        """ returns a basic mock pwd.getpwuid call result.
        Currently only the pw_name attribute has been set.
        """
        result = Mock()
        result.pw_name = self.username
        return result
class TestDisutilsScheme:
    # NOTE: class name preserves the original's misspelling of "distutils".
    def test_root_modifies_appropiately(self):
        # With root=, every path must be the absolute path re-anchored
        # under that root.
        norm_scheme = distutils_scheme("example")
        root_scheme = distutils_scheme("example", root="/test/root/")
        for key, value in norm_scheme.items():
            expected = os.path.join("/test/root/", os.path.abspath(value)[1:])
            assert os.path.abspath(root_scheme[key]) == expected
    def test_distutils_config_file_read(self, tmpdir, monkeypatch):
        # A setup.cfg [install] install-scripts value must flow through to
        # the computed scheme.
        f = tmpdir.mkdir("config").join("setup.cfg")
        f.write("[install]\ninstall-scripts=/somewhere/else")
        from distutils.dist import Distribution
        # patch the function that returns what config files are present
        monkeypatch.setattr(
            Distribution,
            'find_config_files',
            lambda self: [f],
        )
        scheme = distutils_scheme('example')
        assert scheme['scripts'] == '/somewhere/else'
    # when we request install-lib, we should install everything (.py &
    # .so) into that path; i.e. ensure platlib & purelib are set to
    # this path
    def test_install_lib_takes_precedence(self, tmpdir, monkeypatch):
        f = tmpdir.mkdir("config").join("setup.cfg")
        f.write("[install]\ninstall-lib=/somewhere/else/")
        from distutils.dist import Distribution
        # patch the function that returns what config files are present
        monkeypatch.setattr(
            Distribution,
            'find_config_files',
            lambda self: [f],
        )
        scheme = distutils_scheme('example')
        assert scheme['platlib'] == '/somewhere/else/'
        assert scheme['purelib'] == '/somewhere/else/'
| mit |
nens/sufriblib | setup.py | 1 | 1083 | from setuptools import setup
version = '0.5.dev0'
# Long description is assembled from the project's doc files.
long_description = '\n\n'.join([
    open('README.rst').read(),
    open('CREDITS.rst').read(),
    open('CHANGES.rst').read(),
])
# BUG FIX: the original had a trailing comma after the closing bracket
# ("install_requires = [...],"), which made install_requires a 1-tuple
# *containing* the list instead of the list of requirement strings that
# setuptools expects.
install_requires = [
    'setuptools',
    'pyproj',
]
tests_require = [
    'nose',
    'coverage',
]
setup(name='sufriblib',
      version=version,
      description="A library for working with SUFRIB 2.1 files (.RIB and .RMB files, sewer system measurement data)",
      long_description=long_description,
      # Get strings from http://www.python.org/pypi?%3Aaction=list_classifiers
      classifiers=[],
      keywords=[],
      author='Remco Gerlich',
      author_email='remco.gerlich@nelen-schuurmans.nl',
      url='',
      license='GPL',
      packages=['sufriblib'],
      include_package_data=True,
      zip_safe=False,
      install_requires=install_requires,
      tests_require=tests_require,
      extras_require={'test': tests_require},
      entry_points={
          'console_scripts': [
              'sufribcat=sufriblib.scripts:sufribcat',
          ]},
      )
| gpl-3.0 |
pdevetto/super-duper-disco | movies/migrations/0001_initial.py | 1 | 1056 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-11-28 14:30
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates Director and Movie, with a
    # cascading ForeignKey from Movie to Director. Applied migrations must
    # not be hand-edited, or the migration state diverges from the database.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Director',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='Movie',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('year', models.DateTimeField(verbose_name='Movie year')),
                # Deleting a Director deletes their movies (CASCADE).
                ('director', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='movies.Director')),
            ],
        ),
    ]
| gpl-3.0 |
Dfelker/ansible | lib/ansible/plugins/shell/powershell.py | 34 | 5527 | # (c) 2014, Chris Church <chris@ninemoreminutes.com>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import os
import re
import random
import shlex
import time
# Base argument list for every PowerShell invocation.
_common_args = ['PowerShell', '-NoProfile', '-NonInteractive']
# Primarily for testing, allow explicitly specifying PowerShell version via
# an environment variable.
_powershell_version = os.environ.get('POWERSHELL_VERSION', None)
if _powershell_version:
    _common_args = ['PowerShell', '-Version', _powershell_version] + _common_args[1:]
class ShellModule(object):
    """Shell plugin that translates Ansible's generic shell operations into
    base64-encoded PowerShell commands for Windows targets."""
    def env_prefix(self, **kwargs):
        # Environment variables are not injected via a prefix on Windows.
        return ''
    def join_path(self, *args):
        """Join path components and normalise to Windows backslashes."""
        return os.path.join(*args).replace('/', '\\')
    def path_has_trailing_slash(self, path):
        # Allow Windows paths to be specified using either slash.
        return path.endswith('/') or path.endswith('\\')
    def chmod(self, mode, path):
        # POSIX modes have no PowerShell equivalent; no-op.
        return ''
    def remove(self, path, recurse=False):
        """Return a command that deletes *path* (recursively if requested)."""
        path = self._escape(path)
        if recurse:
            return self._encode_script('''Remove-Item "%s" -Force -Recurse;''' % path)
        else:
            return self._encode_script('''Remove-Item "%s" -Force;''' % path)
    def mkdtemp(self, basefile, system=False, mode=None):
        """Return a command creating %TEMP%\\<basefile> and printing its path."""
        basefile = self._escape(basefile)
        # FIXME: Support system temp path!
        return self._encode_script('''(New-Item -Type Directory -Path $env:temp -Name "%s").FullName | Write-Host -Separator '';''' % basefile)
    def expand_user(self, user_home_path):
        # PowerShell only supports "~" (not "~username"). Resolve-Path ~ does
        # not seem to work remotely, though by default we are always starting
        # in the user's home directory.
        if user_home_path == '~':
            script = 'Write-Host (Get-Location).Path'
        elif user_home_path.startswith('~\\'):
            # BUG FIX: _escape is an instance method; the original called the
            # bare name "_escape(...)" here and below, raising NameError.
            script = 'Write-Host ((Get-Location).Path + "%s")' % self._escape(user_home_path[1:])
        else:
            script = 'Write-Host "%s"' % self._escape(user_home_path)
        return self._encode_script(script)
    def checksum(self, path, *args, **kwargs):
        """Return a command printing the SHA-1 of a file, "3" for a
        directory, or "1" when the path does not exist."""
        path = self._escape(path)
        script = '''
            If (Test-Path -PathType Leaf "%(path)s")
            {
                $sp = new-object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider;
                $fp = [System.IO.File]::Open("%(path)s", [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read);
                [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower();
                $fp.Dispose();
            }
            ElseIf (Test-Path -PathType Container "%(path)s")
            {
                Write-Host "3";
            }
            Else
            {
                Write-Host "1";
            }
        ''' % dict(path=path)
        return self._encode_script(script)
    def build_module_command(self, env_string, shebang, cmd, rm_tmp=None):
        """Build the command line that runs a module's .ps1 file, optionally
        removing the temp directory afterwards."""
        cmd = cmd.encode('utf-8')
        cmd_parts = shlex.split(cmd, posix=False)
        if not cmd_parts[0].lower().endswith('.ps1'):
            cmd_parts[0] = '%s.ps1' % cmd_parts[0]
        script = self._build_file_cmd(cmd_parts)
        if rm_tmp:
            rm_tmp = self._escape(rm_tmp)
            script = '%s; Remove-Item "%s" -Force -Recurse;' % (script, rm_tmp)
        return self._encode_script(script)
    def _escape(self, value, include_vars=False):
        '''Return value escaped for use in PowerShell command.'''
        # http://www.techotopia.com/index.php/Windows_PowerShell_1.0_String_Quoting_and_Escape_Sequences
        # http://stackoverflow.com/questions/764360/a-list-of-string-replacements-in-python
        subs = [('\n', '`n'), ('\r', '`r'), ('\t', '`t'), ('\a', '`a'),
                ('\b', '`b'), ('\f', '`f'), ('\v', '`v'), ('"', '`"'),
                ('\'', '`\''), ('`', '``'), ('\x00', '`0')]
        if include_vars:
            subs.append(('$', '`$'))
        # Build one alternation regex and map each match to its replacement
        # by group index.
        pattern = '|'.join('(%s)' % re.escape(p) for p, s in subs)
        substs = [s for p, s in subs]
        replace = lambda m: substs[m.lastindex - 1]
        return re.sub(pattern, replace, value)
    def _encode_script(self, script, as_list=False):
        '''Convert a PowerShell script to a single base64-encoded command.'''
        # Strip blank lines, then encode as UTF-16-LE as required by
        # PowerShell's -EncodedCommand.
        script = '\n'.join([x.strip() for x in script.splitlines() if x.strip()])
        encoded_script = base64.b64encode(script.encode('utf-16-le'))
        cmd_parts = _common_args + ['-EncodedCommand', encoded_script]
        if as_list:
            return cmd_parts
        return ' '.join(cmd_parts)
    def _build_file_cmd(self, cmd_parts):
        '''Build command line to run a file, given list of file name plus args.'''
        return ' '.join(_common_args + ['-ExecutionPolicy', 'Unrestricted', '-File'] + ['"%s"' % x for x in cmd_parts])
| gpl-3.0 |
fafaman/scrapy | scrapy/loader/processors.py | 145 | 2850 | """
This module provides some commonly used processors for Item Loaders.
See documentation in docs/topics/loaders.rst
"""
from scrapy.utils.misc import arg_to_iter
from scrapy.utils.datatypes import MergeDict
from .common import wrap_loader_context
class MapCompose(object):
    """Apply a pipeline of functions element-wise.

    The input value is coerced to an iterable; each function is applied to
    every element, and its (iterable-coerced) results are flattened into
    the input of the next function.
    """
    def __init__(self, *functions, **default_loader_context):
        self.functions = functions
        self.default_loader_context = default_loader_context
    def __call__(self, value, loader_context=None):
        values = arg_to_iter(value)
        # A per-call loader context overlays the defaults.
        if loader_context:
            context = MergeDict(loader_context, self.default_loader_context)
        else:
            context = self.default_loader_context
        wrapped = [wrap_loader_context(f, context) for f in self.functions]
        for func in wrapped:
            collected = []
            for element in values:
                collected.extend(arg_to_iter(func(element)))
            values = collected
        return values
class Compose(object):
    """Chain functions, feeding each the previous one's result.

    By default the chain stops as soon as a function returns None; pass
    stop_on_none=False to keep going.
    """
    def __init__(self, *functions, **default_loader_context):
        self.functions = functions
        self.stop_on_none = default_loader_context.get('stop_on_none', True)
        self.default_loader_context = default_loader_context
    def __call__(self, value, loader_context=None):
        # A per-call loader context overlays the defaults.
        if loader_context:
            context = MergeDict(loader_context, self.default_loader_context)
        else:
            context = self.default_loader_context
        wrapped = [wrap_loader_context(f, context) for f in self.functions]
        for func in wrapped:
            if value is None and self.stop_on_none:
                break
            value = func(value)
        return value
class TakeFirst(object):
    """Return the first value that is neither None nor the empty string
    (other falsy values such as 0 are accepted)."""
    def __call__(self, values):
        for candidate in values:
            if candidate is None:
                continue
            if candidate != '':
                return candidate
class Identity(object):
    """Processor that returns its input unchanged."""
    def __call__(self, values):
        return values
class SelectJmes(object):
    """
    Query the input string for the jmespath (given at instantiation),
    and return the answer
    Requires : jmespath(https://github.com/jmespath/jmespath)
    Note: SelectJmes accepts only one input element at a time.
    """
    def __init__(self, json_path):
        self.json_path = json_path
        # Imported lazily so jmespath is only required when this processor
        # is actually used; the path is compiled once up front.
        import jmespath
        self.compiled_path = jmespath.compile(self.json_path)
    def __call__(self, value):
        """Query value for the jmespath query and return answer

        :param value: a data structure (dict, list) to extract from
        :return: Element extracted according to jmespath query
        """
        return self.compiled_path.search(value)
class Join(object):
    """Join the input values into one string using *separator*
    (a single space by default)."""
    def __init__(self, separator=u' '):
        self.separator = separator
    def __call__(self, values):
        return self.separator.join(values)
| bsd-3-clause |
jun66j5/trac-ja | sample-plugins/workflow/VoteOperation.py | 1 | 2994 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2013 Edgewall Software
# Copyright (C) 2007 Eli Carter <retracile@gmail.com>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/.
from genshi.builder import tag
from trac.core import implements,Component
from trac.ticket.api import ITicketActionController
from trac.ticket.default_workflow import ConfigurableTicketWorkflow
from trac.ticket.model import Priority, Ticket
#from trac.perm import IPermissionRequestor # (TODO)
revision = "$Rev: 6326 $"
url = "$URL: https://svn.edgewall.org/repos/trac/trunk/sample-plugins/workflow/VoteOperation.py $"
class VoteOperation(Component):
    """Provides a simplistic vote feature.
    This is a sample action controller illustrating how to create additional
    ''operations''.
    Don't forget to add `VoteOperation` to the workflow
    option in the `[ticket]` section in TracIni.
    If there is no other workflow option, the line will look like this:
    {{{
    workflow = ConfigurableTicketWorkflow,VoteOperation
    }}}
    """
    implements(ITicketActionController)
    def get_ticket_actions(self, req, ticket):
        # Delegate to the configurable workflow: expose every action that
        # declares the custom 'vote' operation for this request/ticket.
        controller = ConfigurableTicketWorkflow(self.env)
        return controller.get_actions_by_operation_for_req(req, ticket, 'vote')
    def get_all_status(self):
        # This controller introduces no new ticket states of its own.
        return []
    def render_ticket_action_control(self, req, ticket, action):
        # Render a <select> offering 'for'/'against', preserving any
        # previously submitted choice ('for' by default).
        id = 'vote_%s_result' % (action, )
        selected_value = req.args.get(id, 'for')
        options = ['for', 'against']
        return ("vote",
                tag.select([tag.option(x, selected=(x == selected_value or
                                                    None))
                            for x in options], name=id, id=id),
                "Vote on the issue, raising or lowering its priority")
    def get_ticket_changes(self, req, ticket, action):
        # Translate the vote into a priority change: 'for' raises priority
        # one step (lower numeric value), 'against' lowers it, clamped to
        # the valid range of configured priorities.
        id = 'vote_%s_result' % (action, )
        selected = req.args.get(id, 'for')
        priorities = list(Priority.select(self.env))
        # Re-read the stored ticket so pending unsaved edits don't skew
        # the computed current priority.
        orig_ticket = Ticket(self.env, ticket.id)
        current_priority = int(Priority(self.env, name=
                                        orig_ticket['priority']).value)
        if selected == 'for':
            # priorities are 1-based, not 0-based
            new_value = max(1, current_priority - 1)
        else:
            maxval = max([int(p.value) for p in priorities])
            new_value = min(maxval, current_priority + 1)
        return {'priority': [p.name for p in priorities
                             if int(p.value) == new_value][0]}
    def apply_action_side_effects(self, req, ticket, action):
        # Voting has no side effects beyond the priority change above.
        pass
| bsd-3-clause |
mderomph-coolblue/dd-agent | setup.py | 4 | 5017 | # stdlib
from datetime import date
import os
import sys
# 3p
from setuptools import find_packages, setup
from requests.certs import where
# project
from config import get_version
from utils.jmx import JMX_FETCH_JAR_NAME
# Extra arguments to pass to the setup function
extra_args = {}
# Prereqs of the build. Won't get installed when deploying the egg.
setup_requires = []
# Prereqs of the install. Will install when deploying the egg.
install_requires = []
# Modified on mac
app_name = 'datadog-agent'
# plist (used only on mac)
plist = None
if sys.platform == 'win32':
from glob import glob
# noqa for flake8, these imports are probably here to force packaging of these modules
import py2exe # noqa
import pysnmp_mibs # noqa
import pyVim # noqa
import pyVmomi # noqa
# That's just a copy/paste of requirements.txt
for reqfile in ('requirements.txt', 'requirements-opt.txt'):
with open(reqfile) as f:
for line in f:
line = line.strip()
if line.startswith('#') or not line:
continue
# we skip psycopg2 now because don't want to install PG
# on windows
if 'psycopg2' in line:
continue
install_requires.append(line)
# windows-specific deps
install_requires.append('pywin32==217')
# Modules to force-include in the exe
include_modules = [
# 3p
'wmi',
'win32service',
'win32serviceutil',
'win32event',
'simplejson',
'adodbapi',
'pycurl',
'tornado.curl_httpclient',
'pymongo',
'pymysql',
'psutil',
'pg8000',
'redis',
'requests',
'pysnmp',
'pysnmp.smi.mibs.*',
'pysnmp.smi.mibs.instances.*',
'pysnmp_mibs.*',
'pysnmp.entity.rfc3413.oneliner.*',
'pyVim.*',
'pyVmomi.*',
'paramiko',
'Crypto',
'winrandom',
'uptime',
'pythoncom',
'dns.resolver',
'dns.rdtypes.ANY.*',
'dns.rdtypes.IN.*',
# agent
'checks.network_checks',
'checks.wmi_check',
'checks.libs.vmware.*',
'httplib2',
'utils.containers',
# pup
'tornado.websocket',
'tornado.web',
'tornado.ioloop',
]
    class Target(object):
        """py2exe build-target description (used below for the agent
        service executable).

        Arbitrary keyword arguments (name, modules, dest_base, ...) are
        copied verbatim onto the instance, as py2exe expects; common
        Datadog metadata is then filled in for every target.
        """
        def __init__(self, **kw):
            self.__dict__.update(kw)
            # Shared metadata stamped onto every build target.
            self.version = get_version()
            self.company_name = 'Datadog, Inc.'
            self.copyright = 'Copyright 2013 Datadog, Inc.'
            self.cmdline_style = 'pywin32'
agent_svc = Target(name='Datadog Agent', modules='win32.agent', dest_base='ddagent')
extra_args = {
'options': {
'py2exe': {
'includes': ','.join(include_modules),
'optimize': 0,
'compressed': True,
'bundle_files': 3,
'excludes': ['numpy'],
'dll_excludes': ["IPHLPAPI.DLL", "NSI.dll", "WINNSI.DLL", "WTSAPI32.dll"],
'ascii':False,
},
},
'console': ['win32\shell.py'],
'service': [agent_svc],
'windows': [{'script': 'win32\gui.py',
'dest_base': "agent-manager",
'uac_info': "requireAdministrator", # The manager needs to be administrator to stop/start the service
'icon_resources': [(1, r"packaging\datadog-agent\win32\install_files\dd_agent_win_256.ico")],
}],
'data_files': [
("Microsoft.VC90.CRT", glob(r'C:\Python27\redist\*.*')),
('jmxfetch', [r'checks\libs\%s' % JMX_FETCH_JAR_NAME]),
('gohai', [r'gohai\gohai.exe']),
('', [where()]), # CA certificates bundled with `requests`
],
}
elif sys.platform == 'darwin':
app_name = 'Datadog Agent'
from plistlib import Plist
plist = Plist.fromFile(os.path.dirname(os.path.realpath(__file__)) + '/packaging/Info.plist')
plist.update(dict(
CFBundleGetInfoString="{0}, Copyright (c) 2009-{1}, Datadog Inc.".format(
get_version(), date.today().year),
CFBundleVersion=get_version()
))
extra_args = {
'app': ['gui.py'],
'data_files': [
'images',
'status.html',
],
'options': {
'py2app': {
'optimize': 0,
'iconfile': 'packaging/Agent.icns',
'plist': plist
}
}
}
setup(
name=app_name,
version=get_version(),
description="DevOps' best friend",
author='DataDog',
author_email='dev@datadoghq.com',
url='http://www.datadoghq.com',
install_requires=install_requires,
setup_requires=setup_requires,
packages=find_packages(),
include_package_data=True,
test_suite='nose.collector',
zip_safe=False,
**extra_args
)
| bsd-3-clause |
sinhrks/chainer | chainer/links/connection/linear.py | 2 | 2272 | import numpy
from chainer.functions.connection import linear
from chainer import link
class Linear(link.Link):
    """Linear layer (a.k.a. fully-connected layer).
    This is a link that wraps the :func:`~chainer.functions.linear` function,
    and holds a weight matrix ``W`` and optionally a bias vector ``b`` as
    parameters.
    The weight matrix ``W`` is initialized with i.i.d. Gaussian samples, each
    of which has zero mean and deviation :math:`\\sqrt{1/\\text{in_size}}`. The
    bias vector ``b`` is of size ``out_size``. Each element is initialized with
    the ``bias`` value. If ``nobias`` argument is set to True, then this link
    does not hold a bias vector.
    Args:
        in_size (int): Dimension of input vectors.
        out_size (int): Dimension of output vectors.
        wscale (float): Scaling factor of the weight matrix.
        bias (float): Initial bias value.
        nobias (bool): If True, then this function does not use the bias.
        initialW (2-D array): Initial weight value. If ``None``, then this
            function uses to initialize ``wscale``.
        initial_bias (1-D array): Initial bias value. If ``None``, then this
            function uses to initialize ``bias``.
    .. seealso:: :func:`~chainer.functions.linear`
    Attributes:
        W (~chainer.Variable): Weight parameter.
        b (~chainer.Variable): Bias parameter.
    """
    def __init__(self, in_size, out_size, wscale=1, bias=0, nobias=False,
                 initialW=None, initial_bias=None):
        # Register W as an (out_size, in_size) parameter of this link.
        super(Linear, self).__init__(W=(out_size, in_size))
        if initialW is None:
            # Default init: i.i.d. Gaussian with std wscale*sqrt(1/in_size).
            initialW = numpy.random.normal(
                0, wscale * numpy.sqrt(1. / in_size), (out_size, in_size))
        self.W.data[...] = initialW
        if nobias:
            # No bias parameter: forward pass becomes x . W^T only.
            self.b = None
        else:
            self.add_param('b', out_size)
            if initial_bias is None:
                initial_bias = bias
            # Broadcast scalar (or copy array) into the bias vector.
            self.b.data[...] = initial_bias
    def __call__(self, x):
        """Applies the linear layer.
        Args:
            x (~chainer.Variable): Batch of input vectors.
        Returns:
            ~chainer.Variable: Output of the linear layer.
        """
        return linear.linear(x, self.W, self.b)
| mit |
CloudVLab/professional-services | tools/gsuite-exporter/setup.py | 1 | 2139 | # Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages
from os import path
from io import open
here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

# Package metadata and entry point for the gsuite-exporter CLI.
setup(
    name='gsuite-exporter',
    version='0.0.3',
    description='GSuite Admin API Exporter',
    long_description=long_description,
    long_description_content_type='text/markdown',
    author='Google Inc.',
    author_email='ocervello@google.com',
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    keywords='gsuite exporter stackdriver',
    install_requires=[
        'google-api-python-client',
        'python-dateutil',
        'requests'
    ],
    entry_points={
        'console_scripts': [
            'gsuite-exporter=gsuite_exporter.cli:main',
        ],
    },
    python_requires='>=2.7'
)
| apache-2.0 |
hughperkins/clBLAS | src/scripts/perf/errorHandler.py | 29 | 2824 | # ########################################################################
# Copyright 2013 Advanced Micro Devices, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ########################################################################
#---------------------------------File Note------------------------------------
#Date: 27 January 2012
#This file defines all the error code and error handler mechanism
#--------------------------------Global Variables------------------------------
UINS_CAT = 100
WIN_REG_SEARCH_FAIL = 101
UNIMPL_APP = 200
SYS_ERR = 300
TIME_OUT = 400
DIM_INCO_FILE_FMT = 500 #incorrect file format for dimension
DIM_FILE_VAL_INCO = 501 #Value coming from dimension file is incorrect
#__errorTable : Defines all the errors in the system. Add a new error code and
# error message here
"""Error table is defined as private to this module"""
errorTable = {
UINS_CAT: 'Application is not able to find the installed catalyst',
WIN_REG_SEARCH_FAIL: 'Windows Registry search for catalysts version is unsuccessful',
UNIMPL_APP: 'Unimplemented Application requirement',
SYS_ERR: 'System error occurred - Please check the source code',
TIME_OUT: 'Operation is timed out',
DIM_INCO_FILE_FMT: 'incorrect file format for dimension - Not able to find dimension',
DIM_FILE_VAL_INCO: 'Value coming from dimension file is incorrect'
}
#--------------------------------Class Definitions-----------------------------
class TimeoutException(Exception):
    """Raised when an operation exceeds its allotted time."""
    pass
"""Base class for handling all the application generated exception"""
class ApplicationException(Exception):
    """Base class for all application-generated errors.

    Builds a "<file>-<errno>-<message>" detail string from the module's
    ``errorTable`` entry for ``errno`` plus an optional extra message.
    """
    def __init__(self, fileName, errno, msg=""):
        self.fileName = fileName
        self.errno = errno
        # Canonical description for this code, plus any caller detail.
        self.mess = errorTable[errno] + msg
        self.message = 'Application ERROR:' + repr(self._detail())

    def _detail(self):
        # Shared formatting used by both self.message and __str__.
        return '%s-%s-%s' % (self.fileName, self.errno, self.mess)

    def __str__(self):
        return repr(self._detail())
#--------------------------------Global Function-------------------------------
if __name__ == '__main__':
    # Smoke test: raising ApplicationException with a known code should
    # be caught by the (deliberately broad) except below.
    #print errorTable
    try:
        raise ApplicationException('errorHandler', SYS_ERR)
    except:
        print 'Generic exception'
| apache-2.0 |
houzhenggang/hiwifi-openwrt-HC5661-HC5761 | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/root-ralink/usr/lib/python2.7/trace.py | 153 | 29890 | #!/usr/bin/env python
# portions copyright 2001, Autonomous Zones Industries, Inc., all rights...
# err... reserved and offered to the public under the terms of the
# Python 2.2 license.
# Author: Zooko O'Whielacronx
# http://zooko.com/
# mailto:zooko@zooko.com
#
# Copyright 2000, Mojam Media, Inc., all rights reserved.
# Author: Skip Montanaro
#
# Copyright 1999, Bioreason, Inc., all rights reserved.
# Author: Andrew Dalke
#
# Copyright 1995-1997, Automatrix, Inc., all rights reserved.
# Author: Skip Montanaro
#
# Copyright 1991-1995, Stichting Mathematisch Centrum, all rights reserved.
#
#
# Permission to use, copy, modify, and distribute this Python software and
# its associated documentation for any purpose without fee is hereby
# granted, provided that the above copyright notice appears in all copies,
# and that both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of neither Automatrix,
# Bioreason or Mojam Media be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior permission.
#
"""program/module to trace Python program or function execution
Sample use, command line:
trace.py -c -f counts --ignore-dir '$prefix' spam.py eggs
trace.py -t --ignore-dir '$prefix' spam.py eggs
trace.py --trackcalls spam.py eggs
Sample use, programmatically
import sys
# create a Trace object, telling it what to ignore, and whether to
# do tracing or line-counting or both.
tracer = trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix,], trace=0,
count=1)
# run the new command using the given tracer
tracer.run('main()')
# make a report, placing output in /tmp
r = tracer.results()
r.write_results(show_missing=True, coverdir="/tmp")
"""
import linecache
import os
import re
import sys
import time
import token
import tokenize
import inspect
import gc
import dis
try:
import cPickle
pickle = cPickle
except ImportError:
import pickle
try:
    import threading
except ImportError:
    # No threading support: install/remove the trace function for the
    # main interpreter thread only.
    _settrace = sys.settrace
    def _unsettrace():
        sys.settrace(None)
else:
    def _settrace(func):
        # Install the trace function for future threads as well as the
        # current one.
        threading.settrace(func)
        sys.settrace(func)
    def _unsettrace():
        sys.settrace(None)
        threading.settrace(None)
def usage(outfile):
    """Write the command-line help text for this script to *outfile*."""
    # NOTE: the text below is a runtime string (shown to users); only the
    # program name is interpolated.
    outfile.write("""Usage: %s [OPTIONS] <file> [ARGS]
Meta-options:
--help Display this help then exit.
--version Output version information then exit.
Otherwise, exactly one of the following three options must be given:
-t, --trace Print each line to sys.stdout before it is executed.
-c, --count Count the number of times each line is executed
and write the counts to <module>.cover for each
module executed, in the module's directory.
See also `--coverdir', `--file', `--no-report' below.
-l, --listfuncs Keep track of which functions are executed at least
once and write the results to sys.stdout after the
program exits.
-T, --trackcalls Keep track of caller/called pairs and write the
results to sys.stdout after the program exits.
-r, --report Generate a report from a counts file; do not execute
any code. `--file' must specify the results file to
read, which must have been created in a previous run
with `--count --file=FILE'.
Modifiers:
-f, --file=<file> File to accumulate counts over several runs.
-R, --no-report Do not generate the coverage report files.
Useful if you want to accumulate over several runs.
-C, --coverdir=<dir> Directory where the report files. The coverage
report for <package>.<module> is written to file
<dir>/<package>/<module>.cover.
-m, --missing Annotate executable lines that were not executed
with '>>>>>> '.
-s, --summary Write a brief summary on stdout for each file.
(Can only be used with --count or --report.)
-g, --timing Prefix each line with the time since the program started.
Only used while tracing.
Filters, may be repeated multiple times:
--ignore-module=<mod> Ignore the given module(s) and its submodules
(if it is a package). Accepts comma separated
list of module names
--ignore-dir=<dir> Ignore files in the given directory (multiple
directories can be joined by os.pathsep).
""" % sys.argv[0])
PRAGMA_NOCOVER = "#pragma NO COVER"
# Simple rx to find lines with no code.
rx_blank = re.compile(r'^\s*(#.*)?$')
class Ignore:
    """Decide whether a module should be excluded from tracing, based on
    ignored module names (including their submodules) and ignored
    directories.  Decisions are memoized per module name in self._ignore.
    """
    def __init__(self, modules = None, dirs = None):
        self._mods = modules or []
        self._dirs = dirs or []
        self._dirs = map(os.path.normpath, self._dirs)
        # '<string>' code (from exec/eval) is always ignored.
        self._ignore = { '<string>': 1 }
    def names(self, filename, modulename):
        # Return 1 if (filename, modulename) should be ignored, else 0.
        if modulename in self._ignore:
            return self._ignore[modulename]
        # haven't seen this one before, so see if the module name is
        # on the ignore list.  Need to take some care since ignoring
        # "cmp" musn't mean ignoring "cmpcache" but ignoring
        # "Spam" must also mean ignoring "Spam.Eggs".
        for mod in self._mods:
            if mod == modulename:  # Identical names, so ignore
                self._ignore[modulename] = 1
                return 1
            # check if the module is a proper submodule of something on
            # the ignore list
            n = len(mod)
            # (will not overflow since if the first n characters are the
            # same and the name has not already occurred, then the size
            # of "name" is greater than that of "mod")
            if mod == modulename[:n] and modulename[n] == '.':
                self._ignore[modulename] = 1
                return 1
        # Now check that __file__ isn't in one of the directories
        if filename is None:
            # must be a built-in, so we must ignore
            self._ignore[modulename] = 1
            return 1
        # Ignore a file when it contains one of the ignorable paths
        for d in self._dirs:
            # The '+ os.sep' is to ensure that d is a parent directory,
            # as compared to cases like:
            #  d = "/usr/local"
            #  filename = "/usr/local.py"
            # or
            #  d = "/usr/local.py"
            #  filename = "/usr/local.py"
            if filename.startswith(d + os.sep):
                self._ignore[modulename] = 1
                return 1
        # Tried the different ways, so we don't ignore this module
        self._ignore[modulename] = 0
        return 0
def modname(path):
    """Return a plausible module name for the path."""
    # Strip directories and the (last) file extension:
    # "/a/b/c.py" -> "c"; note "x.tar.gz" -> "x.tar".
    base = os.path.basename(path)
    filename, ext = os.path.splitext(base)
    return filename
def fullmodname(path):
    """Return a plausible module name for the path."""
    # If the file 'path' is part of a package, then the filename isn't
    # enough to uniquely identify it.  Try to do the right thing by
    # looking in sys.path for the longest matching prefix.  We'll
    # assume that the rest is the package name.
    comparepath = os.path.normcase(path)
    longest = ""
    for dir in sys.path:
        dir = os.path.normcase(dir)
        if comparepath.startswith(dir) and comparepath[len(dir)] == os.sep:
            if len(dir) > len(longest):
                longest = dir
    if longest:
        # Drop the matched sys.path prefix plus its separator.
        base = path[len(longest) + 1:]
    else:
        base = path
    # the drive letter is never part of the module name
    drive, base = os.path.splitdrive(base)
    # Turn path separators into package dots, e.g. "pkg/mod.py" ->
    # "pkg.mod".
    base = base.replace(os.sep, ".")
    if os.altsep:
        base = base.replace(os.altsep, ".")
    filename, ext = os.path.splitext(base)
    return filename.lstrip(".")
class CoverageResults:
    """Container for trace results: per-line execution counts, the set of
    called functions, and caller/callee pairs.  Results can be merged
    across runs via a pickled counts file and written out as annotated
    .cover files.  (Python 2 module: uses `print >>` and old except
    syntax.)
    """
    def __init__(self, counts=None, calledfuncs=None, infile=None,
                 callers=None, outfile=None):
        self.counts = counts
        if self.counts is None:
            self.counts = {}
        self.counter = self.counts.copy() # map (filename, lineno) to count
        self.calledfuncs = calledfuncs
        if self.calledfuncs is None:
            self.calledfuncs = {}
        self.calledfuncs = self.calledfuncs.copy()
        self.callers = callers
        if self.callers is None:
            self.callers = {}
        self.callers = self.callers.copy()
        self.infile = infile
        self.outfile = outfile
        if self.infile:
            # Try to merge existing counts file.
            try:
                counts, calledfuncs, callers = \
                        pickle.load(open(self.infile, 'rb'))
                self.update(self.__class__(counts, calledfuncs, callers))
            except (IOError, EOFError, ValueError), err:
                print >> sys.stderr, ("Skipping counts file %r: %s"
                                      % (self.infile, err))
    def update(self, other):
        """Merge in the data from another CoverageResults"""
        counts = self.counts
        calledfuncs = self.calledfuncs
        callers = self.callers
        other_counts = other.counts
        other_calledfuncs = other.calledfuncs
        other_callers = other.callers
        # Line counts add up; called-function and caller sets union.
        for key in other_counts.keys():
            counts[key] = counts.get(key, 0) + other_counts[key]
        for key in other_calledfuncs.keys():
            calledfuncs[key] = 1
        for key in other_callers.keys():
            callers[key] = 1
    def write_results(self, show_missing=True, summary=False, coverdir=None):
        """Write the collected results as .cover files (plus optional
        stdout reports).

        @param coverdir directory for the report files; if None, each
               module's .cover file is written next to the module.
        """
        if self.calledfuncs:
            print
            print "functions called:"
            calls = self.calledfuncs.keys()
            calls.sort()
            for filename, modulename, funcname in calls:
                print ("filename: %s, modulename: %s, funcname: %s"
                       % (filename, modulename, funcname))
        if self.callers:
            print
            print "calling relationships:"
            calls = self.callers.keys()
            calls.sort()
            lastfile = lastcfile = ""
            for ((pfile, pmod, pfunc), (cfile, cmod, cfunc)) in calls:
                if pfile != lastfile:
                    print
                    print "***", pfile, "***"
                    lastfile = pfile
                    lastcfile = ""
                if cfile != pfile and lastcfile != cfile:
                    print "  -->", cfile
                    lastcfile = cfile
                print "    %s.%s -> %s.%s" % (pmod, pfunc, cmod, cfunc)
        # turn the counts data ("(filename, lineno) = count") into something
        # accessible on a per-file basis
        per_file = {}
        for filename, lineno in self.counts.keys():
            lines_hit = per_file[filename] = per_file.get(filename, {})
            lines_hit[lineno] = self.counts[(filename, lineno)]
        # accumulate summary info, if needed
        sums = {}
        # NOTE: despite its name, 'count' below is the per-file dict
        # mapping lineno -> hit count.
        for filename, count in per_file.iteritems():
            # skip some "files" we don't care about...
            if filename == "<string>":
                continue
            if filename.startswith("<doctest "):
                continue
            if filename.endswith((".pyc", ".pyo")):
                filename = filename[:-1]
            if coverdir is None:
                dir = os.path.dirname(os.path.abspath(filename))
                modulename = modname(filename)
            else:
                dir = coverdir
                if not os.path.exists(dir):
                    os.makedirs(dir)
                modulename = fullmodname(filename)
            # If desired, get a list of the line numbers which represent
            # executable content (returned as a dict for better lookup speed)
            if show_missing:
                lnotab = find_executable_linenos(filename)
            else:
                lnotab = {}
            source = linecache.getlines(filename)
            coverpath = os.path.join(dir, modulename + ".cover")
            n_hits, n_lines = self.write_results_file(coverpath, source,
                                                      lnotab, count)
            if summary and n_lines:
                percent = 100 * n_hits // n_lines
                sums[modulename] = n_lines, percent, modulename, filename
        if summary and sums:
            mods = sums.keys()
            mods.sort()
            print "lines   cov%   module   (path)"
            for m in mods:
                n_lines, percent, modulename, filename = sums[m]
                print "%5d   %3d%%   %s   (%s)" % sums[m]
        if self.outfile:
            # try and store counts and module info into self.outfile
            try:
                pickle.dump((self.counts, self.calledfuncs, self.callers),
                            open(self.outfile, 'wb'), 1)
            except IOError, err:
                print >> sys.stderr, "Can't save counts files because %s" % err
    def write_results_file(self, path, lines, lnotab, lines_hit):
        """Return a coverage results file in path."""
        # Returns (n_hits, n_lines): executed lines vs executable lines.
        try:
            outfile = open(path, "w")
        except IOError, err:
            print >> sys.stderr, ("trace: Could not open %r for writing: %s"
                                  "- skipping" % (path, err))
            return 0, 0
        n_lines = 0
        n_hits = 0
        for i, line in enumerate(lines):
            lineno = i + 1
            # do the blank/comment match to try to mark more lines
            # (help the reader find stuff that hasn't been covered)
            if lineno in lines_hit:
                outfile.write("%5d: " % lines_hit[lineno])
                n_hits += 1
                n_lines += 1
            elif rx_blank.match(line):
                outfile.write("       ")
            else:
                # lines preceded by no marks weren't hit
                # Highlight them if so indicated, unless the line contains
                # #pragma: NO COVER
                if lineno in lnotab and not PRAGMA_NOCOVER in lines[i]:
                    outfile.write(">>>>>> ")
                    n_lines += 1
                else:
                    outfile.write("       ")
            outfile.write(lines[i].expandtabs(8))
        outfile.close()
        return n_hits, n_lines
def find_lines_from_code(code, strs):
    """Return dict where keys are lines in the line number table."""
    # Every line that begins a bytecode instruction is executable,
    # except those recorded in strs (docstring lines).
    return dict((lineno, 1)
                for _, lineno in dis.findlinestarts(code)
                if lineno not in strs)

def find_lines(code, strs):
    """Return lineno dict for all code objects reachable from code."""
    linenos = find_lines_from_code(code, strs)
    # Nested functions/classes live in co_consts; recurse into each.
    for const in code.co_consts:
        if inspect.iscode(const):
            linenos.update(find_lines(const, strs))
    return linenos
def find_strings(filename):
    """Return a dict of possible docstring positions.

    The dict maps line numbers to 1.  There is an entry for every line
    that contains only a string or a part of a triple-quoted string
    (i.e. a STRING token immediately following an INDENT token).
    """
    d = {}
    # If the first token is a string, then it's the module docstring.
    # Add this special case so that the test in the loop passes.
    prev_ttype = token.INDENT
    # Use a with-block so the file is closed even if tokenization
    # raises (the original leaked the handle on error).
    with open(filename) as f:
        for ttype, tstr, start, end, line in tokenize.generate_tokens(f.readline):
            if ttype == token.STRING:
                if prev_ttype == token.INDENT:
                    sline, scol = start
                    eline, ecol = end
                    # Mark every line the (possibly multi-line) string spans.
                    for i in range(sline, eline + 1):
                        d[i] = 1
            prev_ttype = ttype
    return d
def find_executable_linenos(filename):
    """Return dict where keys are line numbers in the line number table."""
    # Best effort: unreadable files simply yield no executable lines.
    try:
        prog = open(filename, "rU").read()
    except IOError, err:
        print >> sys.stderr, ("Not printing coverage data for %r: %s"
                              % (filename, err))
        return {}
    # Compile the source, then subtract docstring-only lines found by
    # find_strings() from the executable-line table.
    code = compile(prog, filename, "exec")
    strs = find_strings(filename)
    return find_lines(code, strs)
class Trace:
    """Drives sys.settrace-based tracing/counting of executed code and
    collects the raw data that CoverageResults reports on.  (Python 2
    module: uses the py2 `exec ... in` statement.)
    """
    def __init__(self, count=1, trace=1, countfuncs=0, countcallers=0,
                 ignoremods=(), ignoredirs=(), infile=None, outfile=None,
                 timing=False):
        """
        @param count true iff it should count number of times each
                     line is executed
        @param trace true iff it should print out each line that is
                     being counted
        @param countfuncs true iff it should just output a list of
                     (filename, modulename, funcname,) for functions
                     that were called at least once;  This overrides
                     `count' and `trace'
        @param ignoremods a list of the names of modules to ignore
        @param ignoredirs a list of the names of directories to ignore
                     all of the (recursive) contents of
        @param infile file from which to read stored counts to be
                     added into the results
        @param outfile file in which to write the results
        @param timing true iff timing information be displayed
        """
        self.infile = infile
        self.outfile = outfile
        self.ignore = Ignore(ignoremods, ignoredirs)
        self.counts = {}   # keys are (filename, linenumber)
        self.blabbed = {} # for debugging
        self.pathtobasename = {} # for memoizing os.path.basename
        self.donothing = 0
        self.trace = trace
        self._calledfuncs = {}
        self._callers = {}
        self._caller_cache = {}
        self.start_time = None
        if timing:
            self.start_time = time.time()
        # Pick the trace-function pair matching the requested mode.
        if countcallers:
            self.globaltrace = self.globaltrace_trackcallers
        elif countfuncs:
            self.globaltrace = self.globaltrace_countfuncs
        elif trace and count:
            self.globaltrace = self.globaltrace_lt
            self.localtrace = self.localtrace_trace_and_count
        elif trace:
            self.globaltrace = self.globaltrace_lt
            self.localtrace = self.localtrace_trace
        elif count:
            self.globaltrace = self.globaltrace_lt
            self.localtrace = self.localtrace_count
        else:
            # Ahem -- do nothing?  Okay.
            self.donothing = 1
    def run(self, cmd):
        # Trace `cmd` in the __main__ namespace.
        import __main__
        dict = __main__.__dict__
        self.runctx(cmd, dict, dict)
    def runctx(self, cmd, globals=None, locals=None):
        # Execute `cmd` under tracing, guaranteeing the trace hook is
        # removed afterwards.
        if globals is None: globals = {}
        if locals is None: locals = {}
        if not self.donothing:
            _settrace(self.globaltrace)
        try:
            exec cmd in globals, locals
        finally:
            if not self.donothing:
                _unsettrace()
    def runfunc(self, func, *args, **kw):
        # Call `func(*args, **kw)` under tracing (current thread only).
        result = None
        if not self.donothing:
            sys.settrace(self.globaltrace)
        try:
            result = func(*args, **kw)
        finally:
            if not self.donothing:
                sys.settrace(None)
        return result
    def file_module_function_of(self, frame):
        # Map a frame to (filename, modulename, funcname), trying to
        # qualify methods as "Class.method" via gc-based discovery.
        code = frame.f_code
        filename = code.co_filename
        if filename:
            modulename = modname(filename)
        else:
            modulename = None
        funcname = code.co_name
        clsname = None
        if code in self._caller_cache:
            if self._caller_cache[code] is not None:
                clsname = self._caller_cache[code]
        else:
            self._caller_cache[code] = None
            ## use of gc.get_referrers() was suggested by Michael Hudson
            # all functions which refer to this code object
            funcs = [f for f in gc.get_referrers(code)
                         if inspect.isfunction(f)]
            # require len(func) == 1 to avoid ambiguity caused by calls to
            # new.function(): "In the face of ambiguity, refuse the
            # temptation to guess."
            if len(funcs) == 1:
                dicts = [d for d in gc.get_referrers(funcs[0])
                             if isinstance(d, dict)]
                if len(dicts) == 1:
                    classes = [c for c in gc.get_referrers(dicts[0])
                                   if hasattr(c, "__bases__")]
                    if len(classes) == 1:
                        # ditto for new.classobj()
                        clsname = classes[0].__name__
                        # cache the result - assumption is that new.* is
                        # not called later to disturb this relationship
                        # _caller_cache could be flushed if functions in
                        # the new module get called.
                        self._caller_cache[code] = clsname
        if clsname is not None:
            funcname = "%s.%s" % (clsname, funcname)
        return filename, modulename, funcname
    def globaltrace_trackcallers(self, frame, why, arg):
        """Handler for call events.
        Adds information about who called who to the self._callers dict.
        """
        if why == 'call':
            # XXX Should do a better job of identifying methods
            this_func = self.file_module_function_of(frame)
            parent_func = self.file_module_function_of(frame.f_back)
            self._callers[(parent_func, this_func)] = 1
    def globaltrace_countfuncs(self, frame, why, arg):
        """Handler for call events.
        Adds (filename, modulename, funcname) to the self._calledfuncs dict.
        """
        if why == 'call':
            this_func = self.file_module_function_of(frame)
            self._calledfuncs[this_func] = 1
    def globaltrace_lt(self, frame, why, arg):
        """Handler for call events.
        If the code block being entered is to be ignored, returns `None',
        else returns self.localtrace.
        """
        if why == 'call':
            code = frame.f_code
            filename = frame.f_globals.get('__file__', None)
            if filename:
                # XXX modname() doesn't work right for packages, so
                # the ignore support won't work right for packages
                modulename = modname(filename)
                if modulename is not None:
                    ignore_it = self.ignore.names(filename, modulename)
                    if not ignore_it:
                        if self.trace:
                            print (" --- modulename: %s, funcname: %s"
                                   % (modulename, code.co_name))
                        return self.localtrace
            else:
                # No __file__ (e.g. exec'd code): don't trace into it.
                return None
    def localtrace_trace_and_count(self, frame, why, arg):
        if why == "line":
            # record the file name and line number of every trace
            filename = frame.f_code.co_filename
            lineno = frame.f_lineno
            key = filename, lineno
            self.counts[key] = self.counts.get(key, 0) + 1
            if self.start_time:
                print '%.2f' % (time.time() - self.start_time),
            bname = os.path.basename(filename)
            print "%s(%d): %s" % (bname, lineno,
                                  linecache.getline(filename, lineno)),
        return self.localtrace
    def localtrace_trace(self, frame, why, arg):
        if why == "line":
            # record the file name and line number of every trace
            filename = frame.f_code.co_filename
            lineno = frame.f_lineno
            if self.start_time:
                print '%.2f' % (time.time() - self.start_time),
            bname = os.path.basename(filename)
            print "%s(%d): %s" % (bname, lineno,
                                  linecache.getline(filename, lineno)),
        return self.localtrace
    def localtrace_count(self, frame, why, arg):
        if why == "line":
            filename = frame.f_code.co_filename
            lineno = frame.f_lineno
            key = filename, lineno
            self.counts[key] = self.counts.get(key, 0) + 1
        return self.localtrace
    def results(self):
        # Snapshot the collected data as a CoverageResults, merging in
        # any counts loaded from self.infile.
        return CoverageResults(self.counts, infile=self.infile,
                               outfile=self.outfile,
                               calledfuncs=self._calledfuncs,
                               callers=self._callers)
def _err_exit(msg):
sys.stderr.write("%s: %s\n" % (sys.argv[0], msg))
sys.exit(1)
def main(argv=None):
    """Command-line entry point: parse options, then either run a program
    under tracing/counting or produce a report from a previous counts file."""
    import getopt
    if argv is None:
        argv = sys.argv
    try:
        opts, prog_argv = getopt.getopt(argv[1:], "tcrRf:d:msC:lTg",
                                        ["help", "version", "trace", "count",
                                         "report", "no-report", "summary",
                                         "file=", "missing",
                                         "ignore-module=", "ignore-dir=",
                                         "coverdir=", "listfuncs",
                                         "trackcalls", "timing"])
    except getopt.error, msg:
        sys.stderr.write("%s: %s\n" % (sys.argv[0], msg))
        sys.stderr.write("Try `%s --help' for more information\n"
                         % sys.argv[0])
        sys.exit(1)
    # Option state; ints (0/1) vs. bools mirror the original interface.
    trace = 0
    count = 0
    report = 0
    no_report = 0
    counts_file = None
    missing = 0
    ignore_modules = []
    ignore_dirs = []
    coverdir = None
    summary = 0
    listfuncs = False
    countcallers = False
    timing = False
    for opt, val in opts:
        if opt == "--help":
            usage(sys.stdout)
            sys.exit(0)
        if opt == "--version":
            sys.stdout.write("trace 2.0\n")
            sys.exit(0)
        if opt == "-T" or opt == "--trackcalls":
            countcallers = True
            continue
        if opt == "-l" or opt == "--listfuncs":
            listfuncs = True
            continue
        if opt == "-g" or opt == "--timing":
            timing = True
            continue
        if opt == "-t" or opt == "--trace":
            trace = 1
            continue
        if opt == "-c" or opt == "--count":
            count = 1
            continue
        if opt == "-r" or opt == "--report":
            report = 1
            continue
        if opt == "-R" or opt == "--no-report":
            no_report = 1
            continue
        if opt == "-f" or opt == "--file":
            counts_file = val
            continue
        if opt == "-m" or opt == "--missing":
            missing = 1
            continue
        if opt == "-C" or opt == "--coverdir":
            coverdir = val
            continue
        if opt == "-s" or opt == "--summary":
            summary = 1
            continue
        if opt == "--ignore-module":
            for mod in val.split(","):
                ignore_modules.append(mod.strip())
            continue
        if opt == "--ignore-dir":
            # Accept multiple dirs separated by os.pathsep, with $prefix /
            # $exec_prefix expanded to the stdlib location.
            for s in val.split(os.pathsep):
                s = os.path.expandvars(s)
                # should I also call expanduser? (after all, could use $HOME)
                s = s.replace("$prefix",
                              os.path.join(sys.prefix, "lib",
                                           "python" + sys.version[:3]))
                s = s.replace("$exec_prefix",
                              os.path.join(sys.exec_prefix, "lib",
                                           "python" + sys.version[:3]))
                s = os.path.normpath(s)
                ignore_dirs.append(s)
            continue
        assert 0, "Should never get here"
    # Validate mutually-exclusive / required option combinations.
    if listfuncs and (count or trace):
        _err_exit("cannot specify both --listfuncs and (--trace or --count)")
    if not (count or trace or report or listfuncs or countcallers):
        _err_exit("must specify one of --trace, --count, --report, "
                  "--listfuncs, or --trackcalls")
    if report and no_report:
        _err_exit("cannot specify both --report and --no-report")
    if report and not counts_file:
        _err_exit("--report requires a --file")
    if no_report and len(prog_argv) == 0:
        _err_exit("missing name of file to run")
    # everything is ready
    if report:
        # Report-only mode: read previously-saved counts, write the report.
        results = CoverageResults(infile=counts_file, outfile=counts_file)
        results.write_results(missing, summary=summary, coverdir=coverdir)
    else:
        # Run mode: make the target program believe it was invoked directly.
        sys.argv = prog_argv
        progname = prog_argv[0]
        sys.path[0] = os.path.split(progname)[0]
        t = Trace(count, trace, countfuncs=listfuncs,
                  countcallers=countcallers, ignoremods=ignore_modules,
                  ignoredirs=ignore_dirs, infile=counts_file,
                  outfile=counts_file, timing=timing)
        try:
            with open(progname) as fp:
                code = compile(fp.read(), progname, 'exec')
            # try to emulate __main__ namespace as much as possible
            globs = {
                '__file__': progname,
                '__name__': '__main__',
                '__package__': None,
                '__cached__': None,
            }
            t.runctx(code, globs, globs)
        except IOError, err:
            _err_exit("Cannot run file %r because: %s" % (sys.argv[0], err))
        except SystemExit:
            pass
        results = t.results()
        if not no_report:
            results.write_results(missing, summary=summary, coverdir=coverdir)
# Allow the module to be executed directly as a command-line tool.
if __name__=='__main__':
    main()
| gpl-2.0 |
UTNkar/moore | src/home/migrations/0027_manual_paragraph_data.py | 1 | 1268 | # Generated by Django 2.2.10 on 2020-04-02 21:08
from django.db import migrations
from itertools import chain
from utils.data_migrations import stream_field_filter_map
def richtext_to_paragraph(block):
    """Wrap a bare richtext stream-block value into the structured
    paragraph shape, defaulting the alignment to "Left"."""
    text = block['value']
    return {
        'type': 'paragraph',
        'value': {'text': text, 'alignment': "Left"},
    }
def paragraph_to_richtext(block):
    """Collapse a structured paragraph block back to its bare richtext
    value; the alignment information is discarded."""
    return {'type': 'paragraph', 'value': block['value']['text']}
def apply_to_all_pages(apps, mapper):
    """Apply *mapper* to every 'paragraph' block in the English and Swedish
    bodies of all HomePage and WebPage instances, saving each page.

    `apps` is the migration-state app registry, so historical model classes
    are used rather than the current ones.
    """
    HomePage = apps.get_model('home', 'HomePage')
    WebPage = apps.get_model('home', 'WebPage')
    hps = HomePage.objects.all()
    wps = WebPage.objects.all()
    for obj in chain(hps, wps):
        obj.body_en = stream_field_filter_map(obj.body_en, "paragraph", mapper)
        obj.body_sv = stream_field_filter_map(obj.body_sv, "paragraph", mapper)
        obj.save()
def forwards(apps, schema_editor):
    # Migrate plain richtext paragraphs into structured paragraph blocks.
    apply_to_all_pages(apps, richtext_to_paragraph)
def backwards(apps, schema_editor):
    # Reverse: collapse structured paragraph blocks back to plain richtext.
    apply_to_all_pages(apps, paragraph_to_richtext)
class Migration(migrations.Migration):
    # Pure data migration; reversible because both mapper directions exist.
    dependencies = [
        ('home', '0026_auto_20200402_2308'),
    ]
    operations = [
        migrations.RunPython(forwards, backwards)
    ]
| agpl-3.0 |
matmutant/sl4a | python/src/Lib/encodings/euc_jisx0213.py | 816 | 1051 | #
# euc_jisx0213.py: Python Unicode Codec for EUC_JISX0213
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_jp, codecs
import _multibytecodec as mbc
# Look up the C implementation of the euc_jisx0213 codec once at import time.
codec = _codecs_jp.getcodec('euc_jisx0213')
class Codec(codecs.Codec):
    # Stateless encode/decode delegate directly to the C codec object.
    encode = codec.encode
    decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    # Multibyte base class supplies the stateful encoding machinery.
    codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    # Multibyte base class supplies the stateful decoding machinery.
    codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    # Stream wrapper; behavior comes entirely from the mixin bases.
    codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    # Stream wrapper; behavior comes entirely from the mixin bases.
    codec = codec
def getregentry():
    """Return the CodecInfo that registers this codec with the codecs
    machinery (invoked via encodings' search function)."""
    return codecs.CodecInfo(
        name='euc_jisx0213',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
| apache-2.0 |
JonnyH/pyra-kernel | GTA04/root/vibra.py | 2 | 3531 | #!/usr/bin/env python
import fcntl, struct, time, array
#
# There are two steps to creating a rumble effect
# 1/ describe the effect and give it to the driver using an
# ioctl.
# There a 3 paramaters:
# strength: from 0 to 0xffff - this code takes a value from 0 to
# 1 and scales it
# duration: milliseconds
# delay until start: milliseconds.
#
# 2/ write a request to play a specific effect.
#
# It is possible to have multiple effects active. If they have
# different delays they will start at different times.
# This demo shows combining 3 non-overlapping effects to make
# a simple vibration pattern
#
# An effect is created with f.new_vibe(strength, duration, delay)
# That effect can then be started with 'play' and stopped with 'stop'.
# EVIOCRMFF = _IOW('E', 0x81, int)
# dir: 2 WRITE = 1 == 0x40000
# size 14 4
# type 8 'E' == 0x45
# nr: 8 0x81
#
# Precomputed ioctl request number (see _IOW/_IOC breakdown in the comments
# above): removes a previously uploaded force-feedback effect.
EVIOCRMFF = 0x40044581
# EVIOCSFF  _IOC(_IOC_WRITE, 'E', 0x80, sizeof(struct ff_effect))
EVIOCSFF = 0x402c4580
class Vibra:
    """Thin wrapper around a Linux force-feedback ("rumble") input device.

    Usage: upload effects with new_vibe()/multi_vibe(), start/stop them by
    writing EV_FF input events with play()/stop(), and free the driver slot
    with forget() when done.
    """

    def __init__(self, file = "/dev/input/rumble"):
        # Opened read/write: ioctls upload effects, writes trigger them.
        self.f = open(file, "r+")

    def close(self):
        """Close the underlying device file."""
        self.f.close()

    def new_vibe(self, strength, length, delay):
        """Upload a rumble effect and return its driver-assigned id.

        strength is from 0 to 1 (scaled to 0..0xFFFF); length and delay
        (time until the effect starts once played) are in milliseconds.
        """
        # this is 'struct ff_effect' from "linux/input.h"
        effect = struct.pack('HhHHHHHxxHH',
                             0x50, -1, 0, # FF_RUMBLE, id (-1 = allocate), direction
                             0, 0, # trigger (button interval)
                             length, delay,
                             int(strength * 0xFFFF), 0)
        a = array.array('h', effect)
        # EVIOCSFF writes back the allocated effect id into the buffer (a[1]).
        fcntl.ioctl(self.f, EVIOCSFF, a, True)
        return a[1]

    def multi_vibe(self, length, repeats = 1, delay = None, strength = 1):
        """Create `repeats` pulses of `length` ms separated by `delay` ms
        (defaults to `length`) and return the list of effect ids."""
        start = 0
        if delay is None:
            delay = length
        v = []
        for i in range(0, repeats):
            v.append(self.new_vibe(strength, length, start))
            start += length + delay
        return v

    def play(self, id):
        """Start one effect id, or every id in a tuple/list."""
        # this is 'struct input_event': sec, nsec, type, code, value
        if isinstance(id, (tuple, list)):
            ev_play = ''
            for i in id:
                ev_play = ev_play + struct.pack('LLHHi', 0, 0, 0x15, i, 1)
        else:
            ev_play = struct.pack('LLHHi', 0, 0, 0x15, id, 1)
        self.f.write(ev_play)
        self.f.flush()

    def stop(self, id):
        """Stop one effect id, or every id in a tuple/list."""
        # this is 'struct input_event': sec, nsec, type, code, value
        if isinstance(id, (tuple, list)):
            ev_stop = ''
            for i in id:
                ev_stop = ev_stop + struct.pack('LLHHi', 0, 0, 0x15, i, 0)
        else:
            ev_stop = struct.pack('LLHHi', 0, 0, 0x15, id, 0)
        self.f.write(ev_stop)
        self.f.flush()

    def forget(self, id):
        """Release the driver slot(s) for one effect id or a tuple/list."""
        if isinstance(id, (tuple, list)):
            for i in id:
                fcntl.ioctl(self.f, EVIOCRMFF, i)
        else:
            fcntl.ioctl(self.f, EVIOCRMFF, id)
# Demo when run as a script (requires the rumble device node to exist).
if __name__ == '__main__':
    f = Vibra("/dev/input/rumble")
    # rumble for 300ms, pause for 100ms, rumble for 300ms, pause for 200ms
    # then half-speed rumble for 600ms
    p1 = f.new_vibe(1, 300, 0)
    p2 = f.new_vibe(1, 300,400)
    p3 = f.new_vibe(0.5, 600, 900)
    f.play((p1, p2, p3))
    time.sleep(2)
    f.forget((p1, p2, p3))
    # 14 pulses of 200ms separated by 100ms gaps.
    f.play(f.multi_vibe(200, 14, delay=100))
    time.sleep(5)
| gpl-2.0 |
serge-sans-paille/pythran | pythran/transformations/remove_nested_functions.py | 1 | 4044 | """ RemoveNestedFunctions turns nested function into top-level functions. """
from pythran.analyses import GlobalDeclarations, ImportedIds
from pythran.passmanager import Transformation
from pythran.tables import MODULES
from pythran.conversion import mangle
import pythran.metadata as metadata
import gast as ast
class _NestedFunctionRemover(ast.NodeTransformer):
    """AST transformer that hoists a nested FunctionDef to module level and
    replaces it with a functools.partial binding its captured variables."""

    def __init__(self, parent):
        ast.NodeTransformer.__init__(self)
        self.parent = parent
        # Names already taken at module scope, used to pick a fresh name.
        self.identifiers = set(self.global_declarations.keys())

    def __getattr__(self, attr):
        # Delegate everything else (ctx, gather, update, ...) to the pass.
        return getattr(self.parent, attr)

    def visit_FunctionDef(self, node):
        self.update = True
        # Ensure `import functools as <mangled>` exists at module top.
        if MODULES['functools'] not in self.global_declarations.values():
            import_ = ast.Import([ast.alias('functools', mangle('functools'))])
            self.ctx.module.body.insert(0, import_)
            functools_module = MODULES['functools']
            self.global_declarations[mangle('functools')] = functools_module
        # Hoist the nested function to module level.
        self.ctx.module.body.append(node)
        former_name = node.name
        # Find an unused "pythran_<name><seed>" identifier.
        seed = 0
        new_name = "pythran_{}{}"
        while new_name.format(former_name, seed) in self.identifiers:
            seed += 1
        new_name = new_name.format(former_name, seed)
        self.identifiers.add(new_name)
        # Captured (free) variables become leading explicit parameters.
        ii = self.gather(ImportedIds, node)
        binded_args = [ast.Name(iin, ast.Load(), None, None)
                       for iin in sorted(ii)]
        node.args.args = ([ast.Name(iin, ast.Param(), None, None)
                           for iin in sorted(ii)] +
                          node.args.args)
        metadata.add(node, metadata.Local())

        # Rewrite recursive calls inside the hoisted body to use the new
        # name and pass the captured variables explicitly.
        class Renamer(ast.NodeTransformer):
            def visit_Call(self, node):
                self.generic_visit(node)
                if (isinstance(node.func, ast.Name) and
                        node.func.id == former_name):
                    node.func.id = new_name
                    node.args = (
                        [ast.Name(iin, ast.Load(), None, None)
                         for iin in sorted(ii)] +
                        node.args
                    )
                return node

        Renamer().visit(node)
        node.name = new_name
        self.global_declarations[node.name] = node
        # Replace the original def with:
        #   <former_name> = functools.partial(<new_name>, <captured...>)
        proxy_call = ast.Name(new_name, ast.Load(), None, None)
        new_node = ast.Assign(
            [ast.Name(former_name, ast.Store(), None, None)],
            ast.Call(
                ast.Attribute(
                    ast.Name(mangle('functools'), ast.Load(), None, None),
                    "partial",
                    ast.Load()
                ),
                [proxy_call] + binded_args,
                [],
            ),
            None)
        # Recurse to handle functions nested deeper inside this one.
        self.generic_visit(node)
        return new_node
class RemoveNestedFunctions(Transformation):
    """
    Replace nested function by top-level functions.
    Also add a call to a bind intrinsic that
    generates a local function with some arguments binded.
    >>> import gast as ast
    >>> from pythran import passmanager, backend
    >>> node = ast.parse("def foo(x):\\n def bar(y): return x+y\\n bar(12)")
    >>> pm = passmanager.PassManager("test")
    >>> _, node = pm.apply(RemoveNestedFunctions, node)
    >>> print(pm.dump(backend.Python, node))
    import functools as __pythran_import_functools
    def foo(x):
        bar = __pythran_import_functools.partial(pythran_bar0, x)
        bar(12)
    def pythran_bar0(x, y):
        return (x + y)
    """

    def __init__(self):
        super(RemoveNestedFunctions, self).__init__(GlobalDeclarations)

    def visit_Module(self, node):
        # keep original node as it's updated by _NestedFunctionRemover
        for stmt in node.body:
            self.visit(stmt)
        return node

    def visit_FunctionDef(self, node):
        # Delegate to the remover, which hoists any nested defs it finds.
        nfr = _NestedFunctionRemover(self)
        node.body = [nfr.visit(stmt) for stmt in node.body]
        self.update |= nfr.update
        return node
| bsd-3-clause |
lduarte1991/edx-platform | cms/djangoapps/contentstore/tests/test_import_draft_order.py | 25 | 2843 | """
Tests Draft import order.
"""
from django.conf import settings
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.xml_importer import import_course_from_xml
# Root directory containing the shared course fixtures imported below.
TEST_DATA_DIR = settings.COMMON_TEST_DATA_ROOT
# This test is in the CMS module because the test configuration to use a draft
# modulestore is dependent on django.
class DraftReorderTestCase(ModuleStoreTestCase):
    """Import a course containing draft verticals and assert that children
    end up ordered by their 'index_in_children_list', interleaved with the
    public verticals."""

    def test_order(self):
        """
        Verify that drafts are imported in the correct order.
        """
        store = modulestore()
        course_items = import_course_from_xml(
            store, self.user.id, TEST_DATA_DIR, ['import_draft_order'], create_if_not_present=True
        )
        course_key = course_items[0].id
        sequential = store.get_item(course_key.make_usage_key('sequential', '0f4f7649b10141b0bdc9922dcf94515a'))
        verticals = sequential.children
        # The order that files are read in from the file system is not guaranteed (cannot rely on
        # alphabetical ordering, for example). Therefore, I have added a lot of variation in filename and desired
        # ordering so that the test reliably failed with the bug, at least on Linux.
        #
        # 'a', 'b', 'c', 'd', and 'z' are all drafts, with 'index_in_children_list' of
        #  2 ,  4 ,  6 ,  5 , and 0 respectively.
        #
        # '5a05be9d59fc4bb79282c94c9e6b88c7' and 'second' are public verticals.
        self.assertEqual(7, len(verticals))
        self.assertEqual(course_key.make_usage_key('vertical', 'z'), verticals[0])
        self.assertEqual(course_key.make_usage_key('vertical', '5a05be9d59fc4bb79282c94c9e6b88c7'), verticals[1])
        self.assertEqual(course_key.make_usage_key('vertical', 'a'), verticals[2])
        self.assertEqual(course_key.make_usage_key('vertical', 'second'), verticals[3])
        self.assertEqual(course_key.make_usage_key('vertical', 'b'), verticals[4])
        self.assertEqual(course_key.make_usage_key('vertical', 'd'), verticals[5])
        self.assertEqual(course_key.make_usage_key('vertical', 'c'), verticals[6])
        # Now also test that the verticals in a second sequential are correct.
        sequential = store.get_item(course_key.make_usage_key('sequential', 'secondseq'))
        verticals = sequential.children
        # 'asecond' and 'zsecond' are drafts with 'index_in_children_list' 0 and 2, respectively.
        # 'secondsubsection' is a public vertical.
        self.assertEqual(3, len(verticals))
        self.assertEqual(course_key.make_usage_key('vertical', 'asecond'), verticals[0])
        self.assertEqual(course_key.make_usage_key('vertical', 'secondsubsection'), verticals[1])
        self.assertEqual(course_key.make_usage_key('vertical', 'zsecond'), verticals[2])
| agpl-3.0 |
viru/ansible | test/units/parsing/test_unquote.py | 152 | 2073 | # coding: utf-8
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from nose import tools
from ansible.compat.tests import unittest
from ansible.parsing.splitter import unquote
# Tests using nose's test generators cannot use unittest base class.
# http://nose.readthedocs.org/en/latest/writing_tests.html#test-generators
class TestUnquote:
    """Table-driven nose test generator for ansible.parsing.splitter.unquote."""

    # Pairs of (quoted input, expected unquoted output).
    UNQUOTE_DATA = (
        (u'1', u'1'),
        (u'\'1\'', u'1'),
        (u'"1"', u'1'),
        (u'"1 \'2\'"', u'1 \'2\''),
        (u'\'1 "2"\'', u'1 "2"'),
        (u'\'1 \'2\'\'', u'1 \'2\''),
        (u'"1\\"', u'"1\\"'),
        (u'\'1\\\'', u'\'1\\\''),
        (u'"1 \\"2\\" 3"', u'1 \\"2\\" 3'),
        (u'\'1 \\\'2\\\' 3\'', u'1 \\\'2\\\' 3'),
        (u'"', u'"'),
        (u'\'', u'\''),
        # Not entirely sure these are good but they match the current
        # behaviour
        (u'"1""2"', u'1""2'),
        (u'\'1\'\'2\'', u'1\'\'2'),
        (u'"1" 2 "3"', u'1" 2 "3'),
        (u'"1"\'2\'"3"', u'1"\'2\'"3'),
    )

    def check_unquote(self, quoted, expected):
        # Single-case assertion used by the generator below.
        tools.eq_(unquote(quoted), expected)

    def test_unquote(self):
        # nose-style test generator: yields one check per data point.
        for datapoint in self.UNQUOTE_DATA:
            yield self.check_unquote, datapoint[0], datapoint[1]
| gpl-3.0 |
jhonatajh/mtasa-blue | vendor/google-breakpad/src/tools/gyp/test/make_global_settings/wrapper/gyptest-wrapper.py | 229 | 1444 | #!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies *_wrapper in make_global_settings.
"""
import os
import sys
import TestGyp
# make is only exercised on Linux and macOS; ninja everywhere.
test_format = ['ninja']
if sys.platform in ('linux2', 'darwin'):
  test_format += ['make']

test = TestGyp.TestGyp(formats=test_format)

# Run gyp with GYP_CROSSCOMPILE set, then restore the original environment.
old_env = dict(os.environ)
os.environ['GYP_CROSSCOMPILE'] = '1'
test.run_gyp('wrapper.gyp')
os.environ.clear()
os.environ.update(old_env)

if test.format == 'make':
  # The CC/LINK wrappers must appear in the generated Makefile.
  cc_expected = """ifneq (,$(filter $(origin CC), undefined default))
  CC = $(abspath distcc) $(abspath clang)
endif
"""
  link_expected = 'LINK ?= $(abspath distlink) $(abspath clang++)'
  test.must_contain('Makefile', cc_expected)
  test.must_contain('Makefile', link_expected)
if test.format == 'ninja':
  # ninja emits cc/cc_host/ld variables with the wrappers prepended.
  cc_expected = ('cc = ' + os.path.join('..', '..', 'distcc') + ' ' +
                 os.path.join('..', '..', 'clang'))
  cc_host_expected = ('cc_host = ' + os.path.join('..', '..', 'ccache') + ' ' +
                      os.path.join('..', '..', 'clang'))
  ld_expected = 'ld = ../../distlink $cc'
  if sys.platform == 'win32':
    ld_expected = 'link.exe'
  test.must_contain('out/Default/build.ninja', cc_expected)
  test.must_contain('out/Default/build.ninja', cc_host_expected)
  test.must_contain('out/Default/build.ninja', ld_expected)
test.pass_test()
| gpl-3.0 |
Hikasgai/HikasgaiApp | placeForMe/settings.py | 1 | 4053 | """
Django settings for gettingstarted project, on Heroku. For more info, see:
https://github.com/heroku/heroku-django-template
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Absolute path of the package that contains this settings module.
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: change this before deploying to production!
# The hard-coded value is kept only as a development fallback; set
# DJANGO_SECRET_KEY in the environment for any real deployment.
SECRET_KEY = os.environ.get(
    'DJANGO_SECRET_KEY',
    'i+acxn5(akgsn!sr4^qgf(^m&*@+g1@u^t@=8s@axc41ml*f=s')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'usuarios',
    'asignaturas'
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)

ROOT_URLCONF = 'placeForMe.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(BASE_DIR, 'templates')
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'debug': True,
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'placeForMe.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# Local default is SQLite; overridden below by DATABASE_URL when present.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Update database configuration with $DATABASE_URL.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)

# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')

# Allow all host headers
# NOTE(review): wildcard ALLOWED_HOSTS is unsafe outside a proxied Heroku
# setup — confirm the proxy validates the Host header.
ALLOWED_HOSTS = ['*']

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
MEDIA_ROOT = 'media'
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'

# Extra places for collectstatic to find static files.
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static'),
]

# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
| mit |
systemsoverload/aquameta | core/002-filesystem/pgfs/pgfs.py | 2 | 10817 | #!/usr/bin/env python2
from fuse import FUSE, FuseOSError, Operations, LoggingMixIn, fuse_get_context, ENOENT, EROFS
from os.path import normpath
from psycopg2 import connect, ProgrammingError, DataError, InternalError
from psycopg2.extensions import QuotedString
from stat import S_IFDIR, S_IFREG
from sys import exit
from time import time, sleep
import argparse, getpass
class PostgresFS(LoggingMixIn, Operations):
    """FUSE filesystem exposing a PostgreSQL database as directories.

    Path layout: /<schema>/<table>/<row id>/<column>; a 5-component path is
    a "file" whose content is the column value for that row.

    NOTE(review): every query below is built by string interpolation of path
    components — a hostile path is an SQL injection vector. Values (pk)
    should be passed as query parameters; schema/table/column identifiers
    need proper identifier quoting.
    """

    def __init__(self, database, port=5432, host='localhost', username=None, password=None):
        self.database = database
        self.port = port
        self.host = host
        self.username = username
        self.password = password
        # host=None means connect over the local Unix socket.
        if host:
            self.conn = connect(database=database, port=port, host=host, user=username, password=password)
        else:
            self.conn = connect(database=database, port=port, user=username, password=password)
        self.write_buffer = None # Used to hold what we write when flush is called

    def _row_exists(self, schema, table, pk):
        """Return True if a row with id *pk* exists in schema.table."""
        exists = False
        cur = self.conn.cursor()
        try:
            cur.execute("SELECT 1 FROM \"{}\".\"{}\" where id = '{}' LIMIT 1".format(schema, table, pk))
        except DataError:
            # e.g. pk does not parse as the id column's type.
            cur.close()
            return False;
        result = cur.fetchone()
        exists = True if result else False
        cur.close()
        return exists

    def _schema_exists(self, schema_name):
        """Return True if the schema is listed in meta.schema."""
        exists = False
        cur = self.conn.cursor()
        cur.execute("SELECT 1 FROM meta.schema where name = '{}' LIMIT 1".format(schema_name))
        result = cur.fetchone()
        exists = True if result else False
        cur.close()
        return exists

    def _get_pk_data(self, schema, tablename, pk, col, offset=0, limit=0):
        """Fetch column *col* of the row *pk*, optionally a substring slice.

        offset/limit follow read() semantics (0-based byte offset); SQL
        substring() is 1-based, hence the offset + 1 below when slicing.
        Raises FuseOSError(ENOENT) on any failure.
        """
        print "Updating data"
        if self._schema_exists(schema):
            cur = self.conn.cursor()
            print "Pulling data for {}.{}.{}.{}, O: {}, L: {}".format(schema, tablename, pk, col, offset, limit)
            try:
                if limit != 0:
                    offset = offset + 1
                    cur.execute("SELECT substring({col}::text from {off} for {lim}) FROM \"{s}\".\"{t}\" where id = '{pk}'".format(s=schema, t=tablename, pk=pk, col=col, off=offset, lim=limit))
                else:
                    cur.execute("SELECT \"{col}\" FROM \"{s}\".\"{t}\" where id = '{pk}'".format(s=schema, t=tablename, pk=pk, col=col))
            except Exception as e:
                print "LIMIT: {} OFFSET: {} ERROR: {}".format(limit, offset, e)
                self.conn.rollback()
                cur.close()
                raise FuseOSError(ENOENT)
            print "Cursor status: {}".format(cur.statusmessage)
            data = cur.fetchall()
            cur.close()
            formatted = str(data[0][0]).encode('utf-8', errors='replace')
            return formatted
        raise FuseOSError(ENOENT)
        # NOTE(review): unreachable — the raise above always fires here.
        return None

    def _set_pk_data(self, schema, tablename, pk, col):
        """Write self.write_buffer into column *col* of row *pk* and commit.

        Returns True on success, False on failure or if there is nothing
        buffered to write.
        """
        print "Updating data"
        if self._schema_exists(schema) and self.write_buffer is not None:
            cur = self.conn.cursor()
            try:
                # QuotedString escapes the buffer for safe inclusion as a
                # literal; identifiers are still interpolated unescaped.
                to_write = QuotedString(self.write_buffer).getquoted()
                cur.execute("UPDATE \"{s}\".\"{t}\" SET {col} = {wb} WHERE id = '{pk}'".format(s=schema, wb=to_write, t=tablename, pk=pk, col=col))
                print "Cursor status: {}".format(cur.statusmessage)
                self.conn.commit()
            except InternalError as e:
                self.conn.rollback()
                return False
            else:
                return True
            finally:
                cur.close()
            # NOTE(review): unreachable — every path above returns first, so
            # the buffer is never cleared on this branch.
            self.write_buffer = None
        elif self.write_buffer is not None:
            self.write_buffer = None
            raise FuseOSError(ENOENT)
        return False

    def read(self, path, size, offset, fh):
        """FUSE read: return up to *size* bytes of the column at *path*."""
        print "Calling read"
        if size == 0L:
            return 0
        split = normpath(path).split("/")
        print "Reading {}".format(path)
        # Only 5-component paths (/schema/table/pk/col) are readable files.
        if len(split) == 5:
            schema = split[1]
            tablename = split[2]
            pk = split[3]
            col = split[4]
            data = self._get_pk_data(schema, tablename, pk, col, offset=offset, limit=long(size))
            return data
        raise FuseOSError(ENOENT)

    def create(self, path, mode, fh=None):
        """File creation is not supported: rows are made via SQL, not FUSE."""
        print "Create called."
        raise FuseOSError(EROFS)

    def flush(self, path, fh):
        """FUSE flush: push any buffered write() data back to the database."""
        print "Calling flush"
        split = normpath(path).split("/")
        # We only write rows
        if len(split) == 5 and self.write_buffer:
            schema = split[1]
            tablename = split[2]
            pk = split[3]
            col = split[4]
            self._set_pk_data(schema, tablename, pk, col)
        return 0

    def unlink(self, path):
        """FUSE unlink: accepted (no-op) for column files, ENOENT otherwise."""
        print "Calling unlink"
        split = normpath(path).split("/")
        # We only write rows
        if len(split) == 5:
            return 0
        raise FuseOSError(ENOENT)

    def write(self, path, data, offset, fh):
        """FUSE write: splice *data* into an in-memory copy of the column.

        The first write populates write_buffer from the database; flush()
        later persists the buffer.
        """
        print "Write called"
        if self.write_buffer is None:
            # This is the first write to a file, so we need to get the data for it
            split = normpath(path).split("/")
            # We only write rows
            if len(split) == 5:
                schema = split[1]
                tablename = split[2]
                pk = split[3]
                col = split[4]
                if self._schema_exists(schema) and self._row_exists(schema, tablename, pk):
                    tdata = self._get_pk_data(schema, tablename, pk, col)
                    self.write_buffer = str(tdata).encode('utf-8', errors='replace')
                else:
                    raise FuseOSError(ENOENT)
        # Replace the chunk of the write-buffer with the data we were passed in
        self.write_buffer = self.write_buffer[0:offset] + data
        return len(data)

    def truncate(self, path, length, fh=None):
        """FUSE truncate: clip the buffered column value to *length* bytes."""
        print "Want to truncate: {}".format(length)
        if self.write_buffer is None:
            # This is the first write to a file, so we need to get the data for it
            split = normpath(path).split("/")
            # We only write rows
            if len(split) == 5:
                schema = split[1]
                tablename = split[2]
                pk = split[3]
                col = split[4]
                tdata = self._get_pk_data(schema, tablename, pk, col)
                self.write_buffer = str(tdata).encode('utf-8', errors='replace')
        self.write_buffer = self.write_buffer[:length]
        return 0

    def getattr(self, path, fh=None):
        """FUSE getattr: directories for shallow paths, a regular file (sized
        to the column value) for 5-component paths."""
        print "Calling getattr"
        to_return = None
        uid, gid, pid = fuse_get_context()
        split = normpath(path).split("/")
        if len(split) != 5:
            # Any non-leaf path is presented as a world-readable directory.
            to_return = {
                'st_atime': time(),
                'st_gid': 0,
                'st_uid': 0,
                'st_mode': S_IFDIR | 0755,
                'st_mtime': 667908000,
                'st_ctime': 667908000,
                'st_size': 4096
            }
        else:
            schema = split[1]
            tablename = split[2]
            pk = split[3]
            col = split[4]
            formatted = self._get_pk_data(schema, tablename, pk, col)
            if formatted is not None:
                to_return = {
                    'st_atime': time(),
                    'st_gid': 0,
                    'st_uid': 0,
                    'st_mode': S_IFREG | 0666,
                    'st_mtime': 667908000,
                    'st_ctime': 667908000,
                    'st_size': len(formatted)
                }
            else:
                raise FuseOSError(ENOENT)
        return to_return

    def readdir(self, path, fh):
        """FUSE readdir: schemas at /, tables at /schema, row ids at
        /schema/table, column names at /schema/table/pk."""
        cur = self.conn.cursor()
        to_return = None #['.', '..']
        normalized = normpath(path)
        split = normalized.split("/")
        print "Reading dir {}".format(path)
        # len(path) == 1 is true only for the root path "/".
        if len(path) == 1:
            all_schemas = cur.execute("SELECT name FROM meta.schema")
            to_return = [x[0] for x in cur.fetchall()]
        elif len(split) == 2 and split[1] != '':
            schema = split[1]
            if self._schema_exists(schema):
                cur.execute("select t.name from meta.table t join meta.schema s on s.id = t.schema_id where s.name = '{schema}';".format(schema=schema))
                to_return = [x[0] for x in cur.fetchall()]
            else:
                raise FuseOSError(ENOENT)
        elif len(split) == 3:
            schema = split[1]
            tablename = split[2]
            if self._schema_exists(schema):
                try:
                    cur.execute("SELECT id FROM \"{}\".\"{}\"".format(schema, tablename))
                except ProgrammingError:
                    # Table doesn't exist (or isn't selectable).
                    self.conn.rollback()
                    cur.close()
                    raise FuseOSError(ENOENT)
                to_return = [str(x[0]) for x in cur.fetchall()]
            else:
                raise FuseOSError(ENOENT)
        elif len(split) == 4:
            schema = split[1]
            tablename = split[2]
            pk = split[3]
            if self._schema_exists(schema) and self._row_exists(schema, tablename, pk):
                query = "SELECT c.name from meta.\"column\" c join meta.relation t on c.relation_id = t.id join meta.\"schema\" s on t.schema_id = s.id where s.name = '{schema}' and t.name = '{tablename}';".format(tablename=tablename, schema=schema)
                cur.execute(query)
                all_cols = cur.fetchall()
                to_return = [str(x[0]) for x in all_cols]
            else:
                raise FuseOSError(ENOENT)
        cur.close()
        if not to_return:
            raise FuseOSError(ENOENT)
        return to_return + ['.', '..']
# Command-line entry point: parse connection options and mount the FS in the
# foreground (single-threaded, since one shared psycopg2 connection is used).
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Mount a postgresql database with FUSE.")
    parser.add_argument('mount_point', type=str)
    parser.add_argument('--port', dest='port', type=int)
    parser.add_argument('--host', dest='host', type=str)
    parser.add_argument('-d', '--database', dest='database', required=True, type=str)
    parser.add_argument('-u', '--username', dest='username', type=str)
    parser.add_argument('-p', '--password', dest='password', type=str)
    args = parser.parse_args()
    fsthing = PostgresFS(args.database,
                         port=args.port if args.port else 5432,
                         username=args.username if args.username else getpass.getuser(),
                         host=args.host if args.host else None,
                         password=args.password if args.password else None
                         )
    fuse = FUSE(fsthing, args.mount_point, foreground=True, nothreads=True)
| gpl-3.0 |
sanjuro/RCJK | confirm/views.py | 33 | 1184 | # Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django import http
from django import template
from django.conf import settings
from django.template import loader
from common import api
from common import decorator
from common import exception
from common import util
@decorator.login_required
def confirm_email(request, code):
  """Activate the email-confirmation *code* for the signed-in user, then
  redirect to their overview page with a confirmation flash message."""
  rel_ref = api.activation_activate_email(request.user,
                                          request.user.nick,
                                          code)
  return util.RedirectFlash(request.user.url() + "/overview",
                            "Email address '%s' confirmed." % rel_ref.target)
| apache-2.0 |
sdcooke/django | tests/forms_tests/models.py | 261 | 3805 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import itertools
import tempfile
from django.core.files.storage import FileSystemStorage
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# Shared counter backing callable_default(): the value advances on every
# call, letting tests observe that callable field defaults are re-evaluated.
callable_default_counter = itertools.count()
def callable_default():
    # Returns 0, 1, 2, ... across successive calls.
    return next(callable_default_counter)
# Throwaway on-disk storage used by FileModel's FileField during tests.
temp_storage = FileSystemStorage(location=tempfile.mkdtemp())
class BoundaryModel(models.Model):
    # Nullable/optional to exercise boundary validation of the form field.
    positive_integer = models.PositiveIntegerField(null=True, blank=True)
class Defaults(models.Model):
    # One field per kind of default: static string, date, int, and callable.
    name = models.CharField(max_length=255, default='class default value')
    def_date = models.DateField(default=datetime.date(1980, 1, 1))
    value = models.IntegerField(default=42)
    callable_default = models.IntegerField(default=callable_default)
class ChoiceModel(models.Model):
    """For ModelChoiceField and ModelMultipleChoiceField tests."""
    # Blank-string sentinel for "no choice".
    CHOICES = [
        ('', 'No Preference'),
        ('f', 'Foo'),
        ('b', 'Bar'),
    ]

    # None sentinel with integer values.
    INTEGER_CHOICES = [
        (None, 'No Preference'),
        (1, 'Foo'),
        (2, 'Bar'),
    ]

    # None sentinel with string values.
    STRING_CHOICES_WITH_NONE = [
        (None, 'No Preference'),
        ('f', 'Foo'),
        ('b', 'Bar'),
    ]

    name = models.CharField(max_length=10)
    choice = models.CharField(max_length=2, blank=True, choices=CHOICES)
    choice_string_w_none = models.CharField(
        max_length=2, blank=True, null=True, choices=STRING_CHOICES_WITH_NONE)
    choice_integer = models.IntegerField(choices=INTEGER_CHOICES, blank=True,
                                         null=True)
@python_2_unicode_compatible
class ChoiceOptionModel(models.Model):
    """Destination for ChoiceFieldModel's ForeignKey.

    Can't reuse ChoiceModel because error_message tests require that it have
    no instances.
    """
    name = models.CharField(max_length=10)

    class Meta:
        ordering = ('name',)

    def __str__(self):
        # Identify rows by primary key for readable test failure output.
        return 'ChoiceOption %d' % self.pk
def choice_default():
    # Callable FK default: fetch-or-create the canonical 'default' option row
    # and return its primary key (performs a database query when evaluated).
    return ChoiceOptionModel.objects.get_or_create(name='default')[0].pk
def choice_default_list():
    # Callable M2M default: a fresh one-element list wrapping choice_default()
    # so instances never share the same mutable default.
    return [choice_default()]
def int_default():
    """Return the fixed integer default (1) used by the FK test fields."""
    default_pk = 1
    return default_pk
def int_list_default():
    """Return a fresh one-element list default ([1]) for the M2M test fields."""
    default_pks = [1]
    return default_pks
class ChoiceFieldModel(models.Model):
    """Model with ForeignKey to another model, for testing ModelForm
    generation with ModelChoiceField."""
    # FK whose default is a callable performing a DB lookup.
    choice = models.ForeignKey(
        ChoiceOptionModel,
        models.CASCADE,
        blank=False,
        default=choice_default,
    )
    # FK whose default is a callable returning a static pk.
    choice_int = models.ForeignKey(
        ChoiceOptionModel,
        models.CASCADE,
        blank=False,
        related_name='choice_int',
        default=int_default,
    )
    # M2M whose default is a callable performing a DB lookup.
    multi_choice = models.ManyToManyField(
        ChoiceOptionModel,
        blank=False,
        related_name='multi_choice',
        default=choice_default_list,
    )
    # M2M whose default is a callable returning a static pk list.
    multi_choice_int = models.ManyToManyField(
        ChoiceOptionModel,
        blank=False,
        related_name='multi_choice_int',
        default=int_list_default,
    )
class OptionalMultiChoiceModel(models.Model):
    # Pairs a required M2M (blank=False) with an optional one (blank=True)
    # against the same target model.
    multi_choice = models.ManyToManyField(
        ChoiceOptionModel,
        blank=False,
        related_name='not_relevant',
        default=choice_default,
    )
    multi_choice_optional = models.ManyToManyField(
        ChoiceOptionModel,
        blank=True,
        related_name='not_relevant2',
    )
class FileModel(models.Model):
    # FileField backed by the module-level temp_storage (a tempdir), so tests
    # never write into the real media root.
    file = models.FileField(storage=temp_storage, upload_to='tests')
@python_2_unicode_compatible
class Group(models.Model):
    # Minimal named model; __str__ is just the name.
    name = models.CharField(max_length=10)

    def __str__(self):
        return '%s' % self.name
class Cheese(models.Model):
    # Minimal named model used by the forms tests.
    name = models.CharField(max_length=100)
class Article(models.Model):
    # Minimal model with a single free-text field.
    content = models.TextField()
| bsd-3-clause |
landism/pants | contrib/go/src/python/pants/contrib/go/tasks/go_task.py | 3 | 6874 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import json
import re
import subprocess
from collections import namedtuple
from pants.base.workunit import WorkUnit, WorkUnitLabel
from pants.task.task import Task
from pants.util.memo import memoized_method, memoized_property
from twitter.common.collections.orderedset import OrderedSet
from pants.contrib.go.subsystems.go_distribution import GoDistribution
from pants.contrib.go.targets.go_binary import GoBinary
from pants.contrib.go.targets.go_library import GoLibrary
from pants.contrib.go.targets.go_local_source import GoLocalSource
from pants.contrib.go.targets.go_remote_library import GoRemoteLibrary
from pants.contrib.go.targets.go_target import GoTarget
class GoTask(Task):
  """Base class for Go-related pants tasks; provides target-type predicates
  and access to the configured Go distribution."""

  @classmethod
  def subsystem_dependencies(cls):
    return super(GoTask, cls).subsystem_dependencies() + (GoDistribution.Factory,)

  @staticmethod
  def is_binary(target):
    """True if target is a GoBinary."""
    return isinstance(target, GoBinary)

  @staticmethod
  def is_local_lib(target):
    """True if target is a GoLibrary (local source library)."""
    return isinstance(target, GoLibrary)

  @staticmethod
  def is_remote_lib(target):
    """True if target is a GoRemoteLibrary."""
    return isinstance(target, GoRemoteLibrary)

  @staticmethod
  def is_local_src(target):
    """True if target is any GoLocalSource (binary or local library)."""
    return isinstance(target, GoLocalSource)

  @staticmethod
  def is_go(target):
    """True if target is any Go target at all."""
    return isinstance(target, GoTarget)

  @memoized_property
  def go_dist(self):
    # Cached: creating the distribution may download/validate the Go SDK.
    return GoDistribution.Factory.global_instance().create()

  @memoized_property
  def import_oracle(self):
    """Return an import oracle that can help look up and categorize imports.

    :rtype: :class:`ImportOracle`
    """
    return ImportOracle(go_dist=self.go_dist, workunit_factory=self.context.new_workunit)

  @memoized_property
  def goos_goarch(self):
    """Return concatenated $GOOS and $GOARCH environment variables, separated by an underscore.

    Useful for locating where the Go compiler is placing binaries ("$GOPATH/pkg/$GOOS_$GOARCH").

    :rtype: string
    """
    return '{goos}_{goarch}'.format(goos=self._lookup_go_env_var('GOOS'),
                                    goarch=self._lookup_go_env_var('GOARCH'))

  def _lookup_go_env_var(self, var):
    # Shells out to `go env <var>` and strips the trailing newline.
    return self.go_dist.create_go_cmd('env', args=[var]).check_output().strip()
class ImportOracle(object):
  """Answers questions about Go imports."""

  class ListDepsError(Exception):
    """Indicates a problem listing import paths for one or more packages."""

  def __init__(self, go_dist, workunit_factory):
    # go_dist: the GoDistribution used to run `go list`.
    # workunit_factory: callable producing pants workunits for tool runs.
    self._go_dist = go_dist
    self._workunit_factory = workunit_factory

  @memoized_property
  def go_stdlib(self):
    """Return the set of all Go standard library import paths.

    :rtype: frozenset of string
    """
    # `go list std` prints one stdlib import path per line.
    out = self._go_dist.create_go_cmd('list', args=['std']).check_output()
    return frozenset(out.strip().split())

  # This simple regex mirrors the behavior of the relevant go code in practice (see
  # repoRootForImportDynamic and surrounding code in
  # https://github.com/golang/go/blob/7bc40ffb05d8813bf9b41a331b45d37216f9e747/src/cmd/go/vcs.go).
  _remote_import_re = re.compile('[^.]+(?:\.[^.]+)+\/')

  def is_remote_import(self, import_path):
    """Whether the specified import_path denotes a remote import."""
    # Heuristic: a dotted host name followed by a slash (e.g. github.com/...).
    return self._remote_import_re.match(import_path) is not None

  def is_go_internal_import(self, import_path):
    """Return `True` if the given import path will be satisfied directly by the Go distribution.

    For example, both the go standard library ("archive/tar", "bufio", "fmt", etc.) and "C" imports
    are satisfiable by a Go distribution via linking of internal Go code and external c standard
    library code respectively.

    :rtype: bool
    """
    # The "C" package is a psuedo-package that links through to the c stdlib, see:
    # http://blog.golang.org/c-go-cgo
    return import_path == 'C' or import_path in self.go_stdlib

  class ImportListing(namedtuple('ImportListing', ['pkg_name',
                                                   'imports',
                                                   'test_imports',
                                                   'x_test_imports'])):
    """Represents all the imports of a given package."""

    @property
    def all_imports(self):
      """Return all imports for this package, including any test imports.

      :rtype: list of string
      """
      # OrderedSet de-duplicates while preserving first-seen order.
      return list(OrderedSet(self.imports + self.test_imports + self.x_test_imports))

  @memoized_method
  def list_imports(self, pkg, gopath=None):
    """Return a listing of the dependencies of the given package.

    :param string pkg: The package whose files to list all dependencies of.
    :param string gopath: An optional $GOPATH which points to a Go workspace containing `pkg`.
    :returns: The import listing for `pkg` that represents all its dependencies.
    :rtype: :class:`ImportOracle.ImportListing`
    :raises: :class:`ImportOracle.ListDepsError` if there was a problem listing the dependencies
             of `pkg`.
    """
    go_cmd = self._go_dist.create_go_cmd('list', args=['-json', pkg], gopath=gopath)
    with self._workunit_factory('list {}'.format(pkg), cmd=str(go_cmd),
                                labels=[WorkUnitLabel.TOOL]) as workunit:
      # TODO(John Sirois): It would be nice to be able to tee the stdout to the workunit to we have
      # a capture of the json available for inspection in the server console.
      process = go_cmd.spawn(stdout=subprocess.PIPE, stderr=workunit.output('stderr'))
      out, _ = process.communicate()
      returncode = process.returncode
      workunit.set_outcome(WorkUnit.SUCCESS if returncode == 0 else WorkUnit.FAILURE)
      if returncode != 0:
        raise self.ListDepsError('Problem listing imports for {}: {} failed with exit code {}'
                                 .format(pkg, go_cmd, returncode))
      data = json.loads(out)

      # XTestImports are for black box tests. These test files live inside the package dir but
      # declare a different package and thus can only access the public members of the package's
      # production code. This style of test necessarily means the test file will import the main
      # package. For pants, this would lead to a cyclic self-dependency, so we omit the main
      # package as implicitly included as its own dependency.
      x_test_imports = [i for i in data.get('XTestImports', []) if i != pkg]

      return self.ImportListing(pkg_name=data.get('Name'),
                                imports=data.get('Imports', []),
                                test_imports=data.get('TestImports', []),
                                x_test_imports=x_test_imports)
| apache-2.0 |
tako0910/android_kernel_htc_valentewx | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | 1935 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
    # Mean of a running total over n samples. NOTE: under Python 2 (this
    # file's target) integer division truncates; under Python 3 it would be
    # true division.
    return total / n
def nsecs(secs, nsecs):
    # Combine a (seconds, nanoseconds) timestamp pair into a single
    # nanosecond count.
    return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
    # Whole-seconds component of a nanosecond count.
    return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
    # Sub-second (nanosecond remainder) component of a nanosecond count.
    return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
    # Render a nanosecond count as "seconds.nanoseconds" (e.g. "    1.000000500").
    # Fixed: the original assigned the formatted value with a stray trailing
    # comma, so the function returned a 1-tuple rather than a string (it only
    # happened to work when callers formatted it with a single "%s"). It also
    # shadowed the builtin `str`; return the formatted string directly.
    return "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))
def add_stats(dict, key, value):
    # Fold one sample into dict[key], stored as a (min, max, avg, count)
    # tuple. Uses `in` instead of the Python-2-only dict.has_key() (works on
    # both 2 and 3).
    if key not in dict:
        dict[key] = (value, value, value, 1)
    else:
        lo, hi, mean, count = dict[key]
        lo = min(lo, value)
        hi = max(hi, value)
        # Maintain a true running mean. The original computed
        # (avg + value) / 2, which exponentially over-weights recent samples
        # instead of averaging all of them.
        mean = (mean * count + value) / (count + 1)
        dict[key] = (lo, hi, mean, count + 1)
def clear_term():
    # ANSI escape sequence: move the cursor home and clear the screen.
    print("\x1b[H\x1b[2J")
audit_package_warned = False

# Best-effort setup of the audit-libs-python bindings, used to translate
# syscall numbers into names. On any failure (package missing, unknown
# architecture) we warn once and syscall_name() falls back to numbers.
try:
	import audit
	# Map uname machine strings to the audit library's architecture ids.
	machine_to_id = {
		'x86_64': audit.MACH_86_64,
		'alpha'	: audit.MACH_ALPHA,
		'ia64'	: audit.MACH_IA64,
		'ppc'	: audit.MACH_PPC,
		'ppc64'	: audit.MACH_PPC64,
		's390'	: audit.MACH_S390,
		's390x'	: audit.MACH_S390X,
		'i386'	: audit.MACH_X86,
		'i586'	: audit.MACH_X86,
		'i686'	: audit.MACH_X86,
	}
	try:
		# MACH_ARMEB is only present in newer audit bindings.
		machine_to_id['armeb'] = audit.MACH_ARMEB
	except:
		pass
	machine_id = machine_to_id[os.uname()[4]]
except:
	if not audit_package_warned:
		audit_package_warned = True
		print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
	# Translate a syscall number into its name for the host architecture.
	# Falls back to the numeric string when the audit bindings are missing
	# (audit/machine_id undefined raises NameError) or the id is unknown.
	try:
		return audit.audit_syscall_to_name(id, machine_id)
	except:
		return str(id)
def strerror(nr):
	# Return the symbolic errno name (e.g. 'ENOENT') for nr, accepting
	# either sign; unknown values yield a descriptive fallback string.
	try:
		return errno.errorcode[abs(nr)]
	except KeyError:
		# Narrowed from a bare except: only a missing code is expected
		# here, and a bare except would also swallow KeyboardInterrupt.
		return "Unknown %d errno" % nr
| gpl-2.0 |
2014c2g3/w16b_test | static/Brython3.1.1-20150328-091302/Lib/xml/dom/minicompat.py | 781 | 3228 | """Python version compatibility support for minidom."""
# This module should only be imported using "import *".
#
# The following names are defined:
#
# NodeList -- lightest possible NodeList implementation
#
# EmptyNodeList -- lightest possible NodeList that is guaranteed to
# remain empty (immutable)
#
# StringTypes -- tuple of defined string types
#
# defproperty -- function used in conjunction with GetattrMagic;
# using these together is needed to make them work
# as efficiently as possible in both Python 2.2+
# and older versions. For example:
#
# class MyClass(GetattrMagic):
# def _get_myattr(self):
# return something
#
# defproperty(MyClass, "myattr",
# "return some value")
#
# For Python 2.2 and newer, this will construct a
# property object on the class, which avoids
# needing to override __getattr__(). It will only
# work for read-only attributes.
#
# For older versions of Python, inheriting from
# GetattrMagic will use the traditional
# __getattr__() hackery to achieve the same effect,
# but less efficiently.
#
# defproperty() should be used for each version of
# the relevant _get_<property>() function.
__all__ = ["NodeList", "EmptyNodeList", "StringTypes", "defproperty"]
import xml.dom
StringTypes = (str,)
class NodeList(list):
    """Lightweight DOM NodeList: a plain Python list plus DOM accessors."""
    __slots__ = ()

    def item(self, index):
        """Return the node at *index*, or None when out of range."""
        return self[index] if 0 <= index < len(self) else None

    def _get_length(self):
        return len(self)

    def _set_length(self, value):
        raise xml.dom.NoModificationAllowedErr(
            "attempt to modify read-only attribute 'length'")

    length = property(_get_length, _set_length,
                      doc="The number of nodes in the NodeList.")

    def __getstate__(self):
        # Pickle as a plain list so the slotted subclass round-trips.
        return self[:]

    def __setstate__(self, state):
        self[:] = state
class EmptyNodeList(tuple):
    """An immutable, guaranteed-empty NodeList."""
    __slots__ = ()

    def __add__(self, other):
        # Concatenation promotes to a mutable NodeList holding *other*.
        return NodeList(other)

    def __radd__(self, other):
        return NodeList(other)

    def item(self, index):
        """Always None: an empty list has nothing at any index."""
        return None

    def _get_length(self):
        return 0

    def _set_length(self, value):
        raise xml.dom.NoModificationAllowedErr(
            "attempt to modify read-only attribute 'length'")

    length = property(_get_length, _set_length,
                      doc="The number of nodes in the NodeList.")
def defproperty(klass, name, doc):
    """Attach a read-only property *name* to *klass*, backed by the class's
    existing ``_get_<name>`` method; any write raises
    NoModificationAllowedErr."""
    getter = getattr(klass, ("_get_" + name))

    def setter(self, value, name=name):
        # *name* is bound at definition time; writes are always rejected.
        raise xml.dom.NoModificationAllowedErr(
            "attempt to modify read-only attribute " + repr(name))

    assert not hasattr(klass, "_set_" + name), \
           "expected not to find _set_" + name
    setattr(klass, name, property(getter, setter, doc=doc))
| agpl-3.0 |
Zarokka/exaile | xl/player/gst/engine.py | 4 | 23626 | # Copyright (C) 2008-2010 Adam Olsen
# Copyright (C) 2014-2015 Dustin Spicuzza
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# The developers of the Exaile media player hereby grant permission
# for non-GPL compatible GStreamer and Exaile plugins to be used and
# distributed together with GStreamer and Exaile. This permission is
# above and beyond the permissions granted by the GPL license by which
# Exaile is covered. If you modify this code, you may extend this
# exception to your version of the code, but you are not obligated to
# do so. If you do not wish to do so, delete this exception statement
# from your version.
from gi.repository import GLib
from gi.repository import Gst
import logging
import os
import urlparse
from xl import common
from xl import event
from . import gst_utils
from .dynamic_sink import DynamicAudioSink
from .sink import create_device
from xl.player.engine import ExaileEngine
from xl.player.track_fader import TrackFader
class ExaileGstEngine(ExaileEngine):
    '''
        Super shiny GStreamer-based engine that does all the things!

        * Audio plugins to modify the output stream
        * gapless playback
        * crossfading (requires gst-plugins-bad)
        * Dynamic audio device switching at runtime

        Notes about crossfading:

        The big change from previous attempts at crossfading is that unlike
        the former unified engine, this tries to depend solely on the playbin
        element to play audio files. The reason for this is that playbin
        is 5000+ lines of battle-hardened C code that handles all of the
        weird edges cases in gstreamer, and we don't wish to duplicate that in
        Exaile if we can avoid it.

        Instead, we can use multiple playbin instances that have duplicate
        output audio devices. This makes crossfading a significantly simpler
        proposition.

        There are two modes for this thing:

        * One is normal/gapless mode (no crossfade), and it uses a normal
          playbin element and controls that directly. The playbin is wrapped
          by the AudioStream object, and it's audio sink is a DynamicAudioSink
          element with the

        * The other is crossfading mode (which requires gst-plugins-bad to be
          installed). Create multiple AudioStream objects, and they have a
          DynamicAudioSink object hooked up to an interaudiosink.

        You can register plugins to modify the output audio via the following
        providers:

        * gst_audio_filter: Multiple instances of this can be created, as they
                            get applied to each stream. It is recommended that
                            plugins inherit from :class:`.ElementBin`
    '''

    def __init__(self, name, player):
        ExaileEngine.__init__(self, name, player)
        self.logger = logging.getLogger('%s [%s]' % (__name__, name))

        # Default settings; each is overwritten below when a saved setting
        # exists, and updated live via __setattr__ when settings change.
        self.crossfade_enabled = False
        self.crossfade_duration = 3000

        self.audiosink_device = None
        self.audiosink = None
        self.custom_sink_pipe = None

        # This means to fade in when the user plays a track, only enabled
        # when crossfade isn't enabled
        self.user_fade_enabled = False
        self.user_fade_duration = 1000

        # Key: option name; value: attribute on self
        options = {
            '%s/crossfading' % self.name: 'crossfade_enabled',
            '%s/crossfade_duration' % self.name: 'crossfade_duration',
            '%s/audiosink_device' % self.name: 'audiosink_device',
            '%s/audiosink' % self.name: 'audiosink',
            '%s/custom_sink_pipe' % self.name: 'custom_sink_pipe',
            '%s/user_fade_enabled' % self.name: 'user_fade_enabled',
            '%s/user_fade' % self.name: 'user_fade_duration'
        }

        self.settings_unsubscribe = common.subscribe_for_settings(self.name, options, self)

    #
    # Dynamic properties
    #

    def __setattr__(self, name, value):
        object.__setattr__(self, name, value)
        # During __init__ 'initialized' doesn't exist yet; skip the
        # reconfiguration hooks until initialize() has run.
        if not hasattr(self, 'initialized'):
            return
        if name in ['crossfade_enabled', 'crossfade_duration']:
            self._reconfigure_crossfader()
        if name in ['audiosink_device', 'audiosink', 'custom_sink_pipe']:
            self._reconfigure_sink()

    #
    # API
    #

    def initialize(self):
        # Set via object.__setattr__ so this assignment doesn't itself
        # trigger the reconfiguration hooks above.
        object.__setattr__(self, 'initialized', True)

        self.main_stream = AudioStream(self)
        self.other_stream = None
        self.crossfade_out = None

        self.player.engine_load_volume()

        self._reconfigure_crossfader()

    def _reconfigure_crossfader(self):
        # Creates/destroys the second stream and updates fader durations on
        # both streams to match the current crossfade settings.
        self.logger.info("Reconfiguring crossfading")
        cf_duration = None
        if self.crossfade_enabled:
            cf_duration = self.crossfade_duration/1000.0
            if self.other_stream is None:
                self.other_stream = AudioStream(self)
            self.other_stream.set_user_volume(self.main_stream.get_user_volume())
            self.other_stream.reconfigure_fader(cf_duration, cf_duration)
            self.logger.info("Crossfade: enabled (%sms)", self.crossfade_duration)
        else:
            self.logger.info("Crossfade: disabled")
            if self.other_stream is not None:
                self.other_stream.destroy()
                # NOTE(review): other_stream is destroyed here but never reset
                # to None, so later `is not None` checks still call into the
                # destroyed stream, and re-enabling crossfade reuses it even
                # though destroy() removed its bus signal watch -- confirm
                # whether this is intended.

        self.main_stream.reconfigure_fader(cf_duration, cf_duration)

    def _reconfigure_sink(self):
        # Rebuild the audio sink on both streams (device/sink setting change).
        self.logger.info("Reconfiguring audiosinks")
        self.main_stream.reconfigure_sink()
        if self.other_stream is not None:
            self.other_stream.reconfigure_sink()

    def destroy(self, permanent=True):
        # Tear down both streams; only unsubscribe from settings when the
        # engine is going away for good (permanent=False is used to recover
        # from errors by re-initializing).
        self.main_stream.destroy()
        if self.other_stream is not None:
            self.other_stream.destroy()
        if permanent:
            self.settings_unsubscribe()

    #
    # Engine API
    #

    def get_current_track(self):
        return self.main_stream.current_track

    def get_position(self):
        # Position in nanoseconds (GStreamer time units).
        return self.main_stream.get_position()

    def get_state(self):
        # Map GStreamer states onto the engine's 'playing'/'paused'/'stopped'.
        state = self.main_stream.get_gst_state()
        if state == Gst.State.PLAYING:
            return 'playing'
        elif state == Gst.State.PAUSED:
            return 'paused'
        else:
            return 'stopped'

    def get_volume(self):
        return self.main_stream.get_user_volume()

    def on_track_stopoffset_changed(self, track):
        # Only streams currently playing this track need their fader updated.
        for stream in [self.main_stream, self.other_stream]:
            if stream is None or stream.current_track != track:
                continue
            # The fader executes the stop offset, so reconfigure it
            if self.crossfade_enabled:
                stream.reconfigure_fader(self.crossfade_duration,
                                         self.crossfade_duration)
            else:
                stream.reconfigure_fader(None, None)

    def pause(self):
        self.main_stream.pause()
        if self.other_stream is not None:
            self.other_stream.stop()

    def play(self, track, start_at, paused):
        # User-initiated play: not already queued, not an auto-advance.
        self._next_track(track, start_at, paused, False, False)

    def seek(self, value):
        # value is in seconds; returns whether the seek event was handled.
        return self.main_stream.seek(value)

    def set_volume(self, volume):
        self.main_stream.set_user_volume(volume)
        if self.other_stream is not None:
            self.other_stream.set_user_volume(volume)

    def stop(self):
        if self.other_stream is not None:
            self.other_stream.stop()

        # Suppress the EOS so we can report the stop as user-initiated.
        prior_track = self.main_stream.stop(emit_eos=False)
        self.player.engine_notify_track_end(prior_track, True)

    def unpause(self):
        self.main_stream.unpause()

    #
    # Engine private functions
    #

    def _autoadvance_track(self, still_fading=False):
        # Advance to the next playlist track, or stop when there is none.
        track = self.player.engine_autoadvance_get_next_track()
        if track:
            play_args = self.player.engine_autoadvance_notify_next(track) + (False, True)
            self._next_track(*play_args)

        # If still fading, don't stop
        elif not still_fading:
            self.stop()

    @common.idle_add()
    def _eos_func(self, stream):
        # Runs on the GLib main loop; only the main stream's EOS advances.
        if stream == self.main_stream:
            self._autoadvance_track()

    def _error_func(self, stream, msg):
        # Destroy the streams, and create a new one, just in case
        self.player.engine_notify_error(msg)
        self.destroy(permanent=False)
        self.initialize()

    def _next_track(self, track, start_at, paused, already_queued, autoadvance):
        # Core transition logic shared by play(), gapless and crossfade paths.
        prior_track = self.main_stream.current_track

        # Notify that the track is done
        if prior_track is not None:
            self.player.engine_notify_track_end(prior_track, False)

        if self.crossfade_enabled:
            # Swap streams: the old main fades out while the new main plays.
            self.main_stream, self.other_stream = self.other_stream, self.main_stream

            self.main_stream.play(track, start_at, paused, already_queued,
                                  self.crossfade_duration/1000.0,
                                  self.crossfade_duration/1000.0)
            self.other_stream.fader.fade_out_on_play()

        elif self.user_fade_enabled and not autoadvance:
            # Fade-in only applies to explicit user-initiated plays.
            self.main_stream.play(track, start_at, paused, already_queued,
                                  self.user_fade_duration/1000.0)
        else:
            self.main_stream.play(track, start_at, paused, already_queued)

        self.player.engine_notify_track_start(track)
class AudioStream(object):
    '''
        An object that can play one or more tracks
    '''

    # Class-level counter used to give every stream a unique name.
    idx = 0

    def __init__(self, engine):
        AudioStream.idx += 1
        self.name = '%s-audiostream-%s' % (engine.name, self.idx)

        self.engine = engine
        self.logger = logging.getLogger('%s [%s-a%s]' % (__name__, engine.name, self.idx))

        # track being played by this stream
        self.current_track = None
        # Track queued by the gapless path (see on_about_to_finish).
        self.buffered_track = None

        # This exists because if there is a sink error, it doesn't
        # really make sense to recreate the sink -- it'll just fail
        # again. Instead, wait for the user to try to play a track,
        # and maybe the issue has resolved itself (plugged device in?)
        self.needs_sink = True
        self.last_position = 0

        self.audio_filters = gst_utils.ProviderBin('gst_audio_filter',
                                                   '%s-filters' % self.name)

        self.playbin = Gst.ElementFactory.make("playbin", "%s-playbin" % self.name)
        if self.playbin is None:
            raise TypeError("gstreamer 1.x base plugins not installed!")

        gst_utils.disable_video_text(self.playbin)

        # about-to-finish fires shortly before EOS; used for gapless queueing.
        self.playbin.connect("about-to-finish", self.on_about_to_finish)

        # Discard any video; sync=True keeps the pipeline clock consistent.
        video = Gst.ElementFactory.make("fakesink", '%s-fakevideo' % self.name)
        video.set_property('sync', True)
        self.playbin.set_property('video-sink', video)

        self.audio_sink = DynamicAudioSink('%s-sink' % self.name)
        self.playbin.set_property('audio-sink', self.audio_sink)

        # Setup the bus
        bus = self.playbin.get_bus()
        bus.add_signal_watch()
        bus.connect('message', self.on_message)

        # Pulsesink changes volume behind our back, track it
        self.playbin.connect('notify::volume', self.on_volume_change)

        self.fader = TrackFader(self, self.on_fade_out_begin,
                                '%s-fade-%s' % (engine.name, self.idx))

    def destroy(self):
        # Stop fading, drop the pipeline and detach from bus messages.
        self.fader.stop()
        self.playbin.set_state(Gst.State.NULL)
        self.playbin.get_bus().remove_signal_watch()

    def reconfigure_sink(self):
        # Build a fresh device sink from current settings and swap it in.
        self.needs_sink = False

        sink = create_device(self.engine.name)
        self.audio_sink.reconfigure(sink)

    def reconfigure_fader(self, fade_in_duration, fade_out_duration):
        # Only meaningful while a track is loaded (pipeline not NULL).
        if self.get_gst_state() != Gst.State.NULL:
            self.fader.setup_track(self.current_track,
                                   fade_in_duration, fade_out_duration,
                                   is_update=True)

    def get_gst_state(self):
        # Bounded wait so UI calls never block indefinitely.
        return self.playbin.get_state(timeout=50*Gst.MSECOND)[1]

    def get_position(self):
        # TODO: This only works when pipeline is prerolled/ready?
        # Caches the last known position so queries while paused still work.
        if not self.get_gst_state() == Gst.State.PAUSED:
            res, self.last_position = \
                self.playbin.query_position(Gst.Format.TIME)

            if res is False:
                self.last_position = 0

        return self.last_position

    def get_volume(self):
        # Raw playbin volume (includes fader adjustment).
        return self.playbin.props.volume

    def get_user_volume(self):
        # Volume as the user set it, independent of any active fade.
        return self.fader.get_user_volume()

    def pause(self):
        # This caches the current last position before pausing
        self.get_position()
        self.playbin.set_state(Gst.State.PAUSED)
        self.fader.pause()

    def play(self, track, start_at, paused, already_queued,
             fade_in_duration=None, fade_out_duration=None):
        '''fade duration is in seconds'''

        if not already_queued:
            self.stop(emit_eos=False)

            # For the moment, the only safe time to add/remove elements
            # is when the playbin is NULL, so do that here..
            if self.audio_filters.setup_elements():
                self.logger.debug("Applying audio filters")
                self.playbin.props.audio_filter = self.audio_filters
            else:
                self.logger.debug("Not applying audio filters")
                self.playbin.props.audio_filter = None

        if self.needs_sink:
            self.reconfigure_sink()

        self.current_track = track
        self.last_position = 0
        self.buffered_track = None

        uri = track.get_loc_for_io()
        self.logger.info("Playing %s", common.sanitize_url(uri))

        # This is only set for gapless playback
        if not already_queued:
            self.playbin.set_property("uri", uri)
            if urlparse.urlsplit(uri)[0] == "cdda":
                # Needed to select the right CD drive; see on_source_setup.
                self.notify_id = self.playbin.connect('source-setup',
                                                      self.on_source_setup, track)

        # Start in paused mode if we need to seek
        if paused or start_at is not None:
            self.playbin.set_state(Gst.State.PAUSED)
        elif not already_queued:
            self.playbin.set_state(Gst.State.PLAYING)

        self.fader.setup_track(track, fade_in_duration, fade_out_duration, now=0)

        if start_at is not None:
            self.seek(start_at)
            if not paused:
                self.playbin.set_state(Gst.State.PLAYING)

        if paused:
            self.fader.pause()

    def seek(self, value):
        '''value is in seconds'''

        # TODO: Make sure that we're in a valid seekable state before seeking?

        # wait up to 1s for the state to switch, else this fails
        if self.playbin.get_state(timeout=1000*Gst.MSECOND)[0] != Gst.StateChangeReturn.SUCCESS:
            # TODO: This error message is misleading, when does this ever happen?
            # TODO: if the sink is incorrectly specified, this error happens first.
            #self.engine._error_func(self, "Could not start at specified offset")
            self.logger.warning("Error seeking to specified offset")
            return False

        new_position = int(Gst.SECOND * value)
        seek_event = Gst.Event.new_seek(1.0, Gst.Format.TIME,
                                        Gst.SeekFlags.FLUSH, Gst.SeekType.SET,
                                        new_position,
                                        Gst.SeekType.NONE, 0)

        self.last_position = new_position
        self.fader.seek(value)

        return self.playbin.send_event(seek_event)

    def set_volume(self, volume):
        #self.logger.debug("Set playbin volume: %.2f", volume)
        # TODO: strange issue where pulse sets the system audio volume
        # when exaile starts up...
        self.playbin.props.volume = volume

    def set_user_volume(self, volume):
        # Routed through the fader so it composes with any active fade.
        self.logger.debug("Set user volume: %.2f", volume)
        self.fader.set_user_volume(volume)

    def stop(self, emit_eos=True):
        '''Stops the stream and returns the track that was playing.'''
        prior_track = self.current_track
        self.current_track = None
        self.playbin.set_state(Gst.State.NULL)
        self.fader.stop()
        if emit_eos:
            self.engine._eos_func(self)
        return prior_track

    def unpause(self):
        # gstreamer does not buffer paused network streams, so if the user
        # is unpausing a stream, just restart playback
        current = self.current_track
        if not (current.is_local() or current.get_tag_raw('__length')):
            self.playbin.set_state(Gst.State.READY)

        self.playbin.set_state(Gst.State.PLAYING)
        self.fader.unpause()

    #
    # Events
    #

    def on_about_to_finish(self, *args):
        '''
            This function exists solely to allow gapless playback for audio
            formats that support it. Setting the URI property of the playbin
            will queue the track for playback immediately after the previous
            track.

            .. note:: This is called from the gstreamer thread
        '''

        if self.engine.crossfade_enabled:
            return

        track = self.engine.player.engine_autoadvance_get_next_track(gapless=True)
        if track:
            uri = track.get_loc_for_io()
            self.playbin.set_property('uri', uri)
            self.buffered_track = track

            self.logger.debug("Gapless transition: queuing %s", common.sanitize_url(uri))

    def on_fade_out_begin(self):
        # Crossfade mode starts the next track while this one fades out.
        if self.engine.crossfade_enabled:
            self.engine._autoadvance_track(still_fading=True)

    def on_message(self, bus, message):
        '''
            This is called on the main thread
        '''

        if message.type == Gst.MessageType.BUFFERING:
            percent = message.parse_buffering()
            if not percent < 100:
                self.logger.info('Buffering complete')
            if percent % 5 == 0:
                event.log_event('playback_buffering', self.engine.player, percent)

        elif message.type == Gst.MessageType.TAG:
            """ Update track length and optionally metadata from gstreamer's parser.
                Useful for streams and files mutagen doesn't understand. """

            current = self.current_track

            if not current.is_local():
                gst_utils.parse_stream_tags(current, message.parse_tag())

            if current and not current.get_tag_raw('__length'):
                res, raw_duration = self.playbin.query_duration(Gst.Format.TIME)
                if not res:
                    self.logger.error("Couldn't query duration")
                    raw_duration = 0
                duration = float(raw_duration)/Gst.SECOND
                if duration > 0:
                    current.set_tag_raw('__length', duration)

        elif message.type == Gst.MessageType.EOS and \
                not self.get_gst_state() == Gst.State.PAUSED:
            self.engine._eos_func(self)

        elif message.type == Gst.MessageType.STREAM_START and \
                message.src == self.playbin and \
                self.buffered_track is not None:

            # This handles starting the next track during gapless transition
            buffered_track = self.buffered_track
            self.buffered_track = None
            play_args = self.engine.player.engine_autoadvance_notify_next(buffered_track) + (True, True)
            self.engine._next_track(*play_args)

        elif message.type == Gst.MessageType.STATE_CHANGED:

            # This idea from quodlibet: pulsesink will not notify us when
            # volume changes if the stream is paused, so do it when the
            # state changes.
            if message.src == self.audio_sink:
                self.playbin.notify("volume")

        elif message.type == Gst.MessageType.ERROR:

            # Error handling code is from quodlibet
            gerror, debug_info = message.parse_error()
            message_text = ""
            if gerror:
                message_text = gerror.message.rstrip(".")

            if message_text == "":
                # The most readable part is always the last..
                message_text = debug_info[debug_info.rfind(':') + 1:]

                # .. unless there's nothing in it.
                if ' ' not in message_text:
                    if debug_info.startswith('playsink'):
                        message_text += _(': Possible audio device error, is it plugged in?')

            self.logger.error("Playback error: %s", message_text)
            self.logger.debug("- Extra error info: %s", debug_info)

            # Dump the pipeline graph for post-mortem debugging.
            envname = 'GST_DEBUG_DUMP_DOT_DIR'
            if envname not in os.environ:
                import xl.xdg
                os.environ[envname] = xl.xdg.get_logs_dir()

            Gst.debug_bin_to_dot_file(self.playbin, Gst.DebugGraphDetails.ALL, self.name)
            self.logger.debug("- Pipeline debug info written to file '%s/%s.dot'",
                              os.environ[envname], self.name)

            self.engine._error_func(self, message_text)

            # TODO: Missing plugin error handling from quod libet
            # -- http://cgit.freedesktop.org/gstreamer/gstreamer/tree/docs/design/part-missing-plugins.txt

        return True

    def on_source_setup(self, playbin, source, track):
        # this is for handling multiple CD devices properly
        device = track.get_loc_for_io().split("#")[-1]
        source.props.device = device
        playbin.disconnect(self.notify_id)

    def on_volume_change(self, e, p):
        # Re-derive the user volume from the raw playbin volume; only notify
        # the player when an external actor (e.g. pulsesink) changed it.
        real = self.playbin.props.volume
        vol, is_same = self.fader.calculate_user_volume(real)
        if not is_same:
            GLib.idle_add(self.engine.player.engine_notify_user_volume_change, vol)
| gpl-2.0 |
SrNetoChan/QGIS | python/plugins/db_manager/info_viewer.py | 67 | 5508 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS
Date : May 23, 2011
copyright : (C) 2011 by Giuseppe Sucameli
email : brush.tyler@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from builtins import str
from qgis.PyQt.QtCore import Qt
from qgis.PyQt.QtWidgets import QTextBrowser, QApplication
from qgis.utils import OverrideCursor
from .db_plugins.plugin import BaseError, DbError, DBPlugin, Schema, Table
from .dlg_db_error import DlgDbError
class InfoViewer(QTextBrowser):
    def __init__(self, parent=None):
        """Read-only HTML panel showing info about the selected DB item."""
        QTextBrowser.__init__(self, parent)
        # Links are handled by _linkClicked instead of navigating away.
        self.setOpenLinks(False)

        self.item = None
        self.dirty = False

        self._clear()
        self._showPluginInfo()

        self.anchorClicked.connect(self._linkClicked)
def _linkClicked(self, url):
if self.item is None:
return
if url.scheme() == "action":
with OverrideCursor(Qt.WaitCursor):
try:
if self.item.runAction(url.path()):
self.refresh()
except BaseError as e:
DlgDbError.showError(e, self)
def refresh(self):
self.setDirty(True)
self.showInfo(self.item)
def showInfo(self, item):
if item == self.item and not self.dirty:
return
self._clear()
if item is None:
return
if isinstance(item, DBPlugin):
self._showDatabaseInfo(item)
elif isinstance(item, Schema):
self._showSchemaInfo(item)
elif isinstance(item, Table):
self._showTableInfo(item)
else:
return
self.item = item
item.aboutToChange.connect(self.setDirty)
def setDirty(self, val=True):
self.dirty = val
def _clear(self):
if self.item is not None:
# skip exception on RuntimeError fixes #6892
try:
self.item.aboutToChange.disconnect(self.setDirty)
except RuntimeError:
pass
self.item = None
self.dirty = False
self.item = None
self.setHtml("")
def _showPluginInfo(self):
from .db_plugins import getDbPluginErrors
html = u'<div style="background-color:#ffffcc;"><h1> ' + self.tr("DB Manager") + '</h1></div>'
html += '<div style="margin-left:8px;">'
for msg in getDbPluginErrors():
html += u"<p>%s" % msg
self.setHtml(html)
def _showDatabaseInfo(self, connection):
html = u'<div style="background-color:#ccffcc;"><h1> %s</h1></div>' % connection.connectionName()
html += '<div style="margin-left:8px;">'
try:
if connection.database() is None:
html += connection.info().toHtml()
else:
html += connection.database().info().toHtml()
except DbError as e:
html += u'<p style="color:red">%s</p>' % str(e).replace('\n', '<br>')
html += '</div>'
self.setHtml(html)
def _showSchemaInfo(self, schema):
html = u'<div style="background-color:#ffcccc;"><h1> %s</h1></div>' % schema.name
html += '<div style="margin-left:8px;">'
try:
html += schema.info().toHtml()
except DbError as e:
html += u'<p style="color:red">%s</p>' % str(e).replace('\n', '<br>')
html += "</div>"
self.setHtml(html)
def _showTableInfo(self, table):
html = u'<div style="background-color:#ccccff"><h1> %s</h1></div>' % table.name
html += '<div style="margin-left:8px;">'
try:
html += table.info().toHtml()
except DbError as e:
html += u'<p style="color:red">%s</p>' % str(e).replace('\n', '<br>')
html += '</div>'
self.setHtml(html)
return True
def setHtml(self, html):
# convert special tags :)
html = str(html).replace('<warning>', '<img src=":/db_manager/warning"> ')
# add default style
html = u"""
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<style type="text/css">
.section { margin-top: 25px; }
table.header th { background-color: #dddddd; }
table.header td { background-color: #f5f5f5; }
table.header th, table.header td { padding: 0px 10px; }
table td { padding-right: 20px; }
.underline { text-decoration:underline; }
</style>
</head>
<body>
%s <br>
</body>
</html>
""" % html
# print ">>>>>\n", html, "\n<<<<<<"
return QTextBrowser.setHtml(self, html)
| gpl-2.0 |
necioerrante/kernel | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
# Command-line handling: optional [comm] filter and refresh [interval].
usage = "perf script -s sctop.py [comm] [interval]\n"

for_comm = None                 # only count syscalls made by this command
default_interval = 3
interval = default_interval     # seconds between display refreshes

argc = len(sys.argv)
if argc > 3:
    sys.exit(usage)
elif argc == 3:
    for_comm = sys.argv[1]
    interval = int(sys.argv[2])
elif argc == 2:
    # A single argument is either the interval or the command name.
    try:
        interval = int(sys.argv[1])
    except ValueError:
        for_comm = sys.argv[1]
        interval = default_interval

syscalls = autodict()
def trace_begin():
    # Kick off the periodic display loop on a background thread.
    thread.start_new_thread(print_syscall_totals, (interval,))
def raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, args):
    # Skip events from other commands when a [comm] filter was given.
    if for_comm is not None and common_comm != for_comm:
        return
    try:
        syscalls[id] += 1
    except TypeError:
        # First hit for this syscall id (autodict auto-vivifies a dict).
        syscalls[id] = 1
def print_syscall_totals(interval):
    # Background-thread loop: every `interval` seconds, clear the terminal
    # and print the per-syscall counters collected so far, sorted by count
    # (descending), then reset the counters.
    # NOTE: the trailing commas on the print statements are Python 2 syntax
    # that suppresses the automatic newline -- each format string already
    # carries its own "\n".
    while 1:
        clear_term()
        if for_comm is not None:
            print "\nsyscall events for %s:\n\n" % (for_comm),
        else:
            print "\nsyscall events:\n\n",
        print "%-40s  %10s\n" % ("event", "count"),
        print "%-40s  %10s\n" % ("----------------------------------------", \
                                 "----------"),
        # Sort by (count, id) so ties come out in a stable order.
        for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
                              reverse = True):
            try:
                print "%-40s  %10d\n" % (syscall_name(id), val),
            except TypeError:
                # syscall_name() may fail for unknown ids; just skip them.
                pass
        syscalls.clear()
        time.sleep(interval)
| gpl-2.0 |
pombredanne/invenio | modules/bibindex/lib/bibindex_engine_stemmer.py | 1 | 18378 | ## This file is part of Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
BibIndex stemmer facility based on the Porter Stemming Algorithm.
<http://tartarus.org/~martin/PorterStemmer/>
"""
__revision__ = "$Id$"
from thread import get_ident
from invenio.bibindex_engine_stemmer_greek import greek_stemmer
# Per-thread cache of initialized stemmer objects, keyed by thread ident.
_stemmers = {}

try:
    ### Let's try to use SnowBall PyStemmer
    import Stemmer

    # Maps the human-readable language names (as used by BibIndex) to the
    # ISO 639-1 codes that the rest of this module works with.
    _lang_map = {
        'danish' : 'da',
        'dutch' : 'nl',
        'english' : 'en',
        'finnish' : 'fi',
        'french' : 'fr',
        'german' : 'de',
        'hungarian' : 'hu',
        'italian' : 'it',
        'norwegian' : 'no',
        'portuguese' : 'pt',
        'romanian' : 'ro',
        'russian' : 'ru',
        'spanish' : 'es',
        'swedish' : 'sv',
        'turkish' : 'tr'
    }
def is_stemmer_available_for_language(lang):
"""Return true if stemmer for language LANG is available.
Return false otherwise.
"""
thread_ident = get_ident()
if not _stemmers.has_key(thread_ident):
_stemmers[thread_ident] = _create_stemmers()
return _stemmers[thread_ident].has_key(lang)
def stem(word, lang):
"""Return WORD stemmed according to language LANG (e.g. 'en')."""
if lang and is_stemmer_available_for_language(lang):
return _stemmers[get_ident()][lang].stemWord(word)
elif lang == 'el':
#TODO: first we have to capitalize the word
# and remove accents from the vowels
return greek_stemmer().stem_word(word)
else:
return word
def stemWords(words, lang):
"""Return WORDS stemmed according to language LANG (e.g. 'en')."""
if lang and is_stemmer_available_for_language(lang):
return _stemmers[get_ident()][lang].stemWords(words)
else:
return words
def get_stemming_language_map():
"""Return a diction of code language, language name for all the available
languages."""
ret = {}
for language_name, language_code in _lang_map.iteritems():
if is_stemmer_available_for_language(language_code):
ret[language_name] = language_code
return ret
def _create_stemmers():
"""Create stemmers dictionary for all possible languages."""
stemmers_initialized = {}
for src_lang in Stemmer.algorithms():
try:
dst_lang = _lang_map.get(src_lang)
if dst_lang:
stemmers_initialized[dst_lang] = Stemmer.Stemmer(src_lang, 40000)
except (TypeError, KeyError):
pass
return stemmers_initialized
except ImportError:
### Here is the original PorterStemmer class provided as a fallback,
### the "free of charge for any purpose" implementation of the Porter stemmer
### algorithm in Python. The Invenio API interface follows below.
    class PorterStemmer:
        """
        This is the Porter stemming algorithm, ported to Python from the
        version coded up in ANSI C by the author. It may be be regarded
        as canonical, in that it follows the algorithm presented in
        Porter, 1980, An algorithm for suffix stripping, Program, Vol. 14,
        no. 3, pp 130-137,
        only differing from it at the points maked --DEPARTURE-- below.
        See also http://www.tartarus.org/~martin/PorterStemmer
        The algorithm as described in the paper could be exactly replicated
        by adjusting the points of DEPARTURE, but this is barely necessary,
        because (a) the points of DEPARTURE are definitely improvements, and
        (b) no encoding of the Porter stemmer I have seen is anything like
        as exact as this version, even with the points of DEPARTURE!
        Vivake Gupta (v@nano.com)
        Release 1: January 2001
        """

        def __init__(self):
            """The main part of the stemming algorithm starts here.
            b is a buffer holding a word to be stemmed. The letters are in b[k0],
            b[k0+1] ... ending at b[k]. In fact k0 = 0 in this demo program. k is
            readjusted downwards as the stemming progresses. Zero termination is
            not in fact used in the algorithm.
            Note that only lower case sequences are stemmed. Forcing to lower case
            should be done before stem(...) is called.
            """
            self.b = ""  # buffer for word to be stemmed
            self.k = 0
            self.k0 = 0
            self.j = 0   # j is a general offset into the string

        def cons(self, i):
            """cons(i) is TRUE <=> b[i] is a consonant."""
            if self.b[i] == 'a' or self.b[i] == 'e' or self.b[i] == 'i' or self.b[i] == 'o' or self.b[i] == 'u':
                return 0
            if self.b[i] == 'y':
                # 'y' is a consonant at the start of the word, otherwise it
                # is a consonant exactly when the previous letter is not.
                if i == self.k0:
                    return 1
                else:
                    return (not self.cons(i - 1))
            return 1

        def m(self):
            """m() measures the number of consonant sequences between k0 and j.
            if c is a consonant sequence and v a vowel sequence, and <..>
            indicates arbitrary presence,
               <c><v>       gives 0
               <c>vc<v>     gives 1
               <c>vcvc<v>   gives 2
               <c>vcvcvc<v> gives 3
               ....
            """
            n = 0
            i = self.k0
            # Skip the optional leading consonant sequence.
            while 1:
                if i > self.j:
                    return n
                if not self.cons(i):
                    break
                i = i + 1
            i = i + 1
            # Count each vc pair encountered from here on.
            while 1:
                while 1:
                    if i > self.j:
                        return n
                    if self.cons(i):
                        break
                    i = i + 1
                i = i + 1
                n = n + 1
                while 1:
                    if i > self.j:
                        return n
                    if not self.cons(i):
                        break
                    i = i + 1
                i = i + 1

        def vowelinstem(self):
            """vowelinstem() is TRUE <=> k0,...j contains a vowel"""
            for i in range(self.k0, self.j + 1):
                if not self.cons(i):
                    return 1
            return 0

        def doublec(self, j):
            """doublec(j) is TRUE <=> j,(j-1) contain a double consonant."""
            if j < (self.k0 + 1):
                return 0
            if (self.b[j] != self.b[j-1]):
                return 0
            return self.cons(j)

        def cvc(self, i):
            """cvc(i) is TRUE <=> i-2,i-1,i has the form consonant - vowel - consonant
            and also if the second c is not w,x or y. this is used when trying to
            restore an e at the end of a short  e.g.
               cav(e), lov(e), hop(e), crim(e), but
               snow, box, tray.
            """
            if i < (self.k0 + 2) or not self.cons(i) or self.cons(i-1) or not self.cons(i-2):
                return 0
            ch = self.b[i]
            if ch == 'w' or ch == 'x' or ch == 'y':
                return 0
            return 1

        def ends(self, s):
            """ends(s) is TRUE <=> k0,...k ends with the string s."""
            length = len(s)
            if s[length - 1] != self.b[self.k]: # tiny speed-up
                return 0
            if length > (self.k - self.k0 + 1):
                return 0
            if self.b[self.k-length+1:self.k+1] != s:
                return 0
            # Side effect: j marks the end of the stem preceding suffix s.
            self.j = self.k - length
            return 1

        def setto(self, s):
            """setto(s) sets (j+1),...k to the characters in the string s, readjusting k."""
            length = len(s)
            self.b = self.b[:self.j+1] + s + self.b[self.j+length+1:]
            self.k = self.j + length

        def r(self, s):
            """r(s) is used further down."""
            # Replace the suffix by s only when the stem has at least one
            # consonant sequence (m() > 0).
            if self.m() > 0:
                self.setto(s)

        def step1ab(self):
            """step1ab() gets rid of plurals and -ed or -ing. e.g.
               caresses  ->  caress
               ponies    ->  poni
               ties      ->  ti
               caress    ->  caress
               cats      ->  cat
               feed      ->  feed
               agreed    ->  agree
               disabled  ->  disable
               matting   ->  mat
               mating    ->  mate
               meeting   ->  meet
               milling   ->  mill
               messing   ->  mess
               meetings  ->  meet
            """
            if self.b[self.k] == 's':
                if self.ends("sses"):
                    self.k = self.k - 2
                elif self.ends("ies"):
                    self.setto("i")
                elif self.b[self.k - 1] != 's':
                    self.k = self.k - 1
            if self.ends("eed"):
                if self.m() > 0:
                    self.k = self.k - 1
            elif (self.ends("ed") or self.ends("ing")) and self.vowelinstem():
                self.k = self.j
                if self.ends("at"):   self.setto("ate")
                elif self.ends("bl"): self.setto("ble")
                elif self.ends("iz"): self.setto("ize")
                elif self.doublec(self.k):
                    self.k = self.k - 1
                    ch = self.b[self.k]
                    if ch == 'l' or ch == 's' or ch == 'z':
                        self.k = self.k + 1
                elif (self.m() == 1 and self.cvc(self.k)):
                    self.setto("e")

        def step1c(self):
            """step1c() turns terminal y to i when there is another vowel in the stem."""
            if (self.ends("y") and self.vowelinstem()):
                self.b = self.b[:self.k] + 'i' + self.b[self.k+1:]

        def step2(self):
            """step2() maps double suffices to single ones.
            so -ization ( = -ize plus -ation) maps to -ize etc. note that the
            string before the suffix must give m() > 0.
            """
            if self.b[self.k - 1] == 'a':
                if self.ends("ational"):   self.r("ate")
                elif self.ends("tional"):  self.r("tion")
            elif self.b[self.k - 1] == 'c':
                if self.ends("enci"):      self.r("ence")
                elif self.ends("anci"):    self.r("ance")
            elif self.b[self.k - 1] == 'e':
                if self.ends("izer"):      self.r("ize")
            elif self.b[self.k - 1] == 'l':
                if self.ends("bli"):       self.r("ble") # --DEPARTURE--
                # To match the published algorithm, replace this phrase with
                #   if self.ends("abli"):      self.r("able")
                elif self.ends("alli"):    self.r("al")
                elif self.ends("entli"):   self.r("ent")
                elif self.ends("eli"):     self.r("e")
                elif self.ends("ousli"):   self.r("ous")
            elif self.b[self.k - 1] == 'o':
                if self.ends("ization"):   self.r("ize")
                elif self.ends("ation"):   self.r("ate")
                elif self.ends("ator"):    self.r("ate")
            elif self.b[self.k - 1] == 's':
                if self.ends("alism"):     self.r("al")
                elif self.ends("iveness"): self.r("ive")
                elif self.ends("fulness"): self.r("ful")
                elif self.ends("ousness"): self.r("ous")
            elif self.b[self.k - 1] == 't':
                if self.ends("aliti"):     self.r("al")
                elif self.ends("iviti"):   self.r("ive")
                elif self.ends("biliti"):  self.r("ble")
            elif self.b[self.k - 1] == 'g': # --DEPARTURE--
                if self.ends("logi"):      self.r("log")
                # To match the published algorithm, delete this phrase

        def step3(self):
            """step3() dels with -ic-, -full, -ness etc. similar strategy to step2."""
            if self.b[self.k] == 'e':
                if self.ends("icate"):     self.r("ic")
                elif self.ends("ative"):   self.r("")
                elif self.ends("alize"):   self.r("al")
            elif self.b[self.k] == 'i':
                if self.ends("iciti"):     self.r("ic")
            elif self.b[self.k] == 'l':
                if self.ends("ical"):      self.r("ic")
                elif self.ends("ful"):     self.r("")
            elif self.b[self.k] == 's':
                if self.ends("ness"):      self.r("")

        def step4(self):
            """step4() takes off -ant, -ence etc., in context <c>vcvc<v>."""
            if self.b[self.k - 1] == 'a':
                if self.ends("al"): pass
                else: return
            elif self.b[self.k - 1] == 'c':
                if self.ends("ance"): pass
                elif self.ends("ence"): pass
                else: return
            elif self.b[self.k - 1] == 'e':
                if self.ends("er"): pass
                else: return
            elif self.b[self.k - 1] == 'i':
                if self.ends("ic"): pass
                else: return
            elif self.b[self.k - 1] == 'l':
                if self.ends("able"): pass
                elif self.ends("ible"): pass
                else: return
            elif self.b[self.k - 1] == 'n':
                if self.ends("ant"): pass
                elif self.ends("ement"): pass
                elif self.ends("ment"): pass
                elif self.ends("ent"): pass
                else: return
            elif self.b[self.k - 1] == 'o':
                if self.ends("ion") and (self.b[self.j] == 's' or self.b[self.j] == 't'): pass
                elif self.ends("ou"): pass
                # takes care of -ous
                else: return
            elif self.b[self.k - 1] == 's':
                if self.ends("ism"): pass
                else: return
            elif self.b[self.k - 1] == 't':
                if self.ends("ate"): pass
                elif self.ends("iti"): pass
                else: return
            elif self.b[self.k - 1] == 'u':
                if self.ends("ous"): pass
                else: return
            elif self.b[self.k - 1] == 'v':
                if self.ends("ive"): pass
                else: return
            elif self.b[self.k - 1] == 'z':
                if self.ends("ize"): pass
                else: return
            else:
                return
            # Only strip the matched suffix when the remaining stem has more
            # than one consonant sequence.
            if self.m() > 1:
                self.k = self.j

        def step5(self):
            """step5() removes a final -e if m() > 1, and changes -ll to -l if
            m() > 1.
            """
            self.j = self.k
            if self.b[self.k] == 'e':
                a = self.m()
                if a > 1 or (a == 1 and not self.cvc(self.k-1)):
                    self.k = self.k - 1
            if self.b[self.k] == 'l' and self.doublec(self.k) and self.m() > 1:
                self.k = self.k -1

        def stem(self, p, i, j):
            """In stem(p,i,j), p is a char pointer, and the string to be stemmed
            is from p[i] to p[j] inclusive. Typically i is zero and j is the
            offset to the last character of a string, (p[j+1] == '\0'). The
            stemmer adjusts the characters p[i] ... p[j] and returns the new
            end-point of the string, k. Stemming never increases word length, so
            i <= k <= j. To turn the stemmer into a module, declare 'stem' as
            extern, and delete the remainder of this file.
            """
            # copy the parameters into statics
            self.b = p
            self.k = j
            self.k0 = i
            if self.k <= self.k0 + 1:
                return self.b # --DEPARTURE--
            # With this line, strings of length 1 or 2 don't go through the
            # stemming process, although no mention is made of this in the
            # published algorithm. Remove the line to match the published
            # algorithm.
            self.step1ab()
            self.step1c()
            self.step2()
            self.step3()
            self.step4()
            self.step5()
            return self.b[self.k0:self.k+1]
    # Pre-create the single fallback stemmer for the importing thread; other
    # threads will simply get their words back unstemmed (see stem() below).
    _stemmers[get_ident()] = PorterStemmer()

    def is_stemmer_available_for_language(lang):
        """Return true if stemmer for language LANG is available.
        Return false otherwise.  The Porter fallback only supports English.
        """
        return lang == 'en'
def stem(word, lang):
"""Return WORD stemmed according to language LANG (e.g. 'en')."""
if lang == 'en' and _stemmers and _stemmers.has_key(get_ident()):
#make sure _stemmers[get_ident()] is avail..
return _stemmers[get_ident()].stem(word, 0, len(word)-1)
elif lang == 'el':
#TODO: first we have to capitalize the word
# and remove accents from the vowels
return greek_stemmer().stem_word(word)
else:
return word
def stemWords(words, lang):
"""Return WORDS stemmed according to language LANG (e.g. 'en')."""
if lang == 'en' and _stemmers and _stemmers.has_key(get_ident()):
#make sure _stemmers[get_ident()] is avail..
return [_stemmers[get_ident()].stem(word, 0, len(word)-1) for word in words]
else:
return words
    def get_stemming_language_map():
        """Return a diction of code language, language name for all the available
        languages.  The Porter fallback only ever offers English."""
        return {'english' : 'en'}
if __name__ == '__main__':
# when invoked via CLI, simply stem the arguments:
import sys
if len(sys.argv) > 1:
for word in sys.argv[1:]:
print stem(word)
| gpl-2.0 |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/doc/mpl_examples/api/histogram_path_demo.py | 6 | 1464 | """
This example shows how to use a path patch to draw a bunch of
rectangles. The technique of using lots of Rectangle instances, or
the faster method of using PolyCollections, were implemented before we
had proper paths with moveto/lineto, closepoly etc in mpl. Now that
we have them, we can draw collections of regularly shaped objects with
homogeous properties more efficiently with a PathCollection. This
example makes a histogram -- its more work to set up the vertex arrays
at the outset, but it should be much faster for large numbers of
objects
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.path as path
fig = plt.figure()
ax = fig.add_subplot(111)

# Bin 1000 standard-normal samples with numpy.
counts, edges = np.histogram(np.random.randn(1000), 50)

# Corner coordinates for one rectangle per histogram bar.
left = np.array(edges[:-1])
right = np.array(edges[1:])
bottom = np.zeros(len(left))
top = bottom + counts

# The path helper wants a (numrects x numsides x 2) vertex array to build a
# single compound path out of all the rectangles.
XY = np.array([[left, left, right, right], [bottom, top, top, bottom]]).T
barpath = path.Path.make_compound_path_from_polys(XY)

# Render the compound path as one patch.
patch = patches.PathPatch(barpath, facecolor='blue', edgecolor='gray', alpha=0.8)
ax.add_patch(patch)

# Update the view limits to frame the histogram.
ax.set_xlim(left[0], right[-1])
ax.set_ylim(bottom.min(), top.max())

plt.show()
| mit |
gurneyalex/odoo | addons/account/models/digest.py | 6 | 1557 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models, _
from odoo.exceptions import AccessError
class Digest(models.Model):
    """Extend the digest email with an accounting revenue KPI."""
    _inherit = 'digest.digest'

    kpi_account_total_revenue = fields.Boolean('Revenue')
    kpi_account_total_revenue_value = fields.Monetary(compute='_compute_kpi_account_total_revenue_value')

    def _compute_kpi_account_total_revenue_value(self):
        """Sum the sale-journal debit lines over each digest's KPI period."""
        if not self.env.user.has_group('account.group_account_invoice'):
            raise AccessError(_("Do not have access, skip this data for user's digest email"))
        for digest in self:
            start, end, company = digest._get_kpi_compute_parameters()
            self._cr.execute('''
                SELECT SUM(line.debit)
                FROM account_move_line line
                JOIN account_move move ON move.id = line.move_id
                JOIN account_journal journal ON journal.id = move.journal_id
                WHERE line.company_id = %s AND line.date >= %s AND line.date < %s
                AND journal.type = 'sale'
            ''', [company.id, start, end])
            row = self._cr.fetchone()
            # SUM() returns NULL when no line matches; fall back to 0.0.
            digest.kpi_account_total_revenue_value = row[0] if row and row[0] else 0.0

    def compute_kpis_actions(self, company, user):
        """Add the action opening customer invoices for the revenue KPI."""
        actions = super(Digest, self).compute_kpis_actions(company, user)
        menu_id = self.env.ref('account.menu_finance').id
        actions['kpi_account_total_revenue'] = 'account.action_move_out_invoice_type&menu_id=%s' % menu_id
        return actions
return res
| agpl-3.0 |
miguelinux/vbox | src/VBox/ValidationKit/testboxscript/testboxscript.py | 1 | 3651 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# $Id: testboxscript.py $
"""
TestBox Script Wrapper.
This script aimes at respawning the Test Box Script when it terminates
abnormally or due to an UPGRADE request.
"""
__copyright__ = \
"""
Copyright (C) 2012-2015 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision: 108245 $"
import subprocess
import sys
import os
import time
## @name Test Box script exit statuses (see also RTEXITCODE)
# @remarks These will _never_ change
# @{
TBS_EXITCODE_FAILURE      = 1   # RTEXITCODE_FAILURE
TBS_EXITCODE_SYNTAX       = 2   # RTEXITCODE_SYNTAX; stops the respawn loop.
TBS_EXITCODE_NEED_UPGRADE = 9   # Real script requests a respawn after upgrading itself (see module docstring).
## @}
class TestBoxScriptWrapper(object): # pylint: disable=R0903
"""
Wrapper class
"""
TESTBOX_SCRIPT_FILENAME = 'testboxscript_real.py'
def __init__(self):
"""
Init
"""
self.task = None
def __del__(self):
"""
Cleanup
"""
if self.task is not None:
print 'Wait for child task...'
self.task.terminate()
self.task.wait()
print 'done. Exiting'
self.task = None;
def run(self):
"""
Start spawning the real TestBox script.
"""
# Figure out where we live first.
try:
__file__
except:
__file__ = sys.argv[0];
sTestBoxScriptDir = os.path.dirname(os.path.abspath(__file__));
# Construct the argument list for the real script (same dir).
sRealScript = os.path.join(sTestBoxScriptDir, TestBoxScriptWrapper.TESTBOX_SCRIPT_FILENAME);
asArgs = sys.argv[1:];
asArgs.insert(0, sRealScript);
if sys.executable is not None and len(sys.executable) > 0:
asArgs.insert(0, sys.executable);
# Look for --pidfile <name> and write a pid file.
sPidFile = None;
for i, _ in enumerate(asArgs):
if asArgs[i] == '--pidfile' and i + 1 < len(asArgs):
sPidFile = asArgs[i + 1];
break;
if asArgs[i] == '--':
break;
if sPidFile is not None and len(sPidFile) > 0:
oPidFile = open(sPidFile, 'w');
oPidFile.write(str(os.getpid()));
oPidFile.close();
# Execute the testbox script almost forever in a relaxed loop.
rcExit = TBS_EXITCODE_FAILURE;
while True:
self.task = subprocess.Popen(asArgs, shell=False);
rcExit = self.task.wait();
self.task = None;
if rcExit == TBS_EXITCODE_SYNTAX:
break;
# Relax.
time.sleep(1);
return rcExit;
if __name__ == '__main__':
    # Propagate the final exit status of the (re)spawned testbox script.
    sys.exit(TestBoxScriptWrapper().run());
| gpl-2.0 |
yangrongwei/gyp | test/generator-output/gyptest-rules.py | 198 | 1768 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies --generator-output= behavior when using rules.
"""
import TestGyp
# Android doesn't support --generator-output.
# Android doesn't support --generator-output.
test = TestGyp.TestGyp(formats=['!android'])

# Make the source tree read-only to prove that gyp writes everything under
# the --generator-output directory instead of into the sources.
test.writable(test.workpath('rules'), False)

test.run_gyp('rules.gyp',
             '--generator-output=' + test.workpath('gypfiles'),
             chdir='rules')

# Restore write access before relocating the trees.
test.writable(test.workpath('rules'), True)

test.relocate('rules', 'relocate/rules')
test.relocate('gypfiles', 'relocate/gypfiles')

# During the build only the build output directories may be written to.
test.writable(test.workpath('relocate/rules'), False)

test.writable(test.workpath('relocate/rules/build'), True)
test.writable(test.workpath('relocate/rules/subdir1/build'), True)
test.writable(test.workpath('relocate/rules/subdir2/build'), True)
test.writable(test.workpath('relocate/rules/subdir2/rules-out'), True)

test.build('rules.gyp', test.ALL, chdir='relocate/gypfiles')

# Output expected from the built program (exercises both .in0 and .in1 rules).
expect = """\
Hello from program.c
Hello from function1.in1
Hello from function2.in1
Hello from define3.in0
Hello from define4.in0
"""

# Xcode keeps built products relative to the project, not the gypfiles dir.
if test.format == 'xcode':
  chdir = 'relocate/rules/subdir1'
else:
  chdir = 'relocate/gypfiles'
test.run_built_executable('program', chdir=chdir, stdout=expect)

test.must_match('relocate/rules/subdir2/rules-out/file1.out',
                "Hello from file1.in0\n")
test.must_match('relocate/rules/subdir2/rules-out/file2.out',
                "Hello from file2.in0\n")
test.must_match('relocate/rules/subdir2/rules-out/file3.out',
                "Hello from file3.in1\n")
test.must_match('relocate/rules/subdir2/rules-out/file4.out',
                "Hello from file4.in1\n")

test.pass_test()
| bsd-3-clause |
aselle/tensorflow | tensorflow/python/debug/lib/debug_gradients.py | 16 | 15445 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Debugger: Tools for debugging gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import uuid
import six
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_graphs
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import variables
_GRADIENT_DEBUG_TAG = "gradient_debug_"
_gradient_debuggers = {}
def _tensor_to_grad_debug_op_name(tensor, grad_debugger_uuid):
op_name, slot = debug_graphs.parse_node_or_tensor_name(tensor.name)
return "%s_%d/%s%s" % (op_name, slot, _GRADIENT_DEBUG_TAG, grad_debugger_uuid)
def _parse_grad_debug_op_name(op_name):
"""Parse the name of a debug gradient op.
Args:
op_name: the name of the debug gradient op.
Returns:
1) The UUID of the GradientsDebugger that created the debug gradient op.
2) Name of the original tensor whose gradient is debugged by the debug
gradient op.
"""
name_items = op_name.split("/")
assert len(name_items) > 1
assert name_items[-1].startswith(_GRADIENT_DEBUG_TAG)
grad_debugger_uuid = name_items[-1][len(_GRADIENT_DEBUG_TAG):]
if "_" in grad_debugger_uuid:
grad_debugger_uuid = grad_debugger_uuid[:grad_debugger_uuid.index("_")]
orig_tensor_slot = int(name_items[-2][name_items[-2].rfind("_") + 1:])
orig_base_op_name = name_items[-2][:name_items[-2].rfind("_")]
orig_tensor_name = ("/".join(name_items[:-2] + [orig_base_op_name]) +
":%d" % orig_tensor_slot)
return grad_debugger_uuid, orig_tensor_name
class GradientsDebugger(object):
"""Gradients Debugger.
Allows retrieval of gradient tensors created by TensorFlow's automatic
differentiation algorithm, i.e., @{tf.gradients} and optimizer classes that
use it.
"""
# TODO(cais): Add examples code in the doc string?
def __init__(self, y_tensor=None):
"""Constructor of GradientsDebugger.
Args:
y_tensor: optional: the `tf.Tensor` to be differentiated, i.e., the tensor
on the numerator of the differentiation.
"""
self._uuid = uuid.uuid4().hex
_gradient_debuggers[self._uuid] = self
# A dict mapping x-tensor names to gradient tensor. x-tensor refers to the
# independent tf.Tensor, i.e., the tensor on the denominator of the
# differentiation.
self._gradient_tensors = {}
self._y_tensor = y_tensor
self._graph = None
if y_tensor:
self._graph = y_tensor.graph
self._is_active_context = False
  @property
  def y_tensor(self):
    # The tensor being differentiated (numerator of the differentiation),
    # or None if not supplied to the constructor.
    return self._y_tensor
  @property
  def graph(self):
    # Graph of the y-tensor, or None if no y-tensor was given.
    return self._graph
  def __enter__(self):
    # Mark this debugger as active for the duration of the `with` block.
    self._is_active_context = True
  def __exit__(self, unused_type, unused_value, unused_traceback):
    # Deactivate on context exit; exceptions are not suppressed.
    self._is_active_context = False
def identify_gradient(self, input_tensor):
"""Create a debug identity tensor that registers and forwards gradients.
The side effect of this method is that when gradient tensor(s) are created
with respect to the any paths that include the `input_tensor`, the gradient
tensor(s) with repsect to `input_tensor` will be registered with this
this `GradientsDebugger` instance and can later be retrieved, with the
methods `gradient_tensor` and `gradient_tensors`.
Example:
```python
x = tf.Variable(1.0)
y = tf.add(x, x)
grad_debugger = tf_debug.GradientsDebugger()
debug_y = grad_debugger.identify_gradient(y)
z = tf.square(debug_y)
# Create a train op under the grad_debugger context.
with grad_debugger:
train_op = tf.train.GradientDescentOptimizer(z)
# Now we can reflect through grad_debugger to get the gradient tensor
# with respect to y.
y_grad = grad_debugger.gradient_tensor(y)
```
Args:
input_tensor: the input `tf.Tensor` object whose related gradient tensors
are to be reigstered with this `GradientsDebugger` instance when they
are created, e.g., during @{tf.gradients} calls or the construction
of optimization (training) op that uses @{tf.gradients}.
Returns:
A forwarded identity of `input_tensor`, as a `tf.Tensor`.
Raises:
ValueError: If an op with name that duplicates the gradient-debugging op
already exists in the graph (highly unlikely).
"""
# TODO(cais): Allow overriding gradient.
# TODO(cais): Implement value_stack.
grad_debug_op_name = _tensor_to_grad_debug_op_name(input_tensor, self._uuid)
# pylint: disable=protected-access
identity_op = (
gen_array_ops.debug_gradient_ref_identity
if input_tensor.dtype._is_ref_dtype else
gen_array_ops.debug_gradient_identity)
# pylint: enable=protected-access
debug_grad_identity = identity_op(input_tensor, name=grad_debug_op_name)
assert debug_grad_identity.dtype == input_tensor.dtype
if debug_grad_identity.op.name != grad_debug_op_name:
raise ValueError(
"The graph already contains an op named %s" % grad_debug_op_name)
return debug_grad_identity
def watch_gradients_by_tensors(self, graph, tensors):
"""Watch gradient tensors by x-tensor(s).
The side effect of this method is that when gradient tensor(s) are created
with respect to the any paths that include the `x_tensor`s, the gradient
tensor(s) with repsect to the tensor will be registered with this
this `GradientsDebugger` instance and can later be retrieved, with the
methods `gradient_tensor` and `gradient_tensors`.
Unlike the method `identify_gradient`, this method is used to retrieve
gradient tensors after the construction of the forward subgraph has
completed (but before the construction of the backward subgraph).
This method is the same as `watch_gradients_by_x_tensor_names` except that
the tensors are specified by the Python `tf.Tensor` or `tf.Variable`
objects, instead by name patterns.
Example:
```python
x = tf.Variable(1.0)
y = tf.add(x, x, name="y")
z = tf.square(debug_y)
# Create a train op under the grad_debugger context.
grad_debugger = tf_debug.GradientsDebugger()
with grad_debugger.watch_gradients_by_tensors(y):
train_op = tf.train.GradientDescentOptimizer(z)
# Now we can reflect through grad_debugger to get the gradient tensor
# with respect to y.
y_grad = grad_debugger.gradient_tensor(y)
# or
y_grad = grad_debugger.gradient_tensor("y:0")
```
Args:
graph: the `tf.Graph` to watch the gradients on.
tensors: a `tf.Tensor` or `tf.Variable` object, or a list of such objects.
Returns:
The GradientsDebugger instance itself.
"""
if not isinstance(tensors, list):
tensors = [tensors]
tensor_name_regex = []
for tensor in tensors:
tensor_name_regex.append(re.escape(tensor.name) + "$")
tensor_name_regex = "(" + "|".join(tensor_name_regex) + ")"
return self.watch_gradients_by_tensor_names(graph, tensor_name_regex)
  def watch_gradients_by_tensor_names(self, graph, tensor_name_regex):
    """Watch gradient tensors by name(s) of the x-tensor(s).

    The side effect of this method is that when gradient tensor(s) are created
    with respect to the x-tensors, the gradient tensor(s) will be registered
    with this `GradientsDebugger` instance and can later be retrieved.

    Unlike the `identify_gradient` method, this method is used after the
    construction of the forward graph has completed. Unlike the
    `watch_gradients_by_tensor` method, this method does not use handles to the
    tensors of interest; it uses their names.

    This method is the same as `watch_gradients_by_tensors` except that the
    x-tensors are specified by name patterns, instead of `tf.Tensor` or
    `tf.Variable` objects.

    Example:

    ```python
    x = tf.Variable(1.0, name="x")
    y = tf.add(x, x, name="y")
    z = tf.square(debug_y)

    # Create a train op under the grad_debugger context.
    grad_debugger = tf_debug.GradientsDebugger()
    with grad_debugger.watch_gradients_by_tensor_names(r"(x|y):0$"):
      train_op = tf.train.GradientDescentOptimizer(z)

    # Now we can reflect through grad_debugger to get the gradient tensor
    # with respect to x and y.
    x_grad = grad_debugger.gradient_tensor("x:0")
    y_grad = grad_debugger.gradient_tensor("y:0")
    ```

    Args:
      graph: the `tf.Graph` to watch the gradients on.
      tensor_name_regex: the regular-expression pattern of the name(s) of the
        x-tensor(s) to watch. x-tensor refers to the tensors on the denominator
        of the differentiation.

    Returns:
      The GradientsDebugger instance itself.
    """
    tensor_name_pattern = re.compile(tensor_name_regex)
    with graph.as_default():
      for op in graph.get_operations():
        for output in op.outputs:
          if tensor_name_pattern.match(output.name):
            # Splice a debug-gradient identity between the matched tensor and
            # each of its consumers so the backward pass flows through it.
            debug_op = self.identify_gradient(output)
            # Make a copy of output.consumers() since we'll modify the consumers
            # TODO(skyewm): this is unnecessary once the C API is enabled
            for consumer in list(output.consumers()):
              if consumer == debug_op.op:
                continue
              # Locate the slot index of the original input.
              for i, consumer_input in enumerate(consumer.inputs):
                if consumer_input == output:
                  consumer._update_input(i, debug_op)  # pylint: disable=protected-access
    return self
def _check_same_graph(self, tensor):
if self._graph is None:
self._graph = tensor.graph
elif self._graph != tensor.graph:
raise ValueError(
"The graph of the value (%s) is not the same as the graph %s" %
(tensor.graph, self._graph))
def register_gradient_tensor(self,
x_tensor_name,
gradient_tensor):
"""Register the gradient tensor for an x-tensor.
Args:
x_tensor_name: (`str`) the name of the independent `tf.Tensor`, i.e.,
the tensor on the denominator of the differentiation.
gradient_tensor: the gradient `tf.Tensor`.
"""
if len(_gradient_debuggers) == 1 or self._is_active_context:
self._check_same_graph(gradient_tensor)
self._gradient_tensors[x_tensor_name] = gradient_tensor
def gradient_tensor(self, x_tensor):
"""Get the gradient tensor of an x-tensor.
Args:
x_tensor: (`tf.Tensor`, `tf.Variable` or `str`) The x-tensor object or its
name. x-tensor refers to the independent `tf.Tensor`, i.e., the tensor
on the denominator of the differentiation.
Returns:
If found, the gradient tensor.
Raises:
TypeError: If `x_tensor` is not a `tf.Tensor`, `tf.Variable` or `str`.
LookupError: If the `x_tensor` has not been registered with a gradient
tensor.
"""
x_tensor_name = self._get_tensor_name(x_tensor)
if x_tensor_name not in self._gradient_tensors:
raise LookupError(
"This GradientsDebugger has not received any gradient tensor for "
"x-tensor %s" % x_tensor_name)
return self._gradient_tensors[x_tensor_name]
  def gradient_tensors(self):
    """Get the gradient tensors that this object is aware of.

    Returns:
      A dict mapping x-tensor names to gradient tensor objects. x-tensor refers
      to the tensors on the denominator of the differentiation.
    """
    return self._gradient_tensors
def _get_tensor_name(self, tensor):
if isinstance(tensor, (ops.Tensor, variables.Variable)):
return tensor.name
elif isinstance(tensor, six.string_types):
return tensor
else:
raise TypeError(
"x_tensor must be a str or tf.Tensor or tf.Variable, "
"but instead has type %s" % type(tensor))
def clear_gradient_debuggers():
  """Clear all globally registered gradient debuggers.

  Empties the module-level registry that maps debugger UUIDs to
  `GradientsDebugger` instances.
  """
  _gradient_debuggers.clear()
@ops.RegisterGradient("DebugGradientIdentity")
def _identify_gradient_grad(op, dy):
  """Gradient function for the DebugIdentity op.

  Args:
    op: The forward DebugGradientIdentity op.
    dy: The incoming gradient tensor.

  Returns:
    `dy` unchanged. As a side effect, registers `dy` with the
    `GradientsDebugger` whose UUID is encoded in the op's name.
  """
  # TODO(cais): Allow overriding gradient.
  # The debugger UUID and original tensor name were encoded into the op name
  # when the identity was created; decode them to find the owning debugger.
  grad_debugger_uuid, orig_tensor_name = _parse_grad_debug_op_name(op.name)
  grad_debugger = _gradient_debuggers[grad_debugger_uuid]
  grad_debugger.register_gradient_tensor(orig_tensor_name, dy)
  return dy
@ops.RegisterGradient("DebugGradientRefIdentity")
def _identify_gradient_grad_ref(op, dy):
  """Gradient function for the DebugIdentity op (ref-dtype variant).

  Delegates to `_identify_gradient_grad`, so registration behaves identically
  for reference-dtype tensors.
  """
  return _identify_gradient_grad(op, dy)
def gradient_values_from_dump(grad_debugger, x_tensor, dump):
  """Find gradient values from a `DebugDumpDir` object.

  Args:
    grad_debugger: the `tf_debug.GradientsDebugger` instance to be used.
    x_tensor: (`tf.Tensor`, `tf.Variable` or `str`) The x-tensor object or its
      name. x-tensor refers to the independent `tf.Tensor`, i.e., the tensor
      on the denominator of the differentiation.
    dump: A `tfdbg.DebugDumpDir` object.

  Returns:
    If this `GradientsDebugger` instance has the gradient tensor of `x_tensor`
      registered: a list of `numpy.ndarray` representing the value of the
      gradient tensor from `dump`. The list could be empty, if the gradient
      tensor is not executed in the `tf.Session.run()` call that generated
      the `dump`. The list could also contain multiple values of the gradient
      tensor, e.g., if the gradient tensor is computed repeatedly in a
      `tf.while_loop` during the run that generated the `dump`.

  Raises:
    LookupError: If this `GradientsDebugger` instance does not have the
      gradient tensor of `x_tensor` registered.
    ValueError: If this `GradientsDebugger` has a `tf.Graph` object that
      does not match the `tf.Graph` object of the `dump`.
    TypeError: If `x_tensor` is not a `tf.Tensor`, `tf.Variable` or `str`.
  """
  # TODO(cais): Use this method in LocalCLIDebugWrapperSession to present the
  # gradient tensors to the TFDBG CLI.
  # If possible, verify that the Python graph of the dump and that of this
  # GradientsDebugger match.
  if (dump.python_graph and grad_debugger.graph and
      dump.python_graph != grad_debugger.graph):
    raise ValueError(
        "This GradientsDebugger instance has a graph (%s) that differs from "
        "the graph of the DebugDumpDir object (%s)." %
        (grad_debugger.graph, dump.python_graph))
  gradient_tensor = grad_debugger.gradient_tensor(x_tensor)
  node_name, output_slot = debug_graphs.parse_node_or_tensor_name(
      gradient_tensor.name)
  # An absent watch key simply means the gradient was never executed in this
  # run, which the contract maps to an empty list rather than an error.
  try:
    return dump.get_tensors(node_name, output_slot, "DebugIdentity")
  except debug_data.WatchKeyDoesNotExistInDebugDumpDirError:
    return []
| apache-2.0 |
mbaijal/incubator-mxnet | example/caffe/train_model.py | 21 | 4207 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
import logging
import os
def fit(args, network, data_loader, eval_metrics=None, batch_end_callback=None):
    """Train `network` on the data produced by `data_loader`.

    :param args: parsed command-line namespace; must provide kv_store,
        batch_size, num_examples, num_epochs, lr, gpus, model_prefix,
        save_model_prefix and load_epoch, and may provide log_file, log_dir,
        lr_factor, lr_factor_epoch and clip_gradient.
    :param network: the network symbol to train.
    :param data_loader: callable ``(args, kv) -> (train_iter, val_iter)``.
    :param eval_metrics: optional list of evaluation metrics. When None,
        accuracy is used; top-k accuracy (k = 5, 10, 20) is appended to it.
    :param batch_end_callback: optional callback (or list of callbacks)
        invoked at the end of each batch; a Speedometer is always appended.
    """
    # kvstore
    kv = mx.kvstore.create(args.kv_store)

    # logging: write to a file when requested, otherwise to the root logger.
    head = '%(asctime)-15s Node[' + str(kv.rank) + '] %(message)s'
    if 'log_file' in args and args.log_file is not None:
        log_file = args.log_file
        log_dir = args.log_dir
        log_file_full_name = os.path.join(log_dir, log_file)
        if not os.path.exists(log_dir):
            os.mkdir(log_dir)
        logger = logging.getLogger()
        handler = logging.FileHandler(log_file_full_name)
        formatter = logging.Formatter(head)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
        logger.setLevel(logging.DEBUG)
        logger.info('start with arguments %s', args)
    else:
        logging.basicConfig(level=logging.DEBUG, format=head)
        logging.info('start with arguments %s', args)

    # load model: resume from a rank-specific checkpoint when requested.
    model_prefix = args.model_prefix
    if model_prefix is not None:
        model_prefix += "-%d" % (kv.rank)
    model_args = {}
    if args.load_epoch is not None:
        assert model_prefix is not None
        tmp = mx.model.FeedForward.load(model_prefix, args.load_epoch)
        model_args = {'arg_params' : tmp.arg_params,
                      'aux_params' : tmp.aux_params,
                      'begin_epoch' : args.load_epoch}

    # save model
    save_model_prefix = args.save_model_prefix
    if save_model_prefix is None:
        save_model_prefix = model_prefix
    checkpoint = None if save_model_prefix is None else mx.callback.do_checkpoint(save_model_prefix)

    # data
    (train, val) = data_loader(args, kv)

    # train
    devs = mx.cpu() if args.gpus is None else [
        mx.gpu(int(i)) for i in args.gpus.split(',')]

    # epoch_size is a batch count, so use explicit integer division: plain '/'
    # would produce a float under Python 3 and break downstream consumers.
    epoch_size = args.num_examples // args.batch_size

    if args.kv_store == 'dist_sync':
        # Each worker only sees its share of the examples.
        epoch_size //= kv.num_workers
        model_args['epoch_size'] = epoch_size

    if 'lr_factor' in args and args.lr_factor < 1:
        model_args['lr_scheduler'] = mx.lr_scheduler.FactorScheduler(
            step = max(int(epoch_size * args.lr_factor_epoch), 1),
            factor = args.lr_factor)

    if 'clip_gradient' in args and args.clip_gradient is not None:
        model_args['clip_gradient'] = args.clip_gradient

    # disable kvstore for single device.
    # Bug fix: the original used "len(...) is 1", an identity comparison that
    # only works because CPython caches small ints; '==' is the correct test.
    if 'local' in kv.type and (
            args.gpus is None or len(args.gpus.split(',')) == 1):
        kv = None

    mod = mx.mod.Module(network, context=devs)

    if eval_metrics is None:
        eval_metrics = ['accuracy']
        ## TopKAccuracy only allows top_k > 1
        for top_k in [5, 10, 20]:
            eval_metrics.append(mx.metric.create('top_k_accuracy', top_k=top_k))

    if batch_end_callback is not None:
        if not isinstance(batch_end_callback, list):
            batch_end_callback = [batch_end_callback]
    else:
        batch_end_callback = []
    batch_end_callback.append(mx.callback.Speedometer(args.batch_size, 50))

    mod.fit(train_data=train, eval_metric=eval_metrics, eval_data=val, optimizer='sgd',
            optimizer_params={'learning_rate':args.lr, 'momentum': 0.9, 'wd': 0.00001},
            num_epoch=args.num_epochs, batch_end_callback=batch_end_callback,
            initializer=mx.init.Xavier(factor_type="in", magnitude=2.34),
            kvstore=kv, epoch_end_callback=checkpoint, **model_args)
| apache-2.0 |
phil0522/anote | anote-web/anoteweb/data/anote_pb2.py | 1 | 9335 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: anote.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
# NOTE(review): protoc-generated FileDescriptor for anote.proto. Do not edit
# by hand -- regenerate from anote.proto instead.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='anote.proto',
  package='anote.proto',
  serialized_pb='\n\x0b\x61note.proto\x12\x0b\x61note.proto\"\xfc\x01\n\x04Task\x12\x0f\n\x07task_id\x18\x01 \x01(\x05\x12\r\n\x05title\x18\x02 \x01(\t\x12\x0e\n\x06status\x18\x03 \x01(\t\x12\x0b\n\x03tag\x18\x04 \x03(\t\x12\x0f\n\x07project\x18\x05 \x01(\t\x12\x11\n\tparent_id\x18\x06 \x01(\x05\x12\x10\n\x08\x61ncestor\x18\x07 \x03(\x05\x12\x13\n\x0b\x64\x65scription\x18\x08 \x01(\t\x12\x0f\n\x07note_id\x18\t \x03(\x05\x12\x1f\n\x04note\x18\n \x03(\x0b\x32\x11.anote.proto.Note\x12\x13\n\x0b\x63reate_time\x18\x0b \x01(\x05\x12\x13\n\x0bupdate_time\x18\x0c \x01(\x05\x12\x10\n\x08position\x18\r \x01(\x05\"6\n\x04Note\x12\x0f\n\x07task_id\x18\x01 \x01(\x05\x12\x0f\n\x07note_id\x18\x02 \x01(\x05\x12\x0c\n\x04text\x18\x03 \x01(\t\"6\n\x03Tag\x12\r\n\x05title\x18\x01 \x01(\t\x12\x12\n\noccurrence\x18\x02 \x01(\x05\x12\x0c\n\x04hide\x18\x03 \x01(\x08')
# NOTE(review): protoc-generated descriptor for message Task (13 fields).
# Regenerate from anote.proto rather than editing by hand.
_TASK = _descriptor.Descriptor(
  name='Task',
  full_name='anote.proto.Task',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='task_id', full_name='anote.proto.Task.task_id', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='title', full_name='anote.proto.Task.title', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='status', full_name='anote.proto.Task.status', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='tag', full_name='anote.proto.Task.tag', index=3,
      number=4, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='project', full_name='anote.proto.Task.project', index=4,
      number=5, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='parent_id', full_name='anote.proto.Task.parent_id', index=5,
      number=6, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='ancestor', full_name='anote.proto.Task.ancestor', index=6,
      number=7, type=5, cpp_type=1, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='description', full_name='anote.proto.Task.description', index=7,
      number=8, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='note_id', full_name='anote.proto.Task.note_id', index=8,
      number=9, type=5, cpp_type=1, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='note', full_name='anote.proto.Task.note', index=9,
      number=10, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='create_time', full_name='anote.proto.Task.create_time', index=10,
      number=11, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='update_time', full_name='anote.proto.Task.update_time', index=11,
      number=12, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='position', full_name='anote.proto.Task.position', index=12,
      number=13, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=29,
  serialized_end=281,
)
# NOTE(review): protoc-generated descriptor for message Note. Regenerate from
# anote.proto rather than editing by hand.
_NOTE = _descriptor.Descriptor(
  name='Note',
  full_name='anote.proto.Note',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='task_id', full_name='anote.proto.Note.task_id', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='note_id', full_name='anote.proto.Note.note_id', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='text', full_name='anote.proto.Note.text', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=283,
  serialized_end=337,
)
# NOTE(review): protoc-generated descriptor for message Tag. Regenerate from
# anote.proto rather than editing by hand.
_TAG = _descriptor.Descriptor(
  name='Tag',
  full_name='anote.proto.Tag',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='title', full_name='anote.proto.Tag.title', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='occurrence', full_name='anote.proto.Tag.occurrence', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='hide', full_name='anote.proto.Tag.hide', index=2,
      number=3, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=339,
  serialized_end=393,
)
# Resolve the message-typed field and register the top-level message types
# with the file descriptor (generated wiring; do not hand-edit).
_TASK.fields_by_name['note'].message_type = _NOTE
DESCRIPTOR.message_types_by_name['Task'] = _TASK
DESCRIPTOR.message_types_by_name['Note'] = _NOTE
DESCRIPTOR.message_types_by_name['Tag'] = _TAG
class Task(_message.Message):
  # Generated message class; field accessors are synthesized from _TASK by the
  # GeneratedProtocolMessageType metaclass at class-creation time.
  __metaclass__ = _reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _TASK
  # @@protoc_insertion_point(class_scope:anote.proto.Task)
class Note(_message.Message):
  # Generated message class; field accessors are synthesized from _NOTE by the
  # GeneratedProtocolMessageType metaclass at class-creation time.
  __metaclass__ = _reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _NOTE
  # @@protoc_insertion_point(class_scope:anote.proto.Note)
class Tag(_message.Message):
  # Generated message class; field accessors are synthesized from _TAG by the
  # GeneratedProtocolMessageType metaclass at class-creation time.
  __metaclass__ = _reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _TAG
  # @@protoc_insertion_point(class_scope:anote.proto.Tag)
# @@protoc_insertion_point(module_scope)
| mit |
sclabs/sitestatus-nonrel | django/utils/unittest/__init__.py | 571 | 3069 | """
unittest2
unittest2 is a backport of the new features added to the unittest testing
framework in Python 2.7. It is tested to run on Python 2.4 - 2.6.
To use unittest2 instead of unittest simply replace ``import unittest`` with
``import unittest2``.
Copyright (c) 1999-2003 Steve Purcell
Copyright (c) 2003-2010 Python Software Foundation
This module is free software, and you may redistribute it and/or modify
it under the same terms as Python itself, so long as this copyright message
and disclaimer are retained in their original form.
IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""
import sys

# Django hackery to load the appropriate version of unittest
# Resolution order: a system-installed unittest2, then the stdlib unittest on
# Python >= 2.7 (where unittest2's features are native), and finally Django's
# bundled copy under django.utils.unittest.
try:
    # check the system path first
    from unittest2 import *
except ImportError:
    if sys.version_info >= (2,7):
        # unittest2 features are native in Python 2.7
        from unittest import *
    else:
        # otherwise use our bundled version
        __all__ = ['TestResult', 'TestCase', 'TestSuite',
                   'TextTestRunner', 'TestLoader', 'FunctionTestCase', 'main',
                   'defaultTestLoader', 'SkipTest', 'skip', 'skipIf', 'skipUnless',
                   'expectedFailure', 'TextTestResult', '__version__', 'collector']

        __version__ = '0.5.1'

        # Expose obsolete functions for backwards compatibility
        __all__.extend(['getTestCaseNames', 'makeSuite', 'findTestCases'])

        from django.utils.unittest.collector import collector
        from django.utils.unittest.result import TestResult
        from django.utils.unittest.case import \
            TestCase, FunctionTestCase, SkipTest, skip, skipIf,\
            skipUnless, expectedFailure

        from django.utils.unittest.suite import BaseTestSuite, TestSuite
        from django.utils.unittest.loader import \
            TestLoader, defaultTestLoader, makeSuite, getTestCaseNames,\
            findTestCases

        from django.utils.unittest.main import TestProgram, main, main_
        from django.utils.unittest.runner import TextTestRunner, TextTestResult

        try:
            from django.utils.unittest.signals import\
                installHandler, registerResult, removeResult, removeHandler
        except ImportError:
            # Compatibility with platforms that don't have the signal module
            pass
        else:
            __all__.extend(['installHandler', 'registerResult', 'removeResult',
                            'removeHandler'])

# deprecated
_TextTestResult = TextTestResult

# NOTE(review): presumably the marker flag unittest checks to hide this
# module's frames from failure tracebacks -- confirm against unittest's
# traceback-pruning logic.
__unittest = True
| bsd-3-clause |
kirca/odoo | addons/hr_timesheet_invoice/report/account_analytic_profit.py | 96 | 5733 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.report import report_sxw
from openerp.osv import osv
class account_analytic_profit(report_sxw.rml_parse):
    """RML report parser for the timesheet profit report.

    Exposes `lines`, `user_ids`, `journal_ids` and `line` helpers to the
    report template via the parser's localcontext.
    """
    def __init__(self, cr, uid, name, context):
        super(account_analytic_profit, self).__init__(cr, uid, name, context=context)
        self.localcontext.update({
            'lines': self._lines,
            'user_ids': self._user_ids,
            'journal_ids': self._journal_ids,
            'line': self._line,
        })
    def _user_ids(self, lines):
        """Return the browse records of the distinct users found in `lines`."""
        user_obj = self.pool['res.users']
        ids=list(set([b.user_id.id for b in lines]))
        return user_obj.browse(self.cr, self.uid, ids)
    def _journal_ids(self, form, user_id):
        """Return the analytic journals touched by `user_id` in the form's
        date range, limited to the journals selected on the wizard form."""
        line_obj = self.pool['account.analytic.line']
        journal_obj = self.pool['account.analytic.journal']
        line_ids=line_obj.search(self.cr, self.uid, [
            ('date', '>=', form['date_from']),
            ('date', '<=', form['date_to']),
            ('journal_id', 'in', form['journal_ids'][0][2]),
            ('user_id', '=', user_id),
            ])
        ids=list(set([b.journal_id.id for b in line_obj.browse(self.cr, self.uid, line_ids)]))
        return journal_obj.browse(self.cr, self.uid, ids)
    def _line(self, form, journal_ids, user_ids):
        """Aggregate analytic lines into per-invoicing-factor profit rows.

        Each returned dict carries name, amount (revenue), amount_th
        (theoretical revenue), cost, unit_amount, profit and eff(iciency).
        NOTE(review): the local name `id` shadows the builtin; kept as-is to
        avoid touching behavior in this legacy module.
        """
        line_obj = self.pool['account.analytic.line']
        product_obj = self.pool['product.product']
        price_obj = self.pool['product.pricelist']
        ids=line_obj.search(self.cr, self.uid, [
            ('date', '>=', form['date_from']),
            ('date', '<=', form['date_to']),
            ('journal_id', 'in', journal_ids),
            ('user_id', 'in', user_ids),
            ])
        res={}
        for line in line_obj.browse(self.cr, self.uid, ids):
            if line.account_id.pricelist_id:
                if line.account_id.to_invoice:
                    if line.to_invoice:
                        # Grouped by the line's invoicing factor.
                        id=line.to_invoice.id
                        name=line.to_invoice.name
                        discount=line.to_invoice.factor
                    else:
                        # Invoiceable account but no factor on the line.
                        name="/"
                        discount=1.0
                        id = -1
                else:
                    # Fixed-price account: full discount, no billed revenue.
                    name="Fixed"
                    discount=0.0
                    id=0
                pl=line.account_id.pricelist_id.id
                price=price_obj.price_get(self.cr, self.uid, [pl], line.product_id.id, line.unit_amount or 1.0, line.account_id.partner_id.id)[pl]
            else:
                # No pricelist on the account: nothing can be priced.
                name="/"
                discount=1.0
                id = -1
                price=0.0
            if id not in res:
                res[id]={'name': name, 'amount': 0, 'cost':0, 'unit_amount':0,'amount_th':0}
            # Theoretical revenue for this line at pricelist price.
            xxx = round(price * line.unit_amount * (1-(discount or 0.0)), 2)
            res[id]['amount_th']+=xxx
            if line.invoice_id:
                # Prorate the invoice's untaxed amount over all analytic lines
                # attached to the same invoice.
                self.cr.execute('select id from account_analytic_line where invoice_id=%s', (line.invoice_id.id,))
                tot = 0
                for lid in self.cr.fetchall():
                    lid2 = line_obj.browse(self.cr, self.uid, lid[0])
                    pl=lid2.account_id.pricelist_id.id
                    price=price_obj.price_get(self.cr, self.uid, [pl], lid2.product_id.id, lid2.unit_amount or 1.0, lid2.account_id.partner_id.id)[pl]
                    tot += price * lid2.unit_amount * (1-(discount or 0.0))
                if tot:
                    procent = line.invoice_id.amount_untaxed / tot
                    res[id]['amount'] += xxx * procent
                else:
                    res[id]['amount'] += xxx
            else:
                res[id]['amount'] += xxx
            # Costs are stored as negative amounts on analytic lines, hence
            # profit = amount + cost below.
            res[id]['cost']+=line.amount
            res[id]['unit_amount']+=line.unit_amount
        for id in res:
            res[id]['profit']=res[id]['amount']+res[id]['cost']
            res[id]['eff']=res[id]['cost'] and '%d' % (-res[id]['amount'] / res[id]['cost'] * 100,) or 0.0
        return res.values()
    def _lines(self, form):
        """Return the analytic lines matching the wizard form's date range,
        journals and employees."""
        line_obj = self.pool['account.analytic.line']
        ids=line_obj.search(self.cr, self.uid, [
            ('date', '>=', form['date_from']),
            ('date', '<=', form['date_to']),
            ('journal_id', 'in', form['journal_ids'][0][2]),
            ('user_id', 'in', form['employee_ids'][0][2]),
            ])
        return line_obj.browse(self.cr, self.uid, ids)
class report_account_analytic_profit(osv.AbstractModel):
    """Report model binding the 'report_analyticprofit' template to the
    account_analytic_profit parser class above."""
    _name = 'report.hr_timesheet_invoice.report_analyticprofit'
    _inherit = 'report.abstract_report'
    _template = 'hr_timesheet_invoice.report_analyticprofit'
    _wrapped_report_class = account_analytic_profit
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
yongshengwang/hue | build/env/lib/python2.7/site-packages/boto-2.38.0-py2.7.egg/boto/opsworks/layer1.py | 132 | 129869 | # Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.compat import json
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.opsworks import exceptions
class OpsWorksConnection(AWSQueryConnection):
"""
AWS OpsWorks
Welcome to the AWS OpsWorks API Reference . This guide provides
descriptions, syntax, and usage examples about AWS OpsWorks
actions and data types, including common parameters and error
codes.
AWS OpsWorks is an application management service that provides an
integrated experience for overseeing the complete application
lifecycle. For information about this product, go to the `AWS
OpsWorks`_ details page.
**SDKs and CLI**
The most common way to use the AWS OpsWorks API is by using the
AWS Command Line Interface (CLI) or by using one of the AWS SDKs
to implement applications in your preferred language. For more
information, see:
+ `AWS CLI`_
+ `AWS SDK for Java`_
+ `AWS SDK for .NET`_
+ `AWS SDK for PHP 2`_
+ `AWS SDK for Ruby`_
+ `AWS SDK for Node.js`_
+ `AWS SDK for Python(Boto)`_
**Endpoints**
AWS OpsWorks supports only one endpoint, opsworks.us-
east-1.amazonaws.com (HTTPS), so you must connect to that
endpoint. You can then use the API to direct AWS OpsWorks to
create stacks in any AWS Region.
**Chef Versions**
When you call CreateStack, CloneStack, or UpdateStack we recommend
you use the `ConfigurationManager` parameter to specify the Chef
version, 0.9, 11.4, or 11.10. The default value is currently
11.10. For more information, see `Chef Versions`_.
You can still specify Chef 0.9 for your stack, but new features
are not available for Chef 0.9 stacks, and support is scheduled to
end on July 24, 2014. We do not recommend using Chef 0.9 for new
stacks, and we recommend migrating your existing Chef 0.9 stacks
to Chef 11.10 as soon as possible.
"""
APIVersion = "2013-02-18"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "opsworks.us-east-1.amazonaws.com"
ServiceName = "OpsWorks"
TargetPrefix = "OpsWorks_20130218"
ResponseError = JSONResponseError
_faults = {
"ResourceNotFoundException": exceptions.ResourceNotFoundException,
"ValidationException": exceptions.ValidationException,
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs or kwargs['host'] is None:
kwargs['host'] = region.endpoint
super(OpsWorksConnection, self).__init__(**kwargs)
self.region = region
    def _required_auth_capability(self):
        # Request signing: this service uses AWS Signature Version 4
        # ('hmac-v4').
        return ['hmac-v4']
def assign_instance(self, instance_id, layer_ids):
"""
Assign a registered instance to a custom layer. You cannot use
this action with instances that were created with AWS
OpsWorks.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
:type layer_ids: list
:param layer_ids: The layer ID, which must correspond to a custom
layer. You cannot assign a registered instance to a built-in layer.
"""
params = {
'InstanceId': instance_id,
'LayerIds': layer_ids,
}
return self.make_request(action='AssignInstance',
body=json.dumps(params))
def assign_volume(self, volume_id, instance_id=None):
"""
Assigns one of the stack's registered Amazon EBS volumes to a
specified instance. The volume must first be registered with
the stack by calling RegisterVolume. For more information, see
`Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type volume_id: string
:param volume_id: The volume ID.
:type instance_id: string
:param instance_id: The instance ID.
"""
params = {'VolumeId': volume_id, }
if instance_id is not None:
params['InstanceId'] = instance_id
return self.make_request(action='AssignVolume',
body=json.dumps(params))
def associate_elastic_ip(self, elastic_ip, instance_id=None):
"""
Associates one of the stack's registered Elastic IP addresses
with a specified instance. The address must first be
registered with the stack by calling RegisterElasticIp. For
more information, see `Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type elastic_ip: string
:param elastic_ip: The Elastic IP address.
:type instance_id: string
:param instance_id: The instance ID.
"""
params = {'ElasticIp': elastic_ip, }
if instance_id is not None:
params['InstanceId'] = instance_id
return self.make_request(action='AssociateElasticIp',
body=json.dumps(params))
def attach_elastic_load_balancer(self, elastic_load_balancer_name,
layer_id):
"""
Attaches an Elastic Load Balancing load balancer to a
specified layer. For more information, see `Elastic Load
Balancing`_.
You must create the Elastic Load Balancing instance
separately, by using the Elastic Load Balancing console, API,
or CLI. For more information, see ` Elastic Load Balancing
Developer Guide`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type elastic_load_balancer_name: string
:param elastic_load_balancer_name: The Elastic Load Balancing
instance's name.
:type layer_id: string
:param layer_id: The ID of the layer that the Elastic Load Balancing
instance is to be attached to.
"""
params = {
'ElasticLoadBalancerName': elastic_load_balancer_name,
'LayerId': layer_id,
}
return self.make_request(action='AttachElasticLoadBalancer',
body=json.dumps(params))
def clone_stack(self, source_stack_id, service_role_arn, name=None,
region=None, vpc_id=None, attributes=None,
default_instance_profile_arn=None, default_os=None,
hostname_theme=None, default_availability_zone=None,
default_subnet_id=None, custom_json=None,
configuration_manager=None, chef_configuration=None,
use_custom_cookbooks=None,
use_opsworks_security_groups=None,
custom_cookbooks_source=None, default_ssh_key_name=None,
clone_permissions=None, clone_app_ids=None,
default_root_device_type=None):
"""
Creates a clone of a specified stack. For more information,
see `Clone a Stack`_.
**Required Permissions**: To use this action, an IAM user must
have an attached policy that explicitly grants permissions.
For more information on user permissions, see `Managing User
Permissions`_.
:type source_stack_id: string
:param source_stack_id: The source stack ID.
:type name: string
:param name: The cloned stack name.
:type region: string
:param region: The cloned stack AWS region, such as "us-east-1". For
more information about AWS regions, see `Regions and Endpoints`_.
:type vpc_id: string
:param vpc_id: The ID of the VPC that the cloned stack is to be
launched into. It must be in the specified region. All instances
are launched into this VPC, and you cannot change the ID later.
+ If your account supports EC2 Classic, the default value is no VPC.
+ If your account does not support EC2 Classic, the default value is
the default VPC for the specified region.
If the VPC ID corresponds to a default VPC and you have specified
either the `DefaultAvailabilityZone` or the `DefaultSubnetId`
parameter only, AWS OpsWorks infers the value of the other
parameter. If you specify neither parameter, AWS OpsWorks sets
these parameters to the first valid Availability Zone for the
specified region and the corresponding default VPC subnet ID,
respectively.
If you specify a nondefault VPC ID, note the following:
+ It must belong to a VPC in your account that is in the specified
region.
+ You must specify a value for `DefaultSubnetId`.
For more information on how to use AWS OpsWorks with a VPC, see
`Running a Stack in a VPC`_. For more information on default VPC
and EC2 Classic, see `Supported Platforms`_.
:type attributes: map
:param attributes: A list of stack attributes and values as key/value
pairs to be added to the cloned stack.
:type service_role_arn: string
:param service_role_arn:
The stack AWS Identity and Access Management (IAM) role, which allows
AWS OpsWorks to work with AWS resources on your behalf. You must
set this parameter to the Amazon Resource Name (ARN) for an
existing IAM role. If you create a stack by using the AWS OpsWorks
console, it creates the role for you. You can obtain an existing
stack's IAM ARN programmatically by calling DescribePermissions.
For more information about IAM ARNs, see `Using Identifiers`_.
You must set this parameter to a valid service role ARN or the action
will fail; there is no default value. You can specify the source
stack's service role ARN, if you prefer, but you must do so
explicitly.
:type default_instance_profile_arn: string
:param default_instance_profile_arn: The ARN of an IAM profile that is
the default profile for all of the stack's EC2 instances. For more
information about IAM ARNs, see `Using Identifiers`_.
:type default_os: string
:param default_os: The stacks's operating system, which must be set to
one of the following.
+ Standard operating systems: an Amazon Linux version such as `Amazon
Linux 2014.09`, `Ubuntu 12.04 LTS`, or `Ubuntu 14.04 LTS`.
+ Custom AMIs: `Custom`. You specify the custom AMI you want to use
when you create instances.
The default option is the current Amazon Linux version.
:type hostname_theme: string
:param hostname_theme: The stack's host name theme, with spaces are
replaced by underscores. The theme is used to generate host names
for the stack's instances. By default, `HostnameTheme` is set to
`Layer_Dependent`, which creates host names by appending integers
to the layer's short name. The other themes are:
+ `Baked_Goods`
+ `Clouds`
+ `European_Cities`
+ `Fruits`
+ `Greek_Deities`
+ `Legendary_Creatures_from_Japan`
+ `Planets_and_Moons`
+ `Roman_Deities`
+ `Scottish_Islands`
+ `US_Cities`
+ `Wild_Cats`
To obtain a generated host name, call `GetHostNameSuggestion`, which
returns a host name based on the current theme.
:type default_availability_zone: string
:param default_availability_zone: The cloned stack's default
Availability Zone, which must be in the specified region. For more
information, see `Regions and Endpoints`_. If you also specify a
value for `DefaultSubnetId`, the subnet must be in the same zone.
For more information, see the `VpcId` parameter description.
:type default_subnet_id: string
:param default_subnet_id: The stack's default VPC subnet ID. This
parameter is required if you specify a value for the `VpcId`
parameter. All instances are launched into this subnet unless you
specify otherwise when you create the instance. If you also specify
a value for `DefaultAvailabilityZone`, the subnet must be in that
zone. For information on default values and when this parameter is
required, see the `VpcId` parameter description.
:type custom_json: string
:param custom_json: A string that contains user-defined, custom JSON.
It is used to override the corresponding default stack
configuration JSON values. The string should be in the following
format and must escape characters such as '"'.:
`"{\"key1\": \"value1\", \"key2\": \"value2\",...}"`
For more information on custom JSON, see `Use Custom JSON to Modify the
Stack Configuration JSON`_
:type configuration_manager: dict
:param configuration_manager: The configuration manager. When you clone
a stack we recommend that you use the configuration manager to
specify the Chef version, 0.9, 11.4, or 11.10. The default value is
currently 11.4.
:type chef_configuration: dict
:param chef_configuration: A `ChefConfiguration` object that specifies
whether to enable Berkshelf and the Berkshelf version on Chef 11.10
stacks. For more information, see `Create a New Stack`_.
:type use_custom_cookbooks: boolean
:param use_custom_cookbooks: Whether to use custom cookbooks.
:type use_opsworks_security_groups: boolean
:param use_opsworks_security_groups: Whether to associate the AWS
OpsWorks built-in security groups with the stack's layers.
AWS OpsWorks provides a standard set of built-in security groups, one
for each layer, which are associated with layers by default. With
`UseOpsworksSecurityGroups` you can instead provide your own custom
security groups. `UseOpsworksSecurityGroups` has the following
settings:
+ True - AWS OpsWorks automatically associates the appropriate built-in
security group with each layer (default setting). You can associate
additional security groups with a layer after you create it but you
cannot delete the built-in security group.
+ False - AWS OpsWorks does not associate built-in security groups with
layers. You must create appropriate EC2 security groups and
associate a security group with each layer that you create.
However, you can still manually associate a built-in security group
with a layer on creation; custom security groups are required only
for those layers that need custom settings.
For more information, see `Create a New Stack`_.
:type custom_cookbooks_source: dict
:param custom_cookbooks_source: Contains the information required to
retrieve an app or cookbook from a repository. For more
information, see `Creating Apps`_ or `Custom Recipes and
Cookbooks`_.
:type default_ssh_key_name: string
:param default_ssh_key_name: A default SSH key for the stack instances.
You can override this value when you create or update an instance.
:type clone_permissions: boolean
:param clone_permissions: Whether to clone the source stack's
permissions.
:type clone_app_ids: list
:param clone_app_ids: A list of source stack app IDs to be included in
the cloned stack.
:type default_root_device_type: string
:param default_root_device_type: The default root device type. This
value is used by default for all instances in the cloned stack, but
you can override it when you create an instance. For more
information, see `Storage for the Root Device`_.
"""
params = {
'SourceStackId': source_stack_id,
'ServiceRoleArn': service_role_arn,
}
if name is not None:
params['Name'] = name
if region is not None:
params['Region'] = region
if vpc_id is not None:
params['VpcId'] = vpc_id
if attributes is not None:
params['Attributes'] = attributes
if default_instance_profile_arn is not None:
params['DefaultInstanceProfileArn'] = default_instance_profile_arn
if default_os is not None:
params['DefaultOs'] = default_os
if hostname_theme is not None:
params['HostnameTheme'] = hostname_theme
if default_availability_zone is not None:
params['DefaultAvailabilityZone'] = default_availability_zone
if default_subnet_id is not None:
params['DefaultSubnetId'] = default_subnet_id
if custom_json is not None:
params['CustomJson'] = custom_json
if configuration_manager is not None:
params['ConfigurationManager'] = configuration_manager
if chef_configuration is not None:
params['ChefConfiguration'] = chef_configuration
if use_custom_cookbooks is not None:
params['UseCustomCookbooks'] = use_custom_cookbooks
if use_opsworks_security_groups is not None:
params['UseOpsworksSecurityGroups'] = use_opsworks_security_groups
if custom_cookbooks_source is not None:
params['CustomCookbooksSource'] = custom_cookbooks_source
if default_ssh_key_name is not None:
params['DefaultSshKeyName'] = default_ssh_key_name
if clone_permissions is not None:
params['ClonePermissions'] = clone_permissions
if clone_app_ids is not None:
params['CloneAppIds'] = clone_app_ids
if default_root_device_type is not None:
params['DefaultRootDeviceType'] = default_root_device_type
return self.make_request(action='CloneStack',
body=json.dumps(params))
def create_app(self, stack_id, name, type, shortname=None,
description=None, data_sources=None, app_source=None,
domains=None, enable_ssl=None, ssl_configuration=None,
attributes=None, environment=None):
"""
Creates an app for a specified stack. For more information,
see `Creating Apps`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
:type shortname: string
:param shortname: The app's short name.
:type name: string
:param name: The app name.
:type description: string
:param description: A description of the app.
:type data_sources: list
:param data_sources: The app's data source.
:type type: string
:param type: The app type. Each supported type is associated with a
particular layer. For example, PHP applications are associated with
a PHP layer. AWS OpsWorks deploys an application to those instances
that are members of the corresponding layer.
:type app_source: dict
:param app_source: A `Source` object that specifies the app repository.
:type domains: list
:param domains: The app virtual host settings, with multiple domains
separated by commas. For example: `'www.example.com, example.com'`
:type enable_ssl: boolean
:param enable_ssl: Whether to enable SSL for the app.
:type ssl_configuration: dict
:param ssl_configuration: An `SslConfiguration` object with the SSL
configuration.
:type attributes: map
:param attributes: One or more user-defined key/value pairs to be added
to the stack attributes.
:type environment: list
:param environment:
An array of `EnvironmentVariable` objects that specify environment
variables to be associated with the app. You can specify up to ten
environment variables. After you deploy the app, these variables
are defined on the associated app server instance.
This parameter is supported only by Chef 11.10 stacks. If you have
specified one or more environment variables, you cannot modify the
stack's Chef version.
"""
params = {'StackId': stack_id, 'Name': name, 'Type': type, }
if shortname is not None:
params['Shortname'] = shortname
if description is not None:
params['Description'] = description
if data_sources is not None:
params['DataSources'] = data_sources
if app_source is not None:
params['AppSource'] = app_source
if domains is not None:
params['Domains'] = domains
if enable_ssl is not None:
params['EnableSsl'] = enable_ssl
if ssl_configuration is not None:
params['SslConfiguration'] = ssl_configuration
if attributes is not None:
params['Attributes'] = attributes
if environment is not None:
params['Environment'] = environment
return self.make_request(action='CreateApp',
body=json.dumps(params))
def create_deployment(self, stack_id, command, app_id=None,
instance_ids=None, comment=None, custom_json=None):
"""
Runs deployment or stack commands. For more information, see
`Deploying Apps`_ and `Run Stack Commands`_.
**Required Permissions**: To use this action, an IAM user must
have a Deploy or Manage permissions level for the stack, or an
attached policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
:type app_id: string
:param app_id: The app ID. This parameter is required for app
deployments, but not for other deployment commands.
:type instance_ids: list
:param instance_ids: The instance IDs for the deployment targets.
:type command: dict
:param command: A `DeploymentCommand` object that specifies the
deployment command and any associated arguments.
:type comment: string
:param comment: A user-defined comment.
:type custom_json: string
:param custom_json: A string that contains user-defined, custom JSON.
It is used to override the corresponding default stack
configuration JSON values. The string should be in the following
format and must escape characters such as '"'.:
`"{\"key1\": \"value1\", \"key2\": \"value2\",...}"`
For more information on custom JSON, see `Use Custom JSON to Modify the
Stack Configuration JSON`_.
"""
params = {'StackId': stack_id, 'Command': command, }
if app_id is not None:
params['AppId'] = app_id
if instance_ids is not None:
params['InstanceIds'] = instance_ids
if comment is not None:
params['Comment'] = comment
if custom_json is not None:
params['CustomJson'] = custom_json
return self.make_request(action='CreateDeployment',
body=json.dumps(params))
def create_instance(self, stack_id, layer_ids, instance_type,
auto_scaling_type=None, hostname=None, os=None,
ami_id=None, ssh_key_name=None,
availability_zone=None, virtualization_type=None,
subnet_id=None, architecture=None,
root_device_type=None, install_updates_on_boot=None,
ebs_optimized=None):
"""
Creates an instance in a specified stack. For more
information, see `Adding an Instance to a Layer`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
:type layer_ids: list
:param layer_ids: An array that contains the instance layer IDs.
:type instance_type: string
:param instance_type: The instance type. AWS OpsWorks supports all
instance types except Cluster Compute, Cluster GPU, and High Memory
Cluster. For more information, see `Instance Families and Types`_.
The parameter values that you use to specify the various types are
in the API Name column of the Available Instance Types table.
:type auto_scaling_type: string
:param auto_scaling_type: For load-based or time-based instances, the
type.
:type hostname: string
:param hostname: The instance host name.
:type os: string
:param os: The instance's operating system, which must be set to one of
the following.
+ Standard operating systems: an Amazon Linux version such as `Amazon
Linux 2014.09`, `Ubuntu 12.04 LTS`, or `Ubuntu 14.04 LTS`.
+ Custom AMIs: `Custom`
The default option is the current Amazon Linux version. If you set this
parameter to `Custom`, you must use the CreateInstance action's
AmiId parameter to specify the custom AMI that you want to use. For
more information on the standard operating systems, see `Operating
Systems`_For more information on how to use custom AMIs with
OpsWorks, see `Using Custom AMIs`_.
:type ami_id: string
:param ami_id:
A custom AMI ID to be used to create the instance. The AMI should be
based on one of the standard AWS OpsWorks AMIs: Amazon Linux,
Ubuntu 12.04 LTS, or Ubuntu 14.04 LTS. For more information, see
`Instances`_.
If you specify a custom AMI, you must set `Os` to `Custom`.
:type ssh_key_name: string
:param ssh_key_name: The instance SSH key name.
:type availability_zone: string
:param availability_zone: The instance Availability Zone. For more
information, see `Regions and Endpoints`_.
:type virtualization_type: string
:param virtualization_type: The instance's virtualization type,
`paravirtual` or `hvm`.
:type subnet_id: string
:param subnet_id: The ID of the instance's subnet. If the stack is
running in a VPC, you can use this parameter to override the
stack's default subnet ID value and direct AWS OpsWorks to launch
the instance in a different subnet.
:type architecture: string
:param architecture: The instance architecture. The default option is
`x86_64`. Instance types do not necessarily support both
architectures. For a list of the architectures that are supported
by the different instance types, see `Instance Families and
Types`_.
:type root_device_type: string
:param root_device_type: The instance root device type. For more
information, see `Storage for the Root Device`_.
:type install_updates_on_boot: boolean
:param install_updates_on_boot:
Whether to install operating system and package updates when the
instance boots. The default value is `True`. To control when
updates are installed, set this value to `False`. You must then
update your instances manually by using CreateDeployment to run the
`update_dependencies` stack command or manually running `yum`
(Amazon Linux) or `apt-get` (Ubuntu) on the instances.
We strongly recommend using the default value of `True` to ensure that
your instances have the latest security updates.
:type ebs_optimized: boolean
:param ebs_optimized: Whether to create an Amazon EBS-optimized
instance.
"""
params = {
'StackId': stack_id,
'LayerIds': layer_ids,
'InstanceType': instance_type,
}
if auto_scaling_type is not None:
params['AutoScalingType'] = auto_scaling_type
if hostname is not None:
params['Hostname'] = hostname
if os is not None:
params['Os'] = os
if ami_id is not None:
params['AmiId'] = ami_id
if ssh_key_name is not None:
params['SshKeyName'] = ssh_key_name
if availability_zone is not None:
params['AvailabilityZone'] = availability_zone
if virtualization_type is not None:
params['VirtualizationType'] = virtualization_type
if subnet_id is not None:
params['SubnetId'] = subnet_id
if architecture is not None:
params['Architecture'] = architecture
if root_device_type is not None:
params['RootDeviceType'] = root_device_type
if install_updates_on_boot is not None:
params['InstallUpdatesOnBoot'] = install_updates_on_boot
if ebs_optimized is not None:
params['EbsOptimized'] = ebs_optimized
return self.make_request(action='CreateInstance',
body=json.dumps(params))
def create_layer(self, stack_id, type, name, shortname, attributes=None,
custom_instance_profile_arn=None,
custom_security_group_ids=None, packages=None,
volume_configurations=None, enable_auto_healing=None,
auto_assign_elastic_ips=None,
auto_assign_public_ips=None, custom_recipes=None,
install_updates_on_boot=None,
use_ebs_optimized_instances=None,
lifecycle_event_configuration=None):
"""
Creates a layer. For more information, see `How to Create a
Layer`_.
You should use **CreateLayer** for noncustom layer types such
as PHP App Server only if the stack does not have an existing
layer of that type. A stack can have at most one instance of
each noncustom layer; if you attempt to create a second
instance, **CreateLayer** fails. A stack can have an arbitrary
number of custom layers, so you can call **CreateLayer** as
many times as you like for that layer type.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The layer stack ID.
:type type: string
:param type: The layer type. A stack cannot have more than one built-in
layer of the same type. It can have any number of custom layers.
:type name: string
:param name: The layer name, which is used by the console.
:type shortname: string
:param shortname: The layer short name, which is used internally by AWS
OpsWorks and by Chef recipes. The short name is also used as the
name for the directory where your app files are installed. It can
have a maximum of 200 characters, which are limited to the
alphanumeric characters, '-', '_', and '.'.
:type attributes: map
:param attributes: One or more user-defined key/value pairs to be added
to the stack attributes.
:type custom_instance_profile_arn: string
:param custom_instance_profile_arn: The ARN of an IAM profile that to
be used for the layer's EC2 instances. For more information about
IAM ARNs, see `Using Identifiers`_.
:type custom_security_group_ids: list
:param custom_security_group_ids: An array containing the layer custom
security group IDs.
:type packages: list
:param packages: An array of `Package` objects that describe the layer
packages.
:type volume_configurations: list
:param volume_configurations: A `VolumeConfigurations` object that
describes the layer's Amazon EBS volumes.
:type enable_auto_healing: boolean
:param enable_auto_healing: Whether to disable auto healing for the
layer.
:type auto_assign_elastic_ips: boolean
:param auto_assign_elastic_ips: Whether to automatically assign an
`Elastic IP address`_ to the layer's instances. For more
information, see `How to Edit a Layer`_.
:type auto_assign_public_ips: boolean
:param auto_assign_public_ips: For stacks that are running in a VPC,
whether to automatically assign a public IP address to the layer's
instances. For more information, see `How to Edit a Layer`_.
:type custom_recipes: dict
:param custom_recipes: A `LayerCustomRecipes` object that specifies the
layer custom recipes.
:type install_updates_on_boot: boolean
:param install_updates_on_boot:
Whether to install operating system and package updates when the
instance boots. The default value is `True`. To control when
updates are installed, set this value to `False`. You must then
update your instances manually by using CreateDeployment to run the
`update_dependencies` stack command or manually running `yum`
(Amazon Linux) or `apt-get` (Ubuntu) on the instances.
We strongly recommend using the default value of `True`, to ensure that
your instances have the latest security updates.
:type use_ebs_optimized_instances: boolean
:param use_ebs_optimized_instances: Whether to use Amazon EBS-optimized
instances.
:type lifecycle_event_configuration: dict
:param lifecycle_event_configuration: A LifeCycleEventConfiguration
object that you can use to configure the Shutdown event to specify
an execution timeout and enable or disable Elastic Load Balancer
connection draining.
"""
params = {
'StackId': stack_id,
'Type': type,
'Name': name,
'Shortname': shortname,
}
if attributes is not None:
params['Attributes'] = attributes
if custom_instance_profile_arn is not None:
params['CustomInstanceProfileArn'] = custom_instance_profile_arn
if custom_security_group_ids is not None:
params['CustomSecurityGroupIds'] = custom_security_group_ids
if packages is not None:
params['Packages'] = packages
if volume_configurations is not None:
params['VolumeConfigurations'] = volume_configurations
if enable_auto_healing is not None:
params['EnableAutoHealing'] = enable_auto_healing
if auto_assign_elastic_ips is not None:
params['AutoAssignElasticIps'] = auto_assign_elastic_ips
if auto_assign_public_ips is not None:
params['AutoAssignPublicIps'] = auto_assign_public_ips
if custom_recipes is not None:
params['CustomRecipes'] = custom_recipes
if install_updates_on_boot is not None:
params['InstallUpdatesOnBoot'] = install_updates_on_boot
if use_ebs_optimized_instances is not None:
params['UseEbsOptimizedInstances'] = use_ebs_optimized_instances
if lifecycle_event_configuration is not None:
params['LifecycleEventConfiguration'] = lifecycle_event_configuration
return self.make_request(action='CreateLayer',
body=json.dumps(params))
def create_stack(self, name, region, service_role_arn,
default_instance_profile_arn, vpc_id=None,
attributes=None, default_os=None, hostname_theme=None,
default_availability_zone=None, default_subnet_id=None,
custom_json=None, configuration_manager=None,
chef_configuration=None, use_custom_cookbooks=None,
use_opsworks_security_groups=None,
custom_cookbooks_source=None, default_ssh_key_name=None,
default_root_device_type=None):
"""
Creates a new stack. For more information, see `Create a New
Stack`_.
**Required Permissions**: To use this action, an IAM user must
have an attached policy that explicitly grants permissions.
For more information on user permissions, see `Managing User
Permissions`_.
:type name: string
:param name: The stack name.
:type region: string
:param region: The stack AWS region, such as "us-east-1". For more
information about Amazon regions, see `Regions and Endpoints`_.
:type vpc_id: string
:param vpc_id: The ID of the VPC that the stack is to be launched into.
It must be in the specified region. All instances are launched into
this VPC, and you cannot change the ID later.
+ If your account supports EC2 Classic, the default value is no VPC.
+ If your account does not support EC2 Classic, the default value is
the default VPC for the specified region.
If the VPC ID corresponds to a default VPC and you have specified
either the `DefaultAvailabilityZone` or the `DefaultSubnetId`
parameter only, AWS OpsWorks infers the value of the other
parameter. If you specify neither parameter, AWS OpsWorks sets
these parameters to the first valid Availability Zone for the
specified region and the corresponding default VPC subnet ID,
respectively.
If you specify a nondefault VPC ID, note the following:
+ It must belong to a VPC in your account that is in the specified
region.
+ You must specify a value for `DefaultSubnetId`.
For more information on how to use AWS OpsWorks with a VPC, see
`Running a Stack in a VPC`_. For more information on default VPC
and EC2 Classic, see `Supported Platforms`_.
:type attributes: map
:param attributes: One or more user-defined key/value pairs to be added
to the stack attributes.
:type service_role_arn: string
:param service_role_arn: The stack AWS Identity and Access Management
(IAM) role, which allows AWS OpsWorks to work with AWS resources on
your behalf. You must set this parameter to the Amazon Resource
Name (ARN) for an existing IAM role. For more information about IAM
ARNs, see `Using Identifiers`_.
:type default_instance_profile_arn: string
:param default_instance_profile_arn: The ARN of an IAM profile that is
the default profile for all of the stack's EC2 instances. For more
information about IAM ARNs, see `Using Identifiers`_.
:type default_os: string
:param default_os: The stack's operating system, which must be set to
one of the following.
+ Standard operating systems: an Amazon Linux version such as `Amazon
Linux 2014.09`, `Ubuntu 12.04 LTS`, or `Ubuntu 14.04 LTS`.
+ Custom AMIs: `Custom`. You specify the custom AMI you want to use
when you create instances.
The default option is the current Amazon Linux version.
:type hostname_theme: string
:param hostname_theme: The stack's host name theme, with spaces are
replaced by underscores. The theme is used to generate host names
for the stack's instances. By default, `HostnameTheme` is set to
`Layer_Dependent`, which creates host names by appending integers
to the layer's short name. The other themes are:
+ `Baked_Goods`
+ `Clouds`
+ `European_Cities`
+ `Fruits`
+ `Greek_Deities`
+ `Legendary_Creatures_from_Japan`
+ `Planets_and_Moons`
+ `Roman_Deities`
+ `Scottish_Islands`
+ `US_Cities`
+ `Wild_Cats`
To obtain a generated host name, call `GetHostNameSuggestion`, which
returns a host name based on the current theme.
:type default_availability_zone: string
:param default_availability_zone: The stack's default Availability
Zone, which must be in the specified region. For more information,
see `Regions and Endpoints`_. If you also specify a value for
`DefaultSubnetId`, the subnet must be in the same zone. For more
information, see the `VpcId` parameter description.
:type default_subnet_id: string
:param default_subnet_id: The stack's default VPC subnet ID. This
parameter is required if you specify a value for the `VpcId`
parameter. All instances are launched into this subnet unless you
specify otherwise when you create the instance. If you also specify
a value for `DefaultAvailabilityZone`, the subnet must be in that
zone. For information on default values and when this parameter is
required, see the `VpcId` parameter description.
:type custom_json: string
:param custom_json: A string that contains user-defined, custom JSON.
It is used to override the corresponding default stack
configuration JSON values. The string should be in the following
format and must escape characters such as '"'.:
`"{\"key1\": \"value1\", \"key2\": \"value2\",...}"`
For more information on custom JSON, see `Use Custom JSON to Modify the
Stack Configuration JSON`_.
:type configuration_manager: dict
:param configuration_manager: The configuration manager. When you clone
a stack we recommend that you use the configuration manager to
specify the Chef version, 0.9, 11.4, or 11.10. The default value is
currently 11.4.
:type chef_configuration: dict
:param chef_configuration: A `ChefConfiguration` object that specifies
whether to enable Berkshelf and the Berkshelf version on Chef 11.10
stacks. For more information, see `Create a New Stack`_.
:type use_custom_cookbooks: boolean
:param use_custom_cookbooks: Whether the stack uses custom cookbooks.
:type use_opsworks_security_groups: boolean
:param use_opsworks_security_groups: Whether to associate the AWS
OpsWorks built-in security groups with the stack's layers.
AWS OpsWorks provides a standard set of built-in security groups, one
for each layer, which are associated with layers by default. With
`UseOpsworksSecurityGroups` you can instead provide your own custom
security groups. `UseOpsworksSecurityGroups` has the following
settings:
+ True - AWS OpsWorks automatically associates the appropriate built-in
security group with each layer (default setting). You can associate
additional security groups with a layer after you create it but you
cannot delete the built-in security group.
+ False - AWS OpsWorks does not associate built-in security groups with
layers. You must create appropriate EC2 security groups and
associate a security group with each layer that you create.
However, you can still manually associate a built-in security group
with a layer on creation; custom security groups are required only
for those layers that need custom settings.
For more information, see `Create a New Stack`_.
:type custom_cookbooks_source: dict
:param custom_cookbooks_source: Contains the information required to
retrieve an app or cookbook from a repository. For more
information, see `Creating Apps`_ or `Custom Recipes and
Cookbooks`_.
:type default_ssh_key_name: string
:param default_ssh_key_name: A default SSH key for the stack instances.
You can override this value when you create or update an instance.
:type default_root_device_type: string
:param default_root_device_type: The default root device type. This
value is used by default for all instances in the stack, but you
can override it when you create an instance. The default option is
`instance-store`. For more information, see `Storage for the Root
Device`_.
"""
params = {
'Name': name,
'Region': region,
'ServiceRoleArn': service_role_arn,
'DefaultInstanceProfileArn': default_instance_profile_arn,
}
if vpc_id is not None:
params['VpcId'] = vpc_id
if attributes is not None:
params['Attributes'] = attributes
if default_os is not None:
params['DefaultOs'] = default_os
if hostname_theme is not None:
params['HostnameTheme'] = hostname_theme
if default_availability_zone is not None:
params['DefaultAvailabilityZone'] = default_availability_zone
if default_subnet_id is not None:
params['DefaultSubnetId'] = default_subnet_id
if custom_json is not None:
params['CustomJson'] = custom_json
if configuration_manager is not None:
params['ConfigurationManager'] = configuration_manager
if chef_configuration is not None:
params['ChefConfiguration'] = chef_configuration
if use_custom_cookbooks is not None:
params['UseCustomCookbooks'] = use_custom_cookbooks
if use_opsworks_security_groups is not None:
params['UseOpsworksSecurityGroups'] = use_opsworks_security_groups
if custom_cookbooks_source is not None:
params['CustomCookbooksSource'] = custom_cookbooks_source
if default_ssh_key_name is not None:
params['DefaultSshKeyName'] = default_ssh_key_name
if default_root_device_type is not None:
params['DefaultRootDeviceType'] = default_root_device_type
return self.make_request(action='CreateStack',
body=json.dumps(params))
def create_user_profile(self, iam_user_arn, ssh_username=None,
ssh_public_key=None, allow_self_management=None):
"""
Creates a new user profile.
**Required Permissions**: To use this action, an IAM user must
have an attached policy that explicitly grants permissions.
For more information on user permissions, see `Managing User
Permissions`_.
:type iam_user_arn: string
:param iam_user_arn: The user's IAM ARN.
:type ssh_username: string
:param ssh_username: The user's SSH user name. The allowable characters
are [a-z], [A-Z], [0-9], '-', and '_'. If the specified name
includes other punctuation marks, AWS OpsWorks removes them. For
example, `my.name` will be changed to `myname`. If you do not
specify an SSH user name, AWS OpsWorks generates one from the IAM
user name.
:type ssh_public_key: string
:param ssh_public_key: The user's public SSH key.
:type allow_self_management: boolean
:param allow_self_management: Whether users can specify their own SSH
public key through the My Settings page. For more information, see
`Setting an IAM User's Public SSH Key`_.
"""
params = {'IamUserArn': iam_user_arn, }
if ssh_username is not None:
params['SshUsername'] = ssh_username
if ssh_public_key is not None:
params['SshPublicKey'] = ssh_public_key
if allow_self_management is not None:
params['AllowSelfManagement'] = allow_self_management
return self.make_request(action='CreateUserProfile',
body=json.dumps(params))
def delete_app(self, app_id):
"""
Deletes a specified app.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type app_id: string
:param app_id: The app ID.
"""
params = {'AppId': app_id, }
return self.make_request(action='DeleteApp',
body=json.dumps(params))
def delete_instance(self, instance_id, delete_elastic_ip=None,
delete_volumes=None):
"""
Deletes a specified instance, which terminates the associated
Amazon EC2 instance. You must stop an instance before you can
delete it.
For more information, see `Deleting Instances`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
:type delete_elastic_ip: boolean
:param delete_elastic_ip: Whether to delete the instance Elastic IP
address.
:type delete_volumes: boolean
:param delete_volumes: Whether to delete the instance's Amazon EBS
volumes.
"""
params = {'InstanceId': instance_id, }
if delete_elastic_ip is not None:
params['DeleteElasticIp'] = delete_elastic_ip
if delete_volumes is not None:
params['DeleteVolumes'] = delete_volumes
return self.make_request(action='DeleteInstance',
body=json.dumps(params))
def delete_layer(self, layer_id):
"""
Deletes a specified layer. You must first stop and then delete
all associated instances or unassign registered instances. For
more information, see `How to Delete a Layer`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type layer_id: string
:param layer_id: The layer ID.
"""
params = {'LayerId': layer_id, }
return self.make_request(action='DeleteLayer',
body=json.dumps(params))
def delete_stack(self, stack_id):
"""
Deletes a specified stack. You must first delete all
instances, layers, and apps or deregister registered
instances. For more information, see `Shut Down a Stack`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
"""
params = {'StackId': stack_id, }
return self.make_request(action='DeleteStack',
body=json.dumps(params))
def delete_user_profile(self, iam_user_arn):
"""
Deletes a user profile.
**Required Permissions**: To use this action, an IAM user must
have an attached policy that explicitly grants permissions.
For more information on user permissions, see `Managing User
Permissions`_.
:type iam_user_arn: string
:param iam_user_arn: The user's IAM ARN.
"""
params = {'IamUserArn': iam_user_arn, }
return self.make_request(action='DeleteUserProfile',
body=json.dumps(params))
def deregister_elastic_ip(self, elastic_ip):
"""
Deregisters a specified Elastic IP address. The address can
then be registered by another stack. For more information, see
`Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type elastic_ip: string
:param elastic_ip: The Elastic IP address.
"""
params = {'ElasticIp': elastic_ip, }
return self.make_request(action='DeregisterElasticIp',
body=json.dumps(params))
def deregister_instance(self, instance_id):
"""
Deregister a registered Amazon EC2 or on-premises instance.
This action removes the instance from the stack and returns it
to your control. This action can not be used with instances
that were created with AWS OpsWorks.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
"""
params = {'InstanceId': instance_id, }
return self.make_request(action='DeregisterInstance',
body=json.dumps(params))
def deregister_rds_db_instance(self, rds_db_instance_arn):
"""
Deregisters an Amazon RDS instance.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type rds_db_instance_arn: string
:param rds_db_instance_arn: The Amazon RDS instance's ARN.
"""
params = {'RdsDbInstanceArn': rds_db_instance_arn, }
return self.make_request(action='DeregisterRdsDbInstance',
body=json.dumps(params))
def deregister_volume(self, volume_id):
"""
Deregisters an Amazon EBS volume. The volume can then be
registered by another stack. For more information, see
`Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type volume_id: string
:param volume_id: The volume ID.
"""
params = {'VolumeId': volume_id, }
return self.make_request(action='DeregisterVolume',
body=json.dumps(params))
def describe_apps(self, stack_id=None, app_ids=None):
"""
Requests a description of a specified set of apps.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: The app stack ID. If you use this parameter,
`DescribeApps` returns a description of the apps in the specified
stack.
:type app_ids: list
:param app_ids: An array of app IDs for the apps to be described. If
you use this parameter, `DescribeApps` returns a description of the
specified apps. Otherwise, it returns a description of every app.
"""
params = {}
if stack_id is not None:
params['StackId'] = stack_id
if app_ids is not None:
params['AppIds'] = app_ids
return self.make_request(action='DescribeApps',
body=json.dumps(params))
def describe_commands(self, deployment_id=None, instance_id=None,
command_ids=None):
"""
Describes the results of specified commands.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type deployment_id: string
:param deployment_id: The deployment ID. If you include this parameter,
`DescribeCommands` returns a description of the commands associated
with the specified deployment.
:type instance_id: string
:param instance_id: The instance ID. If you include this parameter,
`DescribeCommands` returns a description of the commands associated
with the specified instance.
:type command_ids: list
:param command_ids: An array of command IDs. If you include this
parameter, `DescribeCommands` returns a description of the
specified commands. Otherwise, it returns a description of every
command.
"""
params = {}
if deployment_id is not None:
params['DeploymentId'] = deployment_id
if instance_id is not None:
params['InstanceId'] = instance_id
if command_ids is not None:
params['CommandIds'] = command_ids
return self.make_request(action='DescribeCommands',
body=json.dumps(params))
def describe_deployments(self, stack_id=None, app_id=None,
deployment_ids=None):
"""
Requests a description of a specified set of deployments.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: The stack ID. If you include this parameter,
`DescribeDeployments` returns a description of the commands
associated with the specified stack.
:type app_id: string
:param app_id: The app ID. If you include this parameter,
`DescribeDeployments` returns a description of the commands
associated with the specified app.
:type deployment_ids: list
:param deployment_ids: An array of deployment IDs to be described. If
you include this parameter, `DescribeDeployments` returns a
description of the specified deployments. Otherwise, it returns a
description of every deployment.
"""
params = {}
if stack_id is not None:
params['StackId'] = stack_id
if app_id is not None:
params['AppId'] = app_id
if deployment_ids is not None:
params['DeploymentIds'] = deployment_ids
return self.make_request(action='DescribeDeployments',
body=json.dumps(params))
def describe_elastic_ips(self, instance_id=None, stack_id=None, ips=None):
"""
Describes `Elastic IP addresses`_.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type instance_id: string
:param instance_id: The instance ID. If you include this parameter,
`DescribeElasticIps` returns a description of the Elastic IP
addresses associated with the specified instance.
:type stack_id: string
:param stack_id: A stack ID. If you include this parameter,
`DescribeElasticIps` returns a description of the Elastic IP
addresses that are registered with the specified stack.
:type ips: list
:param ips: An array of Elastic IP addresses to be described. If you
include this parameter, `DescribeElasticIps` returns a description
of the specified Elastic IP addresses. Otherwise, it returns a
description of every Elastic IP address.
"""
params = {}
if instance_id is not None:
params['InstanceId'] = instance_id
if stack_id is not None:
params['StackId'] = stack_id
if ips is not None:
params['Ips'] = ips
return self.make_request(action='DescribeElasticIps',
body=json.dumps(params))
def describe_elastic_load_balancers(self, stack_id=None, layer_ids=None):
"""
Describes a stack's Elastic Load Balancing instances.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: A stack ID. The action describes the stack's Elastic
Load Balancing instances.
:type layer_ids: list
:param layer_ids: A list of layer IDs. The action describes the Elastic
Load Balancing instances for the specified layers.
"""
params = {}
if stack_id is not None:
params['StackId'] = stack_id
if layer_ids is not None:
params['LayerIds'] = layer_ids
return self.make_request(action='DescribeElasticLoadBalancers',
body=json.dumps(params))
def describe_instances(self, stack_id=None, layer_id=None,
instance_ids=None):
"""
Requests a description of a set of instances.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: A stack ID. If you use this parameter,
`DescribeInstances` returns descriptions of the instances
associated with the specified stack.
:type layer_id: string
:param layer_id: A layer ID. If you use this parameter,
`DescribeInstances` returns descriptions of the instances
associated with the specified layer.
:type instance_ids: list
:param instance_ids: An array of instance IDs to be described. If you
use this parameter, `DescribeInstances` returns a description of
the specified instances. Otherwise, it returns a description of
every instance.
"""
params = {}
if stack_id is not None:
params['StackId'] = stack_id
if layer_id is not None:
params['LayerId'] = layer_id
if instance_ids is not None:
params['InstanceIds'] = instance_ids
return self.make_request(action='DescribeInstances',
body=json.dumps(params))
def describe_layers(self, stack_id=None, layer_ids=None):
"""
Requests a description of one or more layers in a specified
stack.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
:type layer_ids: list
:param layer_ids: An array of layer IDs that specify the layers to be
described. If you omit this parameter, `DescribeLayers` returns a
description of every layer in the specified stack.
"""
params = {}
if stack_id is not None:
params['StackId'] = stack_id
if layer_ids is not None:
params['LayerIds'] = layer_ids
return self.make_request(action='DescribeLayers',
body=json.dumps(params))
def describe_load_based_auto_scaling(self, layer_ids):
"""
Describes load-based auto scaling configurations for specified
layers.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type layer_ids: list
:param layer_ids: An array of layer IDs.
"""
params = {'LayerIds': layer_ids, }
return self.make_request(action='DescribeLoadBasedAutoScaling',
body=json.dumps(params))
def describe_my_user_profile(self):
"""
Describes a user's SSH information.
**Required Permissions**: To use this action, an IAM user must
have self-management enabled or an attached policy that
explicitly grants permissions. For more information on user
permissions, see `Managing User Permissions`_.
"""
params = {}
return self.make_request(action='DescribeMyUserProfile',
body=json.dumps(params))
def describe_permissions(self, iam_user_arn=None, stack_id=None):
"""
Describes the permissions for a specified stack.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type iam_user_arn: string
:param iam_user_arn: The user's IAM ARN. For more information about IAM
ARNs, see `Using Identifiers`_.
:type stack_id: string
:param stack_id: The stack ID.
"""
params = {}
if iam_user_arn is not None:
params['IamUserArn'] = iam_user_arn
if stack_id is not None:
params['StackId'] = stack_id
return self.make_request(action='DescribePermissions',
body=json.dumps(params))
def describe_raid_arrays(self, instance_id=None, stack_id=None,
raid_array_ids=None):
"""
Describe an instance's RAID arrays.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type instance_id: string
:param instance_id: The instance ID. If you use this parameter,
`DescribeRaidArrays` returns descriptions of the RAID arrays
associated with the specified instance.
:type stack_id: string
:param stack_id: The stack ID.
:type raid_array_ids: list
:param raid_array_ids: An array of RAID array IDs. If you use this
parameter, `DescribeRaidArrays` returns descriptions of the
specified arrays. Otherwise, it returns a description of every
array.
"""
params = {}
if instance_id is not None:
params['InstanceId'] = instance_id
if stack_id is not None:
params['StackId'] = stack_id
if raid_array_ids is not None:
params['RaidArrayIds'] = raid_array_ids
return self.make_request(action='DescribeRaidArrays',
body=json.dumps(params))
def describe_rds_db_instances(self, stack_id, rds_db_instance_arns=None):
"""
Describes Amazon RDS instances.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: The stack ID that the instances are registered with.
The operation returns descriptions of all registered Amazon RDS
instances.
:type rds_db_instance_arns: list
:param rds_db_instance_arns: An array containing the ARNs of the
instances to be described.
"""
params = {'StackId': stack_id, }
if rds_db_instance_arns is not None:
params['RdsDbInstanceArns'] = rds_db_instance_arns
return self.make_request(action='DescribeRdsDbInstances',
body=json.dumps(params))
def describe_service_errors(self, stack_id=None, instance_id=None,
service_error_ids=None):
"""
Describes AWS OpsWorks service errors.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: The stack ID. If you use this parameter,
`DescribeServiceErrors` returns descriptions of the errors
associated with the specified stack.
:type instance_id: string
:param instance_id: The instance ID. If you use this parameter,
`DescribeServiceErrors` returns descriptions of the errors
associated with the specified instance.
:type service_error_ids: list
:param service_error_ids: An array of service error IDs. If you use
this parameter, `DescribeServiceErrors` returns descriptions of the
specified errors. Otherwise, it returns a description of every
error.
"""
params = {}
if stack_id is not None:
params['StackId'] = stack_id
if instance_id is not None:
params['InstanceId'] = instance_id
if service_error_ids is not None:
params['ServiceErrorIds'] = service_error_ids
return self.make_request(action='DescribeServiceErrors',
body=json.dumps(params))
def describe_stack_provisioning_parameters(self, stack_id):
"""
Requests a description of a stack's provisioning parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the stack
or an attached policy that explicitly grants permissions. For
more information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID
"""
params = {'StackId': stack_id, }
return self.make_request(action='DescribeStackProvisioningParameters',
body=json.dumps(params))
def describe_stack_summary(self, stack_id):
"""
Describes the number of layers and apps in a specified stack,
and the number of instances in each state, such as
`running_setup` or `online`.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
"""
params = {'StackId': stack_id, }
return self.make_request(action='DescribeStackSummary',
body=json.dumps(params))
def describe_stacks(self, stack_ids=None):
"""
Requests a description of one or more stacks.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_ids: list
:param stack_ids: An array of stack IDs that specify the stacks to be
described. If you omit this parameter, `DescribeStacks` returns a
description of every stack.
"""
params = {}
if stack_ids is not None:
params['StackIds'] = stack_ids
return self.make_request(action='DescribeStacks',
body=json.dumps(params))
def describe_time_based_auto_scaling(self, instance_ids):
"""
Describes time-based auto scaling configurations for specified
instances.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type instance_ids: list
:param instance_ids: An array of instance IDs.
"""
params = {'InstanceIds': instance_ids, }
return self.make_request(action='DescribeTimeBasedAutoScaling',
body=json.dumps(params))
def describe_user_profiles(self, iam_user_arns=None):
"""
Describe specified users.
**Required Permissions**: To use this action, an IAM user must
have an attached policy that explicitly grants permissions.
For more information on user permissions, see `Managing User
Permissions`_.
:type iam_user_arns: list
:param iam_user_arns: An array of IAM user ARNs that identify the users
to be described.
"""
params = {}
if iam_user_arns is not None:
params['IamUserArns'] = iam_user_arns
return self.make_request(action='DescribeUserProfiles',
body=json.dumps(params))
def describe_volumes(self, instance_id=None, stack_id=None,
raid_array_id=None, volume_ids=None):
"""
Describes an instance's Amazon EBS volumes.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type instance_id: string
:param instance_id: The instance ID. If you use this parameter,
`DescribeVolumes` returns descriptions of the volumes associated
with the specified instance.
:type stack_id: string
:param stack_id: A stack ID. The action describes the stack's
registered Amazon EBS volumes.
:type raid_array_id: string
:param raid_array_id: The RAID array ID. If you use this parameter,
`DescribeVolumes` returns descriptions of the volumes associated
with the specified RAID array.
:type volume_ids: list
:param volume_ids: Am array of volume IDs. If you use this parameter,
`DescribeVolumes` returns descriptions of the specified volumes.
Otherwise, it returns a description of every volume.
"""
params = {}
if instance_id is not None:
params['InstanceId'] = instance_id
if stack_id is not None:
params['StackId'] = stack_id
if raid_array_id is not None:
params['RaidArrayId'] = raid_array_id
if volume_ids is not None:
params['VolumeIds'] = volume_ids
return self.make_request(action='DescribeVolumes',
body=json.dumps(params))
def detach_elastic_load_balancer(self, elastic_load_balancer_name,
layer_id):
"""
Detaches a specified Elastic Load Balancing instance from its
layer.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type elastic_load_balancer_name: string
:param elastic_load_balancer_name: The Elastic Load Balancing
instance's name.
:type layer_id: string
:param layer_id: The ID of the layer that the Elastic Load Balancing
instance is attached to.
"""
params = {
'ElasticLoadBalancerName': elastic_load_balancer_name,
'LayerId': layer_id,
}
return self.make_request(action='DetachElasticLoadBalancer',
body=json.dumps(params))
def disassociate_elastic_ip(self, elastic_ip):
"""
Disassociates an Elastic IP address from its instance. The
address remains registered with the stack. For more
information, see `Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type elastic_ip: string
:param elastic_ip: The Elastic IP address.
"""
params = {'ElasticIp': elastic_ip, }
return self.make_request(action='DisassociateElasticIp',
body=json.dumps(params))
def get_hostname_suggestion(self, layer_id):
"""
Gets a generated host name for the specified layer, based on
the current host name theme.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type layer_id: string
:param layer_id: The layer ID.
"""
params = {'LayerId': layer_id, }
return self.make_request(action='GetHostnameSuggestion',
body=json.dumps(params))
def reboot_instance(self, instance_id):
"""
Reboots a specified instance. For more information, see
`Starting, Stopping, and Rebooting Instances`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
"""
params = {'InstanceId': instance_id, }
return self.make_request(action='RebootInstance',
body=json.dumps(params))
def register_elastic_ip(self, elastic_ip, stack_id):
"""
Registers an Elastic IP address with a specified stack. An
address can be registered with only one stack at a time. If
the address is already registered, you must first deregister
it by calling DeregisterElasticIp. For more information, see
`Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type elastic_ip: string
:param elastic_ip: The Elastic IP address.
:type stack_id: string
:param stack_id: The stack ID.
"""
params = {'ElasticIp': elastic_ip, 'StackId': stack_id, }
return self.make_request(action='RegisterElasticIp',
body=json.dumps(params))
def register_instance(self, stack_id, hostname=None, public_ip=None,
private_ip=None, rsa_public_key=None,
rsa_public_key_fingerprint=None,
instance_identity=None):
"""
Registers instances with a specified stack that were created
outside of AWS OpsWorks.
We do not recommend using this action to register instances.
The complete registration operation has two primary steps,
installing the AWS OpsWorks agent on the instance and
registering the instance with the stack. `RegisterInstance`
handles only the second step. You should instead use the AWS
CLI `register` command, which performs the entire registration
operation.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The ID of the stack that the instance is to be
registered with.
:type hostname: string
:param hostname: The instance's hostname.
:type public_ip: string
:param public_ip: The instance's public IP address.
:type private_ip: string
:param private_ip: The instance's private IP address.
:type rsa_public_key: string
:param rsa_public_key: The instances public RSA key. This key is used
to encrypt communication between the instance and the service.
:type rsa_public_key_fingerprint: string
:param rsa_public_key_fingerprint: The instances public RSA key
fingerprint.
:type instance_identity: dict
:param instance_identity: An InstanceIdentity object that contains the
instance's identity.
"""
params = {'StackId': stack_id, }
if hostname is not None:
params['Hostname'] = hostname
if public_ip is not None:
params['PublicIp'] = public_ip
if private_ip is not None:
params['PrivateIp'] = private_ip
if rsa_public_key is not None:
params['RsaPublicKey'] = rsa_public_key
if rsa_public_key_fingerprint is not None:
params['RsaPublicKeyFingerprint'] = rsa_public_key_fingerprint
if instance_identity is not None:
params['InstanceIdentity'] = instance_identity
return self.make_request(action='RegisterInstance',
body=json.dumps(params))
def register_rds_db_instance(self, stack_id, rds_db_instance_arn,
db_user, db_password):
"""
Registers an Amazon RDS instance with a stack.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
:type rds_db_instance_arn: string
:param rds_db_instance_arn: The Amazon RDS instance's ARN.
:type db_user: string
:param db_user: The database's master user name.
:type db_password: string
:param db_password: The database password.
"""
params = {
'StackId': stack_id,
'RdsDbInstanceArn': rds_db_instance_arn,
'DbUser': db_user,
'DbPassword': db_password,
}
return self.make_request(action='RegisterRdsDbInstance',
body=json.dumps(params))
def register_volume(self, stack_id, ec_2_volume_id=None):
"""
Registers an Amazon EBS volume with a specified stack. A
volume can be registered with only one stack at a time. If the
volume is already registered, you must first deregister it by
calling DeregisterVolume. For more information, see `Resource
Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type ec_2_volume_id: string
:param ec_2_volume_id: The Amazon EBS volume ID.
:type stack_id: string
:param stack_id: The stack ID.
"""
params = {'StackId': stack_id, }
if ec_2_volume_id is not None:
params['Ec2VolumeId'] = ec_2_volume_id
return self.make_request(action='RegisterVolume',
body=json.dumps(params))
def set_load_based_auto_scaling(self, layer_id, enable=None,
up_scaling=None, down_scaling=None):
"""
Specify the load-based auto scaling configuration for a
specified layer. For more information, see `Managing Load with
Time-based and Load-based Instances`_.
To use load-based auto scaling, you must create a set of load-
based auto scaling instances. Load-based auto scaling operates
only on the instances from that set, so you must ensure that
you have created enough instances to handle the maximum
anticipated load.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type layer_id: string
:param layer_id: The layer ID.
:type enable: boolean
:param enable: Enables load-based auto scaling for the layer.
:type up_scaling: dict
:param up_scaling: An `AutoScalingThresholds` object with the upscaling
threshold configuration. If the load exceeds these thresholds for a
specified amount of time, AWS OpsWorks starts a specified number of
instances.
:type down_scaling: dict
:param down_scaling: An `AutoScalingThresholds` object with the
downscaling threshold configuration. If the load falls below these
thresholds for a specified amount of time, AWS OpsWorks stops a
specified number of instances.
"""
params = {'LayerId': layer_id, }
if enable is not None:
params['Enable'] = enable
if up_scaling is not None:
params['UpScaling'] = up_scaling
if down_scaling is not None:
params['DownScaling'] = down_scaling
return self.make_request(action='SetLoadBasedAutoScaling',
body=json.dumps(params))
def set_permission(self, stack_id, iam_user_arn, allow_ssh=None,
allow_sudo=None, level=None):
"""
Specifies a user's permissions. For more information, see
`Security and Permissions`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
:type iam_user_arn: string
:param iam_user_arn: The user's IAM ARN.
:type allow_ssh: boolean
:param allow_ssh: The user is allowed to use SSH to communicate with
the instance.
:type allow_sudo: boolean
:param allow_sudo: The user is allowed to use **sudo** to elevate
privileges.
:type level: string
:param level: The user's permission level, which must be set to one of
the following strings. You cannot set your own permissions level.
+ `deny`
+ `show`
+ `deploy`
+ `manage`
+ `iam_only`
For more information on the permissions associated with these levels,
see `Managing User Permissions`_
"""
params = {'StackId': stack_id, 'IamUserArn': iam_user_arn, }
if allow_ssh is not None:
params['AllowSsh'] = allow_ssh
if allow_sudo is not None:
params['AllowSudo'] = allow_sudo
if level is not None:
params['Level'] = level
return self.make_request(action='SetPermission',
body=json.dumps(params))
def set_time_based_auto_scaling(self, instance_id,
auto_scaling_schedule=None):
"""
Specify the time-based auto scaling configuration for a
specified instance. For more information, see `Managing Load
with Time-based and Load-based Instances`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
:type auto_scaling_schedule: dict
:param auto_scaling_schedule: An `AutoScalingSchedule` with the
instance schedule.
"""
params = {'InstanceId': instance_id, }
if auto_scaling_schedule is not None:
params['AutoScalingSchedule'] = auto_scaling_schedule
return self.make_request(action='SetTimeBasedAutoScaling',
body=json.dumps(params))
def start_instance(self, instance_id):
"""
Starts a specified instance. For more information, see
`Starting, Stopping, and Rebooting Instances`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
"""
params = {'InstanceId': instance_id, }
return self.make_request(action='StartInstance',
body=json.dumps(params))
def start_stack(self, stack_id):
"""
Starts a stack's instances.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
"""
params = {'StackId': stack_id, }
return self.make_request(action='StartStack',
body=json.dumps(params))
def stop_instance(self, instance_id):
"""
Stops a specified instance. When you stop a standard instance,
the data disappears and must be reinstalled when you restart
the instance. You can stop an Amazon EBS-backed instance
without losing data. For more information, see `Starting,
Stopping, and Rebooting Instances`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
"""
params = {'InstanceId': instance_id, }
return self.make_request(action='StopInstance',
body=json.dumps(params))
def stop_stack(self, stack_id):
"""
Stops a specified stack.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
"""
params = {'StackId': stack_id, }
return self.make_request(action='StopStack',
body=json.dumps(params))
def unassign_instance(self, instance_id):
"""
Unassigns a registered instance from all of it's layers. The
instance remains in the stack as an unassigned instance and
can be assigned to another layer, as needed. You cannot use
this action with instances that were created with AWS
OpsWorks.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
"""
params = {'InstanceId': instance_id, }
return self.make_request(action='UnassignInstance',
body=json.dumps(params))
def unassign_volume(self, volume_id):
"""
Unassigns an assigned Amazon EBS volume. The volume remains
registered with the stack. For more information, see `Resource
Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type volume_id: string
:param volume_id: The volume ID.
"""
params = {'VolumeId': volume_id, }
return self.make_request(action='UnassignVolume',
body=json.dumps(params))
def update_app(self, app_id, name=None, description=None,
data_sources=None, type=None, app_source=None,
domains=None, enable_ssl=None, ssl_configuration=None,
attributes=None, environment=None):
"""
Updates a specified app.
**Required Permissions**: To use this action, an IAM user must
have a Deploy or Manage permissions level for the stack, or an
attached policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type app_id: string
:param app_id: The app ID.
:type name: string
:param name: The app name.
:type description: string
:param description: A description of the app.
:type data_sources: list
:param data_sources: The app's data sources.
:type type: string
:param type: The app type.
:type app_source: dict
:param app_source: A `Source` object that specifies the app repository.
:type domains: list
:param domains: The app's virtual host settings, with multiple domains
separated by commas. For example: `'www.example.com, example.com'`
:type enable_ssl: boolean
:param enable_ssl: Whether SSL is enabled for the app.
:type ssl_configuration: dict
:param ssl_configuration: An `SslConfiguration` object with the SSL
configuration.
:type attributes: map
:param attributes: One or more user-defined key/value pairs to be added
to the stack attributes.
:type environment: list
:param environment:
An array of `EnvironmentVariable` objects that specify environment
variables to be associated with the app. You can specify up to ten
environment variables. After you deploy the app, these variables
are defined on the associated app server instances.
This parameter is supported only by Chef 11.10 stacks. If you have
specified one or more environment variables, you cannot modify the
stack's Chef version.
"""
params = {'AppId': app_id, }
if name is not None:
params['Name'] = name
if description is not None:
params['Description'] = description
if data_sources is not None:
params['DataSources'] = data_sources
if type is not None:
params['Type'] = type
if app_source is not None:
params['AppSource'] = app_source
if domains is not None:
params['Domains'] = domains
if enable_ssl is not None:
params['EnableSsl'] = enable_ssl
if ssl_configuration is not None:
params['SslConfiguration'] = ssl_configuration
if attributes is not None:
params['Attributes'] = attributes
if environment is not None:
params['Environment'] = environment
return self.make_request(action='UpdateApp',
body=json.dumps(params))
def update_elastic_ip(self, elastic_ip, name=None):
"""
Updates a registered Elastic IP address's name. For more
information, see `Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type elastic_ip: string
:param elastic_ip: The address.
:type name: string
:param name: The new name.
"""
params = {'ElasticIp': elastic_ip, }
if name is not None:
params['Name'] = name
return self.make_request(action='UpdateElasticIp',
body=json.dumps(params))
def update_instance(self, instance_id, layer_ids=None,
instance_type=None, auto_scaling_type=None,
hostname=None, os=None, ami_id=None,
ssh_key_name=None, architecture=None,
install_updates_on_boot=None, ebs_optimized=None):
"""
Updates a specified instance.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
:type layer_ids: list
:param layer_ids: The instance's layer IDs.
:type instance_type: string
:param instance_type: The instance type. AWS OpsWorks supports all
instance types except Cluster Compute, Cluster GPU, and High Memory
Cluster. For more information, see `Instance Families and Types`_.
The parameter values that you use to specify the various types are
in the API Name column of the Available Instance Types table.
:type auto_scaling_type: string
:param auto_scaling_type: For load-based or time-based instances, the
type.
:type hostname: string
:param hostname: The instance host name.
:type os: string
:param os: The instance's operating system, which must be set to one of
the following.
+ Standard operating systems: An Amazon Linux version such as `Amazon
Linux 2014.09`, `Ubuntu 12.04 LTS`, or `Ubuntu 14.04 LTS`.
+ Custom AMIs: `Custom`
The default option is the current Amazon Linux version, such as `Amazon
Linux 2014.09`. If you set this parameter to `Custom`, you must use
the CreateInstance action's AmiId parameter to specify the custom
AMI that you want to use. For more information on the standard
operating systems, see `Operating Systems`_For more information on
how to use custom AMIs with OpsWorks, see `Using Custom AMIs`_.
:type ami_id: string
:param ami_id:
A custom AMI ID to be used to create the instance. The AMI should be
based on one of the standard AWS OpsWorks AMIs: Amazon Linux,
Ubuntu 12.04 LTS, or Ubuntu 14.04 LTS. For more information, see
`Instances`_
If you specify a custom AMI, you must set `Os` to `Custom`.
:type ssh_key_name: string
:param ssh_key_name: The instance SSH key name.
:type architecture: string
:param architecture: The instance architecture. Instance types do not
necessarily support both architectures. For a list of the
architectures that are supported by the different instance types,
see `Instance Families and Types`_.
:type install_updates_on_boot: boolean
:param install_updates_on_boot:
Whether to install operating system and package updates when the
instance boots. The default value is `True`. To control when
updates are installed, set this value to `False`. You must then
update your instances manually by using CreateDeployment to run the
`update_dependencies` stack command or manually running `yum`
(Amazon Linux) or `apt-get` (Ubuntu) on the instances.
We strongly recommend using the default value of `True`, to ensure that
your instances have the latest security updates.
:type ebs_optimized: boolean
:param ebs_optimized: Whether this is an Amazon EBS-optimized instance.
"""
params = {'InstanceId': instance_id, }
if layer_ids is not None:
params['LayerIds'] = layer_ids
if instance_type is not None:
params['InstanceType'] = instance_type
if auto_scaling_type is not None:
params['AutoScalingType'] = auto_scaling_type
if hostname is not None:
params['Hostname'] = hostname
if os is not None:
params['Os'] = os
if ami_id is not None:
params['AmiId'] = ami_id
if ssh_key_name is not None:
params['SshKeyName'] = ssh_key_name
if architecture is not None:
params['Architecture'] = architecture
if install_updates_on_boot is not None:
params['InstallUpdatesOnBoot'] = install_updates_on_boot
if ebs_optimized is not None:
params['EbsOptimized'] = ebs_optimized
return self.make_request(action='UpdateInstance',
body=json.dumps(params))
def update_layer(self, layer_id, name=None, shortname=None,
attributes=None, custom_instance_profile_arn=None,
custom_security_group_ids=None, packages=None,
volume_configurations=None, enable_auto_healing=None,
auto_assign_elastic_ips=None,
auto_assign_public_ips=None, custom_recipes=None,
install_updates_on_boot=None,
use_ebs_optimized_instances=None,
lifecycle_event_configuration=None):
"""
Updates a specified layer.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type layer_id: string
:param layer_id: The layer ID.
:type name: string
:param name: The layer name, which is used by the console.
:type shortname: string
:param shortname: The layer short name, which is used internally by AWS
OpsWorksand by Chef. The short name is also used as the name for
the directory where your app files are installed. It can have a
maximum of 200 characters and must be in the following format:
/\A[a-z0-9\-\_\.]+\Z/.
:type attributes: map
:param attributes: One or more user-defined key/value pairs to be added
to the stack attributes.
:type custom_instance_profile_arn: string
:param custom_instance_profile_arn: The ARN of an IAM profile to be
used for all of the layer's EC2 instances. For more information
about IAM ARNs, see `Using Identifiers`_.
:type custom_security_group_ids: list
:param custom_security_group_ids: An array containing the layer's
custom security group IDs.
:type packages: list
:param packages: An array of `Package` objects that describe the
layer's packages.
:type volume_configurations: list
:param volume_configurations: A `VolumeConfigurations` object that
describes the layer's Amazon EBS volumes.
:type enable_auto_healing: boolean
:param enable_auto_healing: Whether to disable auto healing for the
layer.
:type auto_assign_elastic_ips: boolean
:param auto_assign_elastic_ips: Whether to automatically assign an
`Elastic IP address`_ to the layer's instances. For more
information, see `How to Edit a Layer`_.
:type auto_assign_public_ips: boolean
:param auto_assign_public_ips: For stacks that are running in a VPC,
whether to automatically assign a public IP address to the layer's
instances. For more information, see `How to Edit a Layer`_.
:type custom_recipes: dict
:param custom_recipes: A `LayerCustomRecipes` object that specifies the
layer's custom recipes.
:type install_updates_on_boot: boolean
:param install_updates_on_boot:
Whether to install operating system and package updates when the
instance boots. The default value is `True`. To control when
updates are installed, set this value to `False`. You must then
update your instances manually by using CreateDeployment to run the
`update_dependencies` stack command or manually running `yum`
(Amazon Linux) or `apt-get` (Ubuntu) on the instances.
We strongly recommend using the default value of `True`, to ensure that
your instances have the latest security updates.
:type use_ebs_optimized_instances: boolean
:param use_ebs_optimized_instances: Whether to use Amazon EBS-optimized
instances.
:type lifecycle_event_configuration: dict
:param lifecycle_event_configuration:
"""
params = {'LayerId': layer_id, }
if name is not None:
params['Name'] = name
if shortname is not None:
params['Shortname'] = shortname
if attributes is not None:
params['Attributes'] = attributes
if custom_instance_profile_arn is not None:
params['CustomInstanceProfileArn'] = custom_instance_profile_arn
if custom_security_group_ids is not None:
params['CustomSecurityGroupIds'] = custom_security_group_ids
if packages is not None:
params['Packages'] = packages
if volume_configurations is not None:
params['VolumeConfigurations'] = volume_configurations
if enable_auto_healing is not None:
params['EnableAutoHealing'] = enable_auto_healing
if auto_assign_elastic_ips is not None:
params['AutoAssignElasticIps'] = auto_assign_elastic_ips
if auto_assign_public_ips is not None:
params['AutoAssignPublicIps'] = auto_assign_public_ips
if custom_recipes is not None:
params['CustomRecipes'] = custom_recipes
if install_updates_on_boot is not None:
params['InstallUpdatesOnBoot'] = install_updates_on_boot
if use_ebs_optimized_instances is not None:
params['UseEbsOptimizedInstances'] = use_ebs_optimized_instances
if lifecycle_event_configuration is not None:
params['LifecycleEventConfiguration'] = lifecycle_event_configuration
return self.make_request(action='UpdateLayer',
body=json.dumps(params))
def update_my_user_profile(self, ssh_public_key=None):
"""
Updates a user's SSH public key.
**Required Permissions**: To use this action, an IAM user must
have self-management enabled or an attached policy that
explicitly grants permissions. For more information on user
permissions, see `Managing User Permissions`_.
:type ssh_public_key: string
:param ssh_public_key: The user's SSH public key.
"""
params = {}
if ssh_public_key is not None:
params['SshPublicKey'] = ssh_public_key
return self.make_request(action='UpdateMyUserProfile',
body=json.dumps(params))
def update_rds_db_instance(self, rds_db_instance_arn, db_user=None,
db_password=None):
"""
Updates an Amazon RDS instance.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type rds_db_instance_arn: string
:param rds_db_instance_arn: The Amazon RDS instance's ARN.
:type db_user: string
:param db_user: The master user name.
:type db_password: string
:param db_password: The database password.
"""
params = {'RdsDbInstanceArn': rds_db_instance_arn, }
if db_user is not None:
params['DbUser'] = db_user
if db_password is not None:
params['DbPassword'] = db_password
return self.make_request(action='UpdateRdsDbInstance',
body=json.dumps(params))
def update_stack(self, stack_id, name=None, attributes=None,
service_role_arn=None,
default_instance_profile_arn=None, default_os=None,
hostname_theme=None, default_availability_zone=None,
default_subnet_id=None, custom_json=None,
configuration_manager=None, chef_configuration=None,
use_custom_cookbooks=None, custom_cookbooks_source=None,
default_ssh_key_name=None,
default_root_device_type=None,
use_opsworks_security_groups=None):
"""
Updates a specified stack.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
:type name: string
:param name: The stack's new name.
:type attributes: map
:param attributes: One or more user-defined key/value pairs to be added
to the stack attributes.
:type service_role_arn: string
:param service_role_arn:
The stack AWS Identity and Access Management (IAM) role, which allows
AWS OpsWorks to work with AWS resources on your behalf. You must
set this parameter to the Amazon Resource Name (ARN) for an
existing IAM role. For more information about IAM ARNs, see `Using
Identifiers`_.
You must set this parameter to a valid service role ARN or the action
will fail; there is no default value. You can specify the stack's
current service role ARN, if you prefer, but you must do so
explicitly.
:type default_instance_profile_arn: string
:param default_instance_profile_arn: The ARN of an IAM profile that is
the default profile for all of the stack's EC2 instances. For more
information about IAM ARNs, see `Using Identifiers`_.
:type default_os: string
:param default_os: The stack's operating system, which must be set to
one of the following.
+ Standard operating systems: an Amazon Linux version such as `Amazon
Linux 2014.09`, `Ubuntu 12.04 LTS`, or `Ubuntu 14.04 LTS`.
+ Custom AMIs: `Custom`. You specify the custom AMI you want to use
when you create instances.
The default option is the current Amazon Linux version.
:type hostname_theme: string
:param hostname_theme: The stack's new host name theme, with spaces are
replaced by underscores. The theme is used to generate host names
for the stack's instances. By default, `HostnameTheme` is set to
`Layer_Dependent`, which creates host names by appending integers
to the layer's short name. The other themes are:
+ `Baked_Goods`
+ `Clouds`
+ `European_Cities`
+ `Fruits`
+ `Greek_Deities`
+ `Legendary_Creatures_from_Japan`
+ `Planets_and_Moons`
+ `Roman_Deities`
+ `Scottish_Islands`
+ `US_Cities`
+ `Wild_Cats`
To obtain a generated host name, call `GetHostNameSuggestion`, which
returns a host name based on the current theme.
:type default_availability_zone: string
:param default_availability_zone: The stack's default Availability
Zone, which must be in the specified region. For more information,
see `Regions and Endpoints`_. If you also specify a value for
`DefaultSubnetId`, the subnet must be in the same zone. For more
information, see CreateStack.
:type default_subnet_id: string
:param default_subnet_id: The stack's default VPC subnet ID. This
parameter is required if you specify a value for the `VpcId`
parameter. All instances are launched into this subnet unless you
specify otherwise when you create the instance. If you also specify
a value for `DefaultAvailabilityZone`, the subnet must be in that
zone. For information on default values and when this parameter is
required, see the `VpcId` parameter description.
:type custom_json: string
:param custom_json: A string that contains user-defined, custom JSON.
It is used to override the corresponding default stack
configuration JSON values. The string should be in the following
format and must escape characters such as '"'.:
`"{\"key1\": \"value1\", \"key2\": \"value2\",...}"`
For more information on custom JSON, see `Use Custom JSON to Modify the
Stack Configuration JSON`_.
:type configuration_manager: dict
:param configuration_manager: The configuration manager. When you clone
a stack we recommend that you use the configuration manager to
specify the Chef version, 0.9, 11.4, or 11.10. The default value is
currently 11.4.
:type chef_configuration: dict
:param chef_configuration: A `ChefConfiguration` object that specifies
whether to enable Berkshelf and the Berkshelf version on Chef 11.10
stacks. For more information, see `Create a New Stack`_.
:type use_custom_cookbooks: boolean
:param use_custom_cookbooks: Whether the stack uses custom cookbooks.
:type custom_cookbooks_source: dict
:param custom_cookbooks_source: Contains the information required to
retrieve an app or cookbook from a repository. For more
information, see `Creating Apps`_ or `Custom Recipes and
Cookbooks`_.
:type default_ssh_key_name: string
:param default_ssh_key_name: A default SSH key for the stack instances.
You can override this value when you create or update an instance.
:type default_root_device_type: string
:param default_root_device_type: The default root device type. This
value is used by default for all instances in the stack, but you
can override it when you create an instance. For more information,
see `Storage for the Root Device`_.
:type use_opsworks_security_groups: boolean
:param use_opsworks_security_groups: Whether to associate the AWS
OpsWorks built-in security groups with the stack's layers.
AWS OpsWorks provides a standard set of built-in security groups, one
for each layer, which are associated with layers by default.
`UseOpsworksSecurityGroups` allows you to instead provide your own
custom security groups. `UseOpsworksSecurityGroups` has the
following settings:
+ True - AWS OpsWorks automatically associates the appropriate built-in
security group with each layer (default setting). You can associate
additional security groups with a layer after you create it but you
cannot delete the built-in security group.
+ False - AWS OpsWorks does not associate built-in security groups with
layers. You must create appropriate EC2 security groups and
associate a security group with each layer that you create.
However, you can still manually associate a built-in security group
with a layer on creation; custom security groups are required only
for those layers that need custom settings.
For more information, see `Create a New Stack`_.
"""
params = {'StackId': stack_id, }
if name is not None:
params['Name'] = name
if attributes is not None:
params['Attributes'] = attributes
if service_role_arn is not None:
params['ServiceRoleArn'] = service_role_arn
if default_instance_profile_arn is not None:
params['DefaultInstanceProfileArn'] = default_instance_profile_arn
if default_os is not None:
params['DefaultOs'] = default_os
if hostname_theme is not None:
params['HostnameTheme'] = hostname_theme
if default_availability_zone is not None:
params['DefaultAvailabilityZone'] = default_availability_zone
if default_subnet_id is not None:
params['DefaultSubnetId'] = default_subnet_id
if custom_json is not None:
params['CustomJson'] = custom_json
if configuration_manager is not None:
params['ConfigurationManager'] = configuration_manager
if chef_configuration is not None:
params['ChefConfiguration'] = chef_configuration
if use_custom_cookbooks is not None:
params['UseCustomCookbooks'] = use_custom_cookbooks
if custom_cookbooks_source is not None:
params['CustomCookbooksSource'] = custom_cookbooks_source
if default_ssh_key_name is not None:
params['DefaultSshKeyName'] = default_ssh_key_name
if default_root_device_type is not None:
params['DefaultRootDeviceType'] = default_root_device_type
if use_opsworks_security_groups is not None:
params['UseOpsworksSecurityGroups'] = use_opsworks_security_groups
return self.make_request(action='UpdateStack',
body=json.dumps(params))
def update_user_profile(self, iam_user_arn, ssh_username=None,
ssh_public_key=None, allow_self_management=None):
"""
Updates a specified user profile.
**Required Permissions**: To use this action, an IAM user must
have an attached policy that explicitly grants permissions.
For more information on user permissions, see `Managing User
Permissions`_.
:type iam_user_arn: string
:param iam_user_arn: The user IAM ARN.
:type ssh_username: string
:param ssh_username: The user's SSH user name. The allowable characters
are [a-z], [A-Z], [0-9], '-', and '_'. If the specified name
includes other punctuation marks, AWS OpsWorks removes them. For
example, `my.name` will be changed to `myname`. If you do not
specify an SSH user name, AWS OpsWorks generates one from the IAM
user name.
:type ssh_public_key: string
:param ssh_public_key: The user's new SSH public key.
:type allow_self_management: boolean
:param allow_self_management: Whether users can specify their own SSH
public key through the My Settings page. For more information, see
`Managing User Permissions`_.
"""
params = {'IamUserArn': iam_user_arn, }
if ssh_username is not None:
params['SshUsername'] = ssh_username
if ssh_public_key is not None:
params['SshPublicKey'] = ssh_public_key
if allow_self_management is not None:
params['AllowSelfManagement'] = allow_self_management
return self.make_request(action='UpdateUserProfile',
body=json.dumps(params))
def update_volume(self, volume_id, name=None, mount_point=None):
"""
Updates an Amazon EBS volume's name or mount point. For more
information, see `Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type volume_id: string
:param volume_id: The volume ID.
:type name: string
:param name: The new name.
:type mount_point: string
:param mount_point: The new mount point.
"""
params = {'VolumeId': volume_id, }
if name is not None:
params['Name'] = name
if mount_point is not None:
params['MountPoint'] = mount_point
return self.make_request(action='UpdateVolume',
body=json.dumps(params))
    def make_request(self, action, body):
        """Sends an OpsWorks JSON-RPC request and decodes the response.

        :param action: The API action name, appended to the service's
            ``TargetPrefix`` to form the ``X-Amz-Target`` header.
        :param body: The JSON-serialized request body.
        :returns: The decoded JSON response dict, or None when the
            service replies 200 with an empty body.
        :raises: A mapped fault class from ``self._faults`` (or
            ``self.ResponseError``) on any non-200 response.
        """
        headers = {
            'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
            'Host': self.region.endpoint,
            'Content-Type': 'application/x-amz-json-1.1',
            'Content-Length': str(len(body)),
        }
        http_request = self.build_base_http_request(
            method='POST', path='/', auth_path='/', params={},
            headers=headers, data=body)
        # _mexe signs and executes the request, retrying up to 10 times.
        response = self._mexe(http_request, sender=None,
                              override_num_retries=10)
        response_body = response.read().decode('utf-8')
        boto.log.debug(response_body)
        if response.status == 200:
            # NOTE: falls through (returns None) on an empty 200 body.
            if response_body:
                return json.loads(response_body)
        else:
            # Error responses carry a '__type' field naming the fault; map
            # it to the corresponding exception class when we know it.
            json_body = json.loads(response_body)
            fault_name = json_body.get('__type', None)
            exception_class = self._faults.get(fault_name, self.ResponseError)
            raise exception_class(response.status, response.reason,
                                  body=json_body)
| apache-2.0 |
RedlineResearch/ardupilot | Tools/LogAnalyzer/tests/TestCompass.py | 66 | 6392 | from LogAnalyzer import Test,TestResult
import DataflashLog
from functools import reduce
import math
class TestCompass(Test):
    '''test for compass offsets and throttle interference'''

    def __init__(self):
        Test.__init__(self)
        self.name = "Compass"

    def run(self, logdata, verbose):
        '''Check offset parameters, logged offsets and mag field length.

        Sets self.result to GOOD/WARN/FAIL with an explanatory message.
        '''
        self.result = TestResult()
        self.result.status = TestResult.StatusType.GOOD

        def vec_len(x):
            # Euclidean length of a 3-vector.
            return math.sqrt(x[0]**2+x[1]**2+x[2]**2)

        def FAIL():
            self.result.status = TestResult.StatusType.FAIL

        def WARN():
            # Never downgrade an earlier FAIL to a WARN.
            if self.result.status != TestResult.StatusType.FAIL:
                self.result.status = TestResult.StatusType.WARN

        try:
            warnOffset = 300
            failOffset = 500

            # Check the stored compass offset parameters.
            param_offsets = (
                logdata.parameters["COMPASS_OFS_X"],
                logdata.parameters["COMPASS_OFS_Y"],
                logdata.parameters["COMPASS_OFS_Z"]
            )

            if vec_len(param_offsets) > failOffset:
                FAIL()
                self.result.statusMessage = "FAIL: Large compass offset params (X:%.2f, Y:%.2f, Z:%.2f)\n" % (param_offsets[0],param_offsets[1],param_offsets[2])
            elif vec_len(param_offsets) > warnOffset:
                WARN()
                self.result.statusMessage = "WARN: Large compass offset params (X:%.2f, Y:%.2f, Z:%.2f)\n" % (param_offsets[0],param_offsets[1],param_offsets[2])

            # Check the largest offset actually recorded in the MAG stream.
            if "MAG" in logdata.channels:
                max_log_offsets = zip(
                    map(lambda x: x[1],logdata.channels["MAG"]["OfsX"].listData),
                    map(lambda x: x[1],logdata.channels["MAG"]["OfsY"].listData),
                    map(lambda x: x[1],logdata.channels["MAG"]["OfsZ"].listData)
                )
                max_log_offsets = reduce(lambda x,y: x if vec_len(x) > vec_len(y) else y, max_log_offsets)
                if vec_len(max_log_offsets) > failOffset:
                    FAIL()
                    self.result.statusMessage += "FAIL: Large compass offset in MAG data (X:%.2f, Y:%.2f, Z:%.2f)\n" % (max_log_offsets[0],max_log_offsets[1],max_log_offsets[2])
                elif vec_len(max_log_offsets) > warnOffset:
                    WARN()
                    self.result.statusMessage += "WARN: Large compass offset in MAG data (X:%.2f, Y:%.2f, Z:%.2f)\n" % (max_log_offsets[0],max_log_offsets[1],max_log_offsets[2])

            # check for mag field length change, and length outside of recommended range
            if "MAG" in logdata.channels:
                percentDiffThresholdWARN = 0.25
                percentDiffThresholdFAIL = 0.35
                minMagFieldThreshold = 120.0
                maxMagFieldThreshold = 550.0
                index = 0
                length = len(logdata.channels["MAG"]["MagX"].listData)
                (minMagField, maxMagField) = (None,None)
                (minMagFieldLine, maxMagFieldLine) = (None,None)
                zerosFound = False
                while index<length:
                    mx = logdata.channels["MAG"]["MagX"].listData[index][1]
                    my = logdata.channels["MAG"]["MagY"].listData[index][1]
                    mz = logdata.channels["MAG"]["MagZ"].listData[index][1]
                    if ((mx==0) and (my==0) and (mz==0)): # sometimes they're zero, not sure why, same reason as why we get NaNs as offsets?
                        zerosFound = True
                    else:
                        mf = math.sqrt(mx*mx + my*my + mz*mz)
                        # BUG FIX: the previous code compared mf against the
                        # None-initialized min/max (always False/True on
                        # Python 2, TypeError on Python 3), so if the first
                        # sample was all zeros the minimum was never recorded
                        # and valid logs were reported as having no mag data.
                        # Guard against the unset case explicitly instead of
                        # special-casing index 0.
                        if minMagField is None or mf < minMagField:
                            minMagField = mf
                            minMagFieldLine = logdata.channels["MAG"]["MagX"].listData[index][0]
                        if maxMagField is None or mf > maxMagField:
                            maxMagField = mf
                            maxMagFieldLine = logdata.channels["MAG"]["MagX"].listData[index][0]
                    index += 1

                if minMagField is None:
                    FAIL()
                    self.result.statusMessage = self.result.statusMessage + "No valid mag data found\n"
                else:
                    percentDiff = (maxMagField-minMagField) / minMagField
                    if percentDiff > percentDiffThresholdFAIL:
                        FAIL()
                        self.result.statusMessage = self.result.statusMessage + "Large change in mag_field (%.2f%%)\n" % (percentDiff*100)
                    elif percentDiff > percentDiffThresholdWARN:
                        WARN()
                        self.result.statusMessage = self.result.statusMessage + "Moderate change in mag_field (%.2f%%)\n" % (percentDiff*100)
                    else:
                        self.result.statusMessage = self.result.statusMessage + "mag_field interference within limits (%.2f%%)\n" % (percentDiff*100)
                    if minMagField < minMagFieldThreshold:
                        self.result.statusMessage = self.result.statusMessage + "Min mag field length (%.2f) < recommended (%.2f)\n" % (minMagField,minMagFieldThreshold)
                    if maxMagField > maxMagFieldThreshold:
                        self.result.statusMessage = self.result.statusMessage + "Max mag field length (%.2f) > recommended (%.2f)\n" % (maxMagField,maxMagFieldThreshold)
                    if verbose:
                        self.result.statusMessage = self.result.statusMessage + "Min mag_field of %.2f on line %d\n" % (minMagField,minMagFieldLine)
                        self.result.statusMessage = self.result.statusMessage + "Max mag_field of %.2f on line %d\n" % (maxMagField,maxMagFieldLine)
                    if zerosFound:
                        if self.result.status == TestResult.StatusType.GOOD:
                            WARN()
                        self.result.statusMessage = self.result.statusMessage + "All zeros found in MAG X/Y/Z log data\n"
            else:
                self.result.statusMessage = self.result.statusMessage + "No MAG data, unable to test mag_field interference\n"

        except KeyError as e:
            self.result.status = TestResult.StatusType.FAIL
            self.result.statusMessage = str(e) + ' not found'
| gpl-3.0 |
mitchcapper/mythbox | resources/lib/tvrage/tvrage/feeds.py | 4 | 2734 | # Copyright (c) 2009, Christian Kreutzer
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from util import _fetch
from urllib2 import quote
try:
import xml.etree.cElementTree as et
except ImportError:
import xml.etree.ElementTree as et
BASE_URL = 'http://www.tvrage.com/feeds/%s.php?%s=%s'
def _fetch_xml(url, node=None):
    """Fetch *url* and parse the response body as XML.

    Returns the parsed document root as an ElementTree element, or, when
    *node* is given, the first matching child of the root (None if absent).
    """
    tree = et.parse(_fetch(url))
    root = tree.getroot()
    return root.find(node) if node else root
def search(show, node=None):
    """Query TVRage's quick-search feed for *show*."""
    url = BASE_URL % ('search', 'show', quote(show))
    return _fetch_xml(url, node)
def full_search(show, node=None):
    """Query TVRage's full-search feed for *show*."""
    url = BASE_URL % ('full_search', 'show', quote(show))
    return _fetch_xml(url, node)
def showinfo(sid, node=None):
    """Fetch the showinfo feed for the show with id *sid*."""
    url = BASE_URL % ('showinfo', 'sid', sid)
    return _fetch_xml(url, node)
def episode_list(sid, node=None):
    """Fetch the episode list feed for the show with id *sid*."""
    url = BASE_URL % ('episode_list', 'sid', sid)
    return _fetch_xml(url, node)
def full_show_info(sid, node=None):
    """Fetch the full show info feed for the show with id *sid*."""
    url = BASE_URL % ('full_show_info', 'sid', sid)
    return _fetch_xml(url, node)
| gpl-2.0 |
vadimtk/chrome4sdp | chrome/test/mini_installer/test_installer.py | 34 | 15601 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script tests the installer with test cases specified in the config file.
For each test case, it checks that the machine states after the execution of
each command match the expected machine states. For more details, take a look at
the design documentation at http://goo.gl/Q0rGM6
"""
import argparse
import datetime
import inspect
import json
import os
import subprocess
import sys
import time
import unittest
import _winreg
from variable_expander import VariableExpander
import verifier_runner
def LogMessage(message):
  """Logs a message to stderr, tagged with time and caller location.

  Args:
    message: The message string to be logged.
  """
  timestamp = datetime.datetime.now().strftime('%m%d/%H%M%S')
  # Report the *caller's* file and line, not this helper's.
  caller = inspect.getframeinfo(inspect.currentframe().f_back)
  sys.stderr.write('[%s:%s(%s)] %s\n' % (
      timestamp, os.path.basename(caller.filename), caller.lineno, message))
class Config:
  """Describes the machine states, actions, and test cases.

  Attributes:
    states: A dictionary mapping each state name to a property dictionary
        describing that state.
    actions: A dictionary mapping each action name to that action's command.
    tests: An array of test cases.
  """
  def __init__(self):
    # Populated by ParseConfigFile after construction.
    self.states, self.actions, self.tests = {}, {}, []
class InstallerTest(unittest.TestCase):
  """Tests a test case in the config file."""

  def __init__(self, name, test, config, variable_expander, quiet):
    """Constructor.

    Args:
      name: The name of this test.
      test: An array of alternating state names and action names, starting and
          ending with state names.
      config: The Config object.
      variable_expander: A VariableExpander object.
      quiet: If True, suppress per-action/per-state progress logging.
    """
    super(InstallerTest, self).__init__()
    self._name = name
    self._test = test
    self._config = config
    self._variable_expander = variable_expander
    self._quiet = quiet
    self._verifier_runner = verifier_runner.VerifierRunner()
    # Assume failure until runTest completes; tearDown consults this flag
    # to decide whether the machine needs to be cleaned up.
    self._clean_on_teardown = True

  def __str__(self):
    """Returns a string representing the test case.

    Returns:
      A string created by joining state names and action names together with
      ' -> ', for example, 'Test: clean -> install chrome -> chrome_installed'.
    """
    return '%s: %s\n' % (self._name, ' -> '.join(self._test))

  def id(self):
    """Returns the name of the test."""
    # Overridden from unittest.TestCase so that id() contains the name of the
    # test case from the config file in place of the name of this class's test
    # function.
    return unittest.TestCase.id(self).replace(self._testMethodName, self._name)

  def runTest(self):
    """Run the test case."""
    # |test| is an array of alternating state names and action names, starting
    # and ending with state names. Therefore, its length must be odd.
    self.assertEqual(1, len(self._test) % 2,
                     'The length of test array must be odd')

    state = self._test[0]
    self._VerifyState(state)

    # Starting at index 1, we loop through pairs of (action, state).
    for i in range(1, len(self._test), 2):
      action = self._test[i]
      if not self._quiet:
        LogMessage('Beginning action %s' % action)
      RunCommand(self._config.actions[action], self._variable_expander)
      if not self._quiet:
        LogMessage('Finished action %s' % action)

      state = self._test[i + 1]
      self._VerifyState(state)

    # If the test makes it here, it means it was successful, because RunCommand
    # and _VerifyState throw an exception on failure.
    self._clean_on_teardown = False

  def tearDown(self):
    """Cleans up the machine if the test case fails."""
    if self._clean_on_teardown:
      RunCleanCommand(True, self._variable_expander)

  def shortDescription(self):
    """Overridden from unittest.TestCase.

    We return None as the short description to suppress its printing.
    The default implementation of this method returns the docstring of the
    runTest method, which is not useful since it's the same for every test case.
    The description from the __str__ method is informative enough.
    """
    return None

  def _VerifyState(self, state):
    """Verifies that the current machine state matches a given state.

    Args:
      state: A state name.
    """
    if not self._quiet:
      LogMessage('Verifying state %s' % state)
    try:
      self._verifier_runner.VerifyAll(self._config.states[state],
                                      self._variable_expander)
    except AssertionError as e:
      # If an AssertionError occurs, we intercept it and add the state name
      # to the error message so that we know where the test fails.
      raise AssertionError("In state '%s', %s" % (state, e))
def RunCommand(command, variable_expander):
  """Runs the given command from the current file's directory.

  This function throws an Exception if the command returns with non-zero exit
  status.

  Args:
    command: A command to run. It is expanded using Expand.
    variable_expander: A VariableExpander object.
  """
  expanded_command = variable_expander.Expand(command)
  script_dir = os.path.dirname(os.path.abspath(__file__))
  # NOTE: shell=True is intentional here; config actions are shell snippets.
  exit_status = subprocess.call(expanded_command, shell=True, cwd=script_dir)
  if exit_status == 0:
    return
  raise Exception('Command %s returned non-zero exit status %s' % (
      expanded_command, exit_status))
def DeleteGoogleUpdateRegistration(system_level, registry_subkey,
                                   variable_expander):
  """Deletes Chrome's registration with Google Update.

  Removes the 'pv' (product version) value from the product's Google Update
  registry key, which is how Google Update decides the product is installed.

  Args:
    system_level: True if system-level Chrome is to be deleted.
    registry_subkey: The pre-expansion registry subkey for the product.
    variable_expander: A VariableExpander object.
  """
  root = (_winreg.HKEY_LOCAL_MACHINE if system_level
          else _winreg.HKEY_CURRENT_USER)
  key_name = variable_expander.Expand(registry_subkey)
  try:
    # KEY_WOW64_32KEY: the registration lives in the 32-bit registry view.
    key_handle = _winreg.OpenKey(root, key_name, 0,
                                 _winreg.KEY_SET_VALUE |
                                 _winreg.KEY_WOW64_32KEY)
    _winreg.DeleteValue(key_handle, 'pv')
  except WindowsError:
    # The key isn't present, so there is no value to delete.
    pass
def RunCleanCommand(force_clean, variable_expander):
  """Puts the machine in the clean state (i.e. Chrome not installed).

  Args:
    force_clean: A boolean indicating whether to force cleaning existing
        installations.
    variable_expander: A VariableExpander object.
  """
  # A list of (system_level, product_name, product_switch, registry_subkey)
  # tuples for the possible installed products.
  data = [
      (False, '$CHROME_LONG_NAME', '',
       '$CHROME_UPDATE_REGISTRY_SUBKEY'),
      (True, '$CHROME_LONG_NAME', '--system-level',
       '$CHROME_UPDATE_REGISTRY_SUBKEY'),
  ]
  if variable_expander.Expand('$SUPPORTS_SXS') == 'True':
    data.append((False, '$CHROME_LONG_NAME_SXS', '',
                 '$CHROME_UPDATE_REGISTRY_SUBKEY_SXS'))

  # When not forcing, let the uninstaller prompt interactively.
  interactive_option = '--interactive' if not force_clean else ''
  for system_level, product_name, product_switch, registry_subkey in data:
    command = ('python uninstall_chrome.py '
               '--chrome-long-name="%s" '
               '--no-error-if-absent %s %s' %
               (product_name, product_switch, interactive_option))
    RunCommand(command, variable_expander)
    # A forced clean also scrubs the product's Google Update registration.
    if force_clean:
      DeleteGoogleUpdateRegistration(system_level, registry_subkey,
                                     variable_expander)
def MergePropertyDictionaries(current_property, new_property):
  """Merges the new property dictionary into the current property dictionary.

  This is different from general dictionary merging in that, in case there are
  keys with the same name, we merge values together in the first level, and we
  override earlier values in the second level. For more details, take a look at
  http://goo.gl/uE0RoR

  Args:
    current_property: The property dictionary to be modified.
    new_property: The new property dictionary.
  """
  # BUG FIX: the previous implementation used iteritems() and
  # dict(a.items() + b.items()), both of which only work on Python 2
  # (dict views cannot be concatenated with '+' on Python 3). copy+update
  # is equivalent on both: on key collisions the value from |new_property|
  # overrides the earlier one.
  for key, value in new_property.items():
    if key not in current_property:
      current_property[key] = value
    else:
      assert(isinstance(current_property[key], dict) and
             isinstance(value, dict))
      merged = dict(current_property[key])
      merged.update(value)
      current_property[key] = merged
def FilterConditionalElem(elem, condition_name, variable_expander):
  """Returns True if a conditional element should be processed.

  Args:
    elem: A dictionary.
    condition_name: The name of the condition property in |elem|.
    variable_expander: A variable expander used to evaluate conditions.

  Returns:
    True if |elem| should be processed.
  """
  if condition_name not in elem:
    return True
  expanded_condition = variable_expander.Expand(elem[condition_name])
  # Evaluate with a minimal namespace so conditions can only reference the
  # boolean literals.
  return eval(expanded_condition,
              {'__builtins__': {'False': False, 'True': True}})
def ParsePropertyFiles(directory, filenames, variable_expander):
  """Parses an array of .prop files.

  Args:
    directory: The directory where the Config file and all Property files
        reside in.
    filenames: An array of Property filenames.
    variable_expander: A variable expander used to evaluate conditions.

  Returns:
    A property dictionary created by merging all property dictionaries specified
    in the array.
  """
  current_property = {}
  for filename in filenames:
    path = os.path.join(directory, filename)
    # BUG FIX: json.load(open(path)) leaked the file handle; close it
    # deterministically with a context manager.
    with open(path) as fp:
      new_property = json.load(fp)
    if not FilterConditionalElem(new_property, 'Condition', variable_expander):
      continue
    # Remove any Condition from the property dict before merging since it
    # serves no purpose from here on out.
    if 'Condition' in new_property:
      del new_property['Condition']
    MergePropertyDictionaries(current_property, new_property)
  return current_property
def ParseConfigFile(filename, variable_expander):
  """Parses a .config file.

  Args:
    filename: Path to the Config file.
    variable_expander: A VariableExpander used to evaluate test conditions.

  Returns:
    A Config object.
  """
  with open(filename, 'r') as fp:
    config_data = json.load(fp)
  directory = os.path.dirname(os.path.abspath(filename))

  config = Config()
  config.tests = config_data['tests']
  # Drop conditional tests that should not be run in the current configuration.
  config.tests = filter(lambda t: FilterConditionalElem(t, 'condition',
                                                        variable_expander),
                        config.tests)
  for state_name, state_property_filenames in config_data['states']:
    config.states[state_name] = ParsePropertyFiles(directory,
                                                   state_property_filenames,
                                                   variable_expander)
  for action_name, action_command in config_data['actions']:
    config.actions[action_name] = action_command
  return config
def main():
  """Parses arguments, builds the test suite, runs it and reports results.

  Returns:
    0 on success, 1 if any test failed.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument('--build-dir', default='out',
                      help='Path to main build directory (the parent of the '
                           'Release or Debug directory)')
  parser.add_argument('--target', default='Release',
                      help='Build target (Release or Debug)')
  parser.add_argument('--force-clean', action='store_true', default=False,
                      help='Force cleaning existing installations')
  parser.add_argument('-q', '--quiet', action='store_true', default=False,
                      help='Reduce test runner output')
  parser.add_argument('--write-full-results-to', metavar='FILENAME',
                      help='Path to write the list of full results to.')
  parser.add_argument('--config', metavar='FILENAME',
                      help='Path to test configuration file')
  parser.add_argument('test', nargs='*',
                      help='Name(s) of tests to run.')
  args = parser.parse_args()
  if not args.config:
    parser.error('missing mandatory --config FILENAME argument')

  mini_installer_path = os.path.join(args.build_dir, args.target,
                                     'mini_installer.exe')
  assert os.path.exists(mini_installer_path), ('Could not find file %s' %
                                               mini_installer_path)

  suite = unittest.TestSuite()

  # Set the env var used by mini_installer.exe to decide to not show UI.
  os.environ['MINI_INSTALLER_TEST'] = '1'

  variable_expander = VariableExpander(mini_installer_path)
  config = ParseConfigFile(args.config, variable_expander)

  # Start from a known machine state before queueing any tests.
  RunCleanCommand(args.force_clean, variable_expander)
  for test in config.tests:
    # If tests were specified via |tests|, their names are formatted like so:
    test_name = '%s/%s/%s' % (InstallerTest.__module__,
                              InstallerTest.__name__,
                              test['name'])
    if not args.test or test_name in args.test:
      suite.addTest(InstallerTest(test['name'], test['traversal'], config,
                                  variable_expander, args.quiet))

  verbosity = 2 if not args.quiet else 1
  result = unittest.TextTestRunner(verbosity=verbosity).run(suite)
  if args.write_full_results_to:
    # Emit results in the Chromium JSON test result format.
    with open(args.write_full_results_to, 'w') as fp:
      json.dump(_FullResults(suite, result, {}), fp, indent=2)
      fp.write('\n')
  return 0 if result.wasSuccessful() else 1
# TODO(dpranke): Find a way for this to be shared with the mojo and other tests.
# Character used to join/split hierarchical test names in the JSON results
# trie (see _AddPathToTrie and _FullResults).
TEST_SEPARATOR = '.'
def _FullResults(suite, result, metadata):
  """Convert the unittest results to the Chromium JSON test result format.

  This matches run-webkit-tests (the layout tests) and the flakiness dashboard.
  """
  full_results = {
      'interrupted': False,
      'path_delimiter': TEST_SEPARATOR,
      'version': 3,
      'seconds_since_epoch': time.time(),
  }
  # Each metadata entry is a 'key=value' string; later keys win.
  for md in metadata:
    key, val = md.split('=', 1)
    full_results[key] = val

  all_test_names = _AllTestNames(suite)
  failed_test_names = _FailedTestNames(result)

  full_results['num_failures_by_type'] = {
      'FAIL': len(failed_test_names),
      'PASS': len(all_test_names) - len(failed_test_names),
  }

  trie = {}
  for test_name in all_test_names:
    if test_name in failed_test_names:
      value = {'expected': 'PASS', 'actual': 'FAIL', 'is_unexpected': True}
    else:
      value = {'expected': 'PASS', 'actual': 'PASS'}
    _AddPathToTrie(trie, test_name, value)
  full_results['tests'] = trie

  return full_results
def _AllTestNames(suite):
test_names = []
# _tests is protected pylint: disable=W0212
for test in suite._tests:
if isinstance(test, unittest.suite.TestSuite):
test_names.extend(_AllTestNames(test))
else:
test_names.append(test.id())
return test_names
def _FailedTestNames(result):
return set(test.id() for test, _ in result.failures + result.errors)
def _AddPathToTrie(trie, path, value):
  """Insert *value* into the nested-dict *trie* at the TEST_SEPARATOR path.

  Intermediate components become (or reuse) nested dicts; the final
  component maps to *value*.
  """
  components = path.split(TEST_SEPARATOR)
  node = trie
  for component in components[:-1]:
    node = node.setdefault(component, {})
  node[components[-1]] = value
if __name__ == '__main__':
  # Propagate the runner's exit code (0 on success, 1 on failure).
  sys.exit(main())
| bsd-3-clause |
ozamiatin/glance | glance/db/sqlalchemy/models_artifacts.py | 5 | 12959 | # Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo_db.sqlalchemy import models
from oslo_utils import timeutils
from sqlalchemy import BigInteger
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy.ext import declarative
from sqlalchemy import ForeignKey
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import Numeric
from sqlalchemy.orm import backref
from sqlalchemy.orm import composite
from sqlalchemy.orm import relationship
from sqlalchemy import String
from sqlalchemy import Text
import glance.artifacts as ga
from glance.common import semver_db
from glance import i18n
from oslo_log import log as os_logging
# Declarative base shared by every artifact ORM model defined below.
BASE = declarative.declarative_base()
LOG = os_logging.getLogger(__name__)
# Translation marker for warning-level log messages.
_LW = i18n._LW
class ArtifactBase(models.ModelBase, models.TimestampMixin):
    """Base class for Artifact Models."""

    __table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}
    __table_initialized__ = False
    # Attributes callers must not set directly; managed by the ORM defaults.
    __protected_attributes__ = set([
        "created_at", "updated_at"])

    created_at = Column(DateTime, default=lambda: timeutils.utcnow(),
                        nullable=False)
    # updated_at is refreshed automatically on every UPDATE via onupdate.
    updated_at = Column(DateTime, default=lambda: timeutils.utcnow(),
                        nullable=False, onupdate=lambda: timeutils.utcnow())

    def save(self, session=None):
        # Imported here (not at module scope) to avoid a circular import
        # with the db api module.
        from glance.db.sqlalchemy import api as db_api
        super(ArtifactBase, self).save(session or db_api.get_session())

    def keys(self):
        # NOTE(review): exposes the raw instance __dict__, which can include
        # SQLAlchemy internal state -- confirm callers expect that.
        return self.__dict__.keys()

    def values(self):
        return self.__dict__.values()

    def items(self):
        return self.__dict__.items()

    def to_dict(self):
        """Return a plain dict of this row's mapped column values."""
        d = {}
        # ModelBase provides dict-style item access to mapped attributes.
        for c in self.__table__.columns:
            d[c.name] = self[c.name]
        return d
def _parse_property_type_value(prop, show_text_properties=True):
    """Return (type_name, value) for the first non-null typed column of *prop*.

    The typed value columns are probed in a fixed order; the matching
    column's name is returned without its '_value' suffix together with the
    stored value.  Returns (None, None) when every probed column is null.
    Text properties are skipped unless *show_text_properties* is true.
    """
    candidates = ('int_value', 'string_value', 'bool_value', 'numeric_value')
    if show_text_properties:
        candidates = candidates + ('text_value',)
    for column in candidates:
        value = getattr(prop, column)
        if value is not None:
            return column.rpartition('_')[0], value
    return None, None
class Artifact(BASE, ArtifactBase):
    """Top-level artifact row plus serialization to a plain dict."""

    __tablename__ = 'artifacts'
    __table_args__ = (
        Index('ix_artifact_name_and_version', 'name', 'version_prefix',
              'version_suffix'),
        Index('ix_artifact_type', 'type_name', 'type_version_prefix',
              'type_version_suffix'),
        Index('ix_artifact_state', 'state'),
        Index('ix_artifact_owner', 'owner'),
        Index('ix_artifact_visibility', 'visibility'),
        {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'})
    __protected_attributes__ = ArtifactBase.__protected_attributes__.union(
        set(['published_at', 'deleted_at']))

    # Random UUID primary key, generated client-side.
    id = Column(String(36), primary_key=True,
                default=lambda: str(uuid.uuid4()))
    name = Column(String(255), nullable=False)
    type_name = Column(String(255), nullable=False)
    # Semver values are stored split across prefix/suffix/meta columns and
    # recombined into a DBVersion composite attribute.
    type_version_prefix = Column(BigInteger, nullable=False)
    type_version_suffix = Column(String(255))
    type_version_meta = Column(String(255))
    type_version = composite(semver_db.DBVersion, type_version_prefix,
                             type_version_suffix, type_version_meta)
    version_prefix = Column(BigInteger, nullable=False)
    version_suffix = Column(String(255))
    version_meta = Column(String(255))
    version = composite(semver_db.DBVersion, version_prefix,
                        version_suffix, version_meta)
    description = Column(Text)
    visibility = Column(String(32), nullable=False)
    state = Column(String(32), nullable=False)
    owner = Column(String(255), nullable=False)
    published_at = Column(DateTime)
    deleted_at = Column(DateTime)

    def to_dict(self, show_level=ga.Showlevel.BASIC,
                show_text_properties=True):
        """Serialize the artifact, collapsing the split version columns.

        *show_level* controls how much detail is included: NONE returns
        only column data and tags; anything else also serializes the
        properties and blobs relationships.
        """
        d = super(Artifact, self).to_dict()
        # Replace the raw storage columns with the composed semver strings.
        d.pop('type_version_prefix')
        d.pop('type_version_suffix')
        d.pop('type_version_meta')
        d.pop('version_prefix')
        d.pop('version_suffix')
        d.pop('version_meta')
        d['type_version'] = str(self.type_version)
        d['version'] = str(self.version)
        tags = []
        for tag in self.tags:
            tags.append(tag.value)
        d['tags'] = tags
        if show_level == ga.Showlevel.NONE:
            return d
        properties = {}
        # sort properties so array elements come out in position order
        self.properties.sort(key=lambda elem: (elem.name, elem.position))
        for prop in self.properties:
            proptype, propvalue = _parse_property_type_value(
                prop, show_text_properties)
            if proptype is None:
                continue
            if prop.position is not None:
                # make array: a non-null position marks an array element
                # (linear scan with for/else -- equivalent to a membership
                # test on prop.name)
                for p in properties.keys():
                    if p == prop.name:
                        # add value to array
                        properties[p]['value'].append(dict(type=proptype,
                                                           value=propvalue))
                        break
                else:
                    # create new array
                    p = dict(type='array',
                             value=[])
                    p['value'].append(dict(type=proptype,
                                           value=propvalue))
                    properties[prop.name] = p
            else:
                # make scalar
                properties[prop.name] = dict(type=proptype,
                                             value=propvalue)
        d['properties'] = properties
        blobs = {}
        # sort blobs by position so multi-part blobs keep their order
        self.blobs.sort(key=lambda elem: elem.position)
        for blob in self.blobs:
            locations = []
            # sort locations
            blob.locations.sort(key=lambda elem: elem.position)
            for loc in blob.locations:
                locations.append(dict(value=loc.value,
                                      status=loc.status))
            # Blobs sharing a name accumulate into one list per name.
            if blob.name in blobs:
                blobs[blob.name].append(dict(size=blob.size,
                                             checksum=blob.checksum,
                                             locations=locations,
                                             item_key=blob.item_key))
            else:
                blobs[blob.name] = []
                blobs[blob.name].append(dict(size=blob.size,
                                             checksum=blob.checksum,
                                             locations=locations,
                                             item_key=blob.item_key))
        d['blobs'] = blobs
        return d
class ArtifactDependency(BASE, ArtifactBase):
    """Edge in the artifact dependency graph: *source* depends on *dest*.

    ``artifact_origin`` additionally records a third artifact id.
    NOTE(review): is_direct presumably distinguishes directly-declared
    dependencies from transitive ones -- confirm against the writers.
    """

    __tablename__ = 'artifact_dependencies'
    __table_args__ = (Index('ix_artifact_dependencies_source_id',
                            'artifact_source'),
                      Index('ix_artifact_dependencies_origin_id',
                            'artifact_origin'),
                      Index('ix_artifact_dependencies_dest_id',
                            'artifact_dest'),
                      Index('ix_artifact_dependencies_direct_dependencies',
                            'artifact_source', 'is_direct'),
                      {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'})

    id = Column(String(36), primary_key=True, nullable=False,
                default=lambda: str(uuid.uuid4()))
    artifact_source = Column(String(36), ForeignKey('artifacts.id'),
                             nullable=False)
    artifact_dest = Column(String(36), ForeignKey('artifacts.id'),
                           nullable=False)
    artifact_origin = Column(String(36), ForeignKey('artifacts.id'),
                             nullable=False)
    is_direct = Column(Boolean, nullable=False)
    position = Column(Integer)
    name = Column(String(36))

    # Deleting an artifact cascades to the dependencies it declares.
    source = relationship('Artifact',
                          backref=backref('dependencies', cascade="all, "
                                                          "delete"),
                          foreign_keys="ArtifactDependency.artifact_source")
    dest = relationship('Artifact',
                        foreign_keys="ArtifactDependency.artifact_dest")
    origin = relationship('Artifact',
                          foreign_keys="ArtifactDependency.artifact_origin")
class ArtifactTag(BASE, ArtifactBase):
    """Free-form string tag attached to an artifact."""

    __tablename__ = 'artifact_tags'
    __table_args__ = (Index('ix_artifact_tags_artifact_id', 'artifact_id'),
                      Index('ix_artifact_tags_artifact_id_tag_value',
                            'artifact_id', 'value'),
                      {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'},)

    id = Column(String(36), primary_key=True, nullable=False,
                default=lambda: str(uuid.uuid4()))
    artifact_id = Column(String(36), ForeignKey('artifacts.id'),
                         nullable=False)
    # Tags live and die with their artifact (delete-orphan cascade).
    artifact = relationship(Artifact,
                            backref=backref('tags',
                                            cascade="all, delete-orphan"))
    value = Column(String(255), nullable=False)
class ArtifactProperty(BASE, ArtifactBase):
    """Typed key/value property of an artifact.

    Exactly one of the *_value columns is expected to be non-null per row
    (see _parse_property_type_value).  A non-null ``position`` marks the
    row as one element of an array-valued property (see Artifact.to_dict).
    """

    __tablename__ = 'artifact_properties'
    __table_args__ = (
        Index('ix_artifact_properties_artifact_id', 'artifact_id'),
        Index('ix_artifact_properties_name', 'name'),
        {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'},)

    id = Column(String(36), primary_key=True, nullable=False,
                default=lambda: str(uuid.uuid4()))
    artifact_id = Column(String(36), ForeignKey('artifacts.id'),
                         nullable=False)
    artifact = relationship(Artifact,
                            backref=backref('properties',
                                            cascade="all, delete-orphan"))
    name = Column(String(255), nullable=False)
    string_value = Column(String(255))
    int_value = Column(Integer)
    numeric_value = Column(Numeric)
    bool_value = Column(Boolean)
    text_value = Column(Text)
    position = Column(Integer)
class ArtifactBlob(BASE, ArtifactBase):
    """Binary payload metadata for an artifact.

    Several rows may share a ``name``; ``position`` orders them and
    Artifact.to_dict groups them into one list per name.
    """

    __tablename__ = 'artifact_blobs'
    __table_args__ = (
        Index('ix_artifact_blobs_artifact_id', 'artifact_id'),
        Index('ix_artifact_blobs_name', 'name'),
        {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'},)

    id = Column(String(36), primary_key=True, nullable=False,
                default=lambda: str(uuid.uuid4()))
    artifact_id = Column(String(36), ForeignKey('artifacts.id'),
                         nullable=False)
    name = Column(String(255), nullable=False)
    # NOTE(review): the 329-char limit looks backend-specific -- confirm.
    item_key = Column(String(329))
    size = Column(BigInteger(), nullable=False)
    checksum = Column(String(32))
    position = Column(Integer)
    artifact = relationship(Artifact,
                            backref=backref('blobs',
                                            cascade="all, delete-orphan"))
class ArtifactBlobLocation(BASE, ArtifactBase):
    """Storage location for a blob; a blob may have several, ordered by
    ``position``."""

    __tablename__ = 'artifact_blob_locations'
    __table_args__ = (Index('ix_artifact_blob_locations_blob_id',
                            'blob_id'),
                      {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'})

    id = Column(String(36), primary_key=True, nullable=False,
                default=lambda: str(uuid.uuid4()))
    blob_id = Column(String(36), ForeignKey('artifact_blobs.id'),
                     nullable=False)
    value = Column(Text, nullable=False)
    position = Column(Integer)
    # New locations start out 'active'.
    status = Column(String(36), default='active', nullable=True)
    blob = relationship(ArtifactBlob,
                        backref=backref('locations',
                                        cascade="all, delete-orphan"))
def register_models(engine):
    """Create database tables for all models with the given engine."""
    for model in (Artifact, ArtifactTag, ArtifactProperty,
                  ArtifactBlob, ArtifactBlobLocation, ArtifactDependency):
        model.metadata.create_all(engine)
def unregister_models(engine):
    """Drop database tables for all models with the given engine."""
    # Reverse of the creation order so referencing tables go first.
    for model in (ArtifactDependency, ArtifactBlobLocation, ArtifactBlob,
                  ArtifactProperty, ArtifactTag, Artifact):
        model.metadata.drop_all(engine)
| apache-2.0 |
mfitzp/padua | setup.py | 1 | 1035 | from setuptools import setup, find_packages
version = '0.1.16'
setup(
name='padua',
version=version,
url='http://github.com/mfitzp/padua',
author='Martin Fitzpatrick',
author_email='martin.fitzpatrick@gmail.com',
description='A Python interface for Proteomic Data Analysis, working with MaxQuant & Perseus outputs',
license='MIT',
packages=find_packages(),
include_package_data=True,
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Topic :: Desktop Environment',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Widget Sets',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4'
],
install_requires=[
'numpy',
'scipy',
'matplotlib',
'pandas',
'statsmodels',
'matplotlib-venn',
'scikit-learn',
'requests',
'requests_toolbelt',
'adjustText'
]
)
| bsd-2-clause |
shaanlan/youtube-dl | youtube_dl/extractor/funnyordie.py | 64 | 2887 | from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..utils import ExtractorError
class FunnyOrDieIE(InfoExtractor):
    """Extractor for funnyordie.com video, embed and article pages."""

    _VALID_URL = r'https?://(?:www\.)?funnyordie\.com/(?P<type>embed|articles|videos)/(?P<id>[0-9a-f]+)(?:$|[?#/])'
    _TESTS = [{
        'url': 'http://www.funnyordie.com/videos/0732f586d7/heart-shaped-box-literal-video-version',
        'md5': 'bcd81e0c4f26189ee09be362ad6e6ba9',
        'info_dict': {
            'id': '0732f586d7',
            'ext': 'mp4',
            'title': 'Heart-Shaped Box: Literal Video Version',
            'description': 'md5:ea09a01bc9a1c46d9ab696c01747c338',
            'thumbnail': 're:^http:.*\.jpg$',
        },
    }, {
        'url': 'http://www.funnyordie.com/embed/e402820827',
        'info_dict': {
            'id': 'e402820827',
            'ext': 'mp4',
            'title': 'Please Use This Song (Jon Lajoie)',
            'description': 'Please use this to sell something. www.jonlajoie.com',
            'thumbnail': 're:^http:.*\.jpg$',
        },
    }, {
        'url': 'http://www.funnyordie.com/articles/ebf5e34fc8/10-hours-of-walking-in-nyc-as-a-man',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        webpage = self._download_webpage(url, video_id)
        # Collect (url-prefix, extension) pairs from the <source> tags.
        links = re.findall(r'<source src="([^"]+/v)[^"]+\.([^"]+)" type=\'video', webpage)
        if not links:
            raise ExtractorError('No media links available for %s' % video_id)
        # Sort so that mp4 sources come last (higher format preference).
        links.sort(key=lambda link: 1 if link[1] == 'mp4' else 0)
        # Available bitrates are encoded in the csmil manifest filename,
        # e.g. ".../v,400,700,.mp4.csmil".
        bitrates = self._html_search_regex(r'<source src="[^"]+/v,((?:\d+,)+)\.mp4\.csmil', webpage, 'video bitrates')
        bitrates = [int(b) for b in bitrates.rstrip(',').split(',')]
        bitrates.sort()
        # One format per (bitrate, container) combination.
        formats = []
        for bitrate in bitrates:
            for link in links:
                formats.append({
                    'url': self._proto_relative_url('%s%d.%s' % (link[0], bitrate, link[1])),
                    'format_id': '%s-%d' % (link[1], bitrate),
                    'vbr': bitrate,
                })
        # Subtitles come from <track kind="captions"> tags; the extension
        # is taken from the last path component of the src attribute.
        subtitles = {}
        for src, src_lang in re.findall(r'<track kind="captions" src="([^"]+)" srclang="([^"]+)"', webpage):
            subtitles[src_lang] = [{
                'ext': src.split('/')[-1],
                'url': 'http://www.funnyordie.com%s' % src,
            }]
        # Title/description/thumbnail live in an embedded Facebook post blob.
        post_json = self._search_regex(
            r'fb_post\s*=\s*(\{.*?\});', webpage, 'post details')
        post = json.loads(post_json)
        return {
            'id': video_id,
            'title': post['name'],
            'description': post.get('description'),
            'thumbnail': post.get('picture'),
            'formats': formats,
            'subtitles': subtitles,
        }
| unlicense |
PlayUAV/MissionPlanner | Lib/distutils/command/sdist.py | 42 | 18344 | """distutils.command.sdist
Implements the Distutils 'sdist' command (create a source distribution)."""
__revision__ = "$Id$"
import os
import string
import sys
from glob import glob
from warnings import warn
from distutils.core import Command
from distutils import dir_util, dep_util, file_util, archive_util
from distutils.text_file import TextFile
from distutils.errors import (DistutilsPlatformError, DistutilsOptionError,
DistutilsTemplateError)
from distutils.filelist import FileList
from distutils import log
from distutils.util import convert_path
def show_formats():
    """Print all possible values for the 'formats' option (used by
    the "--help-formats" command-line option).
    """
    from distutils.fancy_getopt import FancyGetopt
    from distutils.archive_util import ARCHIVE_FORMATS
    # Build pseudo-option triples ("formats=<name>", no short option,
    # description) so FancyGetopt can render them as a help screen.
    entries = sorted(("formats=" + name, None, spec[2])
                     for name, spec in ARCHIVE_FORMATS.items())
    FancyGetopt(entries).print_help(
        "List of available source distribution formats:")
class sdist(Command):
    """Command that builds a source distribution archive.

    Collects the file list (defaults + MANIFEST.in template + pruning),
    writes the MANIFEST, mirrors the files into a release tree and packs
    that tree into one archive per requested format.
    """

    description = "create a source distribution (tarball, zip file, etc.)"

    def checking_metadata(self):
        """Callable used for the check sub-command.

        Placed here so user_options can view it"""
        return self.metadata_check

    user_options = [
        ('template=', 't',
         "name of manifest template file [default: MANIFEST.in]"),
        ('manifest=', 'm',
         "name of manifest file [default: MANIFEST]"),
        ('use-defaults', None,
         "include the default file set in the manifest "
         "[default; disable with --no-defaults]"),
        ('no-defaults', None,
         "don't include the default file set"),
        ('prune', None,
         "specifically exclude files/directories that should not be "
         "distributed (build tree, RCS/CVS dirs, etc.) "
         "[default; disable with --no-prune]"),
        ('no-prune', None,
         "don't automatically exclude anything"),
        ('manifest-only', 'o',
         "just regenerate the manifest and then stop "
         "(implies --force-manifest)"),
        ('force-manifest', 'f',
         "forcibly regenerate the manifest and carry on as usual. "
         "Deprecated: now the manifest is always regenerated."),
        ('formats=', None,
         "formats for source distribution (comma-separated list)"),
        ('keep-temp', 'k',
         "keep the distribution tree around after creating " +
         "archive file(s)"),
        ('dist-dir=', 'd',
         "directory to put the source distribution archive(s) in "
         "[default: dist]"),
        ('metadata-check', None,
         "Ensure that all required elements of meta-data "
         "are supplied. Warn if any missing. [default]"),
        ('owner=', 'u',
         "Owner name used when creating a tar file [default: current user]"),
        ('group=', 'g',
         "Group name used when creating a tar file [default: current group]"),
        ]

    boolean_options = ['use-defaults', 'prune',
                       'manifest-only', 'force-manifest',
                       'keep-temp', 'metadata-check']

    help_options = [
        ('help-formats', None,
         "list available distribution formats", show_formats),
        ]

    # Negative counterparts of the boolean options above.
    negative_opt = {'no-defaults': 'use-defaults',
                    'no-prune': 'prune' }

    # Default archive format per platform family.
    default_format = {'posix': 'gztar',
                      'nt': 'zip' }

    sub_commands = [('check', checking_metadata)]

    def initialize_options(self):
        # 'template' and 'manifest' are, respectively, the names of
        # the manifest template and manifest file.
        self.template = None
        self.manifest = None

        # 'use_defaults': if true, we will include the default file set
        # in the manifest
        self.use_defaults = 1
        self.prune = 1

        self.manifest_only = 0
        self.force_manifest = 0

        self.formats = None
        self.keep_temp = 0
        self.dist_dir = None

        self.archive_files = None
        self.metadata_check = 1
        self.owner = None
        self.group = None

    def finalize_options(self):
        if self.manifest is None:
            self.manifest = "MANIFEST"
        if self.template is None:
            self.template = "MANIFEST.in"

        self.ensure_string_list('formats')
        # Fall back to the platform's native archive format.
        if self.formats is None:
            try:
                self.formats = [self.default_format[os.name]]
            except KeyError:
                raise DistutilsPlatformError, \
                      "don't know how to create source distributions " + \
                      "on platform %s" % os.name

        bad_format = archive_util.check_archive_formats(self.formats)
        if bad_format:
            raise DistutilsOptionError, \
                  "unknown archive format '%s'" % bad_format

        if self.dist_dir is None:
            self.dist_dir = "dist"

    def run(self):
        # 'filelist' contains the list of files that will make up the
        # manifest
        self.filelist = FileList()

        # Run sub commands
        for cmd_name in self.get_sub_commands():
            self.run_command(cmd_name)

        # Do whatever it takes to get the list of files to process
        # (process the manifest template, read an existing manifest,
        # whatever).  File list is accumulated in 'self.filelist'.
        self.get_file_list()

        # If user just wanted us to regenerate the manifest, stop now.
        if self.manifest_only:
            return

        # Otherwise, go ahead and create the source distribution tarball,
        # or zipfile, or whatever.
        self.make_distribution()

    def check_metadata(self):
        """Deprecated API."""
        warn("distutils.command.sdist.check_metadata is deprecated, \
              use the check command instead", PendingDeprecationWarning)
        check = self.distribution.get_command_obj('check')
        check.ensure_finalized()
        check.run()

    def get_file_list(self):
        """Figure out the list of files to include in the source
        distribution, and put it in 'self.filelist'.  This might involve
        reading the manifest template (and writing the manifest), or just
        reading the manifest, or just using the default file set -- it all
        depends on the user's options.
        """
        # new behavior:
        # the file list is recalculated everytime because
        # even if MANIFEST.in or setup.py are not changed
        # the user might have added some files in the tree that
        # need to be included.
        #
        #  This makes --force the default and only behavior.
        template_exists = os.path.isfile(self.template)
        if not template_exists:
            self.warn(("manifest template '%s' does not exist " +
                       "(using default file list)") %
                      self.template)
        self.filelist.findall()

        if self.use_defaults:
            self.add_defaults()

        if template_exists:
            self.read_template()

        if self.prune:
            self.prune_file_list()

        self.filelist.sort()
        self.filelist.remove_duplicates()
        self.write_manifest()

    def add_defaults(self):
        """Add all the default files to self.filelist:
          - README or README.txt
          - setup.py
          - test/test*.py
          - all pure Python modules mentioned in setup script
          - all files pointed by package_data (build_py)
          - all files defined in data_files.
          - all files defined as scripts.
          - all C sources listed as part of extensions or C libraries
            in the setup script (doesn't catch C headers!)
        Warns if (README or README.txt) or setup.py are missing; everything
        else is optional.
        """
        standards = [('README', 'README.txt'), self.distribution.script_name]
        for fn in standards:
            if isinstance(fn, tuple):
                # A tuple means "any one of these satisfies the standard".
                alts = fn
                got_it = 0
                for fn in alts:
                    if os.path.exists(fn):
                        got_it = 1
                        self.filelist.append(fn)
                        break

                if not got_it:
                    self.warn("standard file not found: should have one of " +
                              string.join(alts, ', '))
            else:
                if os.path.exists(fn):
                    self.filelist.append(fn)
                else:
                    self.warn("standard file '%s' not found" % fn)

        optional = ['test/test*.py', 'setup.cfg']
        for pattern in optional:
            files = filter(os.path.isfile, glob(pattern))
            if files:
                self.filelist.extend(files)

        # build_py is used to get:
        #  - python modules
        #  - files defined in package_data
        build_py = self.get_finalized_command('build_py')

        # getting python files
        if self.distribution.has_pure_modules():
            self.filelist.extend(build_py.get_source_files())

        # getting package_data files
        # (computed in build_py.data_files by build_py.finalize_options)
        for pkg, src_dir, build_dir, filenames in build_py.data_files:
            for filename in filenames:
                self.filelist.append(os.path.join(src_dir, filename))

        # getting distribution.data_files
        if self.distribution.has_data_files():
            for item in self.distribution.data_files:
                if isinstance(item, str): # plain file
                    item = convert_path(item)
                    if os.path.isfile(item):
                        self.filelist.append(item)
                else:    # a (dirname, filenames) tuple
                    dirname, filenames = item
                    for f in filenames:
                        f = convert_path(f)
                        if os.path.isfile(f):
                            self.filelist.append(f)

        if self.distribution.has_ext_modules():
            build_ext = self.get_finalized_command('build_ext')
            self.filelist.extend(build_ext.get_source_files())

        if self.distribution.has_c_libraries():
            build_clib = self.get_finalized_command('build_clib')
            self.filelist.extend(build_clib.get_source_files())

        if self.distribution.has_scripts():
            build_scripts = self.get_finalized_command('build_scripts')
            self.filelist.extend(build_scripts.get_source_files())

    def read_template(self):
        """Read and parse manifest template file named by self.template.

        (usually "MANIFEST.in") The parsing and processing is done by
        'self.filelist', which updates itself accordingly.
        """
        log.info("reading manifest template '%s'", self.template)
        template = TextFile(self.template,
                            strip_comments=1,
                            skip_blanks=1,
                            join_lines=1,
                            lstrip_ws=1,
                            rstrip_ws=1,
                            collapse_join=1)

        while 1:
            line = template.readline()
            if line is None:            # end of file
                break

            try:
                self.filelist.process_template_line(line)
            except DistutilsTemplateError, msg:
                # A bad template line is reported but does not abort the run.
                self.warn("%s, line %d: %s" % (template.filename,
                                               template.current_line,
                                               msg))

    def prune_file_list(self):
        """Prune off branches that might slip into the file list as created
        by 'read_template()', but really don't belong there:
          * the build tree (typically "build")
          * the release tree itself (only an issue if we ran "sdist"
            previously with --keep-temp, or it aborted)
          * any RCS, CVS, .svn, .hg, .git, .bzr, _darcs directories
        """
        build = self.get_finalized_command('build')
        base_dir = self.distribution.get_fullname()

        self.filelist.exclude_pattern(None, prefix=build.build_base)
        self.filelist.exclude_pattern(None, prefix=base_dir)

        # pruning out vcs directories
        # both separators are used under win32
        if sys.platform == 'win32':
            seps = r'/|\\'
        else:
            seps = '/'

        vcs_dirs = ['RCS', 'CVS', r'\.svn', r'\.hg', r'\.git', r'\.bzr',
                    '_darcs']
        vcs_ptrn = r'(^|%s)(%s)(%s).*' % (seps, '|'.join(vcs_dirs), seps)
        self.filelist.exclude_pattern(vcs_ptrn, is_regex=1)

    def write_manifest(self):
        """Write the file list in 'self.filelist' (presumably as filled in
        by 'add_defaults()' and 'read_template()') to the manifest file
        named by 'self.manifest'.
        """
        # Never clobber a manifest the user maintains by hand; a generated
        # one is recognized by its marker first line.
        if os.path.isfile(self.manifest):
            fp = open(self.manifest)
            try:
                first_line = fp.readline()
            finally:
                fp.close()

            if first_line != '# file GENERATED by distutils, do NOT edit\n':
                log.info("not writing to manually maintained "
                         "manifest file '%s'" % self.manifest)
                return

        content = self.filelist.files[:]
        content.insert(0, '# file GENERATED by distutils, do NOT edit')
        self.execute(file_util.write_file, (self.manifest, content),
                     "writing manifest file '%s'" % self.manifest)

    def read_manifest(self):
        """Read the manifest file (named by 'self.manifest') and use it to
        fill in 'self.filelist', the list of files to include in the source
        distribution.
        """
        log.info("reading manifest file '%s'", self.manifest)
        manifest = open(self.manifest)
        while 1:
            line = manifest.readline()
            if line == '':              # end of file
                break
            if line[-1] == '\n':
                line = line[0:-1]
            self.filelist.append(line)
        manifest.close()

    def make_release_tree(self, base_dir, files):
        """Create the directory tree that will become the source
        distribution archive.  All directories implied by the filenames in
        'files' are created under 'base_dir', and then we hard link or copy
        (if hard linking is unavailable) those files into place.
        Essentially, this duplicates the developer's source tree, but in a
        directory named after the distribution, containing only the files
        to be distributed.
        """
        # Create all the directories under 'base_dir' necessary to
        # put 'files' there; the 'mkpath()' is just so we don't die
        # if the manifest happens to be empty.
        self.mkpath(base_dir)
        dir_util.create_tree(base_dir, files, dry_run=self.dry_run)

        # And walk over the list of files, either making a hard link (if
        # os.link exists) to each one that doesn't already exist in its
        # corresponding location under 'base_dir', or copying each file
        # that's out-of-date in 'base_dir'.  (Usually, all files will be
        # out-of-date, because by default we blow away 'base_dir' when
        # we're done making the distribution archives.)

        if hasattr(os, 'link'):        # can make hard links on this system
            link = 'hard'
            msg = "making hard links in %s..." % base_dir
        else:                           # nope, have to copy
            link = None
            msg = "copying files to %s..." % base_dir

        if not files:
            log.warn("no files to distribute -- empty manifest?")
        else:
            log.info(msg)
        for file in files:
            if not os.path.isfile(file):
                log.warn("'%s' not a regular file -- skipping" % file)
            else:
                dest = os.path.join(base_dir, file)
                self.copy_file(file, dest, link=link)

        self.distribution.metadata.write_pkg_info(base_dir)

    def make_distribution(self):
        """Create the source distribution(s).  First, we create the release
        tree with 'make_release_tree()'; then, we create all required
        archive files (according to 'self.formats') from the release tree.
        Finally, we clean up by blowing away the release tree (unless
        'self.keep_temp' is true).  The list of archive files created is
        stored so it can be retrieved later by 'get_archive_files()'.
        """
        # Don't warn about missing meta-data here -- should be (and is!)
        # done elsewhere.
        base_dir = self.distribution.get_fullname()
        base_name = os.path.join(self.dist_dir, base_dir)

        self.make_release_tree(base_dir, self.filelist.files)
        archive_files = []              # remember names of files we create
        # tar archive must be created last to avoid overwrite and remove
        if 'tar' in self.formats:
            self.formats.append(self.formats.pop(self.formats.index('tar')))

        for fmt in self.formats:
            file = self.make_archive(base_name, fmt, base_dir=base_dir,
                                     owner=self.owner, group=self.group)
            archive_files.append(file)
            self.distribution.dist_files.append(('sdist', '', file))

        self.archive_files = archive_files

        if not self.keep_temp:
            dir_util.remove_tree(base_dir, dry_run=self.dry_run)

    def get_archive_files(self):
        """Return the list of archive files created when the command
        was run, or None if the command hasn't run yet.
        """
        return self.archive_files
| gpl-3.0 |
Chetox/RCode | Cannon_Basico/test/join_tests.py | 2 | 5402 | # -*- mode:python; coding:utf-8; tab-width:4 -*-
from unittest import TestCase
from hamcrest import assert_that, is_
from matrix_utils import matrix_join
from common import M2, M3, M4, M6, M8
class MatrixJoinTests(TestCase):
    """Checks that matrix_join reassembles a row-major tuple of square
    blocks back into the full matrix, for several block/matrix sizes."""

    def test_join_4_2x2_blocks_in_4x4_matrix(self):
        # given: four quadrant blocks in row-major order
        A0 = M2(1, 2,
                5, 6)
        A1 = M2(3, 4,
                7, 8)
        A2 = M2(9, 10,
                13, 14)
        A3 = M2(11, 12,
                15, 16)
        blocks = (A0, A1, A2, A3)

        # when
        actual = matrix_join(blocks)

        # then: elements appear in natural reading order
        expected = M4(1, 2, 3, 4,
                      5, 6, 7, 8,
                      9, 10, 11, 12,
                      13, 14, 15, 16)

        assert_that(actual, is_(expected))

    def test_join_9_2x2_blocks_in_6x6_matrix(self):
        # given: a 3x3 grid of 2x2 blocks
        A0 = M2(1, 2,
                7, 8)
        A1 = M2(3, 4,
                9, 10)
        A2 = M2(5, 6,
                11, 12)
        A3 = M2(13, 14,
                19, 20)
        A4 = M2(15, 16,
                21, 22)
        A5 = M2(17, 18,
                23, 24)
        A6 = M2(25, 26,
                31, 32)
        A7 = M2(27, 28,
                33, 34)
        A8 = M2(29, 30,
                35, 36)
        blocks = (A0, A1, A2,A3, A4, A5, A6, A7, A8)

        # when
        actual = matrix_join(blocks)

        # then
        expected = M6(1, 2, 3, 4, 5, 6,
                      7, 8, 9, 10, 11, 12,
                      13, 14, 15, 16, 17, 18,
                      19, 20, 21, 22, 23, 24,
                      25, 26, 27, 28, 29, 30,
                      31, 32, 33, 34, 35, 36)

        assert_that(actual, is_(expected))

    def test_join_4_3x3_blocks_in_6x6_matrix(self):
        # given: four 3x3 quadrant blocks
        A0 = M3(1, 2, 3,
                7, 8, 9,
                13, 14, 15)
        A1 = M3(4, 5, 6,
                10, 11, 12,
                16, 17, 18)
        A2 = M3(19, 20, 21,
                25, 26, 27,
                31, 32, 33)
        A3 = M3(22, 23, 24,
                28, 29, 30,
                34, 35, 36)
        blocks = (A0, A1, A2, A3)

        # when
        actual = matrix_join(blocks)

        # then
        expected = M6(1, 2, 3, 4, 5, 6,
                      7, 8, 9, 10, 11, 12,
                      13, 14, 15, 16, 17, 18,
                      19, 20, 21, 22, 23, 24,
                      25, 26, 27, 28, 29, 30,
                      31, 32, 33, 34, 35, 36)

        assert_that(actual, is_(expected))

    def test_join_4_4x4_blocks_in_8x8_matrix(self):
        # given: four 4x4 quadrant blocks
        A0 = M4(1, 2, 3, 4,
                9, 10, 11, 12,
                17, 18, 19, 20,
                25, 26, 27, 28)
        A1 = M4(5, 6, 7, 8,
                13, 14, 15, 16,
                21, 22, 23, 24,
                29, 30, 31, 32)
        A2 = M4(33, 34, 35, 36,
                41, 42, 43, 44,
                49, 50, 51, 52,
                57, 58, 59, 60)
        A3 = M4(37, 38, 39, 40,
                45, 46, 47, 48,
                53, 54, 55, 56,
                61, 62, 63, 64)
        blocks = (A0, A1, A2, A3)

        # when
        actual = matrix_join(blocks)

        # then
        expected = M8(1, 2, 3, 4, 5, 6, 7, 8,
                      9, 10, 11, 12, 13, 14, 15, 16,
                      17, 18, 19, 20, 21, 22, 23, 24,
                      25, 26, 27, 28, 29, 30, 31, 32,
                      33, 34, 35, 36, 37, 38, 39, 40,
                      41, 42, 43, 44, 45, 46, 47, 48,
                      49, 50, 51, 52, 53, 54, 55, 56,
                      57, 58, 59, 60, 61, 62, 63, 64)

        assert_that(actual, is_(expected))

    def test_join_16_2x2_blocks_in_8x8_matrix(self):
        # given: a 4x4 grid of 2x2 blocks
        A0 = M2(1, 2,
                9, 10)
        A1 = M2(3, 4,
                11, 12)
        A2 = M2(5, 6,
                13, 14)
        A3 = M2(7, 8,
                15, 16)
        A4 = M2(17, 18,
                25, 26)
        A5 = M2(19, 20,
                27, 28)
        A6 = M2(21, 22,
                29, 30)
        A7 = M2(23, 24,
                31, 32)
        A8 = M2(33, 34,
                41, 42)
        A9 = M2(35, 36,
                43, 44)
        A10 = M2(37, 38,
                 45, 46)
        A11 = M2(39, 40,
                 47, 48)
        A12 = M2(49, 50,
                 57, 58)
        A13 = M2(51, 52,
                 59, 60)
        A14 = M2(53, 54,
                 61, 62)
        A15 = M2(55, 56,
                 63, 64)
        blocks = (A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15)

        # when
        actual = matrix_join(blocks)

        # then
        expected = M8(1, 2, 3, 4, 5, 6, 7, 8,
                      9, 10, 11, 12, 13, 14, 15, 16,
                      17, 18, 19, 20, 21, 22, 23, 24,
                      25, 26, 27, 28, 29, 30, 31, 32,
                      33, 34, 35, 36, 37, 38, 39, 40,
                      41, 42, 43, 44, 45, 46, 47, 48,
                      49, 50, 51, 52, 53, 54, 55, 56,
                      57, 58, 59, 60, 61, 62, 63, 64)

        assert_that(actual, is_(expected))
| apache-2.0 |
solymosin/maps2winbugs | plugin/xdist.py | 1 | 1633 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
maps2WinBUGS
A QGIS plugin a tool to facilitate data processing for Bayesian spatial modeling
-------------------
begin : 2015-07-31
git sha : $Format:%H$
copyright : (C) 2015 by Norbert Solymosi
email : solymosi.norbert@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from qgis.PyQt.QtWidgets import QDialog
from .xdist_dialog import Ui_xDist
class Dialog(QDialog, Ui_xDist):
    """QDialog wired to the generated xDist UI form."""

    def __init__(self):
        """Constructor for the dialog.
        """
        QDialog.__init__(self)
        # Instantiate the widgets declared in the generated Ui_xDist form.
        self.setupUi(self)
        # Route the custom buttons to the standard QDialog result slots.
        self.pushCancel.clicked.connect(self.reject)
        self.pushOK.clicked.connect(self.accept)
| gpl-2.0 |
highweb-project/highweb-webcl-html5spec | third_party/google_appengine_cloudstorage/cloudstorage/common.py | 120 | 11732 | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Helpers shared by cloudstorage_stub and cloudstorage_api."""
# Names exported by ``from cloudstorage.common import *``.
__all__ = ['CS_XML_NS',
           'CSFileStat',
           'dt_str_to_posix',
           'local_api_url',
           'LOCAL_GCS_ENDPOINT',
           'local_run',
           'get_access_token',
           'get_metadata',
           'GCSFileStat',
           'http_time_to_posix',
           'memory_usage',
           'posix_time_to_http',
           'posix_to_dt_str',
           'set_access_token',
           'validate_options',
           'validate_bucket_name',
           'validate_bucket_path',
           'validate_file_path',
           ]
import calendar
import datetime
from email import utils as email_utils
import logging
import os
import re
try:
from google.appengine.api import runtime
except ImportError:
from google.appengine.api import runtime
# GCS bucket names: 3-63 characters drawn from a-z, 0-9, '.', '-' and '_'.
_GCS_BUCKET_REGEX_BASE = r'[a-z0-9\.\-_]{3,63}'
# Bare bucket name, e.g. 'my-bucket'.
_GCS_BUCKET_REGEX = re.compile(_GCS_BUCKET_REGEX_BASE + r'$')
# Bucket path, e.g. '/my-bucket'.
_GCS_BUCKET_PATH_REGEX = re.compile(r'/' + _GCS_BUCKET_REGEX_BASE + r'$')
# Bucket path with an optional trailing prefix, e.g. '/my-bucket/some/prefix'.
_GCS_PATH_PREFIX_REGEX = re.compile(r'/' + _GCS_BUCKET_REGEX_BASE + r'.*')
# Full object path, e.g. '/my-bucket/some/object'.
_GCS_FULLPATH_REGEX = re.compile(r'/' + _GCS_BUCKET_REGEX_BASE + r'/.*')
# Header names/prefixes treated as user metadata on a GCS object.
_GCS_METADATA = ['x-goog-meta-',
                 'content-disposition',
                 'cache-control',
                 'content-encoding']
# All user-settable options: the metadata headers plus the canned-ACL header.
_GCS_OPTIONS = _GCS_METADATA + ['x-goog-acl']
# XML namespace used by the (S3-compatible) GET bucket response.
CS_XML_NS = 'http://doc.s3.amazonaws.com/2006-03-01'
# Path under which the dev appserver serves its GCS emulation.
LOCAL_GCS_ENDPOINT = '/_ah/gcs'
# Module-level bearer token shared via set_access_token()/get_access_token().
_access_token = ''
_MAX_GET_BUCKET_RESULT = 1000
def set_access_token(access_token):
    """Set the shared access token to authenticate with Google Cloud Storage.

    When set, the library will always attempt to communicate with the
    real Google Cloud Storage with this token even when running on dev
    appserver.  Note the token could expire so it's up to you to renew it.

    When absent, the library will automatically request and refresh a token
    on appserver, or when on dev appserver, talk to a Google Cloud Storage
    stub.

    Args:
      access_token: you can get one by running 'gsutil -d ls' and copying the
        str after 'Bearer'.
    """
    global _access_token
    _access_token = access_token
def get_access_token():
    """Returns the shared access token ('' when no token has been set)."""
    return _access_token
class GCSFileStat(object):
    """Container for GCS file stat.

    For files, all non-optional constructor arguments are set.  For
    directories, only ``filename`` and ``is_dir`` are meaningful; size,
    ctime and etag stay None.
    """

    def __init__(self,
                 filename,
                 st_size,
                 etag,
                 st_ctime,
                 content_type=None,
                 metadata=None,
                 is_dir=False):
        """Initialize.

        Args:
          filename: a Google Cloud Storage filename of form '/bucket/filename'.
          st_size: file size in bytes. long compatible.
          etag: hex digest of the md5 hash of the file's content. str.
          st_ctime: posix file creation time. float compatible.
          content_type: content type. str.
          metadata: a str->str dict of user specified options when creating
            the file. Possible keys are x-goog-meta-, content-disposition,
            content-encoding, and cache-control.
          is_dir: True if this represents a directory. False if this is a
            real file.
        """
        self.filename = filename
        self.is_dir = is_dir
        self.st_size = None
        self.st_ctime = None
        self.etag = None
        self.content_type = content_type
        self.metadata = metadata
        if not is_dir:
            self.st_size = long(st_size)  # py2 'long'; this file targets Python 2.
            self.st_ctime = float(st_ctime)
            if etag[0] == '"' and etag[-1] == '"':
                # Strip the quotes GCS wraps around the etag value.
                etag = etag[1:-1]
            self.etag = etag

    def __repr__(self):
        if self.is_dir:
            return '(directory: %s)' % self.filename
        return (
            '(filename: %(filename)s, st_size: %(st_size)s, '
            'st_ctime: %(st_ctime)s, etag: %(etag)s, '
            'content_type: %(content_type)s, '
            'metadata: %(metadata)s)' %
            dict(filename=self.filename,
                 st_size=self.st_size,
                 st_ctime=self.st_ctime,
                 etag=self.etag,
                 content_type=self.content_type,
                 metadata=self.metadata))

    def __cmp__(self, other):
        """Order stats by filename (py2 comparison protocol)."""
        if not isinstance(other, self.__class__):
            # Bug fix: the message and its arguments were previously passed as
            # separate ValueError args, so the %s placeholders were never
            # filled in.  Format the message explicitly instead.
            raise ValueError('Argument to cmp must have the same type. '
                             'Expect %s, got %s' %
                             (self.__class__.__name__,
                              other.__class__.__name__))
        if self.filename > other.filename:
            return 1
        elif self.filename < other.filename:
            return -1
        return 0

    def __hash__(self):
        # The etag identifies content; fall back to the filename (e.g. for
        # directories, whose etag is None).
        if self.etag:
            return hash(self.etag)
        return hash(self.filename)


CSFileStat = GCSFileStat
def get_metadata(headers):
    """Return only the user-defined option headers from an HTTP response."""
    metadata = {}
    for header, value in headers.iteritems():
        if any(header.lower().startswith(prefix) for prefix in _GCS_METADATA):
            metadata[header] = value
    return metadata
def validate_bucket_name(name):
    """Check that *name* is a legal Google Storage bucket name.

    Args:
      name: a Google Storage bucket name with no prefix or suffix.

    Raises:
      ValueError: if name is invalid.
    """
    _validate_path(name)
    if _GCS_BUCKET_REGEX.match(name) is None:
        raise ValueError('Bucket should be 3-63 characters long using only a-z,'
                         '0-9, underscore, dash or dot but got %s' % name)
def validate_bucket_path(path):
    """Check that *path* is a legal bucket path of the form '/bucket'.

    Args:
      path: a Google Storage bucket path.

    Raises:
      ValueError: if path is invalid.
    """
    _validate_path(path)
    if _GCS_BUCKET_PATH_REGEX.match(path) is None:
        raise ValueError('Bucket should have format /bucket '
                         'but got %s' % path)
def validate_file_path(path):
    """Check that *path* is a legal object path of form '/bucket/filename'.

    Args:
      path: a Google Storage file path.

    Raises:
      ValueError: if path is invalid.
    """
    _validate_path(path)
    if _GCS_FULLPATH_REGEX.match(path) is None:
        raise ValueError('Path should have format /bucket/filename '
                         'but got %s' % path)
def _process_path_prefix(path_prefix):
    """Validate and split a GCS path prefix into its bucket and prefix parts.

    Args:
      path_prefix: a path prefix of format '/bucket/prefix', '/bucket/'
        or '/bucket'.

    Raises:
      ValueError: if path is invalid.

    Returns:
      A ('/bucket', prefix) tuple; prefix can be None.
    """
    _validate_path(path_prefix)
    if not _GCS_PATH_PREFIX_REGEX.match(path_prefix):
        raise ValueError('Path prefix should have format /bucket, /bucket/, '
                         'or /bucket/prefix but got %s.' % path_prefix)
    separator = path_prefix.find('/', 1)
    if separator == -1:
        # No second slash: the whole string is the bucket, no prefix.
        return path_prefix, None
    return path_prefix[:separator], path_prefix[separator + 1:] or None
def _validate_path(path):
"""Basic validation of Google Storage paths.
Args:
path: a Google Storage path. It should have form '/bucket/filename'
or '/bucket'.
Raises:
ValueError: if path is invalid.
TypeError: if path is not of type basestring.
"""
if not path:
raise ValueError('Path is empty')
if not isinstance(path, basestring):
raise TypeError('Path should be a string but is %s (%s).' %
(path.__class__, path))
def validate_options(options):
    """Check user-supplied Google Cloud Storage options.

    Args:
      options: a str->basestring dict of options, or a false value
        (in which case nothing is checked).

    Raises:
      ValueError: if an option is not supported.
      TypeError: if an option key is not str or a value is not basestring.
    """
    if not options:
        return
    for key, value in options.iteritems():
        if not isinstance(key, str):
            raise TypeError('option %r should be a str.' % key)
        if not any(key.lower().startswith(prefix) for prefix in _GCS_OPTIONS):
            raise ValueError('option %s is not supported.' % key)
        if not isinstance(value, basestring):
            raise TypeError('value %r for option %s should be of type basestring.' %
                            (value, key))
def http_time_to_posix(http_time):
    """Convert an RFC 2616 HTTP date string to posix time.

    See http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.3.1
    for the HTTP time format.

    Args:
      http_time: time in RFC 2616 format, e.g. "Mon, 20 Nov 1995 19:12:08 GMT",
        or None.

    Returns:
      Seconds since the unix epoch, or None when http_time is None.
    """
    if http_time is None:
        return None
    return email_utils.mktime_tz(email_utils.parsedate_tz(http_time))
def posix_time_to_http(posix_time):
    """Convert posix time to the HTTP header time format.

    Args:
      posix_time: unix time (seconds since the epoch), or None.

    Returns:
      A datetime str in RFC 2616 format, or None when posix_time is None.
    """
    # Bug fix: test against None explicitly (matching http_time_to_posix)
    # so that the valid timestamp 0 -- the epoch itself -- is formatted
    # instead of being silently dropped by the old truthiness check.
    if posix_time is not None:
        return email_utils.formatdate(posix_time, usegmt=True)
_DT_FORMAT = '%Y-%m-%dT%H:%M:%S'


def dt_str_to_posix(dt_str):
    """Convert an ISO 8601 datetime str to posix time.

    The input has format '%Y-%m-%dT%H:%M:%S.%fZ', e.g.
    '2013-04-12T00:22:27.978Z'.  Per ISO 8601, T separates date and time and
    the trailing Z marks UTC; the fractional seconds are discarded.  Used to
    parse the LastModified node of GCS's GET bucket XML response.

    Args:
      dt_str: a datetime str as above.

    Returns:
      Float seconds since the unix epoch (midnight 1970/1/1 UTC).
    """
    date_part, _ = dt_str.split('.')
    parsed = datetime.datetime.strptime(date_part, _DT_FORMAT)
    return calendar.timegm(parsed.utctimetuple())


def posix_to_dt_str(posix):
    """Inverse of dt_str_to_posix; used by the GCS stub to build XML responses.

    Args:
      posix: float seconds since the unix epoch.

    Returns:
      A datetime str of format '%Y-%m-%dT%H:%M:%S.000Z' (fraction is
      always zero).
    """
    stamp = datetime.datetime.utcfromtimestamp(posix)
    return stamp.strftime(_DT_FORMAT) + '.000Z'
def local_run():
    """Whether we should hit the GCS dev appserver stub."""
    server_software = os.environ.get('SERVER_SOFTWARE')
    if server_software is None:
        return True
    if 'remote_api' in server_software:
        return False
    # Dev appserver and the test harness identify themselves this way.
    return server_software.startswith(('Development', 'testutil'))
def local_api_url():
    """Return the URL for GCS emulation on dev appserver."""
    return 'http://{0}{1}'.format(os.environ.get('HTTP_HOST'),
                                  LOCAL_GCS_ENDPOINT)
def memory_usage(method):
    """Decorator that logs runtime memory usage before and after *method*."""
    import functools  # local import keeps the file's top-level imports untouched

    # Bug fix: without functools.wraps the decorated function reported
    # __name__ == 'wrapper', hiding the real method name from introspection.
    @functools.wraps(method)
    def wrapper(*args, **kwargs):
        logging.info('Memory before method %s is %s.',
                     method.__name__, runtime.memory_usage().current())
        result = method(*args, **kwargs)
        logging.info('Memory after method %s is %s',
                     method.__name__, runtime.memory_usage().current())
        return result
    return wrapper
def _add_ns(tagname):
    """Qualify *tagname* with the GCS XML namespace ('{ns}tag' notation)."""
    return '{%s}%s' % (CS_XML_NS, tagname)


# Pre-qualified tag names used when parsing GET bucket XML responses.
_T_CONTENTS = _add_ns('Contents')
_T_LAST_MODIFIED = _add_ns('LastModified')
_T_ETAG = _add_ns('ETag')
_T_KEY = _add_ns('Key')
_T_SIZE = _add_ns('Size')
_T_PREFIX = _add_ns('Prefix')
_T_COMMON_PREFIXES = _add_ns('CommonPrefixes')
_T_NEXT_MARKER = _add_ns('NextMarker')
_T_IS_TRUNCATED = _add_ns('IsTruncated')
| bsd-3-clause |
whbrewer/spc | src/gluino/utils.py | 4 | 10460 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
This file specifically includes utilities for security.
"""
import threading
import struct
import hashlib
import hmac
import uuid
import random
import time
import os
import re
import sys
import logging
import socket
import base64
import zlib
python_version = sys.version_info[0]
if python_version == 2:
import cPickle as pickle
else:
import pickle
try:
from Crypto.Cipher import AES
except ImportError:
import contrib.aes as AES
try:
from contrib.pbkdf2 import pbkdf2_hex
HAVE_PBKDF2 = True
except ImportError:
try:
from .pbkdf2 import pbkdf2_hex
HAVE_PBKDF2 = True
except (ImportError, ValueError):
HAVE_PBKDF2 = False
logger = logging.getLogger("web2py")
def AES_new(key, IV=None):
    """Return an (AES-CBC cipher, IV) pair; a random IV is made when None."""
    IV = fast_urandom16() if IV is None else IV
    return AES.new(key, AES.MODE_CBC, IV), IV
def compare(a, b):
    """Compare two strings in constant time (timing-attack resistant).

    Always examines every character pair so the run time does not reveal
    the position of the first mismatch.
    """
    if len(a) != len(b):
        return False
    diff = 0
    for ch_a, ch_b in zip(a, b):
        diff |= ord(ch_a) ^ ord(ch_b)
    return diff == 0
def md5_hash(text):
    """Return the hex MD5 digest of the given text."""
    digest = hashlib.md5(text)
    return digest.hexdigest()
def simple_hash(text, key='', salt='', digest_alg='md5'):
    """Hash *text* with the requested digest algorithm.

    digest_alg may be an algorithm name ('md5', 'sha1', ...), a
    'pbkdf2(iterations,keylen,alg)' spec, or a hash constructor.
    When *key* is given (and digest_alg is a name), an HMAC is produced.
    """
    if not digest_alg:
        raise RuntimeError("simple_hash with digest_alg=None")
    if not isinstance(digest_alg, str):
        # Caller passed a hash constructor directly.
        return digest_alg(text + key + salt).hexdigest()
    if digest_alg.startswith('pbkdf2'):  # e.g. 'pbkdf2(1000,20,sha512)'
        iterations, keylen, alg = digest_alg[7:-1].split(',')
        return pbkdf2_hex(text, salt, int(iterations),
                          int(keylen), get_digest(alg))
    if key:  # keyed HMAC
        return hmac.new(key + salt, text, get_digest(digest_alg)).hexdigest()
    # Plain salted digest, compatible with third party systems.
    h = hashlib.new(digest_alg)
    h.update(text + salt)
    return h.hexdigest()
def get_digest(value):
    """Return the hashlib constructor named by *value*.

    Non-string arguments are assumed to already be constructors and are
    returned unchanged.
    """
    if not isinstance(value, str):
        return value
    constructors = {
        'md5': hashlib.md5,
        'sha1': hashlib.sha1,
        'sha224': hashlib.sha224,
        'sha256': hashlib.sha256,
        'sha384': hashlib.sha384,
        'sha512': hashlib.sha512,
    }
    try:
        return constructors[value.lower()]
    except KeyError:
        raise ValueError("Invalid digest algorithm: %s" % value)
# Map hex-digest length (in characters) -> digest algorithm name.
# NOTE(review): written for Python 2, where '/' on ints yields int keys
# (32, 40, ...).  Under Python 3 true division these keys become floats
# (32.0, ...), so int-length lookups would miss -- confirm before porting.
DIGEST_ALG_BY_SIZE = {
    128 / 4: 'md5',
    160 / 4: 'sha1',
    224 / 4: 'sha224',
    256 / 4: 'sha256',
    384 / 4: 'sha384',
    512 / 4: 'sha512',
}
def pad(s, n=32, padchar=' '):
    """Right-pad *s* with *padchar* to the next multiple of *n* characters.

    When len(s) is already a multiple of n, a full block of n pad characters
    is appended ('always pad' convention), so padding is never empty.
    """
    # Bug fix: the body previously hard-coded 32 and silently ignored *n*.
    # Behavior is unchanged for the default n=32 (the only value used here).
    return s + (n - len(s) % n) * padchar
def secure_dumps(data, encryption_key, hash_key=None, compression_level=None):
    """Pickle, optionally compress, AES-encrypt and HMAC-sign *data*.

    Returns 'signature:base64(IV + ciphertext)'; the inverse is
    secure_loads().
    """
    if not hash_key:
        hash_key = hashlib.sha1(encryption_key).hexdigest()
    payload = pickle.dumps(data)
    if compression_level:
        payload = zlib.compress(payload, compression_level)
    cipher, IV = AES_new(pad(encryption_key[:32]))
    encrypted_data = base64.urlsafe_b64encode(IV + cipher.encrypt(pad(payload)))
    signature = hmac.new(hash_key, encrypted_data).hexdigest()
    return signature + ':' + encrypted_data
def secure_loads(data, encryption_key, hash_key=None, compression_level=None):
    """Verify the HMAC signature of *data*, then decrypt and unpickle it.

    Returns None when the payload is malformed or the signature does not
    match; inverse of secure_dumps().
    """
    if ':' not in data:
        return None
    if not hash_key:
        hash_key = hashlib.sha1(encryption_key).hexdigest()
    signature, encrypted_data = data.split(':', 1)
    expected_signature = hmac.new(hash_key, encrypted_data).hexdigest()
    if not compare(signature, expected_signature):
        return None
    key = pad(encryption_key[:32])
    raw = base64.urlsafe_b64decode(encrypted_data)
    IV, ciphertext = raw[:16], raw[16:]
    cipher, _ = AES_new(key, IV=IV)
    try:
        plaintext = cipher.decrypt(ciphertext).rstrip(' ')
        if compression_level:
            plaintext = zlib.decompress(plaintext)
        return pickle.loads(plaintext)
    except (TypeError, pickle.UnpicklingError):
        return None
### compute constant CTOKENS
def initialize_urandom():
    """
    This function and web2py_uuid follow from the following discussion:
    http://groups.google.com/group/web2py-developers/browse_thread/thread/7fd5789a7da3f09

    At startup web2py computes a unique ID that identifies the machine by
    adding uuid.getnode() + int(time.time() * 1e6)

    This is a 48-bit number. It converts the number into 16 8-bit tokens.
    It uses this value to initialize the entropy source ('/dev/urandom')
    and to seed random.
    If os.urandom() is not supported, it falls back to using random and
    issues a warning.
    """
    node_id = uuid.getnode()
    microseconds = int(time.time() * 1e6)
    # Derive 16 byte-sized tokens by cycling over the 6 bytes (48 bits) of
    # the combined machine/time value.
    ctokens = [((node_id + microseconds) >> ((i % 6) * 8)) %
               256 for i in range(16)]
    random.seed(node_id + microseconds)
    try:
        os.urandom(1)  # probe whether the OS entropy source is available
        have_urandom = True
        try:
            # try to add process-specific entropy
            frandom = open('/dev/urandom', 'wb')
            try:
                if python_version == 2:
                    frandom.write(''.join(chr(t) for t in ctokens))  # python 2
                else:
                    frandom.write(bytes([]).join(bytes([t]) for t in ctokens))  # python 3
            finally:
                frandom.close()
        except IOError:
            # works anyway
            pass
    except NotImplementedError:
        have_urandom = False
        logger.warning(
            """Cryptographically secure session management is not possible on your system because
your system does not provide a cryptographically secure entropy source.
This is not specific to web2py; consider deploying on a different operating system.""")
    # Pack the 16 tokens into two 64-bit little-endian integers for cheap
    # XOR-ing inside web2py_uuid().
    if python_version == 2:
        packed = ''.join(chr(x) for x in ctokens)  # python 2
    else:
        packed = bytes([]).join(bytes([x]) for x in ctokens)  # python 3
    unpacked_ctokens = struct.unpack('=QQ', packed)
    return unpacked_ctokens, have_urandom
UNPACKED_CTOKENS, HAVE_URANDOM = initialize_urandom()
def fast_urandom16(urandom=[], locker=threading.RLock()):
    """
    this is 4x faster than calling os.urandom(16) and prevents
    the "too many files open" issue with concurrent access to os.urandom()
    """
    # NOTE: the mutable default arguments are intentional -- they act as a
    # process-wide cache of pre-fetched 16-byte chunks plus its lock.
    try:
        return urandom.pop()
    except IndexError:
        try:
            locker.acquire()
            ur = os.urandom(16 * 1024)
            # Bug fix: use 'range' instead of the py2-only 'xrange' so this
            # module keeps working under Python 3, which the surrounding
            # code otherwise supports via its python_version branches.
            urandom += [ur[i:i + 16] for i in range(16, 1024 * 16, 16)]
            return ur[0:16]
        finally:
            locker.release()
def web2py_uuid(ctokens=UNPACKED_CTOKENS):
    """
    This function follows from the following discussion:
    http://groups.google.com/group/web2py-developers/browse_thread/thread/7fd5789a7da3f09

    It works like uuid.uuid4 except that it tries to use os.urandom() if
    possible and it XORs the output with the tokens uniquely associated
    with this machine.
    """
    rand_longs = (random.getrandbits(64), random.getrandbits(64))
    if HAVE_URANDOM:
        urand_longs = struct.unpack('=QQ', fast_urandom16())
        low = rand_longs[0] ^ urand_longs[0] ^ ctokens[0]
        high = rand_longs[1] ^ urand_longs[1] ^ ctokens[1]
    else:
        low = rand_longs[0] ^ ctokens[0]
        high = rand_longs[1] ^ ctokens[1]
    byte_s = struct.pack('=QQ', low, high)
    return str(uuid.UUID(bytes=byte_s, version=4))
REGEX_IPv4 = re.compile('(\d+)\.(\d+)\.(\d+)\.(\d+)')


def is_valid_ip_address(address):
    """
    >>> is_valid_ip_address('127.0')
    False
    >>> is_valid_ip_address('127.0.0.1')
    True
    >>> is_valid_ip_address('2001:660::1')
    True
    """
    lowered = address.lower()
    # Well-known loopback spellings are accepted outright.
    if lowered in ('127.0.0.1', 'localhost', '::1', '::ffff:127.0.0.1'):
        return True
    if lowered in ('unknown', ''):
        return False
    if address.count('.') == 3:  # assume IPv4 (possibly IPv6-embedded)
        if address.startswith('::ffff:'):
            address = address[7:]
        if hasattr(socket, 'inet_aton'):  # try to validate using the OS
            try:
                socket.inet_aton(address)
            except socket.error:  # invalid address
                return False
            return True
        # Fall back to a regex check when inet_aton is unavailable.
        match = REGEX_IPv4.match(address)
        return bool(match and
                    all(0 <= int(match.group(i)) < 256 for i in (1, 2, 3, 4)))
    if hasattr(socket, 'inet_pton'):  # assume IPv6, try using the OS
        try:
            socket.inet_pton(socket.AF_INET6, address)
        except socket.error:  # invalid address
            return False
        return True
    return True  # cannot validate here; assume it is a valid address
elif hasattr(socket, 'inet_pton'): # assume IPv6, try using the OS
try:
socket.inet_pton(socket.AF_INET6, address)
return True
except socket.error: # invalid address
return False
else: # do not know what to do? assume it is a valid address
return True
def is_loopback_ip_address(ip=None, addrinfo=None):
    """
    Determines whether the address appears to be a loopback address.
    This assumes that the IP is valid.
    """
    if addrinfo:  # see socket.getaddrinfo() for layout of addrinfo tuple
        if addrinfo[0] in (socket.AF_INET, socket.AF_INET6):
            ip = addrinfo[4]
    if not isinstance(ip, basestring):
        return False
    if ip.count('.') == 3:
        # IPv4 or IPv6-embedded IPv4 or IPv4-compatible IPv6
        return ip.lower().startswith(('127', '::127', '0:0:0:0:0:0:127',
                                      '::ffff:127', '0:0:0:0:0:ffff:127'))
    return ip in ('::1', '0:0:0:0:0:0:0:1')  # IPv6 loopback
def getipaddrinfo(host):
    """
    Filter out non-IP and bad IP addresses from getaddrinfo
    """
    try:
        candidates = socket.getaddrinfo(host, None)
    except socket.error:
        return []
    return [info for info in candidates
            if (info[0] == socket.AF_INET or info[0] == socket.AF_INET6)
            and isinstance(info[4][0], basestring)]
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.