repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
mooja/eisenhower_dashboard | eisenhower_dashboard/users/admin.py | 41 | 1155 | from django import forms
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as AuthUserAdmin
from django.contrib.auth.forms import UserChangeForm, UserCreationForm
from .models import User
class MyUserChangeForm(UserChangeForm):
    """Admin change form bound to the project's custom ``User`` model."""

    class Meta(UserChangeForm.Meta):
        # Swap in the local User model; all other Meta options are inherited.
        model = User
class MyUserCreationForm(UserCreationForm):
    """Admin creation form for the custom ``User`` model.

    Adds an explicit uniqueness check on ``username`` with a dedicated
    'duplicate_username' error message.
    """

    # Build a fresh dict instead of mutating the parent's in place:
    # the original ``error_message = UserCreationForm.error_messages.update(...)``
    # bound None (dict.update returns None) and silently mutated the
    # shared UserCreationForm.error_messages dict.
    error_messages = dict(
        UserCreationForm.error_messages,
        duplicate_username='This username has already been taken.',
    )

    class Meta(UserCreationForm.Meta):
        # Swap in the local User model; all other Meta options are inherited.
        model = User

    def clean_username(self):
        """Return the cleaned username.

        Raises ``forms.ValidationError`` if a user with that username
        already exists.
        """
        username = self.cleaned_data["username"]
        try:
            User.objects.get(username=username)
        except User.DoesNotExist:
            return username
        raise forms.ValidationError(self.error_messages['duplicate_username'])
@admin.register(User)
class MyUserAdmin(AuthUserAdmin):
    """Admin configuration for the custom ``User`` model."""

    # Use the custom forms so the local model and the duplicate-username
    # check apply in the admin.
    form = MyUserChangeForm
    add_form = MyUserCreationForm
    # Prepend a 'User Profile' section to the stock auth fieldsets.
    fieldsets = (
        ('User Profile', {'fields': ('name',)}),
    ) + AuthUserAdmin.fieldsets
    list_display = ('username', 'name', 'is_superuser')
    search_fields = ['name']
| mit |
tyc85/nwsdr-3.6.3-dsc | gr-qtgui/examples/pyqt_example_c.py | 17 | 5860 | #!/usr/bin/env python
#
# Copyright 2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
import sys
try:
from gnuradio import qtgui
from PyQt4 import QtGui, QtCore
import sip
except ImportError:
print "Error: Program requires PyQt4 and gr-qtgui."
sys.exit(1)
class dialog_box(QtGui.QWidget):
    """Top-level window showing the sink display beside the control panel."""

    def __init__(self, display, control):
        QtGui.QWidget.__init__(self, None)
        self.setWindowTitle('PyQt Test GUI')

        # Lay out the display (stretch factor 1) and the control panel
        # side by side, left to right.
        layout = QtGui.QBoxLayout(QtGui.QBoxLayout.LeftToRight, self)
        layout.addWidget(display, 1)
        layout.addWidget(control)
        self.boxlayout = layout

        self.resize(800, 500)
class control_box(QtGui.QWidget):
    """Control panel with line edits for each signal's frequency/amplitude."""

    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self, parent)
        self.setWindowTitle('Control Panel')
        self.setToolTip('Control the signals')
        QtGui.QToolTip.setFont(QtGui.QFont('OldEnglish', 10))

        self.layout = QtGui.QFormLayout(self)

        # Control the first signal
        self.freq1Edit = QtGui.QLineEdit(self)
        self.freq1Edit.setMinimumWidth(100)
        self.layout.addRow("Signal 1 Frequency:", self.freq1Edit)
        self.connect(self.freq1Edit, QtCore.SIGNAL("editingFinished()"),
                     self.freq1EditText)

        self.amp1Edit = QtGui.QLineEdit(self)
        self.amp1Edit.setMinimumWidth(100)
        self.layout.addRow("Signal 1 Amplitude:", self.amp1Edit)
        self.connect(self.amp1Edit, QtCore.SIGNAL("editingFinished()"),
                     self.amp1EditText)

        # Control the second signal
        self.freq2Edit = QtGui.QLineEdit(self)
        self.freq2Edit.setMinimumWidth(100)
        self.layout.addRow("Signal 2 Frequency:", self.freq2Edit)
        self.connect(self.freq2Edit, QtCore.SIGNAL("editingFinished()"),
                     self.freq2EditText)

        self.amp2Edit = QtGui.QLineEdit(self)
        self.amp2Edit.setMinimumWidth(100)
        self.layout.addRow("Signal 2 Amplitude:", self.amp2Edit)
        self.connect(self.amp2Edit, QtCore.SIGNAL("editingFinished()"),
                     self.amp2EditText)

        self.quit = QtGui.QPushButton('Close', self)
        self.quit.setMinimumWidth(100)
        self.layout.addWidget(self.quit)
        self.connect(self.quit, QtCore.SIGNAL('clicked()'),
                     QtGui.qApp, QtCore.SLOT('quit()'))

    def attach_signal1(self, signal):
        # Remember the source block and show its current settings.
        self.signal1 = signal
        self.freq1Edit.setText(QtCore.QString("%1").arg(self.signal1.frequency()))
        self.amp1Edit.setText(QtCore.QString("%1").arg(self.signal1.amplitude()))

    def attach_signal2(self, signal):
        self.signal2 = signal
        self.freq2Edit.setText(QtCore.QString("%1").arg(self.signal2.frequency()))
        self.amp2Edit.setText(QtCore.QString("%1").arg(self.signal2.amplitude()))

    def freq1EditText(self):
        # Push the edited value into the running flowgraph; reject
        # non-numeric input with a console message.
        try:
            newfreq = float(self.freq1Edit.text())
            self.signal1.set_frequency(newfreq)
        except ValueError:
            print "Bad frequency value entered"

    def amp1EditText(self):
        try:
            newamp = float(self.amp1Edit.text())
            self.signal1.set_amplitude(newamp)
        except ValueError:
            print "Bad amplitude value entered"

    def freq2EditText(self):
        try:
            newfreq = float(self.freq2Edit.text())
            self.signal2.set_frequency(newfreq)
        except ValueError:
            print "Bad frequency value entered"

    def amp2EditText(self):
        try:
            newamp = float(self.amp2Edit.text())
            self.signal2.set_amplitude(newamp)
        except ValueError:
            print "Bad amplitude value entered"
class my_top_block(gr.top_block):
    """Flowgraph: two complex sine sources -> adder -> channel model ->
    throttle -> Qt frequency sink, plus the control/display windows."""

    def __init__(self):
        gr.top_block.__init__(self)

        Rs = 8000       # sample rate in Hz
        f1 = 1000       # frequency of the first tone
        f2 = 2000       # frequency of the second tone
        fftsize = 2048

        self.qapp = QtGui.QApplication(sys.argv)

        src1 = gr.sig_source_c(Rs, gr.GR_SIN_WAVE, f1, 0.1, 0)
        src2 = gr.sig_source_c(Rs, gr.GR_SIN_WAVE, f2, 0.1, 0)
        src = gr.add_cc()
        channel = gr.channel_model(0.001)
        # Throttle rate-limits the flowgraph since no hardware paces it.
        thr = gr.throttle(gr.sizeof_gr_complex, 100*fftsize)
        self.snk1 = qtgui.sink_c(fftsize, gr.firdes.WIN_BLACKMAN_hARRIS,
                                 0, Rs,
                                 "Complex Signal Example",
                                 True, True, True, False)

        self.connect(src1, (src,0))
        self.connect(src2, (src,1))
        self.connect(src, channel, thr, self.snk1)

        self.ctrl_win = control_box()
        self.ctrl_win.attach_signal1(src1)
        self.ctrl_win.attach_signal2(src2)

        # Get the reference pointer to the SpectrumDisplayForm QWidget
        pyQt = self.snk1.pyqwidget()

        # Wrap the pointer as a PyQt SIP object
        # This can now be manipulated as a PyQt4.QtGui.QWidget
        pyWin = sip.wrapinstance(pyQt, QtGui.QWidget)

        self.main_box = dialog_box(pyWin, self.ctrl_win)
        self.main_box.show()
if __name__ == "__main__":
    # Build the flowgraph, start it, then hand control to the Qt event
    # loop; stop the flowgraph when the GUI exits.
    tb = my_top_block()
    tb.start()
    tb.qapp.exec_()
    tb.stop()
| gpl-3.0 |
lwjohnst86/studyGroup | lessons/python/matplotlib/hwk3.1.py | 12 | 2149 | # -*- coding: utf-8 -*-
from numpy import float32
from numpy import linspace
from numpy import polyfit
from numpy import polyval
import matplotlib.pyplot as plt

# Read in data from csv: one "FN,EFN" pair per line.
# A context manager guarantees the file is closed even if parsing fails
# (the original left the handle open on error).
FN = []
EFN = []
with open('data.csv', 'r') as f:
    for row in f:
        x, y = row.strip().split(",")
        FN.append(float32(x))
        EFN.append(float32(y))

# Evenly spaced x values used to draw the fitted line below
z = linspace(-1, 4)

# Create grid and plot data
fig = plt.figure(figsize=(4, 4), dpi=600)
a = fig.add_subplot(1, 1, 1)
plt.plot(FN, EFN, 'ks', markersize=3)

# Fit a degree-1 (linear) trend line to the data and draw it
fit = polyfit(FN, EFN, 1)
plt.plot(z, polyval(fit, z), label=fit, color='k')

# Shrink the tick-label font size on both axes
for t in a.yaxis.get_major_ticks():
    t.label.set_fontsize(6)
for t in a.xaxis.get_major_ticks():
    t.label.set_fontsize(6)

# Set the subplot sizing
fig.subplots_adjust(top=0.95, right=0.89, left=0.13, bottom=0.25)

# Set limits and labels
plt.xlim(-0.2, 3.5)
plt.ylim(0, 0.8)
plt.ylabel(r'Extrafloral Nectar (mg of sugar per extrafloral nectary)',
           fontsize=6, verticalalignment='center')
plt.xlabel(r'Floral Nectar (mg of sugar per flower)',
           fontsize=6, horizontalalignment='center')

# Save as pdf
fig.savefig('EFNvFN.pdf', dpi=600)
plt.show()

"""In ecology, animals and plants interact with one another in an ecosystem.
There are several types of interactions that may occur such as predation,
parasitism and mutualism. Mutualism is where the animals and plants both give
one another a survival benefit. So if a trait is not useful why invest energy
into producing it?
Different interactions have generally been studied individually even though
they occur in a community. This plot shows the relationship between EFN and FN
production in T. ulmifolia. There is a positive correlation, which suggests that
plants that produce more of one also produce more of the other.
This is probably because of overall plant vigour. This was an initial figure
for a later experiment showing interactions."""
| apache-2.0 |
zaabjuda/GB_Fabrica | web/views.py | 1 | 3023 | # coding=utf-8
__author__ = "Dmitry Zhiltsov"
__copyright__ = "Copyright 2015, Dmitry Zhiltsov"
from django.contrib.auth import logout, decorators
from django.http import HttpResponseRedirect
from django.views.generic import TemplateView, RedirectView, CreateView, DetailView, UpdateView, ListView
from guest_book.defs import GuestBookMessageData
from guest_book.models import GuestBook
from .forms import CreateMessageForm
class LoginRequiredMixin(object):
    """CBV mixin that wraps the dispatched view in ``login_required``."""

    @classmethod
    def as_view(cls, **initkwargs):
        # Decorate the fully-resolved view so anonymous users are
        # redirected to the login page.
        view = super(LoginRequiredMixin, cls).as_view(**initkwargs)
        return decorators.login_required(view)
class IndexView(TemplateView):
    """Render the site landing page."""

    template_name = 'index.html'
class LogoutView(RedirectView):
    """
    Provides users the ability to logout
    """

    # Permanent (301) redirect back to the site root after logging out.
    permanent = True
    url = '/'

    def get(self, request, *args, **kwargs):
        # End the session before issuing the redirect.
        logout(request)
        return super(LogoutView, self).get(request, *args, **kwargs)
class AddGB(LoginRequiredMixin, CreateView):
    """Create a new guest book owned by the logged-in user."""

    model = GuestBook
    fields = ['name', 'slug', 'is_moderated']

    def form_valid(self, form):
        # The owner comes from the request, never from user-supplied data.
        form.instance.owner = self.request.user
        return super(AddGB, self).form_valid(form)
class _BaseGBDetail(DetailView):
    """DetailView base that scopes lookups to the owner named in the URL."""

    def get_object(self, queryset=None):
        """Resolve the guest book, restricted to the ``owner`` URL kwarg.

        Uses ``queryset is None`` rather than ``not queryset``: evaluating
        a QuerySet's truthiness runs a database query, and an *empty*
        caller-supplied queryset would previously have been silently
        replaced by the default manager.
        """
        owner = self.kwargs.get('owner')
        if queryset is None:
            queryset = GuestBook.objects
        queryset = queryset.filter(owner__username=owner)
        return super(_BaseGBDetail, self).get_object(queryset=queryset)
class _BaseGBUpdate(UpdateView):
    """UpdateView base that scopes lookups to the owner named in the URL."""

    def get_object(self, queryset=None):
        """Resolve the guest book, restricted to the ``owner`` URL kwarg.

        ``is None`` instead of ``not queryset``: a QuerySet's truth value
        triggers a database query and an empty caller-supplied queryset
        would be silently replaced by the default manager.
        """
        owner = self.kwargs.get('owner')
        if queryset is None:
            queryset = GuestBook.objects
        queryset = queryset.filter(owner__username=owner)
        return super(_BaseGBUpdate, self).get_object(queryset=queryset)
class ViewGB(LoginRequiredMixin, _BaseGBDetail):
    """Display a single guest book; login required."""

    model = GuestBook
class AddMessage(LoginRequiredMixin, _BaseGBUpdate):
    """Post a new message into an existing guest book."""

    template_name = 'create_message.html'
    template_name_suffix = ''
    model = GuestBook
    form_class = CreateMessageForm

    def form_valid(self, form):
        # Build the message payload and delegate creation to the model,
        # then redirect to the standard success URL.
        message = form.data.get('message')
        msg_data = GuestBookMessageData(message=message, author_id=self.request.user.id)
        gb = self.get_object()
        gb.create_message(msg_data)
        return HttpResponseRedirect(self.get_success_url())
class SettingsGB(LoginRequiredMixin, _BaseGBUpdate):
    """Edit guest-book settings; only the owner may access."""

    model = GuestBook
    fields = ['name', 'slug', 'is_moderated']

    def get_object(self, queryset=None):
        """Resolve the guest book, restricted to books the requesting
        user owns (others get a 404).

        ``is None`` instead of ``not queryset``: a QuerySet's truth value
        triggers a database query and an empty caller-supplied queryset
        would be silently replaced by the default manager.
        """
        owner = self.kwargs.get('owner')
        if queryset is None:
            queryset = GuestBook.objects
        queryset = queryset.filter(owner__username=owner, owner=self.request.user)
        return super(SettingsGB, self).get_object(queryset=queryset)
class UserGBs(LoginRequiredMixin, ListView):
    """List the guest books owned by the current user."""

    model = GuestBook

    def get_queryset(self):
        # Narrow the default queryset to the requesting user's books.
        base = super(UserGBs, self).get_queryset()
        return base.filter(owner=self.request.user)
| mit |
buskjan/inetradio | lib/python3.5/site-packages/pip/_vendor/lockfile/pidlockfile.py | 536 | 6090 | # -*- coding: utf-8 -*-
# pidlockfile.py
#
# Copyright © 2008–2009 Ben Finney <ben+python@benfinney.id.au>
#
# This is free software: you may copy, modify, and/or distribute this work
# under the terms of the Python Software Foundation License, version 2 or
# later as published by the Python Software Foundation.
# No warranty expressed or implied. See the file LICENSE.PSF-2 for details.
""" Lockfile behaviour implemented via Unix PID files.
"""
from __future__ import absolute_import
import errno
import os
import time
from . import (LockBase, AlreadyLocked, LockFailed, NotLocked, NotMyLock,
LockTimeout)
class PIDLockFile(LockBase):
    """ Lockfile implemented as a Unix PID file.

    The lock file is a normal file named by the attribute `path`.
    A lock's PID file contains a single line of text, containing
    the process ID (PID) of the process that acquired the lock.

    >>> lock = PIDLockFile('somefile')
    >>> lock = PIDLockFile('somefile')
    """

    def __init__(self, path, threaded=False, timeout=None):
        # pid lockfiles don't support threaded operation, so always force
        # False as the threaded arg.
        LockBase.__init__(self, path, False, timeout)
        self.unique_name = self.path

    def read_pid(self):
        """ Get the PID from the lock file.
        """
        return read_pid_from_pidfile(self.path)

    def is_locked(self):
        """ Test if the lock is currently held.

        The lock is held if the PID file for this lock exists.
        """
        return os.path.exists(self.path)

    def i_am_locking(self):
        """ Test if the lock is held by the current process.

        Returns ``True`` if the current process ID matches the
        number stored in the PID file.
        """
        return self.is_locked() and os.getpid() == self.read_pid()

    def acquire(self, timeout=None):
        """ Acquire the lock.

        Creates the PID file for this lock, or raises an error if
        the lock could not be acquired.

        Raises ``LockTimeout`` when a positive timeout elapses,
        ``AlreadyLocked`` when no (positive) timeout was given and the
        file exists, and ``LockFailed`` on any other OS error.
        """
        # A timeout passed here overrides the one set at construction.
        timeout = timeout if timeout is not None else self.timeout
        end_time = time.time()
        if timeout is not None and timeout > 0:
            end_time += timeout

        while True:
            try:
                # Atomic O_CREAT|O_EXCL creation is the locking primitive.
                write_pid_to_pidfile(self.path)
            except OSError as exc:
                if exc.errno == errno.EEXIST:
                    # The lock creation failed.  Maybe sleep a bit.
                    if time.time() > end_time:
                        if timeout is not None and timeout > 0:
                            raise LockTimeout("Timeout waiting to acquire"
                                              " lock for %s" %
                                              self.path)
                        else:
                            raise AlreadyLocked("%s is already locked" %
                                                self.path)
                    # Poll at a tenth of the timeout, or 0.1s without one.
                    time.sleep(timeout is not None and timeout / 10 or 0.1)
                else:
                    raise LockFailed("failed to create %s" % self.path)
            else:
                return

    def release(self):
        """ Release the lock.

        Removes the PID file to release the lock, or raises an
        error if the current process does not hold the lock.
        """
        if not self.is_locked():
            raise NotLocked("%s is not locked" % self.path)
        if not self.i_am_locking():
            raise NotMyLock("%s is locked, but not by me" % self.path)
        remove_existing_pidfile(self.path)

    def break_lock(self):
        """ Break an existing lock.

        Removes the PID file if it already exists, otherwise does
        nothing.
        """
        remove_existing_pidfile(self.path)
def read_pid_from_pidfile(pidfile_path):
    """ Read the PID recorded in the named PID file.

    Read and return the numeric PID recorded as text in the named
    PID file. If the PID file cannot be read, or if the content is
    not a valid PID, return ``None``.
    """
    # A context manager guarantees the file is closed even if readline
    # raises (the original only closed it on the success path).
    try:
        with open(pidfile_path, 'r') as pidfile:
            # According to the FHS 2.3 section on PID files in /var/run:
            #
            #   The file must consist of the process identifier in
            #   ASCII-encoded decimal, followed by a newline character.
            #
            # Programs that read PID files should be somewhat flexible
            # in what they accept; i.e., they should ignore extra
            # whitespace, leading zeroes, absence of the trailing
            # newline, or additional lines in the PID file.
            line = pidfile.readline().strip()
    except IOError:
        return None
    try:
        return int(line)
    except ValueError:
        return None
def write_pid_to_pidfile(pidfile_path):
    """ Write the PID in the named PID file.

    Get the numeric process ID ("PID") of the current process
    and write it to the named file as a line of text.

    Raises OSError with errno EEXIST if the file already exists; the
    atomic O_CREAT | O_EXCL creation is what makes this usable as a
    lock primitive.
    """
    open_flags = (os.O_CREAT | os.O_EXCL | os.O_WRONLY)
    open_mode = 0o644
    pidfile_fd = os.open(pidfile_path, open_flags, open_mode)
    # A context manager guarantees the descriptor is closed even if the
    # write raises (the original leaked it on error).
    with os.fdopen(pidfile_fd, 'w') as pidfile:
        # According to the FHS 2.3 section on PID files in /var/run:
        #
        #   The file must consist of the process identifier in
        #   ASCII-encoded decimal, followed by a newline character. For
        #   example, if crond was process number 25, /var/run/crond.pid
        #   would contain three characters: two, five, and newline.
        pid = os.getpid()
        pidfile.write("%s\n" % pid)
def remove_existing_pidfile(pidfile_path):
    """ Remove the named PID file if it exists.

    Removing a PID file that doesn't already exist puts us in the
    desired state, so we ignore the condition if the file does not
    exist.
    """
    try:
        os.remove(pidfile_path)
    except OSError as exc:
        # A missing file already satisfies the postcondition; anything
        # else is a real error and must propagate.
        if exc.errno != errno.ENOENT:
            raise
| gpl-3.0 |
boegel/easybuild-easyblocks | easybuild/easyblocks/p/pytorch.py | 1 | 11524 | ##
# Copyright 2020-2020 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing PyTorch, implemented as an easyblock
@author: Alexander Grund (TU Dresden)
"""
import os
import re
import tempfile
from distutils.version import LooseVersion
from easybuild.easyblocks.generic.pythonpackage import PythonPackage
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.config import build_option
import easybuild.tools.environment as env
from easybuild.tools.modules import get_software_root
from easybuild.tools.systemtools import POWER, get_cpu_architecture
class EB_PyTorch(PythonPackage):
    """Support for building/installing PyTorch."""

    @staticmethod
    def extra_options():
        """Define custom easyconfig parameters for the PyTorch easyblock."""
        extra_vars = PythonPackage.extra_options()
        extra_vars.update({
            'excluded_tests': [{}, 'Mapping of architecture strings to list of tests to be excluded', CUSTOM],
            'custom_opts': [[], 'List of options for the build/install command. Can be used to change the defaults ' +
                            'set by the PyTorch EasyBlock, for example ["USE_MKLDNN=0"].', CUSTOM]
        })
        extra_vars['download_dep_fail'][0] = True
        extra_vars['sanity_pip_check'][0] = True
        return extra_vars

    def __init__(self, *args, **kwargs):
        """Constructor for PyTorch easyblock."""
        super(EB_PyTorch, self).__init__(*args, **kwargs)
        self.options['modulename'] = 'torch'
        # Test as-if pytorch was installed
        self.testinstall = True
        self.tmpdir = tempfile.mkdtemp(suffix='-pytorch-build')

    def fetch_step(self, skip_checksums=False):
        """Fetch sources for installing PyTorch, including those for tests."""
        super(EB_PyTorch, self).fetch_step(skip_checksums)
        # Resolve tests early to avoid failures later. Use obtain_file if path is not absolute
        tests = [test if os.path.isabs(test) else self.obtain_file(test) for test in self.cfg['tests']]
        self.cfg['tests'] = tests

    @staticmethod
    def get_dependency_options_for_version(pytorch_version):
        """
        PyTorch can enable some functionality based on available software or use system software instead of a submodule
        This returns EasyBuild names of that and the flag that should be used when the dependency is found
        The result is a list of tuples (enable_flag, eb_name)
        """
        pytorch_version = LooseVersion(pytorch_version)

        def is_version_ok(version_range):
            """Return True if the PyTorch version to be installed matches the version_range"""
            min_version, max_version = version_range.split(':')
            result = True
            if min_version and pytorch_version < LooseVersion(min_version):
                result = False
            if max_version and pytorch_version >= LooseVersion(max_version):
                result = False
            return result

        available_libs = (
            # Format: (PyTorch flag to enable, EB name, '<min version>:<exclusive max version>')
            # Use `None` for the EB name if no known EC exists
            ('USE_FFMPEG=1', 'FFmpeg', '1.0.0:'),
            ('USE_GFLAGS=1', 'gflags', '1.0.0:'),
            ('USE_GLOG=1', 'glog', '1.0.0:'),
            # For system libs check CMakeLists.txt, below `if(USE_SYSTEM_LIBS)`, order kept here
            # NCCL handled specially as other env variables are required for it
            ('USE_SYSTEM_CPUINFO=1', None, '1.6.0:'),
            ('USE_SYSTEM_SLEEF=1', None, '1.6.0:'),
            ('USE_SYSTEM_GLOO=1', None, '1.6.0:'),
            ('BUILD_CUSTOM_PROTOBUF=0', 'protobuf', '1.2.0:'),
            ('USE_SYSTEM_EIGEN_INSTALL=1', 'Eigen', '1.0.0:'),
            ('USE_SYSTEM_FP16=1', None, '1.6.0:'),
            ('USE_SYSTEM_PTHREADPOOL=1', None, '1.6.0:'),
            ('USE_SYSTEM_PSIMD=1', None, '1.6.0:'),
            ('USE_SYSTEM_FXDIV=1', None, '1.6.0:'),
            ('USE_SYSTEM_BENCHMARK=1', None, '1.6.0:'),  # Google Benchmark
            ('USE_SYSTEM_ONNX=1', None, '1.6.0:'),
            ('USE_SYSTEM_XNNPACK=1', None, '1.6.0:'),
        )
        return [(enable_opt, dep_name) for enable_opt, dep_name, version_range in available_libs
                if is_version_ok(version_range)]

    def configure_step(self):
        """Custom configure procedure for PyTorch."""
        super(EB_PyTorch, self).configure_step()

        # Gather default options. Will be checked against (and can be overwritten by) custom_opts
        options = ['PYTORCH_BUILD_VERSION=' + self.version, 'PYTORCH_BUILD_NUMBER=1']

        # enable verbose mode when --debug is used (to show compiler commands)
        if build_option('debug'):
            options.append('VERBOSE=1')

        # Restrict parallelism
        options.append('MAX_JOBS=%s' % self.cfg['parallel'])

        # BLAS Interface
        if get_software_root('imkl'):
            options.append('BLAS=MKL')
            options.append('INTEL_MKL_DIR=$MKLROOT')
        else:
            # This is what PyTorch defaults to if no MKL is found. Make this explicit here
            options.append('BLAS=Eigen')

        # Translate EC dependencies into the corresponding PyTorch flags.
        available_dependency_options = EB_PyTorch.get_dependency_options_for_version(self.version)
        dependency_names = set(dep['name'] for dep in self.cfg.dependencies())
        not_used_dep_names = []
        for enable_opt, dep_name in available_dependency_options:
            if dep_name is None:
                continue
            if dep_name in dependency_names:
                options.append(enable_opt)
            else:
                not_used_dep_names.append(dep_name)
        self.log.info('Did not enable options for the following dependencies as they are not used in the EC: %s',
                      not_used_dep_names)

        # Use Infiniband by default
        # you can disable this by including 'USE_IBVERBS=0' in 'custom_opts' in the easyconfig file
        options.append('USE_IBVERBS=1')

        if get_software_root('CUDA'):
            options.append('USE_CUDA=1')

            cudnn_root = get_software_root('cuDNN')
            if cudnn_root:
                options.append('CUDNN_LIB_DIR=' + os.path.join(cudnn_root, 'lib64'))
                options.append('CUDNN_INCLUDE_DIR=' + os.path.join(cudnn_root, 'include'))

            nccl_root = get_software_root('NCCL')
            if nccl_root:
                options.append('USE_SYSTEM_NCCL=1')
                options.append('NCCL_INCLUDE_DIR=' + os.path.join(nccl_root, 'include'))

            # list of CUDA compute capabilities to use can be specified in two ways (where (2) overrules (1)):
            # (1) in the easyconfig file, via the custom cuda_compute_capabilities;
            # (2) in the EasyBuild configuration, via --cuda-compute-capabilities configuration option;
            cuda_cc = build_option('cuda_compute_capabilities') or self.cfg['cuda_compute_capabilities']
            if not cuda_cc:
                raise EasyBuildError('List of CUDA compute capabilities must be specified, either via '
                                     'cuda_compute_capabilities easyconfig parameter or via '
                                     '--cuda-compute-capabilities')

            self.log.info('Compiling with specified list of CUDA compute capabilities: %s', ', '.join(cuda_cc))
            options.append('TORCH_CUDA_ARCH_LIST="%s"' % ';'.join(cuda_cc))
        else:
            # Disable CUDA
            options.append('USE_CUDA=0')

        if get_cpu_architecture() == POWER:
            # *NNPACK is not supported on Power, disable to avoid warnings
            options.extend(['USE_NNPACK=0', 'USE_QNNPACK=0', 'USE_PYTORCH_QNNPACK=0', 'USE_XNNPACK=0'])

        # Metal only supported on IOS which likely doesn't work with EB, so disabled
        options.append('USE_METAL=0')

        # custom_opts wins over any default computed above.
        unique_options = self.cfg['custom_opts']
        for option in options:
            name = option.split('=')[0] + '='  # Include the equals sign to avoid partial matches
            if not any(opt.startswith(name) for opt in unique_options):
                unique_options.append(option)

        self.cfg.update('prebuildopts', ' '.join(unique_options) + ' ')
        self.cfg.update('preinstallopts', ' '.join(unique_options) + ' ')

    def test_step(self):
        """Run unit tests"""
        # Make PyTorch tests not use the user home
        env.setvar('XDG_CACHE_HOME', os.path.join(self.tmpdir, '.cache'))
        # Parse excluded_tests and flatten into space separated string
        excluded_tests = []
        for arch, tests in self.cfg['excluded_tests'].items():
            if not arch or arch == get_cpu_architecture():
                excluded_tests.extend(tests)
        # -x should not be used if there are no excluded tests
        if excluded_tests:
            excluded_tests = ['-x'] + excluded_tests

        self.cfg.template_values.update({
            'python': self.python_cmd,
            'excluded_tests': ' '.join(excluded_tests)
        })
        super(EB_PyTorch, self).test_step()

    def test_cases_step(self):
        """Run test cases, with the user home redirected to a scratch dir."""
        # Make PyTorch tests not use the user home
        env.setvar('XDG_CACHE_HOME', os.path.join(self.tmpdir, '.cache'))
        super(EB_PyTorch, self).test_cases_step()

    def sanity_check_step(self, *args, **kwargs):
        """Custom sanity check for PyTorch"""
        if self.cfg.get('download_dep_fail', True):
            # CMake might mistakenly download dependencies during configure
            self.log.info('Checking for downloaded submodules')
            pattern = r'^-- Downloading (\w+) to /'
            downloaded_deps = re.findall(pattern, self.install_cmd_output, re.M)
            if downloaded_deps:
                self.log.info('Found downloaded submodules: %s', ', '.join(downloaded_deps))
                fail_msg = 'found one or more downloaded dependencies: %s' % ', '.join(downloaded_deps)
                self.sanity_check_fail_msgs.append(fail_msg)
        super(EB_PyTorch, self).sanity_check_step(*args, **kwargs)

    def make_module_req_guess(self):
        """Set extra environment variables for PyTorch."""
        guesses = super(EB_PyTorch, self).make_module_req_guess()
        guesses['CMAKE_PREFIX_PATH'] = [os.path.join(self.pylibdir, 'torch')]
        # Required to dynamically load libcaffe2_nvrtc.so
        guesses['LD_LIBRARY_PATH'] = [os.path.join(self.pylibdir, 'torch', 'lib')]
        return guesses
| gpl-2.0 |
ya7lelkom/googleads-python-lib | examples/dfp/v201505/creative_template_service/get_creative_templates_by_statement.py | 3 | 2383 | #!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all system defined creative templates.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: CreativeTemplateService.getCreativeTemplatesByStatement
"""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
    """Print every system-defined creative template, paging through results."""
    # Initialize appropriate service.
    creative_template_service = client.GetService(
        'CreativeTemplateService', version='v201505')

    # Create statement object to only select system defined creative templates.
    values = [{
        'key': 'creativeTemplateType',
        'value': {
            'xsi_type': 'TextValue',
            'value': 'SYSTEM_DEFINED'
        }
    }]
    query = 'WHERE type = :creativeTemplateType'
    statement = dfp.FilterStatement(query, values)

    # Get creative templates by statement.
    while True:
        # Fetch one page per iteration, advancing the statement offset.
        response = creative_template_service.getCreativeTemplatesByStatement(
            statement.ToStatement())
        if 'results' in response:
            # Display results.
            for template in response['results']:
                print ('Creative template with id \'%s\', name \'%s\', and type \'%s\' '
                       'was found.' % (template['id'],
                                       template['name'],
                                       template['type']))
            statement.offset += dfp.SUGGESTED_PAGE_LIMIT
        else:
            break

    print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
    # Initialize client object.
    # Credentials/properties are read from ~/googleads.yaml by default.
    dfp_client = dfp.DfpClient.LoadFromStorage()
    main(dfp_client)
| apache-2.0 |
tempbottle/ironpython3 | Src/StdLib/Lib/lib2to3/fixes/fix_metaclass.py | 88 | 8201 | """Fixer for __metaclass__ = X -> (metaclass=X) methods.
The various forms of classdef (inherits nothing, inherits once, inherits
many) don't parse the same in the CST so we look at ALL classes for
a __metaclass__ and if we find one normalize the inherits to all be
an arglist.
For one-liner classes ('class X: pass') there is no indent/dedent so
we normalize those into having a suite.
Moving the __metaclass__ into the classdef can also cause the class
body to be empty so there is some special casing for that as well.
This fixer also tries very hard to keep original indenting and spacing
in all those corner cases.
"""
# Author: Jack Diederich
# Local imports
from .. import fixer_base
from ..pygram import token
from ..fixer_util import Name, syms, Node, Leaf
def has_metaclass(parent):
    """ we have to check the cls_node without changing it.
        There are two possibilities:
           1)  clsdef => suite => simple_stmt => expr_stmt => Leaf('__meta')
           2)  clsdef => simple_stmt => expr_stmt => Leaf('__meta')
    """
    for node in parent.children:
        if node.type == syms.suite:
            # Case 1: recurse into the class body suite.
            return has_metaclass(node)
        elif node.type == syms.simple_stmt and node.children:
            # Case 2: one-liner class body, inspect the statement directly.
            expr_node = node.children[0]
            if expr_node.type == syms.expr_stmt and expr_node.children:
                left_side = expr_node.children[0]
                if isinstance(left_side, Leaf) and \
                   left_side.value == '__metaclass__':
                    return True
    return False
def fixup_parse_tree(cls_node):
    """ one-line classes don't get a suite in the parse tree so we add
        one to normalize the tree
    """
    for node in cls_node.children:
        if node.type == syms.suite:
            # already in the preferred format, do nothing
            return

    # !%@#! oneliners have no suite node, we have to fake one up
    # Find the colon that ends the class header.
    for i, node in enumerate(cls_node.children):
        if node.type == token.COLON:
            break
    else:
        raise ValueError("No class suite and no ':'!")

    # move everything into a suite node
    suite = Node(syms.suite, [])
    while cls_node.children[i+1:]:
        move_node = cls_node.children[i+1]
        suite.append_child(move_node.clone())
        move_node.remove()
    cls_node.append_child(suite)
    node = suite
def fixup_simple_stmt(parent, i, stmt_node):
    """ if there is a semi-colon all the parts count as part of the same
        simple_stmt.  We just want the __metaclass__ part so we move
        everything after the semi-colon into its own simple_stmt node
    """
    for semi_ind, node in enumerate(stmt_node.children):
        if node.type == token.SEMI:  # *sigh*
            break
    else:
        # No semicolon: nothing to split off.
        return

    node.remove()  # kill the semicolon
    new_expr = Node(syms.expr_stmt, [])
    new_stmt = Node(syms.simple_stmt, [new_expr])
    while stmt_node.children[semi_ind:]:
        move_node = stmt_node.children[semi_ind]
        new_expr.append_child(move_node.clone())
        move_node.remove()
    parent.insert_child(i, new_stmt)
    # Preserve the original prefix (leading whitespace/comments) on the
    # code that was moved into the new statement.
    new_leaf1 = new_stmt.children[0].children[0]
    old_leaf1 = stmt_node.children[0].children[0]
    new_leaf1.prefix = old_leaf1.prefix
def remove_trailing_newline(node):
    """Drop the trailing NEWLINE leaf from *node*'s children, if present."""
    children = node.children
    if children and children[-1].type == token.NEWLINE:
        children[-1].remove()
def find_metas(cls_node):
    """Yield (suite, index, stmt) for each __metaclass__ assignment
    found in the class body suite."""
    # find the suite node (Mmm, sweet nodes)
    for node in cls_node.children:
        if node.type == syms.suite:
            break
    else:
        raise ValueError("No class suite!")

    # look for simple_stmt[ expr_stmt[ Leaf('__metaclass__') ] ]
    # (list() snapshots the children since they are mutated while iterating)
    for i, simple_node in list(enumerate(node.children)):
        if simple_node.type == syms.simple_stmt and simple_node.children:
            expr_node = simple_node.children[0]
            if expr_node.type == syms.expr_stmt and expr_node.children:
                # Check if the expr_node is a simple assignment.
                left_node = expr_node.children[0]
                if isinstance(left_node, Leaf) and \
                   left_node.value == '__metaclass__':
                    # We found an assignment to __metaclass__.
                    fixup_simple_stmt(node, i, simple_node)
                    remove_trailing_newline(simple_node)
                    yield (node, i, simple_node)
def fixup_indent(suite):
    """ If an INDENT is followed by a thing with a prefix then nuke the prefix
        Otherwise we get in trouble when removing __metaclass__ at suite start
    """
    # Walk the suite depth-first; children are pushed reversed so they pop
    # in document order.
    kids = suite.children[::-1]
    # find the first indent
    while kids:
        node = kids.pop()
        if node.type == token.INDENT:
            break

    # find the first Leaf after the INDENT (skipping DEDENTs) and clear its
    # prefix, which would otherwise carry the removed statement's whitespace.
    while kids:
        node = kids.pop()
        if isinstance(node, Leaf) and node.type != token.DEDENT:
            if node.prefix:
                node.prefix = ''
            return
        else:
            kids.extend(node.children[::-1])
class FixMetaclass(fixer_base.BaseFix):
    """Fixer converting ``__metaclass__ = Meta`` class-body assignments into
    the Python 3 ``class X(metaclass=Meta)`` keyword-argument form.
    """
    BM_compatible = True

    PATTERN = """
    classdef<any*>
    """

    def transform(self, node, results):
        """Rewrite *node* (a classdef) to use the metaclass keyword argument.

        Removes every ``__metaclass__`` assignment from the class body,
        appends ``metaclass=...`` (taken from the last assignment found) to
        the class's argument list — creating parentheses/arglist as needed —
        and patches the suite so the body stays valid (inserting ``pass``
        when the body becomes empty).
        """
        if not has_metaclass(node):
            return

        fixup_parse_tree(node)

        # find metaclasses, keep the last one
        last_metaclass = None
        for suite, i, stmt in find_metas(node):
            last_metaclass = stmt
            stmt.remove()

        text_type = node.children[0].type # always Leaf(nnn, 'class')

        # figure out what kind of classdef we have
        if len(node.children) == 7:
            # Node(classdef, ['class', 'name', '(', arglist, ')', ':', suite])
            #                 0        1       2    3        4    5    6
            if node.children[3].type == syms.arglist:
                arglist = node.children[3]
            # Node(classdef, ['class', 'name', '(', 'Parent', ')', ':', suite])
            else:
                parent = node.children[3].clone()
                arglist = Node(syms.arglist, [parent])
                node.set_child(3, arglist)
        elif len(node.children) == 6:
            # Node(classdef, ['class', 'name', '(',  ')', ':', suite])
            #                 0        1       2     3    4    5
            arglist = Node(syms.arglist, [])
            node.insert_child(3, arglist)
        elif len(node.children) == 4:
            # Node(classdef, ['class', 'name', ':', suite])
            #                 0        1       2    3
            arglist = Node(syms.arglist, [])
            node.insert_child(2, Leaf(token.RPAR, ')'))
            node.insert_child(2, arglist)
            node.insert_child(2, Leaf(token.LPAR, '('))
        else:
            raise ValueError("Unexpected class definition")

        # now stick the metaclass in the arglist
        meta_txt = last_metaclass.children[0].children[0]
        meta_txt.value = 'metaclass'
        orig_meta_prefix = meta_txt.prefix

        if arglist.children:
            arglist.append_child(Leaf(token.COMMA, ','))
            meta_txt.prefix = ' '
        else:
            meta_txt.prefix = ''

        # compact the expression "metaclass = Meta" -> "metaclass=Meta"
        expr_stmt = last_metaclass.children[0]
        assert expr_stmt.type == syms.expr_stmt
        expr_stmt.children[1].prefix = ''
        expr_stmt.children[2].prefix = ''

        arglist.append_child(last_metaclass)

        fixup_indent(suite)

        # check for empty suite
        if not suite.children:
            # one-liner that was just __metaclass_
            suite.remove()
            pass_leaf = Leaf(text_type, 'pass')
            pass_leaf.prefix = orig_meta_prefix
            node.append_child(pass_leaf)
            node.append_child(Leaf(token.NEWLINE, '\n'))
        elif len(suite.children) > 1 and \
                (suite.children[-2].type == token.INDENT and
                 suite.children[-1].type == token.DEDENT):
            # there was only one line in the class body and it was __metaclass__
            pass_leaf = Leaf(text_type, 'pass')
            suite.insert_child(-1, pass_leaf)
            suite.insert_child(-1, Leaf(token.NEWLINE, '\n'))
SRabbelier/Melange | thirdparty/google_appengine/google/appengine/api/taskqueue/taskqueue_stub.py | 1 | 36937 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Stub version of the Task Queue API.
This stub stores tasks and runs them via dev_appserver's AddEvent capability.
It also validates the tasks by checking their queue name against the queue.yaml.
As well as implementing Task Queue API functions, the stub exposes various other
functions that are used by the dev_appserver's admin console to display the
application's queues and tasks.
"""
import StringIO
import base64
import bisect
import datetime
import logging
import os
import random
import string
import time
import taskqueue_service_pb
from google.appengine.api import api_base_pb
from google.appengine.api import apiproxy_stub
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import queueinfo
from google.appengine.runtime import apiproxy_errors
DEFAULT_RATE = '5.00/s'  # Rate reported for queues without a queue.yaml rate.

DEFAULT_BUCKET_SIZE = 5  # Bucket size reported when queue.yaml omits one.

MAX_ETA_DELTA_DAYS = 30  # Tasks may be scheduled at most this far ahead.

# Maps (app_id, queue_name) to a _DummyTaskStore of fake admin-console tasks.
admin_console_dummy_tasks = {}

# Headers the stub synthesizes itself; filtered out of user-visible headers.
BUILT_IN_HEADERS = set(['x-appengine-queuename',
                        'x-appengine-taskname',
                        'x-appengine-taskretrycount',
                        'x-appengine-development-payload',
                        'content-length'])

DEFAULT_QUEUE_NAME = 'default'

# Internal queue used for cron jobs.
CRON_QUEUE_NAME = '__cron'
class _DummyTaskStore(object):
"""A class that encapsulates a sorted store of tasks.
Used for testing the admin console.
"""
def __init__(self):
"""Constructor."""
self._sorted_by_name = []
self._sorted_by_eta = []
def _InsertTask(self, task):
"""Insert a task into the dummy store, keeps lists sorted.
Args:
task: the new task.
"""
eta = task.eta_usec()
name = task.task_name()
bisect.insort_left(self._sorted_by_eta, (eta, name, task))
bisect.insort_left(self._sorted_by_name, (name, task))
def Lookup(self, maximum, name=None, eta=None):
"""Lookup a number of sorted tasks from the store.
If 'eta' is specified, the tasks are looked up in a list sorted by 'eta',
then 'name'. Otherwise they are sorted by 'name'. We need to be able to
sort by 'eta' and 'name' because tasks can have identical eta. If you had
20 tasks with the same ETA, you wouldn't be able to page past them, since
the 'next eta' would give the first one again. Names are unique, though.
Args:
maximum: the maximum number of tasks to return.
name: a task name to start with.
eta: an eta to start with.
Returns:
A list of up to 'maximum' tasks.
Raises:
ValueError: if the task store gets corrupted.
"""
if eta is None:
pos = bisect.bisect_left(self._sorted_by_name, (name,))
tasks = (x[1] for x in self._sorted_by_name[pos:pos + maximum])
return list(tasks)
if name is None:
raise ValueError('must supply name or eta')
pos = bisect.bisect_left(self._sorted_by_eta, (eta, name))
tasks = (x[2] for x in self._sorted_by_eta[pos:pos + maximum])
return list(tasks)
def Count(self):
"""Returns the number of tasks in the store."""
return len(self._sorted_by_name)
def Oldest(self):
"""Returns the oldest eta in the store, or None if no tasks."""
if self._sorted_by_eta:
return self._sorted_by_eta[0][0]
return None
def Add(self, request):
"""Inserts a new task into the store.
Args:
request: A taskqueue_service_pb.TaskQueueAddRequest.
Raises:
apiproxy_errors.ApplicationError: If a task with the same name is already
in the store.
"""
pos = bisect.bisect_left(self._sorted_by_name, (request.task_name(),))
if (pos < len(self._sorted_by_name) and
self._sorted_by_name[pos][0] == request.task_name()):
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.TASK_ALREADY_EXISTS)
now = datetime.datetime.utcnow()
now_sec = time.mktime(now.timetuple())
task = taskqueue_service_pb.TaskQueueQueryTasksResponse_Task()
task.set_task_name(request.task_name())
task.set_eta_usec(request.eta_usec())
task.set_creation_time_usec(now_sec * 1e6)
task.set_url(request.url())
task.set_method(request.method())
for keyvalue in task.header_list():
header = task.add_header()
header.set_key(keyvalue.key())
header.set_value(keyvalue.value())
if request.has_description():
task.set_description(request.description())
if request.has_body():
task.set_body(request.body())
if request.has_crontimetable():
task.mutable_crontimetable().set_schedule(
request.crontimetable().schedule())
task.mutable_crontimetable().set_timezone(
request.crontimetable().timezone())
self._InsertTask(task)
def Delete(self, name):
"""Deletes a task from the store by name.
Args:
name: the name of the task to delete.
Returns:
TaskQueueServiceError.UNKNOWN_TASK: if the task is unknown.
TaskQueueServiceError.INTERNAL_ERROR: if the store is corrupted.
TaskQueueServiceError.OK: otherwise.
"""
pos = bisect.bisect_left(self._sorted_by_name, (name,))
if pos >= len(self._sorted_by_name):
return taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_TASK
if self._sorted_by_name[pos][1].task_name() != name:
logging.info('looking for task name %s, got task name %s', name,
self._sorted_by_name[pos][1].task_name())
return taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_TASK
old_task = self._sorted_by_name.pop(pos)[1]
eta = old_task.eta_usec()
pos = bisect.bisect_left(self._sorted_by_eta, (eta, name, None))
if self._sorted_by_eta[pos][2] is not old_task:
logging.error('task store corrupted')
return taskqueue_service_pb.TaskQueueServiceError.INTERNAL_ERRROR
self._sorted_by_eta.pop(pos)
return taskqueue_service_pb.TaskQueueServiceError.OK
def Populate(self, num_tasks):
"""Populates the store with a number of tasks.
Args:
num_tasks: the number of tasks to insert.
"""
now = datetime.datetime.utcnow()
now_sec = time.mktime(now.timetuple())
def RandomTask():
"""Creates a new task and randomly populates values."""
task = taskqueue_service_pb.TaskQueueQueryTasksResponse_Task()
task.set_task_name(''.join(random.choice(string.ascii_lowercase)
for x in range(20)))
task.set_eta_usec(int(now_sec * 1e6) + random.randint(-10e6, 600e6))
task.set_creation_time_usec(min(now_sec * 1e6, task.eta_usec()) -
random.randint(0, 2e7))
task.set_url(random.choice(['/a', '/b', '/c', '/d']))
if random.random() < 0.2:
task.set_method(
taskqueue_service_pb.TaskQueueQueryTasksResponse_Task.POST)
task.set_body('A' * 2000)
else:
task.set_method(
taskqueue_service_pb.TaskQueueQueryTasksResponse_Task.GET)
task.set_retry_count(max(0, random.randint(-10, 5)))
if random.random() < 0.3:
random_headers = [('nexus', 'one'),
('foo', 'bar'),
('content-type', 'text/plain'),
('from', 'user@email.com')]
for _ in xrange(random.randint(1, 4)):
elem = random.randint(0, len(random_headers)-1)
key, value = random_headers.pop(elem)
header_proto = task.add_header()
header_proto.set_key(key)
header_proto.set_value(value)
return task
for _ in range(num_tasks):
self._InsertTask(RandomTask())
def _ParseQueueYaml(unused_self, root_path):
"""Loads the queue.yaml file and parses it.
Args:
unused_self: Allows this function to be bound to a class member. Not used.
root_path: Directory containing queue.yaml. Not used.
Returns:
None if queue.yaml doesn't exist, otherwise a queueinfo.QueueEntry object
populated from the queue.yaml.
"""
if root_path is None:
return None
for queueyaml in ('queue.yaml', 'queue.yml'):
try:
fh = open(os.path.join(root_path, queueyaml), 'r')
except IOError:
continue
try:
queue_info = queueinfo.LoadSingleQueue(fh)
return queue_info
finally:
fh.close()
return None
def _CompareTasksByEta(a, b):
"""Python sort comparator for tasks by estimated time of arrival (ETA).
Args:
a: A taskqueue_service_pb.TaskQueueAddRequest.
b: A taskqueue_service_pb.TaskQueueAddRequest.
Returns:
Standard 1/0/-1 comparison result.
"""
if a.eta_usec() > b.eta_usec():
return 1
if a.eta_usec() < b.eta_usec():
return -1
return 0
def _FormatEta(eta_usec):
"""Formats a task ETA as a date string in UTC."""
eta = datetime.datetime.fromtimestamp(eta_usec/1000000)
return eta.strftime('%Y/%m/%d %H:%M:%S')
def _EtaDelta(eta_usec):
"""Formats a task ETA as a relative time string."""
eta = datetime.datetime.fromtimestamp(eta_usec/1000000)
now = datetime.datetime.utcnow()
if eta > now:
return str(eta - now) + ' from now'
else:
return str(now - eta) + ' ago'
class TaskQueueServiceStub(apiproxy_stub.APIProxyStub):
  """Python only task queue service stub.

  This stub executes tasks when enabled by using the dev_appserver's AddEvent
  capability. When task running is disabled this stub will store tasks for
  display on a console, where the user may manually execute the tasks.
  """

  # Stored as a class attribute so tests can substitute a different parser;
  # the function's first parameter is the (unused) bound instance.
  queue_yaml_parser = _ParseQueueYaml
def __init__(self,
             service_name='taskqueue',
             root_path=None,
             auto_task_running=False,
             task_retry_seconds=30,
             _all_queues_valid=False):
  """Constructor.

  Args:
    service_name: Service name expected for all calls.
    root_path: Root path to the directory of the application which may contain
      a queue.yaml file. If None, then it's assumed no queue.yaml file is
      available.
    auto_task_running: When True, the dev_appserver should automatically
      run tasks after they are enqueued.
    task_retry_seconds: How long to wait between task executions after a
      task fails.
    _all_queues_valid: When True, queue-name validation is skipped entirely
      and every queue name is accepted.
  """
  super(TaskQueueServiceStub, self).__init__(service_name)
  # queue_name -> list of TaskQueueAddRequest, kept sorted by eta.
  self._taskqueues = {}
  self._next_task_id = 1
  self._root_path = root_path
  self._all_queues_valid = _all_queues_valid

  # Callback supplied by the dev_appserver to schedule task execution.
  self._add_event = None
  self._auto_task_running = auto_task_running
  self._task_retry_seconds = task_retry_seconds

  # app_id -> {queue_name: _QueueDetails, or None for a tombstoned queue}.
  self._app_queues = {}
class _QueueDetails(taskqueue_service_pb.TaskQueueUpdateQueueRequest):
  # Wrapper around an UpdateQueue request that additionally tracks the
  # queue's paused state (read by FetchQueues, written by PauseQueue).
  # NOTE(review): the parent pb class's __init__ is not called here —
  # callers appear to rely on CopyFrom immediately after construction;
  # confirm the pb base tolerates this before reusing the class elsewhere.
  def __init__(self, paused=False):
    self.paused = paused
def _ChooseTaskName(self):
  """Returns a string containing a unique task name."""
  # Capture the current id, then advance the counter for the next caller.
  name = 'task%d' % self._next_task_id
  self._next_task_id += 1
  return name
def _VerifyTaskQueueAddRequest(self, request):
  """Checks that a TaskQueueAddRequest is valid.

  Validates the request's eta: it must be non-negative and no further in the
  future than MAX_ETA_DELTA_DAYS.

  Args:
    request: The taskqueue_service_pb.TaskQueueAddRequest to validate.

  Returns:
    A taskqueue_service_pb.TaskQueueServiceError indicating any problems with
    the request or taskqueue_service_pb.TaskQueueServiceError.OK if it is
    valid.
  """
  eta_usec = request.eta_usec()
  if eta_usec < 0:
    return taskqueue_service_pb.TaskQueueServiceError.INVALID_ETA

  eta = datetime.datetime.utcfromtimestamp(eta_usec / 1e6)
  horizon = (datetime.datetime.utcnow() +
             datetime.timedelta(days=MAX_ETA_DELTA_DAYS))
  if eta > horizon:
    return taskqueue_service_pb.TaskQueueServiceError.INVALID_ETA

  return taskqueue_service_pb.TaskQueueServiceError.OK
def _Dynamic_Add(self, request, response):
  """Local implementation of the Add RPC; delegates to BulkAdd.

  Args:
    request: A taskqueue_service_pb.TaskQueueAddRequest.
    response: A taskqueue_service_pb.TaskQueueAddResponse.

  Raises:
    apiproxy_errors.ApplicationError: if the wrapped add did not succeed.
  """
  # Wrap the single request in a BulkAdd so all validation and queueing
  # logic lives in one place.
  bulk_request = taskqueue_service_pb.TaskQueueBulkAddRequest()
  bulk_response = taskqueue_service_pb.TaskQueueBulkAddResponse()

  bulk_request.add_add_request().CopyFrom(request)
  self._Dynamic_BulkAdd(bulk_request, bulk_response)

  assert bulk_response.taskresult_size() == 1
  result = bulk_response.taskresult(0).result()

  if result != taskqueue_service_pb.TaskQueueServiceError.OK:
    raise apiproxy_errors.ApplicationError(result)
  elif bulk_response.taskresult(0).has_chosen_task_name():
    response.set_chosen_task_name(
        bulk_response.taskresult(0).chosen_task_name())
def _Dynamic_BulkAdd(self, request, response):
  """Add many tasks to a queue using a single request.

  Args:
    request: The taskqueue_service_pb.TaskQueueBulkAddRequest. See
        taskqueue_service.proto.
    response: The taskqueue_service_pb.TaskQueueBulkAddResponse. See
        taskqueue_service.proto.

  Raises:
    apiproxy_errors.ApplicationError: UNKNOWN_QUEUE if the (shared) queue
      name of the batch is not valid.
  """
  assert request.add_request_size(), 'taskqueue should prevent empty requests'

  app_id = None
  if request.add_request(0).has_app_id():
    app_id = request.add_request(0).app_id()

  if not self._IsValidQueue(request.add_request(0).queue_name(), app_id):
    raise apiproxy_errors.ApplicationError(
        taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE)

  error_found = False
  task_results_with_chosen_names = []

  # First pass: validate every add_request. Valid entries get the SKIPPED
  # placeholder result; nothing is enqueued if any entry is invalid.
  for add_request in request.add_request_list():
    task_result = response.add_taskresult()
    error = self._VerifyTaskQueueAddRequest(add_request)
    if error == taskqueue_service_pb.TaskQueueServiceError.OK:
      if not add_request.task_name():
        chosen_name = self._ChooseTaskName()
        add_request.set_task_name(chosen_name)
        # Remember which results need chosen_task_name filled in at the end.
        task_results_with_chosen_names.append(task_result)
      task_result.set_result(
          taskqueue_service_pb.TaskQueueServiceError.SKIPPED)
    else:
      error_found = True
      task_result.set_result(error)
  if error_found:
    return

  # Second pass: route the whole batch to the appropriate backend.
  if request.add_request(0).has_transaction():
    self._TransactionalBulkAdd(request)
  elif request.add_request(0).has_app_id():
    self._DummyTaskStoreBulkAdd(request, response)
  else:
    self._NonTransactionalBulkAdd(request, response)

  # Promote remaining SKIPPED placeholders to OK and report chosen names.
  for add_request, task_result in zip(request.add_request_list(),
                                      response.taskresult_list()):
    if (task_result.result() ==
        taskqueue_service_pb.TaskQueueServiceError.SKIPPED):
      task_result.set_result(taskqueue_service_pb.TaskQueueServiceError.OK)
    if task_result in task_results_with_chosen_names:
      task_result.set_chosen_task_name(add_request.task_name())
def _TransactionalBulkAdd(self, request):
  """Uses datastore.AddActions to associate tasks with a transaction.

  Args:
    request: The taskqueue_service_pb.TaskQueueBulkAddRequest containing the
        tasks to add. N.B. all tasks in the request have been validated and
        assigned unique names.

  Raises:
    apiproxy_errors.ApplicationError: remapped from any datastore error.
  """
  try:
    apiproxy_stub_map.MakeSyncCall(
        'datastore_v3', 'AddActions', request, api_base_pb.VoidProto())
  except apiproxy_errors.ApplicationError, e:
    # Shift the datastore error code into the taskqueue error space by
    # offsetting it with the DATASTORE_ERROR base value.
    raise apiproxy_errors.ApplicationError(
        e.application_error +
        taskqueue_service_pb.TaskQueueServiceError.DATASTORE_ERROR,
        e.error_detail)
def _DummyTaskStoreBulkAdd(self, request, response):
  """Adds tasks to the appropriate DummyTaskStore.

  Args:
    request: The taskqueue_service_pb.TaskQueueBulkAddRequest containing the
        tasks to add. N.B. all tasks in the request have been validated and
        those with empty names have been assigned unique names.
    response: The taskqueue_service_pb.TaskQueueBulkAddResponse to populate
        with the results. N.B. the chosen_task_name field in the response will
        not be filled-in.
  """
  # All requests in a batch share the same (app_id, queue_name) store.
  store = self.GetDummyTaskStore(request.add_request(0).app_id(),
                                 request.add_request(0).queue_name())
  for add_request, task_result in zip(request.add_request_list(),
                                      response.taskresult_list()):
    try:
      store.Add(add_request)
    except apiproxy_errors.ApplicationError, e:
      # Record the per-task failure (e.g. TASK_ALREADY_EXISTS) instead of
      # failing the whole batch.
      task_result.set_result(e.application_error)
    else:
      task_result.set_result(taskqueue_service_pb.TaskQueueServiceError.OK)
def _NonTransactionalBulkAdd(self, request, response):
  """Adds tasks to the appropriate list in self._taskqueues.

  Args:
    request: The taskqueue_service_pb.TaskQueueBulkAddRequest containing the
        tasks to add. N.B. all tasks in the request have been validated and
        those with empty names have been assigned unique names.
    response: The taskqueue_service_pb.TaskQueueBulkAddResponse to populate
        with the results. N.B. the chosen_task_name field in the response will
        not be filled-in.
  """
  existing_tasks = self._taskqueues.setdefault(
      request.add_request(0).queue_name(), [])
  existing_task_names = set(task.task_name() for task in existing_tasks)

  def DefineCallback(queue_name, task_name):
    # Factory so each scheduled closure binds its own queue/task names
    # (avoids the late-binding-in-a-loop pitfall).
    return lambda: self._RunTask(queue_name, task_name)

  for add_request, task_result in zip(request.add_request_list(),
                                      response.taskresult_list()):
    if add_request.task_name() in existing_task_names:
      task_result.set_result(
          taskqueue_service_pb.TaskQueueServiceError.TASK_ALREADY_EXISTS)
    else:
      existing_tasks.append(add_request)

      # Automatically schedule the task for execution at its eta when the
      # dev_appserver has enabled auto-running.
      if self._add_event and self._auto_task_running:
        self._add_event(
            add_request.eta_usec() / 1000000.0,
            DefineCallback(add_request.queue_name(), add_request.task_name()))

  # Keep the queue ordered by eta for display and execution order.
  existing_tasks.sort(_CompareTasksByEta)
def _IsValidQueue(self, queue_name, app_id):
  """Determines whether a queue is valid, i.e. tasks can be added to it.

  Valid queues are the 'default' queue, the cron queue, any queue listed in
  queue.yaml, and (when app_id is given) any live queue registered via
  UpdateQueue for that app.

  Args:
    queue_name: the name of the queue to validate.
    app_id: the app_id. Can be None.

  Returns:
    True iff queue is valid.
  """
  if self._all_queues_valid:
    return True
  if queue_name in (DEFAULT_QUEUE_NAME, CRON_QUEUE_NAME):
    return True
  queue_info = self.queue_yaml_parser(self._root_path)
  if queue_info and queue_info.queue:
    if any(entry.name == queue_name for entry in queue_info.queue):
      return True
  if app_id is not None:
    # A None entry is a tombstoned (deleted) queue and therefore invalid.
    return self._app_queues.get(app_id, {}).get(queue_name, None) is not None
  return False
def _RunTask(self, queue_name, task_name):
  """Returns a fake request for running a task in the dev_appserver.

  Args:
    queue_name: The queue the task is in.
    task_name: The name of the task to run.

  Returns:
    None if this task no longer exists or tuple (connection, addrinfo) of
    a fake connection and address information used to run this task. The
    task will be deleted after it runs or re-enqueued in the future on
    failure.
  """
  # Find the task's display dict; if it was deleted meanwhile, do nothing.
  task_list = self.GetTasks(queue_name)
  for task in task_list:
    if task['name'] == task_name:
      break
  else:
    return None

  class FakeConnection(object):
    # Mimics a socket connection: the dev_appserver reads the HTTP request
    # from rfile and writes its response into wfile. Closing wfile is
    # intercepted (via the monkey-patched close below) so that the
    # response status can be inspected when the request finishes.
    def __init__(self, input_buffer):
      self.rfile = StringIO.StringIO(input_buffer)
      self.wfile = StringIO.StringIO()
      self.wfile_close = self.wfile.close
      self.wfile.close = self.connection_done

    # Named 'myself' so that 'self' inside still refers (via closure) to
    # the enclosing TaskQueueServiceStub instance.
    def connection_done(myself):
      result = myself.wfile.getvalue()
      myself.wfile_close()
      # Parse "HTTP/1.1 <code> ..." from the response status line,
      # defaulting to 500 on anything malformed.
      first_line, rest = (result.split('\n', 1) + ['', ''])[:2]
      version, code, rest = (first_line.split(' ', 2) + ['', '500', ''])[:3]
      try:
        code = int(code)
      except ValueError:
        code = 500

      if 200 <= int(code) <= 299:
        # Success: the task is consumed.
        self.DeleteTask(queue_name, task_name)
        return

      # Failure: leave the task queued and schedule a retry.
      logging.warning('Task named "%s" on queue "%s" failed with code %s; '
                      'will retry in %d seconds',
                      task_name, queue_name, code, self._task_retry_seconds)
      self._add_event(
          time.time() + self._task_retry_seconds,
          lambda: self._RunTask(queue_name, task_name))

    def close(self):
      pass

    def makefile(self, mode, buffsize):
      if mode.startswith('w'):
        return self.wfile
      else:
        return self.rfile

  # Serialize the task as a raw HTTP request for the fake connection.
  payload = StringIO.StringIO()
  payload.write('%s %s HTTP/1.1\r\n' % (task['method'], task['url']))
  for key, value in task['headers']:
    payload.write('%s: %s\r\n' % (key, value))
  payload.write('\r\n')
  payload.write(task['body'])

  return FakeConnection(payload.getvalue()), ('0.1.0.2', 80)
def GetQueues(self):
  """Gets all the applications's queues.

  Returns:
    A list of dictionaries, where each dictionary contains one queue's
    attributes. E.g.:
      [{'name': 'some-queue',
        'max_rate': '1/s',
        'bucket_size': 5,
        'oldest_task': '2009/02/02 05:37:42',
        'eta_delta': '0:00:06.342511 ago',
        'tasks_in_queue': 12}, ...]
    The list of queues always includes the default queue.
  """
  queues = []

  def _Describe(name, max_rate, bucket_size, tasks):
    # Builds one queue-attribute dict; 'eta_delta' is only present when the
    # queue has at least one task.
    queue = {'name': name,
             'max_rate': max_rate,
             'bucket_size': bucket_size}
    if tasks:
      queue['oldest_task'] = _FormatEta(tasks[0].eta_usec())
      queue['eta_delta'] = _EtaDelta(tasks[0].eta_usec())
    else:
      queue['oldest_task'] = ''
    queue['tasks_in_queue'] = len(tasks)
    return queue

  queue_info = self.queue_yaml_parser(self._root_path)
  has_default = False
  if queue_info and queue_info.queue:
    for entry in queue_info.queue:
      if entry.name == DEFAULT_QUEUE_NAME:
        has_default = True
      bucket = entry.bucket_size if entry.bucket_size else DEFAULT_BUCKET_SIZE
      # setdefault (not get) on purpose: registers the queue's task list.
      queues.append(_Describe(entry.name, entry.rate, bucket,
                              self._taskqueues.setdefault(entry.name, [])))

  if not has_default:
    queues.append(_Describe(DEFAULT_QUEUE_NAME, DEFAULT_RATE,
                            DEFAULT_BUCKET_SIZE,
                            self._taskqueues.get(DEFAULT_QUEUE_NAME, [])))
  return queues
def GetTasks(self, queue_name):
  """Gets a queue's tasks.

  Args:
    queue_name: Queue's name to return tasks for.

  Returns:
    A list of dictionaries, where each dictionary contains one task's
    attributes. E.g.
      [{'name': 'task-123',
        'queue_name': 'default',
        'url': '/update',
        'method': 'GET',
        'eta': '2009/02/02 05:37:42',
        'eta_delta': '0:00:06.342511 ago',
        'body': '',
        'headers': [('user-header', 'some-value')
                    ('X-AppEngine-QueueName': 'update-queue'),
                    ('X-AppEngine-TaskName': 'task-123'),
                    ('X-AppEngine-TaskRetryCount': '0'),
                    ('X-AppEngine-Development-Payload': '1'),
                    ('Content-Length': 0),
                    ('Content-Type': 'application/octet-stream')]

  Raises:
    ValueError: A task request contains an unknown HTTP method type.
  """
  # Maps pb method enum values to their HTTP verb strings.
  method_names = {
      taskqueue_service_pb.TaskQueueAddRequest.GET: 'GET',
      taskqueue_service_pb.TaskQueueAddRequest.POST: 'POST',
      taskqueue_service_pb.TaskQueueAddRequest.HEAD: 'HEAD',
      taskqueue_service_pb.TaskQueueAddRequest.PUT: 'PUT',
      taskqueue_service_pb.TaskQueueAddRequest.DELETE: 'DELETE',
  }

  result_tasks = []
  for task_request in self._taskqueues.get(queue_name, []):
    method = task_request.method()
    if method not in method_names:
      raise ValueError('Unexpected method: %d' % method)

    task = {
        'name': task_request.task_name(),
        'queue_name': queue_name,
        'url': task_request.url(),
        'method': method_names[method],
        'eta': _FormatEta(task_request.eta_usec()),
        'eta_delta': _EtaDelta(task_request.eta_usec()),
        'body': base64.b64encode(task_request.body()),
    }

    # User headers first (minus the stub's built-ins), then the synthetic
    # App Engine headers.
    headers = [(header.key(), header.value())
               for header in task_request.header_list()
               if header.key().lower() not in BUILT_IN_HEADERS]
    headers.append(('X-AppEngine-QueueName', queue_name))
    headers.append(('X-AppEngine-TaskName', task['name']))
    headers.append(('X-AppEngine-TaskRetryCount', '0'))
    headers.append(('X-AppEngine-Development-Payload', '1'))
    headers.append(('Content-Length', len(task['body'])))
    if 'content-type' not in frozenset(key.lower() for key, _ in headers):
      headers.append(('Content-Type', 'application/octet-stream'))
    task['headers'] = headers

    result_tasks.append(task)
  return result_tasks
def DeleteTask(self, queue_name, task_name):
  """Deletes a task from a queue.

  Removes at most one task; unknown queue or task names are ignored.

  Args:
    queue_name: the name of the queue to delete the task from.
    task_name: the name of the task to delete.
  """
  tasks = self._taskqueues.get(queue_name, [])
  for index, task in enumerate(tasks):
    if task.task_name() == task_name:
      del tasks[index]
      return
def FlushQueue(self, queue_name):
  """Removes all tasks from a queue.

  Args:
    queue_name: the name of the queue to remove tasks from.
  """
  # Rebinding (rather than clearing in place) also registers queue_name in
  # self._taskqueues if it was not present.
  self._taskqueues[queue_name] = []
def _Dynamic_UpdateQueue(self, request, unused_response):
  """Local implementation of the UpdateQueue RPC in TaskQueueService.

  Must adhere to the '_Dynamic_' naming convention for stubbing to work.
  See taskqueue_service.proto for a full description of the RPC.

  Args:
    request: A taskqueue_service_pb.TaskQueueUpdateQueueRequest.
    unused_response: A taskqueue_service_pb.TaskQueueUpdateQueueResponse.
                     Not used.

  Raises:
    apiproxy_errors.ApplicationError: TOMBSTONED_QUEUE when updating a
      previously deleted queue.
  """
  queues = self._app_queues.setdefault(request.app_id(), {})
  # A stored value of None marks a tombstoned (deleted) queue.
  if request.queue_name() in queues and queues[request.queue_name()] is None:
    raise apiproxy_errors.ApplicationError(
        taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_QUEUE)

  # Store a copy so later mutation of the request cannot alter our state.
  defensive_copy = self._QueueDetails()
  defensive_copy.CopyFrom(request)

  queues[request.queue_name()] = defensive_copy
def _Dynamic_FetchQueues(self, request, response):
  """Local implementation of the FetchQueues RPC in TaskQueueService.

  Must adhere to the '_Dynamic_' naming convention for stubbing to work.
  See taskqueue_service.proto for a full description of the RPC.

  Args:
    request: A taskqueue_service_pb.TaskQueueFetchQueuesRequest.
    response: A taskqueue_service_pb.TaskQueueFetchQueuesResponse.
  """
  queues = self._app_queues.get(request.app_id(), {})
  # Report queues in name order, up to max_rows.
  for unused_key, queue in sorted(queues.items()):
    if request.max_rows() == response.queue_size():
      break

    # Tombstoned (deleted) queues are stored as None; skip them.
    if queue is None:
      continue

    response_queue = response.add_queue()
    response_queue.set_queue_name(queue.queue_name())
    response_queue.set_bucket_refill_per_second(
        queue.bucket_refill_per_second())
    response_queue.set_bucket_capacity(queue.bucket_capacity())
    response_queue.set_user_specified_rate(queue.user_specified_rate())
    if queue.has_max_concurrent_requests():
      response_queue.set_max_concurrent_requests(
          queue.max_concurrent_requests())
    # paused is tracked on the _QueueDetails wrapper, not in the pb fields.
    response_queue.set_paused(queue.paused)
def _Dynamic_FetchQueueStats(self, request, response):
  """Local 'random' implementation of the TaskQueueService.FetchQueueStats.

  This implementation loads some stats from the dummy store,
  the rest with random numbers.
  Must adhere to the '_Dynamic_' naming convention for stubbing to work.
  See taskqueue_service.proto for a full description of the RPC.

  Args:
    request: A taskqueue_service_pb.TaskQueueFetchQueueStatsRequest.
    response: A taskqueue_service_pb.TaskQueueFetchQueueStatsResponse.
  """
  for queue in request.queue_name_list():
    store = self.GetDummyTaskStore(request.app_id(), queue)
    stats = response.add_queuestats()
    stats.set_num_tasks(store.Count())
    # -1 signals "no oldest task" to the console.
    if stats.num_tasks() == 0:
      stats.set_oldest_eta_usec(-1)
    else:
      stats.set_oldest_eta_usec(store.Oldest())

    # 90% of the time, fabricate random scanner statistics for display.
    if random.randint(0, 9) > 0:
      scanner_info = stats.mutable_scanner_info()
      scanner_info.set_executed_last_minute(random.randint(0, 10))
      scanner_info.set_executed_last_hour(scanner_info.executed_last_minute()
                                          + random.randint(0, 100))
      scanner_info.set_sampling_duration_seconds(random.random() * 10000.0)
      scanner_info.set_requests_in_flight(random.randint(0, 10))
def GetDummyTaskStore(self, app_id, queue_name):
  """Get the dummy task store for this app_id/queue_name pair.

  Creates an entry and populates it, if there's not already an entry.

  Args:
    app_id: the app_id.
    queue_name: the queue_name.

  Returns:
    the existing or the new dummy store.
  """
  key = (app_id, queue_name)
  store = admin_console_dummy_tasks.get(key)
  if store is None:
    store = _DummyTaskStore()
    # Pre-fill new stores with random tasks for the admin console, except
    # for the cron queue and when queue validation is disabled.
    if not self._all_queues_valid and queue_name != CRON_QUEUE_NAME:
      store.Populate(random.randint(10, 100))
    admin_console_dummy_tasks[key] = store
  return store
def _Dynamic_QueryTasks(self, request, response):
  """Local implementation of the TaskQueueService.QueryTasks RPC.

  Uses the dummy store, creating tasks if this is the first time the
  queue has been seen.

  Args:
    request: A taskqueue_service_pb.TaskQueueQueryTasksRequest.
    response: A taskqueue_service_pb.TaskQueueQueryTasksResponse.
  """
  store = self.GetDummyTaskStore(request.app_id(), request.queue_name())

  # Page by (eta, name) when a start eta is given, otherwise by name only.
  if request.has_start_eta_usec():
    tasks = store.Lookup(request.max_rows(), name=request.start_task_name(),
                         eta=request.start_eta_usec())
  else:
    tasks = store.Lookup(request.max_rows(), name=request.start_task_name())
  for task in tasks:
    response.add_task().MergeFrom(task)
def _Dynamic_Delete(self, request, response):
  """Local delete implementation of TaskQueueService.Delete.

  Deletes tasks from the dummy store. A 1/20 chance of a transient error.

  Args:
    request: A taskqueue_service_pb.TaskQueueDeleteRequest.
    response: A taskqueue_service_pb.TaskQueueDeleteResponse.
  """
  task_store_key = (request.app_id(), request.queue_name())
  if task_store_key not in admin_console_dummy_tasks:
    # Unknown queue: report UNKNOWN_QUEUE once per requested task.
    for _ in request.task_name_list():
      response.add_result(
          taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE)
    return

  store = admin_console_dummy_tasks[task_store_key]
  for taskname in request.task_name_list():
    if random.random() <= 0.05:
      # Simulated 5% transient failure rate for admin-console testing.
      response.add_result(
          taskqueue_service_pb.TaskQueueServiceError.TRANSIENT_ERROR)
    else:
      response.add_result(store.Delete(taskname))
def _Dynamic_ForceRun(self, request, response):
  """Local force run implementation of TaskQueueService.ForceRun.

  Forces running of a task in a queue. This is a no-op here.
  This will fail randomly for testing.

  Args:
    request: A taskqueue_service_pb.TaskQueueForceRunRequest.
    response: A taskqueue_service_pb.TaskQueueForceRunResponse.
  """
  # ~5% transient errors; then (second independent draw) ~5.2% internal
  # errors on the remainder; otherwise OK.
  if random.random() <= 0.05:
    response.set_result(
        taskqueue_service_pb.TaskQueueServiceError.TRANSIENT_ERROR)
  elif random.random() <= 0.052:
    response.set_result(
        taskqueue_service_pb.TaskQueueServiceError.INTERNAL_ERROR)
  else:
    response.set_result(
        taskqueue_service_pb.TaskQueueServiceError.OK)
def _Dynamic_DeleteQueue(self, request, response):
  """Local delete implementation of TaskQueueService.DeleteQueue.

  Args:
    request: A taskqueue_service_pb.TaskQueueDeleteQueueRequest.
    response: A taskqueue_service_pb.TaskQueueDeleteQueueResponse.

  Raises:
    apiproxy_errors.ApplicationError: INVALID_QUEUE_NAME for an empty name,
      UNKNOWN_QUEUE for an unregistered queue, TOMBSTONED_QUEUE for an
      already deleted one.
  """
  if not request.queue_name():
    raise apiproxy_errors.ApplicationError(
        taskqueue_service_pb.TaskQueueServiceError.INVALID_QUEUE_NAME)

  queues = self._app_queues.get(request.app_id(), {})
  if request.queue_name() not in queues:
    raise apiproxy_errors.ApplicationError(
        taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE)
  elif queues[request.queue_name()] is None:
    raise apiproxy_errors.ApplicationError(
        taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_QUEUE)

  # Deletion is recorded as a tombstone (None) rather than a removal.
  queues[request.queue_name()] = None
  def _Dynamic_PauseQueue(self, request, response):
    """Local pause implementation of TaskQueueService.PauseQueue.

    Args:
      request: A taskqueue_service_pb.TaskQueuePauseQueueRequest.
      response: A taskqueue_service_pb.TaskQueuePauseQueueResponse.

    Raises:
      ApplicationError: INVALID_QUEUE_NAME if the name is empty;
        UNKNOWN_QUEUE / TOMBSTONED_QUEUE for missing or deleted queues
        (the default queue is exempt from those checks).
    """
    if not request.queue_name():
      raise apiproxy_errors.ApplicationError(
          taskqueue_service_pb.TaskQueueServiceError.INVALID_QUEUE_NAME)

    queues = self._app_queues.get(request.app_id(), {})
    # The default queue skips the existence/tombstone validation below.
    if request.queue_name() != DEFAULT_QUEUE_NAME:
      if request.queue_name() not in queues:
        raise apiproxy_errors.ApplicationError(
            taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE)
      elif queues[request.queue_name()] is None:
        raise apiproxy_errors.ApplicationError(
            taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_QUEUE)

    # NOTE(review): assumes an entry for the default queue always exists in
    # `queues`; otherwise this line raises KeyError for DEFAULT_QUEUE_NAME --
    # confirm against queue-registration code elsewhere in this stub.
    queues[request.queue_name()].paused = request.pause()
  def _Dynamic_PurgeQueue(self, request, response):
    """Local purge implementation of TaskQueueService.PurgeQueue.

    Args:
      request: A taskqueue_service_pb.TaskQueuePurgeQueueRequest.
      response: A taskqueue_service_pb.TaskQueuePurgeQueueResponse.

    Raises:
      ApplicationError: INVALID_QUEUE_NAME if the name is empty;
        UNKNOWN_QUEUE / TOMBSTONED_QUEUE for missing or deleted queues.
    """
    if not request.queue_name():
      raise apiproxy_errors.ApplicationError(
          taskqueue_service_pb.TaskQueueServiceError.INVALID_QUEUE_NAME)

    if request.has_app_id():
      # Admin-console path: validate against the per-app queue registry and
      # empty the corresponding dummy task store.
      queues = self._app_queues.get(request.app_id(), {})
      # The default queue is exempt from the existence/tombstone checks.
      if request.queue_name() != DEFAULT_QUEUE_NAME:
        if request.queue_name() not in queues:
          raise apiproxy_errors.ApplicationError(
              taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE)
        elif queues[request.queue_name()] is None:
          raise apiproxy_errors.ApplicationError(
              taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_QUEUE)

      store = self.GetDummyTaskStore(request.app_id(), request.queue_name())
      for task in store.Lookup(store.Count()):
        store.Delete(task.task_name())
    elif (not self._IsValidQueue(request.queue_name(), None)
          and not request.queue_name() in self._taskqueues):
      raise apiproxy_errors.ApplicationError(
          taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE)

    # Always drop any locally queued tasks for the queue as well.
    self.FlushQueue(request.queue_name())
  def _Dynamic_DeleteGroup(self, request, response):
    """Local delete implementation of TaskQueueService.DeleteGroup.

    Empties every queue belonging to the app, then drops all of the
    app's queue definitions.

    Args:
      request: A taskqueue_service_pb.TaskQueueDeleteGroupRequest.
      response: A taskqueue_service_pb.TaskQueueDeleteGroupResponse.
    """
    queues = self._app_queues.get(request.app_id(), {})
    for queue in queues.iterkeys():
      # Purge the dummy store for this queue, then flush any local tasks.
      store = self.GetDummyTaskStore(request.app_id(), queue)
      for task in store.Lookup(store.Count()):
        store.Delete(task.task_name())
      self.FlushQueue(queue)

    self._app_queues[request.app_id()] = {}
  def _Dynamic_UpdateStorageLimit(self, request, response):
    """Local implementation of TaskQueueService.UpdateStorageLimit.

    Args:
      request: A taskqueue_service_pb.TaskQueueUpdateStorageLimitRequest.
      response: A taskqueue_service_pb.TaskQueueUpdateStorageLimitResponse.

    Raises:
      ApplicationError: INVALID_REQUEST if the requested limit is negative
        or exceeds 1000 * 2**40 bytes (1000 TiB).
    """
    if request.limit() < 0 or request.limit() > 1000 * (1024 ** 4):
      raise apiproxy_errors.ApplicationError(
          taskqueue_service_pb.TaskQueueServiceError.INVALID_REQUEST)

    # The stub simply echoes back the accepted limit.
    response.set_new_limit(request.limit())
| apache-2.0 |
erickt/hue | desktop/core/ext-py/guppy-0.1.10/guppy/heapy/test/support.py | 37 | 7367 | #._cv_part guppy.heapy.test.support
"""Supporting definitions for the Heapy regression test.
Addapted from Python standard module test_support.
"""
import sys
class Error(Exception):
    """Base class for Heapy regression test exceptions."""
class TestFailed(Error):
    """Test failed: an assertion or expected outcome did not hold."""
class TestSkipped(Error):
    """Test skipped.

    This can be raised to indicate that a test was deliberatly
    skipped, but not because a feature wasn't available.  For
    example, if some resource can't be used, such as the network
    appears to be unavailable, this should be raised instead of
    TestFailed.
    """
verbose = 1 # Flag set to 0 by regrtest.py
use_resources = None # Flag set to [] by regrtest.py
# _original_stdout is meant to hold stdout at the time regrtest began.
# This may be "the real" stdout, or IDLE's emulation of stdout, or whatever.
# The point is to have some flavor of stdout the user can actually see.
_original_stdout = None
def record_original_stdout(stdout):
    """Remember *stdout* as it was when regrtest began (see module note
    above _original_stdout); tests may later replace sys.stdout."""
    global _original_stdout
    _original_stdout = stdout
def get_original_stdout():
    """Return the stdout recorded at startup, falling back to sys.stdout."""
    return _original_stdout or sys.stdout
def unload(name):
    """Drop module *name* from sys.modules; a no-op if it is not loaded."""
    sys.modules.pop(name, None)
def forget(modname):
    """Unload *modname* and delete its cached .pyc from every sys.path
    directory, so a subsequent import recompiles it from source."""
    unload(modname)
    import os
    for dirname in sys.path:
        try:
            os.unlink(os.path.join(dirname, modname + '.pyc'))
        except os.error:
            # No .pyc in this directory -- nothing to remove.
            pass
def requires(resource, msg=None):
    """Raise TestSkipped unless *resource* is enabled in use_resources.

    When use_resources is None (the default outside regrtest), every
    resource is considered available.
    """
    if use_resources is not None and resource not in use_resources:
        if msg is None:
            msg = "Use of the `%s' resource not enabled" % resource
        raise TestSkipped(msg)
FUZZ = 1e-6  # relative tolerance for fuzzy float comparison

def fcmp(x, y): # fuzzy comparison function
    """cmp()-style comparison treating floats within a relative tolerance
    of FUZZ as equal; same-type tuples/lists are compared element-wise.
    Python 2 only: relies on coerce() and cmp()."""
    if type(x) == type(0.0) or type(y) == type(0.0):
        try:
            # Coerce to a common numeric type, then compare with a fuzz
            # proportional to the combined magnitudes.
            x, y = coerce(x, y)
            fuzz = (abs(x) + abs(y)) * FUZZ
            if abs(x-y) <= fuzz:
                return 0
        except:
            # Coercion failed -- fall through to plain cmp() below.
            pass
    elif type(x) == type(y) and type(x) in (type(()), type([])):
        # Element-wise comparison; ties broken by length.
        for i in range(min(len(x), len(y))):
            outcome = fcmp(x[i], y[i])
            if outcome != 0:
                return outcome
        return cmp(len(x), len(y))
    return cmp(x, y)
try:
unicode
have_unicode = 1
except NameError:
have_unicode = 0
import os
# Filename used for testing
if os.name == 'java':
# Jython disallows @ in module names
TESTFN = '$test'
elif os.name != 'riscos':
TESTFN = '@test'
# Unicode name only used if TEST_FN_ENCODING exists for the platform.
if have_unicode:
TESTFN_UNICODE=unicode("@test-\xe0\xf2", "latin-1") # 2 latin characters.
if os.name=="nt":
TESTFN_ENCODING="mbcs"
else:
TESTFN = 'test'
del os
from os import unlink
def findfile(file, here=__file__):
    """Locate *file*: absolute paths are returned as-is; otherwise search
    this module's directory followed by sys.path, returning the first
    existing match or the bare name if none is found."""
    import os
    if os.path.isabs(file):
        return file
    search_dirs = [os.path.dirname(here)] + sys.path
    for directory in search_dirs:
        candidate = os.path.join(directory, file)
        if os.path.exists(candidate):
            return candidate
    return file
def verify(condition, reason='test failed'):
    """Raise TestFailed(*reason*) unless *condition* is true.

    The optional reason argument provides a better error text.
    """
    if condition:
        return
    raise TestFailed(reason)
def vereq(a, b):
    """Raise TestFailed if a == b is false.

    This is better than verify(a == b) because, in case of failure, the
    error message incorporates repr(a) and repr(b) so you can see the
    inputs.

    Note that "not (a == b)" isn't necessarily the same as "a != b"; the
    former is tested.
    """
    if not (a == b):
        # Python 2 raise syntax; the message shows both operands' reprs.
        raise TestFailed, "%r == %r" % (a, b)
def sortdict(dict):
    """Like repr(dict), but with the key/value pairs in sorted order.

    Gives deterministic output regardless of dict iteration order.
    """
    # sorted() replaces the Python-2-only ``dict.items(); items.sort()``
    # idiom (on Python 3, .items() returns a view with no .sort()).
    pairs = sorted(dict.items())
    reprpairs = ["%r: %r" % pair for pair in pairs]
    withcommas = ", ".join(reprpairs)
    return "{%s}" % withcommas
def check_syntax(statement):
    """Verify that compiling *statement* raises SyntaxError; if it
    compiles cleanly, print a 'Missing SyntaxError' diagnostic."""
    try:
        compile(statement, '<string>', 'exec')
    except SyntaxError:
        pass  # expected: the statement is indeed invalid
    else:
        print 'Missing SyntaxError: "%s"' % statement
#=======================================================================
# Preliminary PyUNIT integration.
import unittest
class BasicTestRunner:
def run(self, test):
result = unittest.TestResult()
test(result)
return result
def run_suite(suite, testclass=None):
    """Run tests from a unittest.TestSuite-derived class.

    In verbose mode the standard text runner prints per-test results;
    otherwise a silent BasicTestRunner is used.  On any failure or error
    a TestFailed is raised -- with the single traceback text when exactly
    one problem occurred, or a summary message otherwise.
    """
    if verbose:
        runner = unittest.TextTestRunner(sys.stdout, verbosity=2)
    else:
        runner = BasicTestRunner()

    result = runner.run(suite)
    if not result.wasSuccessful():
        # Inline the traceback only when there is a single error XOR a
        # single failure; otherwise fall back to a summary message.
        if len(result.errors) == 1 and not result.failures:
            err = result.errors[0][1]
        elif len(result.failures) == 1 and not result.errors:
            err = result.failures[0][1]
        else:
            if testclass is None:
                msg = "errors occurred; run in verbose mode for details"
            else:
                msg = "errors occurred in %s.%s" \
                      % (testclass.__module__, testclass.__name__)
            raise TestFailed(msg)
        raise TestFailed(err)
def run_unittest(testclass, debug=0):
    """Run tests from a unittest.TestCase-derived class.

    With a true *debug*, runs the suite in debug mode so exceptions
    propagate immediately instead of being collected into a result.
    """
    suite = unittest.makeSuite(testclass)
    if debug:
        suite.debug()
    else:
        run_suite(suite, testclass)
def debug_unittest(testclass):
    """Debug tests from a unittest.TestCase-derived class (shorthand for
    run_unittest(testclass, debug=1))."""
    run_unittest(testclass, debug=1)
#=======================================================================
# doctest driver.
def run_doctest(module, verbosity=None):
    """Run doctest on the given module.  Return (#failures, #tests).

    If optional argument verbosity is not specified (or is None), pass
    test_support's belief about verbosity on to doctest.  Else doctest's
    usual behavior is used (it searches sys.argv for -v).

    Raises TestFailed if any doctest fails.
    """
    import doctest

    if verbosity is None:
        verbosity = verbose
    else:
        verbosity = None

    # Direct doctest output (normally just errors) to real stdout; doctest
    # output shouldn't be compared by regrtest.
    save_stdout = sys.stdout
    sys.stdout = get_original_stdout()
    try:
        f, t = doctest.testmod(module, verbose=verbosity)
        if f:
            raise TestFailed("%d of %d doctests failed" % (f, t))
        return f, t
    finally:
        # Always restore stdout, even when TestFailed propagates.
        sys.stdout = save_stdout
# Base test case, tailored for heapy
class TestCase(unittest.TestCase):
def setUp(self):
from guppy import Root
self.python = Root()
self.guppy = self.python.guppy
self.heapy = self.guppy.heapy
self.Part = self.heapy.Part
self.ImpSet = self.heapy.ImpSet
self.Use = self.heapy.Use
self.View = self.heapy.View
self.allocation_behaves_as_originally = self.heapy.allocation_behaves_as_originally
self.iso = self.Use.iso
self.idset = self.Use.idset
def aseq(self, a, b, cont=0):
if a != b:
print "aseq: Expected: b = ", b
print "Got actually : a = ", a
if cont <= 0:
if cont < 0:
pdb.set_trace()
else:
self.assert_(0)
def asis(self, a, b, cont=0):
if a is not b:
print "asis: Expected: b = ", b
print "Got actually : a = ", a
if cont <= 0:
if cont < 0:
pdb.set_trace()
else:
self.assert_(0)
def tearDown(self):
pass
| apache-2.0 |
isotoma/django-postgres | django_postgres/six.py | 8 | 12805 | """Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2013 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.3.0"
# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
    """Set *doc* as the docstring of *func* (used for the compat shims
    defined in this module so they carry documentation)."""
    func.__doc__ = doc
def _import_module(name):
    """Import module, returning the module after the last dot."""
    __import__(name)
    # __import__ returns the top-level package for dotted names, so fetch
    # the actual submodule from sys.modules instead.
    return sys.modules[name]
class _LazyDescr(object):
    """Descriptor that resolves its value on first access, caches it on
    the instance, and then removes itself from the class."""

    def __init__(self, name):
        self.name = name

    def __get__(self, obj, tp):
        result = self._resolve()
        # Cache on the instance so future lookups skip this descriptor.
        setattr(obj, self.name, result)
        # This is a bit ugly, but it avoids running this again.
        delattr(tp, self.name)
        return result
class MovedModule(_LazyDescr):
    """Lazy reference to a module renamed between Python 2 and 3.

    *old* is the Python 2 name; *new* the Python 3 name (defaults to the
    move's own name when omitted).
    """

    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        if PY3:
            if new is None:
                new = name
            self.mod = new
        else:
            self.mod = old

    def _resolve(self):
        return _import_module(self.mod)
class MovedAttribute(_LazyDescr):
    """Lazy reference to an attribute whose home module (and possibly its
    name) changed between Python 2 and 3."""

    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            if new_mod is None:
                new_mod = name
            self.mod = new_mod
            if new_attr is None:
                # Default the Python 3 attribute name to old_attr, or to
                # the move's own name when neither is given.
                if old_attr is None:
                    new_attr = name
                else:
                    new_attr = old_attr
            self.attr = new_attr
        else:
            self.mod = old_mod
            if old_attr is None:
                old_attr = name
            self.attr = old_attr

    def _resolve(self):
        module = _import_module(self.mod)
        return getattr(module, self.attr)
class _MovedItems(types.ModuleType):
"""Lazy loading of moved objects"""
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
del attr
moves = sys.modules[__name__ + ".moves"] = _MovedItems("moves")
def add_move(move):
    """Add an item to six.moves (a MovedModule or MovedAttribute)."""
    setattr(_MovedItems, move.name, move)
def remove_move(name):
    """Remove item from six.moves.

    Raises AttributeError if no such move exists.
    """
    try:
        delattr(_MovedItems, name)
    except AttributeError:
        # The lazy descriptor may already have resolved and cached itself
        # into the moves module's __dict__; remove it from there instead.
        try:
            del moves.__dict__[name]
        except KeyError:
            raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
_iterkeys = "keys"
_itervalues = "values"
_iteritems = "items"
_iterlists = "lists"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
_iterkeys = "iterkeys"
_itervalues = "itervalues"
_iteritems = "iteritems"
_iterlists = "iterlists"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
def iterkeys(d, **kw):
    """Return an iterator over the keys of a dictionary.

    Dispatches via the module-level _iterkeys name: d.keys() on Python 3,
    d.iterkeys() on Python 2.
    """
    return iter(getattr(d, _iterkeys)(**kw))
def itervalues(d, **kw):
    """Return an iterator over the values of a dictionary.

    Dispatches via the module-level _itervalues name (values/itervalues).
    """
    return iter(getattr(d, _itervalues)(**kw))
def iteritems(d, **kw):
    """Return an iterator over the (key, value) pairs of a dictionary.

    Dispatches via the module-level _iteritems name (items/iteritems).
    """
    return iter(getattr(d, _iteritems)(**kw))
def iterlists(d, **kw):
    """Return an iterator over the (key, [values]) pairs of a dictionary.

    Intended for multi-dicts exposing lists/iterlists (via _iterlists).
    """
    return iter(getattr(d, _iterlists)(**kw))
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
if sys.version_info[1] <= 1:
def int2byte(i):
return bytes((i,))
else:
# This is about 2x faster than the implementation above on 3.2+
int2byte = operator.methodcaller("to_bytes", 1, "big")
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
else:
def b(s):
return s
def u(s):
return unicode(s, "unicode_escape")
int2byte = chr
def indexbytes(buf, i):
return ord(buf[i])
def iterbytes(buf):
return (ord(byte) for byte in buf)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
if PY3:
import builtins
exec_ = getattr(builtins, "exec")
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
print_ = getattr(builtins, "print")
del builtins
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
def print_(*args, **kwargs):
"""The new-style print function."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
_add_doc(reraise, """Reraise an exception.""")
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass.

    Returns a throwaway class named 'NewBase' built by *meta* from
    *bases*; deriving from it gives subclasses the metaclass on both
    Python 2 and Python 3.
    """
    namespace = {}
    return meta("NewBase", bases, namespace)
| unlicense |
alphapapa/youtube-dl | youtube_dl/extractor/beeg.py | 103 | 2066 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
class BeegIE(InfoExtractor):
    """youtube-dl extractor for beeg.com video pages."""
    _VALID_URL = r'https?://(?:www\.)?beeg\.com/(?P<id>\d+)'
    _TEST = {
        'url': 'http://beeg.com/5416503',
        'md5': '1bff67111adb785c51d1b42959ec10e5',
        'info_dict': {
            'id': '5416503',
            'ext': 'mp4',
            'title': 'Sultry Striptease',
            'description': 'md5:6db3c6177972822aaba18652ff59c773',
            'categories': list,  # NSFW
            'thumbnail': 're:https?://.*\.jpg$',
            'age_limit': 18,
        }
    }

    def _real_extract(self, url):
        """Extract title, description, thumbnail, categories and the
        per-quality format list from a beeg.com video page."""
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        webpage = self._download_webpage(url, video_id)

        # The page embeds a JS object mapping quality labels (e.g. '480p')
        # to direct media URLs.
        quality_arr = self._search_regex(
            r'(?s)var\s+qualityArr\s*=\s*{\s*(.+?)\s*}', webpage, 'quality formats')

        formats = [{
            'url': fmt[1],
            'format_id': fmt[0],
            # Labels look like '720p'; strip the trailing letter to get
            # the pixel height.
            'height': int(fmt[0][:-1]),
        } for fmt in re.findall(r"'([^']+)'\s*:\s*'([^']+)'", quality_arr)]
        self._sort_formats(formats)

        title = self._html_search_regex(
            r'<title>([^<]+)\s*-\s*beeg\.?</title>', webpage, 'title')

        description = self._html_search_regex(
            r'<meta name="description" content="([^"]*)"',
            webpage, 'description', fatal=False)
        thumbnail = self._html_search_regex(
            r'\'previewer.url\'\s*:\s*"([^"]*)"',
            webpage, 'thumbnail', fatal=False)

        # Keywords double as the category list; None when absent.
        categories_str = self._html_search_regex(
            r'<meta name="keywords" content="([^"]+)"', webpage, 'categories', fatal=False)
        categories = (
            None if categories_str is None
            else categories_str.split(','))

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'categories': categories,
            'formats': formats,
            'age_limit': 18,
        }
| unlicense |
shuggiefisher/crowdstock | django/db/models/sql/aggregates.py | 277 | 4176 | """
Classes to represent the default SQL aggregate functions
"""
class AggregateField(object):
    """An internal field mockup used to identify aggregates in the
    data-conversion parts of the database backend.
    """
    def __init__(self, internal_type):
        # e.g. 'IntegerField' or 'FloatField'; mirrors real model fields.
        self.internal_type = internal_type

    def get_internal_type(self):
        # Same accessor API as django.db.models.Field.
        return self.internal_type
ordinal_aggregate_field = AggregateField('IntegerField')
computed_aggregate_field = AggregateField('FloatField')
class Aggregate(object):
    """
    Default SQL Aggregate.
    """
    is_ordinal = False
    is_computed = False
    sql_template = '%(function)s(%(field)s)'

    def __init__(self, col, source=None, is_summary=False, **extra):
        """Instantiate an SQL aggregate

         * col is a column reference describing the subject field
           of the aggregate. It can be an alias, or a tuple describing
           a table and column name.
         * source is the underlying field or aggregate definition for
           the column reference. If the aggregate is not an ordinal or
           computed type, this reference is used to determine the coerced
           output type of the aggregate.
         * extra is a dictionary of additional data to provide for the
           aggregate definition

        Also utilizes the class variables:
         * sql_function, the name of the SQL function that implements the
           aggregate.
         * sql_template, a template string that is used to render the
           aggregate into SQL.
         * is_ordinal, a boolean indicating if the output of this aggregate
           is an integer (e.g., a count)
         * is_computed, a boolean indicating if this output of this aggregate
           is a computed float (e.g., an average), regardless of the input
           type.
        """
        self.col = col
        self.source = source
        self.is_summary = is_summary
        self.extra = extra

        # Follow the chain of aggregate sources back until you find an
        # actual field, or an aggregate that forces a particular output
        # type. This type of this field will be used to coerce values
        # retrieved from the database.
        tmp = self

        while tmp and isinstance(tmp, Aggregate):
            if getattr(tmp, 'is_ordinal', False):
                tmp = ordinal_aggregate_field
            elif getattr(tmp, 'is_computed', False):
                tmp = computed_aggregate_field
            else:
                tmp = tmp.source

        self.field = tmp

    def relabel_aliases(self, change_map):
        # Rewrite the table alias (first element of a (table, column)
        # pair) when the query relabels its joins.
        if isinstance(self.col, (list, tuple)):
            self.col = (change_map.get(self.col[0], self.col[0]), self.col[1])

    def as_sql(self, qn, connection):
        "Return the aggregate, rendered as SQL."
        if hasattr(self.col, 'as_sql'):
            # The column knows how to render itself (e.g. an expression).
            field_name = self.col.as_sql(qn, connection)
        elif isinstance(self.col, (list, tuple)):
            # (table, column) pair: quote each part and join with a dot.
            field_name = '.'.join([qn(c) for c in self.col])
        else:
            field_name = self.col

        params = {
            'function': self.sql_function,
            'field': field_name
        }
        # extra may override/add template parameters (e.g. 'distinct').
        params.update(self.extra)

        return self.sql_template % params
class Avg(Aggregate):
    """SQL AVG(); output is always coerced to float."""
    is_computed = True
    sql_function = 'AVG'
class Count(Aggregate):
    """SQL COUNT(), optionally COUNT(DISTINCT ...); output is an integer."""
    is_ordinal = True
    sql_function = 'COUNT'
    sql_template = '%(function)s(%(distinct)s%(field)s)'

    def __init__(self, col, distinct=False, **extra):
        # Pre-render the distinct flag into the SQL template parameter.
        super(Count, self).__init__(col, distinct=distinct and 'DISTINCT ' or '', **extra)
class Max(Aggregate):
    """SQL MAX(); output type follows the source field."""
    sql_function = 'MAX'
class Min(Aggregate):
    """SQL MIN(); output type follows the source field."""
    sql_function = 'MIN'
class StdDev(Aggregate):
    """SQL standard deviation; sample=True selects STDDEV_SAMP, otherwise
    STDDEV_POP.  Output is always coerced to float."""
    is_computed = True

    def __init__(self, col, sample=False, **extra):
        super(StdDev, self).__init__(col, **extra)
        self.sql_function = sample and 'STDDEV_SAMP' or 'STDDEV_POP'
class Sum(Aggregate):
    """SQL SUM(); output type follows the source field."""
    sql_function = 'SUM'
class Variance(Aggregate):
    """SQL variance; sample=True selects VAR_SAMP, otherwise VAR_POP.
    Output is always coerced to float."""
    is_computed = True

    def __init__(self, col, sample=False, **extra):
        super(Variance, self).__init__(col, **extra)
        self.sql_function = sample and 'VAR_SAMP' or 'VAR_POP'
| bsd-3-clause |
NEricN/RobotCSimulator | Python/App/Lib/fractions.py | 252 | 22390 | # Originally contributed by Sjoerd Mullender.
# Significantly modified by Jeffrey Yasskin <jyasskin at gmail.com>.
"""Rational, infinite-precision, real numbers."""
from __future__ import division
from decimal import Decimal
import math
import numbers
import operator
import re
__all__ = ['Fraction', 'gcd']
Rational = numbers.Rational
def gcd(a, b):
    """Calculate the Greatest Common Divisor of a and b.

    Unless b==0, the result will have the same sign as b (so that when
    b is divided by it, the result comes out positive).
    """
    # Euclid's algorithm; the sign property follows from Python's
    # floored-modulo semantics.
    while b != 0:
        remainder = a % b
        a = b
        b = remainder
    return a
_RATIONAL_FORMAT = re.compile(r"""
\A\s* # optional whitespace at the start, then
(?P<sign>[-+]?) # an optional sign, then
(?=\d|\.\d) # lookahead for digit or .digit
(?P<num>\d*) # numerator (possibly empty)
(?: # followed by
(?:/(?P<denom>\d+))? # an optional denominator
| # or
(?:\.(?P<decimal>\d*))? # an optional fractional part
(?:E(?P<exp>[-+]?\d+))? # and optional exponent
)
\s*\Z # and optional whitespace to finish
""", re.VERBOSE | re.IGNORECASE)
class Fraction(Rational):
"""This class implements rational numbers.
In the two-argument form of the constructor, Fraction(8, 6) will
produce a rational number equivalent to 4/3. Both arguments must
be Rational. The numerator defaults to 0 and the denominator
defaults to 1 so that Fraction(3) == 3 and Fraction() == 0.
Fractions can also be constructed from:
- numeric strings similar to those accepted by the
float constructor (for example, '-2.3' or '1e10')
- strings of the form '123/456'
- float and Decimal instances
- other Rational instances (including integers)
"""
__slots__ = ('_numerator', '_denominator')
# We're immutable, so use __new__ not __init__
    def __new__(cls, numerator=0, denominator=None):
        """Constructs a Fraction.

        Takes a string like '3/2' or '1.5', another Rational instance, a
        numerator/denominator pair, or a float.

        Examples
        --------
        >>> Fraction(10, -8)
        Fraction(-5, 4)
        >>> Fraction(Fraction(1, 7), 5)
        Fraction(1, 35)
        >>> Fraction(Fraction(1, 7), Fraction(2, 3))
        Fraction(3, 14)
        >>> Fraction('314')
        Fraction(314, 1)
        >>> Fraction('-35/4')
        Fraction(-35, 4)
        >>> Fraction('3.1415') # conversion from numeric string
        Fraction(6283, 2000)
        >>> Fraction('-47e-2') # string may include a decimal exponent
        Fraction(-47, 100)
        >>> Fraction(1.47)  # direct construction from float (exact conversion)
        Fraction(6620291452234629, 4503599627370496)
        >>> Fraction(2.25)
        Fraction(9, 4)
        >>> Fraction(Decimal('1.47'))
        Fraction(147, 100)
        """
        self = super(Fraction, cls).__new__(cls)

        if denominator is None:
            # Single-argument forms: Rational, float, Decimal or string.
            if isinstance(numerator, Rational):
                self._numerator = numerator.numerator
                self._denominator = numerator.denominator
                return self

            elif isinstance(numerator, float):
                # Exact conversion from float
                value = Fraction.from_float(numerator)
                self._numerator = value._numerator
                self._denominator = value._denominator
                return self

            elif isinstance(numerator, Decimal):
                value = Fraction.from_decimal(numerator)
                self._numerator = value._numerator
                self._denominator = value._denominator
                return self

            elif isinstance(numerator, basestring):
                # Handle construction from strings.
                m = _RATIONAL_FORMAT.match(numerator)
                if m is None:
                    raise ValueError('Invalid literal for Fraction: %r' %
                                     numerator)
                numerator = int(m.group('num') or '0')
                denom = m.group('denom')
                if denom:
                    denominator = int(denom)
                else:
                    # No explicit '/denom': decimal part and exponent (which
                    # the regex makes mutually exclusive with a denominator)
                    # are folded into numerator/denominator powers of ten.
                    denominator = 1
                    decimal = m.group('decimal')
                    if decimal:
                        scale = 10**len(decimal)
                        numerator = numerator * scale + int(decimal)
                        denominator *= scale
                    exp = m.group('exp')
                    if exp:
                        exp = int(exp)
                        if exp >= 0:
                            numerator *= 10**exp
                        else:
                            denominator *= 10**-exp
                if m.group('sign') == '-':
                    numerator = -numerator

            else:
                raise TypeError("argument should be a string "
                                "or a Rational instance")

        elif (isinstance(numerator, Rational) and
              isinstance(denominator, Rational)):
            # Two-argument form: cross-multiply so both become plain ints.
            numerator, denominator = (
                numerator.numerator * denominator.denominator,
                denominator.numerator * numerator.denominator
                )
        else:
            raise TypeError("both arguments should be "
                            "Rational instances")

        if denominator == 0:
            raise ZeroDivisionError('Fraction(%s, 0)' % numerator)
        # Normalize to lowest terms; gcd carries b's sign, which keeps the
        # stored denominator positive.
        g = gcd(numerator, denominator)
        self._numerator = numerator // g
        self._denominator = denominator // g
        return self
    @classmethod
    def from_float(cls, f):
        """Converts a finite float to a rational number, exactly.

        Beware that Fraction.from_float(0.3) != Fraction(3, 10).

        Raises TypeError for non-float input and for NaN/infinity.
        """
        if isinstance(f, numbers.Integral):
            return cls(f)
        elif not isinstance(f, float):
            raise TypeError("%s.from_float() only takes floats, not %r (%s)" %
                            (cls.__name__, f, type(f).__name__))
        if math.isnan(f) or math.isinf(f):
            raise TypeError("Cannot convert %r to %s." % (f, cls.__name__))
        # float.as_integer_ratio() yields an exact (numerator, denominator).
        return cls(*f.as_integer_ratio())
    @classmethod
    def from_decimal(cls, dec):
        """Converts a finite Decimal instance to a rational number, exactly.

        Raises TypeError for non-Decimal input and for NaN/infinity.
        """
        from decimal import Decimal
        if isinstance(dec, numbers.Integral):
            dec = Decimal(int(dec))
        elif not isinstance(dec, Decimal):
            raise TypeError(
                "%s.from_decimal() only takes Decimals, not %r (%s)" %
                (cls.__name__, dec, type(dec).__name__))
        if not dec.is_finite():
            # Catches infinities and nans.
            raise TypeError("Cannot convert %s to %s." % (dec, cls.__name__))
        sign, digits, exp = dec.as_tuple()
        digits = int(''.join(map(str, digits)))
        if sign:
            digits = -digits
        # Apply the decimal exponent as a power of ten on one side.
        if exp >= 0:
            return cls(digits * 10 ** exp)
        else:
            return cls(digits, 10 ** -exp)
    def limit_denominator(self, max_denominator=1000000):
        """Closest Fraction to self with denominator at most max_denominator.

        >>> Fraction('3.141592653589793').limit_denominator(10)
        Fraction(22, 7)
        >>> Fraction('3.141592653589793').limit_denominator(100)
        Fraction(311, 99)
        >>> Fraction(4321, 8765).limit_denominator(10000)
        Fraction(4321, 8765)

        Raises ValueError if max_denominator is less than 1.
        """
        # Algorithm notes: For any real number x, define a *best upper
        # approximation* to x to be a rational number p/q such that:
        #
        #   (1) p/q >= x, and
        #   (2) if p/q > r/s >= x then s > q, for any rational r/s.
        #
        # Define *best lower approximation* similarly.  Then it can be
        # proved that a rational number is a best upper or lower
        # approximation to x if, and only if, it is a convergent or
        # semiconvergent of the (unique shortest) continued fraction
        # associated to x.
        #
        # To find a best rational approximation with denominator <= M,
        # we find the best upper and lower approximations with
        # denominator <= M and take whichever of these is closer to x.
        # In the event of a tie, the bound with smaller denominator is
        # chosen.  If both denominators are equal (which can happen
        # only when max_denominator == 1 and self is midway between
        # two integers) the lower bound---i.e., the floor of self, is
        # taken.
        if max_denominator < 1:
            raise ValueError("max_denominator should be at least 1")
        if self._denominator <= max_denominator:
            return Fraction(self)

        # Walk the continued-fraction expansion of self until the next
        # convergent's denominator would exceed max_denominator.
        p0, q0, p1, q1 = 0, 1, 1, 0
        n, d = self._numerator, self._denominator
        while True:
            a = n//d
            q2 = q0+a*q1
            if q2 > max_denominator:
                break
            p0, q0, p1, q1 = p1, q1, p0+a*p1, q2
            n, d = d, n-a*d

        # bound1 is the best semiconvergent, bound2 the last convergent;
        # pick whichever is closer (ties go to bound2 / smaller denom).
        k = (max_denominator-q0)//q1
        bound1 = Fraction(p0+k*p1, q0+k*q1)
        bound2 = Fraction(p1, q1)
        if abs(bound2 - self) <= abs(bound1-self):
            return bound2
        else:
            return bound1
    @property
    def numerator(a):
        """Numerator of the fraction in lowest terms (read-only)."""
        return a._numerator
    @property
    def denominator(a):
        """Denominator of the fraction in lowest terms (read-only, positive)."""
        return a._denominator
def __repr__(self):
"""repr(self)"""
return ('Fraction(%s, %s)' % (self._numerator, self._denominator))
def __str__(self):
"""str(self)"""
if self._denominator == 1:
return str(self._numerator)
else:
return '%s/%s' % (self._numerator, self._denominator)
    def _operator_fallbacks(monomorphic_operator, fallback_operator):
        """Generates forward and reverse operators given a purely-rational
        operator and a function from the operator module.

        Use this like:
        __op__, __rop__ = _operator_fallbacks(just_rational_op, operator.op)

        In general, we want to implement the arithmetic operations so
        that mixed-mode operations either call an implementation whose
        author knew about the types of both arguments, or convert both
        to the nearest built in type and do the operation there. In
        Fraction, that means that we define __add__ and __radd__ as:

            def __add__(self, other):
                # Both types have numerators/denominator attributes,
                # so do the operation directly
                if isinstance(other, (int, long, Fraction)):
                    return Fraction(self.numerator * other.denominator +
                                    other.numerator * self.denominator,
                                    self.denominator * other.denominator)
                # float and complex don't have those operations, but we
                # know about those types, so special case them.
                elif isinstance(other, float):
                    return float(self) + other
                elif isinstance(other, complex):
                    return complex(self) + other
                # Let the other type take over.
                return NotImplemented

            def __radd__(self, other):
                # radd handles more types than add because there's
                # nothing left to fall back to.
                if isinstance(other, Rational):
                    return Fraction(self.numerator * other.denominator +
                                    other.numerator * self.denominator,
                                    self.denominator * other.denominator)
                elif isinstance(other, Real):
                    return float(other) + float(self)
                elif isinstance(other, Complex):
                    return complex(other) + complex(self)
                return NotImplemented

        There are 5 different cases for a mixed-type addition on
        Fraction. I'll refer to all of the above code that doesn't
        refer to Fraction, float, or complex as "boilerplate". 'r'
        will be an instance of Fraction, which is a subtype of
        Rational (r : Fraction <: Rational), and b : B <:
        Complex. The first three involve 'r + b':

            1. If B <: Fraction, int, float, or complex, we handle
               that specially, and all is well.
            2. If Fraction falls back to the boilerplate code, and it
               were to return a value from __add__, we'd miss the
               possibility that B defines a more intelligent __radd__,
               so the boilerplate should return NotImplemented from
               __add__. In particular, we don't handle Rational
               here, even though we could get an exact answer, in case
               the other type wants to do something special.
            3. If B <: Fraction, Python tries B.__radd__ before
               Fraction.__add__. This is ok, because it was
               implemented with knowledge of Fraction, so it can
               handle those instances before delegating to Real or
               Complex.

        The next two situations describe 'b + r'. We assume that b
        didn't know about Fraction in its implementation, and that it
        uses similar boilerplate code:

            4. If B <: Rational, then __radd_ converts both to the
               builtin rational type (hey look, that's us) and
               proceeds.
            5. Otherwise, __radd__ tries to find the nearest common
               base ABC, and fall back to its builtin type. Since this
               class doesn't subclass a concrete type, there's no
               implementation to fall back to, so we need to try as
               hard as possible to return an actual value, or the user
               will get a TypeError.

        """
        def forward(a, b):
            # Exact types: compute exactly; known inexact types: degrade
            # to float/complex; anything else: let b's reflected op try.
            if isinstance(b, (int, long, Fraction)):
                return monomorphic_operator(a, b)
            elif isinstance(b, float):
                return fallback_operator(float(a), b)
            elif isinstance(b, complex):
                return fallback_operator(complex(a), b)
            else:
                return NotImplemented
        forward.__name__ = '__' + fallback_operator.__name__ + '__'
        forward.__doc__ = monomorphic_operator.__doc__

        def reverse(b, a):
            # Last chance: nothing left to delegate to, so convert as
            # needed and return a real value where possible.
            if isinstance(a, Rational):
                # Includes ints.
                return monomorphic_operator(a, b)
            elif isinstance(a, numbers.Real):
                return fallback_operator(float(a), float(b))
            elif isinstance(a, numbers.Complex):
                return fallback_operator(complex(a), complex(b))
            else:
                return NotImplemented
        reverse.__name__ = '__r' + fallback_operator.__name__ + '__'
        reverse.__doc__ = monomorphic_operator.__doc__

        return forward, reverse
def _add(a, b):
"""a + b"""
return Fraction(a.numerator * b.denominator +
b.numerator * a.denominator,
a.denominator * b.denominator)
__add__, __radd__ = _operator_fallbacks(_add, operator.add)
def _sub(a, b):
"""a - b"""
return Fraction(a.numerator * b.denominator -
b.numerator * a.denominator,
a.denominator * b.denominator)
__sub__, __rsub__ = _operator_fallbacks(_sub, operator.sub)
def _mul(a, b):
"""a * b"""
return Fraction(a.numerator * b.numerator, a.denominator * b.denominator)
__mul__, __rmul__ = _operator_fallbacks(_mul, operator.mul)
def _div(a, b):
"""a / b"""
return Fraction(a.numerator * b.denominator,
a.denominator * b.numerator)
__truediv__, __rtruediv__ = _operator_fallbacks(_div, operator.truediv)
__div__, __rdiv__ = _operator_fallbacks(_div, operator.div)
def __floordiv__(a, b):
"""a // b"""
# Will be math.floor(a / b) in 3.0.
div = a / b
if isinstance(div, Rational):
# trunc(math.floor(div)) doesn't work if the rational is
# more precise than a float because the intermediate
# rounding may cross an integer boundary.
return div.numerator // div.denominator
else:
return math.floor(div)
def __rfloordiv__(b, a):
"""a // b"""
# Will be math.floor(a / b) in 3.0.
div = a / b
if isinstance(div, Rational):
# trunc(math.floor(div)) doesn't work if the rational is
# more precise than a float because the intermediate
# rounding may cross an integer boundary.
return div.numerator // div.denominator
else:
return math.floor(div)
def __mod__(a, b):
"""a % b"""
div = a // b
return a - b * div
def __rmod__(b, a):
"""a % b"""
div = a // b
return a - b * div
def __pow__(a, b):
"""a ** b
If b is not an integer, the result will be a float or complex
since roots are generally irrational. If b is an integer, the
result will be rational.
"""
if isinstance(b, Rational):
if b.denominator == 1:
power = b.numerator
if power >= 0:
return Fraction(a._numerator ** power,
a._denominator ** power)
else:
return Fraction(a._denominator ** -power,
a._numerator ** -power)
else:
# A fractional power will generally produce an
# irrational number.
return float(a) ** float(b)
else:
return float(a) ** b
    def __rpow__(b, a):
        """a ** b"""
        if b._denominator == 1 and b._numerator >= 0:
            # If a is an int, keep it that way if possible.
            return a ** b._numerator

        if isinstance(a, Rational):
            # Exact rational base: route through Fraction.__pow__ so
            # integer exponents (including negative) stay exact.
            return Fraction(a.numerator, a.denominator) ** b

        if b._denominator == 1:
            # Negative integer exponent with a non-rational base.
            return a ** b._numerator

        # Fractional exponent: degrade to float.
        return a ** float(b)
    def __pos__(a):
        """+a: Coerces a subclass instance to Fraction"""
        # Always returns a plain Fraction, even for subclass instances.
        return Fraction(a._numerator, a._denominator)
    def __neg__(a):
        """-a"""
        # The denominator is kept positive; only the numerator flips sign.
        return Fraction(-a._numerator, a._denominator)
    def __abs__(a):
        """abs(a)"""
        # The denominator is already positive, so only abs() the numerator.
        return Fraction(abs(a._numerator), a._denominator)
def __trunc__(a):
"""trunc(a)"""
if a._numerator < 0:
return -(-a._numerator // a._denominator)
else:
return a._numerator // a._denominator
    def __hash__(self):
        """hash(self)

        Tricky because values that are exactly representable as a
        float must have the same hash as that float.

        """
        # XXX since this method is expensive, consider caching the result
        if self._denominator == 1:
            # Get integers right.
            return hash(self._numerator)
        # Expensive check, but definitely correct: comparing against the
        # float form detects exact representability.
        if self == float(self):
            return hash(float(self))
        else:
            # Use tuple's hash to avoid a high collision rate on
            # simple fractions.
            return hash((self._numerator, self._denominator))
    def __eq__(a, b):
        """a == b"""
        if isinstance(b, Rational):
            # Both sides are stored in lowest terms, so component-wise
            # comparison is exact.
            return (a._numerator == b.numerator and
                    a._denominator == b.denominator)
        if isinstance(b, numbers.Complex) and b.imag == 0:
            # A complex with zero imaginary part compares as its real part.
            b = b.real
        if isinstance(b, float):
            if math.isnan(b) or math.isinf(b):
                # comparisons with an infinity or nan should behave in
                # the same way for any finite a, so treat a as zero.
                return 0.0 == b
            else:
                # Convert the float exactly and compare rationals.
                return a == a.from_float(b)
        else:
            # Since a doesn't know how to compare with b, let's give b
            # a chance to compare itself with a.
            return NotImplemented
    def _richcmp(self, other, op):
        """Helper for comparison operators, for internal use only.

        Implement comparison between a Rational instance `self`, and
        either another Rational instance or a float `other`.  If
        `other` is not a Rational instance or a float, return
        NotImplemented. `op` should be one of the six standard
        comparison operators.

        """
        # convert other to a Rational instance where reasonable.
        if isinstance(other, Rational):
            # Cross-multiply: valid because denominators are positive,
            # so the inequality direction is preserved.
            return op(self._numerator * other.denominator,
                      self._denominator * other.numerator)
        # comparisons with complex should raise a TypeError, for consistency
        # with int<->complex, float<->complex, and complex<->complex comparisons.
        if isinstance(other, complex):
            raise TypeError("no ordering relation is defined for complex numbers")
        if isinstance(other, float):
            if math.isnan(other) or math.isinf(other):
                # nan/inf compare the same against any finite value.
                return op(0.0, other)
            else:
                # Exact comparison via the float's rational form.
                return op(self, self.from_float(other))
        else:
            return NotImplemented
    def __lt__(a, b):
        """a < b"""
        # Delegates to the shared comparison helper.
        return a._richcmp(b, operator.lt)
    def __gt__(a, b):
        """a > b"""
        # Delegates to the shared comparison helper.
        return a._richcmp(b, operator.gt)
    def __le__(a, b):
        """a <= b"""
        # Delegates to the shared comparison helper.
        return a._richcmp(b, operator.le)
    def __ge__(a, b):
        """a >= b"""
        # Delegates to the shared comparison helper.
        return a._richcmp(b, operator.ge)
def __nonzero__(a):
"""a != 0"""
return a._numerator != 0
    # support for pickling, copy, and deepcopy

    def __reduce__(self):
        # Pickle as the 'n/d' string form; unpickling re-parses it via
        # the constructor.
        return (self.__class__, (str(self),))
    def __copy__(self):
        if type(self) == Fraction:
            return self     # I'm immutable; therefore I am my own clone
        # Subclass instances get a real copy through their constructor.
        return self.__class__(self._numerator, self._denominator)
    def __deepcopy__(self, memo):
        if type(self) == Fraction:
            return self     # My components are also immutable
        # Subclass instances get a real copy through their constructor.
        return self.__class__(self._numerator, self._denominator)
| apache-2.0 |
notmyname/swift | swift/common/middleware/healthcheck.py | 10 | 2073 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from swift.common.swob import Request, Response
class HealthCheckMiddleware(object):
    """
    Healthcheck middleware used for monitoring.

    A GET of /healthcheck answers 200 with "OK" in the body.  If the
    optional "disable_path" config option names an existing file, the
    answer becomes 503 with "DISABLED BY FILE" instead.
    """

    def __init__(self, app, conf):
        self.app = app
        # Empty string means "never disabled".
        self.disable_path = conf.get('disable_path', '')

    def GET(self, req):
        """Returns a 200 response with "OK" in the body."""
        return Response(request=req, body="OK", content_type="text/plain")

    def DISABLED(self, req):
        """Returns a 503 response with "DISABLED BY FILE" in the body."""
        return Response(request=req, status=503, body="DISABLED BY FILE",
                        content_type="text/plain")

    def __call__(self, env, start_response):
        req = Request(env)
        if req.path != '/healthcheck':
            # Not ours: pass straight through to the wrapped app.
            return self.app(env, start_response)
        disabled = self.disable_path and os.path.exists(self.disable_path)
        handler = self.DISABLED if disabled else self.GET
        return handler(req)(env, start_response)
def filter_factory(global_conf, **local_conf):
    """Paste filter factory: returns a callable wrapping an app in
    HealthCheckMiddleware, with local options overriding global ones."""
    conf = dict(global_conf)
    conf.update(local_conf)

    def healthcheck_filter(app):
        return HealthCheckMiddleware(app, conf)
    return healthcheck_filter
| apache-2.0 |
Workday/OpenFrame | third_party/cython/src/Cython/StringIOTree.py | 109 | 3095 | from cStringIO import StringIO
class StringIOTree(object):
    """
    A writable buffer supporting cheap insertion points (see module docs).

    Content lives in a list of child trees followed by a tail StringIO;
    getvalue()/copyto() flatten the structure in order.
    """

    def __init__(self, stream=None):
        self.prepended_children = []
        self.stream = StringIO() if stream is None else stream
        # Bind write directly to the tail stream for speed.
        self.write = self.stream.write
        self.markers = []

    def getvalue(self):
        """Return the full contents (children first) as one string."""
        parts = [child.getvalue() for child in self.prepended_children]
        parts.append(self.stream.getvalue())
        return "".join(parts)

    def copyto(self, target):
        """Potentially cheaper than getvalue as no string concatenation
        needs to happen."""
        for child in self.prepended_children:
            child.copyto(target)
        tail = self.stream.getvalue()
        if tail:
            target.write(tail)

    def commit(self):
        # Save what we have written until now so that the buffer
        # itself is empty -- this makes it ready for insertion
        if self.stream.tell():
            committed = StringIOTree(self.stream)
            committed.markers = self.markers
            self.prepended_children.append(committed)
            self.markers = []
            self.stream = StringIO()
            self.write = self.stream.write

    def insert(self, iotree):
        """
        Insert a StringIOTree (and all of its contents) at this location.

        Further writing to self appears after what is inserted.
        """
        self.commit()
        self.prepended_children.append(iotree)

    def insertion_point(self):
        """
        Returns a new StringIOTree, which is left behind at the current position
        (it what is written to the result will appear right before whatever is
        next written to self).

        Calling getvalue() or copyto() on the result will only return the
        contents written to it.
        """
        # Save what we have written until now
        # This is so that getvalue on the result doesn't include it.
        self.commit()
        # Construct the new forked object to return
        forked = StringIOTree()
        self.prepended_children.append(forked)
        return forked

    def allmarkers(self):
        """Collect markers from every child (in order), then our own."""
        collected = []
        for child in self.prepended_children:
            collected.extend(child.allmarkers())
        collected.extend(self.markers)
        return collected
# Module docstring, assigned late so the doctests below can exercise the
# class defined above.
__doc__ = r"""
Implements a buffer with insertion points. When you know you need to
"get back" to a place and write more later, simply call insertion_point()
at that spot and get a new StringIOTree object that is "left behind".

EXAMPLE:

>>> a = StringIOTree()
>>> a.write('first\n')
>>> b = a.insertion_point()
>>> a.write('third\n')
>>> b.write('second\n')
>>> a.getvalue().split()
['first', 'second', 'third']

>>> c = b.insertion_point()
>>> d = c.insertion_point()
>>> d.write('alpha\n')
>>> b.write('gamma\n')
>>> c.write('beta\n')
>>> b.getvalue().split()
['second', 'alpha', 'beta', 'gamma']

>>> i = StringIOTree()
>>> d.insert(i)
>>> i.write('inserted\n')
>>> out = StringIO()
>>> a.copyto(out)
>>> out.getvalue().split()
['first', 'second', 'alpha', 'inserted', 'beta', 'gamma', 'third']
"""
cchurch/ansible | lib/ansible/modules/system/cron.py | 5 | 25748 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Dane Summers <dsummers@pinedesk.biz>
# Copyright: (c) 2013, Mike Grozak <mike.grozak@gmail.com>
# Copyright: (c) 2013, Patrick Callahan <pmc@patrickcallahan.com>
# Copyright: (c) 2015, Evan Kaufman <evan@digitalflophouse.com>
# Copyright: (c) 2015, Luca Berruti <nadirio@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Module metadata consumed by Ansible's documentation/support tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: cron
short_description: Manage cron.d and crontab entries
description:
- Use this module to manage crontab and environment variables entries. This module allows
you to create environment variables and named crontab entries, update, or delete them.
- 'When crontab jobs are managed: the module includes one line with the description of the
crontab entry C("#Ansible: <name>") corresponding to the "name" passed to the module,
which is used by future ansible/module calls to find/check the state. The "name"
parameter should be unique, and changing the "name" value will result in a new cron
task being created (or a different one being removed).'
- When environment variables are managed, no comment line is added, but, when the module
needs to find/check the state, it uses the "name" parameter to find the environment
variable definition line.
- When using symbols such as %, they must be properly escaped.
version_added: "0.9"
options:
name:
description:
- Description of a crontab entry or, if env is set, the name of environment variable.
- Required if C(state=absent).
- Note that if name is not set and C(state=present), then a
new crontab entry will always be created, regardless of existing ones.
- This parameter will always be required in future releases.
type: str
user:
description:
- The specific user whose crontab should be modified.
- When unset, this parameter defaults to using C(root).
type: str
job:
description:
- The command to execute or, if env is set, the value of environment variable.
- The command should not contain line breaks.
- Required if C(state=present).
type: str
aliases: [ value ]
state:
description:
- Whether to ensure the job or environment variable is present or absent.
type: str
choices: [ absent, present ]
default: present
cron_file:
description:
- If specified, uses this file instead of an individual user's crontab.
- If this is a relative path, it is interpreted with respect to I(/etc/cron.d).
- If it is absolute, it will typically be I(/etc/crontab).
- Many linux distros expect (and some require) the filename portion to consist solely
of upper- and lower-case letters, digits, underscores, and hyphens.
- To use the C(cron_file) parameter you must specify the C(user) as well.
type: str
backup:
description:
- If set, create a backup of the crontab before it is modified.
The location of the backup is returned in the C(backup_file) variable by this module.
type: bool
default: no
minute:
description:
- Minute when the job should run ( 0-59, *, */2, etc )
type: str
default: "*"
hour:
description:
- Hour when the job should run ( 0-23, *, */2, etc )
type: str
default: "*"
day:
description:
- Day of the month the job should run ( 1-31, *, */2, etc )
type: str
default: "*"
aliases: [ dom ]
month:
description:
- Month of the year the job should run ( 1-12, *, */2, etc )
type: str
default: "*"
weekday:
description:
- Day of the week that the job should run ( 0-6 for Sunday-Saturday, *, etc )
type: str
default: "*"
aliases: [ dow ]
reboot:
description:
- If the job should be run at reboot. This option is deprecated. Users should use special_time.
version_added: "1.0"
type: bool
default: no
special_time:
description:
- Special time specification nickname.
type: str
choices: [ annually, daily, hourly, monthly, reboot, weekly, yearly ]
version_added: "1.3"
disabled:
description:
- If the job should be disabled (commented out) in the crontab.
- Only has effect if C(state=present).
type: bool
default: no
version_added: "2.0"
env:
description:
- If set, manages a crontab's environment variable.
- New variables are added on top of crontab.
- C(name) and C(value) parameters are the name and the value of environment variable.
type: bool
default: no
version_added: "2.1"
insertafter:
description:
- Used with C(state=present) and C(env).
- If specified, the environment variable will be inserted after the declaration of specified environment variable.
type: str
version_added: "2.1"
insertbefore:
description:
- Used with C(state=present) and C(env).
- If specified, the environment variable will be inserted before the declaration of specified environment variable.
type: str
version_added: "2.1"
requirements:
- cron
author:
- Dane Summers (@dsummersl)
- Mike Grozak (@rhaido)
- Patrick Callahan (@dirtyharrycallahan)
- Evan Kaufman (@EvanK)
- Luca Berruti (@lberruti)
'''
EXAMPLES = r'''
- name: Ensure a job that runs at 2 and 5 exists. Creates an entry like "0 5,2 * * ls -alh > /dev/null"
cron:
name: "check dirs"
minute: "0"
hour: "5,2"
job: "ls -alh > /dev/null"
- name: 'Ensure an old job is no longer present. Removes any job that is prefixed by "#Ansible: an old job" from the crontab'
cron:
name: "an old job"
state: absent
- name: Creates an entry like "@reboot /some/job.sh"
cron:
name: "a job for reboot"
special_time: reboot
job: "/some/job.sh"
- name: Creates an entry like "PATH=/opt/bin" on top of crontab
cron:
name: PATH
env: yes
job: /opt/bin
- name: Creates an entry like "APP_HOME=/srv/app" and insert it after PATH declaration
cron:
name: APP_HOME
env: yes
job: /srv/app
insertafter: PATH
- name: Creates a cron file under /etc/cron.d
cron:
name: yum autoupdate
weekday: "2"
minute: "0"
hour: "12"
user: root
job: "YUMINTERACTIVE=0 /usr/sbin/yum-autoupdate"
cron_file: ansible_yum-autoupdate
- name: Removes a cron file from under /etc/cron.d
cron:
name: "yum autoupdate"
cron_file: ansible_yum-autoupdate
state: absent
- name: Removes "APP_HOME" environment variable from crontab
cron:
name: APP_HOME
env: yes
state: absent
'''
import os
import platform
import pwd
import re
import sys
import tempfile
from ansible.module_utils.basic import AnsibleModule, get_platform
from ansible.module_utils.six.moves import shlex_quote
CRONCMD = "/usr/bin/crontab"  # crontab binary used to read/write user crontabs
class CronTabError(Exception):
    """Raised when a crontab cannot be read or written."""
    pass
class CronTab(object):
    """
    CronTab object to write time based crontab file

    user      - the user of the crontab (defaults to root)
    cron_file - a cron file under /etc/cron.d, or an absolute path
    """

    def __init__(self, module, user=None, cron_file=None):
        self.module = module
        self.user = user
        # Whether we run as root; kept for callers, not used internally here.
        self.root = (os.getuid() == 0)
        self.lines = None
        # Marker prefix that names managed entries, e.g. "#Ansible: mytask".
        self.ansible = "#Ansible: "
        # Raw text of the crontab as read from the system (for backups/diffs).
        self.existing = ''

        if cron_file:
            if os.path.isabs(cron_file):
                self.cron_file = cron_file
            else:
                # Relative cron_file paths live under /etc/cron.d.
                self.cron_file = os.path.join('/etc/cron.d', cron_file)
        else:
            self.cron_file = None

        self.read()

    def read(self):
        """Load the crontab (or cron file) into self.lines and self.existing."""
        # Read in the crontab from the system
        self.lines = []
        if self.cron_file:
            # read the cronfile
            try:
                f = open(self.cron_file, 'r')
                self.existing = f.read()
                self.lines = self.existing.splitlines()
                f.close()
            except IOError:
                # cron file does not exist
                return
            except Exception:
                raise CronTabError("Unexpected error:", sys.exc_info()[0])
        else:
            # using safely quoted shell for now, but this really should be two non-shell calls instead. FIXME
            (rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True)

            if rc != 0 and rc != 1:  # 1 can mean that there are no jobs.
                raise CronTabError("Unable to read crontab")

            self.existing = out

            lines = out.splitlines()
            count = 0
            # Strip up to three auto-generated header lines that some crontab
            # implementations prepend, removing them from self.existing too.
            for l in lines:
                if count > 2 or (not re.match(r'# DO NOT EDIT THIS FILE - edit the master and reinstall.', l) and
                                 not re.match(r'# \(/tmp/.*installed on.*\)', l) and
                                 not re.match(r'# \(.*version.*\)', l)):
                    self.lines.append(l)
                else:
                    pattern = re.escape(l) + '[\r\n]?'
                    self.existing = re.sub(pattern, '', self.existing, 1)
                count += 1

    def is_empty(self):
        """Return True when the crontab contains no lines."""
        if len(self.lines) == 0:
            return True
        else:
            return False

    def write(self, backup_file=None):
        """
        Write the crontab to the system. Saves all information.

        When backup_file is given, only write a backup copy and return.
        """
        if backup_file:
            fileh = open(backup_file, 'w')
        elif self.cron_file:
            fileh = open(self.cron_file, 'w')
        else:
            # Write through a temp file, then install it via the crontab command.
            filed, path = tempfile.mkstemp(prefix='crontab')
            os.chmod(path, int('0644', 8))
            fileh = os.fdopen(filed, 'w')

        fileh.write(self.render())
        fileh.close()

        # return if making a backup
        if backup_file:
            return

        # Add the entire crontab back to the user crontab
        if not self.cron_file:
            # quoting shell args for now but really this should be two non-shell calls. FIXME
            (rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True)
            os.unlink(path)

            if rc != 0:
                self.module.fail_json(msg=err)

        # set SELinux permissions
        if self.module.selinux_enabled() and self.cron_file:
            self.module.set_default_selinux_context(self.cron_file, False)

    def do_comment(self, name):
        """Return the marker comment line for the job called *name*."""
        return "%s%s" % (self.ansible, name)

    def add_job(self, name, job):
        """Append a marker comment plus job line to the crontab."""
        # Add the comment
        self.lines.append(self.do_comment(name))

        # Add the job
        self.lines.append("%s" % (job))

    def update_job(self, name, job):
        """Replace the job identified by *name* with *job*."""
        return self._update_job(name, job, self.do_add_job)

    def do_add_job(self, lines, comment, job):
        # Callback for _update_job: emit the marker comment and the job line.
        lines.append(comment)

        lines.append("%s" % (job))

    def remove_job(self, name):
        """Remove the job identified by *name* (marker comment and job line)."""
        return self._update_job(name, "", self.do_remove_job)

    def do_remove_job(self, lines, comment, job):
        # Callback for _update_job: emit nothing, which drops the entry.
        return None

    def add_env(self, decl, insertafter=None, insertbefore=None):
        """Insert an environment declaration line, optionally positioned
        relative to an existing variable; fails the module if the anchor
        variable does not exist."""
        if not (insertafter or insertbefore):
            # Default: new variables go on top of the crontab.
            self.lines.insert(0, decl)
            return

        if insertafter:
            other_name = insertafter
        elif insertbefore:
            other_name = insertbefore

        other_decl = self.find_env(other_name)
        if len(other_decl) > 0:
            if insertafter:
                index = other_decl[0] + 1
            elif insertbefore:
                index = other_decl[0]
            self.lines.insert(index, decl)
            return

        self.module.fail_json(msg="Variable named '%s' not found." % other_name)

    def update_env(self, name, decl):
        """Replace the environment variable *name* with declaration *decl*."""
        return self._update_env(name, decl, self.do_add_env)

    def do_add_env(self, lines, decl):
        # Callback for _update_env: emit the replacement declaration.
        lines.append(decl)

    def remove_env(self, name):
        """Remove the environment variable *name* from the crontab."""
        return self._update_env(name, '', self.do_remove_env)

    def do_remove_env(self, lines, decl):
        # Callback for _update_env: emit nothing, which drops the variable.
        return None

    def remove_job_file(self):
        """Delete the managed cron file; True if removed, False if absent."""
        try:
            os.unlink(self.cron_file)
            return True
        except OSError:
            # cron file does not exist
            return False
        except Exception:
            raise CronTabError("Unexpected error:", sys.exc_info()[0])

    def find_job(self, name, job=None):
        """Locate a managed job: returns [comment, job] (plus True when a
        marker comment had to be added/updated), or [] when not found."""
        # attempt to find job by 'Ansible:' header comment
        comment = None
        for l in self.lines:
            if comment is not None:
                if comment == name:
                    return [comment, l]
                else:
                    comment = None
            elif re.match(r'%s' % self.ansible, l):
                comment = re.sub(r'%s' % self.ansible, '', l)

        # failing that, attempt to find job by exact match
        if job:
            for i, l in enumerate(self.lines):
                if l == job:
                    # if no leading ansible header, insert one
                    if not re.match(r'%s' % self.ansible, self.lines[i - 1]):
                        self.lines.insert(i, self.do_comment(name))
                        return [self.lines[i], l, True]
                    # if a leading blank ansible header AND job has a name, update header
                    elif name and self.lines[i - 1] == self.do_comment(None):
                        self.lines[i - 1] = self.do_comment(name)
                        return [self.lines[i - 1], l, True]

        return []

    def find_env(self, name):
        """Return [index, line] of the declaration of variable *name*, or []."""
        for index, l in enumerate(self.lines):
            if re.match(r'^%s=' % name, l):
                return [index, l]

        return []

    def get_cron_job(self, minute, hour, day, month, weekday, job, special, disabled):
        """Render one crontab line; cron.d files additionally carry the user
        field, and disabled entries are commented out with '#'."""
        # normalize any leading/trailing newlines (ansible/ansible-modules-core#3791)
        job = job.strip('\r\n')

        if disabled:
            disable_prefix = '#'
        else:
            disable_prefix = ''

        if special:
            if self.cron_file:
                return "%s@%s %s %s" % (disable_prefix, special, self.user, job)
            else:
                return "%s@%s %s" % (disable_prefix, special, job)
        else:
            if self.cron_file:
                return "%s%s %s %s %s %s %s %s" % (disable_prefix, minute, hour, day, month, weekday, self.user, job)
            else:
                return "%s%s %s %s %s %s %s" % (disable_prefix, minute, hour, day, month, weekday, job)

    def get_jobnames(self):
        """Return the names of all Ansible-managed jobs in this crontab."""
        jobnames = []

        for l in self.lines:
            if re.match(r'%s' % self.ansible, l):
                jobnames.append(re.sub(r'%s' % self.ansible, '', l))

        return jobnames

    def get_envnames(self):
        """Return the names of all environment variables set in this crontab."""
        envnames = []

        for l in self.lines:
            if re.match(r'^\S+=', l):
                envnames.append(l.split('=')[0])

        return envnames

    def _update_job(self, name, job, addlinesfunction):
        """Rewrite self.lines, routing the entry named *name* through
        addlinesfunction (which may re-emit or drop it)."""
        ansiblename = self.do_comment(name)
        newlines = []
        comment = None

        for l in self.lines:
            if comment is not None:
                addlinesfunction(newlines, comment, job)
                comment = None
            elif l == ansiblename:
                comment = l
            else:
                newlines.append(l)

        self.lines = newlines

        if len(newlines) == 0:
            return True
        else:
            return False  # TODO add some more error testing

    def _update_env(self, name, decl, addenvfunction):
        """Rewrite self.lines, routing the declaration of *name* through
        addenvfunction (which may re-emit or drop it)."""
        newlines = []

        for l in self.lines:
            if re.match(r'^%s=' % name, l):
                addenvfunction(newlines, decl)
            else:
                newlines.append(l)

        self.lines = newlines

    def render(self):
        """
        Render this crontab as it would be in the crontab.
        """
        crons = []
        for cron in self.lines:
            crons.append(cron)

        result = '\n'.join(crons)
        if result:
            # Ensure exactly one trailing newline (cron requires it).
            result = result.rstrip('\r\n') + '\n'
        return result

    def _read_user_execute(self):
        """
        Returns the command line for reading a crontab
        """
        user = ''
        if self.user:
            # Platform-specific invocations: some systems take -l <user>,
            # others need su, and only mismatched users need -u.
            if platform.system() == 'SunOS':
                return "su %s -c '%s -l'" % (shlex_quote(self.user), shlex_quote(CRONCMD))
            elif platform.system() == 'AIX':
                return "%s -l %s" % (shlex_quote(CRONCMD), shlex_quote(self.user))
            elif platform.system() == 'HP-UX':
                return "%s %s %s" % (CRONCMD, '-l', shlex_quote(self.user))
            elif pwd.getpwuid(os.getuid())[0] != self.user:
                user = '-u %s' % shlex_quote(self.user)
        return "%s %s %s" % (CRONCMD, user, '-l')

    def _write_execute(self, path):
        """
        Return the command line for writing a crontab
        """
        user = ''
        if self.user:
            # Platform-specific invocations, mirroring _read_user_execute.
            if platform.system() in ['SunOS', 'HP-UX', 'AIX']:
                return "chown %s %s ; su '%s' -c '%s %s'" % (shlex_quote(self.user), shlex_quote(path), shlex_quote(self.user), CRONCMD, shlex_quote(path))
            elif pwd.getpwuid(os.getuid())[0] != self.user:
                user = '-u %s' % shlex_quote(self.user)
        return "%s %s %s" % (CRONCMD, user, shlex_quote(path))
def main():
    """Entry point for the Ansible cron module.

    Parses the module parameters, then adds, updates or removes either a
    cron job or an environment-variable declaration in the selected crontab
    (a user crontab or a cron_file), honouring check mode, backups and diff
    output.
    """
    # The following example playbooks:
    #
    # - cron: name="check dirs" hour="5,2" job="ls -alh > /dev/null"
    #
    # - name: do the job
    # cron: name="do the job" hour="5,2" job="/some/dir/job.sh"
    #
    # - name: no job
    # cron: name="an old job" state=absent
    #
    # - name: sets env
    # cron: name="PATH" env=yes value="/bin:/usr/bin"
    #
    # Would produce:
    # PATH=/bin:/usr/bin
    # # Ansible: check dirs
    # * * 5,2 * * ls -alh > /dev/null
    # # Ansible: do the job
    # * * 5,2 * * /some/dir/job.sh
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str'),
            user=dict(type='str'),
            job=dict(type='str', aliases=['value']),
            cron_file=dict(type='str'),
            state=dict(type='str', default='present', choices=['present', 'absent']),
            backup=dict(type='bool', default=False),
            minute=dict(type='str', default='*'),
            hour=dict(type='str', default='*'),
            day=dict(type='str', default='*', aliases=['dom']),
            month=dict(type='str', default='*'),
            weekday=dict(type='str', default='*', aliases=['dow']),
            reboot=dict(type='bool', default=False),
            special_time=dict(type='str', choices=["reboot", "yearly", "annually", "monthly", "weekly", "daily", "hourly"]),
            disabled=dict(type='bool', default=False),
            env=dict(type='bool'),
            insertafter=dict(type='str'),
            insertbefore=dict(type='str'),
        ),
        supports_check_mode=True,
        mutually_exclusive=[
            ['reboot', 'special_time'],
            ['insertafter', 'insertbefore'],
        ],
    )
    # Extract all parameters into locals for readability below.
    name = module.params['name']
    user = module.params['user']
    job = module.params['job']
    cron_file = module.params['cron_file']
    state = module.params['state']
    backup = module.params['backup']
    minute = module.params['minute']
    hour = module.params['hour']
    day = module.params['day']
    month = module.params['month']
    weekday = module.params['weekday']
    reboot = module.params['reboot']
    special_time = module.params['special_time']
    disabled = module.params['disabled']
    env = module.params['env']
    insertafter = module.params['insertafter']
    insertbefore = module.params['insertbefore']
    do_install = state == 'present'
    changed = False
    res_args = dict()
    warnings = list()
    if cron_file:
        # cron.d filenames outside this character set may be ignored by cron.
        cron_file_basename = os.path.basename(cron_file)
        if not re.search(r'^[A-Z0-9_-]+$', cron_file_basename, re.I):
            warnings.append('Filename portion of cron_file ("%s") should consist' % cron_file_basename +
                            ' solely of upper- and lower-case letters, digits, underscores, and hyphens')
    # Ensure all files generated are only writable by the owning user.  Primarily relevant for the cron_file option.
    os.umask(int('022', 8))
    crontab = CronTab(module, user, cron_file)
    module.debug('cron instantiated - name: "%s"' % name)
    if not name:
        module.deprecate(
            msg="The 'name' parameter will be required in future releases.",
            version='2.12'
        )
    if reboot:
        module.deprecate(
            msg="The 'reboot' parameter will be removed in future releases. Use 'special_time' option instead.",
            version='2.12'
        )
    if module._diff:
        # Record the pre-change crontab contents for diff mode.
        diff = dict()
        diff['before'] = crontab.existing
        if crontab.cron_file:
            diff['before_header'] = crontab.cron_file
        else:
            if crontab.user:
                diff['before_header'] = 'crontab for user "%s"' % crontab.user
            else:
                diff['before_header'] = 'crontab'
    # --- user input validation ---
    if (special_time or reboot) and \
            (True in [(x != '*') for x in [minute, hour, day, month, weekday]]):
        module.fail_json(msg="You must specify time and date fields or special time.")
    # cannot support special_time on solaris
    if (special_time or reboot) and get_platform() == 'SunOS':
        module.fail_json(msg="Solaris does not support special_time=... or @reboot")
    if cron_file and do_install:
        if not user:
            module.fail_json(msg="To use cron_file=... parameter you must specify user=... as well")
    if job is None and do_install:
        module.fail_json(msg="You must specify 'job' to install a new cron job or variable")
    if (insertafter or insertbefore) and not env and do_install:
        module.fail_json(msg="Insertafter and insertbefore parameters are valid only with env=yes")
    if reboot:
        # Legacy flag: translate to the equivalent special_time value.
        special_time = "reboot"
    # if requested make a backup before making a change
    if backup and not module.check_mode:
        (backuph, backup_file) = tempfile.mkstemp(prefix='crontab')
        crontab.write(backup_file)
    if crontab.cron_file and not name and not do_install:
        # state=absent on a cron_file with no name: delete the whole file.
        if module._diff:
            diff['after'] = ''
            diff['after_header'] = '/dev/null'
        else:
            diff = dict()
        if module.check_mode:
            changed = os.path.isfile(crontab.cron_file)
        else:
            changed = crontab.remove_job_file()
        module.exit_json(changed=changed, cron_file=cron_file, state=state, diff=diff)
    if env:
        # Managing an environment variable declaration rather than a job.
        if ' ' in name:
            module.fail_json(msg="Invalid name for environment variable")
        decl = '%s="%s"' % (name, job)
        old_decl = crontab.find_env(name)
        if do_install:
            if len(old_decl) == 0:
                crontab.add_env(decl, insertafter, insertbefore)
                changed = True
            if len(old_decl) > 0 and old_decl[1] != decl:
                crontab.update_env(name, decl)
                changed = True
        else:
            if len(old_decl) > 0:
                crontab.remove_env(name)
                changed = True
    else:
        if do_install:
            # Embedded newlines would split the entry into multiple cron lines.
            for char in ['\r', '\n']:
                if char in job.strip('\r\n'):
                    warnings.append('Job should not contain line breaks')
                    break
            job = crontab.get_cron_job(minute, hour, day, month, weekday, job, special_time, disabled)
            old_job = crontab.find_job(name, job)
            if len(old_job) == 0:
                crontab.add_job(name, job)
                changed = True
            if len(old_job) > 0 and old_job[1] != job:
                crontab.update_job(name, job)
                changed = True
            if len(old_job) > 2:
                # More than one entry with this comment: collapse duplicates.
                crontab.update_job(name, job)
                changed = True
        else:
            old_job = crontab.find_job(name)
            if len(old_job) > 0:
                crontab.remove_job(name)
                changed = True
    # no changes to env/job, but existing crontab needs a terminating newline
    if not changed and crontab.existing != '':
        if not (crontab.existing.endswith('\r') or crontab.existing.endswith('\n')):
            changed = True
    res_args = dict(
        jobs=crontab.get_jobnames(),
        envs=crontab.get_envnames(),
        warnings=warnings,
        changed=changed
    )
    if changed:
        if not module.check_mode:
            crontab.write()
        if module._diff:
            diff['after'] = crontab.render()
            if crontab.cron_file:
                diff['after_header'] = crontab.cron_file
            else:
                if crontab.user:
                    diff['after_header'] = 'crontab for user "%s"' % crontab.user
                else:
                    diff['after_header'] = 'crontab'
            res_args['diff'] = diff
    # retain the backup only if crontab or cron file have changed
    if backup and not module.check_mode:
        if changed:
            res_args['backup_file'] = backup_file
        else:
            os.unlink(backup_file)
    if cron_file:
        res_args['cron_file'] = cron_file
    module.exit_json(**res_args)
    # --- should never get here
    module.exit_json(msg="Unable to execute cron task.")
# Standard module entry point: run only when executed directly, not on import.
if __name__ == '__main__':
    main()
| gpl-3.0 |
chromium/chromium | third_party/wpt_tools/wpt/tools/wptrunner/wptrunner/browsers/firefox.py | 1 | 39112 | import base64
import io
import json
import os
import platform
import signal
import subprocess
import tempfile
import zipfile
from abc import ABCMeta, abstractmethod
import mozinfo
import mozleak
import mozversion
from mozprocess import ProcessHandler
from mozprofile import FirefoxProfile, Preferences
from mozrunner import FirefoxRunner
from mozrunner.utils import test_environment, get_stack_fixer_function
from mozcrash import mozcrash
from .base import (Browser,
ExecutorBrowser,
NullBrowser,
OutputHandler,
OutputHandlerState,
browser_command,
cmd_arg,
get_free_port,
require_arg)
from ..executors import executor_kwargs as base_executor_kwargs
from ..executors.executormarionette import (MarionetteTestharnessExecutor, # noqa: F401
MarionetteRefTestExecutor, # noqa: F401
MarionettePrintRefTestExecutor, # noqa: F401
MarionetteWdspecExecutor, # noqa: F401
MarionetteCrashtestExecutor) # noqa: F401
from ..webdriver_server import WebDriverServer
# Absolute path of the directory containing this module.
here = os.path.dirname(__file__)
# Registration table consumed by wptrunner's product loader: maps each hook
# to the name of the callable/class in this module that implements it for
# the "firefox" product.
__wptrunner__ = {"product": "firefox",
                 "check_args": "check_args",
                 "browser": {None: "FirefoxBrowser",
                             "wdspec": "FirefoxWdSpecBrowser"},
                 "executor": {"crashtest": "MarionetteCrashtestExecutor",
                              "testharness": "MarionetteTestharnessExecutor",
                              "reftest": "MarionetteRefTestExecutor",
                              "print-reftest": "MarionettePrintRefTestExecutor",
                              "wdspec": "MarionetteWdspecExecutor"},
                 "browser_kwargs": "browser_kwargs",
                 "executor_kwargs": "executor_kwargs",
                 "env_extras": "env_extras",
                 "env_options": "env_options",
                 "run_info_extras": "run_info_extras",
                 "update_properties": "update_properties",
                 "timeout_multiplier": "get_timeout_multiplier"}
def get_timeout_multiplier(test_type, run_info_data, **kwargs):
    """Pick the timeout multiplier for a test run.

    An explicitly supplied ``timeout_multiplier`` kwarg always wins;
    otherwise the value is derived from the test type and the build and
    platform properties (debug/sanitizer builds, code coverage, Android,
    and Windows-on-ARM).
    """
    explicit = kwargs["timeout_multiplier"]
    if explicit is not None:
        return explicit
    slow_build = (run_info_data["debug"] or
                  run_info_data.get("asan") or
                  run_info_data.get("tsan"))
    if test_type == "reftest":
        return 4 if slow_build else 2
    if slow_build:
        return 4 if run_info_data.get("ccov") else 3
    if run_info_data["os"] == "android":
        return 4
    # https://bugzilla.mozilla.org/show_bug.cgi?id=1538725
    if run_info_data["os"] == "win" and run_info_data["processor"] == "aarch64":
        return 4
    if run_info_data.get("ccov"):
        return 2
    return 1
def check_args(**kwargs):
    # A Firefox binary is mandatory for this product; fail fast if missing.
    require_arg(kwargs, "binary")
def browser_kwargs(logger, test_type, run_info_data, config, **kwargs):
    """Assemble the constructor arguments for the Firefox browser classes."""
    rv = {}
    rv["binary"] = kwargs["binary"]
    rv["prefs_root"] = kwargs["prefs_root"]
    rv["extra_prefs"] = kwargs["extra_prefs"]
    rv["test_type"] = test_type
    rv["debug_info"] = kwargs["debug_info"]
    rv["symbols_path"] = kwargs["symbols_path"]
    rv["stackwalk_binary"] = kwargs["stackwalk_binary"]
    rv["certutil_binary"] = kwargs["certutil_binary"]
    rv["ca_certificate_path"] = config.ssl_config["ca_cert_path"]
    rv["e10s"] = kwargs["gecko_e10s"]
    rv["enable_webrender"] = kwargs["enable_webrender"]
    rv["enable_fission"] = kwargs["enable_fission"]
    rv["stackfix_dir"] = kwargs["stackfix_dir"]
    rv["binary_args"] = kwargs["binary_args"]
    rv["timeout_multiplier"] = get_timeout_multiplier(test_type,
                                                      run_info_data,
                                                      **kwargs)
    # Leak checking defaults on for debug builds unless explicitly disabled.
    rv["leak_check"] = run_info_data["debug"] and (kwargs["leak_check"] is not False)
    rv["asan"] = run_info_data.get("asan")
    rv["stylo_threads"] = kwargs["stylo_threads"]
    rv["chaos_mode_flags"] = kwargs["chaos_mode_flags"]
    rv["config"] = config
    rv["browser_channel"] = kwargs["browser_channel"]
    rv["headless"] = kwargs["headless"]
    # Preloading is pointless when pausing after each test or running a
    # single test group.
    rv["preload_browser"] = (kwargs["preload_browser"] and
                             not kwargs["pause_after_test"] and
                             not kwargs["num_test_groups"] == 1)
    rv["specialpowers_path"] = kwargs["specialpowers_path"]
    return rv
class WdSpecProfile(object):
    """Context manager that owns a browser profile and removes it on exit."""

    def __init__(self, profile):
        # mozprofile Profile-like object; only its cleanup() method is used.
        self.profile = profile

    def __enter__(self):
        return self

    def __exit__(self, *args, **kwargs):
        # Delete the on-disk profile regardless of how the block exited.
        self.profile.cleanup()
def executor_kwargs(logger, test_type, test_environment, run_info_data,
                    **kwargs):
    """Assemble the keyword arguments passed to the executor for *test_type*.

    Starts from the generic executor kwargs and layers on Firefox-specific
    settings: timeout multiplier, reftest behaviour and, for wdspec tests,
    the geckodriver profile/capabilities and browser environment.
    """
    executor_kwargs = base_executor_kwargs(test_type, test_environment, run_info_data,
                                           **kwargs)
    executor_kwargs["close_after_done"] = test_type != "reftest"
    executor_kwargs["timeout_multiplier"] = get_timeout_multiplier(test_type,
                                                                   run_info_data,
                                                                   **kwargs)
    executor_kwargs["e10s"] = run_info_data["e10s"]
    capabilities = {}
    if test_type == "testharness":
        capabilities["pageLoadStrategy"] = "eager"
    if test_type in ("reftest", "print-reftest"):
        executor_kwargs["reftest_internal"] = kwargs["reftest_internal"]
        executor_kwargs["reftest_screenshot"] = kwargs["reftest_screenshot"]
    if test_type == "wdspec":
        options = {"args": []}
        if kwargs["binary"]:
            options["binary"] = kwargs["binary"]
        if kwargs["binary_args"]:
            options["args"] = kwargs["binary_args"]
        profile_creator = ProfileCreator(logger,
                                         kwargs["prefs_root"],
                                         test_environment.config,
                                         test_type,
                                         kwargs["extra_prefs"],
                                         kwargs["gecko_e10s"],
                                         kwargs["enable_fission"],
                                         kwargs["browser_channel"],
                                         kwargs["binary"],
                                         kwargs["certutil_binary"],
                                         test_environment.config.ssl_config["ca_cert_path"])
        if kwargs["processes"] > 1:
            # With multiple processes, we would need a profile directory per process, but we
            # don't have an easy way to do that, so include the profile in the capabilties
            # directly instead. This means recreating it per session, which is slow
            options["profile"] = profile_creator.create_base64()
            profile = None
        else:
            profile = profile_creator.create()
            options["args"].extend(["--profile", profile.profile])
            test_environment.env_extras_cms.append(WdSpecProfile(profile))
        capabilities["moz:firefoxOptions"] = options
        # This gets reused for firefox_android, but the environment setup
        # isn't required in that case
        if kwargs["binary"]:
            environ = get_environ(logger,
                                  kwargs["binary"],
                                  kwargs["debug_info"],
                                  kwargs["stylo_threads"],
                                  kwargs["headless"],
                                  kwargs["enable_webrender"],
                                  kwargs["chaos_mode_flags"])
            leak_report_file = setup_leak_report(kwargs["leak_check"], profile, environ)
            # This doesn't work with wdspec tests
            # In particular tests can create a session without passing in the capabilites
            # and in those cases we get the default geckodriver profile which doesn't
            # guarantee zero network access
            del environ["MOZ_DISABLE_NONLOCAL_CONNECTIONS"]
            executor_kwargs["environ"] = environ
        else:
            if kwargs["headless"] and "--headless" not in options["args"]:
                options["args"].append("--headless")
            leak_report_file = None
        # Fix: a stray trailing comma previously made this a one-element
        # tuple rather than the stackfix directory path itself.
        executor_kwargs["stackfix_dir"] = kwargs["stackfix_dir"]
        executor_kwargs["leak_report_file"] = leak_report_file
        executor_kwargs["asan"] = run_info_data.get("asan")
    if kwargs["certutil_binary"] is None:
        # Without certutil we can't trust the test CA, so accept any cert.
        capabilities["acceptInsecureCerts"] = True
    if capabilities:
        executor_kwargs["capabilities"] = capabilities
    executor_kwargs["debug"] = run_info_data["debug"]
    executor_kwargs["ccov"] = run_info_data.get("ccov", False)
    executor_kwargs["browser_version"] = run_info_data.get("browser_version")
    executor_kwargs["debug_test"] = kwargs["debug_test"]
    return executor_kwargs
def env_extras(**kwargs):
    """Firefox needs no extra environment context managers."""
    return list()
def env_options():
    """Return server environment options for the Firefox product."""
    # The server host is set to 127.0.0.1 as Firefox is configured (through
    # the network.dns.localDomains preference) to resolve the test domains
    # to localhost without relying on the network stack.
    #
    # https://github.com/web-platform-tests/wpt/pull/9480
    options = {"server_host": "127.0.0.1"}
    options["supports_debugger"] = True
    return options
def run_info_extras(**kwargs):
    """Compute extra run-info properties describing this Firefox configuration."""

    def pref_value(pref):
        # Boolean value of an explicitly-set pref, or None when unset.
        for key, value in kwargs.get('extra_prefs', []):
            if key == pref:
                return value.lower() in ('true', '1')
        return None

    def pref_enabled(pref):
        value = pref_value(pref)
        return value if value is not None else False

    fission = kwargs.get("enable_fission") or pref_enabled("fission.autostart")
    rv = {"e10s": kwargs["gecko_e10s"],
          "wasm": kwargs.get("wasm", True),
          "verify": kwargs["verify"],
          "headless": kwargs.get("headless", False) or "MOZ_HEADLESS" in os.environ,
          "sw-e10s": True,
          "fission": fission,
          "sessionHistoryInParent": (fission or
                                     pref_enabled("fission.sessionHistoryInParent")),
          "swgl": pref_enabled("gfx.webrender.software")}
    # The value of `sw-e10s` defaults to whether the "parent_intercept"
    # implementation is enabled for the current build, but an explicit
    # --setpref on the command line overrides it.
    sw_override = pref_value("dom.serviceWorkers.parent_intercept")
    if sw_override is not None:
        rv["sw-e10s"] = sw_override
    rv.update(run_info_browser_version(**kwargs))
    return rv
def run_info_browser_version(**kwargs):
    """Query the Firefox binary for version/build metadata, if available."""
    try:
        info = mozversion.get_version(kwargs["binary"])
    except mozversion.errors.VersionError:
        info = None
    if not info:
        return {}
    rv = {"browser_build_id": info.get("application_buildid", None),
          "browser_changeset": info.get("application_changeset", None)}
    if "browser_version" not in kwargs:
        # Only fill in the version when the caller didn't supply one.
        rv["browser_version"] = info.get("application_version")
    return rv
def update_properties():
    """Run-info properties used to key expectation-metadata updates."""
    primary = ["os", "debug", "webrender", "fission", "e10s", "sw-e10s", "processor", "swgl"]
    dependent = {"os": ["version"], "processor": ["bits"]}
    return primary, dependent
def get_environ(logger, binary, debug_info, stylo_threads, headless, enable_webrender,
                chaos_mode_flags=None):
    """Build the environment dict used to launch the Firefox process."""
    env = test_environment(xrePath=os.path.abspath(os.path.dirname(binary)),
                           debugger=debug_info is not None,
                           useLSan=True,
                           log=logger)
    env["STYLO_THREADS"] = str(stylo_threads)
    if chaos_mode_flags is not None:
        env["MOZ_CHAOSMODE"] = str(chaos_mode_flags)
    if headless:
        env["MOZ_HEADLESS"] = "1"
    # Webrender is toggled explicitly either way so the browser never falls
    # back to autodetection.
    env["MOZ_WEBRENDER"] = "1" if enable_webrender else "0"
    if enable_webrender:
        env["MOZ_ACCELERATED"] = "1"
    return env
def setup_leak_report(leak_check, profile, env):
    """Configure XPCOM leak logging; return the log path or None.

    The log lives inside *profile* when one is given, otherwise in the
    system temp directory. Any stale log for the same pid is removed and
    XPCOM_MEM_BLOAT_LOG is set in *env* so Gecko writes to it.
    """
    if not leak_check:
        return None
    filename = "runtests_leaks_%s.log" % os.getpid()
    base_dir = profile.profile if profile is not None else tempfile.gettempdir()
    leak_report_file = os.path.join(base_dir, filename)
    if os.path.exists(leak_report_file):
        os.remove(leak_report_file)
    env["XPCOM_MEM_BLOAT_LOG"] = leak_report_file
    return leak_report_file
class FirefoxInstanceManager(metaclass=ABCMeta):
    """Abstract base managing starting and stopping Firefox instances.

    Fix: the class previously set ``__metaclass__ = ABCMeta``, which is the
    Python 2 spelling and has no effect on Python 3, so the abstract methods
    were not actually enforced; declare the metaclass properly instead.
    """

    def __init__(self, logger, binary, binary_args, profile_creator, debug_info,
                 chaos_mode_flags, headless, enable_webrender, stylo_threads,
                 leak_check, stackfix_dir, symbols_path, asan):
        """Object that manages starting and stopping instances of Firefox."""
        self.logger = logger
        self.binary = binary
        self.binary_args = binary_args
        # Profile created once up front; cloned for each browser instance.
        self.base_profile = profile_creator.create()
        self.debug_info = debug_info
        self.chaos_mode_flags = chaos_mode_flags
        self.headless = headless
        self.enable_webrender = enable_webrender
        self.stylo_threads = stylo_threads
        self.leak_check = leak_check
        self.stackfix_dir = stackfix_dir
        self.symbols_path = symbols_path
        self.asan = asan
        # previous: the most recently stopped instance (kept alive for
        # post-shutdown processing); current: the running instance, if any.
        self.previous = None
        self.current = None

    @abstractmethod
    def teardown(self, force=False):
        """Stop and clean up all instances managed by this object."""
        pass

    @abstractmethod
    def get(self):
        """Get a BrowserInstance for a running Firefox.

        This can only be called once per instance, and between calls stop_current()
        must be called."""
        pass

    def stop_current(self, force=False):
        """Shutdown the current instance of Firefox.

        The BrowserInstance remains available through self.previous, since some
        operations happen after shutdown."""
        if not self.current:
            return
        self.current.stop(force)
        self.previous = self.current
        self.current = None

    def start(self):
        """Start an instance of Firefox, returning a BrowserInstance handle"""
        profile = self.base_profile.clone(self.base_profile.profile)
        # Each instance gets its own Marionette port set via a pref.
        marionette_port = get_free_port()
        profile.set_preferences({"marionette.port": marionette_port})
        env = get_environ(self.logger, self.binary, self.debug_info, self.stylo_threads,
                          self.headless, self.enable_webrender, self.chaos_mode_flags)
        args = self.binary_args[:] if self.binary_args else []
        args += [cmd_arg("marionette"), "about:blank"]
        debug_args, cmd = browser_command(self.binary,
                                          args,
                                          self.debug_info)
        leak_report_file = setup_leak_report(self.leak_check, profile, env)
        output_handler = FirefoxOutputHandler(self.logger,
                                              cmd,
                                              stackfix_dir=self.stackfix_dir,
                                              symbols_path=self.symbols_path,
                                              asan=self.asan,
                                              leak_report_file=leak_report_file)
        runner = FirefoxRunner(profile=profile,
                               binary=cmd[0],
                               cmdargs=cmd[1:],
                               env=env,
                               process_class=ProcessHandler,
                               process_args={"processOutputLine": [output_handler]})
        instance = BrowserInstance(self.logger, runner, marionette_port,
                                   output_handler, leak_report_file)
        self.logger.debug("Starting Firefox")
        runner.start(debug_args=debug_args,
                     interactive=self.debug_info and self.debug_info.interactive)
        output_handler.after_process_start(runner.process_handler.pid)
        self.logger.debug("Firefox Started")
        return instance
class SingleInstanceManager(FirefoxInstanceManager):
    """FirefoxInstanceManager that manages a single Firefox instance"""

    def get(self):
        """Start and return a fresh instance, cleaning up the previous one."""
        assert not self.current, ("Tried to call get() on InstanceManager that has "
                                  "an existing instance")
        previous, self.previous = self.previous, None
        if previous:
            previous.cleanup()
        self.current = self.start()
        return self.current

    def teardown(self, force=False):
        """Stop and clean up any remaining instances and the base profile."""
        for instance in (self.previous, self.current):
            if instance:
                instance.stop(force)
                instance.cleanup()
        self.base_profile.cleanup()
class PreloadInstanceManager(FirefoxInstanceManager):
    """FirefoxInstanceManager that keeps one Firefox instance preloaded
    to allow rapid resumption after an instance shuts down."""

    def __init__(self, *args, **kwargs):
        # Py3-style super(), consistent with the other classes in this file.
        super().__init__(*args, **kwargs)
        # Instance started ahead of time; promoted to current by get().
        self.pending = None

    def get(self):
        """Return a running instance and immediately preload its replacement."""
        assert not self.current, ("Tried to call get() on InstanceManager that has "
                                  "an existing instance")
        if self.previous:
            self.previous.cleanup()
            self.previous = None
        if not self.pending:
            self.pending = self.start()
        self.current = self.pending
        self.pending = self.start()
        return self.current

    def teardown(self, force=False):
        """Stop and clean up all instances, including the unused preload."""
        for instance, unused in [(self.previous, False),
                                 (self.current, False),
                                 (self.pending, True)]:
            if instance:
                instance.stop(force, unused)
                instance.cleanup()
        self.base_profile.cleanup()
class BrowserInstance:
    # Seconds to wait for Firefox to shut down before escalating to signals.
    shutdown_timeout = 70

    def __init__(self, logger, runner, marionette_port, output_handler, leak_report_file):
        """Handle to a running Firefox instance"""
        self.logger = logger
        self.runner = runner  # mozrunner FirefoxRunner owning the process
        self.marionette_port = marionette_port  # port Marionette listens on
        self.output_handler = output_handler  # FirefoxOutputHandler for process output
        self.leak_report_file = leak_report_file  # XPCOM leak log path, or None

    def stop(self, force=False, unused=False):
        """Stop Firefox

        :param force: Signal the firefox process without waiting for a clean shutdown
        :param unused: This instance was not used for running tests and so
        doesn't have an active marionette session and doesn't require
        output postprocessing.
        """
        is_running = self.runner is not None and self.runner.is_running()
        if is_running:
            self.logger.debug("Stopping Firefox %s" % self.pid())
            # Escalation ladder: wait for a clean exit, then SIGTERM, then
            # SIGKILL where available. The boolean records whether that
            # method counts as a clean shutdown.
            shutdown_methods = [(True, lambda: self.runner.wait(self.shutdown_timeout)),
                                (False, lambda: self.runner.stop(signal.SIGTERM,
                                                                 self.shutdown_timeout))]
            if hasattr(signal, "SIGKILL"):
                shutdown_methods.append((False, lambda: self.runner.stop(signal.SIGKILL,
                                                                         self.shutdown_timeout)))
            if unused or force:
                # Don't wait for the instance to close itself
                shutdown_methods = shutdown_methods[1:]
            try:
                # For Firefox we assume that stopping the runner prompts the
                # browser to shut down. This allows the leak log to be written
                for i, (clean, stop_f) in enumerate(shutdown_methods):
                    self.logger.debug("Shutting down attempt %i/%i" % (i + 1, len(shutdown_methods)))
                    retcode = stop_f()
                    if retcode is not None:
                        self.logger.info("Browser exited with return code %s" % retcode)
                        break
            except OSError:
                # This can happen on Windows if the process is already dead
                pass
        elif self.runner:
            # The browser was already stopped, which we assume was a crash
            # TODO: Should we check the exit code here?
            clean = False
        # NOTE(review): if self.runner is None and unused is False, `clean`
        # is unbound here — presumably unreachable in practice; confirm.
        if not unused:
            self.output_handler.after_process_stop(clean_shutdown=clean)

    def pid(self):
        """Return the browser process id, or None if it isn't available."""
        if self.runner.process_handler is None:
            return None
        try:
            return self.runner.process_handler.pid
        except AttributeError:
            return None

    def is_alive(self):
        """Return True if the browser process is currently running."""
        if self.runner:
            return self.runner.is_running()
        return False

    def cleanup(self):
        """Release the runner's resources; the instance is unusable afterwards."""
        self.runner.cleanup()
        self.runner = None
class FirefoxOutputHandler(OutputHandler):
    def __init__(self, logger, command, symbols_path=None, stackfix_dir=None, asan=False,
                 leak_report_file=None):
        """Filter for handling Firefox process output.

        This receives Firefox process output in the __call__ function, does
        any additional processing that's required, and decides whether to log
        the output. Because the Firefox process can be started before we know
        which filters are going to be required, we buffer all output until
        setup() is called. This is responsible for doing the final configuration
        of the output handlers.
        """
        super().__init__(logger, command)
        self.symbols_path = symbols_path
        if stackfix_dir:
            # We hide errors because they cause disconcerting `CRITICAL`
            # warnings in web platform test output.
            self.stack_fixer = get_stack_fixer_function(stackfix_dir,
                                                        self.symbols_path,
                                                        hideErrors=True)
        else:
            self.stack_fixer = None
        self.asan = asan
        self.leak_report_file = leak_report_file
        # These are filled in after configure_handlers() is called
        self.lsan_handler = None
        self.mozleak_allowed = None
        self.mozleak_thresholds = None
        self.group_metadata = {}

    def start(self, group_metadata=None, lsan_disabled=False, lsan_allowed=None,
              lsan_max_stack_depth=None, mozleak_allowed=None, mozleak_thresholds=None,
              **kwargs):
        """Configure the output handler"""
        if group_metadata is None:
            group_metadata = {}
        self.group_metadata = group_metadata
        self.mozleak_allowed = mozleak_allowed
        self.mozleak_thresholds = mozleak_thresholds
        if self.asan:
            # LSAN output only exists (and needs parsing) on ASAN builds.
            self.lsan_handler = mozleak.LSANLeaks(self.logger,
                                                  scope=group_metadata.get("scope", "/"),
                                                  allowed=lsan_allowed,
                                                  maxNumRecordedFrames=lsan_max_stack_depth,
                                                  allowAll=lsan_disabled)
        else:
            self.lsan_handler = None
        super().start()

    def after_process_stop(self, clean_shutdown=True):
        # Post-shutdown processing: flush LSAN results and the XPCOM leak log.
        super().after_process_stop(clean_shutdown)
        if self.lsan_handler:
            self.lsan_handler.process()
        if self.leak_report_file is not None:
            if not clean_shutdown:
                # If we didn't get a clean shutdown there probably isn't a leak report file
                self.logger.warning("Firefox didn't exit cleanly, not processing leak logs")
            else:
                # We have to ignore missing leaks in the tab because it can happen that the
                # content process crashed and in that case we don't want the test to fail.
                # Ideally we would record which content process crashed and just skip those.
                self.logger.info("PROCESS LEAKS %s" % self.leak_report_file)
                mozleak.process_leak_log(
                    self.leak_report_file,
                    leak_thresholds=self.mozleak_thresholds,
                    ignore_missing_leaks=["tab", "gmplugin"],
                    log=self.logger,
                    stack_fixer=self.stack_fixer,
                    scope=self.group_metadata.get("scope"),
                    allowed=self.mozleak_allowed)
            if os.path.exists(self.leak_report_file):
                os.unlink(self.leak_report_file)

    def __call__(self, line):
        """Write a line of output from the firefox process to the log"""
        # Drop noisy GLib warnings entirely; note `line` is bytes here.
        if b"GLib-GObject-CRITICAL" in line:
            return
        if line:
            if self.state < OutputHandlerState.AFTER_HANDLER_START:
                # Handlers not configured yet: buffer until start() runs.
                self.line_buffer.append(line)
                return
            data = line.decode("utf8", "replace")
            if self.stack_fixer:
                data = self.stack_fixer(data)
            if self.lsan_handler:
                # May return None to indicate the line was consumed by LSAN.
                data = self.lsan_handler.log(data)
            if data is not None:
                self.logger.process_output(self.pid,
                                           data,
                                           command=" ".join(self.command))
class ProfileCreator:
    """Builds Firefox profiles configured for running web-platform-tests."""

    def __init__(self, logger, prefs_root, config, test_type, extra_prefs, e10s,
                 enable_fission, browser_channel, binary, certutil_binary, ca_certificate_path):
        self.logger = logger
        self.prefs_root = prefs_root
        self.config = config
        self.test_type = test_type
        self.extra_prefs = extra_prefs
        self.e10s = e10s
        self.enable_fission = enable_fission
        self.browser_channel = browser_channel
        self.binary = binary
        self.certutil_binary = certutil_binary
        # Previously assigned twice; a single assignment is sufficient.
        self.ca_certificate_path = ca_certificate_path

    def create(self, **kwargs):
        """Create a Firefox profile and return the mozprofile Profile object pointing at that
        profile

        :param kwargs: Additional arguments to pass into the profile constructor
        """
        preferences = self._load_prefs()
        profile = FirefoxProfile(preferences=preferences,
                                 restore=False,
                                 **kwargs)
        self._set_required_prefs(profile)
        if self.ca_certificate_path is not None:
            self._setup_ssl(profile)
        return profile

    def create_base64(self, **kwargs):
        """Create a profile and return it zipped and base64-encoded.

        Used when the profile must travel inside WebDriver capabilities
        rather than as an on-disk directory; the temporary profile is
        cleaned up before returning.
        """
        profile = self.create(**kwargs)
        try:
            with io.BytesIO() as buf:
                with zipfile.ZipFile(buf, "w", compression=zipfile.ZIP_DEFLATED) as zipf:
                    for dirpath, _, filenames in os.walk(profile.profile):
                        for filename in filenames:
                            src_path = os.path.join(dirpath, filename)
                            dest_path = os.path.relpath(src_path, profile.profile)
                            with open(src_path, "rb") as f:
                                zipf.writestr(dest_path, f.read())
                return base64.b64encode(buf.getvalue()).decode("ascii").strip()
        finally:
            profile.cleanup()

    def _load_prefs(self):
        """Load base preferences from prefs_root plus any extra prefs."""
        prefs = Preferences()
        pref_paths = []
        profiles = os.path.join(self.prefs_root, 'profiles.json')
        if os.path.isfile(profiles):
            with open(profiles, 'r') as fh:
                for name in json.load(fh)['web-platform-tests']:
                    if self.browser_channel in (None, 'nightly'):
                        pref_paths.append(os.path.join(self.prefs_root, name, 'user.js'))
                    elif name != 'unittest-features':
                        # Release channels skip the nightly-only feature prefs.
                        pref_paths.append(os.path.join(self.prefs_root, name, 'user.js'))
        else:
            # Old preference files used before the creation of profiles.json (remove when no longer supported)
            legacy_pref_paths = (
                os.path.join(self.prefs_root, 'prefs_general.js'),  # Used in Firefox 60 and below
                os.path.join(self.prefs_root, 'common', 'user.js'),  # Used in Firefox 61
            )
            for path in legacy_pref_paths:
                if os.path.isfile(path):
                    pref_paths.append(path)
        for path in pref_paths:
            if os.path.exists(path):
                prefs.add(Preferences.read_prefs(path))
            else:
                self.logger.warning("Failed to find base prefs file in %s" % path)
        # Add any custom preferences
        prefs.add(self.extra_prefs, cast=True)
        return prefs()

    def _set_required_prefs(self, profile):
        """Set preferences required for wptrunner to function.

        Note that this doesn't set the marionette port, since we don't always
        know that at profile creation time. So the caller is responisble for
        setting that once it's available."""
        profile.set_preferences({
            "network.dns.localDomains": ",".join(self.config.domains_set),
            "dom.file.createInChild": True,
            # TODO: Remove preferences once Firefox 64 is stable (Bug 905404)
            "network.proxy.type": 0,
            "places.history.enabled": False,
            "network.preload": True,
        })
        if self.e10s:
            profile.set_preferences({"browser.tabs.remote.autostart": True})
        if self.enable_fission:
            profile.set_preferences({"fission.autostart": True})
        if self.test_type in ("reftest", "print-reftest"):
            profile.set_preferences({"layout.interruptible-reflow.enabled": False})
        if self.test_type == "print-reftest":
            profile.set_preferences({"print.always_print_silent": True})
        # Bug 1262954: winxp + e10s, disable hwaccel
        if (self.e10s and platform.system() in ("Windows", "Microsoft") and
                "5.1" in platform.version()):
            # Fix: this previously referenced self.profile, an attribute that
            # doesn't exist on ProfileCreator; the pref belongs on the
            # profile being configured.
            profile.set_preferences({"layers.acceleration.disabled": True})

    def _setup_ssl(self, profile):
        """Create a certificate database to use in the test profile. This is configured
        to trust the CA Certificate that has signed the web-platform.test server
        certificate."""
        if self.certutil_binary is None:
            self.logger.info("--certutil-binary not supplied; Firefox will not check certificates")
            return
        self.logger.info("Setting up ssl")
        # Make sure the certutil libraries from the source tree are loaded when using a
        # local copy of certutil
        # TODO: Maybe only set this if certutil won't launch?
        env = os.environ.copy()
        certutil_dir = os.path.dirname(self.binary or self.certutil_binary)
        if mozinfo.isMac:
            env_var = "DYLD_LIBRARY_PATH"
        elif mozinfo.isUnix:
            env_var = "LD_LIBRARY_PATH"
        else:
            env_var = "PATH"
        env[env_var] = (os.path.pathsep.join([certutil_dir, env[env_var]])
                        if env_var in env else certutil_dir)

        def certutil(*args):
            cmd = [self.certutil_binary] + list(args)
            self.logger.process_output("certutil",
                                       subprocess.check_output(cmd,
                                                               env=env,
                                                               stderr=subprocess.STDOUT),
                                       " ".join(cmd))

        pw_path = os.path.join(profile.profile, ".crtdbpw")
        with open(pw_path, "w") as f:
            # Use empty password for certificate db
            f.write("\n")
        cert_db_path = profile.profile
        # Create a new certificate db
        certutil("-N", "-d", cert_db_path, "-f", pw_path)
        # Add the CA certificate to the database and mark as trusted to issue server certs
        certutil("-A", "-d", cert_db_path, "-f", pw_path, "-t", "CT,,",
                 "-n", "web-platform-tests", "-i", self.ca_certificate_path)
        # List all certs in the database
        certutil("-L", "-d", cert_db_path)
class FirefoxBrowser(Browser):
    # Seconds allowed for the browser to become ready; scaled by the
    # timeout multiplier in __init__.
    init_timeout = 70

    def __init__(self, logger, binary, prefs_root, test_type, extra_prefs=None, debug_info=None,
                 symbols_path=None, stackwalk_binary=None, certutil_binary=None,
                 ca_certificate_path=None, e10s=False, enable_webrender=False, enable_fission=False,
                 stackfix_dir=None, binary_args=None, timeout_multiplier=None, leak_check=False,
                 asan=False, stylo_threads=1, chaos_mode_flags=None, config=None,
                 browser_channel="nightly", headless=None, preload_browser=False,
                 specialpowers_path=None, **kwargs):
        """Firefox browser controlled via Marionette.

        Process lifecycle is delegated to a FirefoxInstanceManager; the
        profile is built by a ProfileCreator and cloned per instance.
        """
        Browser.__init__(self, logger)
        self.logger = logger
        if timeout_multiplier:
            self.init_timeout = self.init_timeout * timeout_multiplier
        self.instance = None  # current BrowserInstance, set by start()
        self._settings = None  # per-test settings cached by settings()
        self.stackfix_dir = stackfix_dir
        self.symbols_path = symbols_path
        self.stackwalk_binary = stackwalk_binary
        self.asan = asan
        self.leak_check = leak_check
        self.specialpowers_path = specialpowers_path
        profile_creator = ProfileCreator(logger,
                                         prefs_root,
                                         config,
                                         test_type,
                                         extra_prefs,
                                         e10s,
                                         enable_fission,
                                         browser_channel,
                                         binary,
                                         certutil_binary,
                                         ca_certificate_path)
        # Preloading keeps a spare instance running to reduce restart latency.
        if preload_browser:
            instance_manager_cls = PreloadInstanceManager
        else:
            instance_manager_cls = SingleInstanceManager
        self.instance_manager = instance_manager_cls(logger,
                                                     binary,
                                                     binary_args,
                                                     profile_creator,
                                                     debug_info,
                                                     chaos_mode_flags,
                                                     headless,
                                                     enable_webrender,
                                                     stylo_threads,
                                                     leak_check,
                                                     stackfix_dir,
                                                     symbols_path,
                                                     asan)

    def settings(self, test):
        """Compute and cache per-test settings (leak checking, SpecialPowers)."""
        self._settings = {"check_leaks": self.leak_check and not test.leaks,
                          "lsan_disabled": test.lsan_disabled,
                          "lsan_allowed": test.lsan_allowed,
                          "lsan_max_stack_depth": test.lsan_max_stack_depth,
                          "mozleak_allowed": self.leak_check and test.mozleak_allowed,
                          "mozleak_thresholds": self.leak_check and test.mozleak_threshold,
                          "special_powers": self.specialpowers_path and test.url_base == "/_mozilla/"}
        return self._settings

    def start(self, group_metadata=None, **kwargs):
        """Obtain a running instance and configure its output handler."""
        self.instance = self.instance_manager.get()
        self.instance.output_handler.start(group_metadata,
                                           **kwargs)

    def stop(self, force=False):
        """Stop the current browser instance."""
        self.instance_manager.stop_current(force)
        self.logger.debug("stopped")

    def pid(self):
        """Return the pid of the current browser instance."""
        return self.instance.pid()

    def is_alive(self):
        """Return True if there is a currently running instance."""
        return self.instance and self.instance.is_alive()

    def cleanup(self, force=False):
        """Tear down all instances managed by the instance manager."""
        self.instance_manager.teardown(force)

    def executor_browser(self):
        """Return the ExecutorBrowser class and kwargs for this browser."""
        assert self.instance is not None
        extensions = []
        if self._settings.get("special_powers", False):
            # Install the SpecialPowers extension for /_mozilla/ tests only.
            extensions.append(self.specialpowers_path)
        return ExecutorBrowser, {"marionette_port": self.instance.marionette_port,
                                 "extensions": extensions}

    def check_crash(self, process, test):
        """Log any minidumps in the profile; return True if a crash was found."""
        dump_dir = os.path.join(self.instance.runner.profile.profile, "minidumps")
        try:
            return bool(mozcrash.log_crashes(self.logger,
                                             dump_dir,
                                             symbols_path=self.symbols_path,
                                             stackwalk_binary=self.stackwalk_binary,
                                             process=process,
                                             test=test))
        except IOError:
            self.logger.warning("Looking for crash dump files failed")
            return False
class FirefoxWdSpecBrowser(NullBrowser):
    """Null browser used for wdspec tests, where geckodriver owns the
    Firefox process; this class only contributes leak-check settings."""

    def __init__(self, logger, leak_check=False, **kwargs):
        super().__init__(logger, **kwargs)
        self.leak_check = leak_check

    def settings(self, test):
        """Build the per-test leak/LSAN configuration from test metadata."""
        run_leak_checks = self.leak_check and not test.leaks
        return {
            "check_leaks": run_leak_checks,
            "lsan_disabled": test.lsan_disabled,
            "lsan_allowed": test.lsan_allowed,
            "lsan_max_stack_depth": test.lsan_max_stack_depth,
            "mozleak_allowed": self.leak_check and test.mozleak_allowed,
            "mozleak_thresholds": self.leak_check and test.mozleak_threshold,
        }
class GeckoDriverServer(WebDriverServer):
    """WebDriverServer that launches geckodriver in front of Firefox."""

    output_handler_cls = FirefoxOutputHandler

    def __init__(self, logger, marionette_port=2828, binary="geckodriver",
                 host="127.0.0.1", port=None, env=None, args=None):
        # Inherit the caller's environment by default, and always ask Rust
        # for full backtraces so geckodriver crashes are diagnosable.
        environment = os.environ.copy() if env is None else env
        environment["RUST_BACKTRACE"] = "1"
        super().__init__(logger, binary,
                         host=host,
                         port=port,
                         env=environment,
                         args=args)
        self.marionette_port = marionette_port

    def make_command(self):
        """Return the geckodriver command line to execute."""
        cmd = [self.binary,
               "--marionette-port", str(self.marionette_port),
               "--host", self.host,
               "--port", str(self.port)]
        return cmd + self._args
| bsd-3-clause |
Marvellousteam/android_kernel_sony_msm8930 | tools/perf/util/setup.py | 4998 | 1330 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
    """build_ext variant honouring the out-of-tree build directories that
    the perf Makefile provides via PYTHON_EXTBUILD_LIB/_TMP."""
    def finalize_options(self):
        _build_ext.finalize_options(self)
        # build_lib/build_tmp are module-level values read from the
        # environment below.
        self.build_lib = build_lib
        self.build_temp = build_tmp
class install_lib(_install_lib):
    """install_lib variant that installs from the out-of-tree build dir."""
    def finalize_options(self):
        _install_lib.finalize_options(self)
        # Install directly out of the Makefile-provided build directory.
        self.build_dir = build_lib
# Compiler flags for the extension; the Makefile can extend them via CFLAGS.
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()

# Out-of-tree build locations injected by the perf Makefile.
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')

# Read the list of C source files, skipping blank lines and lines whose
# first character is '#'.  Use open() in a context manager instead of the
# Python-2-only file() builtin: this also runs under Python 3 and closes
# the handle deterministically.
with open('util/python-ext-sources') as sources:
    ext_sources = [f.strip() for f in sources
                   if len(f.strip()) > 0 and f[0] != '#']

perf = Extension('perf',
                 sources = ext_sources,
                 include_dirs = ['util/include'],
                 extra_compile_args = cflags,
                 )
# Standard distutils metadata; the custom cmdclass redirects build/install
# into the Makefile-provided directories.
setup(name='perf',
      version='0.1',
      description='Interface with the Linux profiling infrastructure',
      author='Arnaldo Carvalho de Melo',
      author_email='acme@redhat.com',
      license='GPLv2',
      url='http://perf.wiki.kernel.org',
      ext_modules=[perf],
      cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 |
Qalthos/ansible | lib/ansible/modules/network/dellos6/dellos6_config.py | 39 | 13065 | #!/usr/bin/python
#
# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
# Copyright (c) 2016 Dell Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata consumed by ansible-doc tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = """
---
module: dellos6_config
version_added: "2.2"
author: "Abirami N (@abirami-n)"
short_description: Manage Dell EMC Networking OS6 configuration sections
description:
- OS6 configurations use a simple block indent file syntax
for segmenting configuration into sections. This module provides
an implementation for working with OS6 configuration sections in
a deterministic way.
extends_documentation_fragment: dellos6
options:
lines:
description:
- The ordered set of commands that should be configured in the
section. The commands must be the exact same commands as found
in the device running-config. Be sure to note the configuration
command syntax as some commands are automatically modified by the
device config parser. This argument is mutually exclusive with I(src).
aliases: ['commands']
parents:
description:
- The ordered set of parents that uniquely identify the section or hierarchy
the commands should be checked against. If the parents argument
is omitted, the commands are checked against the set of top
level or global commands.
src:
description:
- Specifies the source path to the file that contains the configuration
or configuration template to load. The path to the source file can
either be the full path on the Ansible control host or a relative
path from the playbook or role root directory. This argument is
mutually exclusive with I(lines).
before:
description:
- The ordered set of commands to push on to the command stack if
a change needs to be made. This allows the playbook designer
the opportunity to perform configuration commands prior to pushing
any changes without affecting how the set of commands are matched
against the system.
after:
description:
- The ordered set of commands to append to the end of the command
stack if a change needs to be made. Just like with I(before) this
allows the playbook designer to append a set of commands to be
executed after the command set.
match:
description:
- Instructs the module on the way to perform the matching of
the set of commands against the current device config. If
match is set to I(line), commands are matched line by line. If
match is set to I(strict), command lines are matched with respect
to position. If match is set to I(exact), command lines
must be an equal match. Finally, if match is set to I(none), the
module will not attempt to compare the source configuration with
the running configuration on the remote device.
default: line
choices: ['line', 'strict', 'exact', 'none']
replace:
description:
- Instructs the module on the way to perform the configuration
on the device. If the replace argument is set to I(line) then
the modified lines are pushed to the device in configuration
mode. If the replace argument is set to I(block) then the entire
command block is pushed to the device in configuration mode if any
line is not correct.
default: line
choices: ['line', 'block']
update:
description:
- The I(update) argument controls how the configuration statements
are processed on the remote device. Valid choices for the I(update)
argument are I(merge) and I(check). When you set this argument to
I(merge), the configuration changes merge with the current
device running configuration. When you set this argument to I(check)
the configuration updates are determined but not actually configured
on the remote device.
default: merge
choices: ['merge', 'check']
save:
description:
- The C(save) argument instructs the module to save the running-
config to the startup-config at the conclusion of the module
running. If check mode is specified, this argument is ignored.
type: bool
default: 'no'
config:
description:
- The module, by default, will connect to the remote device and
retrieve the current running-config to use as a base for comparing
against the contents of source. There are times when it is not
desirable to have the task get the current running-config for
every task in a playbook. The I(config) argument allows the
implementer to pass in the configuration to use as the base
config for comparison.
backup:
description:
- This argument will cause the module to create a full backup of
the current C(running-config) from the remote device before any
changes are made. If the C(backup_options) value is not given,
the backup file is written to the C(backup) folder in the playbook
root directory. If the directory does not exist, it is created.
type: bool
default: 'no'
backup_options:
description:
- This is a dict object containing configurable options related to backup file path.
        The value of this option is read only when C(backup) is set to I(yes); if C(backup) is set
        to I(no), this option will be silently ignored.
suboptions:
filename:
description:
          - The filename to be used to store the backup configuration. If the filename
is not given it will be generated based on the hostname, current time and date
in format defined by <hostname>_config.<current-date>@<current-time>
dir_path:
description:
- This option provides the path ending with directory name in which the backup
configuration file will be stored. If the directory does not exist it will be first
created and the filename is either the value of C(filename) or default filename
as described in C(filename) options description. If the path value is not given
in that case a I(backup) directory will be created in the current working directory
and backup configuration will be copied in C(filename) within I(backup) directory.
type: path
type: dict
version_added: "2.8"
"""
EXAMPLES = """
- dellos6_config:
lines: ['hostname {{ inventory_hostname }}']
- dellos6_config:
lines:
- 10 permit ip 1.1.1.1 any log
- 20 permit ip 2.2.2.2 any log
- 30 permit ip 3.3.3.3 any log
- 40 permit ip 4.4.4.4 any log
- 50 permit ip 5.5.5.5 any log
parents: ['ip access-list test']
before: ['no ip access-list test']
match: exact
- dellos6_config:
lines:
- 10 permit ip 1.1.1.1 any log
- 20 permit ip 2.2.2.2 any log
- 30 permit ip 3.3.3.3 any log
- 40 permit ip 4.4.4.4 any log
parents: ['ip access-list test']
before: ['no ip access-list test']
replace: block
- dellos6_config:
lines: ['hostname {{ inventory_hostname }}']
backup: yes
backup_options:
filename: backup.cfg
dir_path: /home/user
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device.
returned: always
type: list
sample: ['interface Te1/0/1', 'no shutdown', 'exit']
commands:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['interface Te1/0/1', 'no shutdown', 'exit']
saved:
description: Returns whether the configuration is saved to the startup
configuration or not.
returned: When not check_mode.
type: bool
sample: True
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: str
sample: /playbooks/ansible/backup/dellos6_config.2017-07-16@22:28:34
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.dellos6.dellos6 import get_config, get_sublevel_config, Dellos6NetworkConfig
from ansible.module_utils.network.dellos6.dellos6 import dellos6_argument_spec, check_args
from ansible.module_utils.network.dellos6.dellos6 import load_config, run_commands
from ansible.module_utils.network.dellos6.dellos6 import WARNING_PROMPTS_RE
from ansible.module_utils.network.common.config import dumps
def get_candidate(module):
    """Build the candidate configuration from either `src` or `lines`.

    Returns a Dellos6NetworkConfig holding the configuration the user wants
    applied, nested under any `parents` supplied.
    """
    candidate = Dellos6NetworkConfig(indent=0)
    if module.params['src']:
        candidate.load(module.params['src'])
    elif module.params['lines']:
        parents = module.params['parents'] or list()
        # NOTE(review): only the first element of `lines` is inspected for
        # the dict {command, prompt, answer} form; plain string lists fall
        # through to the else branch and are added wholesale.
        commands = module.params['lines'][0]
        if (isinstance(commands, dict)) and (isinstance(commands['command'], list)):
            candidate.add(commands['command'], parents=parents)
        elif (isinstance(commands, dict)) and (isinstance(commands['command'], str)):
            candidate.add([commands['command']], parents=parents)
        else:
            candidate.add(module.params['lines'], parents=parents)
    return candidate
def get_running_config(module):
    """Return the base configuration to diff against.

    Prefers an explicitly supplied `config` module parameter; otherwise the
    device's running configuration is fetched from the remote.
    """
    explicit = module.params['config']
    if explicit:
        return explicit
    return get_config(module)
def main():
    """Ansible module entry point: compute the config diff and apply it.

    Builds the candidate configuration, diffs it against the running config
    (unless match == 'none'), optionally merges the resulting commands onto
    the device, and optionally saves running-config to startup-config.
    """
    backup_spec = dict(
        filename=dict(),
        dir_path=dict(type='path')
    )
    argument_spec = dict(
        lines=dict(aliases=['commands'], type='list'),
        parents=dict(type='list'),
        src=dict(type='path'),
        before=dict(type='list'),
        after=dict(type='list'),
        match=dict(default='line',
                   choices=['line', 'strict', 'exact', 'none']),
        replace=dict(default='line', choices=['line', 'block']),
        update=dict(choices=['merge', 'check'], default='merge'),
        save=dict(type='bool', default=False),
        config=dict(),
        backup=dict(type='bool', default=False),
        backup_options=dict(type='dict', options=backup_spec)
    )

    argument_spec.update(dellos6_argument_spec)

    # 'lines' and 'src' (and 'parents' with 'src') are alternative ways to
    # express the candidate config and cannot be combined.
    mutually_exclusive = [('lines', 'src'),
                          ('parents', 'src')]

    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=mutually_exclusive,
                           supports_check_mode=True)

    parents = module.params['parents'] or list()

    match = module.params['match']
    replace = module.params['replace']

    warnings = list()
    check_args(module, warnings)

    result = dict(changed=False, saved=False, warnings=warnings)
    candidate = get_candidate(module)

    # Capture the pre-change running config when a backup was requested.
    if module.params['backup']:
        if not module.check_mode:
            result['__backup__'] = get_config(module)

    commands = list()

    if any((module.params['lines'], module.params['src'])):
        if match != 'none':
            config = get_running_config(module)
            config = Dellos6NetworkConfig(contents=config, indent=0)
            if parents:
                config = get_sublevel_config(config, module)
            configobjs = candidate.difference(config, match=match, replace=replace)
        else:
            # match == 'none': push the candidate unconditionally.
            configobjs = candidate.items

        if configobjs:
            commands = dumps(configobjs, 'commands')
            # Commands that need an interactive prompt/answer are wrapped in
            # a single JSON payload; plain commands become a list of lines.
            if ((isinstance(module.params['lines'], list)) and
                    (isinstance(module.params['lines'][0], dict)) and
                    set(['prompt', 'answer']).issubset(module.params['lines'][0])):
                cmd = {'command': commands,
                       'prompt': module.params['lines'][0]['prompt'],
                       'answer': module.params['lines'][0]['answer']}
                commands = [module.jsonify(cmd)]
            else:
                commands = commands.split('\n')

            if module.params['before']:
                commands[:0] = module.params['before']

            if module.params['after']:
                commands.extend(module.params['after'])

            if not module.check_mode and module.params['update'] == 'merge':
                load_config(module, commands)

                result['changed'] = True
                result['commands'] = commands
                result['updates'] = commands

    if module.params['save']:
        result['changed'] = True
        if not module.check_mode:
            # The copy command asks for confirmation; answer 'y'.
            cmd = {'command': 'copy running-config startup-config',
                   'prompt': r'\(y/n\)\s?$', 'answer': 'y'}
            run_commands(module, [cmd])
            result['saved'] = True
        else:
            # Bug fix: the implicit string concatenation previously dropped
            # the space between "startup-config`" and "due to check_mode".
            module.warn('Skipping command `copy running-config startup-config` '
                        'due to check_mode. Configuration not copied to '
                        'non-volatile storage')

    module.exit_json(**result)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
ekasitk/sahara | sahara/tests/unit/utils/test_general.py | 2 | 5560 | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from sahara import conductor
from sahara import context
from sahara.tests.unit import base
from sahara.tests.unit.conductor import test_api
from sahara.utils import general
class UtilsGeneralTest(base.SaharaWithDbTestCase):
    """Unit tests for sahara.utils.general helpers.

    Cluster-based tests create a sample cluster through the conductor API
    backed by the test database configured in SaharaWithDbTestCase.
    """

    def setUp(self):
        super(UtilsGeneralTest, self).setUp()
        # Conductor API entry point used to create/read/destroy test objects.
        self.api = conductor.API

    def _make_sample(self):
        # Create and return a cluster from the shared sample fixture.
        ctx = context.ctx()
        cluster = self.api.cluster_create(ctx, test_api.SAMPLE_CLUSTER)
        return cluster

    def test_find_dict(self):
        # find_dict returns the first dict containing all given key/value
        # pairs, or None when nothing matches.
        iterable = [
            {
                "a": 1
            },
            {
                "a": 1,
                "b": 2,
                "c": 3
            },
            {
                "a": 2
            },
            {
                "c": 3
            }
        ]
        self.assertEqual({"a": 1, "b": 2, "c": 3},
                         general.find_dict(iterable, a=1, b=2))
        self.assertIsNone(general.find_dict(iterable, z=4))

    def test_find(self):
        # find matches on object attributes rather than dict keys.
        lst = [mock.Mock(a=5), mock.Mock(b=5), mock.Mock(a=7, b=7)]
        self.assertEqual(lst[0], general.find(lst, a=5))
        self.assertEqual(lst[1], general.find(lst, b=5))
        self.assertIsNone(general.find(lst, a=8))
        self.assertEqual(lst[2], general.find(lst, a=7))
        self.assertEqual(lst[2], general.find(lst, a=7, b=7))

    def test_generate_instance_name(self):
        # Generated names are lowercased and the index is zero-padded.
        inst_name = "cluster-worker-001"
        self.assertEqual(
            inst_name, general.generate_instance_name("cluster", "worker", 1))
        self.assertEqual(
            inst_name, general.generate_instance_name("CLUSTER", "WORKER", 1))

    def test_get_by_id(self):
        lst = [mock.Mock(id=5), mock.Mock(id=7)]
        self.assertIsNone(general.get_by_id(lst, 9))
        self.assertEqual(lst[0], general.get_by_id(lst, 5))
        self.assertEqual(lst[1], general.get_by_id(lst, 7))

    def test_change_cluster_status(self):
        cluster = self._make_sample()
        cluster = general.change_cluster_status(cluster, "Deleting", "desc")
        self.assertEqual("Deleting", cluster.status)
        self.assertEqual("desc", cluster.status_description)
        # A cluster in "Deleting" state must not transition again.
        general.change_cluster_status(cluster, "Spawning")
        self.assertEqual("Deleting", cluster.status)

    def test_change_status_description(self):
        ctx = context.ctx()
        cluster = self._make_sample()
        cluster_id = cluster.id
        cluster = general.change_cluster_status_description(cluster, "desc")
        self.assertEqual('desc', cluster.status_description)
        self.api.cluster_destroy(ctx, cluster)
        # Updating a destroyed cluster should quietly return None.
        cluster = general.change_cluster_status_description(cluster_id, "desc")
        self.assertIsNone(cluster)

    def test_get_instances(self):
        cluster = self._make_sample()
        ctx = context.ctx()
        idx = 0
        ids = []
        # Populate every node group with sequentially numbered instances.
        for ng in cluster.node_groups:
            for i in range(ng.count):
                idx += 1
                ids.append(self.api.instance_add(context.ctx(), ng, {
                    'instance_id': str(idx),
                    'instance_name': str(idx),
                }))
        cluster = self.api.cluster_get(ctx, cluster)
        instances = general.get_instances(cluster, ids)
        ids = set()
        for inst in instances:
            ids.add(inst.instance_id)
        self.assertEqual(idx, len(ids))
        for i in range(1, idx):
            self.assertIn(str(i), ids)
        # Without an explicit id list, all instances are returned.
        instances = general.get_instances(cluster)
        ids = set()
        for inst in instances:
            ids.add(inst.instance_id)
        self.assertEqual(idx, len(ids))
        for i in range(1, idx):
            self.assertIn(str(i), ids)

    def test_clean_cluster_from_empty_ng(self):
        ctx = context.ctx()
        cluster = self._make_sample()
        ng = cluster.node_groups[0]
        ng_len = len(cluster.node_groups)
        # Zero out one node group; the cleanup helper should then drop it.
        self.api.node_group_update(ctx, ng, {'count': 0})
        cluster = self.api.cluster_get(ctx, cluster.id)
        general.clean_cluster_from_empty_ng(cluster)
        cluster = self.api.cluster_get(ctx, cluster.id)
        self.assertEqual(ng_len - 1, len(cluster.node_groups))

    def test_generate_etc_hosts(self):
        cluster = self._make_sample()
        ctx = context.ctx()
        idx = 0
        for ng in cluster.node_groups:
            for i in range(ng.count):
                idx += 1
                self.api.instance_add(ctx, ng, {
                    'instance_id': str(idx),
                    'instance_name': str(idx),
                    'internal_ip': str(idx),
                })
        cluster = self.api.cluster_get(ctx, cluster)
        value = general.generate_etc_hosts(cluster)
        expected = ("127.0.0.1 localhost\n"
                    "1 1.novalocal 1\n"
                    "2 2.novalocal 2\n"
                    "3 3.novalocal 3\n"
                    "4 4.novalocal 4\n")
        self.assertEqual(expected, value)
| apache-2.0 |
kubeflow/fairing | containerregistry/client/v2/docker_session_.py | 2 | 11912 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This package manages pushes to and deletes from a v2 docker registry."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import concurrent.futures
from containerregistry.client import docker_creds
from containerregistry.client import docker_name
from containerregistry.client.v2 import docker_http
from containerregistry.client.v2 import docker_image
import httplib2
import six.moves.http_client
import six.moves.urllib.parse
def _tag_or_digest(name):
  """Return the tag string for a Tag name, or the digest string for a Digest."""
  if isinstance(name, docker_name.Tag):
    return name.tag
  else:
    # Only Tag and Digest names are valid here.
    assert isinstance(name, docker_name.Digest)
    return name.digest
class Push(object):
  """Push encapsulates a Registry v2 Docker push session."""

  def __init__(self,
               name,
               creds,
               transport,
               mount=None,
               threads=1):
    """Constructor.

    If multiple threads are used, the caller *must* ensure that the provided
    transport is thread-safe, as well as the image that is being uploaded.
    It is notable that tarfile and httplib2.Http in Python are NOT threadsafe.

    Args:
      name: the fully-qualified name of the tag to push
      creds: provider for authorizing requests
      transport: the http transport to use for sending requests
      mount: list of repos from which to mount blobs.
      threads: the number of threads to use for uploads.

    Raises:
      ValueError: an incorrectly typed argument was supplied.
    """
    self._name = name
    self._transport = docker_http.Transport(name, creds, transport,
                                            docker_http.PUSH)
    self._mount = mount
    self._threads = threads

  def _scheme_and_host(self):
    """Returns the scheme://host prefix for the target registry."""
    return '{scheme}://{registry}'.format(
        scheme=docker_http.Scheme(self._name.registry),
        registry=self._name.registry)

  def _base_url(self):
    """Returns the v2 repository base URL for the target name."""
    return self._scheme_and_host() + '/v2/{repository}'.format(
        repository=self._name.repository)

  def _get_absolute_url(self, location):
    # If 'location' is an absolute URL (includes host), this will be a no-op.
    return six.moves.urllib.parse.urljoin(
        base=self._scheme_and_host(), url=location)

  def _blob_exists(self, digest):
    """Check the remote for the given layer."""
    # HEAD the blob, and check for a 200
    resp, unused_content = self._transport.Request(
        '{base_url}/blobs/{digest}'.format(
            base_url=self._base_url(), digest=digest),
        method='HEAD',
        accepted_codes=[
            six.moves.http_client.OK, six.moves.http_client.NOT_FOUND
        ])
    return resp.status == six.moves.http_client.OK  # pytype: disable=attribute-error

  def _manifest_exists(self, image):
    """Check the remote for the given manifest by digest."""
    # GET the manifest by digest, and check for 200
    resp, unused_content = self._transport.Request(
        '{base_url}/manifests/{digest}'.format(
            base_url=self._base_url(), digest=image.digest()),
        method='GET',
        accepted_codes=[
            six.moves.http_client.OK, six.moves.http_client.NOT_FOUND
        ])
    return resp.status == six.moves.http_client.OK  # pytype: disable=attribute-error

  def _monolithic_upload(self, image, digest):
    """Upload a blob with a single POST (not widely supported)."""
    self._transport.Request(
        '{base_url}/blobs/uploads/?digest={digest}'.format(
            base_url=self._base_url(), digest=digest),
        method='POST',
        body=image.blob(digest),
        accepted_codes=[six.moves.http_client.CREATED])

  def _add_digest(self, url, digest):
    """Returns 'url' with a 'digest=<digest>' query parameter set."""
    scheme, netloc, path, query_string, fragment = (
        six.moves.urllib.parse.urlsplit(url))
    qs = six.moves.urllib.parse.parse_qs(query_string)
    qs['digest'] = [digest]
    query_string = six.moves.urllib.parse.urlencode(qs, doseq=True)
    return six.moves.urllib.parse.urlunsplit((scheme, netloc, path,
                                              query_string, fragment))

  def _put_upload(self, image, digest):
    """Upload a blob with the two-stage POST-then-PUT protocol."""
    mounted, location = self._start_upload(digest, self._mount)

    if mounted:
      logging.info('Layer %s mounted.', digest)
      return

    location = self._add_digest(location, digest)
    self._transport.Request(
        location,
        method='PUT',
        body=image.blob(digest),
        accepted_codes=[six.moves.http_client.CREATED])

  def _patch_upload(self, image, digest):
    """Upload a blob with the three-stage POST/PATCH/PUT protocol."""
    mounted, location = self._start_upload(digest, self._mount)

    if mounted:
      logging.info('Layer %s mounted.', digest)
      return

    location = self._get_absolute_url(location)

    resp, unused_content = self._transport.Request(
        location,
        method='PATCH',
        body=image.blob(digest),
        content_type='application/octet-stream',
        accepted_codes=[
            six.moves.http_client.NO_CONTENT, six.moves.http_client.ACCEPTED,
            six.moves.http_client.CREATED
        ])

    # Commit the upload by PUTing to the returned location with the digest.
    location = self._add_digest(resp['location'], digest)
    location = self._get_absolute_url(location)

    self._transport.Request(
        location,
        method='PUT',
        body=None,
        accepted_codes=[six.moves.http_client.CREATED])

  def _put_blob(self, image, digest):
    """Upload the aufs .tgz for a single layer."""
    # We have a few choices for unchunked uploading:
    #   POST to /v2/<name>/blobs/uploads/?digest=<digest>
    #   Fastest, but not supported by many registries.
    # self._monolithic_upload(image, digest)
    #
    # or:
    #   POST /v2/<name>/blobs/uploads/        (no body*)
    #   PUT  /v2/<name>/blobs/uploads/<uuid>  (full body)
    #   Next fastest, but there is a mysterious bad interaction
    #   with Bintray. This pattern also hasn't been used in
    #   clients since 1.8, when they switched to the 3-stage
    #   method below.
    # self._put_upload(image, digest)
    # or:
    #   POST  /v2/<name>/blobs/uploads/        (no body*)
    #   PATCH /v2/<name>/blobs/uploads/<uuid>  (full body)
    #   PUT   /v2/<name>/blobs/uploads/<uuid>  (no body)
    #
    # * We attempt to perform a cross-repo mount if any repositories are
    # specified in the "mount" parameter. This does a fast copy from a
    # repository that is known to contain this blob and skips the upload.
    self._patch_upload(image, digest)

  def _remote_tag_digest(self):
    """Return the digest the remote tag currently points at, or None."""
    # GET the tag we're pushing
    resp, unused_content = self._transport.Request(
        '{base_url}/manifests/{tag}'.format(
            base_url=self._base_url(),
            tag=self._name.tag),  # pytype: disable=attribute-error
        method='GET',
        accepted_codes=[
            six.moves.http_client.OK, six.moves.http_client.NOT_FOUND
        ])

    if resp.status == six.moves.http_client.NOT_FOUND:  # pytype: disable=attribute-error
      return None

    return resp.get('docker-content-digest')

  def _put_manifest(self, image):
    """Upload the manifest for this image."""
    self._transport.Request(
        '{base_url}/manifests/{tag_or_digest}'.format(
            base_url=self._base_url(),
            tag_or_digest=_tag_or_digest(self._name)),
        method='PUT',
        body=image.manifest().encode('utf8'),
        accepted_codes=[
            six.moves.http_client.OK, six.moves.http_client.CREATED,
            six.moves.http_client.ACCEPTED
        ])

  def _start_upload(self,
                    digest,
                    mount=None):
    """POST to begin the upload process with optional cross-repo mount param."""
    if not mount:
      # Do a normal POST to initiate an upload if mount is missing.
      url = '{base_url}/blobs/uploads/'.format(base_url=self._base_url())
      accepted_codes = [six.moves.http_client.ACCEPTED]
    else:
      # If we have a mount parameter, try to mount the blob from another repo.
      # Bug fix: build the from= list from the 'mount' argument; previously
      # this read self._mount, silently ignoring the parameter.
      mount_from = '&'.join([
          'from=' + six.moves.urllib.parse.quote(repo.repository, '')
          for repo in mount
      ])
      url = '{base_url}/blobs/uploads/?mount={digest}&{mount_from}'.format(
          base_url=self._base_url(), digest=digest, mount_from=mount_from)
      accepted_codes = [
          six.moves.http_client.CREATED, six.moves.http_client.ACCEPTED
      ]

    resp, unused_content = self._transport.Request(
        url, method='POST', body=None, accepted_codes=accepted_codes)
    # pytype: disable=attribute-error,bad-return-type
    return resp.status == six.moves.http_client.CREATED, resp.get('location')
    # pytype: enable=attribute-error,bad-return-type

  def _upload_one(self, image, digest):
    """Upload a single layer, after checking whether it exists already."""
    if self._blob_exists(digest):
      logging.info('Layer %s exists, skipping', digest)
      return

    self._put_blob(image, digest)
    logging.info('Layer %s pushed.', digest)

  def upload(self, image):
    """Upload the layers of the given image.

    Args:
      image: the image to upload.
    """
    # If the manifest (by digest) exists, then avoid N layer existence
    # checks (they must exist).
    if self._manifest_exists(image):
      if isinstance(self._name, docker_name.Tag):
        if self._remote_tag_digest() == image.digest():
          logging.info('Tag points to the right manifest, skipping push.')
          return
        logging.info('Manifest exists, skipping blob uploads and pushing tag.')
      else:
        logging.info('Manifest exists, skipping upload.')
    elif self._threads == 1:
      for digest in image.blob_set():
        self._upload_one(image, digest)
    else:
      with concurrent.futures.ThreadPoolExecutor(
          max_workers=self._threads) as executor:
        future_to_params = {
            executor.submit(self._upload_one, image, digest): (image, digest)
            for digest in image.blob_set()
        }
        for future in concurrent.futures.as_completed(future_to_params):
          future.result()

    # This should complete the upload by uploading the manifest.
    self._put_manifest(image)

  # __enter__ and __exit__ allow use as a context manager.
  def __enter__(self):
    return self

  def __exit__(self, exception_type, unused_value, unused_traceback):
    if exception_type:
      logging.error('Error during upload of: %s', self._name)
      return
    logging.info('Finished upload of: %s', self._name)
# pylint: disable=invalid-name
def Delete(name,
           creds, transport):
  """Delete a tag or digest.

  Args:
    name: a tag or digest to be deleted.
    creds: the credentials to use for deletion.
    transport: the transport to use to contact the registry.
  """
  docker_transport = docker_http.Transport(name, creds, transport,
                                           docker_http.DELETE)

  url = '{scheme}://{registry}/v2/{repository}/manifests/{entity}'.format(
      scheme=docker_http.Scheme(name.registry),
      registry=name.registry,
      repository=name.repository,
      entity=_tag_or_digest(name))
  _, unused_content = docker_transport.Request(
      url,
      method='DELETE',
      accepted_codes=[six.moves.http_client.OK, six.moves.http_client.ACCEPTED])
| apache-2.0 |
aselle/tensorflow | tensorflow/python/debug/cli/profile_analyzer_cli_test.py | 59 | 17966 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for profile_analyzer_cli."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.core.framework import step_stats_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.cli import profile_analyzer_cli
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.util import tf_inspect
def no_rewrite_session_config():
  """Session config with grappler rewrites disabled.

  Keeps the profiled graph's original nodes so that test expectations on
  node names remain valid.
  """
  rewriter_config = rewriter_config_pb2.RewriterConfig(
      disable_model_pruning=True,
      constant_folding=rewriter_config_pb2.RewriterConfig.OFF)
  graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)
  return config_pb2.ConfigProto(graph_options=graph_options)
def _line_number_above():
  """Return the source line number immediately above the caller's call site."""
  return tf_inspect.stack()[1][2] - 1
def _at_least_one_line_matches(pattern, lines):
pattern_re = re.compile(pattern)
for i, line in enumerate(lines):
if pattern_re.search(line):
return True, i
return False, None
def _assert_at_least_one_line_matches(pattern, lines):
  """Raise AssertionError unless some line in `lines` matches `pattern`."""
  matched, _ = _at_least_one_line_matches(pattern, lines)
  if matched:
    return
  raise AssertionError(
      "%s does not match any line in %s." % (pattern, str(lines)))
def _assert_no_lines_match(pattern, lines):
  """Raise AssertionError if any line in `lines` matches `pattern`."""
  matched, _ = _at_least_one_line_matches(pattern, lines)
  if not matched:
    return
  raise AssertionError(
      "%s matched at least one line in %s." % (pattern, str(lines)))
class ProfileAnalyzerListProfileTest(test_util.TensorFlowTestCase):
  """Tests for the ProfileAnalyzer `list_profile` (lp) command.

  Most cases build a RunMetadata by hand (NodeExecStats protos plus a mocked
  graph whose ops carry name/type/traceback) and assert on the rendered
  profile lines via regex helpers defined above.
  """
  def testNodeInfoEmpty(self):
    # With no device stats recorded, the listing is a single empty line.
    graph = ops.Graph()
    run_metadata = config_pb2.RunMetadata()
    prof_analyzer = profile_analyzer_cli.ProfileAnalyzer(graph, run_metadata)
    prof_output = prof_analyzer.list_profile([]).lines
    # NOTE(review): assertEquals is a deprecated alias of assertEqual.
    self.assertEquals([""], prof_output)
  def testSingleDevice(self):
    # Two node stats on one device; op time = op_end - op_start,
    # exec time = all_end_rel_micros.
    node1 = step_stats_pb2.NodeExecStats(
        node_name="Add/123",
        op_start_rel_micros=3,
        op_end_rel_micros=5,
        all_end_rel_micros=4)
    node2 = step_stats_pb2.NodeExecStats(
        node_name="Mul/456",
        op_start_rel_micros=1,
        op_end_rel_micros=2,
        all_end_rel_micros=3)
    run_metadata = config_pb2.RunMetadata()
    device1 = run_metadata.step_stats.dev_stats.add()
    device1.device = "deviceA"
    device1.node_stats.extend([node1, node2])
    graph = test.mock.MagicMock()
    op1 = test.mock.MagicMock()
    op1.name = "Add/123"
    op1.traceback = [("a/b/file1", 10, "some_var")]
    op1.type = "add"
    op2 = test.mock.MagicMock()
    op2.name = "Mul/456"
    op2.traceback = [("a/b/file1", 11, "some_var")]
    op2.type = "mul"
    graph.get_operations.return_value = [op1, op2]
    prof_analyzer = profile_analyzer_cli.ProfileAnalyzer(graph, run_metadata)
    prof_output = prof_analyzer.list_profile([]).lines
    _assert_at_least_one_line_matches(r"Device 1 of 1: deviceA", prof_output)
    _assert_at_least_one_line_matches(r"^Add/123.*add.*2us.*4us", prof_output)
    _assert_at_least_one_line_matches(r"^Mul/456.*mul.*1us.*3us", prof_output)
  def testMultipleDevices(self):
    # The same node stats appear under two devices; both device sections
    # must be rendered, and -d filters down to one.
    node1 = step_stats_pb2.NodeExecStats(
        node_name="Add/123",
        op_start_rel_micros=3,
        op_end_rel_micros=5,
        all_end_rel_micros=3)
    run_metadata = config_pb2.RunMetadata()
    device1 = run_metadata.step_stats.dev_stats.add()
    device1.device = "deviceA"
    device1.node_stats.extend([node1])
    device2 = run_metadata.step_stats.dev_stats.add()
    device2.device = "deviceB"
    device2.node_stats.extend([node1])
    graph = test.mock.MagicMock()
    op = test.mock.MagicMock()
    op.name = "Add/123"
    op.traceback = [("a/b/file1", 10, "some_var")]
    op.type = "abc"
    graph.get_operations.return_value = [op]
    prof_analyzer = profile_analyzer_cli.ProfileAnalyzer(graph, run_metadata)
    prof_output = prof_analyzer.list_profile([]).lines
    _assert_at_least_one_line_matches(r"Device 1 of 2: deviceA", prof_output)
    _assert_at_least_one_line_matches(r"Device 2 of 2: deviceB", prof_output)
    # Try filtering by device.
    prof_output = prof_analyzer.list_profile(["-d", "deviceB"]).lines
    _assert_at_least_one_line_matches(r"Device 2 of 2: deviceB", prof_output)
    _assert_no_lines_match(r"Device 1 of 2: deviceA", prof_output)
  def testWithSession(self):
    # End-to-end: run a real session with FULL_TRACE and check the rendered
    # header columns and totals.
    options = config_pb2.RunOptions()
    options.trace_level = config_pb2.RunOptions.FULL_TRACE
    run_metadata = config_pb2.RunMetadata()
    with session.Session(config=no_rewrite_session_config()) as sess:
      a = constant_op.constant([1, 2, 3])
      b = constant_op.constant([2, 2, 1])
      result = math_ops.add(a, b)
      sess.run(result, options=options, run_metadata=run_metadata)
      prof_analyzer = profile_analyzer_cli.ProfileAnalyzer(
          sess.graph, run_metadata)
      prof_output = prof_analyzer.list_profile([]).lines
      _assert_at_least_one_line_matches("Device 1 of", prof_output)
      expected_headers = [
          "Node", r"Start Time \(us\)", r"Op Time \(.*\)", r"Exec Time \(.*\)",
          r"Filename:Lineno\(function\)"]
      _assert_at_least_one_line_matches(
          ".*".join(expected_headers), prof_output)
      _assert_at_least_one_line_matches(r"^Add/", prof_output)
      _assert_at_least_one_line_matches(r"Device Total", prof_output)
  def testSorting(self):
    node1 = step_stats_pb2.NodeExecStats(
        node_name="Add/123",
        all_start_micros=123,
        op_start_rel_micros=3,
        op_end_rel_micros=5,
        all_end_rel_micros=4)
    node2 = step_stats_pb2.NodeExecStats(
        node_name="Mul/456",
        all_start_micros=122,
        op_start_rel_micros=1,
        op_end_rel_micros=2,
        all_end_rel_micros=5)
    run_metadata = config_pb2.RunMetadata()
    device1 = run_metadata.step_stats.dev_stats.add()
    device1.device = "deviceA"
    device1.node_stats.extend([node1, node2])
    graph = test.mock.MagicMock()
    op1 = test.mock.MagicMock()
    op1.name = "Add/123"
    op1.traceback = [("a/b/file2", 10, "some_var")]
    op1.type = "add"
    op2 = test.mock.MagicMock()
    op2.name = "Mul/456"
    op2.traceback = [("a/b/file1", 11, "some_var")]
    op2.type = "mul"
    graph.get_operations.return_value = [op1, op2]
    prof_analyzer = profile_analyzer_cli.ProfileAnalyzer(graph, run_metadata)
    # Default sort by start time (i.e. all_start_micros).
    prof_output = prof_analyzer.list_profile([]).lines
    self.assertRegexpMatches("".join(prof_output), r"Mul/456.*Add/123")
    # Default sort in reverse.
    prof_output = prof_analyzer.list_profile(["-r"]).lines
    self.assertRegexpMatches("".join(prof_output), r"Add/123.*Mul/456")
    # Sort by name.
    prof_output = prof_analyzer.list_profile(["-s", "node"]).lines
    self.assertRegexpMatches("".join(prof_output), r"Add/123.*Mul/456")
    # Sort by op time (i.e. op_end_rel_micros - op_start_rel_micros).
    prof_output = prof_analyzer.list_profile(["-s", "op_time"]).lines
    self.assertRegexpMatches("".join(prof_output), r"Mul/456.*Add/123")
    # Sort by exec time (i.e. all_end_rel_micros).
    prof_output = prof_analyzer.list_profile(["-s", "exec_time"]).lines
    self.assertRegexpMatches("".join(prof_output), r"Add/123.*Mul/456")
    # Sort by line number.
    prof_output = prof_analyzer.list_profile(["-s", "line"]).lines
    self.assertRegexpMatches("".join(prof_output), r"Mul/456.*Add/123")
  def testFiltering(self):
    node1 = step_stats_pb2.NodeExecStats(
        node_name="Add/123",
        all_start_micros=123,
        op_start_rel_micros=3,
        op_end_rel_micros=5,
        all_end_rel_micros=4)
    node2 = step_stats_pb2.NodeExecStats(
        node_name="Mul/456",
        all_start_micros=122,
        op_start_rel_micros=1,
        op_end_rel_micros=2,
        all_end_rel_micros=5)
    run_metadata = config_pb2.RunMetadata()
    device1 = run_metadata.step_stats.dev_stats.add()
    device1.device = "deviceA"
    device1.node_stats.extend([node1, node2])
    graph = test.mock.MagicMock()
    op1 = test.mock.MagicMock()
    op1.name = "Add/123"
    op1.traceback = [("a/b/file2", 10, "some_var")]
    op1.type = "add"
    op2 = test.mock.MagicMock()
    op2.name = "Mul/456"
    op2.traceback = [("a/b/file1", 11, "some_var")]
    op2.type = "mul"
    graph.get_operations.return_value = [op1, op2]
    prof_analyzer = profile_analyzer_cli.ProfileAnalyzer(graph, run_metadata)
    # Filter by name
    prof_output = prof_analyzer.list_profile(["-n", "Add"]).lines
    _assert_at_least_one_line_matches(r"Add/123", prof_output)
    _assert_no_lines_match(r"Mul/456", prof_output)
    # Filter by op_type
    prof_output = prof_analyzer.list_profile(["-t", "mul"]).lines
    _assert_at_least_one_line_matches(r"Mul/456", prof_output)
    _assert_no_lines_match(r"Add/123", prof_output)
    # Filter by file name.
    prof_output = prof_analyzer.list_profile(["-f", ".*file2"]).lines
    _assert_at_least_one_line_matches(r"Add/123", prof_output)
    _assert_no_lines_match(r"Mul/456", prof_output)
    # Filter by execution time.
    prof_output = prof_analyzer.list_profile(["-e", "[5, 10]"]).lines
    _assert_at_least_one_line_matches(r"Mul/456", prof_output)
    _assert_no_lines_match(r"Add/123", prof_output)
    # Filter by op time.
    prof_output = prof_analyzer.list_profile(["-o", ">=2"]).lines
    _assert_at_least_one_line_matches(r"Add/123", prof_output)
    _assert_no_lines_match(r"Mul/456", prof_output)
  def testSpecifyingTimeUnit(self):
    node1 = step_stats_pb2.NodeExecStats(
        node_name="Add/123",
        all_start_micros=123,
        op_start_rel_micros=3,
        op_end_rel_micros=5,
        all_end_rel_micros=4)
    node2 = step_stats_pb2.NodeExecStats(
        node_name="Mul/456",
        all_start_micros=122,
        op_start_rel_micros=1,
        op_end_rel_micros=2,
        all_end_rel_micros=5)
    run_metadata = config_pb2.RunMetadata()
    device1 = run_metadata.step_stats.dev_stats.add()
    device1.device = "deviceA"
    device1.node_stats.extend([node1, node2])
    graph = test.mock.MagicMock()
    op1 = test.mock.MagicMock()
    op1.name = "Add/123"
    op1.traceback = [("a/b/file2", 10, "some_var")]
    op1.type = "add"
    op2 = test.mock.MagicMock()
    op2.name = "Mul/456"
    op2.traceback = [("a/b/file1", 11, "some_var")]
    op2.type = "mul"
    graph.get_operations.return_value = [op1, op2]
    prof_analyzer = profile_analyzer_cli.ProfileAnalyzer(graph, run_metadata)
    # Force time unit.
    prof_output = prof_analyzer.list_profile(["--time_unit", "ms"]).lines
    _assert_at_least_one_line_matches(r"Add/123.*add.*0\.002ms", prof_output)
    _assert_at_least_one_line_matches(r"Mul/456.*mul.*0\.005ms", prof_output)
    _assert_at_least_one_line_matches(r"Device Total.*0\.009ms", prof_output)
class ProfileAnalyzerPrintSourceTest(test_util.TensorFlowTestCase):
  """Tests for the ProfileAnalyzer `print_source` (ps) command.

  setUp runs a traced while-loop session and records, via
  `_line_number_above()`, the source line numbers of the cond/body/x/loop
  statements so the assertions can reference them precisely.
  """
  def setUp(self):
    super(ProfileAnalyzerPrintSourceTest, self).setUp()
    options = config_pb2.RunOptions()
    options.trace_level = config_pb2.RunOptions.FULL_TRACE
    run_metadata = config_pb2.RunMetadata()
    with session.Session() as sess:
      # Each `_line_number_above()` call must stay immediately below the
      # statement whose line number it records.
      loop_cond = lambda x: math_ops.less(x, 10)
      self.loop_cond_lineno = _line_number_above()
      loop_body = lambda x: math_ops.add(x, 1)
      self.loop_body_lineno = _line_number_above()
      x = constant_op.constant(0, name="x")
      self.x_lineno = _line_number_above()
      loop = control_flow_ops.while_loop(loop_cond, loop_body, [x])
      self.loop_lineno = _line_number_above()
      self.assertEqual(
          10, sess.run(loop, options=options, run_metadata=run_metadata))
      self.prof_analyzer = profile_analyzer_cli.ProfileAnalyzer(
          sess.graph, run_metadata)
  def tearDown(self):
    ops.reset_default_graph()
    super(ProfileAnalyzerPrintSourceTest, self).tearDown()
  def testPrintSourceForWhileLoop(self):
    prof_output = self.prof_analyzer.print_source([__file__])
    _assert_at_least_one_line_matches(
        r"\[(\|)+(\s)*\] .*us .*2\(22\) .*L%d.*(\S)+" % self.loop_cond_lineno,
        prof_output.lines)
    _assert_at_least_one_line_matches(
        r"\[(\|)+(\s)*\] .*us .*2\(20\) .*L%d.*(\S)+" % self.loop_body_lineno,
        prof_output.lines)
    _assert_at_least_one_line_matches(
        r"\[(\|)+(\s)*\] .*us .*7\(55\) .*L%d.*(\S)+" % self.loop_lineno,
        prof_output.lines)
  def testPrintSourceOutputContainsClickableLinks(self):
    prof_output = self.prof_analyzer.print_source([__file__])
    any_match, line_index = _at_least_one_line_matches(
        r"\[(\|)+(\s)*\] .*us .*2\(22\) .*L%d.*(\S)+" % self.loop_cond_lineno,
        prof_output.lines)
    self.assertTrue(any_match)
    # Check that the matched line carries a MenuItem whose command re-invokes
    # `lp` scoped to the loop_cond source line.
    any_menu_item_match = False
    for seg in prof_output.font_attr_segs[line_index]:
      if (isinstance(seg[2][1], debugger_cli_common.MenuItem) and
          seg[2][1].content.startswith("lp --file_path_filter ") and
          "--min_lineno %d" % self.loop_cond_lineno in seg[2][1].content and
          "--max_lineno %d" % (self.loop_cond_lineno + 1) in seg[2][1].content):
        any_menu_item_match = True
        break
    self.assertTrue(any_menu_item_match)
  def testPrintSourceWithNonDefaultTimeUnit(self):
    prof_output = self.prof_analyzer.print_source([
        __file__, "--time_unit", "ms"])
    _assert_at_least_one_line_matches(
        r"\[(\|)+(\s)*\] .*ms .*2\(22\) .*L%d.*(\S)+" % self.loop_cond_lineno,
        prof_output.lines)
    _assert_at_least_one_line_matches(
        r"\[(\|)+(\s)*\] .*ms .*2\(20\) .*L%d.*(\S)+" % self.loop_body_lineno,
        prof_output.lines)
    _assert_at_least_one_line_matches(
        r"\[(\|)+(\s)*\] .*ms .*7\(55\) .*L%d.*(\S)+" % self.loop_lineno,
        prof_output.lines)
  def testPrintSourceWithNodeNameFilter(self):
    prof_output = self.prof_analyzer.print_source([
        __file__, "--node_name_filter", "x$"])
    _assert_at_least_one_line_matches(
        r"\[(\|)+(\s)*\] .*us .*1\(1\) .*L%d.*(\S)+" % self.x_lineno,
        prof_output.lines)
    _assert_no_lines_match(
        r"\[(\|)+(\s)*\] .*us .*2\(22\) .*L%d.*(\S)+" % self.loop_cond_lineno,
        prof_output.lines)
    _assert_no_lines_match(
        r"\[(\|)+(\s)*\] .*us .*2\(20\) .*L%d.*(\S)+" % self.loop_body_lineno,
        prof_output.lines)
    # NOTE(review): this pattern uses "ms" while its siblings use "us" —
    # possibly a typo that keeps the assertion vacuously true; verify.
    _assert_no_lines_match(
        r"\[(\|)+(\s)*\] .*ms .*7\(55\) .*L%d.*(\S)+" % self.loop_lineno,
        prof_output.lines)
    # Check clickable link.
    _, line_index = _at_least_one_line_matches(
        r"\[(\|)+(\s)*\] .*us .*1\(1\) .*L%d.*(\S)+" % self.x_lineno,
        prof_output.lines)
    any_menu_item_match = False
    for seg in prof_output.font_attr_segs[line_index]:
      if (isinstance(seg[2][1], debugger_cli_common.MenuItem) and
          seg[2][1].content.startswith("lp --file_path_filter ") and
          "--node_name_filter x$" in seg[2][1].content and
          "--min_lineno %d" % self.x_lineno in seg[2][1].content and
          "--max_lineno %d" % (self.x_lineno + 1) in seg[2][1].content):
        any_menu_item_match = True
        break
    self.assertTrue(any_menu_item_match)
  def testPrintSourceWithOpTypeFilter(self):
    prof_output = self.prof_analyzer.print_source([
        __file__, "--op_type_filter", "Less"])
    _assert_at_least_one_line_matches(
        r"\[(\|)+(\s)*\] .*us .*1\(11\) .*L%d.*(\S)+" % self.loop_cond_lineno,
        prof_output.lines)
    _assert_no_lines_match(
        r"\[(\|)+(\s)*\] .*us .*2\(20\) .*L%d.*(\S)+" % self.loop_body_lineno,
        prof_output.lines)
    _assert_no_lines_match(
        r"\[(\|)+(\s)*\] .*us .*7\(55\) .*L%d.*(\S)+" % self.loop_lineno,
        prof_output.lines)
  def testPrintSourceWithNonexistentDeviceGivesCorrectErrorMessage(self):
    prof_output = self.prof_analyzer.print_source([
        __file__, "--device_name_filter", "foo_device"])
    _assert_at_least_one_line_matches(
        r"The source file .* does not contain any profile information for the "
        "previous Session run", prof_output.lines)
    _assert_at_least_one_line_matches(
        r".*--device_name_filter: foo_device", prof_output.lines)
  def testPrintSourceWithUnrelatedFileShowsCorrectErrorMessage(self):
    prof_output = self.prof_analyzer.print_source([tf_inspect.__file__])
    _assert_at_least_one_line_matches(
        r"The source file .* does not contain any profile information for the "
        "previous Session run", prof_output.lines)
  def testPrintSourceOutputContainsInitScrollPosAnnotation(self):
    prof_output = self.prof_analyzer.print_source([
        __file__, "--init_line", str(self.loop_cond_lineno)])
    self.assertEqual(
        self.loop_cond_lineno + 1,  # The extra line is due to the head lines.
        prof_output.annotations[debugger_cli_common.INIT_SCROLL_POS_KEY])
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
StephanEwen/incubator-flink | flink-python/pyflink/table/utils.py | 9 | 6042 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import ast
from pyflink.common.types import RowKind
from pyflink.java_gateway import get_gateway
from pyflink.table.types import DataType, LocalZonedTimestampType, Row, RowType, \
TimeType, DateType, ArrayType, MapType, TimestampType, FloatType
from pyflink.util.java_utils import to_jarray
import datetime
import pickle
def pandas_to_arrow(schema, timezone, field_types, series):
    """Convert a list of pandas Series/DataFrames into a pyarrow.RecordBatch.

    :param schema: pyarrow schema describing the target columns.
    :param timezone: local time zone used to strip tz info from
        LOCAL_ZONED_TIMESTAMP columns (see ``tz_convert_to_internal``).
    :param field_types: Flink ``DataType`` for each column, parallel to
        ``schema``.
    :param series: per-column data; a pandas.DataFrame entry is treated as a
        struct (ROW) column, anything else as a flat Series.
    :raises RuntimeError: if pyarrow rejects a column conversion; the message
        carries the pandas dtype and the target arrow type.
    """
    import pyarrow as pa
    import pandas as pd
    def create_array(s, t):
        try:
            return pa.Array.from_pandas(s, mask=s.isnull(), type=t)
        except pa.ArrowException as e:
            error_msg = "Exception thrown when converting pandas.Series (%s) to " \
                        "pyarrow.Array (%s)."
            raise RuntimeError(error_msg % (s.dtype, t), e)
    arrays = []
    for i in range(len(schema)):
        s = series[i]
        field_type = field_types[i]
        schema_type = schema.types[i]
        if type(s) == pd.DataFrame:
            # ROW column: convert each DataFrame column to an arrow array,
            # preserving the struct field order of the schema.
            array_names = [(create_array(s[s.columns[j]], field.type), field.name)
                           for j, field in enumerate(schema_type)]
            struct_arrays, struct_names = zip(*array_names)
            arrays.append(pa.StructArray.from_arrays(struct_arrays, struct_names))
        else:
            # Flat column: normalize timestamps to tz-naive before conversion.
            arrays.append(create_array(
                tz_convert_to_internal(s, field_type, timezone), schema_type))
    return pa.RecordBatch.from_arrays(arrays, schema)
def arrow_to_pandas(timezone, field_types, batches):
    """Convert pyarrow RecordBatches back into a list of pandas columns.

    :param timezone: local time zone applied to LOCAL_ZONED_TIMESTAMP columns
        (see ``tz_convert_from_internal``).
    :param field_types: Flink ``DataType`` per column, parallel to the
        batches' columns.
    :param batches: iterable of pyarrow.RecordBatch.
    :return: one pandas Series (or DataFrame for ROW columns) per column.
    """
    def arrow_column_to_pandas(arrow_column, t: DataType):
        if type(t) == RowType:
            import pandas as pd
            # Struct column: flatten each child field into its own Series and
            # reassemble them as a DataFrame with the original field names.
            series = [column.to_pandas(date_as_object=True).rename(field.name)
                      for column, field in zip(arrow_column.flatten(), arrow_column.type)]
            return pd.concat(series, axis=1)
        else:
            return arrow_column.to_pandas(date_as_object=True)
    import pyarrow as pa
    table = pa.Table.from_batches(batches)
    return [tz_convert_from_internal(arrow_column_to_pandas(c, t), t, timezone)
            for c, t in zip(table.itercolumns(), field_types)]
def tz_convert_from_internal(s, t: DataType, local_tz):
    """
    Converts the timestamp series from internal according to the specified
    local timezone: LOCAL_ZONED_TIMESTAMP columns are localized to
    ``local_tz``; every other series is returned unchanged.
    """
    is_local_zoned = type(t) == LocalZonedTimestampType
    if not is_local_zoned:
        return s
    return s.dt.tz_localize(local_tz)
def tz_convert_to_internal(s, t: DataType, local_tz):
    """
    Converts the timestamp series to internal according to the specified
    local timezone: tz-naive values just drop their (absent) tz info, tz-aware
    values are first converted to ``local_tz``.  Non-timestamp columns are
    returned unchanged.
    """
    if type(t) != LocalZonedTimestampType:
        return s
    from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
    if is_datetime64_dtype(s.dtype):
        return s.dt.tz_localize(None)
    if is_datetime64tz_dtype(s.dtype):
        return s.dt.tz_convert(local_tz).dt.tz_localize(None)
    return s
def to_expression_jarray(exprs):
    """
    Convert python list of Expression to java array of Expression.

    Each Python Expression wraps a Java object in ``_j_expr``; the unwrapped
    objects are packed into a Java ``Expression[]`` via the py4j gateway.
    """
    gateway = get_gateway()
    return to_jarray(gateway.jvm.Expression, [expr._j_expr for expr in exprs])
def pickled_bytes_to_python_converter(data, field_type: DataType):
    """Recursively unpickle serialized field data into Python values.

    :param data: for a ROW, a sequence whose first element encodes the row
        kind and whose remaining elements are the pickled field payloads;
        otherwise a single pickled payload.
    :param field_type: the Flink ``DataType`` driving the conversion.
    """
    if isinstance(field_type, RowType):
        # First element carries the RowKind as big-endian bytes.
        row_kind = RowKind(int.from_bytes(data[0], byteorder='big', signed=False))
        data = zip(list(data[1:]), field_type.field_types())
        fields = []
        for d, d_type in data:
            fields.append(pickled_bytes_to_python_converter(d, d_type))
        # NOTE(review): the field list is passed as a single positional
        # argument to Row — confirm Row() accepts a list here.
        result_row = Row(fields)
        result_row.set_row_kind(row_kind)
        return result_row
    else:
        data = pickle.loads(data)
        if isinstance(field_type, TimeType):
            # Internal value is microseconds since midnight.
            seconds, microseconds = divmod(data, 10 ** 6)
            minutes, seconds = divmod(seconds, 60)
            hours, minutes = divmod(minutes, 60)
            return datetime.time(hours, minutes, seconds, microseconds)
        elif isinstance(field_type, DateType):
            return field_type.from_sql_type(data)
        elif isinstance(field_type, TimestampType):
            # datetime -> microseconds since epoch expected by from_sql_type.
            return field_type.from_sql_type(int(data.timestamp() * 10**6))
        elif isinstance(field_type, MapType):
            # data is a (keys, values) pair of parallel sequences.
            key_type = field_type.key_type
            value_type = field_type.value_type
            zip_kv = zip(data[0], data[1])
            return dict((pickled_bytes_to_python_converter(k, key_type),
                         pickled_bytes_to_python_converter(v, value_type))
                        for k, v in zip_kv)
        elif isinstance(field_type, FloatType):
            # Value arrives as a string literal; parse it safely.
            return field_type.from_sql_type(ast.literal_eval(data))
        elif isinstance(field_type, ArrayType):
            element_type = field_type.element_type
            elements = []
            for element_bytes in data:
                elements.append(pickled_bytes_to_python_converter(element_bytes, element_type))
            return elements
        else:
            return field_type.from_sql_type(data)
| apache-2.0 |
exploreodoo/datStruct | odoo/addons/mrp/report/workcenter_load.py | 437 | 9126 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.report.render import render
from openerp.report.interface import report_int
import time
from datetime import date, datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp.report.misc import choice_colors
import StringIO
from pychart import *
theme.use_color = 1
#
# TODO: Bad code, seems buggy, TO CHECK !
#
class external_pdf(render):
    """Render wrapper around an already-generated PDF byte string."""
    def __init__(self, pdf):
        render.__init__(self)
        self.output_type = 'pdf'
        self.pdf = pdf
    def _render(self):
        # The PDF was produced elsewhere; just hand the bytes back.
        return self.pdf
class report_custom(report_int):
    """PDF report plotting work-center load (hours or cycles) per period.

    NOTE(review): Python 2 code — relies on integer division (``/``),
    ``dict.keys()`` returning a sortable list, and the ``StringIO`` module.
    """
    def _compute_dates(self, time_unit, start, stop):
        # Build {key: {'name', 'start', 'stop'}} period buckets between
        # `start` and `stop` ('YYYY-MM-DD') for time_unit in
        # ('month', 'week', anything-else => day).
        if not stop:
            stop = start
        if time_unit == 'month':
            dates = {}
            a = int(start.split("-")[0])*12 + int(start.split("-")[1])
            z = int(stop.split("-")[0])*12 + int(stop.split("-")[1]) + 1
            for i in range(a,z):
                year = i/12
                month = i%12
                if month == 0:
                    year -= 1
                    month = 12
                months = {1:"January",2:"February",3:"March",4:"April",5:"May",6:"June",7:"July",8:"August",9:"September",10:"October",11:"November",12:"December"}
                dates[i] = {
                    'name' :months[month],
                    'start':(datetime(year, month, 2) + relativedelta(day=1)).strftime('%Y-%m-%d'),
                    'stop' :(datetime(year, month, 2) + relativedelta(day=31)).strftime('%Y-%m-%d'),
                }
            return dates
        elif time_unit == 'week':
            dates = {}
            # ISO calendar gives (year, week number, weekday).
            start_week = date(int(start.split("-")[0]),int(start.split("-")[1]),int(start.split("-")[2])).isocalendar()
            end_week = date(int(stop.split("-")[0]),int(stop.split("-")[1]),int(stop.split("-")[2])).isocalendar()
            a = int(start.split("-")[0])*52 + start_week[1]
            z = int(stop.split("-")[0])*52 + end_week[1]
            for i in range(a,z+1):
                year = i/52
                week = i%52
                d = date(year, 1, 1)
                dates[i] = {
                    'name' :"Week #%d" % week,
                    'start':(d + timedelta(days=-d.weekday(), weeks=week)).strftime('%Y-%m-%d'),
                    'stop' :(d + timedelta(days=6-d.weekday(), weeks=week)).strftime('%Y-%m-%d'),
                }
            return dates
        else: # time_unit = day
            dates = {}
            a = datetime(int(start.split("-")[0]),int(start.split("-")[1]),int(start.split("-")[2]))
            z = datetime(int(stop.split("-")[0]),int(stop.split("-")[1]),int(stop.split("-")[2]))
            i = a
            while i <= z:
                dates[map(int,i.strftime('%Y%m%d').split())[0]] = {
                    'name' :i.strftime('%Y-%m-%d'),
                    'start':i.strftime('%Y-%m-%d'),
                    'stop' :i.strftime('%Y-%m-%d'),
                }
                i = i + relativedelta(days=+1)
            return dates
        return {}
    def create(self, cr, uid, ids, datas, context=None):
        # Render the PDF for the work centers in `ids`; `datas['form']`
        # supplies 'time_unit' and 'measure_unit' ('cycles' or hours).
        assert len(ids), 'You should provide some ids!'
        colors = choice_colors(len(ids))
        # Determine the planning horizon from pending production orders.
        cr.execute(
            "SELECT MAX(mrp_production.date_planned) AS stop,MIN(mrp_production.date_planned) AS start "\
            "FROM mrp_workcenter, mrp_production, mrp_production_workcenter_line "\
            "WHERE mrp_production_workcenter_line.production_id=mrp_production.id "\
            "AND mrp_production_workcenter_line.workcenter_id=mrp_workcenter.id "\
            "AND mrp_production.state NOT IN ('cancel','done') "\
            "AND mrp_workcenter.id IN %s",(tuple(ids),))
        res = cr.dictfetchone()
        if not res['stop']:
            res['stop'] = time.strftime('%Y-%m-%d %H:%M:%S')
        if not res['start']:
            res['start'] = time.strftime('%Y-%m-%d %H:%M:%S')
        dates = self._compute_dates(datas['form']['time_unit'], res['start'][:10], res['stop'][:10])
        dates_list = dates.keys()
        dates_list.sort()
        x_index = []
        for date in dates_list:
            x_index.append((dates[date]['name'], date))
        # Render into an in-memory buffer via pychart.
        pdf_string = StringIO.StringIO()
        can = canvas.init(fname=pdf_string, format='pdf')
        can.set_title("Work Center Loads")
        chart_object.set_defaults(line_plot.T, line_style=None)
        if datas['form']['measure_unit'] == 'cycles':
            y_label = "Load (Cycles)"
        else:
            y_label = "Load (Hours)"
        # Add the report header at the top of the report.
        tb = text_box.T(loc=(300, 500), text="/hL/15/bWork Center Loads", line_style=None)
        tb.draw()
        ar = area.T(legend = legend.T(),
                x_grid_style = line_style.gray70_dash1,
                x_axis = axis.X(label="Periods", format="/a90/hC%s"),
                x_coord = category_coord.T(x_index, 0),
                y_axis = axis.Y(label=y_label),
                y_range = (0, None),
                size = (640,480))
        bar_plot.fill_styles.reset();
        # select workcenters
        cr.execute(
            "SELECT mw.id, rs.name FROM mrp_workcenter mw, resource_resource rs " \
            "WHERE mw.id IN %s and mw.resource_id=rs.id " \
            "ORDER BY mw.id" ,(tuple(ids),))
        workcenters = cr.dictfetchall()
        data = []
        # One row per period: [period name, load(wc1), load(wc2), ...].
        for date in dates_list:
            vals = []
            for workcenter in workcenters:
                cr.execute("SELECT SUM(mrp_production_workcenter_line.hour) AS hours, SUM(mrp_production_workcenter_line.cycle) AS cycles, \
                        resource_resource.name AS name, mrp_workcenter.id AS id \
                        FROM mrp_production_workcenter_line, mrp_production, mrp_workcenter, resource_resource \
                        WHERE (mrp_production_workcenter_line.production_id=mrp_production.id) \
                        AND (mrp_production_workcenter_line.workcenter_id=mrp_workcenter.id) \
                        AND (mrp_workcenter.resource_id=resource_resource.id) \
                        AND (mrp_workcenter.id=%s) \
                        AND (mrp_production.date_planned BETWEEN %s AND %s) \
                        GROUP BY mrp_production_workcenter_line.workcenter_id, resource_resource.name, mrp_workcenter.id \
                        ORDER BY mrp_workcenter.id", (workcenter['id'], dates[date]['start'] + ' 00:00:00', dates[date]['stop'] + ' 23:59:59'))
                res = cr.dictfetchall()
                if not res:
                    vals.append(0.0)
                else:
                    if datas['form']['measure_unit'] == 'cycles':
                        vals.append(res[0]['cycles'] or 0.0)
                    else:
                        vals.append(res[0]['hours'] or 0.0)
            toto = [dates[date]['name']]
            for val in vals:
                toto.append(val)
            data.append(toto)
        workcenter_num = 0
        for workcenter in workcenters:
            f = fill_style.Plain()
            f.bgcolor = colors[workcenter_num]
            # NOTE(review): `len(res)` refers to the *last* per-period query
            # result, not len(workcenters) — looks suspicious; verify.
            ar.add_plot(bar_plot.T(label=workcenter['name'], data=data, fill_style=f, hcol=workcenter_num+1, cluster=(workcenter_num, len(res))))
            workcenter_num += 1
        if (not data) or (len(data[0]) <= 1):
            ar = self._empty_graph(time.strftime('%Y-%m-%d'))
        ar.draw(can)
        # close canvas so that the file is written to "disk"
        can.close()
        self.obj = external_pdf(pdf_string.getvalue())
        self.obj.render()
        pdf_string.close()
        return (self.obj.pdf, 'pdf')
    def _empty_graph(self, date):
        # Placeholder chart shown when no production order matched.
        data = [[date, 0]]
        ar = area.T(x_coord = category_coord.T(data, 0), y_range = (0, None),
                x_axis = axis.X(label="Periods"),
                y_axis = axis.Y(label="Load"))
        ar.add_plot(bar_plot.T(data = data, label="No production order"))
        return ar
# Register the report under its service name.
report_custom('report.mrp.workcenter.load')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| gpl-2.0 |
franciscofranco/mako | tools/perf/util/setup.py | 4998 | 1330 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
    """distutils build_ext redirected into the PYTHON_EXTBUILD_* directories."""
    def finalize_options(self):
        _build_ext.finalize_options(self)
        # Override the computed paths with the ones the kernel build exports.
        self.build_temp = build_tmp
        self.build_lib = build_lib
class install_lib(_install_lib):
    """distutils install_lib that installs from the PYTHON_EXTBUILD_LIB dir."""
    def finalize_options(self):
        _install_lib.finalize_options(self)
        # Install from the out-of-tree build directory, not the default one.
        self.build_dir = build_lib
# Compiler flags: base flags plus whatever the kernel build passes in CFLAGS.
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()

# Out-of-tree build directories exported by the perf Makefile.
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')

# Read the list of C sources, skipping blanks and '#' comment lines.
# Fix: use open() in a with-block instead of the Python-2-only file()
# builtin, which also leaked the file handle.
with open('util/python-ext-sources') as sources_file:
    ext_sources = [f.strip() for f in sources_file
                   if len(f.strip()) > 0 and f[0] != '#']

perf = Extension('perf',
		  sources = ext_sources,
		  include_dirs = ['util/include'],
		  extra_compile_args = cflags,
                 )

setup(name='perf',
      version='0.1',
      description='Interface with the Linux profiling infrastructure',
      author='Arnaldo Carvalho de Melo',
      author_email='acme@redhat.com',
      license='GPLv2',
      url='http://perf.wiki.kernel.org',
      ext_modules=[perf],
      cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 |
villalonreina/dipy | dipy/tracking/learning.py | 10 | 3678 | ''' Learning algorithms for tractography'''
import numpy as np
import dipy.tracking.distances as pf
def detect_corresponding_tracks(indices, tracks1, tracks2):
    ''' Detect corresponding tracks from list tracks1 to list tracks2

    For every index in `indices`, the track of ``tracks1`` at that index is
    compared against every track in ``tracks2`` using the average
    mam_distance, and the closest one is reported.

    Parameters
    ------------
    indices : sequence
        indices of tracks1 that are to be detected in tracks2
    tracks1 : sequence
        tracks as arrays, shape (N1,3) .. (Nm,3)
    tracks2 : sequence
        tracks as arrays, shape (M1,3) .. (Mm,3)

    Returns
    ---------
    track2track : array (N,2) of int, N = len(indices)
        column 0 is the index into tracks1, column 1 the index of the
        closest track in tracks2.

    See also
    ----------
    distances.mam_distances
    '''
    pairs = np.zeros((len(indices), 2))
    for row, idx in enumerate(indices):
        dists = np.array(
            [pf.mam_distances(tracks1[idx], candidate, 'avg')
             for candidate in tracks2])
        pairs[row] = np.array([idx, dists.argmin()])
    return pairs.astype(int)
def detect_corresponding_tracks_plus(indices, tracks1, indices2, tracks2):
    ''' Detect corresponding tracks from 1 to 2 where tracks1 & tracks2 are
    sequences of tracks

    Same matching as :func:`detect_corresponding_tracks` (average
    mam_distance, argmin over ``tracks2``), except that the reported second
    column is translated through ``indices2`` — the indices of ``tracks2``
    in the initial brain.

    Parameters
    ------------
    indices : sequence
        indices of tracks1 that are to be detected in tracks2
    tracks1 : sequence
        tracks as arrays, shape (N1,3) .. (Nm,3)
    indices2 : sequence
        indices of tracks2 in the initial brain
    tracks2 : sequence
        tracks as arrays, shape (M1,3) .. (Mm,3)

    Returns
    ---------
    track2track : array (N,2) of int, N = len(indices)
        column 0 is the index into tracks1, column 1 the corresponding entry
        of indices2.

    See also
    ----------
    distances.mam_distances
    '''
    pairs = np.zeros((len(indices), 2))
    for row, idx in enumerate(indices):
        dists = np.array(
            [pf.mam_distances(tracks1[idx], candidate, 'avg')
             for candidate in tracks2])
        pairs[row] = np.array([idx, indices2[dists.argmin()]])
    return pairs.astype(int)
| bsd-3-clause |
EthanBlackburn/flanker | flanker/mime/message/fallback/part.py | 8 | 5091 | import logging
import email
from webob.multidict import MultiDict
from flanker.mime.message.charsets import convert_to_unicode
from flanker.mime.message.headers.headers import remove_newlines, MimeHeaders
from flanker.mime.message.part import RichPartMixin
from flanker.mime.message.scanner import ContentType
from flanker.mime.message import utils, headers
from flanker.mime.message.headers import parametrized, normalize
log = logging.getLogger(__name__)
class FallbackMimePart(RichPartMixin):
    """Best-effort MIME part backed by a stdlib ``email`` Message.

    Used when flanker's native parser cannot handle a message: wraps an
    ``email.message.Message`` and exposes flanker's MimePart-like API on
    top of it.  Several setters are intentionally no-ops (see FIXMEs).
    """
    def __init__(self, message):
        RichPartMixin.__init__(self, is_root=False)
        # The wrapped stdlib message; headers are mirrored via FallbackHeaders.
        self._m = message
        self._headers = FallbackHeaders(message)
        self._body = None
    @property
    def size(self):
        # Leaf part: length of the raw (undecoded) payload; multipart: sum
        # of the children's sizes.
        if not self._m.is_multipart():
            return len(self._m.get_payload(decode=False))
        else:
            return sum(p.size for p in self.parts)
    @property
    def headers(self):
        return self._headers
    @property
    def content_type(self):
        return ContentType(self._m.get_content_maintype(),
                           self._m.get_content_subtype(),
                           dict(self._m.get_params() or []))
    @property
    def content_disposition(self):
        # Best effort: any parse failure yields (None, {}).
        try:
            return parametrized.decode(self._m.get('Content-Disposition', ''))
        except:
            return None, {}
    @property
    def content_encoding(self):
        return self._m.get('Content-Transfer-Encoding')
    @content_encoding.setter
    def content_encoding(self, value):
        pass # FIXME Not implement
    @property
    def body(self):
        # Lazily decode and cache the payload.  delivery-status parts fall
        # back to joining sub-part strings; text parts are decoded to unicode.
        if self._body:
            return self._body
        if self.content_type.is_delivery_status():
            self._body = self._m.get_payload(decode=True)
            if self._body is None:
                self._body = "\r\n".join(str(p) for p in self._m.get_payload())
        elif not self._m.is_multipart():
            self._body = self._m.get_payload(decode=True)
            if self._m.get_content_maintype() == 'text':
                self._body = convert_to_unicode(self.charset, self._body)
        return self._body
    @body.setter
    def body(self, value):
        # Invalidate the cache; only leaf parts accept a new payload.
        self._body = None
        if not self._m.is_multipart():
            self._m.set_payload(value.encode('utf-8'), 'utf-8')
    @property
    def charset(self):
        return self.content_type.get_charset()
    @charset.setter
    def charset(self, value):
        pass # FIXME Not implement
    def to_string(self):
        return utils.python_message_to_string(self._m)
    def to_stream(self, out):
        out.write(self.to_string())
    def was_changed(self):
        return False # FIXME Not implement
    def to_python_message(self):
        return self._m
    def append(self, *messages):
        # Re-parse each message through the stdlib parser before attaching.
        for m in messages:
            part = FallbackMimePart(email.message_from_string(m.to_string()))
            self._m.attach(part)
    @property
    def parts(self):
        if self._m.is_multipart():
            return [FallbackMimePart(p) for p in self._m.get_payload() if p]
        else:
            return []
    @property
    def enclosed(self):
        # message/rfc822 wraps exactly one enclosed message.
        if self.content_type == 'message/rfc822':
            return FallbackMimePart(self._m.get_payload()[0])
    def enclose(self, message):
        pass # FIXME Not implemented
class FallbackHeaders(MimeHeaders):
    """Header collection that mirrors every mutation into the wrapped
    ``email.Message`` so both views stay consistent.
    """
    def __init__(self, message):
        # Decode raw header values up front; failures are absorbed inside
        # _try_decode.
        MimeHeaders.__init__(self, [(k, _try_decode(k, v))
                                    for k, v in message.items()])
        self._m = message
    def __setitem__(self, key, value):
        MimeHeaders.__setitem__(self, key, value)
        # del-then-set replaces every occurrence of the header in the
        # underlying message with the single new value.
        del self._m[key]
        self._m[key] = remove_newlines(value)
    def __delitem__(self, key):
        MimeHeaders.__delitem__(self, key)
        del self._m[key]
    def prepend(self, key, value):
        MimeHeaders.prepend(self, key, value)
        # email.Message has no prepend API; pokes the private header list.
        self._m._headers.insert(0, (normalize(key), remove_newlines(value)))
    def add(self, key, value):
        MimeHeaders.add(self, key, value)
        self._m[key] = headers.to_mime(normalize(key), remove_newlines(value))
    def transform(self, func):
        """Apply ``func(key, value) -> (key, value)`` to every header and
        rewrite internal state only when something actually changed."""
        changed = [False]
        # Mutable cell lets the closure flag a change (no ``nonlocal`` on py2).
        def wrapped_func(key, value):
            new_key, new_value = func(key, value)
            if new_value != value or new_key != key:
                changed[0] = True
            return new_key, new_value
        transformed_headers = [wrapped_func(k, v) for k, v in self._m.items()]
        if changed[0]:
            # NOTE(review): writes MimeHeaders' private _v MultiDict directly;
            # confirm this stays in sync with MimeHeaders internals.
            self._m._headers = transformed_headers
            self._v = MultiDict([(normalize(k), remove_newlines(v))
                                 for k, v in transformed_headers])
            self.changed = True
def _try_decode(key, value):
    """Decode a raw header value into flanker's parsed representation.

    Python 2 semantics: ``str`` here is a byte string and is checked
    before ``unicode``; branch order matters. Non-string, non-sequence
    values collapse to the empty string.
    """
    if isinstance(value, (tuple, list)):
        # Already-parsed parametrized value; pass through untouched.
        return value
    elif isinstance(value, str):
        try:
            return headers.parse_header_value(key, value)
        except Exception:
            # Undecodable bytes: best-effort UTF-8, dropping bad sequences.
            return unicode(value, 'utf-8', 'ignore')
    elif isinstance(value, unicode):
        return value
    else:
        return ""
| apache-2.0 |
tianweizhang/nova | nova/tests/image/fake.py | 11 | 9332 | # Copyright 2011 Justin Santa Barbara
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of a fake image service."""
import copy
import datetime
import uuid
from oslo.config import cfg
from nova.compute import arch
from nova import exception
import nova.image.glance
from nova.openstack.common import log as logging
CONF = cfg.CONF
CONF.import_opt('null_kernel', 'nova.compute.api')
LOG = logging.getLogger(__name__)
class _FakeImageService(object):
    """Mock (fake) image service for unit testing."""
    def __init__(self):
        # image id (str) -> image metadata dict
        self.images = {}
        # NOTE(justinsb): The OpenStack API can't upload an image?
        # So, make sure we've got one..
        timestamp = datetime.datetime(2011, 1, 1, 1, 2, 3)
        # Seven canned fixtures with fixed UUIDs so tests can reference them
        # directly; they vary in visibility, format and kernel/ramdisk links.
        image1 = {'id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
                 'name': 'fakeimage123456',
                 'created_at': timestamp,
                 'updated_at': timestamp,
                 'deleted_at': None,
                 'deleted': False,
                 'status': 'active',
                 'is_public': False,
                 'container_format': 'raw',
                 'disk_format': 'raw',
                 'size': '25165824',
                 'properties': {'kernel_id': CONF.null_kernel,
                                'ramdisk_id': CONF.null_kernel,
                                'architecture': arch.X86_64}}
        image2 = {'id': 'a2459075-d96c-40d5-893e-577ff92e721c',
                 'name': 'fakeimage123456',
                 'created_at': timestamp,
                 'updated_at': timestamp,
                 'deleted_at': None,
                 'deleted': False,
                 'status': 'active',
                 'is_public': True,
                 'container_format': 'ami',
                 'disk_format': 'ami',
                 'size': '58145823',
                 'properties': {'kernel_id': CONF.null_kernel,
                                'ramdisk_id': CONF.null_kernel}}
        image3 = {'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
                 'name': 'fakeimage123456',
                 'created_at': timestamp,
                 'updated_at': timestamp,
                 'deleted_at': None,
                 'deleted': False,
                 'status': 'active',
                 'is_public': True,
                 'container_format': None,
                 'disk_format': None,
                 'size': '83594576',
                 'properties': {'kernel_id': CONF.null_kernel,
                                'ramdisk_id': CONF.null_kernel}}
        image4 = {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
                 'name': 'fakeimage123456',
                 'created_at': timestamp,
                 'updated_at': timestamp,
                 'deleted_at': None,
                 'deleted': False,
                 'status': 'active',
                 'is_public': True,
                 'container_format': 'ami',
                 'disk_format': 'ami',
                 'size': '84035174',
                 'properties': {'kernel_id': CONF.null_kernel,
                                'ramdisk_id': CONF.null_kernel}}
        image5 = {'id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
                 'name': 'fakeimage123456',
                 'created_at': timestamp,
                 'updated_at': timestamp,
                 'deleted_at': None,
                 'deleted': False,
                 'status': 'active',
                 'is_public': True,
                 'container_format': 'ami',
                 'disk_format': 'ami',
                 'size': '26360814',
                 'properties': {'kernel_id':
                                    '155d900f-4e14-4e4c-a73d-069cbf4541e6',
                                'ramdisk_id': None}}
        image6 = {'id': 'a440c04b-79fa-479c-bed1-0b816eaec379',
                 'name': 'fakeimage6',
                 'created_at': timestamp,
                 'updated_at': timestamp,
                 'deleted_at': None,
                 'deleted': False,
                 'status': 'active',
                 'is_public': False,
                 'container_format': 'ova',
                 'disk_format': 'vhd',
                 'size': '49163826',
                 'properties': {'kernel_id': CONF.null_kernel,
                                'ramdisk_id': CONF.null_kernel,
                                'architecture': arch.X86_64,
                                'auto_disk_config': 'False'}}
        image7 = {'id': '70a599e0-31e7-49b7-b260-868f441e862b',
                 'name': 'fakeimage7',
                 'created_at': timestamp,
                 'updated_at': timestamp,
                 'deleted_at': None,
                 'deleted': False,
                 'status': 'active',
                 'is_public': False,
                 'container_format': 'ova',
                 'disk_format': 'vhd',
                 'size': '74185822',
                 'properties': {'kernel_id': CONF.null_kernel,
                                'ramdisk_id': CONF.null_kernel,
                                'architecture': arch.X86_64,
                                'auto_disk_config': 'True'}}
        self.create(None, image1)
        self.create(None, image2)
        self.create(None, image3)
        self.create(None, image4)
        self.create(None, image5)
        self.create(None, image6)
        self.create(None, image7)
        # image id -> raw bytes uploaded via create(data=...)
        self._imagedata = {}
        super(_FakeImageService, self).__init__()
    # TODO(bcwaldon): implement optional kwargs such as limit, sort_dir
    def detail(self, context, **kwargs):
        """Return list of detailed image information."""
        # NOTE(review): deepcopy of dict.values() yields a list on Python 2;
        # on Python 3 dict_values is not deepcopy-able -- py2-only code.
        return copy.deepcopy(self.images.values())
    def download(self, context, image_id, dst_path=None, data=None):
        # Validates existence via show(); writes to the file-like `data`
        # if given, otherwise to a file at dst_path.
        self.show(context, image_id)
        if data:
            data.write(self._imagedata.get(image_id, ''))
        elif dst_path:
            with open(dst_path, 'wb') as data:
                data.write(self._imagedata.get(image_id, ''))
    def show(self, context, image_id, include_locations=False):
        """Get data about specified image.
        Returns a dict containing image data for the given opaque image id.
        """
        image = self.images.get(str(image_id))
        if image:
            return copy.deepcopy(image)
        LOG.warn('Unable to find image id %s. Have images: %s',
                 image_id, self.images)
        raise exception.ImageNotFound(image_id=image_id)
    def create(self, context, metadata, data=None):
        """Store the image data and return the new image id.
        :raises: Duplicate if the image already exist.
        """
        # Generate an id when the caller did not provide one.
        image_id = str(metadata.get('id', uuid.uuid4()))
        metadata['id'] = image_id
        if image_id in self.images:
            raise exception.CouldNotUploadImage(image_id=image_id)
        self.images[image_id] = copy.deepcopy(metadata)
        if data:
            self._imagedata[image_id] = data.read()
        return self.images[image_id]
    def update(self, context, image_id, metadata, data=None,
               purge_props=False):
        """Replace the contents of the given image with the new data.
        :raises: ImageNotFound if the image does not exist.
        """
        if not self.images.get(image_id):
            raise exception.ImageNotFound(image_id=image_id)
        if purge_props:
            # Full replacement: drop everything not in the new metadata.
            self.images[image_id] = copy.deepcopy(metadata)
        else:
            # Merge: properties are merged separately so existing custom
            # properties survive a partial update.
            image = self.images[image_id]
            try:
                image['properties'].update(metadata.pop('properties'))
            except KeyError:
                pass
            image.update(metadata)
        return self.images[image_id]
    def delete(self, context, image_id):
        """Delete the given image.
        :raises: ImageNotFound if the image does not exist.
        """
        removed = self.images.pop(image_id, None)
        if not removed:
            raise exception.ImageNotFound(image_id=image_id)
    def get_location(self, context, image_id):
        # Fake services have no real backend; return a placeholder location.
        if image_id in self.images:
            return 'fake_location'
        return None
_fakeImageService = _FakeImageService()
def FakeImageService():
    """Return the process-wide fake image service singleton."""
    return _fakeImageService
def FakeImageService_reset():
    """Replace the singleton with a fresh service (used for test isolation)."""
    global _fakeImageService
    _fakeImageService = _FakeImageService()
def get_valid_image_id():
    """Return the id of one image known to the fake service.

    Uses ``next(iter(...))`` instead of ``dict.keys()[0]``: ``dict.keys()``
    is not subscriptable on Python 3, while ``next(iter(...))`` yields the
    same first key on both Python 2 and 3.
    """
    return next(iter(_fakeImageService.images))
def stub_out_image_service(stubs):
    """Point nova.image.glance lookups at the fake service for tests.

    Both the remote and default service factories are stubbed; the
    singleton is returned so callers can seed or inspect images.
    """
    image_service = FakeImageService()
    stubs.Set(nova.image.glance, 'get_remote_image_service',
              lambda x, y: (image_service, y))
    stubs.Set(nova.image.glance, 'get_default_image_service',
              lambda: image_service)
    return image_service
| apache-2.0 |
slohse/ansible | lib/ansible/modules/network/slxos/slxos_l2_interface.py | 83 | 16987 | #!/usr/bin/python
#
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: slxos_l2_interface
version_added: "2.6"
short_description: Manage Layer-2 interface on Extreme Networks SLX-OS devices.
description:
- This module provides declarative management of Layer-2 interface on
Extreme slxos devices.
author:
- Matthew Stone (@bigmstone)
options:
name:
description:
- Full name of the interface excluding any logical
unit number, i.e. Ethernet 0/1.
required: true
aliases: ['interface']
mode:
description:
- Mode in which interface needs to be configured.
default: access
choices: ['access', 'trunk']
access_vlan:
description:
- Configure given VLAN in access port.
If C(mode=access), used as the access VLAN ID.
trunk_vlans:
description:
- List of VLANs to be configured in trunk port.
If C(mode=trunk), used as the VLAN range to ADD or REMOVE
from the trunk.
native_vlan:
description:
- Native VLAN to be configured in trunk port.
If C(mode=trunk), used as the trunk native VLAN ID.
trunk_allowed_vlans:
description:
- List of allowed VLANs in a given trunk port.
If C(mode=trunk), these are the only VLANs that will be
configured on the trunk, i.e. "2-10,15".
aggregate:
description:
- List of Layer-2 interface definitions.
state:
description:
- Manage the state of the Layer-2 Interface configuration.
default: present
choices: ['present','absent', 'unconfigured']
"""
EXAMPLES = """
- name: Ensure Ethernet 0/5 is in its default l2 interface state
slxos_l2_interface:
name: Ethernet 0/5
state: unconfigured
- name: Ensure Ethernet 0/5 is configured for access vlan 20
slxos_l2_interface:
name: Ethernet 0/5
mode: access
access_vlan: 20
- name: Ensure Ethernet 0/5 only has vlans 5-10 as trunk vlans
slxos_l2_interface:
name: Ethernet 0/5
mode: trunk
native_vlan: 10
trunk_vlans: 5-10
- name: Ensure Ethernet 0/5 is a trunk port and ensure 2-50 are being tagged (doesn't mean others aren't also being tagged)
slxos_l2_interface:
name: Ethernet 0/5
mode: trunk
native_vlan: 10
trunk_vlans: 2-50
- name: Ensure these VLANs are not being tagged on the trunk
slxos_l2_interface:
name: Ethernet 0/5
mode: trunk
trunk_vlans: 51-4094
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always, except for the platforms that use Netconf transport to manage the device.
type: list
sample:
- interface Ethernet 0/5
- switchport access vlan 20
"""
import re
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import remove_default_spec
from ansible.module_utils.network.slxos.slxos import get_config, load_config, run_commands
def get_interface_type(interface):
    """Classify an interface name by its leading letters.

    Returns one of 'ethernet', 'svi', 'loopback', 'management',
    'portchannel', 'nve' or 'unknown'.
    """
    prefix = interface.upper()
    if prefix[:2] in ('ET', 'GI'):
        return 'ethernet'
    if prefix.startswith('VL'):
        return 'svi'
    if prefix.startswith('LO'):
        return 'loopback'
    if prefix[:2] in ('MG', 'MA'):
        return 'management'
    if prefix.startswith('PO'):
        return 'portchannel'
    if prefix.startswith('NV'):
        return 'nve'
    return 'unknown'
def is_switchport(name, module):
    """Return True when the interface reports L2 switchport data."""
    if get_interface_type(name) not in ('ethernet', 'portchannel'):
        # Only physical and port-channel interfaces can be switchports.
        return False
    output = run_commands(module, ['show interface {0} switchport'.format(name)])[0]
    return re.search(r'Interface name\s+:\s', output) is not None
def interface_is_portchannel(name, module):
    """Return True when an ethernet interface is bundled into a port-channel."""
    if get_interface_type(name) != 'ethernet':
        return False
    return 'channel group' in get_config(module)
def get_switchport(name, module):
    """Parse `show interface <name> switchport` output into a config dict.

    Missing fields stay None; an 'ALL' trunk is normalised to '1-4094'.
    """
    output = run_commands(module, ['show interface {0} switchport'.format(name)])[0]
    def first_group(pattern, flags=0):
        # Return the first capture group, or None when the field is absent.
        found = re.search(pattern, output, flags)
        return found.group(1) if found else None
    mode = first_group(r'Switchport mode\s+:\s(?:.* )?(\w+)$', re.M)
    access = first_group(r'Default Vlan\s+:\s(\d+)')
    native = first_group(r'Native Vlan\s+:\s(\d+)')
    trunk = first_group(r'Active Vlans\s+:\s(.+)$', re.M)
    if trunk == 'ALL':
        trunk = '1-4094'
    return {
        "interface": name,
        "mode": mode,
        "access_vlan": access,
        "native_vlan": native,
        "trunk_vlans": trunk,
    }
def remove_switchport_config_commands(name, existing, proposed, module):
    """Build the CLI needed to strip *proposed* L2 config from *existing*.

    Returns a command list prefixed with `interface <name>`, or an empty
    list when nothing matching is configured.
    """
    mode = proposed.get('mode')
    commands = []
    command = None
    if mode == 'access':
        # Only remove the access vlan when it matches what is configured.
        av_check = existing.get('access_vlan') == proposed.get('access_vlan')
        if av_check:
            command = 'no switchport access vlan {0}'.format(existing.get('access_vlan'))
            commands.append(command)
    elif mode == 'trunk':
        tv_check = existing.get('trunk_vlans_list') == proposed.get('trunk_vlans_list')
        if not tv_check:
            existing_vlans = existing.get('trunk_vlans_list')
            proposed_vlans = proposed.get('trunk_vlans_list')
            # Only emit a remove when some proposed vlans are actually present.
            vlans_to_remove = set(proposed_vlans).intersection(existing_vlans)
            if vlans_to_remove:
                # Prefer the raw trunk_vlans string; fall back to
                # trunk_allowed_vlans when only that was supplied.
                proposed_allowed_vlans = proposed.get('trunk_allowed_vlans')
                remove_trunk_allowed_vlans = proposed.get('trunk_vlans', proposed_allowed_vlans)
                command = 'switchport trunk allowed vlan remove {0}'.format(remove_trunk_allowed_vlans)
                commands.append(command)
        native_check = existing.get('native_vlan') == proposed.get('native_vlan')
        if native_check and proposed.get('native_vlan'):
            command = 'no switchport trunk native vlan {0}'.format(existing.get('native_vlan'))
            commands.append(command)
    if commands:
        commands.insert(0, 'interface ' + name)
    return commands
def get_switchport_config_commands(name, existing, proposed, module):
    """Gets commands required to config a given switchport interface

    Compares *proposed* against *existing* and returns only the commands
    needed to converge, prefixed with `interface <name>` (or an empty
    list when already converged).
    """
    proposed_mode = proposed.get('mode')
    existing_mode = existing.get('mode')
    commands = []
    command = None
    if proposed_mode != existing_mode:
        # Switch the port's operating mode first.
        if proposed_mode == 'trunk':
            command = 'switchport mode trunk'
        elif proposed_mode == 'access':
            command = 'switchport mode access'
        if command:
            commands.append(command)
    if proposed_mode == 'access':
        # str() so int vs str vlan ids still compare equal.
        av_check = str(existing.get('access_vlan')) == str(proposed.get('access_vlan'))
        if not av_check:
            command = 'switchport access vlan {0}'.format(proposed.get('access_vlan'))
            commands.append(command)
    elif proposed_mode == 'trunk':
        tv_check = existing.get('trunk_vlans_list') == proposed.get('trunk_vlans_list')
        if not tv_check:
            if proposed.get('allowed'):
                # Caller supplied trunk_allowed_vlans: add that exact range.
                command = 'switchport trunk allowed vlan add {0}'.format(proposed.get('trunk_allowed_vlans'))
                commands.append(command)
            else:
                # Only add when some proposed vlans are missing from the trunk.
                existing_vlans = existing.get('trunk_vlans_list')
                proposed_vlans = proposed.get('trunk_vlans_list')
                vlans_to_add = set(proposed_vlans).difference(existing_vlans)
                if vlans_to_add:
                    command = 'switchport trunk allowed vlan add {0}'.format(proposed.get('trunk_vlans'))
                    commands.append(command)
        native_check = str(existing.get('native_vlan')) == str(proposed.get('native_vlan'))
        if not native_check and proposed.get('native_vlan'):
            command = 'switchport trunk native vlan {0}'.format(proposed.get('native_vlan'))
            commands.append(command)
    if commands:
        commands.insert(0, 'interface ' + name)
    return commands
def is_switchport_default(existing):
    """Determine whether a switchport still carries its factory L2 config.

    Defaults are: access mode, access vlan 1, native vlan 1, and the full
    trunk range '1-4094'.

    Args:
        existing (dict): switchport state as returned by get_switchport.
    Returns:
        bool: True when all four defaults hold.
    """
    return (str(existing['access_vlan']) == '1'
            and str(existing['native_vlan']) == '1'
            and existing['trunk_vlans'] == '1-4094'
            and existing['mode'] == 'access')
def default_switchport_config(name):
    """Return the CLI commands that reset *name* to factory L2 defaults.

    Fix: the second data command previously read 'switch access vlan 1',
    which is not a valid SLX-OS command (missing the 'switchport'
    keyword) and would be rejected by the device.
    """
    commands = []
    commands.append('interface ' + name)
    commands.append('switchport mode access')
    commands.append('switchport access vlan 1')
    commands.append('switchport trunk native vlan 1')
    commands.append('switchport trunk allowed vlan all')
    return commands
def vlan_range_to_list(vlans):
    """Expand a VLAN range string such as '10,2-4' into a sorted int list.

    Falsy input yields []; a literal 'none' token stops expansion there.
    """
    if not vlans:
        return []
    expanded = []
    for token in vlans.split(','):
        if token == 'none':
            break
        if '-' in token:
            low, high = (int(edge) for edge in token.split('-'))
            expanded.extend(range(low, high + 1))
        else:
            expanded.append(int(token))
    return sorted(expanded)
def get_list_of_vlans(module):
    """Collect the VLAN ids (as strings) present in `show vlan brief`."""
    output = run_commands(module, ['show vlan brief'])[0]
    found = set()
    for row in output.strip().splitlines():
        columns = row.split()
        if not columns:
            continue
        try:
            # Data rows start with a numeric vlan id; headers do not.
            int(columns[0])
        except ValueError:
            continue
        found.add(columns[0])
    return list(found)
def flatten_list(commands):
    """Flatten one level of nesting: inner lists are spliced into the result."""
    flattened = []
    for entry in commands:
        flattened.extend(entry if isinstance(entry, list) else [entry])
    return flattened
def map_params_to_obj(module):
    """Normalise module params (or aggregate entries) into a list of dicts.

    Aggregate entries inherit any value they left as None from the
    top-level module params.
    """
    aggregate = module.params.get('aggregate')
    if not aggregate:
        # Single-interface invocation: one dict built from the flat params.
        return [dict(
            name=module.params['name'],
            mode=module.params['mode'],
            access_vlan=module.params['access_vlan'],
            native_vlan=module.params['native_vlan'],
            trunk_vlans=module.params['trunk_vlans'],
            trunk_allowed_vlans=module.params['trunk_allowed_vlans'],
            state=module.params['state'],
        )]
    normalized = []
    for item in aggregate:
        for key in item:
            if item.get(key) is None:
                item[key] = module.params[key]
        normalized.append(item.copy())
    return normalized
def main():
    """ main entry point for module execution

    Builds the argument spec, validates each wanted interface config,
    computes the converging CLI and applies it (unless in check mode).
    """
    element_spec = dict(
        name=dict(type='str', aliases=['interface']),
        mode=dict(choices=['access', 'trunk'], default='access'),
        access_vlan=dict(type='str'),
        native_vlan=dict(type='str'),
        trunk_vlans=dict(type='str'),
        trunk_allowed_vlans=dict(type='str'),
        state=dict(choices=['absent', 'present', 'unconfigured'], default='present')
    )
    aggregate_spec = deepcopy(element_spec)
    # remove default in aggregate spec, to handle common arguments
    remove_default_spec(aggregate_spec)
    argument_spec = dict(
        aggregate=dict(type='list', elements='dict', options=aggregate_spec),
    )
    argument_spec.update(element_spec)
    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=[['access_vlan', 'trunk_vlans'],
                                               ['access_vlan', 'native_vlan'],
                                               ['access_vlan', 'trunk_allowed_vlans']],
                           supports_check_mode=True)
    warnings = list()
    commands = []
    result = {'changed': False, 'warnings': warnings}
    want = map_params_to_obj(module)
    for w in want:
        name = w['name']
        mode = w['mode']
        access_vlan = w['access_vlan']
        state = w['state']
        trunk_vlans = w['trunk_vlans']
        native_vlan = w['native_vlan']
        trunk_allowed_vlans = w['trunk_allowed_vlans']
        args = dict(name=name, mode=mode, access_vlan=access_vlan,
                    native_vlan=native_vlan, trunk_vlans=trunk_vlans,
                    trunk_allowed_vlans=trunk_allowed_vlans)
        # proposed keeps only the params the user actually supplied.
        proposed = dict((k, v) for k, v in args.items() if v is not None)
        name = name.lower()
        # Parameter-combination sanity checks before touching the device.
        if mode == 'access' and state == 'present' and not access_vlan:
            module.fail_json(msg='access_vlan param is required when mode=access && state=present')
        if mode == 'trunk' and access_vlan:
            module.fail_json(msg='access_vlan param not supported when using mode=trunk')
        if not is_switchport(name, module):
            module.fail_json(msg='Ensure interface is configured to be a L2'
                             '\nport first before using this module. You can use'
                             '\nthe slxos_interface module for this.')
        if interface_is_portchannel(name, module):
            module.fail_json(msg='Cannot change L2 config on physical '
                             '\nport because it is in a portchannel. '
                             '\nYou should update the portchannel config.')
        # existing will never be null for Eth intfs as there is always a default
        existing = get_switchport(name, module)
        # Safeguard check
        # If there isn't an existing, something is wrong per previous comment
        if not existing:
            module.fail_json(msg='Make sure you are using the FULL interface name')
        if trunk_vlans or trunk_allowed_vlans:
            # Expand the requested and configured trunk ranges so the
            # command builders can diff them as integer lists.
            if trunk_vlans:
                trunk_vlans_list = vlan_range_to_list(trunk_vlans)
            elif trunk_allowed_vlans:
                trunk_vlans_list = vlan_range_to_list(trunk_allowed_vlans)
                proposed['allowed'] = True
            existing_trunks_list = vlan_range_to_list((existing['trunk_vlans']))
            existing['trunk_vlans_list'] = existing_trunks_list
            proposed['trunk_vlans_list'] = trunk_vlans_list
        current_vlans = get_list_of_vlans(module)
        if state == 'present':
            # Refuse to reference vlans that do not exist on the switch.
            if access_vlan and access_vlan not in current_vlans:
                module.fail_json(msg='You are trying to configure a VLAN'
                                 ' on an interface that\ndoes not exist on the '
                                 ' switch yet!', vlan=access_vlan)
            elif native_vlan and native_vlan not in current_vlans:
                module.fail_json(msg='You are trying to configure a VLAN'
                                 ' on an interface that\ndoes not exist on the '
                                 ' switch yet!', vlan=native_vlan)
            else:
                command = get_switchport_config_commands(name, existing, proposed, module)
                commands.append(command)
        elif state == 'unconfigured':
            is_default = is_switchport_default(existing)
            if not is_default:
                command = default_switchport_config(name)
                commands.append(command)
        elif state == 'absent':
            command = remove_switchport_config_commands(name, existing, proposed, module)
            commands.append(command)
        if trunk_vlans or trunk_allowed_vlans:
            # Drop the helper keys added above before the next iteration.
            existing.pop('trunk_vlans_list')
            proposed.pop('trunk_vlans_list')
    cmds = flatten_list(commands)
    if cmds:
        if module.check_mode:
            module.exit_json(changed=True, commands=cmds)
        else:
            result['changed'] = True
            load_config(module, cmds)
            if 'configure' in cmds:
                cmds.pop(0)
    result['commands'] = cmds
    module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
alekseyev/wheatleycms | docutils/writers/docutils_xml.py | 66 | 2727 | # $Id: docutils_xml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Simple internal document tree Writer, writes Docutils XML.
"""
__docformat__ = 'reStructuredText'
import docutils
from docutils import frontend, writers
class Writer(writers.Writer):
    """Writer that serialises the Docutils document tree as Docutils XML."""
    supported = ('xml',)
    """Formats this writer supports."""
    settings_spec = (
        '"Docutils XML" Writer Options',
        'Warning: the --newlines and --indents options may adversely affect '
        'whitespace; use them only for reading convenience.',
        (('Generate XML with newlines before and after tags.',
          ['--newlines'],
          {'action': 'store_true', 'validator': frontend.validate_boolean}),
         ('Generate XML with indents and newlines.',
          ['--indents'],
          {'action': 'store_true', 'validator': frontend.validate_boolean}),
         ('Omit the XML declaration. Use with caution.',
          ['--no-xml-declaration'],
          {'dest': 'xml_declaration', 'default': 1, 'action': 'store_false',
           'validator': frontend.validate_boolean}),
         ('Omit the DOCTYPE declaration.',
          ['--no-doctype'],
          {'dest': 'doctype_declaration', 'default': 1,
           'action': 'store_false', 'validator': frontend.validate_boolean}),))
    settings_defaults = {'output_encoding_error_handler': 'xmlcharrefreplace'}
    config_section = 'docutils_xml writer'
    config_section_dependencies = ('writers',)
    output = None
    """Final translated form of `document`."""
    # Templates for the optional file prolog; %s slots are filled in
    # translate() from the active settings / docutils version.
    xml_declaration = '<?xml version="1.0" encoding="%s"?>\n'
    #xml_stylesheet = '<?xml-stylesheet type="text/xsl" href="%s"?>\n'
    doctype = (
        '<!DOCTYPE document PUBLIC'
        ' "+//IDN docutils.sourceforge.net//DTD Docutils Generic//EN//XML"'
        ' "http://docutils.sourceforge.net/docs/ref/docutils.dtd">\n')
    generator = '<!-- Generated by Docutils %s -->\n'
    def translate(self):
        # Map user settings to pretty-printing parameters: --indents implies
        # newlines plus an indent; --newlines alone adds only newlines.
        settings = self.document.settings
        indent = newline = ''
        if settings.newlines:
            newline = '\n'
        if settings.indents:
            newline = '\n'
            indent = '    '
        output_prefix = []
        if settings.xml_declaration:
            output_prefix.append(
                self.xml_declaration % settings.output_encoding)
        if settings.doctype_declaration:
            output_prefix.append(self.doctype)
        output_prefix.append(self.generator % docutils.__version__)
        # Serialise via the DOM representation of the document root.
        docnode = self.document.asdom().childNodes[0]
        self.output = (''.join(output_prefix)
                       + docnode.toprettyxml(indent, newline))
| bsd-3-clause |
Jumpscale/jumpscale_core8 | lib/JumpScale/tools/cuisine/CuisinePackage.py | 1 | 11267 |
from JumpScale import j
from JumpScale.sal.fs.SystemFS import FileLock
base = j.tools.cuisine._getBaseClass()
LOCK_NAME = 'APT-LOCK'
LOCK_TIMEOUT = 500
class CuisinePackage(base):
def __init__(self, executor, cuisine):
self.logger = j.logger.get('j.tools.cuisine.package')
self._executor = executor
self._cuisine = cuisine
def _repository_ensure_apt(self, repository):
self.ensure('python-software-properties')
self._cuisine.core.sudo("add-apt-repository --yes " + repository)
def _apt_get(self, cmd):
CMD_APT_GET = 'DEBIAN_FRONTEND=noninteractive apt-get -q --yes -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" '
cmd = CMD_APT_GET + cmd
with FileLock(LOCK_NAME, locktimeout=LOCK_TIMEOUT):
result = self._cuisine.core.sudo(cmd)
# If the installation process was interrupted, we might get the following message
# E: dpkg was interrupted, you must manually self._cuisine.core.run 'sudo
# dpkg --configure -a' to correct the problem.
if "sudo dpkg --configure -a" in result:
with FileLock(LOCK_NAME, locktimeout=LOCK_TIMEOUT):
self._cuisine.core.sudo("DEBIAN_FRONTEND=noninteractive dpkg --configure -a")
result = self._cuisine.core.sudo(cmd)
return result
def update(self, package=None):
if self._cuisine.core.isUbuntu:
if package is None:
return self._apt_get("-q --yes update")
else:
if type(package) in (list, tuple):
package = " ".join(package)
return self._apt_get(' upgrade ' + package)
else:
raise j.exceptions.RuntimeError("could not install:%s, platform not supported" % package)
def mdupdate(self):
"""
update metadata of system
"""
self.logger.info("packages mdupdate")
if self._cuisine.core.isUbuntu:
with FileLock(LOCK_NAME, locktimeout=LOCK_TIMEOUT):
self._cuisine.core.run("apt-get update")
elif self._cuisine.core.isMac:
location = self._cuisine.core.command_location("brew")
# self._cuisine.core.run("sudo chown root %s" % location)
self._cuisine.core.run("brew update")
elif self._cuisine.core.isArch:
self._cuisine.core.run("pacman -Syy")
def upgrade(self, distupgrade=False):
"""
upgrades system, distupgrade means ubuntu 14.04 will fo to e.g. 15.04
"""
self.mdupdate()
self.logger.info("packages upgrade")
if self._cuisine.core.isUbuntu:
if distupgrade:
return self._apt_get("dist-upgrade")
else:
return self._apt_get("upgrade")
elif self._cuisine.core.isArch:
self._cuisine.core.run("pacman -Syu --noconfirm;pacman -Sc --noconfirm")
elif self._cuisine.core.isMac:
self._cuisine.core.run("brew upgrade")
elif self._cuisine.core.isCygwin:
return # no such functionality in apt-cyg
else:
raise j.exceptions.RuntimeError("could not upgrade, platform not supported")
def install(self, package, allow_unauthenticated=False):
if self._cuisine.core.isUbuntu:
cmd = 'DEBIAN_FRONTEND=noninteractive apt-get -q --yes install '
if allow_unauthenticated:
cmd += ' --allow-unauthenticated '
cmd += package
elif self._cuisine.core.isArch:
if package.startswith("python3"):
package = "extra/python"
# ignore
for unsupported in ["libpython3.5-dev", "libffi-dev", "build-essential", "libpq-dev", "libsqlite3-dev"]:
if unsupported in package:
return
cmd = "pacman -S %s --noconfirm" % package
elif self._cuisine.core.isMac:
for unsupported in ["libpython3.4-dev", "python3.4-dev", "libpython3.5-dev", "python3.5-dev",
"libffi-dev", "libssl-dev", "make", "build-essential", "libpq-dev", "libsqlite3-dev"]:
if 'libsnappy-dev' in package or 'libsnappy1v5' in package:
package = 'snappy'
if unsupported in package:
return
_, installed, _ = self._cuisine.core.run("brew list")
if package in installed:
return # means was installed
# rc,out=self._cuisine.core.run("brew info --json=v1 %s"%package,showout=False,die=False)
# if rc==0:
# info=j.data.serializer.json.loads(out)
# return #means was installed
if "wget" == package:
package = "%s --enable-iri" % package
cmd = "brew install %s " % package
elif self._cuisine.core.isCygwin:
if package in ["sudo", "net-tools"]:
return
installed = self._cuisine.core.run("apt-cyg list&")[1].splitlines()
if package in installed:
return # means was installed
cmd = "apt-cyg install %s&" % package
else:
raise j.exceptions.RuntimeError("could not install:%s, platform not supported" % package)
mdupdate = False
with FileLock(LOCK_NAME, locktimeout=LOCK_TIMEOUT):
while True:
rc, out, err = self._cuisine.core.run(cmd, die=False)
if rc > 0:
if mdupdate is True:
raise j.exceptions.RuntimeError("Could not install:'%s' \n%s" % (package, out))
if out.find("not found") != -1 or out.find("failed to retrieve some files") != -1:
self.mdupdate()
mdupdate = True
continue
raise j.exceptions.RuntimeError("Could not install:%s %s" % (package, err))
return out
def multiInstall(self, packagelist, allow_unauthenticated=False):
"""
@param packagelist is text file and each line is name of package
can also be list
e.g.
# python
mongodb
@param runid, if specified actions will be used to execute
"""
previous_sudo = self._cuisine.core.sudomode
try:
self._cuisine.core.sudomode = True
if j.data.types.string.check(packagelist):
packages = packagelist.strip().splitlines()
elif j.data.types.list.check(packagelist):
packages = packagelist
else:
raise j.exceptions.Input('packagelist should be string or a list. received a %s' % type(packagelist))
to_install = []
for dep in packages:
dep = dep.strip()
if dep is None or dep == "" or dep[0] == '#':
continue
to_install.append(dep)
self.install(' '.join(to_install), allow_unauthenticated=allow_unauthenticated)
finally:
self._cuisine.core.sudomode = previous_sudo
def start(self, package):
if self._cuisine.core.isArch or self._cuisine.core.isUbuntu or self._cuisine.core.isMac:
self._cuisine.processmanager.ensure(package)
else:
raise j.exceptions.RuntimeError("could not install/ensure:%s, platform not supported" % package)
def ensure(self, package, update=False):
"""Ensure apt packages are installed"""
if self._cuisine.core.isUbuntu:
if isinstance(package, str):
package = package.split()
res = {}
for p in package:
p = p.strip()
if not p:
continue
# The most reliable way to detect success is to use the command status
# and suffix it with OK. This won't break with other locales.
with FileLock(LOCK_NAME, locktimeout=LOCK_TIMEOUT):
_, status, _ = self._cuisine.core.run("dpkg-query -W -f='${Status} ' %s && echo **OK**;true" % p)
if not status.endswith("OK") or "not-installed" in status:
self.install(p)
res[p] = False
else:
if update:
self.update(p)
res[p] = True
if len(res) == 1:
for _, value in res.items():
return value
else:
return res
elif self._cuisine.core.isArch:
self._cuisine.core.run("pacman -S %s" % package)
return
elif self._cuisine.core.isMac:
self.install(package)
return
else:
raise j.exceptions.RuntimeError("could not install/ensure:%s, platform not supported" % package)
raise j.exceptions.RuntimeError("not supported platform")
    def clean(self, package=None, agressive=False):
        """
        clean packaging system e.g. remove outdated packages & caching packages

        :param package: if given, remove/clean only this package; otherwise
            clean the whole packaging system
        :param agressive: if True will delete full cache (note: parameter name
            keeps the historical spelling for backward compatibility)
        """
        if self._cuisine.core.isUbuntu:
            with FileLock(LOCK_NAME, locktimeout=LOCK_TIMEOUT):
                if package is not None:
                    return self._apt_get("-y --purge remove %s" % package)
                else:
                    self._cuisine.core.run("apt-get autoremove -y")
                    self._apt_get("autoclean")
                    # Aggressive image-style cleanup: wipe caches, build dirs,
                    # logs and pyc caches, then recreate the dirs apt expects.
                    C = """
                    apt-get clean
                    rm -rf /bd_build
                    rm -rf /tmp/* /var/tmp/*
                    rm -f /etc/dpkg/dpkg.cfg.d/02apt-speedup
                    find -regex '.*__pycache__.*' -delete
                    rm -rf /var/log
                    mkdir -p /var/log/apt
                    rm -rf /var/tmp
                    mkdir -p /var/tmp
                    """
                    self._cuisine.core.execute_bash(C)
        elif self._cuisine.core.isArch:
            # "pacman -Sc" removes unused cached packages; "-Scc" empties the
            # whole cache.
            cmd = "pacman -Sc"
            if agressive:
                cmd += "c"
            self._cuisine.core.run(cmd)
            if agressive:
                # Lists orphaned dependencies; output deliberately hidden.
                self._cuisine.core.run("pacman -Qdttq", showout=False)
        elif self._cuisine.core.isMac:
            if package:
                self._cuisine.core.run("brew cleanup %s" % package)
                self._cuisine.core.run("brew remove %s" % package)
            else:
                self._cuisine.core.run("brew cleanup")
        elif self._cuisine.core.isCygwin:
            if package:
                self._cuisine.core.run("apt-cyg remove %s" % package)
            else:
                # No system-wide cleanup available through apt-cyg.
                pass
        else:
            raise j.exceptions.RuntimeError("could not package clean:%s, platform not supported" % package)
def remove(self, package, autoclean=False):
if self._cuisine.core.isUbuntu:
self._apt_get('remove ' + package)
if autoclean:
self._apt_get("autoclean")
elif self.isMac:
self._cuisine.core.run("brew remove %s" % package)
def __repr__(self):
return "cuisine.package:%s:%s" % (self._executor.addr, self._executor.port)
__str__ = __repr__
| apache-2.0 |
sadleader/odoo | addons/anonymization/__init__.py | 441 | 1080 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import anonymization
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
tojon/treeherder | tests/jenkins/tests/test_filter_panel.py | 2 | 2108 | import pytest
from pages.treeherder import TreeherderPage
@pytest.mark.nondestructive
def test_filter_by_test_status(base_url, selenium):
    """Open Treeherder page, open Filters Panel, select one filter,
    verify results"""
    page = TreeherderPage(selenium, base_url).open()
    page.filter_unclassified_jobs()
    page.click_on_filters_panel()
    # With only one result-status checkbox left enabled at a time, the next
    # unclassified failure opened must carry exactly that status.
    # Test 'testfailed' unclassified failures
    page.deselect_busted_failures()
    page.deselect_exception_failures()
    # Guard: the live data set may contain no matching jobs at all.
    if len(page.all_jobs) > 0:
        page.open_next_unclassified_failure()
        assert 'testfailed' == page.info_panel.job_details.job_result_status
    # Test 'busted' unclassified failures
    page.select_busted_failures()
    page.deselect_testfailed_failures()
    if len(page.all_jobs) > 0:
        # Close the details panel left open by the previous assertion.
        page.close_the_job_panel()
        page.open_next_unclassified_failure()
        assert 'busted' == page.info_panel.job_details.job_result_status
    # Test 'exception' unclassified failures
    page.select_exception_failures()
    page.deselect_busted_failures()
    if len(page.all_jobs) > 0:
        page.close_the_job_panel()
        page.open_next_unclassified_failure()
        assert 'exception' == page.info_panel.job_details.job_result_status
@pytest.mark.nondestructive
def test_filter_panel_reset_button(base_url, selenium):
    """Open Treeherder page, open Filters Panel, disable all failures,
    check that all checkboxes are not selected, check that there
    are no failures, click reset button and verify that default checkboxes
    are selected"""
    page = TreeherderPage(selenium, base_url).open()
    # Baseline job count before any filtering is applied.
    all_jobs = len(page.all_jobs)
    page.click_on_filters_panel()
    page.deselect_all_failures()
    # All three failure checkboxes must be cleared by deselect_all_failures().
    assert not page.checkbox_testfailed_is_selected
    assert not page.checkbox_busted_is_selected
    assert not page.checkbox_exception_is_selected
    # Filtering out every failure type must shrink the visible job list.
    filtered_jobs = len(page.all_jobs)
    assert not all_jobs == filtered_jobs
    # Reset must restore the default (all selected) checkbox state.
    page.reset_filters()
    assert page.checkbox_testfailed_is_selected
    assert page.checkbox_busted_is_selected
    assert page.checkbox_exception_is_selected
| mpl-2.0 |
anitzkin/opencog | opencog/python/spatiotemporal/temporal_events/animation.py | 34 | 4896 | from matplotlib.lines import Line2D
from matplotlib.ticker import AutoMinorLocator
from numpy.core.multiarray import zeros
from spatiotemporal.temporal_events.trapezium import TemporalEventTrapezium
from spatiotemporal.time_intervals import TimeInterval
from matplotlib import pyplot as plt
from matplotlib import animation
__author__ = 'keyvan'
# Shared bar positions / zero heights for the 13-bin relation histograms
# (13 = number of Allen-style temporal relations shown on each axis).
x_axis = xrange(13)
zeros_13 = zeros(13)
class Animation(object):
    """Animated view of temporal relations between three trapezium events.

    Events A and C stay fixed while an event with B's shape slides along the
    time axis.  Three bar axes show the 13-element relation vectors for
    (A,B), (B,C) and (A,C); a fourth axis plots the membership functions.
    """

    def __init__(self, event_a, event_b, event_c, plt=plt):
        self.event_a = event_a
        self.event_c = event_c
        # Only B's shape is kept, as offsets from its start, so a copy of B
        # can be re-created at any position during the animation.
        self.event_b_length_beginning = event_b.beginning - event_b.a
        self.event_b_length_middle = self.event_b_length_beginning + event_b.ending - event_b.beginning
        # NOTE(review): "total" is only b - ending (the tail after `ending`),
        # not B's full length - confirm this is the intended sliding span.
        self.event_b_length_total = event_b.b - event_b.ending
        self.plt = plt
        self.fig = plt.figure(1)
        # Four stacked subplots: A*B bars, B*C bars, A*C bars, then the
        # membership functions of the three events.
        self.ax_a_b = self.fig.add_subplot(4, 1, 1)
        self.ax_b_c = self.fig.add_subplot(4, 1, 2)
        self.ax_a_c = self.fig.add_subplot(4, 1, 3)
        self.ax_relations = self.fig.add_subplot(4, 1, 4)
        self.ax_a_b.set_xlim(0, 13)
        self.ax_a_b.set_ylim(0, 1)
        self.ax_b_c.set_xlim(0, 13)
        self.ax_b_c.set_ylim(0, 1)
        self.ax_a_c.set_xlim(0, 13)
        self.ax_a_c.set_ylim(0, 1)
        # Bars start at height zero and are updated in place each frame.
        self.rects_a_b = self.ax_a_b.bar(x_axis, zeros_13)
        self.rects_b_c = self.ax_b_c.bar(x_axis, zeros_13)
        self.rects_a_c = self.ax_a_c.bar(x_axis, zeros_13)
        self.line_a = Line2D([], [])
        self.line_b = Line2D([], [])
        self.line_c = Line2D([], [])
        self.ax_relations.add_line(self.line_a)
        self.ax_relations.add_line(self.line_b)
        self.ax_relations.add_line(self.line_c)
        # Time window wide enough for B to slide fully past both A and C.
        a = min(event_a.a, event_c.a) - self.event_b_length_total
        b = max(event_a.b, event_c.b)
        self.ax_relations.set_xlim(a, b + self.event_b_length_total)
        self.ax_relations.set_ylim(0, 1.1)
        # self.interval = TimeInterval(a, b, 150)
        self.interval = TimeInterval(a, b, 2)
        # Label the 13 relation bins; the letters presumably denote the
        # Allen-style relations (p/m/o/F/D/s/e/d/S/f/O/M/P) - TODO confirm.
        self.ax_a_b.xaxis.set_minor_formatter(self.ax_a_b.xaxis.get_major_formatter())
        self.ax_a_b.xaxis.set_minor_locator(AutoMinorLocator(2))
        self.ax_a_b.xaxis.set_ticklabels('poDedOP')
        self.ax_a_b.xaxis.set_ticklabels('mFsSfM', minor=True)
        self.ax_b_c.xaxis.set_minor_formatter(self.ax_b_c.xaxis.get_major_formatter())
        self.ax_b_c.xaxis.set_minor_locator(AutoMinorLocator(2))
        self.ax_b_c.xaxis.set_ticklabels('poDedOP')
        self.ax_b_c.xaxis.set_ticklabels('mFsSfM', minor=True)
        self.ax_a_c.xaxis.set_minor_formatter(self.ax_a_c.xaxis.get_major_formatter())
        self.ax_a_c.xaxis.set_minor_locator(AutoMinorLocator(2))
        self.ax_a_c.xaxis.set_ticklabels('poDedOP')
        self.ax_a_c.xaxis.set_ticklabels('mFsSfM', minor=True)

    def init(self):
        """FuncAnimation init_func: draw the static artists (A, C, A*C)."""
        artists = []
        self.line_a.set_data(self.event_a, self.event_a.membership_function)
        self.line_b.set_data([], [])
        self.line_c.set_data(self.event_c, self.event_c.membership_function)
        artists.append(self.line_a)
        artists.append(self.line_b)
        artists.append(self.line_c)
        for rect, h in zip(self.rects_a_b, zeros_13):
            rect.set_height(h)
            artists.append(rect)
        for rect, h in zip(self.rects_b_c, zeros_13):
            rect.set_height(h)
            artists.append(rect)
        # A and C are fixed, so their relation vector never changes.
        for rect, h in zip(self.rects_a_c, (self.event_a * self.event_c).to_list()):
            rect.set_height(h)
            artists.append(rect)
        return artists

    def animate(self, t):
        """FuncAnimation callback: place B at interval[t] and redraw."""
        interval = self.interval
        # Rebuild an event with B's shape starting at the t-th time point.
        B = TemporalEventTrapezium(interval[t], interval[t] + self.event_b_length_total,
                                   interval[t] + self.event_b_length_beginning,
                                   interval[t] + self.event_b_length_middle)
        # NOTE(review): opening a new figure and calling show() on every frame
        # looks like leftover debugging - confirm before relying on this.
        plt.figure()
        B.plot().show()
        a_b = (self.event_a * B).to_list()
        b_c = (B * self.event_c).to_list()
        self.line_b.set_data(B, B.membership_function)
        artists = []
        for rect, h in zip(self.rects_a_b, a_b):
            rect.set_height(h)
            artists.append(rect)
        for rect, h in zip(self.rects_b_c, b_c):
            rect.set_height(h)
            artists.append(rect)
        artists.append(self.line_a)
        artists.append(self.line_b)
        artists.append(self.line_c)
        return artists

    def show(self):
        """Run the animation over all interval points and display it."""
        fr = len(self.interval) - 1
        # Keeping a reference to `anim` prevents it from being garbage
        # collected before plt.show() runs; `fr` doubles as the ms delay.
        anim = animation.FuncAnimation(self.fig, self.animate, init_func=self.init,
                                       frames=fr, interval=fr, blit=True)
        self.plt.show()
self.plt.show()
# Demo: build an animation of a sliding event between two fixed trapezia.
# show() is commented out, so importing/running this currently does nothing
# visible beyond constructing the figure.
if __name__ == '__main__':
    anim = Animation(TemporalEventTrapezium(4, 8, 5, 7),
                     TemporalEventTrapezium(0, 10, 6, 9),
                     TemporalEventTrapezium(0.5, 11, 1, 3))
    # anim.show()
| agpl-3.0 |
andresailer/DIRAC | ConfigurationSystem/Client/Helpers/Registry.py | 2 | 10895 | """ Helper for /Registry section
"""
from DIRAC import S_OK, S_ERROR
from DIRAC.ConfigurationSystem.Client.Config import gConfig
from DIRAC.ConfigurationSystem.Client.Helpers.CSGlobals import getVO
__RCSID__ = "$Id$"
# pylint: disable=missing-docstring
gBaseRegistrySection = "/Registry"
def getUsernameForDN(dn, usersList=False):
  """Find the DIRAC username whose registered DNs contain *dn*.

  :param dn: certificate DN to look up
  :param usersList: optional pre-fetched list of usernames (saves a CS query)
  :return: S_OK(username) or S_ERROR if no user matches
  """
  if not usersList:
    result = gConfig.getSections("%s/Users" % gBaseRegistrySection)
    if not result['OK']:
      return result
    usersList = result['Value']
  for user in usersList:
    registeredDNs = gConfig.getValue("%s/Users/%s/DN" % (gBaseRegistrySection, user), [])
    if dn in registeredDNs:
      return S_OK(user)
  return S_ERROR("No username found for dn %s" % dn)
def getDNForUsername(username):
  """Return S_OK(list of DNs) registered for *username*, or S_ERROR."""
  registeredDNs = gConfig.getValue("%s/Users/%s/DN" % (gBaseRegistrySection, username), [])
  if not registeredDNs:
    return S_ERROR("No DN found for user %s" % username)
  return S_OK(registeredDNs)
def getDNForHost(host):
  """Return S_OK(list of DNs) registered for *host*, or S_ERROR."""
  registeredDNs = gConfig.getValue("%s/Hosts/%s/DN" % (gBaseRegistrySection, host), [])
  if not registeredDNs:
    return S_ERROR("No DN found for host %s" % host)
  return S_OK(registeredDNs)
def getGroupsForDN(dn):
  """Groups of the user owning *dn* (S_OK(list of groups) or S_ERROR)."""
  userResult = getUsernameForDN(dn)
  return getGroupsForUser(userResult['Value']) if userResult['OK'] else userResult
def __getGroupsWithAttr(attrName, value):
  """Return the sorted group names whose option *attrName* contains *value*.

  :return: S_OK(sorted list) or S_ERROR when nothing matches / CS fails
  """
  result = gConfig.getSections("%s/Groups" % gBaseRegistrySection)
  if not result['OK']:
    return result
  matching = sorted(group for group in result['Value']
                    if value in gConfig.getValue("%s/Groups/%s/%s" % (gBaseRegistrySection, group, attrName), []))
  if not matching:
    return S_ERROR("No groups found for %s=%s" % (attrName, value))
  return S_OK(matching)
def getGroupsForUser(username):
  # Groups whose "Users" option contains *username*.
  return __getGroupsWithAttr('Users', username)


def getGroupsForVO(vo):
  # In a single-VO installation (getVO() non-empty) every group belongs to
  # the VO, so return all defined groups; otherwise match the VO option.
  if getVO():
    return gConfig.getSections("%s/Groups" % gBaseRegistrySection)
  return __getGroupsWithAttr('VO', vo)


def getGroupsWithProperty(propName):
  # Groups whose "Properties" option contains *propName*.
  return __getGroupsWithAttr("Properties", propName)
def getHostnameForDN(dn):
  """Find the registered host whose DNs contain *dn*.

  :return: S_OK(hostname) or S_ERROR when no host matches / CS fails
  """
  result = gConfig.getSections("%s/Hosts" % gBaseRegistrySection)
  if not result['OK']:
    return result
  for host in result['Value']:
    registeredDNs = gConfig.getValue("%s/Hosts/%s/DN" % (gBaseRegistrySection, host), [])
    if dn in registeredDNs:
      return S_OK(host)
  return S_ERROR("No hostname found for dn %s" % dn)
def getDefaultUserGroup():
  """Return the installation default user group (/Registry/DefaultGroup).

  FIX: the option path was built as "/%s/DefaultGroup", producing a double
  slash ("//Registry/DefaultGroup") since gBaseRegistrySection already starts
  with "/"; findDefaultGroupForUser() reads the same option with the correct
  "%s/DefaultGroup" form, which is used here for consistency.
  """
  return gConfig.getValue("%s/DefaultGroup" % gBaseRegistrySection, "user")
def findDefaultGroupForDN(dn):
  """Default group of the user owning *dn* (S_OK(group) or S_ERROR)."""
  userResult = getUsernameForDN(dn)
  if not userResult['OK']:
    return userResult
  return findDefaultGroupForUser(userResult['Value'])
def findDefaultGroupForUser(userName):
  """Pick a default group for *userName*.

  Preference order: the user's own DefaultGroup option, then the global
  /Registry/DefaultGroup list (default ["user"]), then simply the first
  group the user belongs to.
  :return: S_OK(group) or S_ERROR if the user has no groups at all
  """
  candidates = getUserOption(userName, "DefaultGroup", [])
  candidates = candidates + gConfig.getValue("%s/DefaultGroup" % gBaseRegistrySection, ["user"])
  result = getGroupsForUser(userName)
  if not result['OK']:
    return result
  userGroups = result['Value']
  for candidate in candidates:
    if candidate in userGroups:
      return S_OK(candidate)
  if userGroups:
    return S_OK(userGroups[0])
  return S_ERROR("User %s has no groups" % userName)
def getAllUsers():
  """All usernames defined in the Registry (empty list on CS error)."""
  result = gConfig.getSections("%s/Users" % gBaseRegistrySection)
  return result['Value'] if result['OK'] else []
def getAllGroups():
  """All group names defined in the Registry (empty list on CS error)."""
  result = gConfig.getSections("%s/Groups" % gBaseRegistrySection)
  return result['Value'] if result['OK'] else []
def getUsersInGroup(groupName, defaultValue=None):
  """Usernames listed in *groupName* (``defaultValue``, default [], on miss)."""
  if defaultValue is None:
    defaultValue = []
  return gConfig.getValue("%s/Groups/%s/Users" % (gBaseRegistrySection, groupName), defaultValue)
def getUsersInVO(vo, defaultValue=None):
  """Concatenated user lists of all groups of *vo* (may contain duplicates).

  :return: list of usernames, or *defaultValue* (default []) when the VO has
      no groups or the lookup fails
  """
  if defaultValue is None:
    defaultValue = []
  result = getGroupsForVO(vo)
  if not result['OK']:
    return defaultValue
  groups = result['Value']
  if not groups:
    return defaultValue
  users = []
  for group in groups:
    users.extend(getUsersInGroup(group))
  return users
def getDNsInVO(vo):
  """All DNs of every user registered in *vo* (users without DNs skipped)."""
  dns = []
  for user in getUsersInVO(vo):
    result = getDNForUsername(user)
    if result['OK']:
      dns.extend(result['Value'])
  return dns
def getDNsInGroup(groupName):
  """All DNs of every user in *groupName* (users without DNs skipped)."""
  dns = []
  for user in getUsersInGroup(groupName):
    result = getDNForUsername(user)
    if result['OK']:
      dns.extend(result['Value'])
  return dns
def getPropertiesForGroup(groupName, defaultValue=None):
  """Properties option of *groupName* (``defaultValue``, default [], on miss)."""
  if defaultValue is None:
    defaultValue = []
  return gConfig.getValue("%s/Groups/%s/Properties" % (gBaseRegistrySection, groupName), defaultValue)
def getPropertiesForHost(hostName, defaultValue=None):
  """Properties option of *hostName* (``defaultValue``, default [], on miss)."""
  if defaultValue is None:
    defaultValue = []
  return gConfig.getValue("%s/Hosts/%s/Properties" % (gBaseRegistrySection, hostName), defaultValue)
def getPropertiesForEntity(group, name="", dn="", defaultValue=None):
  """Properties of a group, or of a host when *group* is the literal 'hosts'.

  For hosts, the host is identified by *name*, or resolved from *dn* when
  *name* is empty; unresolvable hosts yield *defaultValue*.
  """
  if defaultValue is None:
    defaultValue = []
  if group != 'hosts':
    return getPropertiesForGroup(group, defaultValue)
  hostName = name
  if not hostName:
    result = getHostnameForDN(dn)
    if not result['OK']:
      return defaultValue
    hostName = result['Value']
  return getPropertiesForHost(hostName, defaultValue)
def __matchProps(sProps, rProps):
  """Return the elements of *sProps* also present in *rProps*, in order."""
  return [prop for prop in sProps if prop in rProps]
def groupHasProperties(groupName, propList):
  """Subset of *propList* (str or list) found in the group's Properties."""
  if isinstance(propList, basestring):
    propList = [propList]
  return __matchProps(propList, getPropertiesForGroup(groupName))
def hostHasProperties(hostName, propList):
  """Subset of *propList* (str or list) found in the host's Properties."""
  if isinstance(propList, basestring):
    propList = [propList]
  return __matchProps(propList, getPropertiesForHost(hostName))
def getUserOption(userName, optName, defaultValue=""):
  # Single option value from the user's Registry section.
  return gConfig.getValue("%s/Users/%s/%s" % (gBaseRegistrySection, userName, optName), defaultValue)


def getGroupOption(groupName, optName, defaultValue=""):
  # Single option value from the group's Registry section.
  return gConfig.getValue("%s/Groups/%s/%s" % (gBaseRegistrySection, groupName, optName), defaultValue)


def getHostOption(hostName, optName, defaultValue=""):
  # Single option value from the host's Registry section.
  return gConfig.getValue("%s/Hosts/%s/%s" % (gBaseRegistrySection, hostName, optName), defaultValue)


def getHosts():
  # S_OK(list of host names) defined under /Registry/Hosts, or S_ERROR.
  return gConfig.getSections('%s/Hosts' % gBaseRegistrySection)


def getVOOption(voName, optName, defaultValue=""):
  # Single option value from the VO's Registry section.
  return gConfig.getValue("%s/VO/%s/%s" % (gBaseRegistrySection, voName, optName), defaultValue)


def getBannedIPs():
  # IP addresses banned via /Registry/BannedIPs (empty list when unset).
  return gConfig.getValue("%s/BannedIPs" % gBaseRegistrySection, [])
def getVOForGroup(group):
  """VO of *group*; in single-VO installations the global VO always wins."""
  return getVO() or gConfig.getValue("%s/Groups/%s/VO" % (gBaseRegistrySection, group), "")
def getDefaultVOMSAttribute():
  # Installation-wide fallback VOMS role (/Registry/DefaultVOMSAttribute).
  return gConfig.getValue("%s/DefaultVOMSAttribute" % gBaseRegistrySection, "")


def getVOMSAttributeForGroup(group):
  # The group's VOMSRole option, falling back to the installation default.
  return gConfig.getValue("%s/Groups/%s/VOMSRole" % (gBaseRegistrySection, group), getDefaultVOMSAttribute())
def getDefaultVOMSVO():
  """Default VOMS VO name, falling back to the DIRAC VO when unset."""
  return gConfig.getValue("%s/DefaultVOMSVO" % gBaseRegistrySection, "") or getVO()
def getVOMSVOForGroup(group):
  """VOMS VO for *group*: its VOMSVO option, else its VO's VOMSName option."""
  groupVomsVO = gConfig.getValue("%s/Groups/%s/VOMSVO" % (gBaseRegistrySection, group), getDefaultVOMSVO())
  if groupVomsVO:
    return groupVomsVO
  return getVOOption(getVOForGroup(group), 'VOMSName', '')
def getGroupsWithVOMSAttribute(vomsAttr):
  """Groups whose VOMSRole option equals *vomsAttr* (empty list on CS error)."""
  result = gConfig.getSections("%s/Groups" % (gBaseRegistrySection))
  if not result['OK']:
    return []
  return [group for group in result['Value']
          if gConfig.getValue("%s/Groups/%s/VOMSRole" % (gBaseRegistrySection, group), "") == vomsAttr]
def getVOs():
  """Get all the configured VOs.

  In a single-VO installation returns S_OK([thatVO]); otherwise the list of
  sections under /Registry/VO.
  """
  voName = getVO()
  return S_OK([voName]) if voName else gConfig.getSections('%s/VO' % gBaseRegistrySection)
def getVOMSServerInfo(requestedVO=''):
  """ Get information on VOMS servers for the given VO or for all of them

  :param requestedVO: restrict the result to this VO when non-empty
  :return: S_OK({vo: {'VOMSName': ..., 'Servers': {server: {'DN', 'CA',
      'Port'}}}}); VOs without a VOMSName option are skipped
  """
  vomsDict = {}
  result = getVOs()
  if result['OK']:
    voNames = result['Value']
    for vo in voNames:
      if requestedVO and vo != requestedVO:
        continue
      # Only VOs with a VOMSName are VOMS-enabled.
      vomsName = getVOOption(vo, 'VOMSName', '')
      if not vomsName:
        continue
      vomsDict.setdefault(vo, {})
      vomsDict[vo]['VOMSName'] = getVOOption(vo, 'VOMSName', '')
      # NOTE: `result` is reused here, shadowing the getVOs() result above.
      result = gConfig.getSections('%s/VO/%s/VOMSServers' % (gBaseRegistrySection, vo))
      if result['OK']:
        serverList = result['Value']
        vomsDict[vo].setdefault("Servers", {})
        for server in serverList:
          vomsDict[vo]['Servers'].setdefault(server, {})
          DN = gConfig.getValue('%s/VO/%s/VOMSServers/%s/DN' % (gBaseRegistrySection, vo, server), '')
          CA = gConfig.getValue('%s/VO/%s/VOMSServers/%s/CA' % (gBaseRegistrySection, vo, server), '')
          port = gConfig.getValue('%s/VO/%s/VOMSServers/%s/Port' % (gBaseRegistrySection, vo, server), 0)
          vomsDict[vo]['Servers'][server]['DN'] = DN
          vomsDict[vo]['Servers'][server]['CA'] = CA
          vomsDict[vo]['Servers'][server]['Port'] = port
  return S_OK(vomsDict)
def getVOMSRoleGroupMapping(vo=''):
  """ Get mapping of the VOMS role to the DIRAC group

      :param str vo: perform the operation for the given VO
      :return: standard structure with two mappings: VOMS-DIRAC { <VOMS_Role>: [<DIRAC_Group>] }
               and DIRAC-VOMS { <DIRAC_Group>: <VOMS_Role> } and a list of DIRAC groups without mapping
  """
  result = getGroupsForVO(vo)
  if not result['OK']:
    return result
  groupList = result['Value']
  vomsGroupDict = {}
  groupVomsDict = {}
  noVOMSGroupList = []
  noVOMSSyncGroupList = []
  for group in groupList:
    vomsRole = getGroupOption(group, 'VOMSRole')
    if vomsRole:
      # One VOMS role may map onto several DIRAC groups.
      vomsGroupDict.setdefault(vomsRole, [])
      vomsGroupDict[vomsRole].append(group)
      groupVomsDict[group] = vomsRole
      # Groups can opt out of automatic VOMS synchronisation.
      syncVOMS = getGroupOption(group, 'AutoSyncVOMS', True)
      if not syncVOMS:
        noVOMSSyncGroupList.append(group)
  # Second pass: collect the groups that got no VOMS role at all.
  for group in groupList:
    if group not in groupVomsDict:
      noVOMSGroupList.append(group)
  return S_OK({"VOMSDIRAC": vomsGroupDict,
               "DIRACVOMS": groupVomsDict,
               "NoVOMS": noVOMSGroupList,
               "NoSyncVOMS": noVOMSSyncGroupList})
def getUsernameForID(ID, usersList=False):
  """Find the DIRAC username whose registered IDs contain *ID*.

  :param usersList: optional pre-fetched list of usernames (saves a CS query)
  :return: S_OK(username) or S_ERROR if no user matches
  """
  if not usersList:
    result = gConfig.getSections("%s/Users" % gBaseRegistrySection)
    if not result['OK']:
      return result
    usersList = result['Value']
  for user in usersList:
    registeredIDs = gConfig.getValue("%s/Users/%s/ID" % (gBaseRegistrySection, user), [])
    if ID in registeredIDs:
      return S_OK(user)
  return S_ERROR("No username found for ID %s" % ID)
def getCAForUsername(username):
  """Return S_OK(list of CAs) registered for *username*, or S_ERROR."""
  registeredCAs = gConfig.getValue("%s/Users/%s/CA" % (gBaseRegistrySection, username), [])
  if not registeredCAs:
    return S_ERROR("No CA found for user %s" % username)
  return S_OK(registeredCAs)
| gpl-3.0 |
petemounce/ansible | lib/ansible/modules/cloud/openstack/os_network.py | 22 | 8683 | #!/usr/bin/python
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
# Standard Ansible metadata block consumed by the documentation tooling.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_network
short_description: Creates/removes networks from OpenStack
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
- Add or remove network from OpenStack.
options:
name:
description:
- Name to be assigned to the network.
required: true
shared:
description:
- Whether this network is shared or not.
required: false
default: false
admin_state_up:
description:
- Whether the state should be marked as up or down.
required: false
default: true
external:
description:
- Whether this network is externally accessible.
required: false
default: false
state:
description:
- Indicate desired state of the resource.
choices: ['present', 'absent']
required: false
default: present
provider_physical_network:
description:
- The physical network where this network object is implemented.
required: false
default: None
version_added: "2.1"
provider_network_type:
description:
- The type of physical network that maps to this network resource.
required: false
default: None
version_added: "2.1"
provider_segmentation_id:
description:
- An isolated segment on the physical network. The I(network_type)
attribute defines the segmentation model. For example, if the
I(network_type) value is vlan, this ID is a vlan identifier. If
the I(network_type) value is gre, this ID is a gre key.
required: false
default: None
version_added: "2.1"
project:
description:
- Project name or ID containing the network (name admin-only)
required: false
default: None
version_added: "2.1"
availability_zone:
description:
- Ignored. Present for backwards compatability
required: false
requirements: ["shade"]
'''
EXAMPLES = '''
# Create an externally accessible network named 'ext_network'.
- os_network:
cloud: mycloud
state: present
name: ext_network
external: true
'''
RETURN = '''
network:
description: Dictionary describing the network.
returned: On success when I(state) is 'present'.
type: complex
contains:
id:
description: Network ID.
type: string
sample: "4bb4f9a5-3bd2-4562-bf6a-d17a6341bb56"
name:
description: Network name.
type: string
sample: "ext_network"
shared:
description: Indicates whether this network is shared across all tenants.
type: bool
sample: false
status:
description: Network status.
type: string
sample: "ACTIVE"
mtu:
description: The MTU of a network resource.
type: integer
sample: 0
admin_state_up:
description: The administrative state of the network.
type: bool
sample: true
port_security_enabled:
description: The port security status
type: bool
sample: true
router:external:
description: Indicates whether this network is externally accessible.
type: bool
sample: true
tenant_id:
description: The tenant ID.
type: string
sample: "06820f94b9f54b119636be2728d216fc"
subnets:
description: The associated subnets.
type: list
sample: []
"provider:physical_network":
description: The physical network where this network object is implemented.
type: string
sample: my_vlan_net
"provider:network_type":
description: The type of physical network that maps to this network resource.
type: string
sample: vlan
"provider:segmentation_id":
description: An isolated segment on the physical network.
type: string
sample: 101
'''
# shade is an optional runtime dependency: record its availability instead of
# failing at import time, so main() can report a clean module failure.
try:
    import shade
    HAS_SHADE = True
except ImportError:
    HAS_SHADE = False

from distutils.version import StrictVersion
def main():
    """Entry point: create or delete an OpenStack network via shade."""
    # Standard OpenStack argument spec plus this module's own parameters.
    argument_spec = openstack_full_argument_spec(
        name=dict(required=True),
        shared=dict(default=False, type='bool'),
        admin_state_up=dict(default=True, type='bool'),
        external=dict(default=False, type='bool'),
        provider_physical_network=dict(required=False),
        provider_network_type=dict(required=False),
        provider_segmentation_id=dict(required=False),
        state=dict(default='present', choices=['absent', 'present']),
        project=dict(default=None)
    )

    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec, **module_kwargs)

    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')

    # Project scoping needs shade >= 1.6.0.
    if (module.params['project'] and
            StrictVersion(shade.__version__) < StrictVersion('1.6.0')):
        module.fail_json(msg="To utilize project, the installed version of"
                             "the shade library MUST be >=1.6.0")

    state = module.params['state']
    name = module.params['name']
    shared = module.params['shared']
    admin_state_up = module.params['admin_state_up']
    external = module.params['external']
    provider_physical_network = module.params['provider_physical_network']
    provider_network_type = module.params['provider_network_type']
    provider_segmentation_id = module.params['provider_segmentation_id']
    # Popped so the remaining params can be passed straight to shade.
    project = module.params.pop('project')

    try:
        cloud = shade.openstack_cloud(**module.params)
        # Resolve the project (tenant) and restrict lookups to it.
        if project is not None:
            proj = cloud.get_project(project)
            if proj is None:
                module.fail_json(msg='Project %s could not be found' % project)
            project_id = proj['id']
            filters = {'tenant_id': project_id}
        else:
            project_id = None
            filters = None
        net = cloud.get_network(name, filters=filters)

        if state == 'present':
            if not net:
                # Only pass provider options the user actually set.
                provider = {}
                if provider_physical_network:
                    provider['physical_network'] = provider_physical_network
                if provider_network_type:
                    provider['network_type'] = provider_network_type
                if provider_segmentation_id:
                    provider['segmentation_id'] = provider_segmentation_id

                # Provider options need shade >= 1.5.0.
                if provider and StrictVersion(shade.__version__) < StrictVersion('1.5.0'):
                    module.fail_json(msg="Shade >= 1.5.0 required to use provider options")

                if project_id is not None:
                    net = cloud.create_network(name, shared, admin_state_up,
                                               external, provider, project_id)
                else:
                    net = cloud.create_network(name, shared, admin_state_up,
                                               external, provider)
                changed = True
            else:
                # Network already exists: idempotent no-op.
                changed = False

            module.exit_json(changed=changed, network=net, id=net['id'])

        elif state == 'absent':
            if not net:
                module.exit_json(changed=False)
            else:
                cloud.delete_network(name)
                module.exit_json(changed=True)

    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == "__main__":
main()
| gpl-3.0 |
0x1997/webassets | src/webassets/filter/rjsmin/rjsmin.py | 15 | 13044 | #!/usr/bin/env python
# -*- coding: ascii -*-
#
# Copyright 2011
# Andr\xe9 Malo or his licensors, as applicable
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
=====================
Javascript Minifier
=====================
Javascript Minifier based on `jsmin.c by Douglas Crockford`_\.
This module is a re-implementation based on the semantics of jsmin.c. Usually
it produces the same results. It differs in the following ways:
- there is no error detection: unterminated string, regex and comment
literals are treated as regular javascript code and minified as such.
- Control characters inside string and regex literals are left untouched; they
are not converted to spaces (nor to \n)
- Newline characters are not allowed inside string and regex literals, except
for line continuations in string literals (ECMA-5).
- rjsmin does not handle streams, but only complete strings. (However, the
module provides a "streamy" interface).
Besides the list above it differs from direct python ports of jsmin.c in
speed. Since most parts of the logic are handled by the regex engine it's way
faster than the original python port by Baruch Even. The speed factor varies
between about 6 and 55 depending on input and python version (it gets faster
the more compressed the input already is). Compared to the speed-refactored
python port by Dave St.Germain the performance gain is less dramatic but still
between 1.2 and 7. See the docs/BENCHMARKS file for details.
rjsmin.c is a reimplementation of rjsmin.py in C and speeds it up even more.
Both python 2 and python 3 are supported.
.. _jsmin.c by Douglas Crockford:
http://www.crockford.com/javascript/jsmin.c
"""
__author__ = "Andr\xe9 Malo"
__author__ = getattr(__author__, 'decode', lambda x: __author__)('latin-1')
__docformat__ = "restructuredtext en"
__license__ = "Apache License, Version 2.0"
__version__ = '1.0.1'
__all__ = ['jsmin', 'jsmin_for_posers']
import re as _re
from webassets.six.moves import map
from webassets.six.moves import zip
def _make_jsmin(extended=True, python_only=False):
"""
Generate JS minifier based on `jsmin.c by Douglas Crockford`_
.. _jsmin.c by Douglas Crockford:
http://www.crockford.com/javascript/jsmin.c
:Parameters:
`extended` : ``bool``
Extended Regexps? (using lookahead and lookbehind). This is faster,
because it can be optimized way more. The regexps used with `extended`
being false are only left here to allow easier porting to platforms
without extended regex features (and for my own reference...)
`python_only` : ``bool``
Use only the python variant. If true, the c extension is not even
tried to be loaded.
:Return: Minifier
:Rtype: ``callable``
"""
# pylint: disable = R0912, R0914, W0612
if not python_only:
try:
import _rjsmin
except ImportError:
pass
else:
return _rjsmin.jsmin
try:
xrange
except NameError:
xrange = range # pylint: disable = W0622
space_chars = r'[\000-\011\013\014\016-\040]'
line_comment = r'(?://[^\r\n]*)'
space_comment = r'(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)'
string1 = \
r'(?:\047[^\047\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|\r)[^\047\\\r\n]*)*\047)'
string2 = r'(?:"[^"\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|\r)[^"\\\r\n]*)*")'
strings = r'(?:%s|%s)' % (string1, string2)
charclass = r'(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]*)*\])'
nospecial = r'[^/\\\[\r\n]'
if extended:
regex = r'(?:/(?![\r\n/*])%s*(?:(?:\\[^\r\n]|%s)%s*)*/)' % (
nospecial, charclass, nospecial
)
else:
regex = (
r'(?:/(?:[^*/\\\r\n\[]|%s|\\[^\r\n])%s*(?:(?:\\[^\r\n]|%s)%s*)*/)'
)
regex = regex % (charclass, nospecial, charclass, nospecial)
pre_regex = r'[(,=:\[!&|?{};\r\n]'
space = r'(?:%s|%s)' % (space_chars, space_comment)
newline = r'(?:%s?[\r\n])' % line_comment
def fix_charclass(result):
""" Fixup string of chars to fit into a regex char class """
pos = result.find('-')
if pos >= 0:
result = r'%s%s-' % (result[:pos], result[pos + 1:])
def sequentize(string):
"""
Notate consecutive characters as sequence
(1-4 instead of 1234)
"""
first, last, result = None, None, []
for char in map(ord, string):
if last is None:
first = last = char
elif last + 1 == char:
last = char
else:
result.append((first, last))
first = last = char
if last is not None:
result.append((first, last))
return ''.join(['%s%s%s' % (
chr(first),
last > first + 1 and '-' or '',
last != first and chr(last) or ''
) for first, last in result])
return _re.sub(r'([\000-\040\047])', # for better portability
lambda m: '\\%03o' % ord(m.group(1)), (sequentize(result)
.replace('\\', '\\\\')
.replace('[', '\\[')
.replace(']', '\\]')
)
)
def id_literal_(what):
""" Make id_literal like char class """
match = _re.compile(what).match
result = ''.join([
chr(c) for c in range(127) if not match(chr(c))
])
return '[^%s]' % fix_charclass(result)
def not_id_literal_(keep):
""" Make negated id_literal like char class """
match = _re.compile(id_literal_(keep)).match
result = ''.join([
chr(c) for c in range(127) if not match(chr(c))
])
return r'[%s]' % fix_charclass(result)
if extended:
id_literal = id_literal_(r'[a-zA-Z0-9_$]')
id_literal_open = id_literal_(r'[a-zA-Z0-9_${\[(+-]')
id_literal_close = id_literal_(r'[a-zA-Z0-9_$}\])"\047+-]')
space_sub = _re.compile((
r'([^\047"/\000-\040]+)'
r'|(%(strings)s[^\047"/\000-\040]*)'
r'|(?:(?<=%(pre_regex)s)%(space)s*(%(regex)s[^\047"/\000-\040]*))'
r'|(?<=%(id_literal_close)s)'
r'%(space)s*(?:(%(newline)s)%(space)s*)+'
r'(?=%(id_literal_open)s)'
r'|(?<=%(id_literal)s)(%(space)s)+(?=%(id_literal)s)'
r'|%(space)s+'
r'|(?:%(newline)s%(space)s*)+'
) % locals()).sub
def space_subber(match):
""" Substitution callback """
# pylint: disable = C0321
groups = match.groups()
if groups[0]: return groups[0]
elif groups[1]: return groups[1]
elif groups[2]: return groups[2]
elif groups[3]: return '\n'
elif groups[4]: return ' '
return ''
def jsmin(script): # pylint: disable = W0621
r"""
Minify javascript based on `jsmin.c by Douglas Crockford`_\.
Instead of parsing the stream char by char, it uses a regular
expression approach which minifies the whole script with one big
substitution regex.
.. _jsmin.c by Douglas Crockford:
http://www.crockford.com/javascript/jsmin.c
:Parameters:
`script` : ``str``
Script to minify
:Return: Minified script
:Rtype: ``str``
"""
return space_sub(space_subber, '\n%s\n' % script).strip()
else:
not_id_literal = not_id_literal_(r'[a-zA-Z0-9_$]')
not_id_literal_open = not_id_literal_(r'[a-zA-Z0-9_${\[(+-]')
not_id_literal_close = not_id_literal_(r'[a-zA-Z0-9_$}\])"\047+-]')
space_norm_sub = _re.compile((
r'(%(strings)s)'
r'|(?:(%(pre_regex)s)%(space)s*(%(regex)s))'
r'|(%(space)s)+'
r'|(?:(%(newline)s)%(space)s*)+'
) % locals()).sub
def space_norm_subber(match):
""" Substitution callback """
# pylint: disable = C0321
groups = match.groups()
if groups[0]: return groups[0]
elif groups[1]: return groups[1].replace('\r', '\n') + groups[2]
elif groups[3]: return ' '
elif groups[4]: return '\n'
space_sub1 = _re.compile((
r'[\040\n]?(%(strings)s|%(pre_regex)s%(regex)s)'
r'|\040(%(not_id_literal)s)'
r'|\n(%(not_id_literal_open)s)'
) % locals()).sub
def space_subber1(match):
""" Substitution callback """
groups = match.groups()
return groups[0] or groups[1] or groups[2]
space_sub2 = _re.compile((
r'(%(strings)s)\040?'
r'|(%(pre_regex)s%(regex)s)[\040\n]?'
r'|(%(not_id_literal)s)\040'
r'|(%(not_id_literal_close)s)\n'
) % locals()).sub
def space_subber2(match):
""" Substitution callback """
groups = match.groups()
return groups[0] or groups[1] or groups[2] or groups[3]
def jsmin(script):
r"""
Minify javascript based on `jsmin.c by Douglas Crockford`_\.
Instead of parsing the stream char by char, it uses a regular
expression approach. The script is minified with three passes:
normalization
Control character are mapped to spaces, spaces and newlines
are squeezed and comments are stripped.
space removal 1
Spaces before certain tokens are removed
space removal 2
Spaces after certain tokens are remove
.. _jsmin.c by Douglas Crockford:
http://www.crockford.com/javascript/jsmin.c
:Parameters:
`script` : ``str``
Script to minify
:Return: Minified script
:Rtype: ``str``
"""
return space_sub2(space_subber2,
space_sub1(space_subber1,
space_norm_sub(space_norm_subber, '\n%s\n' % script)
)
).strip()
return jsmin
jsmin = _make_jsmin()
def jsmin_for_posers(script):
    r"""
    Minify javascript based on `jsmin.c by Douglas Crockford`_\.

    Instead of parsing the stream char by char, it uses a regular
    expression approach which minifies the whole script with one big
    substitution regex.

    .. _jsmin.c by Douglas Crockford:
       http://www.crockford.com/javascript/jsmin.c

    :Warning: This function is the digest of a _make_jsmin() call. It just
              utilizes the resulting regex. It's just for fun here and may
              vanish any time. Use the `jsmin` function instead.

    :Parameters:
      `script` : ``str``
        Script to minify

    :Return: Minified script
    :Rtype: ``str``
    """
    def subber(match):
        """ Substitution callback """
        groups = match.groups()
        # Group meanings (mirroring the generated regex below):
        #   0: plain run of non-special chars, 1: string literal,
        #   2: regex literal -> kept verbatim;
        #   3: significant newline -> '\n', 4: significant space -> ' ';
        #   anything else (comments, squeezable whitespace) is dropped.
        return (
            groups[0] or
            groups[1] or
            groups[2] or
            (groups[3] and '\n') or
            (groups[4] and ' ') or
            ''
        )
    # The pattern below is the frozen output of _make_jsmin(); it must be
    # kept byte-for-byte identical to remain equivalent to `jsmin`.
    return _re.sub(
        r'([^\047"/\000-\040]+)|((?:(?:\047[^\047\\\r\n]*(?:\\(?:[^\r\n]|\r?'
        r'\n|\r)[^\047\\\r\n]*)*\047)|(?:"[^"\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|'
        r'\r)[^"\\\r\n]*)*"))[^\047"/\000-\040]*)|(?:(?<=[(,=:\[!&|?{};\r\n]'
        r')(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/'
        r'))*((?:/(?![\r\n/*])[^/\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*'
        r'(?:\\[^\r\n][^\\\]\r\n]*)*\]))[^/\\\[\r\n]*)*/)[^\047"/\000-\040]*'
        r'))|(?<=[^\000-!#%&(*,./:-@\[\\^`{|~])(?:[\000-\011\013\014\016-\04'
        r'0]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*(?:((?:(?://[^\r\n]*)?[\r\n'
        r']))(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)'
        r'*/))*)+(?=[^\000-#%-\047)*,./:-@\\-^`|-~])|(?<=[^\000-#%-,./:-@\[-'
        r'^`{-~-])((?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*'
        r']*\*+)*/)))+(?=[^\000-#%-,./:-@\[-^`{-~-])|(?:[\000-\011\013\014\0'
        r'16-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))+|(?:(?:(?://[^\r\n]*)'
        r'?[\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]'
        r'*\*+)*/))*)+', subber, '\n%s\n' % script
    ).strip()
if __name__ == '__main__':
    # CLI entry point: minify JavaScript read from stdin, write to stdout.
    import sys as _sys
    _sys.stdout.write(jsmin(_sys.stdin.read()))
| bsd-2-clause |
mark-ignacio/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/steps/updatechangelogswithreview_unittest.py | 122 | 2840 | # Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.tool.mocktool import MockOptions, MockTool
from webkitpy.tool.steps.updatechangelogswithreviewer import UpdateChangeLogsWithReviewer
class UpdateChangeLogsWithReviewerTest(unittest.TestCase):
    """Tests reviewer inference and the empty-state run of the step."""

    def _assert_guess_logs(self, bug_id, expected_logs):
        # Shared driver: build the step against fully mocked tool/options
        # and check the log output of _guess_reviewer_from_bug.
        output_capture = OutputCapture()
        step = UpdateChangeLogsWithReviewer(MockTool(), MockOptions())
        output_capture.assert_outputs(self, step._guess_reviewer_from_bug, [bug_id], expected_logs=expected_logs)

    def test_guess_reviewer_from_bug(self):
        self._assert_guess_logs(50001, "No reviewed patches on bug 50001, cannot infer reviewer.\n")

    def test_guess_reviewer_from_multipatch_bug(self):
        self._assert_guess_logs(50000, "Guessing \"Reviewer2\" as reviewer from attachment 10001 on bug 50000.\n")

    def test_empty_state(self):
        output_capture = OutputCapture()
        options = MockOptions()
        options.reviewer = 'MOCK reviewer'
        options.git_commit = 'MOCK git commit'
        step = UpdateChangeLogsWithReviewer(MockTool(), options)
        output_capture.assert_outputs(self, step.run, [{}])
| bsd-3-clause |
AudioHumLab/FIRtro | bin/server_input.py | 1 | 5590 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Modulo interno para conectar las entradas al procesador,
también las conecta a posibles monitores.
"""
# v2.0:
# - Adaptación a FIRtro 2.0. Se devuelven valores Boolean para indicar
# el resultado del cambio de entrada a server_process
# - Se suprimen los sleep. (!) OJO si aparece un "error de cambio de input"
# puede ser debido un escenario de CPU% muy cargada afectando
# a las operaciones jack.disconnect/connect. Para debug se usa logging.
# Es posible insertar delays en el alias jack_connect/disconnect de abajo.
#
#
# v2.1c
# - Revisión del pausado opcional de fuentes para ahorro de CPU
# - Rev de comentarios del código
#
# v2.1d
# - Se reordena el código para legibilidad
# - Se deja de intervenir aquí en los players integrados (radio, mpd, etc),
# se recurre al nuevo módulo players_integrated
# v2.1d2
# - Logging sobre $USER/tmp
#
# v2.1f
# - players_integrated.py renombrado players.py
# módulos genéricos
import os, sys, getpass
from time import sleep
import jack
# módulos de FIRtro
HOME = os.path.expanduser("~")
sys.path.append(HOME + "/bin")
from getconfig import *
# FIRtro2: signal monitor ports (the config string is converted to a list)
ext_monitor_ports = jack_external_monitors.split()
int_monitor_ports = jack_internal_monitors.split()
# FIRtro2: handles additional resampled sound cards in jack
import soundcards as sc
# FIRtro2: handles the integrated players in a separate module
import players
# LOGGING para el DEBUG de excepciones
# https://julien.danjou.info/blog/2016/python-exceptions-guide
# https://docs.python.org/3/howto/logging.html#logging-basic-tutorial
import logging
# os.getlogin() can raise "OSError: [Errno 25] Inappropriate ioctl for device"
# under a remote login, and can also fail at machine boot (non-login session),
# hence the getpass fallback.
try:
    usuario = os.getlogin()
except:
    #usuario = 'firtro'
    usuario = getpass.getuser()
# Start each run with a fresh log file under the user's ~/tmp directory.
logFile = '/home/' + usuario + '/tmp/server_input.log'
if os.path.isfile(logFile):
    os.remove(logFile)
logging.basicConfig(filename=logFile, level=logging.ERROR)
# Alias con retardos experimentales (normalmente no usados) para operaciones jack con/disconnect
def jack_connect(p1, p2):
    """Connect two jack ports (commented sleep is an optional settle delay
    for heavily loaded CPUs, see the header notes)."""
    jack.connect(p1, p2)
    #sleep(.1)
def jack_disconnect(p1, p2):
    """Disconnect two jack ports (commented sleep is an optional settle delay
    for heavily loaded CPUs, see the header notes)."""
    jack.disconnect(p1, p2)
    #sleep(.2)
def desconecta_fuentes_de(out_ports):
    """ Disconnect every client feeding the FIRtro input ports and the monitors.
    """
    # out_ports is the [L, R] pair of FIRtro input ports.
    sources_L_firtro = jack.get_connections(out_ports[0])
    sources_R_firtro = jack.get_connections(out_ports[1])
    for source in sources_L_firtro:
        jack_disconnect(source, out_ports[0])
    for source in sources_R_firtro:
        jack_disconnect(source, out_ports[1])
    # Monitors are optional
    # NOTE(review): the internal-monitor disconnects are nested under the
    # external-monitor check; any error on empty port lists is swallowed
    # and logged by the except below — confirm intended nesting.
    try:
        if ext_monitor_ports:
            sources_L_extMon = jack.get_connections(ext_monitor_ports[0])
            sources_R_extMon = jack.get_connections(ext_monitor_ports[1])
            for source in sources_L_extMon: jack_disconnect(source, ext_monitor_ports[0])
            for source in sources_R_extMon: jack_disconnect(source, ext_monitor_ports[1])
            sources_L_intMon = jack.get_connections(int_monitor_ports[0])
            sources_R_intMon = jack.get_connections(int_monitor_ports[1])
            for source in sources_L_intMon: jack_disconnect(source, int_monitor_ports[0])
            for source in sources_R_intMon: jack_disconnect(source, int_monitor_ports[1])
    except:
        logging.exception("error en desconexion de monitores")
# Funcion original de FIRtro1 levemente modificada con
# puertos de monitoreo y resincronización de posibles tarjetas externas
def change_input(input_name, in_ports, out_ports, resampled="no"):
    # 'in_ports':  [L, R] list of the jack capture ports of the chosen source.
    # 'out_ports': list from the 'firtro_ports' variable, resolved in
    #              server_process depending on whether Brutefir/Ecasound is used.
    # Returns True on success, False on any failure (see ~/tmp/server_input.log).

    # FIRtro2: if the input lives on an additional (resampled) sound card,
    # resync it before switching.
    if resampled <> "no":
        sc.external_card_resync(in_ports, resampled)
    # SWITCH, like the original FIRtro1 one.
    try:
        jack.attach("tmp")
        # Disconnect whatever was feeding FIRtro's input (and the monitors):
        desconecta_fuentes_de(out_ports)
        # Optional PAUSES of the integrated players (CPU% saving):
        players.manage_pauses(input_name)
        # Now connect the desired input to FIRtro and to the monitors:
        for i in range(len(in_ports)):
            # Inputs to FIRtro
            try:
                jack_connect(in_ports[i], out_ports[i])
            except:
                logging.exception("error conectando " + in_ports[i] + " <> " + out_ports[i])
            # The optional monitors:
            try:
                if ext_monitor_ports:
                    jack_connect(in_ports[i], ext_monitor_ports[i])
                    jack_connect(in_ports[i], int_monitor_ports[i])
            except:
                logging.exception("error en conexion de monitores")
        jack.detach()
    except:
        # On any exception we return boolean False
        logging.exception("error en cuerpo ppal")
        jack.detach()
        print "(server_input) Problemas (ver ~/tmp/server_input.log)"
        return False
    # And if everything went fine, we return boolean True
    return True
if __name__ == "__main__":
    # Normally imported by the FIRtro server; running directly just shows help.
    print __doc__
| gpl-3.0 |
google/edward2 | experimental/attentive_uncertainty/generalized_neural_process.py | 1 | 11726 | # coding=utf-8
# Copyright 2021 The Edward2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Regression model for generalized neural processes.
"""
import edward2 as ed
from experimental.attentive_uncertainty import attention # local file import
from experimental.attentive_uncertainty import layers # local file import
from experimental.attentive_uncertainty import utils # local file import
import tensorflow.compat.v1 as tf
eps = tf.keras.backend.epsilon()
class Regressor(tf.keras.Model):
  r"""Generalized neural process regressor.

  A generalized neural process (GNP) expresses the following generative process

  ```
  z ~ p(z | global_latent_layer(C))
  zi ~ p(zi | local_latent_layer(z, xi, C))
  yi ~ p(yi | decoder(z, zi, xi, C))
  ```

  Maximizing the marginal likelihood is intractable and SNPs maximize the
  evidence lower bound obtained via the following variational distributions

  ```
  z ~ q(z | global_latent_layer(T))
  zi ~ q(zi | local_latent_layer(z, xi, T))
  ```

  Note that the global_latent_net and local_latent_net parameters are shared.

  Different instantiations of GNP differ in the particular forms of
  conditioning they use; in particular, what ancestors to condition on and how
  to condition (via attention or not).
  """

  def __init__(self,
               input_dim,
               output_dim,
               x_encoder_net_sizes=None,
               x_y_encoder_net_sizes=None,
               heteroskedastic_net_sizes=None,
               global_latent_net_sizes=None,
               local_latent_net_sizes=None,
               decoder_net_sizes=None,
               att_type='multihead',
               att_heads=8,
               model_type='fully_connected',
               activation=tf.nn.relu,
               output_activation=None,
               model_path=None,
               data_uncertainty=True,
               beta=1.,
               temperature=1.):
    """Initializes the generalized neural process regressor.

    D below denotes:
      - Context dataset C during decoding phase
      - Target dataset T during encoding phase

    Args:
      input_dim: (int) Dimensionality of covariates x.
      output_dim: (int) Dimensionality of labels y.
      x_encoder_net_sizes: (list of ints) Hidden layer sizes for network
        featurizing x.
      x_y_encoder_net_sizes: (list of ints) Hidden layer sizes for network
        featurizing D.
      heteroskedastic_net_sizes: (list of ints) Hidden layer sizes for network
        that maps x to heteroskedastic variance.
      global_latent_net_sizes: (list of ints) Hidden layer sizes for network
        that maps D to mean and variance of predictive p(z | D).
      local_latent_net_sizes: (list of ints) Hidden layer sizes for network
        that maps xi, z, D to mean and variance of predictive p(zi | z, xi, D).
      decoder_net_sizes: (list of ints) Hidden layer sizes for network that maps
        xi, z, zi, D to mean and variance of predictive p(yi | z, zi, xi, D).
      att_type: (string) Attention type for freeform attention.
      att_heads: (int) Number of heads in case att_type='multihead'.
      model_type: (string) One of 'fully_connected', 'cnp', 'acnp', 'acns',
        'np', 'anp'.
      activation: (callable) Non-linearity used for all neural networks.
      output_activation: (callable) Non-linearity for predictive mean.
      model_path: (string) File path for best early-stopped model.
      data_uncertainty: (boolean) True if data uncertainty is explicit.
      beta: (float) Scaling factor for global kl loss.
      temperature: (float) Inverse scaling factor for temperature.

    Raises:
      ValueError: If model_type is unrecognized.
    """
    if (model_type not in
        ['np', 'anp', 'acns', 'fully_connected', 'cnp', 'acnp']):
      raise ValueError('Unrecognized model type: %s'% model_type)

    super(Regressor, self).__init__()
    self._input_dim = input_dim
    self._output_dim = output_dim
    self.model_type = model_type
    self._output_activation = output_activation
    self._data_uncertainty = data_uncertainty
    self.beta = tf.constant(beta)
    self.temperature = temperature

    # Sub-layers default to None; each is only built when the corresponding
    # size list / model_type requires it.
    self._global_latent_layer = None
    self._local_latent_layer = None
    self._decoder_layer = None
    self._dataset_encoding_layer = None
    self._x_encoder = None
    self._heteroskedastic_net = None
    self._homoskedastic_net = None

    # Which model types use a global / local latent variable.
    contains_global = ['np', 'anp', 'acns', 'fully_connected']
    contains_local = ['acns', 'fully_connected']

    # Optional featurizer for x; x_dim is the (possibly transformed) width.
    x_dim = input_dim
    if x_encoder_net_sizes is not None:
      self._x_encoder = utils.mlp_block(
          input_dim,
          x_encoder_net_sizes,
          activation)
      x_dim = x_encoder_net_sizes[-1]

    # Dataset encoder: either an MLP over (x, y) pairs or self-attention.
    x_y_net = None
    self_dataset_attention = None
    if x_y_encoder_net_sizes is not None:
      x_y_net = utils.mlp_block(
          x_dim + output_dim,
          x_y_encoder_net_sizes,
          activation)
      dataset_encoding_dim = x_y_encoder_net_sizes[-1]
    else:
      # Use self-attention.
      dataset_encoding_dim = x_dim + output_dim
      self_dataset_attention = attention.AttentionLayer(
          att_type=att_type, num_heads=att_heads)
      self_dataset_attention.build([x_dim, x_dim])
    self._dataset_encoding_layer = layers.DatasetEncodingLayer(
        x_y_net,
        self_dataset_attention)
    self._cross_dataset_attention = attention.AttentionLayer(
        att_type=att_type, num_heads=att_heads, scale=self.temperature)
    self._cross_dataset_attention.build([x_dim, dataset_encoding_dim])

    if model_type in contains_global:
      global_latent_net = utils.mlp_block(
          dataset_encoding_dim,
          global_latent_net_sizes,
          activation)
      self._global_latent_layer = layers.GlobalLatentLayer(global_latent_net)
      # Final layer emits mean and variance, hence the //2.
      global_latent_dim = global_latent_net_sizes[-1]//2

    if model_type in contains_local:
      local_input_dim = global_latent_dim + dataset_encoding_dim
      local_latent_net = utils.mlp_block(
          local_input_dim,
          local_latent_net_sizes,
          activation)
      self._local_latent_layer = layers.LocalLatentLayer(local_latent_net)
      local_latent_dim = local_latent_net_sizes[-1]//2
      # 'fully_connected' shares one net between prior and posterior;
      # other types build a separate prior net conditioned on z only.
      separate_prior_net = (model_type != 'fully_connected')
      if separate_prior_net:
        local_latent_net = utils.mlp_block(
            global_latent_dim,
            local_latent_net_sizes,
            activation)
        self._prior_local_latent_layer = layers.LocalLatentLayer(
            local_latent_net)
      else:
        self._prior_local_latent_layer = self._local_latent_layer

    if decoder_net_sizes is not None:
      # Decoder input width depends on which ancestors the model conditions on.
      decoder_input_dim = x_dim
      if model_type == 'cnp' or model_type == 'acnp':  # depend on C
        decoder_input_dim += dataset_encoding_dim
      elif model_type == 'np':  # depend on z
        decoder_input_dim += global_latent_dim
      elif model_type == 'anp':  # depend on z, C
        decoder_input_dim += dataset_encoding_dim + global_latent_dim
      elif model_type == 'acns':
        decoder_input_dim += dataset_encoding_dim + local_latent_dim
      elif model_type == 'fully_connected':
        decoder_input_dim += (dataset_encoding_dim + global_latent_dim
                              + local_latent_dim)
      decoder_net = utils.mlp_block(
          decoder_input_dim,
          decoder_net_sizes,
          activation)
      self._decoder_layer = layers.DecoderLayer(
          decoder_net,
          model_type,
          output_activation)

    if data_uncertainty:
      # Explicit data noise: either x-dependent (heteroskedastic MLP) or a
      # single learned scalar (homoskedastic).
      if heteroskedastic_net_sizes is not None:
        self._heteroskedastic_net = utils.mlp_block(
            x_dim,
            heteroskedastic_net_sizes,
            activation)
      else:
        self._homoskedastic_net = layers.DataNoise()
        self._homoskedastic_net.build(None)

    if model_path:
      self.load_weights(model_path)

  def call(self, context_x, context_y, target_x, target_y=None):
    """Returns the posterior predictive distribution over target labels.

    Args:
      context_x: Tensor of context covariates.
      context_y: Tensor of context labels.
      target_x: Tensor of target covariates.
      target_y: Optional tensor of target labels. When given, the latent
        variables are conditioned on the full target dataset (training);
        otherwise they are conditioned on the context alone (inference).

    Returns:
      An `ed.Normal` posterior predictive over the target labels. KL terms
      for the global and local latents are registered via `self.add_loss`.
    """
    if self._x_encoder is not None:
      context_x = self._x_encoder(context_x)
      target_x = self._x_encoder(target_x)

    # Explicit data noise variance (0. when data uncertainty is implicit).
    if self._data_uncertainty:
      if self._heteroskedastic_net is None:
        data_var = tf.nn.softplus(self._homoskedastic_net(None))
      else:
        data_var = tf.nn.softplus(self._heteroskedastic_net(target_x))
    else:
      data_var = 0.

    # Encode context/target datasets; with no target_y, reuse the context
    # encodings so posterior == prior conditioning.
    context_x_y_encodings = self._dataset_encoding_layer(context_x, context_y)
    if target_y is None:
      target_x_y_encodings = context_x_y_encodings
    else:
      target_x_y_encodings = self._dataset_encoding_layer(target_x, target_y)
    avg_context_dataset_encodings = tf.reduce_mean(
        context_x_y_encodings, axis=1, keepdims=True)
    avg_target_dataset_encodings = tf.reduce_mean(
        target_x_y_encodings, axis=1, keepdims=True)

    # Global latent z: prior from C, posterior from T, KL scaled by beta.
    global_z_prior = None
    global_z_posterior = None
    if self._global_latent_layer is not None:
      global_z_prior = self._global_latent_layer(avg_context_dataset_encodings)
      global_z_posterior = self._global_latent_layer(
          avg_target_dataset_encodings)
      global_z_kl = self.beta * global_z_posterior.distribution.kl_divergence(
          global_z_prior.distribution)
    else:
      global_z_kl = tf.constant(0., shape=(1, 1))
    self.add_loss(lambda: global_z_kl)

    # Per-target dataset encodings via cross-attention (model dependent).
    cross_attentive_encodings = None
    if self.model_type not in ['cnp', 'np']:
      cross_attentive_encodings = self._cross_dataset_attention(
          target_x, context_x, context_x_y_encodings)
    posterior_x_y_encodings = None
    prior_x_y_encodings = None
    if self.model_type == 'fully_connected':
      prior_x_y_encodings = cross_attentive_encodings
      posterior_x_y_encodings = self._cross_dataset_attention(
          target_x, target_x, target_x_y_encodings)
    else:
      posterior_x_y_encodings = target_x_y_encodings
    if self.model_type == 'cnp':
      # CNP conditions every target on the same averaged context encoding.
      cross_attentive_encodings = tf.tile(
          avg_context_dataset_encodings,
          [1, tf.shape(target_x)[1], 1])

    # Local latents zi: prior vs posterior nets, KL per target point.
    local_z_prior = None
    local_z_posterior = None
    num_targets = tf.shape(target_x)[1]
    if self._local_latent_layer is not None:
      local_z_prior = self._prior_local_latent_layer(
          global_z_prior,
          num_targets,
          prior_x_y_encodings)
      if target_y is None:
        local_z_posterior = local_z_prior
      else:
        local_z_posterior = self._local_latent_layer(
            global_z_posterior,
            num_targets,
            posterior_x_y_encodings)
      local_z_kl = local_z_posterior.distribution.kl_divergence(
          local_z_prior.distribution)
    else:
      local_z_kl = tf.constant(0., shape=(1, 1, 1))
    self.add_loss(lambda: local_z_kl)

    # Decode and fold the explicit data noise into the predictive stddev.
    predictive = self._decoder_layer(
        target_x,
        cross_attentive_encodings,
        local_z_posterior,
        global_z_posterior)
    posterior_predictive_mean = predictive.distribution.mean()
    posterior_predictive_std = tf.sqrt(
        tf.square(predictive.distribution.stddev()) + data_var + eps)
    posterior_predictive = ed.Normal(loc=posterior_predictive_mean,
                                     scale=posterior_predictive_std)

    return posterior_predictive
| apache-2.0 |
baohaojun/dico | app/python/wit/test.py | 1 | 1907 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2008 Sergey Poznyakoff
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import wiki2html
class TestMarkupParserBasic (unittest.TestCase):
def test_colon(self):
self.assert_(self.__test('colon'))
pass
def test_headings(self):
self.assert_(self.__test('headings'))
pass
def test_hz(self):
self.assert_(self.__test('hz'))
pass
def test_numlist(self):
self.assert_(self.__test('numlist'))
pass
def test_unlist(self):
self.assert_(self.__test('unlist'))
pass
def test_door(self):
self.assert_(self.__test('door'))
pass
def test_drzwi(self):
self.assert_(self.__test('drzwi'))
pass
def __test(self, filename):
name_in = 'testdata/' + filename + '.wiki'
name_out = 'testdata/' + filename + '.html'
fh = open(name_out)
buf = ''.join(fh.readlines()).strip()
hwm = wiki2html.HtmlWiktionaryMarkup(filename=name_in, lang="pl")
hwm.parse()
if str(hwm).strip() == buf:
return True
# fail
print "\n>>>%s<<<" % buf
print ">>>%s<<<" % str(hwm).strip()
return False
if __name__ == '__main__':
    # Run the test suite when executed directly.
    unittest.main()
| gpl-3.0 |
40223134/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/urllib/parse.py | 735 | 35170 | """Parse (absolute and relative) URLs.
urlparse module is based upon the following RFC specifications.
RFC 3986 (STD66): "Uniform Resource Identifiers" by T. Berners-Lee, R. Fielding
and L. Masinter, January 2005.
RFC 2732 : "Format for Literal IPv6 Addresses in URL's by R.Hinden, B.Carpenter
and L.Masinter, December 1999.
RFC 2396: "Uniform Resource Identifiers (URI)": Generic Syntax by T.
Berners-Lee, R. Fielding, and L. Masinter, August 1998.
RFC 2368: "The mailto URL scheme", by P.Hoffman , L Masinter, J. Zawinski, July 1998.
RFC 1808: "Relative Uniform Resource Locators", by R. Fielding, UC Irvine, June
1995.
RFC 1738: "Uniform Resource Locators (URL)" by T. Berners-Lee, L. Masinter, M.
McCahill, December 1994
RFC 3986 is considered the current standard and any future changes to
urlparse module should conform with it. The urlparse module is
currently not entirely compliant with this RFC due to defacto
scenarios for parsing, and for backward compatibility purposes, some
parsing quirks from older RFCs are retained. The testcases in
test_urlparse.py provides a good indicator of parsing behavior.
"""
import re
import sys
import collections
__all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag",
"urlsplit", "urlunsplit", "urlencode", "parse_qs",
"parse_qsl", "quote", "quote_plus", "quote_from_bytes",
"unquote", "unquote_plus", "unquote_to_bytes"]
# A classification of schemes ('' means apply by default)
uses_relative = ['ftp', 'http', 'gopher', 'nntp', 'imap',
'wais', 'file', 'https', 'shttp', 'mms',
'prospero', 'rtsp', 'rtspu', '', 'sftp',
'svn', 'svn+ssh']
uses_netloc = ['ftp', 'http', 'gopher', 'nntp', 'telnet',
'imap', 'wais', 'file', 'mms', 'https', 'shttp',
'snews', 'prospero', 'rtsp', 'rtspu', 'rsync', '',
'svn', 'svn+ssh', 'sftp', 'nfs', 'git', 'git+ssh']
uses_params = ['ftp', 'hdl', 'prospero', 'http', 'imap',
'https', 'shttp', 'rtsp', 'rtspu', 'sip', 'sips',
'mms', '', 'sftp', 'tel']
# These are not actually used anymore, but should stay for backwards
# compatibility. (They are undocumented, but have a public-looking name.)
non_hierarchical = ['gopher', 'hdl', 'mailto', 'news',
'telnet', 'wais', 'imap', 'snews', 'sip', 'sips']
uses_query = ['http', 'wais', 'imap', 'https', 'shttp', 'mms',
'gopher', 'rtsp', 'rtspu', 'sip', 'sips', '']
uses_fragment = ['ftp', 'hdl', 'http', 'gopher', 'news',
'nntp', 'wais', 'https', 'shttp', 'snews',
'file', 'prospero', '']
# Characters valid in scheme names
scheme_chars = ('abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'0123456789'
'+-.')
# XXX: Consider replacing with functools.lru_cache
MAX_CACHE_SIZE = 20
_parse_cache = {}
def clear_cache():
    """Clear the parse cache and the quoters cache."""
    # Both are module-level dicts; _safe_quoters lives further down this
    # module next to the quoting helpers.
    _parse_cache.clear()
    _safe_quoters.clear()
# Helpers for bytes handling
# For 3.2, we deliberately require applications that
# handle improperly quoted URLs to do their own
# decoding and encoding. If valid use cases are
# presented, we may relax this by using latin-1
# decoding internally for 3.3
_implicit_encoding = 'ascii'
_implicit_errors = 'strict'
def _noop(obj):
    # Identity result coercion, used when the inputs were already str.
    return obj
def _encode_result(obj, encoding=_implicit_encoding,
                   errors=_implicit_errors):
    # Encode a str result back to bytes for callers that passed bytes in.
    return obj.encode(encoding, errors)
def _decode_args(args, encoding=_implicit_encoding,
                 errors=_implicit_errors):
    # Decode each bytes argument to str; falsy values map to ''.
    return tuple(x.decode(encoding, errors) if x else '' for x in args)
def _coerce_args(*args):
    # Normalize all arguments to str, and hand back a matching result
    # coercion function: identity for str inputs, re-encoding otherwise.
    first_is_str = isinstance(args[0], str)
    for extra in args[1:]:
        # Empty values are exempt so that "scheme=''" style defaults work
        # no matter what type the other arguments have.
        if extra and isinstance(extra, str) != first_is_str:
            raise TypeError("Cannot mix str and non-str arguments")
    if not first_is_str:
        return _decode_args(args) + (_encode_result,)
    return args + (_noop,)
# Result objects are more helpful than simple tuples
class _ResultMixinStr(object):
    """Standard approach to encoding parsed results from str to bytes"""
    __slots__ = ()

    def encode(self, encoding='ascii', errors='strict'):
        # Rebuild the parallel bytes result type with every field encoded.
        return self._encoded_counterpart(*(x.encode(encoding, errors) for x in self))
class _ResultMixinBytes(object):
    """Standard approach to decoding parsed results from bytes to str"""
    __slots__ = ()

    def decode(self, encoding='ascii', errors='strict'):
        # Rebuild the parallel str result type with every field decoded.
        return self._decoded_counterpart(*(x.decode(encoding, errors) for x in self))
class _NetlocResultMixinBase(object):
    """Shared methods for the parsed result objects containing a netloc element"""
    __slots__ = ()

    @property
    def username(self):
        return self._userinfo[0]

    @property
    def password(self):
        return self._userinfo[1]

    @property
    def hostname(self):
        # Empty hostnames collapse to None; non-empty ones are lowercased
        # (host names are case-insensitive per RFC 3986).
        hostname = self._hostinfo[0]
        if not hostname:
            hostname = None
        elif hostname is not None:
            hostname = hostname.lower()
        return hostname

    @property
    def port(self):
        port = self._hostinfo[1]
        if port is not None:
            port = int(port, 10)
            # Return None on an illegal port
            if not ( 0 <= port <= 65535):
                return None
        return port
class _NetlocResultMixinStr(_NetlocResultMixinBase, _ResultMixinStr):
    __slots__ = ()

    @property
    def _userinfo(self):
        """Split the netloc into (username, password); absent parts are None."""
        creds, at_sign, _ = self.netloc.rpartition('@')
        if not at_sign:
            return None, None
        user, colon, pwd = creds.partition(':')
        return user, (pwd if colon else None)

    @property
    def _hostinfo(self):
        """Split the netloc into (hostname, port string); port may be None."""
        _, _, host_part = self.netloc.rpartition('@')
        _, open_bracket, rest = host_part.partition('[')
        if open_bracket:
            # RFC 2732 bracketed IPv6 literal, e.g. "[::1]:80".
            host, _, tail = rest.partition(']')
            _, colon, port = tail.partition(':')
        else:
            host, colon, port = host_part.partition(':')
        if not colon:
            port = None
        return host, port
class _NetlocResultMixinBytes(_NetlocResultMixinBase, _ResultMixinBytes):
    __slots__ = ()

    @property
    def _userinfo(self):
        """Split the netloc into (username, password); absent parts are None."""
        creds, at_sign, _ = self.netloc.rpartition(b'@')
        if not at_sign:
            return None, None
        user, colon, pwd = creds.partition(b':')
        return user, (pwd if colon else None)

    @property
    def _hostinfo(self):
        """Split the netloc into (hostname, port bytes); port may be None."""
        _, _, host_part = self.netloc.rpartition(b'@')
        _, open_bracket, rest = host_part.partition(b'[')
        if open_bracket:
            # RFC 2732 bracketed IPv6 literal, e.g. b"[::1]:80".
            host, _, tail = rest.partition(b']')
            _, colon, port = tail.partition(b':')
        else:
            host, colon, port = host_part.partition(b':')
        if not colon:
            port = None
        return host, port
from collections import namedtuple

# Tuple field layouts shared by the str and bytes result classes below.
_DefragResultBase = namedtuple('DefragResult', 'url fragment')
_SplitResultBase = namedtuple('SplitResult', 'scheme netloc path query fragment')
_ParseResultBase = namedtuple('ParseResult', 'scheme netloc path params query fragment')

# For backwards compatibility, alias _NetlocResultMixinStr
# ResultBase is no longer part of the documented API, but it is
# retained since deprecating it isn't worth the hassle
ResultBase = _NetlocResultMixinStr
# Structured result objects for string data
class DefragResult(_DefragResultBase, _ResultMixinStr):
    __slots__ = ()

    def geturl(self):
        # Re-join the fragment only when one was present.
        if self.fragment:
            return self.url + '#' + self.fragment
        else:
            return self.url
class SplitResult(_SplitResultBase, _NetlocResultMixinStr):
    __slots__ = ()

    def geturl(self):
        # Reassemble the 5-tuple back into a URL string.
        return urlunsplit(self)
class ParseResult(_ParseResultBase, _NetlocResultMixinStr):
    __slots__ = ()

    def geturl(self):
        # Reassemble the 6-tuple back into a URL string.
        return urlunparse(self)
# Structured result objects for bytes data
class DefragResultBytes(_DefragResultBase, _ResultMixinBytes):
    __slots__ = ()

    def geturl(self):
        # Re-join the fragment only when one was present.
        if self.fragment:
            return self.url + b'#' + self.fragment
        else:
            return self.url
class SplitResultBytes(_SplitResultBase, _NetlocResultMixinBytes):
    # Concrete bytes result type returned by urlsplit().
    __slots__ = ()
    def geturl(self):
        """Reassemble the original URL from the five components."""
        return urlunsplit(self)
class ParseResultBytes(_ParseResultBase, _NetlocResultMixinBytes):
    # Concrete bytes result type returned by urlparse().
    __slots__ = ()
    def geturl(self):
        """Reassemble the original URL from the six components."""
        return urlunparse(self)
# Set up the encode/decode result pairs
def _fix_result_transcoding():
    # Link each str result class with its bytes counterpart so the
    # encode()/decode() mixin methods can convert between the families.
    _result_pairs = (
        (DefragResult, DefragResultBytes),
        (SplitResult, SplitResultBytes),
        (ParseResult, ParseResultBytes),
    )
    for _decoded, _encoded in _result_pairs:
        _decoded._encoded_counterpart = _encoded
        _encoded._decoded_counterpart = _decoded
# Run once at import time and remove the helper from the namespace.
_fix_result_transcoding()
del _fix_result_transcoding
def urlparse(url, scheme='', allow_fragments=True):
    """Parse a URL into 6 components:
    <scheme>://<netloc>/<path>;<params>?<query>#<fragment>

    Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
    Note that we don't break the components up in smaller bits
    (e.g. netloc is a single string) and we don't expand % escapes."""
    url, scheme, _coerce_result = _coerce_args(url, scheme)
    scheme, netloc, path, query, fragment = urlsplit(url, scheme,
                                                     allow_fragments)
    # ';params' is only split off the path for schemes that use it.
    if ';' in path and scheme in uses_params:
        path, params = _splitparams(path)
    else:
        params = ''
    return _coerce_result(
        ParseResult(scheme, netloc, path, params, query, fragment))
def _splitparams(url):
if '/' in url:
i = url.find(';', url.rfind('/'))
if i < 0:
return url, ''
else:
i = url.find(';')
return url[:i], url[i+1:]
def _splitnetloc(url, start=0):
delim = len(url) # position of end of domain part of url, default is end
for c in '/?#': # look for delimiters; the order is NOT important
wdelim = url.find(c, start) # find first of this delim
if wdelim >= 0: # if found
delim = min(delim, wdelim) # use earliest delim position
return url[start:delim], url[delim:] # return (domain, rest)
def urlsplit(url, scheme='', allow_fragments=True):
    """Parse a URL into 5 components:
    <scheme>://<netloc>/<path>?<query>#<fragment>
    Return a 5-tuple: (scheme, netloc, path, query, fragment).
    Note that we don't break the components up in smaller bits
    (e.g. netloc is a single string) and we don't expand % escapes."""
    url, scheme, _coerce_result = _coerce_args(url, scheme)
    allow_fragments = bool(allow_fragments)
    # Memoized; the key carries the argument types so str and bytes
    # inputs get distinct cache entries.
    key = url, scheme, allow_fragments, type(url), type(scheme)
    cached = _parse_cache.get(key, None)
    if cached:
        return _coerce_result(cached)
    if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth
        clear_cache()
    netloc = query = fragment = ''
    i = url.find(':')
    if i > 0:
        if url[:i] == 'http': # optimize the common case
            scheme = url[:i].lower()
            url = url[i+1:]
            if url[:2] == '//':
                netloc, url = _splitnetloc(url, 2)
                # A bracket without its mate means a malformed IPv6 host.
                if (('[' in netloc and ']' not in netloc) or
                        (']' in netloc and '[' not in netloc)):
                    raise ValueError("Invalid IPv6 URL")
            if allow_fragments and '#' in url:
                url, fragment = url.split('#', 1)
            if '?' in url:
                url, query = url.split('?', 1)
            v = SplitResult(scheme, netloc, url, query, fragment)
            _parse_cache[key] = v
            return _coerce_result(v)
        for c in url[:i]:
            if c not in scheme_chars:
                break
        else:
            # make sure "url" is not actually a port number (in which case
            # "scheme" is really part of the path)
            rest = url[i+1:]
            if not rest or any(c not in '0123456789' for c in rest):
                # not a port number
                scheme, url = url[:i].lower(), rest
    # Generic (non-'http') path: same splitting steps as the fast path.
    if url[:2] == '//':
        netloc, url = _splitnetloc(url, 2)
        if (('[' in netloc and ']' not in netloc) or
                (']' in netloc and '[' not in netloc)):
            raise ValueError("Invalid IPv6 URL")
    if allow_fragments and '#' in url:
        url, fragment = url.split('#', 1)
    if '?' in url:
        url, query = url.split('?', 1)
    v = SplitResult(scheme, netloc, url, query, fragment)
    _parse_cache[key] = v
    return _coerce_result(v)
def urlunparse(components):
    """Put a parsed URL back together again.

    This may result in a slightly different, but equivalent URL, if the
    URL that was parsed originally had redundant delimiters, e.g. a ?
    with an empty query (the draft states that these are equivalent)."""
    scheme, netloc, url, params, query, fragment, _coerce_result = (
        _coerce_args(*components))
    # Fold params back into the path, then reuse the 5-tuple joiner.
    path = url + ';' + params if params else url
    return _coerce_result(urlunsplit((scheme, netloc, path, query, fragment)))
def urlunsplit(components):
    """Combine the elements of a tuple as returned by urlsplit() into a
    complete URL as a string.

    The data argument can be any five-item iterable.  This may result in
    a slightly different, but equivalent URL, if the URL that was parsed
    originally had unnecessary delimiters (for example, a ? with an
    empty query; the RFC states that these are equivalent)."""
    scheme, netloc, url, query, fragment, _coerce_result = (
        _coerce_args(*components))
    if netloc or (scheme and scheme in uses_netloc and url[:2] != '//'):
        # Re-introduce the '//' authority marker; make sure a non-empty
        # path is separated from it by a '/'.
        if url and not url.startswith('/'):
            url = '/' + url
        url = '//' + (netloc or '') + url
    if scheme:
        url = scheme + ':' + url
    if query:
        url = url + '?' + query
    if fragment:
        url = url + '#' + fragment
    return _coerce_result(url)
def urljoin(base, url, allow_fragments=True):
    """Join a base URL and a possibly relative URL to form an absolute
    interpretation of the latter."""
    if not base:
        return url
    if not url:
        return base
    base, url, _coerce_result = _coerce_args(base, url)
    bscheme, bnetloc, bpath, bparams, bquery, bfragment = \
            urlparse(base, '', allow_fragments)
    scheme, netloc, path, params, query, fragment = \
            urlparse(url, bscheme, allow_fragments)
    # Different scheme, or a scheme that never uses relative URLs:
    # the second URL stands on its own.
    if scheme != bscheme or scheme not in uses_relative:
        return _coerce_result(url)
    if scheme in uses_netloc:
        if netloc:
            return _coerce_result(urlunparse((scheme, netloc, path,
                                              params, query, fragment)))
        netloc = bnetloc
    # An absolute path replaces the base path entirely.
    if path[:1] == '/':
        return _coerce_result(urlunparse((scheme, netloc, path,
                                          params, query, fragment)))
    # Empty relative reference: keep the base path (and query if the
    # reference supplies none).
    if not path and not params:
        path = bpath
        params = bparams
        if not query:
            query = bquery
        return _coerce_result(urlunparse((scheme, netloc, path,
                                          params, query, fragment)))
    # Merge the reference onto the base's directory, then collapse
    # '.' and '..' segments.
    segments = bpath.split('/')[:-1] + path.split('/')
    # XXX The stuff below is bogus in various ways...
    if segments[-1] == '.':
        segments[-1] = ''
    while '.' in segments:
        segments.remove('.')
    while 1:
        i = 1
        n = len(segments) - 1
        while i < n:
            if (segments[i] == '..'
                and segments[i-1] not in ('', '..')):
                del segments[i-1:i+1]
                break
            i = i+1
        else:
            break
    if segments == ['', '..']:
        segments[-1] = ''
    elif len(segments) >= 2 and segments[-1] == '..':
        segments[-2:] = ['']
    return _coerce_result(urlunparse((scheme, netloc, '/'.join(segments),
                                      params, query, fragment)))
def urldefrag(url):
    """Removes any existing fragment from URL.

    Returns a tuple of the defragmented URL and the fragment.  If
    the URL contained no fragments, the second element is the
    empty string.
    """
    url, _coerce_result = _coerce_args(url)
    if '#' not in url:
        return _coerce_result(DefragResult(url, ''))
    s, n, p, a, q, frag = urlparse(url)
    stripped = urlunparse((s, n, p, a, q, ''))
    return _coerce_result(DefragResult(stripped, frag))
_hexdig = '0123456789ABCDEFabcdef'
# Map every two-hex-digit pair (the 'XX' of a '%XX' escape, as bytes)
# to the single byte it encodes.
_hextobyte = {(a + b).encode(): bytes([int(a + b, 16)])
              for a in _hexdig for b in _hexdig}

def unquote_to_bytes(string):
    """unquote_to_bytes('abc%20def') -> b'abc def'."""
    # Note: strings are encoded as UTF-8. This is only an issue if it contains
    # unescaped non-ASCII characters, which URIs should not.
    if not string:
        # Is it a string-like object?
        string.split
        return b''
    if isinstance(string, str):
        string = string.encode('utf-8')
    chunks = string.split(b'%')
    if len(chunks) == 1:
        # No '%' at all: nothing to decode.
        return string
    pieces = [chunks[0]]
    for chunk in chunks[1:]:
        code = _hextobyte.get(chunk[:2])
        if code is None:
            # Not a valid %XX escape: keep the '%' literally.
            pieces.append(b'%')
            pieces.append(chunk)
        else:
            pieces.append(code)
            pieces.append(chunk[2:])
    return b''.join(pieces)

_asciire = re.compile('([\x00-\x7f]+)')

def unquote(string, encoding='utf-8', errors='replace'):
    """Replace %xx escapes by their single-character equivalent.

    The optional encoding and errors parameters specify how to decode
    percent-encoded sequences into Unicode characters, as accepted by
    the bytes.decode() method.  By default, percent-encoded sequences
    are decoded with UTF-8, and invalid sequences are replaced by a
    placeholder character.

    unquote('abc%20def') -> 'abc def'.
    """
    if '%' not in string:
        string.split
        return string
    if encoding is None:
        encoding = 'utf-8'
    if errors is None:
        errors = 'replace'
    # _asciire.split puts ASCII runs at odd indices; only those runs can
    # contain percent escapes, so non-ASCII runs pass through untouched.
    parts = _asciire.split(string)
    out = [parts[0]]
    for idx in range(1, len(parts), 2):
        out.append(unquote_to_bytes(parts[idx]).decode(encoding, errors))
        out.append(parts[idx + 1])
    return ''.join(out)
def parse_qs(qs, keep_blank_values=False, strict_parsing=False,
             encoding='utf-8', errors='replace'):
    """Parse a query given as a string argument into a dict.

    Arguments:

    qs: percent-encoded query string to be parsed

    keep_blank_values: flag indicating whether blank values in
        percent-encoded queries should be treated as blank strings.  A
        true value indicates that blanks should be retained as blank
        strings.  The default false value indicates that blank values
        are to be ignored and treated as if they were not included.

    strict_parsing: flag indicating what to do with parsing errors.  If
        false (the default), errors are silently ignored.  If true,
        errors raise a ValueError exception.

    encoding and errors: specify how to decode percent-encoded sequences
        into Unicode characters, as accepted by the bytes.decode() method.

    Returns a dict mapping each name to a list of its values.
    """
    parsed_result = {}
    for name, value in parse_qsl(qs, keep_blank_values, strict_parsing,
                                 encoding=encoding, errors=errors):
        # Repeated names accumulate their values in order.
        parsed_result.setdefault(name, []).append(value)
    return parsed_result
def parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
              encoding='utf-8', errors='replace'):
    """Parse a query given as a string argument into a list of pairs.

    Arguments:

    qs: percent-encoded query string to be parsed

    keep_blank_values: flag indicating whether blank values in
        percent-encoded queries should be treated as blank strings.  A
        true value indicates that blanks should be retained as blank
        strings.  The default false value indicates that blank values
        are to be ignored and treated as if they were not included.

    strict_parsing: flag indicating what to do with parsing errors.  If
        false (the default), errors are silently ignored.  If true,
        errors raise a ValueError exception.

    encoding and errors: specify how to decode percent-encoded sequences
        into Unicode characters, as accepted by the bytes.decode() method.

    Returns a list of (name, value) tuples.
    """
    qs, _coerce_result = _coerce_args(qs)

    def _unq(piece):
        # Form encoding uses '+' for spaces; then undo %XX escapes.
        piece = unquote(piece.replace('+', ' '),
                        encoding=encoding, errors=errors)
        return _coerce_result(piece)

    # Fields may be separated by either '&' or ';'.
    fields = [p for chunk in qs.split('&') for p in chunk.split(';')]
    result = []
    for field in fields:
        if not field and not strict_parsing:
            continue
        name, eq, value = field.partition('=')
        if not eq:
            # No '=' sign: a bare control-name.
            if strict_parsing:
                raise ValueError("bad query field: %r" % (field,))
            if not keep_blank_values:
                continue
            value = ''
        if value or keep_blank_values:
            result.append((_unq(name), _unq(value)))
    return result
def unquote_plus(string, encoding='utf-8', errors='replace'):
    """Like unquote(), but also replace plus signs by spaces, as required
    for unquoting HTML form values.

    unquote_plus('%7e/abc+def') -> '~/abc def'
    """
    # Form encoding spells spaces as '+'; translate those before
    # undoing the percent escapes.
    return unquote(string.replace('+', ' '), encoding, errors)
# Bytes that never need quoting: RFC 3986 "unreserved" characters.
_ALWAYS_SAFE = frozenset(b'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                         b'abcdefghijklmnopqrstuvwxyz'
                         b'0123456789'
                         b'_.-')
_ALWAYS_SAFE_BYTES = bytes(_ALWAYS_SAFE)
# Cache of Quoter.__getitem__ bound methods, keyed by the extra safe bytes.
_safe_quoters = {}

class Quoter(collections.defaultdict):
    """A mapping from bytes (in range(0,256)) to strings.

    String values are percent-encoded byte values, unless the key < 128,
    and in the "safe" set (either the specified safe set, or default set).
    """
    # defaultdict keeps lookups of already-seen bytes at C speed; only a
    # cache miss runs Python code (__missing__).
    def __init__(self, safe):
        """safe: bytes object of additional characters to pass through."""
        self.safe = _ALWAYS_SAFE.union(safe)

    def __repr__(self):
        # Without this the repr would render as a bare defaultdict.
        return "<Quoter %r>" % dict(self)

    def __missing__(self, b):
        # First lookup of byte *b*: compute its quoting, memoize, return.
        quoted = chr(b) if b in self.safe else '%{:02X}'.format(b)
        self[b] = quoted
        return quoted

def quote(string, safe='/', encoding=None, errors=None):
    """quote('abc def') -> 'abc%20def'

    Each part of a URL, e.g. the path info, the query, etc., has a
    different set of reserved characters that must be quoted.

    RFC 2396 Uniform Resource Identifiers (URI): Generic Syntax lists
    the following reserved characters:

    reserved    = ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" |
                  "$" | ","

    Each of these characters is reserved in some component of a URL,
    but not necessarily in all of them.

    By default, the quote function is intended for quoting the path
    section of a URL.  Thus, it will not encode '/'.  This character
    is reserved, but in typical usage the quote function is being
    called on a path where the existing slash characters are used as
    reserved characters.

    string and safe may be either str or bytes objects.  encoding must
    not be specified if string is a str.

    The optional encoding and errors parameters specify how to deal with
    non-ASCII characters, as accepted by the str.encode method.
    By default, encoding='utf-8' (characters are encoded with UTF-8), and
    errors='strict' (unsupported characters raise a UnicodeEncodeError).
    """
    if isinstance(string, str):
        if not string:
            return string
        if encoding is None:
            encoding = 'utf-8'
        if errors is None:
            errors = 'strict'
        string = string.encode(encoding, errors)
    elif encoding is not None:
        raise TypeError("quote() doesn't support 'encoding' for bytes")
    elif errors is not None:
        raise TypeError("quote() doesn't support 'errors' for bytes")
    return quote_from_bytes(string, safe)

def quote_plus(string, safe='', encoding=None, errors=None):
    """Like quote(), but also replace ' ' with '+', as required for quoting
    HTML form values.  Plus signs in the original string are escaped unless
    they are included in safe.  It also does not have safe default to '/'.
    """
    # Without spaces the regular quote() already produces the right answer.
    has_space = (' ' in string) if isinstance(string, str) else (b' ' in string)
    if not has_space:
        return quote(string, safe, encoding, errors)
    space = ' ' if isinstance(safe, str) else b' '
    # Let the space through quoting, then turn every space into '+'.
    quoted = quote(string, safe + space, encoding, errors)
    return quoted.replace(' ', '+')

def quote_from_bytes(bs, safe='/'):
    """Like quote(), but accepts a bytes object rather than a str, and does
    not perform string-to-bytes encoding.  It always returns an ASCII string.
    quote_from_bytes(b'abc def\x3f') -> 'abc%20def%3F'
    """
    if not isinstance(bs, (bytes, bytearray)):
        raise TypeError("quote_from_bytes() expected bytes")
    if not bs:
        return ''
    if isinstance(safe, str):
        # Normalize 'safe' by converting to bytes and removing non-ASCII chars
        safe = safe.encode('ascii', 'ignore')
    else:
        safe = bytes(c for c in safe if c < 128)
    if not bs.rstrip(_ALWAYS_SAFE_BYTES + safe):
        # Every byte is already safe: no quoting needed.
        return bs.decode()
    quoter = _safe_quoters.get(safe)
    if quoter is None:
        _safe_quoters[safe] = quoter = Quoter(safe).__getitem__
    return ''.join(map(quoter, bs))
def urlencode(query, doseq=False, safe='', encoding=None, errors=None):
    """Encode a sequence of two-element tuples or dictionary into a URL query string.

    If any values in the query arg are sequences and doseq is true, each
    sequence element is converted to a separate parameter.

    If the query arg is a sequence of two-element tuples, the order of the
    parameters in the output will match the order of parameters in the
    input.

    The query arg may be either a string or a bytes type. When query arg is a
    string, the safe, encoding and error parameters are sent the quote_plus for
    encoding.
    """
    if hasattr(query, "items"):
        query = query.items()
    else:
        # It's a bother at times that strings and string-like objects are
        # sequences.
        try:
            # non-sequence items should not work with len()
            # non-empty strings will fail this
            if len(query) and not isinstance(query[0], tuple):
                raise TypeError
            # Zero-length sequences of all types will get here and succeed,
            # but that's a minor nit.  Since the original implementation
            # allowed empty dicts that type of behavior probably should be
            # preserved for consistency
        except TypeError:
            ty, va, tb = sys.exc_info()
            raise TypeError("not a valid non-string sequence "
                            "or mapping object").with_traceback(tb)
    l = []
    if not doseq:
        # Simple case: every value is quoted as a single parameter.
        for k, v in query:
            if isinstance(k, bytes):
                k = quote_plus(k, safe)
            else:
                k = quote_plus(str(k), safe, encoding, errors)
            if isinstance(v, bytes):
                v = quote_plus(v, safe)
            else:
                v = quote_plus(str(v), safe, encoding, errors)
            l.append(k + '=' + v)
    else:
        # doseq: a sequence value expands into one k=elt pair per element.
        for k, v in query:
            if isinstance(k, bytes):
                k = quote_plus(k, safe)
            else:
                k = quote_plus(str(k), safe, encoding, errors)
            if isinstance(v, bytes):
                v = quote_plus(v, safe)
                l.append(k + '=' + v)
            elif isinstance(v, str):
                v = quote_plus(v, safe, encoding, errors)
                l.append(k + '=' + v)
            else:
                try:
                    # Is this a sufficient test for sequence-ness?
                    x = len(v)
                except TypeError:
                    # not a sequence
                    v = quote_plus(str(v), safe, encoding, errors)
                    l.append(k + '=' + v)
                else:
                    # loop over the sequence
                    for elt in v:
                        if isinstance(elt, bytes):
                            elt = quote_plus(elt, safe)
                        else:
                            elt = quote_plus(str(elt), safe, encoding, errors)
                        l.append(k + '=' + elt)
    return '&'.join(l)
# Utilities to parse URLs (most of these return None for missing parts):
# unwrap('<URL:type://host/path>') --> 'type://host/path'
# splittype('type:opaquestring') --> 'type', 'opaquestring'
# splithost('//host[:port]/path') --> 'host[:port]', '/path'
# splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'
# splitpasswd('user:passwd') -> 'user', 'passwd'
# splitport('host:port') --> 'host', 'port'
# splitquery('/path?query') --> '/path', 'query'
# splittag('/path#tag') --> '/path', 'tag'
# splitattr('/path;attr1=value1;attr2=value2;...') ->
# '/path', ['attr1=value1', 'attr2=value2', ...]
# splitvalue('attr=value') --> 'attr', 'value'
# urllib.parse.unquote('abc%20def') -> 'abc def'
# quote('abc def') -> 'abc%20def'
def to_bytes(url):
    """to_bytes(u"URL") --> 'URL'.

    Reject (with UnicodeError) any str URL containing non-ASCII
    characters; most URL schemes require ASCII.  If that changes, the
    conversion can be relaxed.  Non-str input is returned unchanged.
    """
    # XXX historical helper -- get rid of to_bytes() eventually.
    if not isinstance(url, str):
        return url
    try:
        url.encode("ASCII")
    except UnicodeError:
        raise UnicodeError("URL " + repr(url) +
                           " contains non-ASCII characters")
    return url
def unwrap(url):
    """unwrap('<URL:type://host/path>') --> 'type://host/path'."""
    url = str(url).strip()
    # Strip one layer of <...> wrapping, and an optional 'URL:' prefix
    # inside it.
    if url.startswith('<') and url.endswith('>'):
        url = url[1:-1].strip()
        if url.startswith('URL:'):
            url = url[4:].strip()
    return url
_typeprog = None
def splittype(url):
    """splittype('type:opaquestring') --> 'type', 'opaquestring'."""
    global _typeprog
    if _typeprog is None:
        # Compile lazily on first use and cache at module level.
        import re
        _typeprog = re.compile('^([^/:]+):')
    match = _typeprog.match(url)
    if match is None:
        return None, url
    scheme = match.group(1)
    return scheme.lower(), url[len(scheme) + 1:]
_hostprog = None
def splithost(url):
    """splithost('//host[:port]/path') --> 'host[:port]', '/path'."""
    global _hostprog
    if _hostprog is None:
        # Host part ends at the first '/' or '?'.
        import re
        _hostprog = re.compile('^//([^/?]*)(.*)$')
    match = _hostprog.match(url)
    if match is None:
        return None, url
    host_port, path = match.group(1, 2)
    if path and not path.startswith('/'):
        # A non-empty remainder always begins with '/'.
        path = '/' + path
    return host_port, path
_userprog = None
def splituser(host):
    """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'."""
    global _userprog
    if _userprog is None:
        import re
        # Greedy first group: the split happens at the *last* '@'.
        _userprog = re.compile('^(.*)@(.*)$')
    match = _userprog.match(host)
    return match.group(1, 2) if match else (None, host)
_passwdprog = None
def splitpasswd(user):
    """splitpasswd('user:passwd') -> 'user', 'passwd'."""
    global _passwdprog
    if _passwdprog is None:
        import re
        # re.S lets the password part contain newlines.
        _passwdprog = re.compile('^([^:]*):(.*)$', re.S)
    match = _passwdprog.match(user)
    return match.group(1, 2) if match else (user, None)
# splittag('/path#tag') --> '/path', 'tag'
_portprog = None
def splitport(host):
    """splitport('host:port') --> 'host', 'port'."""
    global _portprog
    if _portprog is None:
        import re
        # The port must be all digits; otherwise the input is untouched.
        _portprog = re.compile('^(.*):([0-9]+)$')
    match = _portprog.match(host)
    return match.group(1, 2) if match else (host, None)
_nportprog = None
def splitnport(host, defport=-1):
    """Split host and port, returning numeric port.

    Return given default port if no ':' found; defaults to -1.
    Return numerical port if a valid number is found after ':'.
    Return None if ':' but not a valid number."""
    global _nportprog
    if _nportprog is None:
        import re
        # Greedy first group: split at the last ':'.
        _nportprog = re.compile('^(.*):(.*)$')
    match = _nportprog.match(host)
    if not match:
        return host, defport
    host, port = match.group(1, 2)
    try:
        if not port:
            raise ValueError("no digits")
        nport = int(port)
    except ValueError:
        # Empty or non-numeric port maps to None.
        nport = None
    return host, nport
_queryprog = None
def splitquery(url):
    """splitquery('/path?query') --> '/path', 'query'.

    Splits at the *last* '?'; returns (url, None) when no '?' exists.
    """
    global _queryprog
    if _queryprog is None:
        import re
        # Raw string: '\?' in a plain literal is an invalid escape
        # sequence (DeprecationWarning since Python 3.6).
        _queryprog = re.compile(r'^(.*)\?([^?]*)$')
    match = _queryprog.match(url)
    if match: return match.group(1, 2)
    return url, None
_tagprog = None
def splittag(url):
    """splittag('/path#tag') --> '/path', 'tag'."""
    global _tagprog
    if _tagprog is None:
        import re
        # Split at the last '#'; the tag itself contains no '#'.
        _tagprog = re.compile('^(.*)#([^#]*)$')
    match = _tagprog.match(url)
    return match.group(1, 2) if match else (url, None)
def splitattr(url):
    """splitattr('/path;attr1=value1;attr2=value2;...') ->
        '/path', ['attr1=value1', 'attr2=value2', ...]."""
    # split(';') always yields at least one element, so the head is
    # the path and everything after it is the attribute list.
    path, *attributes = url.split(';')
    return path, attributes
_valueprog = None
def splitvalue(attr):
    """splitvalue('attr=value') --> 'attr', 'value'."""
    global _valueprog
    if _valueprog is None:
        import re
        # Split at the first '='; the value may itself contain '='.
        _valueprog = re.compile('^([^=]*)=(.*)$')
    match = _valueprog.match(attr)
    return match.group(1, 2) if match else (attr, None)
| gpl-3.0 |
GeekTrainer/Flask | Work/Trivia - Module 5/env/Lib/site-packages/pip/vcs/subversion.py | 473 | 10640 | import os
import re
from pip.backwardcompat import urlparse
from pip.index import Link
from pip.util import rmtree, display_path, call_subprocess
from pip.log import logger
from pip.vcs import vcs, VersionControl
_svn_xml_url_re = re.compile('url="([^"]+)"')
_svn_rev_re = re.compile('committed-rev="(\d+)"')
_svn_url_re = re.compile(r'URL: (.+)')
_svn_revision_re = re.compile(r'Revision: (.+)')
_svn_info_xml_rev_re = re.compile(r'\s*revision="(\d+)"')
_svn_info_xml_url_re = re.compile(r'<url>(.*)</url>')
class Subversion(VersionControl):
    """pip VCS backend for Subversion checkouts (``svn+`` URLs)."""
    name = 'svn'
    dirname = '.svn'
    repo_name = 'checkout'
    schemes = ('svn', 'svn+ssh', 'svn+http', 'svn+https', 'svn+svn')
    bundle_file = 'svn-checkout.txt'
    guide = ('# This was an svn checkout; to make it a checkout again run:\n'
            'svn checkout --force -r %(rev)s %(url)s .\n')

    def get_info(self, location):
        """Returns (url, revision), where both are strings"""
        assert not location.rstrip('/').endswith(self.dirname), 'Bad directory: %s' % location
        # `svn info` output is locale-dependent; force C for stable parsing.
        output = call_subprocess(
            [self.cmd, 'info', location], show_stdout=False, extra_environ={'LANG': 'C'})
        match = _svn_url_re.search(output)
        if not match:
            logger.warn('Cannot determine URL of svn checkout %s' % display_path(location))
            logger.info('Output that cannot be parsed: \n%s' % output)
            return None, None
        url = match.group(1).strip()
        match = _svn_revision_re.search(output)
        if not match:
            logger.warn('Cannot determine revision of svn checkout %s' % display_path(location))
            logger.info('Output that cannot be parsed: \n%s' % output)
            return url, None
        return url, match.group(1)

    def parse_vcs_bundle_file(self, content):
        """Extract (url, rev) from a pip bundle description file.

        Lines look like '-rREV URL'; comments and blanks are skipped.
        """
        for line in content.splitlines():
            if not line.strip() or line.strip().startswith('#'):
                continue
            match = re.search(r'^-r\s*([^ ])?', line)
            if not match:
                return None, None
            rev = match.group(1)
            rest = line[match.end():].strip().split(None, 1)[0]
            return rest, rev
        return None, None

    def export(self, location):
        """Export the svn repository at the url to the destination location"""
        url, rev = self.get_url_rev()
        rev_options = get_rev_options(url, rev)
        logger.notify('Exporting svn repository %s to %s' % (url, location))
        logger.indent += 2
        try:
            if os.path.exists(location):
                # Subversion doesn't like to check out over an existing directory
                # --force fixes this, but was only added in svn 1.5
                rmtree(location)
            call_subprocess(
                [self.cmd, 'export'] + rev_options + [url, location],
                filter_stdout=self._filter, show_stdout=False)
        finally:
            logger.indent -= 2

    def switch(self, dest, url, rev_options):
        """Point an existing checkout at a different repository URL."""
        call_subprocess(
            [self.cmd, 'switch'] + rev_options + [url, dest])

    def update(self, dest, rev_options):
        """Update an existing checkout to the requested revision."""
        call_subprocess(
            [self.cmd, 'update'] + rev_options + [dest])

    def obtain(self, dest):
        """Check the repository out into *dest* (fresh or replacing)."""
        url, rev = self.get_url_rev()
        rev_options = get_rev_options(url, rev)
        if rev:
            rev_display = ' (to revision %s)' % rev
        else:
            rev_display = ''
        # check_destination decides whether dest must be (re)created.
        if self.check_destination(dest, url, rev_options, rev_display):
            logger.notify('Checking out %s%s to %s'
                          % (url, rev_display, display_path(dest)))
            call_subprocess(
                [self.cmd, 'checkout', '-q'] + rev_options + [url, dest])

    def get_location(self, dist, dependency_links):
        """Find the dependency link whose #egg= fragment matches *dist*."""
        for url in dependency_links:
            egg_fragment = Link(url).egg_fragment
            if not egg_fragment:
                continue
            if '-' in egg_fragment:
                ## FIXME: will this work when a package has - in the name?
                key = '-'.join(egg_fragment.split('-')[:-1]).lower()
            else:
                key = egg_fragment
            if key == dist.key:
                return url.split('#', 1)[0]
        return None

    def get_revision(self, location):
        """
        Return the maximum revision for all files under a given location
        """
        # Note: taken from setuptools.command.egg_info
        revision = 0
        for base, dirs, files in os.walk(location):
            if self.dirname not in dirs:
                dirs[:] = []
                continue # no sense walking uncontrolled subdirs
            dirs.remove(self.dirname)
            entries_fn = os.path.join(base, self.dirname, 'entries')
            if not os.path.exists(entries_fn):
                ## FIXME: should we warn?
                continue
            dirurl, localrev = self._get_svn_url_rev(base)
            if base == location:
                base_url = dirurl + '/' # save the root url
            elif not dirurl or not dirurl.startswith(base_url):
                dirs[:] = []
                continue # not part of the same svn tree, skip it
            revision = max(revision, localrev)
        return revision

    def get_url_rev(self):
        # hotfix the URL scheme after removing svn+ from svn+ssh:// readd it
        url, rev = super(Subversion, self).get_url_rev()
        if url.startswith('ssh://'):
            url = 'svn+' + url
        return url, rev

    def get_url(self, location):
        """Return the repository URL for the checkout containing *location*."""
        # In cases where the source is in a subdirectory, not alongside setup.py
        # we have to look up in the location until we find a real setup.py
        orig_location = location
        while not os.path.exists(os.path.join(location, 'setup.py')):
            last_location = location
            location = os.path.dirname(location)
            if location == last_location:
                # We've traversed up to the root of the filesystem without finding setup.py
                logger.warn("Could not find setup.py for directory %s (tried all parent directories)"
                            % orig_location)
                return None
        return self._get_svn_url_rev(location)[0]

    def _get_svn_url_rev(self, location):
        """Return (url, max_revision) parsed from .svn metadata at *location*.

        Supports the pre-1.7 plain and XML .svn/entries formats, and falls
        back to `svn info --xml` for subversion >= 1.7 working copies.
        """
        from pip.exceptions import InstallationError
        f = open(os.path.join(location, self.dirname, 'entries'))
        data = f.read()
        f.close()
        if data.startswith('8') or data.startswith('9') or data.startswith('10'):
            # Plain-text entries format (svn 1.4-1.6): records are
            # separated by form feeds.
            data = list(map(str.splitlines, data.split('\n\x0c\n')))
            del data[0][0] # get rid of the '8'
            url = data[0][3]
            revs = [int(d[9]) for d in data if len(d) > 9 and d[9]] + [0]
        elif data.startswith('<?xml'):
            match = _svn_xml_url_re.search(data)
            if not match:
                raise ValueError('Badly formatted data: %r' % data)
            url = match.group(1) # get repository URL
            revs = [int(m.group(1)) for m in _svn_rev_re.finditer(data)] + [0]
        else:
            try:
                # subversion >= 1.7
                xml = call_subprocess([self.cmd, 'info', '--xml', location], show_stdout=False)
                url = _svn_info_xml_url_re.search(xml).group(1)
                revs = [int(m.group(1)) for m in _svn_info_xml_rev_re.finditer(xml)]
            except InstallationError:
                url, revs = None, []
        if revs:
            rev = max(revs)
        else:
            rev = 0
        return url, rev

    def get_tag_revs(self, svn_tag_url):
        """Return [(tag, revision), ...] for every tag under *svn_tag_url*."""
        stdout = call_subprocess(
            [self.cmd, 'ls', '-v', svn_tag_url], show_stdout=False)
        results = []
        for line in stdout.splitlines():
            parts = line.split()
            rev = int(parts[0])
            tag = parts[-1].strip('/')
            results.append((tag, rev))
        return results

    def find_tag_match(self, rev, tag_revs):
        """Return the tag whose revision most closely follows *rev*, if any."""
        best_match_rev = None
        best_tag = None
        for tag, tag_rev in tag_revs:
            if (tag_rev > rev and
                (best_match_rev is None or best_match_rev > tag_rev)):
                # FIXME: Is best_match > tag_rev really possible?
                # or is it a sign something is wacky?
                best_match_rev = tag_rev
                best_tag = tag
        return best_tag

    def get_src_requirement(self, dist, location, find_tags=False):
        """Build an 'svn+URL@rev#egg=name' requirement for the checkout."""
        repo = self.get_url(location)
        if repo is None:
            return None
        parts = repo.split('/')
        ## FIXME: why not project name?
        egg_project_name = dist.egg_name().split('-', 1)[0]
        rev = self.get_revision(location)
        if parts[-2] in ('tags', 'tag'):
            # It's a tag, perfect!
            full_egg_name = '%s-%s' % (egg_project_name, parts[-1])
        elif parts[-2] in ('branches', 'branch'):
            # It's a branch :(
            full_egg_name = '%s-%s-r%s' % (dist.egg_name(), parts[-1], rev)
        elif parts[-1] == 'trunk':
            # Trunk :-/
            full_egg_name = '%s-dev_r%s' % (dist.egg_name(), rev)
            if find_tags:
                # Try to upgrade the trunk checkout to an equivalent tag.
                tag_url = '/'.join(parts[:-1]) + '/tags'
                tag_revs = self.get_tag_revs(tag_url)
                match = self.find_tag_match(rev, tag_revs)
                if match:
                    logger.notify('trunk checkout %s seems to be equivalent to tag %s' % match)
                    repo = '%s/%s' % (tag_url, match)
                    full_egg_name = '%s-%s' % (egg_project_name, match)
        else:
            # Don't know what it is
            logger.warn('svn URL does not fit normal structure (tags/branches/trunk): %s' % repo)
            full_egg_name = '%s-dev_r%s' % (egg_project_name, rev)
        return 'svn+%s@%s#egg=%s' % (repo, rev, full_egg_name)
def get_rev_options(url, rev):
    """Build the svn command-line options for *rev* plus any credentials
    embedded in *url* (user[:password]@host)."""
    rev_options = ['-r', rev] if rev else []
    r = urlparse.urlsplit(url)
    if hasattr(r, 'username'):
        # >= Python-2.5: SplitResult exposes the credentials directly.
        username, password = r.username, r.password
    else:
        # Older urlparse: dig the credentials out of the netloc by hand.
        netloc = r[1]
        if '@' not in netloc:
            username = password = None
        else:
            auth = netloc.split('@')[0]
            if ':' in auth:
                username, password = auth.split(':', 1)
            else:
                username, password = auth, None
    if username:
        rev_options += ['--username', username]
    if password:
        rev_options += ['--password', password]
    return rev_options
vcs.register(Subversion)
| apache-2.0 |
guettli/django | tests/validation/test_validators.py | 38 | 1428 | from __future__ import unicode_literals
from . import ValidationTestCase
from .models import ModelToValidate
class TestModelsWithValidators(ValidationTestCase):
    """Exercise custom field validators declared on ModelToValidate."""
    def test_custom_validator_passes_for_correct_value(self):
        # 42 is the only value accepted by both validated fields.
        mtv = ModelToValidate(number=10, name='Some Name', f_with_custom_validator=42,
                              f_with_iterable_of_validators=42)
        self.assertIsNone(mtv.full_clean())
    def test_custom_validator_raises_error_for_incorrect_value(self):
        # 12 violates the single custom validator only.
        mtv = ModelToValidate(number=10, name='Some Name', f_with_custom_validator=12,
                              f_with_iterable_of_validators=42)
        self.assertFailsValidation(mtv.full_clean, ['f_with_custom_validator'])
        self.assertFieldFailsValidationWithMessage(
            mtv.full_clean,
            'f_with_custom_validator',
            ['This is not the answer to life, universe and everything!']
        )
    def test_field_validators_can_be_any_iterable(self):
        # The validators= option accepts any iterable, not only a list.
        mtv = ModelToValidate(number=10, name='Some Name', f_with_custom_validator=42,
                              f_with_iterable_of_validators=12)
        self.assertFailsValidation(mtv.full_clean, ['f_with_iterable_of_validators'])
        self.assertFieldFailsValidationWithMessage(
            mtv.full_clean,
            'f_with_iterable_of_validators',
            ['This is not the answer to life, universe and everything!']
        )
| bsd-3-clause |
TechBK/horizon-dev | openstack_dashboard/dashboards/project/access_and_security/api_access/urls.py | 54 | 1185 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import patterns
from django.conf.urls import url
from openstack_dashboard.dashboards.project.access_and_security.\
api_access import views
urlpatterns = patterns(
    '',
    # NOTE(review): django.conf.urls.patterns() was deprecated in Django 1.8
    # and removed in 1.10; a plain list of url() entries replaces it.
    # Download the signed EC2 credentials bundle.
    url(r'^ec2/$', views.download_ec2_bundle, name='ec2'),
    # Download the OpenStack RC environment file.
    url(r'^openrc/$', views.download_rc_file, name='openrc'),
    url(r'^view_credentials/$', views.CredentialsView.as_view(),
        name='view_credentials')
)
| apache-2.0 |
gitprouser/appengine-bottle-skeleton | lib/requests_toolbelt/auth/guess.py | 32 | 4944 | # -*- coding: utf-8 -*-
"""The module containing the code for GuessAuth."""
from requests import auth
from requests import cookies
from . import _digest_auth_compat as auth_compat, http_proxy_digest
class GuessAuth(auth.AuthBase):
    """Guesses the auth type by the WWW-Authentication header.

    On the first response carrying a 401, inspects the ``WWW-Authenticate``
    header and retries the request with either Basic or Digest credentials.
    The chosen auth object is cached in ``self.auth`` and reused for all
    subsequent requests.
    """

    def __init__(self, username, password):
        self.username = username
        self.password = password
        # Concrete auth handler, chosen lazily on the first 401.
        self.auth = None
        # Seekable-body position recorded before sending, so the body can be
        # rewound when the request has to be replayed.
        self.pos = None

    def _handle_basic_auth_401(self, r, kwargs):
        """Replay the 401'd request with HTTP Basic credentials attached."""
        if self.pos is not None:
            r.request.body.seek(self.pos)

        # Consume content and release the original connection
        # to allow our new request to reuse the same one.
        r.content
        r.raw.release_conn()
        prep = r.request.copy()
        if not hasattr(prep, '_cookies'):
            prep._cookies = cookies.RequestsCookieJar()
        cookies.extract_cookies_to_jar(prep._cookies, r.request, r.raw)
        prep.prepare_cookies(prep._cookies)

        self.auth = auth.HTTPBasicAuth(self.username, self.password)
        prep = self.auth(prep)
        _r = r.connection.send(prep, **kwargs)
        # Keep the original 401 response in the history of the retried one.
        _r.history.append(r)
        _r.request = prep

        return _r

    def _handle_digest_auth_401(self, r, kwargs):
        """Delegate the 401 to requests' Digest auth implementation."""
        self.auth = auth_compat.HTTPDigestAuth(self.username, self.password)
        try:
            self.auth.init_per_thread_state()
        except AttributeError:
            # If we're not on requests 2.8.0+ this method does not exist and
            # is not relevant.
            pass

        # Check that the attr exists because much older versions of requests
        # set this attribute lazily. For example:
        # https://github.com/kennethreitz/requests/blob/33735480f77891754304e7f13e3cdf83aaaa76aa/requests/auth.py#L59
        if (hasattr(self.auth, 'num_401_calls') and
                self.auth.num_401_calls is None):
            self.auth.num_401_calls = 1
        # Digest auth would resend the request by itself. We can take a
        # shortcut here.
        return self.auth.handle_401(r, **kwargs)

    def handle_401(self, r, **kwargs):
        """Resends a request with auth headers, if needed."""
        www_authenticate = r.headers.get('www-authenticate', '').lower()

        if 'basic' in www_authenticate:
            return self._handle_basic_auth_401(r, kwargs)

        if 'digest' in www_authenticate:
            return self._handle_digest_auth_401(r, kwargs)

    def __call__(self, request):
        # Once an auth type has been guessed, use it directly.
        if self.auth is not None:
            return self.auth(request)

        # Remember where a seekable body starts so a retry can rewind it;
        # non-seekable/absent bodies simply leave self.pos untouched.
        try:
            self.pos = request.body.tell()
        except AttributeError:
            pass

        request.register_hook('response', self.handle_401)
        return request
class GuessProxyAuth(GuessAuth):
    """Guesses the auth type from both the WWW-Authenticate and
    Proxy-Authenticate headers, handling origin-server (401) challenges via
    :class:`GuessAuth` and proxy (407) challenges here.
    """

    def __init__(self, username=None, password=None,
                 proxy_username=None, proxy_password=None):
        super(GuessProxyAuth, self).__init__(username, password)
        self.proxy_username = proxy_username
        self.proxy_password = proxy_password
        # Concrete proxy-auth handler, chosen lazily on the first 407.
        self.proxy_auth = None

    def _handle_basic_auth_407(self, r, kwargs):
        """Replay the 407'd request with HTTP Basic proxy credentials."""
        if self.pos is not None:
            r.request.body.seek(self.pos)

        # Consume content and release the original connection so the retried
        # request can reuse it.
        r.content
        r.raw.release_conn()
        prep = r.request.copy()
        if not hasattr(prep, '_cookies'):
            prep._cookies = cookies.RequestsCookieJar()
        cookies.extract_cookies_to_jar(prep._cookies, r.request, r.raw)
        prep.prepare_cookies(prep._cookies)

        self.proxy_auth = auth.HTTPProxyAuth(self.proxy_username,
                                             self.proxy_password)
        prep = self.proxy_auth(prep)
        _r = r.connection.send(prep, **kwargs)
        # Keep the original 407 response in the history of the retried one.
        _r.history.append(r)
        _r.request = prep

        return _r

    def _handle_digest_auth_407(self, r, kwargs):
        """Delegate the 407 to the proxy Digest auth implementation."""
        self.proxy_auth = http_proxy_digest.HTTPProxyDigestAuth(
            username=self.proxy_username,
            password=self.proxy_password)
        try:
            # BUG FIX: this previously called self.auth.init_per_thread_state()
            # -- the origin-server auth, typically still None at this point --
            # so the call always raised AttributeError and the freshly created
            # proxy auth was left without its per-thread state on
            # requests 2.8.0+. Initialize the object we are about to use.
            self.proxy_auth.init_per_thread_state()
        except AttributeError:
            # Requests older than 2.8.0 has no per-thread state to set up.
            pass

        return self.proxy_auth.handle_407(r, **kwargs)

    def handle_407(self, r, **kwargs):
        """Resend the request with proxy auth headers, if needed."""
        proxy_authenticate = r.headers.get('Proxy-Authenticate', '').lower()

        if 'basic' in proxy_authenticate:
            return self._handle_basic_auth_407(r, kwargs)

        if 'digest' in proxy_authenticate:
            return self._handle_digest_auth_407(r, kwargs)

    def __call__(self, request):
        if self.proxy_auth is not None:
            request = self.proxy_auth(request)

        # Record a seekable body's start position for possible replays.
        try:
            self.pos = request.body.tell()
        except AttributeError:
            pass

        request.register_hook('response', self.handle_407)
        return super(GuessProxyAuth, self).__call__(request)
| apache-2.0 |
rcarauta/rcarauta-projects | node_modules/closure-util/.deps/library/87f29481f1e57fbba986e2304ac80d4173f94c9c/closure/bin/build/source.py | 74 | 3924 | # Copyright 2009 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Scans a source JS file for its provided and required namespaces.
Simple class to scan a JavaScript file and express its dependencies.
"""
__author__ = 'nnaze@google.com'
import codecs
import re
_BASE_REGEX_STRING = r'^\s*goog\.%s\(\s*[\'"](.+)[\'"]\s*\)'
_MODULE_REGEX = re.compile(_BASE_REGEX_STRING % 'module')
_PROVIDE_REGEX = re.compile(_BASE_REGEX_STRING % 'provide')

_REQUIRE_REGEX_STRING = (r'^\s*(?:(?:var|let|const)\s+[a-zA-Z_$][a-zA-Z0-9$_]*'
                         r'\s*=\s*)?goog\.require\(\s*[\'"](.+)[\'"]\s*\)')
_REQUIRES_REGEX = re.compile(_REQUIRE_REGEX_STRING)


class Source(object):
  """A JavaScript source together with the namespaces it provides/requires."""

  # Matches a "/* ... */" comment.
  # Note: We can't definitively distinguish a "/*" in a string literal without a
  # state machine tokenizer. We'll assume that a line starting with whitespace
  # and "/*" is a comment.
  _COMMENT_REGEX = re.compile(
      r"""
      ^\s* # Start of a new line and whitespace
      /\* # Opening "/*"
      .*? # Non greedy match of any characters (including newlines)
      \*/ # Closing "*/""",
      re.MULTILINE | re.DOTALL | re.VERBOSE)

  def __init__(self, source):
    """Scan the given JavaScript source immediately on construction.

    Args:
      source: str, The JavaScript source.
    """
    self.provides = set()
    self.requires = set()
    self.is_goog_module = False
    self._source = source
    self._ScanSource()

  def GetSource(self):
    """Return the raw source as a string."""
    return self._source

  @classmethod
  def _StripComments(cls, source):
    """Return the source with block comments removed."""
    return cls._COMMENT_REGEX.sub('', source)

  @classmethod
  def _HasProvideGoogFlag(cls, source):
    """Determine whether any comment carries the @provideGoog flag."""
    return any('@provideGoog' in comment_content
               for comment_content in cls._COMMENT_REGEX.findall(source))

  def _ScanSource(self):
    """Populate provides/requires by scanning the comment-stripped source."""
    raw_source = self.GetSource()
    for line in self._StripComments(raw_source).splitlines():
      provide_match = _PROVIDE_REGEX.match(line)
      if provide_match:
        self.provides.add(provide_match.group(1))

      module_match = _MODULE_REGEX.match(line)
      if module_match:
        self.provides.add(module_match.group(1))
        self.is_goog_module = True

      require_match = _REQUIRES_REGEX.match(line)
      if require_match:
        self.requires.add(require_match.group(1))

    # Closure's base file implicitly provides 'goog'; that is indicated with
    # the @provideGoog flag, and the base file must declare nothing else.
    if self._HasProvideGoogFlag(raw_source):
      if self.provides or self.requires:
        raise Exception(
            'Base file should not provide or require namespaces.')
      self.provides.add('goog')
def GetFileContents(path):
  """Read the file at path and return its contents as a string.

  The file is decoded as UTF-8, transparently skipping a BOM if present.

  Args:
    path: str, Path to file.

  Returns:
    str, Contents of file.

  Raises:
    IOError: An error occurred opening or reading the file.
  """
  try:
    with codecs.open(path, encoding='utf-8-sig') as fileobj:
      return fileobj.read()
  except IOError as error:
    # Re-raise with the offending path included for easier debugging.
    raise IOError('An error occurred opening or reading the file: %s. %s'
                  % (path, error))
| mit |
eltoncarr/tubular | tubular/jenkins.py | 1 | 6697 | """
Methods to interact with the Jenkins API to perform various tasks.
"""
from __future__ import absolute_import
from __future__ import division
import logging
import math
import backoff
from jenkinsapi.jenkins import Jenkins
from jenkinsapi.custom_exceptions import JenkinsAPIException
from requests.exceptions import HTTPError
from tubular.exception import BackendError
LOG = logging.getLogger(__name__)
def _poll_giveup(data):
    u"""Raise a BackendError once the polling retries are exhausted."""
    # The decorated poller was invoked with the Build object as its only
    # positional argument, so it is the first entry of 'args'.
    build = data.get(u'args')[0]
    raise BackendError(u'Timed out waiting for build {} to finish.'.format(build.name))
def _backoff_timeout(timeout, base=2, factor=1):
u"""
Return a tuple of (wait_gen, max_tries) so that backoff will only try up to `timeout` seconds.
|timeout (s)|max attempts|wait durations |
|----------:|-----------:|---------------------:|
|1 |2 |1 |
|5 |4 |1, 2, 2 |
|10 |5 |1, 2, 4, 3 |
|30 |6 |1, 2, 4, 8, 13 |
|60 |8 |1, 2, 4, 8, 16, 32, 37|
|300 |10 |1, 2, 4, 8, 16, 32, 64|
| | |128, 44 |
|600 |11 |1, 2, 4, 8, 16, 32, 64|
| | |128, 256, 89 |
|3600 |13 |1, 2, 4, 8, 16, 32, 64|
| | |128, 256, 512, 1024, |
| | |1553 |
"""
# Total duration of sum(factor * base ** n for n in range(K)) = factor*(base**K - 1)/(base - 1),
# where K is the number of retries, or max_tries - 1 (since the first try doesn't require a wait)
#
# Solving for K, K = log(timeout * (base - 1) / factor + 1, base)
#
# Using the next smallest integer K will give us a number of elements from
# the exponential sequence to take and still be less than the timeout.
tries = int(math.log(timeout * (base - 1) / factor + 1, base))
remainder = timeout - (factor * (base ** tries - 1)) / (base - 1)
def expo():
u"""Compute an exponential backoff wait period, but capped to an expected max timeout"""
# pylint: disable=invalid-name
n = 0
while True:
a = factor * base ** n
if n >= tries:
yield remainder
else:
yield a
n += 1
# tries tells us the largest standard wait using the standard progression (before being capped)
# tries + 1 because backoff waits one fewer times than max_tries (the first attempt has no wait time).
# If a remainder, then we need to make one last attempt to get the target timeout (so tries + 2)
if remainder == 0:
return expo, tries + 1
else:
return expo, tries + 2
def trigger_build(base_url, user_name, user_token, job_name, job_token,
                  job_cause=None, job_params=None, timeout=60 * 30):
    u"""
    Trigger a jenkins job/project (note that jenkins uses these terms interchangeably)

    Args:
        base_url (str): The base URL for the jenkins server, e.g. https://test-jenkins.testeng.edx.org
        user_name (str): The jenkins username
        user_token (str): API token for the user. Available at {base_url}/user/{user_name)/configure
        job_name (str): The Jenkins job name, e.g. test-project
        job_token (str): Jobs must be configured with the option "Trigger builds remotely" selected.
            Under this option, you must provide an authorization token (configured in the job)
            in the form of a string so that only those who know it would be able to remotely
            trigger this project's builds.
        job_cause (str): Text that will be included in the recorded build cause
        job_params (set of tuples): Parameter names and their values to pass to the job.
            May be omitted (None) to trigger the job without parameters.
        timeout (int): The maximum number of seconds to wait for the jenkins build to complete (measured
            from when the job is triggered.)

    Returns:
        A the status of the build that was triggered

    Raises:
        BackendError: if the Jenkins job could not be triggered successfully
    """
    @backoff.on_predicate(
        backoff.constant,
        interval=60,
        max_tries=timeout / 60 + 1,
        on_giveup=_poll_giveup,
        # We aren't worried about concurrent access, so turn off jitter
        jitter=None,
    )
    def poll_build_for_result(build):
        u"""
        Poll for the build running, with exponential backoff, capped to ``timeout`` seconds.
        The on_predicate decorator is used to retry when the return value
        of the target function is True.
        """
        return not build.is_running()

    # Create a dict with key/value pairs from the job_params
    # that were passed in like this: --param FOO bar --param BAZ biz
    # These will get passed to the job as string parameters like this:
    # {u'FOO': u'bar', u'BAZ': u'biz'}
    # BUG FIX: job_params defaults to None, and iterating None raised a
    # TypeError whenever the caller omitted it; treat None as "no params".
    request_params = {}
    for param in job_params or []:
        request_params[param[0]] = param[1]

    # Contact jenkins, log in, and get the base data on the system.
    try:
        jenkins = Jenkins(base_url, username=user_name, password=user_token)
    except (JenkinsAPIException, HTTPError) as err:
        raise BackendError(str(err))

    if not jenkins.has_job(job_name):
        msg = u'Job not found: {}.'.format(job_name)
        msg += u' Verify that you have permissions for the job and double check the spelling of its name.'
        raise BackendError(msg)

    # This will start the job and will return a QueueItem object which can be used to get build results
    job = jenkins[job_name]
    queue_item = job.invoke(securitytoken=job_token, build_params=request_params, cause=job_cause)
    LOG.info(u'Added item to jenkins. Server: {} Job: {} '.format(
        jenkins.base_server_url(), queue_item
    ))

    # Block this script until we are through the queue and the job has begun to build.
    queue_item.block_until_building()
    build = queue_item.get_build()
    LOG.info(u'Created build {}'.format(build))
    LOG.info(u'See {}'.format(build.baseurl))

    # Now block until you get a result back from the build.
    poll_build_for_result(build)

    # Update the build's internal state, so that the final status is available
    build.poll()
    status = build.get_status()
    LOG.info(u'Build status: {status}'.format(status=status))
    return status
| agpl-3.0 |
mats116/ElasticBigQuery | fabfile.py | 4 | 2916 | """
REQUIREMENTS:
- install pip with distribute (http://packages.python.org/distribute/)
- sudo pip install Fabric
"""
from fabric.api import local
def lang(mode="extract"):
    """
    Extract translatable strings and refresh every locale catalog, or
    compile the catalogs when mode == "compile".

    REQUIREMENTS:
    - Install before pip with distribute_setup.py (Read the environment setup document)
    - sudo pip install babel
    - sudo pip install jinja2

    HOW TO RUN:
    option 1) fab lang
    option 2) fab lang:compile
    """
    if mode == "compile":
        local("pybabel compile -f -d ./locale")
        return

    local("pybabel extract -F ./locale/babel.cfg -o ./locale/messages.pot ./ --sort-output --no-location --omit-header")
    # One pybabel update per supported locale, in the same order as before.
    locales = ("cs_CZ", "de_DE", "en_US", "es_ES", "fr_FR", "id_ID",
               "it_IT", "nl_NL", "pt_BR", "ru_RU", "vi_VN", "zh_CN")
    for code in locales:
        local("pybabel update -l {0} -d ./locale -i ./locale/messages.pot"
              " --previous --ignore-obsolete".format(code))
def start(mode="normal"):
    """
    Run the App Engine dev server on port 8002, optionally wiping the
    local datastore first.

    HOW TO RUN:
    option 1) fab start
    option 2) fab start:clear
    """
    command = "dev_appserver.py ./ --host 0.0.0.0 --port 8002"
    if mode == "clear":
        command += " --clear_datastore=yes"
    local(command)
def deploy():
    """
    Deploy the current directory's app to App Engine via OAuth2.

    app.yaml never has to be version:default
    """
    local("appcfg.py --oauth2 update .")
def test(os="mac"):
    """
    Run the test suite against the local App Engine SDK.

    REQUIREMENTS:
    - install pip with distribute (http://packages.python.org/distribute/)
    - sudo pip install mock
    - sudo pip install webtest
    - sudo pip install pyquery

    HOW TO RUN:
    option 1) fab test
    option 2) fab test:mac
    option 3) fab test:linux
    """
    # NOTE(review): the parameter name `os` shadows the stdlib module; it is
    # harmless here (this function never imports os) but worth renaming.
    # Map OS name -> App Engine SDK path. "linux" is advertised above but has
    # no entry yet, so fab test:linux currently raises KeyError.
    path = {
        "mac": "/usr/local/google_appengine",
    }[os]
local("python testrunner.py {0} ./".format(path)) | lgpl-3.0 |
cslzchen/osf.io | osf_tests/test_reviewable.py | 11 | 1303 | import mock
import pytest
from osf.models import Preprint
from osf.utils.workflows import DefaultStates
from osf_tests.factories import PreprintFactory, AuthUserFactory
@pytest.mark.django_db
class TestReviewable:
    """Drives a pre-moderation preprint through the moderation state machine
    and checks each transition is persisted to the database."""

    # request_identifiers is patched out so accepting a preprint does not
    # trigger real identifier requests.
    @mock.patch('website.identifiers.utils.request_identifiers')
    def test_state_changes(self, _):
        user = AuthUserFactory()
        preprint = PreprintFactory(provider__reviews_workflow='pre-moderation', is_published=False)
        assert preprint.machine_state == DefaultStates.INITIAL.value

        # submit: INITIAL -> PENDING
        preprint.run_submit(user)
        assert preprint.machine_state == DefaultStates.PENDING.value

        # accept: PENDING -> ACCEPTED; a fresh query confirms it was saved.
        preprint.run_accept(user, 'comment')
        assert preprint.machine_state == DefaultStates.ACCEPTED.value
        from_db = Preprint.objects.get(id=preprint.id)
        assert from_db.machine_state == DefaultStates.ACCEPTED.value

        # reject: ACCEPTED -> REJECTED, persisted.
        preprint.run_reject(user, 'comment')
        assert preprint.machine_state == DefaultStates.REJECTED.value
        from_db.refresh_from_db()
        assert from_db.machine_state == DefaultStates.REJECTED.value

        # accept again: REJECTED -> ACCEPTED, persisted.
        preprint.run_accept(user, 'comment')
        assert preprint.machine_state == DefaultStates.ACCEPTED.value
        from_db.refresh_from_db()
        assert from_db.machine_state == DefaultStates.ACCEPTED.value
| apache-2.0 |
harshilasu/GraphicMelon | y/google-cloud-sdk/lib/googlecloudapis/compute/v1/compute_v1_messages.py | 5 | 211698 | """Generated message classes for compute version v1.
API for the Google Compute Engine service.
"""
from protorpc import messages
from googlecloudapis.apitools.base.py import encoding
package = 'compute'
class AccessConfig(messages.Message):
  """An access configuration attached to an instance's network interface.

  Fields:
    kind: Type of the resource.
    name: Name of this access configuration.
    natIP: External IP address for the instance; drawn from a shared
      ephemeral pool when not specified.
    type: Type of configuration; must be "ONE_TO_ONE_NAT" (port-for-port
      NAT to the internet).
  """

  # NOTE: this module is auto-generated from the Compute v1 API description
  # (see the module docstring); regenerate rather than hand-editing field
  # numbers or defaults.

  class TypeValueValuesEnum(messages.Enum):
    """Access-config type; ONE_TO_ONE_NAT is the only supported value."""
    ONE_TO_ONE_NAT = 0

  kind = messages.StringField(1, default=u'compute#accessConfig')
  name = messages.StringField(2)
  natIP = messages.StringField(3)
  type = messages.EnumField('TypeValueValuesEnum', 4, default=u'ONE_TO_ONE_NAT')
class Address(messages.Message):
  """A reserved address resource.

  Fields:
    address: The IP address represented by this resource.
    creationTimestamp: Creation timestamp in RFC3339 text format (output only).
    description: Optional textual description supplied on creation.
    id: Unique server-defined identifier (output only).
    kind: Type of the resource.
    name: Client-supplied resource name (1-63 chars, RFC1035-compliant).
    region: URL of the region holding this address (output only; not
      applicable to global addresses).
    selfLink: Server-defined URL for the resource (output only).
    status: The status of the address (output only).
    users: The resources that are using this address resource.
  """

  class StatusValueValuesEnum(messages.Enum):
    """The status of the address (output only): IN_USE or RESERVED."""
    IN_USE = 0
    RESERVED = 1

  address = messages.StringField(1)
  creationTimestamp = messages.StringField(2)
  description = messages.StringField(3)
  id = messages.IntegerField(4, variant=messages.Variant.UINT64)
  kind = messages.StringField(5, default=u'compute#address')
  name = messages.StringField(6)
  region = messages.StringField(7)
  selfLink = messages.StringField(8)
  status = messages.EnumField('StatusValueValuesEnum', 9)
  users = messages.StringField(10, repeated=True)
class AddressAggregatedList(messages.Message):
  """An aggregated (per-scope) listing of address resources.

  Fields:
    id: Unique server-defined identifier (output only).
    items: A map of scope name -> AddressesScopedList.
    kind: Type of resource.
    nextPageToken: Token used to continue a truncated list request
      (output only).
    selfLink: Server-defined URL for this resource (output only).
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class ItemsValue(messages.Message):
    """A map of scoped address lists, encoded as key/value pairs."""

    class AdditionalProperty(messages.Message):
      """One map entry.

      Fields:
        key: Name of the scope containing this set of addresses.
        value: The AddressesScopedList for that scope.
      """
      key = messages.StringField(1)
      value = messages.MessageField('AddressesScopedList', 2)

    additionalProperties = messages.MessageField('AdditionalProperty', 1, repeated=True)

  id = messages.StringField(1)
  items = messages.MessageField('ItemsValue', 2)
  kind = messages.StringField(3, default=u'compute#addressAggregatedList')
  nextPageToken = messages.StringField(4)
  selfLink = messages.StringField(5)
class AddressList(messages.Message):
  """Contains a list of address resources.

  Fields:
    id: Unique server-defined identifier (output only).
    items: The address resources.
    kind: Type of resource.
    nextPageToken: Token used to continue a truncated list request
      (output only).
    selfLink: Server-defined URL for the resource (output only).
  """

  id = messages.StringField(1)
  items = messages.MessageField('Address', 2, repeated=True)
  kind = messages.StringField(3, default=u'compute#addressList')
  nextPageToken = messages.StringField(4)
  selfLink = messages.StringField(5)
class AddressesScopedList(messages.Message):
  """Addresses belonging to a single scope.

  Fields:
    addresses: List of addresses contained in this scope.
    warning: Informational warning which replaces the list of addresses when
      the list is empty.
  """

  class WarningValue(messages.Message):
    """Informational warning emitted when the address list is empty.

    Fields:
      code: The warning type identifier for this warning.
      data: Metadata for this warning in 'key: value' format.
      message: Optional human-readable details for this warning.
    """

    class CodeValueValuesEnum(messages.Enum):
      """Warning type identifiers."""
      DEPRECATED_RESOURCE_USED = 0
      DISK_SIZE_LARGER_THAN_IMAGE_SIZE = 1
      INJECTED_KERNELS_DEPRECATED = 2
      NEXT_HOP_ADDRESS_NOT_ASSIGNED = 3
      NEXT_HOP_CANNOT_IP_FORWARD = 4
      NEXT_HOP_INSTANCE_NOT_FOUND = 5
      NEXT_HOP_INSTANCE_NOT_ON_NETWORK = 6
      NEXT_HOP_NOT_RUNNING = 7
      NO_RESULTS_ON_PAGE = 8
      REQUIRED_TOS_AGREEMENT = 9
      RESOURCE_NOT_DELETED = 10
      UNREACHABLE = 11

    class DataValueListEntry(messages.Message):
      """One key/value pair of warning metadata.

      Fields:
        key: A key for the warning data.
        value: A warning data value corresponding to the key.
      """
      key = messages.StringField(1)
      value = messages.StringField(2)

    code = messages.EnumField('CodeValueValuesEnum', 1)
    data = messages.MessageField('DataValueListEntry', 2, repeated=True)
    message = messages.StringField(3)

  addresses = messages.MessageField('Address', 1, repeated=True)
  warning = messages.MessageField('WarningValue', 2)
class AttachedDisk(messages.Message):
  """An instance-attached disk resource.

  Fields:
    autoDelete: Whether the disk is auto-deleted with the instance (but not
      when merely detached).
    boot: Indicates that this is a boot disk; the VM uses its first
      partition as the root filesystem.
    deviceName: Persistent disk only; unique device name reflected into the
      /dev/ tree of the guest OS. Server-chosen if unspecified.
    index: Zero-based index of this disk (0 is reserved for the boot disk);
      server-chosen if unspecified (output only).
    initializeParams: Initialization parameters.
    interface: Disk interface (NVME or SCSI).
    kind: Type of the resource.
    licenses: Public visible licenses.
    mode: Attach mode, either "READ_WRITE" or "READ_ONLY".
    source: Persistent disk only; the URL of the persistent disk resource.
    type: Disk type, "SCRATCH" or "PERSISTENT". Persistent disks must exist
      before they can be referenced here.
  """

  class InterfaceValueValuesEnum(messages.Enum):
    """Disk interface type: NVME or SCSI."""
    NVME = 0
    SCSI = 1

  class ModeValueValuesEnum(messages.Enum):
    """Attach mode: READ_ONLY or READ_WRITE."""
    READ_ONLY = 0
    READ_WRITE = 1

  class TypeValueValuesEnum(messages.Enum):
    """Disk type: PERSISTENT or SCRATCH."""
    PERSISTENT = 0
    SCRATCH = 1

  autoDelete = messages.BooleanField(1)
  boot = messages.BooleanField(2)
  deviceName = messages.StringField(3)
  index = messages.IntegerField(4, variant=messages.Variant.INT32)
  initializeParams = messages.MessageField('AttachedDiskInitializeParams', 5)
  interface = messages.EnumField('InterfaceValueValuesEnum', 6)
  kind = messages.StringField(7, default=u'compute#attachedDisk')
  licenses = messages.StringField(8, repeated=True)
  mode = messages.EnumField('ModeValueValuesEnum', 9)
  source = messages.StringField(10)
  type = messages.EnumField('TypeValueValuesEnum', 11)
class AttachedDiskInitializeParams(messages.Message):
  """Initialization parameters for the new disk (input-only). Can only be
  specified on the boot disk or local SSDs. Mutually exclusive with 'source'.

  Fields:
    diskName: Name of the disk (defaults to the instance name).
    diskSizeGb: Size of the disk in base-2 GB.
    diskType: URL of the disk type resource to use when creating the disk;
      provided by the client at disk creation.
    sourceImage: The source image used to create this disk.
  """

  diskName = messages.StringField(1)
  diskSizeGb = messages.IntegerField(2)
  diskType = messages.StringField(3)
  sourceImage = messages.StringField(4)
class Backend(messages.Message):
  """Message containing information of one individual backend.

  Fields:
    balancingMode: The balancing mode of this backend; defaults to
      UTILIZATION.
    capacityScaler: Multiplier (0 to 1e6) on the max capacity (CPU or RPS,
      per 'balancingMode') the group serves up to; 0 means fully drained.
      Default 1.
    description: Optional textual description supplied on creation.
    group: URL of a zonal Cloud Resource View defining the serving
      instances; all members must live in the view's zone, and no two
      backends of a service may share a view.
    maxRate: Max RPS of the group; required in RATE mode (either maxRate or
      maxRatePerInstance must be set).
    maxRatePerInstance: Max RPS a single backend instance can handle; used
      to compute group capacity in either mode.
    maxUtilization: CPU utilization target for the group in UTILIZATION
      mode; default 0.8, valid range [0, 1].
  """

  class BalancingModeValueValuesEnum(messages.Enum):
    """Balancing mode: RATE or UTILIZATION (default)."""
    RATE = 0
    UTILIZATION = 1

  balancingMode = messages.EnumField('BalancingModeValueValuesEnum', 1)
  capacityScaler = messages.FloatField(2, variant=messages.Variant.FLOAT)
  description = messages.StringField(3)
  group = messages.StringField(4)
  maxRate = messages.IntegerField(5, variant=messages.Variant.INT32)
  maxRatePerInstance = messages.FloatField(6, variant=messages.Variant.FLOAT)
  maxUtilization = messages.FloatField(7, variant=messages.Variant.FLOAT)
class BackendService(messages.Message):
  """A BackendService resource: a group of backend VMs together with their
  serving capacity.

  Fields:
    backends: The list of backends that serve this BackendService.
    creationTimestamp: Creation timestamp in RFC3339 text format (output only).
    description: Optional textual description supplied on creation.
    fingerprint: Content hash used for optimistic locking; ignored on
      insert, required up-to-date on update.
    healthChecks: URLs of HttpHealthCheck resources for this service;
      currently at most one, and one is required.
    id: Unique server-defined identifier (output only).
    kind: Type of the resource.
    name: Client-supplied resource name (1-63 chars, RFC1035-compliant).
    port: Deprecated in favor of portName. TCP port to connect on the
      backend; default 80.
    portName: Name of the backend port; must appear in the referenced
      resource views. Required.
    protocol: The protocol (HTTP only).
    selfLink: Server-defined URL for the resource (output only).
    timeoutSec: Seconds to wait for the backend before treating the request
      as failed; default 30.
  """

  class ProtocolValueValuesEnum(messages.Enum):
    """Serving protocol; HTTP is the only value."""
    HTTP = 0

  backends = messages.MessageField('Backend', 1, repeated=True)
  creationTimestamp = messages.StringField(2)
  description = messages.StringField(3)
  fingerprint = messages.BytesField(4)
  healthChecks = messages.StringField(5, repeated=True)
  id = messages.IntegerField(6, variant=messages.Variant.UINT64)
  kind = messages.StringField(7, default=u'compute#backendService')
  name = messages.StringField(8)
  port = messages.IntegerField(9, variant=messages.Variant.INT32)
  portName = messages.StringField(10)
  protocol = messages.EnumField('ProtocolValueValuesEnum', 11)
  selfLink = messages.StringField(12)
  timeoutSec = messages.IntegerField(13, variant=messages.Variant.INT32)
# Response message wrapping per-instance health results for a backend group.
class BackendServiceGroupHealth(messages.Message):
  """A BackendServiceGroupHealth object.
  Fields:
    healthStatus: A HealthStatus attribute.
    kind: Type of resource.
  """
  healthStatus = messages.MessageField('HealthStatus', 1, repeated=True)
  kind = messages.StringField(2, default=u'compute#backendServiceGroupHealth')
# Paged list response: `items` holds one page, `nextPageToken` continues it.
class BackendServiceList(messages.Message):
  """Contains a list of BackendService resources.
  Fields:
    id: Unique identifier for the resource; defined by the server (output
      only).
    items: The BackendService resources.
    kind: Type of resource.
    nextPageToken: A token used to continue a truncated list request (output
      only).
    selfLink: Server defined URL for this resource (output only).
  """
  id = messages.StringField(1)
  items = messages.MessageField('BackendService', 2, repeated=True)
  kind = messages.StringField(3, default=u'compute#backendServiceList')
  nextPageToken = messages.StringField(4)
  selfLink = messages.StringField(5)
# --- Addresses service request messages (regional addresses) ---------------
# One request-message class per RPC method; `required=True` fields become
# required parameters of the corresponding API call.
class ComputeAddressesAggregatedListRequest(messages.Message):
  """A ComputeAddressesAggregatedListRequest object.
  Fields:
    filter: Optional. Filter expression for filtering listed resources.
    maxResults: Optional. Maximum count of results to be returned. Maximum
      value is 500 and default value is 500.
    pageToken: Optional. Tag returned by a previous list request truncated by
      maxResults. Used to continue a previous list request.
    project: Name of the project scoping this request.
  """
  filter = messages.StringField(1)
  maxResults = messages.IntegerField(2, variant=messages.Variant.UINT32, default=500)
  pageToken = messages.StringField(3)
  project = messages.StringField(4, required=True)
class ComputeAddressesDeleteRequest(messages.Message):
  """A ComputeAddressesDeleteRequest object.
  Fields:
    address: Name of the address resource to delete.
    project: Name of the project scoping this request.
    region: Name of the region scoping this request.
  """
  address = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
  region = messages.StringField(3, required=True)
class ComputeAddressesGetRequest(messages.Message):
  """A ComputeAddressesGetRequest object.
  Fields:
    address: Name of the address resource to return.
    project: Name of the project scoping this request.
    region: Name of the region scoping this request.
  """
  address = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
  region = messages.StringField(3, required=True)
class ComputeAddressesInsertRequest(messages.Message):
  """A ComputeAddressesInsertRequest object.
  Fields:
    address: A Address resource to be passed as the request body.
    project: Name of the project scoping this request.
    region: Name of the region scoping this request.
  """
  # Unlike Delete/Get above, `address` here is the full resource body.
  address = messages.MessageField('Address', 1)
  project = messages.StringField(2, required=True)
  region = messages.StringField(3, required=True)
class ComputeAddressesListRequest(messages.Message):
  """A ComputeAddressesListRequest object.
  Fields:
    filter: Optional. Filter expression for filtering listed resources.
    maxResults: Optional. Maximum count of results to be returned. Maximum
      value is 500 and default value is 500.
    pageToken: Optional. Tag returned by a previous list request truncated by
      maxResults. Used to continue a previous list request.
    project: Name of the project scoping this request.
    region: Name of the region scoping this request.
  """
  filter = messages.StringField(1)
  maxResults = messages.IntegerField(2, variant=messages.Variant.UINT32, default=500)
  pageToken = messages.StringField(3)
  project = messages.StringField(4, required=True)
  region = messages.StringField(5, required=True)
# --- BackendServices service request messages ------------------------------
class ComputeBackendServicesDeleteRequest(messages.Message):
  """A ComputeBackendServicesDeleteRequest object.
  Fields:
    backendService: Name of the BackendService resource to delete.
    project: Name of the project scoping this request.
  """
  backendService = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
class ComputeBackendServicesGetHealthRequest(messages.Message):
  """A ComputeBackendServicesGetHealthRequest object.
  Fields:
    backendService: Name of the BackendService resource to which the queried
      instance belongs.
    project: A string attribute.
    resourceGroupReference: A ResourceGroupReference resource to be passed as
      the request body.
  """
  backendService = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
  resourceGroupReference = messages.MessageField('ResourceGroupReference', 3)
class ComputeBackendServicesGetRequest(messages.Message):
  """A ComputeBackendServicesGetRequest object.
  Fields:
    backendService: Name of the BackendService resource to return.
    project: Name of the project scoping this request.
  """
  backendService = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
class ComputeBackendServicesInsertRequest(messages.Message):
  """A ComputeBackendServicesInsertRequest object.
  Fields:
    backendService: A BackendService resource to be passed as the request
      body.
    project: Name of the project scoping this request.
  """
  backendService = messages.MessageField('BackendService', 1)
  project = messages.StringField(2, required=True)
class ComputeBackendServicesListRequest(messages.Message):
  """A ComputeBackendServicesListRequest object.
  Fields:
    filter: Optional. Filter expression for filtering listed resources.
    maxResults: Optional. Maximum count of results to be returned. Maximum
      value is 500 and default value is 500.
    pageToken: Optional. Tag returned by a previous list request truncated by
      maxResults. Used to continue a previous list request.
    project: Name of the project scoping this request.
  """
  filter = messages.StringField(1)
  maxResults = messages.IntegerField(2, variant=messages.Variant.UINT32, default=500)
  pageToken = messages.StringField(3)
  project = messages.StringField(4, required=True)
# Patch = partial update; Update = full replacement. The two request shapes
# are identical — the difference is in the HTTP verb the method maps to.
class ComputeBackendServicesPatchRequest(messages.Message):
  """A ComputeBackendServicesPatchRequest object.
  Fields:
    backendService: Name of the BackendService resource to update.
    backendServiceResource: A BackendService resource to be passed as the
      request body.
    project: Name of the project scoping this request.
  """
  backendService = messages.StringField(1, required=True)
  backendServiceResource = messages.MessageField('BackendService', 2)
  project = messages.StringField(3, required=True)
class ComputeBackendServicesUpdateRequest(messages.Message):
  """A ComputeBackendServicesUpdateRequest object.
  Fields:
    backendService: Name of the BackendService resource to update.
    backendServiceResource: A BackendService resource to be passed as the
      request body.
    project: Name of the project scoping this request.
  """
  backendService = messages.StringField(1, required=True)
  backendServiceResource = messages.MessageField('BackendService', 2)
  project = messages.StringField(3, required=True)
# --- DiskTypes service request messages (read-only, zonal resources) -------
class ComputeDiskTypesAggregatedListRequest(messages.Message):
  """A ComputeDiskTypesAggregatedListRequest object.
  Fields:
    filter: Optional. Filter expression for filtering listed resources.
    maxResults: Optional. Maximum count of results to be returned. Maximum
      value is 500 and default value is 500.
    pageToken: Optional. Tag returned by a previous list request truncated by
      maxResults. Used to continue a previous list request.
    project: Name of the project scoping this request.
  """
  filter = messages.StringField(1)
  maxResults = messages.IntegerField(2, variant=messages.Variant.UINT32, default=500)
  pageToken = messages.StringField(3)
  project = messages.StringField(4, required=True)
class ComputeDiskTypesGetRequest(messages.Message):
  """A ComputeDiskTypesGetRequest object.
  Fields:
    diskType: Name of the disk type resource to return.
    project: Name of the project scoping this request.
    zone: Name of the zone scoping this request.
  """
  diskType = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
  zone = messages.StringField(3, required=True)
class ComputeDiskTypesListRequest(messages.Message):
  """A ComputeDiskTypesListRequest object.
  Fields:
    filter: Optional. Filter expression for filtering listed resources.
    maxResults: Optional. Maximum count of results to be returned. Maximum
      value is 500 and default value is 500.
    pageToken: Optional. Tag returned by a previous list request truncated by
      maxResults. Used to continue a previous list request.
    project: Name of the project scoping this request.
    zone: Name of the zone scoping this request.
  """
  filter = messages.StringField(1)
  maxResults = messages.IntegerField(2, variant=messages.Variant.UINT32, default=500)
  pageToken = messages.StringField(3)
  project = messages.StringField(4, required=True)
  zone = messages.StringField(5, required=True)
# --- Disks service request messages (zonal resources) ----------------------
class ComputeDisksAggregatedListRequest(messages.Message):
  """A ComputeDisksAggregatedListRequest object.
  Fields:
    filter: Optional. Filter expression for filtering listed resources.
    maxResults: Optional. Maximum count of results to be returned. Maximum
      value is 500 and default value is 500.
    pageToken: Optional. Tag returned by a previous list request truncated by
      maxResults. Used to continue a previous list request.
    project: Name of the project scoping this request.
  """
  filter = messages.StringField(1)
  maxResults = messages.IntegerField(2, variant=messages.Variant.UINT32, default=500)
  pageToken = messages.StringField(3)
  project = messages.StringField(4, required=True)
class ComputeDisksCreateSnapshotRequest(messages.Message):
  """A ComputeDisksCreateSnapshotRequest object.
  Fields:
    disk: Name of the persistent disk resource to snapshot.
    project: Name of the project scoping this request.
    snapshot: A Snapshot resource to be passed as the request body.
    zone: Name of the zone scoping this request.
  """
  disk = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
  snapshot = messages.MessageField('Snapshot', 3)
  zone = messages.StringField(4, required=True)
class ComputeDisksDeleteRequest(messages.Message):
  """A ComputeDisksDeleteRequest object.
  Fields:
    disk: Name of the persistent disk resource to delete.
    project: Name of the project scoping this request.
    zone: Name of the zone scoping this request.
  """
  disk = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
  zone = messages.StringField(3, required=True)
class ComputeDisksGetRequest(messages.Message):
  """A ComputeDisksGetRequest object.
  Fields:
    disk: Name of the persistent disk resource to return.
    project: Name of the project scoping this request.
    zone: Name of the zone scoping this request.
  """
  disk = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
  zone = messages.StringField(3, required=True)
class ComputeDisksInsertRequest(messages.Message):
  """A ComputeDisksInsertRequest object.
  Fields:
    disk: A Disk resource to be passed as the request body.
    project: Name of the project scoping this request.
    sourceImage: Optional. Source image to restore onto a disk.
    zone: Name of the zone scoping this request.
  """
  disk = messages.MessageField('Disk', 1)
  project = messages.StringField(2, required=True)
  # Optional query parameter: image to initialize the new disk from.
  sourceImage = messages.StringField(3)
  zone = messages.StringField(4, required=True)
class ComputeDisksListRequest(messages.Message):
  """A ComputeDisksListRequest object.
  Fields:
    filter: Optional. Filter expression for filtering listed resources.
    maxResults: Optional. Maximum count of results to be returned. Maximum
      value is 500 and default value is 500.
    pageToken: Optional. Tag returned by a previous list request truncated by
      maxResults. Used to continue a previous list request.
    project: Name of the project scoping this request.
    zone: Name of the zone scoping this request.
  """
  filter = messages.StringField(1)
  maxResults = messages.IntegerField(2, variant=messages.Variant.UINT32, default=500)
  pageToken = messages.StringField(3)
  project = messages.StringField(4, required=True)
  zone = messages.StringField(5, required=True)
# --- Firewalls service request messages (global resources) -----------------
class ComputeFirewallsDeleteRequest(messages.Message):
  """A ComputeFirewallsDeleteRequest object.
  Fields:
    firewall: Name of the firewall resource to delete.
    project: Name of the project scoping this request.
  """
  firewall = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
class ComputeFirewallsGetRequest(messages.Message):
  """A ComputeFirewallsGetRequest object.
  Fields:
    firewall: Name of the firewall resource to return.
    project: Name of the project scoping this request.
  """
  firewall = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
class ComputeFirewallsInsertRequest(messages.Message):
  """A ComputeFirewallsInsertRequest object.
  Fields:
    firewall: A Firewall resource to be passed as the request body.
    project: Name of the project scoping this request.
  """
  firewall = messages.MessageField('Firewall', 1)
  project = messages.StringField(2, required=True)
class ComputeFirewallsListRequest(messages.Message):
  """A ComputeFirewallsListRequest object.
  Fields:
    filter: Optional. Filter expression for filtering listed resources.
    maxResults: Optional. Maximum count of results to be returned. Maximum
      value is 500 and default value is 500.
    pageToken: Optional. Tag returned by a previous list request truncated by
      maxResults. Used to continue a previous list request.
    project: Name of the project scoping this request.
  """
  filter = messages.StringField(1)
  maxResults = messages.IntegerField(2, variant=messages.Variant.UINT32, default=500)
  pageToken = messages.StringField(3)
  project = messages.StringField(4, required=True)
class ComputeFirewallsPatchRequest(messages.Message):
  """A ComputeFirewallsPatchRequest object.
  Fields:
    firewall: Name of the firewall resource to update.
    firewallResource: A Firewall resource to be passed as the request body.
    project: Name of the project scoping this request.
  """
  firewall = messages.StringField(1, required=True)
  firewallResource = messages.MessageField('Firewall', 2)
  project = messages.StringField(3, required=True)
class ComputeFirewallsUpdateRequest(messages.Message):
  """A ComputeFirewallsUpdateRequest object.
  Fields:
    firewall: Name of the firewall resource to update.
    firewallResource: A Firewall resource to be passed as the request body.
    project: Name of the project scoping this request.
  """
  firewall = messages.StringField(1, required=True)
  firewallResource = messages.MessageField('Firewall', 2)
  project = messages.StringField(3, required=True)
# --- ForwardingRules service request messages (regional resources) ---------
class ComputeForwardingRulesAggregatedListRequest(messages.Message):
  """A ComputeForwardingRulesAggregatedListRequest object.
  Fields:
    filter: Optional. Filter expression for filtering listed resources.
    maxResults: Optional. Maximum count of results to be returned. Maximum
      value is 500 and default value is 500.
    pageToken: Optional. Tag returned by a previous list request truncated by
      maxResults. Used to continue a previous list request.
    project: Name of the project scoping this request.
  """
  filter = messages.StringField(1)
  maxResults = messages.IntegerField(2, variant=messages.Variant.UINT32, default=500)
  pageToken = messages.StringField(3)
  project = messages.StringField(4, required=True)
class ComputeForwardingRulesDeleteRequest(messages.Message):
  """A ComputeForwardingRulesDeleteRequest object.
  Fields:
    forwardingRule: Name of the ForwardingRule resource to delete.
    project: Name of the project scoping this request.
    region: Name of the region scoping this request.
  """
  forwardingRule = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
  region = messages.StringField(3, required=True)
class ComputeForwardingRulesGetRequest(messages.Message):
  """A ComputeForwardingRulesGetRequest object.
  Fields:
    forwardingRule: Name of the ForwardingRule resource to return.
    project: Name of the project scoping this request.
    region: Name of the region scoping this request.
  """
  forwardingRule = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
  region = messages.StringField(3, required=True)
class ComputeForwardingRulesInsertRequest(messages.Message):
  """A ComputeForwardingRulesInsertRequest object.
  Fields:
    forwardingRule: A ForwardingRule resource to be passed as the request
      body.
    project: Name of the project scoping this request.
    region: Name of the region scoping this request.
  """
  forwardingRule = messages.MessageField('ForwardingRule', 1)
  project = messages.StringField(2, required=True)
  region = messages.StringField(3, required=True)
class ComputeForwardingRulesListRequest(messages.Message):
  """A ComputeForwardingRulesListRequest object.
  Fields:
    filter: Optional. Filter expression for filtering listed resources.
    maxResults: Optional. Maximum count of results to be returned. Maximum
      value is 500 and default value is 500.
    pageToken: Optional. Tag returned by a previous list request truncated by
      maxResults. Used to continue a previous list request.
    project: Name of the project scoping this request.
    region: Name of the region scoping this request.
  """
  filter = messages.StringField(1)
  maxResults = messages.IntegerField(2, variant=messages.Variant.UINT32, default=500)
  pageToken = messages.StringField(3)
  project = messages.StringField(4, required=True)
  region = messages.StringField(5, required=True)
class ComputeForwardingRulesSetTargetRequest(messages.Message):
  """A ComputeForwardingRulesSetTargetRequest object.
  Fields:
    forwardingRule: Name of the ForwardingRule resource in which target is to
      be set.
    project: Name of the project scoping this request.
    region: Name of the region scoping this request.
    targetReference: A TargetReference resource to be passed as the request
      body.
  """
  forwardingRule = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
  region = messages.StringField(3, required=True)
  targetReference = messages.MessageField('TargetReference', 4)
# --- GlobalAddresses service request messages ------------------------------
# Same shapes as the regional Addresses requests above, minus `region`.
class ComputeGlobalAddressesDeleteRequest(messages.Message):
  """A ComputeGlobalAddressesDeleteRequest object.
  Fields:
    address: Name of the address resource to delete.
    project: Name of the project scoping this request.
  """
  address = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
class ComputeGlobalAddressesGetRequest(messages.Message):
  """A ComputeGlobalAddressesGetRequest object.
  Fields:
    address: Name of the address resource to return.
    project: Name of the project scoping this request.
  """
  address = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
class ComputeGlobalAddressesInsertRequest(messages.Message):
  """A ComputeGlobalAddressesInsertRequest object.
  Fields:
    address: A Address resource to be passed as the request body.
    project: Name of the project scoping this request.
  """
  address = messages.MessageField('Address', 1)
  project = messages.StringField(2, required=True)
class ComputeGlobalAddressesListRequest(messages.Message):
  """A ComputeGlobalAddressesListRequest object.
  Fields:
    filter: Optional. Filter expression for filtering listed resources.
    maxResults: Optional. Maximum count of results to be returned. Maximum
      value is 500 and default value is 500.
    pageToken: Optional. Tag returned by a previous list request truncated by
      maxResults. Used to continue a previous list request.
    project: Name of the project scoping this request.
  """
  filter = messages.StringField(1)
  maxResults = messages.IntegerField(2, variant=messages.Variant.UINT32, default=500)
  pageToken = messages.StringField(3)
  project = messages.StringField(4, required=True)
# --- GlobalForwardingRules service request messages ------------------------
# Same shapes as the regional ForwardingRules requests, minus `region`.
class ComputeGlobalForwardingRulesDeleteRequest(messages.Message):
  """A ComputeGlobalForwardingRulesDeleteRequest object.
  Fields:
    forwardingRule: Name of the ForwardingRule resource to delete.
    project: Name of the project scoping this request.
  """
  forwardingRule = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
class ComputeGlobalForwardingRulesGetRequest(messages.Message):
  """A ComputeGlobalForwardingRulesGetRequest object.
  Fields:
    forwardingRule: Name of the ForwardingRule resource to return.
    project: Name of the project scoping this request.
  """
  forwardingRule = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
class ComputeGlobalForwardingRulesInsertRequest(messages.Message):
  """A ComputeGlobalForwardingRulesInsertRequest object.
  Fields:
    forwardingRule: A ForwardingRule resource to be passed as the request
      body.
    project: Name of the project scoping this request.
  """
  forwardingRule = messages.MessageField('ForwardingRule', 1)
  project = messages.StringField(2, required=True)
class ComputeGlobalForwardingRulesListRequest(messages.Message):
  """A ComputeGlobalForwardingRulesListRequest object.
  Fields:
    filter: Optional. Filter expression for filtering listed resources.
    maxResults: Optional. Maximum count of results to be returned. Maximum
      value is 500 and default value is 500.
    pageToken: Optional. Tag returned by a previous list request truncated by
      maxResults. Used to continue a previous list request.
    project: Name of the project scoping this request.
  """
  filter = messages.StringField(1)
  maxResults = messages.IntegerField(2, variant=messages.Variant.UINT32, default=500)
  pageToken = messages.StringField(3)
  project = messages.StringField(4, required=True)
class ComputeGlobalForwardingRulesSetTargetRequest(messages.Message):
  """A ComputeGlobalForwardingRulesSetTargetRequest object.
  Fields:
    forwardingRule: Name of the ForwardingRule resource in which target is to
      be set.
    project: Name of the project scoping this request.
    targetReference: A TargetReference resource to be passed as the request
      body.
  """
  forwardingRule = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
  targetReference = messages.MessageField('TargetReference', 3)
# --- GlobalOperations service request/response messages --------------------
class ComputeGlobalOperationsAggregatedListRequest(messages.Message):
  """A ComputeGlobalOperationsAggregatedListRequest object.
  Fields:
    filter: Optional. Filter expression for filtering listed resources.
    maxResults: Optional. Maximum count of results to be returned. Maximum
      value is 500 and default value is 500.
    pageToken: Optional. Tag returned by a previous list request truncated by
      maxResults. Used to continue a previous list request.
    project: Name of the project scoping this request.
  """
  filter = messages.StringField(1)
  maxResults = messages.IntegerField(2, variant=messages.Variant.UINT32, default=500)
  pageToken = messages.StringField(3)
  project = messages.StringField(4, required=True)
class ComputeGlobalOperationsDeleteRequest(messages.Message):
  """A ComputeGlobalOperationsDeleteRequest object.
  Fields:
    operation: Name of the operation resource to delete.
    project: Name of the project scoping this request.
  """
  operation = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
# Operation deletion returns no payload, hence a deliberately empty message.
class ComputeGlobalOperationsDeleteResponse(messages.Message):
  """An empty ComputeGlobalOperationsDelete response."""
class ComputeGlobalOperationsGetRequest(messages.Message):
  """A ComputeGlobalOperationsGetRequest object.
  Fields:
    operation: Name of the operation resource to return.
    project: Name of the project scoping this request.
  """
  operation = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
class ComputeGlobalOperationsListRequest(messages.Message):
  """A ComputeGlobalOperationsListRequest object.
  Fields:
    filter: Optional. Filter expression for filtering listed resources.
    maxResults: Optional. Maximum count of results to be returned. Maximum
      value is 500 and default value is 500.
    pageToken: Optional. Tag returned by a previous list request truncated by
      maxResults. Used to continue a previous list request.
    project: Name of the project scoping this request.
  """
  filter = messages.StringField(1)
  maxResults = messages.IntegerField(2, variant=messages.Variant.UINT32, default=500)
  pageToken = messages.StringField(3)
  project = messages.StringField(4, required=True)
# --- HttpHealthChecks service request messages -----------------------------
class ComputeHttpHealthChecksDeleteRequest(messages.Message):
  """A ComputeHttpHealthChecksDeleteRequest object.
  Fields:
    httpHealthCheck: Name of the HttpHealthCheck resource to delete.
    project: Name of the project scoping this request.
  """
  httpHealthCheck = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
class ComputeHttpHealthChecksGetRequest(messages.Message):
  """A ComputeHttpHealthChecksGetRequest object.
  Fields:
    httpHealthCheck: Name of the HttpHealthCheck resource to return.
    project: Name of the project scoping this request.
  """
  httpHealthCheck = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
class ComputeHttpHealthChecksInsertRequest(messages.Message):
  """A ComputeHttpHealthChecksInsertRequest object.
  Fields:
    httpHealthCheck: A HttpHealthCheck resource to be passed as the request
      body.
    project: Name of the project scoping this request.
  """
  httpHealthCheck = messages.MessageField('HttpHealthCheck', 1)
  project = messages.StringField(2, required=True)
class ComputeHttpHealthChecksListRequest(messages.Message):
  """A ComputeHttpHealthChecksListRequest object.
  Fields:
    filter: Optional. Filter expression for filtering listed resources.
    maxResults: Optional. Maximum count of results to be returned. Maximum
      value is 500 and default value is 500.
    pageToken: Optional. Tag returned by a previous list request truncated by
      maxResults. Used to continue a previous list request.
    project: Name of the project scoping this request.
  """
  filter = messages.StringField(1)
  maxResults = messages.IntegerField(2, variant=messages.Variant.UINT32, default=500)
  pageToken = messages.StringField(3)
  project = messages.StringField(4, required=True)
class ComputeHttpHealthChecksPatchRequest(messages.Message):
  """A ComputeHttpHealthChecksPatchRequest object.
  Fields:
    httpHealthCheck: Name of the HttpHealthCheck resource to update.
    httpHealthCheckResource: A HttpHealthCheck resource to be passed as the
      request body.
    project: Name of the project scoping this request.
  """
  httpHealthCheck = messages.StringField(1, required=True)
  httpHealthCheckResource = messages.MessageField('HttpHealthCheck', 2)
  project = messages.StringField(3, required=True)
class ComputeHttpHealthChecksUpdateRequest(messages.Message):
  """A ComputeHttpHealthChecksUpdateRequest object.
  Fields:
    httpHealthCheck: Name of the HttpHealthCheck resource to update.
    httpHealthCheckResource: A HttpHealthCheck resource to be passed as the
      request body.
    project: Name of the project scoping this request.
  """
  httpHealthCheck = messages.StringField(1, required=True)
  httpHealthCheckResource = messages.MessageField('HttpHealthCheck', 2)
  project = messages.StringField(3, required=True)
# --- Images service request messages ---------------------------------------
class ComputeImagesDeleteRequest(messages.Message):
  """A ComputeImagesDeleteRequest object.
  Fields:
    image: Name of the image resource to delete.
    project: Name of the project scoping this request.
  """
  image = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
class ComputeImagesDeprecateRequest(messages.Message):
  """A ComputeImagesDeprecateRequest object.
  Fields:
    deprecationStatus: A DeprecationStatus resource to be passed as the
      request body.
    image: Image name.
    project: Name of the project scoping this request.
  """
  deprecationStatus = messages.MessageField('DeprecationStatus', 1)
  image = messages.StringField(2, required=True)
  project = messages.StringField(3, required=True)
class ComputeImagesGetRequest(messages.Message):
  """A ComputeImagesGetRequest object.
  Fields:
    image: Name of the image resource to return.
    project: Name of the project scoping this request.
  """
  image = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
class ComputeImagesInsertRequest(messages.Message):
  """A ComputeImagesInsertRequest object.
  Fields:
    image: A Image resource to be passed as the request body.
    project: Name of the project scoping this request.
  """
  image = messages.MessageField('Image', 1)
  project = messages.StringField(2, required=True)
class ComputeImagesListRequest(messages.Message):
  """A ComputeImagesListRequest object.
  Fields:
    filter: Optional. Filter expression for filtering listed resources.
    maxResults: Optional. Maximum count of results to be returned. Maximum
      value is 500 and default value is 500.
    pageToken: Optional. Tag returned by a previous list request truncated by
      maxResults. Used to continue a previous list request.
    project: Name of the project scoping this request.
  """
  filter = messages.StringField(1)
  maxResults = messages.IntegerField(2, variant=messages.Variant.UINT32, default=500)
  pageToken = messages.StringField(3)
  project = messages.StringField(4, required=True)
# --- InstanceTemplates service request messages ----------------------------
class ComputeInstanceTemplatesDeleteRequest(messages.Message):
  """A ComputeInstanceTemplatesDeleteRequest object.
  Fields:
    instanceTemplate: Name of the instance template resource to delete.
    project: Name of the project scoping this request.
  """
  instanceTemplate = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
class ComputeInstanceTemplatesGetRequest(messages.Message):
  """A ComputeInstanceTemplatesGetRequest object.
  Fields:
    instanceTemplate: Name of the instance template resource to return.
    project: Name of the project scoping this request.
  """
  instanceTemplate = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
class ComputeInstanceTemplatesInsertRequest(messages.Message):
  """A ComputeInstanceTemplatesInsertRequest object.
  Fields:
    instanceTemplate: A InstanceTemplate resource to be passed as the request
      body.
    project: Name of the project scoping this request.
  """
  instanceTemplate = messages.MessageField('InstanceTemplate', 1)
  project = messages.StringField(2, required=True)
class ComputeInstanceTemplatesListRequest(messages.Message):
  """A ComputeInstanceTemplatesListRequest object.
  Fields:
    filter: Optional. Filter expression for filtering listed resources.
    maxResults: Optional. Maximum count of results to be returned. Maximum
      value is 500 and default value is 500.
    pageToken: Optional. Tag returned by a previous list request truncated by
      maxResults. Used to continue a previous list request.
    project: Name of the project scoping this request.
  """
  filter = messages.StringField(1)
  maxResults = messages.IntegerField(2, variant=messages.Variant.UINT32, default=500)
  pageToken = messages.StringField(3)
  project = messages.StringField(4, required=True)
# --- Instances service request messages (zonal resources) ------------------
class ComputeInstancesAddAccessConfigRequest(messages.Message):
  """A ComputeInstancesAddAccessConfigRequest object.
  Fields:
    accessConfig: A AccessConfig resource to be passed as the request body.
    instance: Instance name.
    networkInterface: Network interface name.
    project: Project name.
    zone: Name of the zone scoping this request.
  """
  accessConfig = messages.MessageField('AccessConfig', 1)
  instance = messages.StringField(2, required=True)
  networkInterface = messages.StringField(3, required=True)
  project = messages.StringField(4, required=True)
  zone = messages.StringField(5, required=True)
class ComputeInstancesAggregatedListRequest(messages.Message):
  """A ComputeInstancesAggregatedListRequest object.
  Fields:
    filter: Optional. Filter expression for filtering listed resources.
    maxResults: Optional. Maximum count of results to be returned. Maximum
      value is 500 and default value is 500.
    pageToken: Optional. Tag returned by a previous list request truncated by
      maxResults. Used to continue a previous list request.
    project: Name of the project scoping this request.
  """
  filter = messages.StringField(1)
  maxResults = messages.IntegerField(2, variant=messages.Variant.UINT32, default=500)
  pageToken = messages.StringField(3)
  project = messages.StringField(4, required=True)
class ComputeInstancesAttachDiskRequest(messages.Message):
  """A ComputeInstancesAttachDiskRequest object.
  Fields:
    attachedDisk: A AttachedDisk resource to be passed as the request body.
    instance: Instance name.
    project: Project name.
    zone: Name of the zone scoping this request.
  """
  attachedDisk = messages.MessageField('AttachedDisk', 1)
  instance = messages.StringField(2, required=True)
  project = messages.StringField(3, required=True)
  zone = messages.StringField(4, required=True)
class ComputeInstancesDeleteAccessConfigRequest(messages.Message):
  """A ComputeInstancesDeleteAccessConfigRequest object.
  Fields:
    accessConfig: Access config name.
    instance: Instance name.
    networkInterface: Network interface name.
    project: Project name.
    zone: Name of the zone scoping this request.
  """
  # Here `accessConfig` is the config's *name* (a query parameter), unlike
  # AddAccessConfig above where it is the full resource body.
  accessConfig = messages.StringField(1, required=True)
  instance = messages.StringField(2, required=True)
  networkInterface = messages.StringField(3, required=True)
  project = messages.StringField(4, required=True)
  zone = messages.StringField(5, required=True)
class ComputeInstancesDeleteRequest(messages.Message):
"""A ComputeInstancesDeleteRequest object.
Fields:
instance: Name of the instance resource to delete.
project: Name of the project scoping this request.
zone: Name of the zone scoping this request.
"""
instance = messages.StringField(1, required=True)
project = messages.StringField(2, required=True)
zone = messages.StringField(3, required=True)
# Auto-generated (apitools/protorpc style) request messages for per-instance
# API methods (detach/get/insert/list/reset/set*/start). Field numbers are
# protorpc wire tags — do not renumber.
class ComputeInstancesDetachDiskRequest(messages.Message):
  """A ComputeInstancesDetachDiskRequest object.
  Fields:
    deviceName: Disk device name to detach.
    instance: Instance name.
    project: Project name.
    zone: Name of the zone scoping this request.
  """
  deviceName = messages.StringField(1, required=True)
  instance = messages.StringField(2, required=True)
  project = messages.StringField(3, required=True)
  zone = messages.StringField(4, required=True)
class ComputeInstancesGetRequest(messages.Message):
  """A ComputeInstancesGetRequest object.
  Fields:
    instance: Name of the instance resource to return.
    project: Name of the project scoping this request.
    zone: Name of the zone scoping this request.
  """
  instance = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
  zone = messages.StringField(3, required=True)
class ComputeInstancesGetSerialPortOutputRequest(messages.Message):
  """A ComputeInstancesGetSerialPortOutputRequest object.
  Fields:
    instance: Name of the instance scoping this request.
    project: Name of the project scoping this request.
    zone: Name of the zone scoping this request.
  """
  instance = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
  zone = messages.StringField(3, required=True)
class ComputeInstancesInsertRequest(messages.Message):
  """A ComputeInstancesInsertRequest object.
  Fields:
    instance: A Instance resource to be passed as the request body.
    project: Name of the project scoping this request.
    zone: Name of the zone scoping this request.
  """
  instance = messages.MessageField('Instance', 1)
  project = messages.StringField(2, required=True)
  zone = messages.StringField(3, required=True)
class ComputeInstancesListRequest(messages.Message):
  """A ComputeInstancesListRequest object.
  Fields:
    filter: Optional. Filter expression for filtering listed resources.
    maxResults: Optional. Maximum count of results to be returned. Maximum
      value is 500 and default value is 500.
    pageToken: Optional. Tag returned by a previous list request truncated by
      maxResults. Used to continue a previous list request.
    project: Name of the project scoping this request.
    zone: Name of the zone scoping this request.
  """
  filter = messages.StringField(1)
  maxResults = messages.IntegerField(2, variant=messages.Variant.UINT32, default=500)
  pageToken = messages.StringField(3)
  project = messages.StringField(4, required=True)
  zone = messages.StringField(5, required=True)
class ComputeInstancesResetRequest(messages.Message):
  """A ComputeInstancesResetRequest object.
  Fields:
    instance: Name of the instance scoping this request.
    project: Name of the project scoping this request.
    zone: Name of the zone scoping this request.
  """
  instance = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
  zone = messages.StringField(3, required=True)
class ComputeInstancesSetDiskAutoDeleteRequest(messages.Message):
  """A ComputeInstancesSetDiskAutoDeleteRequest object.
  Fields:
    autoDelete: Whether to auto-delete the disk when the instance is deleted.
    deviceName: Disk device name to modify.
    instance: Instance name.
    project: Project name.
    zone: Name of the zone scoping this request.
  """
  autoDelete = messages.BooleanField(1, required=True)
  deviceName = messages.StringField(2, required=True)
  instance = messages.StringField(3, required=True)
  project = messages.StringField(4, required=True)
  zone = messages.StringField(5, required=True)
class ComputeInstancesSetMetadataRequest(messages.Message):
  """A ComputeInstancesSetMetadataRequest object.
  Fields:
    instance: Name of the instance scoping this request.
    metadata: A Metadata resource to be passed as the request body.
    project: Name of the project scoping this request.
    zone: Name of the zone scoping this request.
  """
  instance = messages.StringField(1, required=True)
  metadata = messages.MessageField('Metadata', 2)
  project = messages.StringField(3, required=True)
  zone = messages.StringField(4, required=True)
class ComputeInstancesSetSchedulingRequest(messages.Message):
  """A ComputeInstancesSetSchedulingRequest object.
  Fields:
    instance: Instance name.
    project: Project name.
    scheduling: A Scheduling resource to be passed as the request body.
    zone: Name of the zone scoping this request.
  """
  instance = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
  scheduling = messages.MessageField('Scheduling', 3)
  zone = messages.StringField(4, required=True)
class ComputeInstancesSetTagsRequest(messages.Message):
  """A ComputeInstancesSetTagsRequest object.
  Fields:
    instance: Name of the instance scoping this request.
    project: Name of the project scoping this request.
    tags: A Tags resource to be passed as the request body.
    zone: Name of the zone scoping this request.
  """
  instance = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
  tags = messages.MessageField('Tags', 3)
  zone = messages.StringField(4, required=True)
class ComputeInstancesStartRequest(messages.Message):
  """A ComputeInstancesStartRequest object.
  Fields:
    instance: Name of the instance resource to start.
    project: Name of the project scoping this request.
    zone: Name of the zone scoping this request.
  """
  instance = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
  zone = messages.StringField(3, required=True)
class ComputeInstancesStopRequest(messages.Message):
  """A ComputeInstancesStopRequest object.
  Fields:
    instance: Name of the instance resource to stop.
    project: Name of the project scoping this request.
    zone: Name of the zone scoping this request.
  """
  # Fix: docstring previously said "resource to start" (copy-paste from the
  # Start request); this message is sent to the instances.stop method.
  instance = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
  zone = messages.StringField(3, required=True)
# Auto-generated (apitools/protorpc style) request messages for licenses,
# machine types, networks, projects, and the region-operations delete method.
# Field numbers are protorpc wire tags — do not renumber.
class ComputeLicensesGetRequest(messages.Message):
  """A ComputeLicensesGetRequest object.
  Fields:
    license: Name of the license resource to return.
    project: Name of the project scoping this request.
  """
  license = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
class ComputeMachineTypesAggregatedListRequest(messages.Message):
  """A ComputeMachineTypesAggregatedListRequest object.
  Fields:
    filter: Optional. Filter expression for filtering listed resources.
    maxResults: Optional. Maximum count of results to be returned. Maximum
      value is 500 and default value is 500.
    pageToken: Optional. Tag returned by a previous list request truncated by
      maxResults. Used to continue a previous list request.
    project: Project ID for this request.
  """
  filter = messages.StringField(1)
  maxResults = messages.IntegerField(2, variant=messages.Variant.UINT32, default=500)
  pageToken = messages.StringField(3)
  project = messages.StringField(4, required=True)
class ComputeMachineTypesGetRequest(messages.Message):
  """A ComputeMachineTypesGetRequest object.
  Fields:
    machineType: Name of the machine type resource to return.
    project: Project ID for this request.
    zone: Name of the zone scoping this request.
  """
  machineType = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
  zone = messages.StringField(3, required=True)
class ComputeMachineTypesListRequest(messages.Message):
  """A ComputeMachineTypesListRequest object.
  Fields:
    filter: Optional. Filter expression for filtering listed resources.
    maxResults: Optional. Maximum count of results to be returned. Maximum
      value is 500 and default value is 500.
    pageToken: Optional. Tag returned by a previous list request truncated by
      maxResults. Used to continue a previous list request.
    project: Project ID for this request.
    zone: Name of the zone scoping this request.
  """
  filter = messages.StringField(1)
  maxResults = messages.IntegerField(2, variant=messages.Variant.UINT32, default=500)
  pageToken = messages.StringField(3)
  project = messages.StringField(4, required=True)
  zone = messages.StringField(5, required=True)
class ComputeNetworksDeleteRequest(messages.Message):
  """A ComputeNetworksDeleteRequest object.
  Fields:
    network: Name of the network resource to delete.
    project: Name of the project scoping this request.
  """
  network = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
class ComputeNetworksGetRequest(messages.Message):
  """A ComputeNetworksGetRequest object.
  Fields:
    network: Name of the network resource to return.
    project: Name of the project scoping this request.
  """
  network = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
class ComputeNetworksInsertRequest(messages.Message):
  """A ComputeNetworksInsertRequest object.
  Fields:
    network: A Network resource to be passed as the request body.
    project: Name of the project scoping this request.
  """
  network = messages.MessageField('Network', 1)
  project = messages.StringField(2, required=True)
class ComputeNetworksListRequest(messages.Message):
  """A ComputeNetworksListRequest object.
  Fields:
    filter: Optional. Filter expression for filtering listed resources.
    maxResults: Optional. Maximum count of results to be returned. Maximum
      value is 500 and default value is 500.
    pageToken: Optional. Tag returned by a previous list request truncated by
      maxResults. Used to continue a previous list request.
    project: Name of the project scoping this request.
  """
  filter = messages.StringField(1)
  maxResults = messages.IntegerField(2, variant=messages.Variant.UINT32, default=500)
  pageToken = messages.StringField(3)
  project = messages.StringField(4, required=True)
class ComputeProjectsGetRequest(messages.Message):
  """A ComputeProjectsGetRequest object.
  Fields:
    project: Name of the project resource to retrieve.
  """
  project = messages.StringField(1, required=True)
class ComputeProjectsSetCommonInstanceMetadataRequest(messages.Message):
  """A ComputeProjectsSetCommonInstanceMetadataRequest object.
  Fields:
    metadata: A Metadata resource to be passed as the request body.
    project: Name of the project scoping this request.
  """
  metadata = messages.MessageField('Metadata', 1)
  project = messages.StringField(2, required=True)
class ComputeProjectsSetUsageExportBucketRequest(messages.Message):
  """A ComputeProjectsSetUsageExportBucketRequest object.
  Fields:
    project: Name of the project scoping this request.
    usageExportLocation: A UsageExportLocation resource to be passed as the
      request body.
  """
  project = messages.StringField(1, required=True)
  usageExportLocation = messages.MessageField('UsageExportLocation', 2)
class ComputeRegionOperationsDeleteRequest(messages.Message):
  """A ComputeRegionOperationsDeleteRequest object.
  Fields:
    operation: Name of the operation resource to delete.
    project: Name of the project scoping this request.
    region: Name of the region scoping this request.
  """
  operation = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
  region = messages.StringField(3, required=True)
class ComputeRegionOperationsDeleteResponse(messages.Message):
  """An empty ComputeRegionOperationsDelete response."""
class ComputeRegionOperationsGetRequest(messages.Message):
  """A ComputeRegionOperationsGetRequest object.
  Fields:
    operation: Name of the operation resource to return.
    project: Name of the project scoping this request.
    region: Name of the region scoping this request.
  """
  # Fix: the `region` field was documented as "Name of the zone scoping this
  # request"; this is a region-scoped operation, so the description now says
  # region. (If this text mirrors the upstream discovery document verbatim,
  # confirm before regenerating.)
  operation = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
  region = messages.StringField(3, required=True)
# Auto-generated (apitools/protorpc style) request messages for region
# operations, regions, routes, and snapshots. Field numbers are protorpc
# wire tags — do not renumber.
class ComputeRegionOperationsListRequest(messages.Message):
  """A ComputeRegionOperationsListRequest object.
  Fields:
    filter: Optional. Filter expression for filtering listed resources.
    maxResults: Optional. Maximum count of results to be returned. Maximum
      value is 500 and default value is 500.
    pageToken: Optional. Tag returned by a previous list request truncated by
      maxResults. Used to continue a previous list request.
    project: Name of the project scoping this request.
    region: Name of the region scoping this request.
  """
  filter = messages.StringField(1)
  maxResults = messages.IntegerField(2, variant=messages.Variant.UINT32, default=500)
  pageToken = messages.StringField(3)
  project = messages.StringField(4, required=True)
  region = messages.StringField(5, required=True)
class ComputeRegionsGetRequest(messages.Message):
  """A ComputeRegionsGetRequest object.
  Fields:
    project: Name of the project scoping this request.
    region: Name of the region resource to return.
  """
  project = messages.StringField(1, required=True)
  region = messages.StringField(2, required=True)
class ComputeRegionsListRequest(messages.Message):
  """A ComputeRegionsListRequest object.
  Fields:
    filter: Optional. Filter expression for filtering listed resources.
    maxResults: Optional. Maximum count of results to be returned. Maximum
      value is 500 and default value is 500.
    pageToken: Optional. Tag returned by a previous list request truncated by
      maxResults. Used to continue a previous list request.
    project: Name of the project scoping this request.
  """
  filter = messages.StringField(1)
  maxResults = messages.IntegerField(2, variant=messages.Variant.UINT32, default=500)
  pageToken = messages.StringField(3)
  project = messages.StringField(4, required=True)
class ComputeRoutesDeleteRequest(messages.Message):
  """A ComputeRoutesDeleteRequest object.
  Fields:
    project: Name of the project scoping this request.
    route: Name of the route resource to delete.
  """
  project = messages.StringField(1, required=True)
  route = messages.StringField(2, required=True)
class ComputeRoutesGetRequest(messages.Message):
  """A ComputeRoutesGetRequest object.
  Fields:
    project: Name of the project scoping this request.
    route: Name of the route resource to return.
  """
  project = messages.StringField(1, required=True)
  route = messages.StringField(2, required=True)
class ComputeRoutesInsertRequest(messages.Message):
  """A ComputeRoutesInsertRequest object.
  Fields:
    project: Name of the project scoping this request.
    route: A Route resource to be passed as the request body.
  """
  project = messages.StringField(1, required=True)
  route = messages.MessageField('Route', 2)
class ComputeRoutesListRequest(messages.Message):
  """A ComputeRoutesListRequest object.
  Fields:
    filter: Optional. Filter expression for filtering listed resources.
    maxResults: Optional. Maximum count of results to be returned. Maximum
      value is 500 and default value is 500.
    pageToken: Optional. Tag returned by a previous list request truncated by
      maxResults. Used to continue a previous list request.
    project: Name of the project scoping this request.
  """
  filter = messages.StringField(1)
  maxResults = messages.IntegerField(2, variant=messages.Variant.UINT32, default=500)
  pageToken = messages.StringField(3)
  project = messages.StringField(4, required=True)
class ComputeSnapshotsDeleteRequest(messages.Message):
  """A ComputeSnapshotsDeleteRequest object.
  Fields:
    project: Name of the project scoping this request.
    snapshot: Name of the persistent disk snapshot resource to delete.
  """
  project = messages.StringField(1, required=True)
  snapshot = messages.StringField(2, required=True)
class ComputeSnapshotsGetRequest(messages.Message):
  """A ComputeSnapshotsGetRequest object.
  Fields:
    project: Name of the project scoping this request.
    snapshot: Name of the persistent disk snapshot resource to return.
  """
  project = messages.StringField(1, required=True)
  snapshot = messages.StringField(2, required=True)
class ComputeSnapshotsListRequest(messages.Message):
  """A ComputeSnapshotsListRequest object.
  Fields:
    filter: Optional. Filter expression for filtering listed resources.
    maxResults: Optional. Maximum count of results to be returned. Maximum
      value is 500 and default value is 500.
    pageToken: Optional. Tag returned by a previous list request truncated by
      maxResults. Used to continue a previous list request.
    project: Name of the project scoping this request.
  """
  filter = messages.StringField(1)
  maxResults = messages.IntegerField(2, variant=messages.Variant.UINT32, default=500)
  pageToken = messages.StringField(3)
  project = messages.StringField(4, required=True)
# Auto-generated (apitools/protorpc style) request messages for target HTTP
# proxies and target instances. Field numbers are protorpc wire tags — do
# not renumber.
class ComputeTargetHttpProxiesDeleteRequest(messages.Message):
  """A ComputeTargetHttpProxiesDeleteRequest object.
  Fields:
    project: Name of the project scoping this request.
    targetHttpProxy: Name of the TargetHttpProxy resource to delete.
  """
  project = messages.StringField(1, required=True)
  targetHttpProxy = messages.StringField(2, required=True)
class ComputeTargetHttpProxiesGetRequest(messages.Message):
  """A ComputeTargetHttpProxiesGetRequest object.
  Fields:
    project: Name of the project scoping this request.
    targetHttpProxy: Name of the TargetHttpProxy resource to return.
  """
  project = messages.StringField(1, required=True)
  targetHttpProxy = messages.StringField(2, required=True)
class ComputeTargetHttpProxiesInsertRequest(messages.Message):
  """A ComputeTargetHttpProxiesInsertRequest object.
  Fields:
    project: Name of the project scoping this request.
    targetHttpProxy: A TargetHttpProxy resource to be passed as the request
      body.
  """
  project = messages.StringField(1, required=True)
  targetHttpProxy = messages.MessageField('TargetHttpProxy', 2)
class ComputeTargetHttpProxiesListRequest(messages.Message):
  """A ComputeTargetHttpProxiesListRequest object.
  Fields:
    filter: Optional. Filter expression for filtering listed resources.
    maxResults: Optional. Maximum count of results to be returned. Maximum
      value is 500 and default value is 500.
    pageToken: Optional. Tag returned by a previous list request truncated by
      maxResults. Used to continue a previous list request.
    project: Name of the project scoping this request.
  """
  filter = messages.StringField(1)
  maxResults = messages.IntegerField(2, variant=messages.Variant.UINT32, default=500)
  pageToken = messages.StringField(3)
  project = messages.StringField(4, required=True)
class ComputeTargetHttpProxiesSetUrlMapRequest(messages.Message):
  """A ComputeTargetHttpProxiesSetUrlMapRequest object.
  Fields:
    project: Name of the project scoping this request.
    targetHttpProxy: Name of the TargetHttpProxy resource whose URL map is to
      be set.
    urlMapReference: A UrlMapReference resource to be passed as the request
      body.
  """
  project = messages.StringField(1, required=True)
  targetHttpProxy = messages.StringField(2, required=True)
  urlMapReference = messages.MessageField('UrlMapReference', 3)
class ComputeTargetInstancesAggregatedListRequest(messages.Message):
  """A ComputeTargetInstancesAggregatedListRequest object.
  Fields:
    filter: Optional. Filter expression for filtering listed resources.
    maxResults: Optional. Maximum count of results to be returned. Maximum
      value is 500 and default value is 500.
    pageToken: Optional. Tag returned by a previous list request truncated by
      maxResults. Used to continue a previous list request.
    project: Name of the project scoping this request.
  """
  filter = messages.StringField(1)
  maxResults = messages.IntegerField(2, variant=messages.Variant.UINT32, default=500)
  pageToken = messages.StringField(3)
  project = messages.StringField(4, required=True)
class ComputeTargetInstancesDeleteRequest(messages.Message):
  """A ComputeTargetInstancesDeleteRequest object.
  Fields:
    project: Name of the project scoping this request.
    targetInstance: Name of the TargetInstance resource to delete.
    zone: Name of the zone scoping this request.
  """
  project = messages.StringField(1, required=True)
  targetInstance = messages.StringField(2, required=True)
  zone = messages.StringField(3, required=True)
class ComputeTargetInstancesGetRequest(messages.Message):
  """A ComputeTargetInstancesGetRequest object.
  Fields:
    project: Name of the project scoping this request.
    targetInstance: Name of the TargetInstance resource to return.
    zone: Name of the zone scoping this request.
  """
  project = messages.StringField(1, required=True)
  targetInstance = messages.StringField(2, required=True)
  zone = messages.StringField(3, required=True)
class ComputeTargetInstancesInsertRequest(messages.Message):
  """A ComputeTargetInstancesInsertRequest object.
  Fields:
    project: Name of the project scoping this request.
    targetInstance: A TargetInstance resource to be passed as the request
      body.
    zone: Name of the zone scoping this request.
  """
  project = messages.StringField(1, required=True)
  targetInstance = messages.MessageField('TargetInstance', 2)
  zone = messages.StringField(3, required=True)
class ComputeTargetInstancesListRequest(messages.Message):
  """A ComputeTargetInstancesListRequest object.
  Fields:
    filter: Optional. Filter expression for filtering listed resources.
    maxResults: Optional. Maximum count of results to be returned. Maximum
      value is 500 and default value is 500.
    pageToken: Optional. Tag returned by a previous list request truncated by
      maxResults. Used to continue a previous list request.
    project: Name of the project scoping this request.
    zone: Name of the zone scoping this request.
  """
  filter = messages.StringField(1)
  maxResults = messages.IntegerField(2, variant=messages.Variant.UINT32, default=500)
  pageToken = messages.StringField(3)
  project = messages.StringField(4, required=True)
  zone = messages.StringField(5, required=True)
# Auto-generated (apitools/protorpc style) request messages for target-pool
# membership and health-check management. Field numbers are protorpc wire
# tags — do not renumber.
class ComputeTargetPoolsAddHealthCheckRequest(messages.Message):
  """A ComputeTargetPoolsAddHealthCheckRequest object.
  Fields:
    project: A string attribute.
    region: Name of the region scoping this request.
    targetPool: Name of the TargetPool resource to which health_check_url is
      to be added.
    targetPoolsAddHealthCheckRequest: A TargetPoolsAddHealthCheckRequest
      resource to be passed as the request body.
  """
  project = messages.StringField(1, required=True)
  region = messages.StringField(2, required=True)
  targetPool = messages.StringField(3, required=True)
  targetPoolsAddHealthCheckRequest = messages.MessageField('TargetPoolsAddHealthCheckRequest', 4)
class ComputeTargetPoolsAddInstanceRequest(messages.Message):
  """A ComputeTargetPoolsAddInstanceRequest object.
  Fields:
    project: A string attribute.
    region: Name of the region scoping this request.
    targetPool: Name of the TargetPool resource to which instance_url is to be
      added.
    targetPoolsAddInstanceRequest: A TargetPoolsAddInstanceRequest resource to
      be passed as the request body.
  """
  project = messages.StringField(1, required=True)
  region = messages.StringField(2, required=True)
  targetPool = messages.StringField(3, required=True)
  targetPoolsAddInstanceRequest = messages.MessageField('TargetPoolsAddInstanceRequest', 4)
class ComputeTargetPoolsAggregatedListRequest(messages.Message):
  """A ComputeTargetPoolsAggregatedListRequest object.
  Fields:
    filter: Optional. Filter expression for filtering listed resources.
    maxResults: Optional. Maximum count of results to be returned. Maximum
      value is 500 and default value is 500.
    pageToken: Optional. Tag returned by a previous list request truncated by
      maxResults. Used to continue a previous list request.
    project: Name of the project scoping this request.
  """
  filter = messages.StringField(1)
  maxResults = messages.IntegerField(2, variant=messages.Variant.UINT32, default=500)
  pageToken = messages.StringField(3)
  project = messages.StringField(4, required=True)
class ComputeTargetPoolsDeleteRequest(messages.Message):
  """A ComputeTargetPoolsDeleteRequest object.
  Fields:
    project: Name of the project scoping this request.
    region: Name of the region scoping this request.
    targetPool: Name of the TargetPool resource to delete.
  """
  project = messages.StringField(1, required=True)
  region = messages.StringField(2, required=True)
  targetPool = messages.StringField(3, required=True)
class ComputeTargetPoolsGetHealthRequest(messages.Message):
  """A ComputeTargetPoolsGetHealthRequest object.
  Fields:
    instanceReference: A InstanceReference resource to be passed as the
      request body.
    project: A string attribute.
    region: Name of the region scoping this request.
    targetPool: Name of the TargetPool resource to which the queried instance
      belongs.
  """
  instanceReference = messages.MessageField('InstanceReference', 1)
  project = messages.StringField(2, required=True)
  region = messages.StringField(3, required=True)
  targetPool = messages.StringField(4, required=True)
class ComputeTargetPoolsGetRequest(messages.Message):
  """A ComputeTargetPoolsGetRequest object.
  Fields:
    project: Name of the project scoping this request.
    region: Name of the region scoping this request.
    targetPool: Name of the TargetPool resource to return.
  """
  project = messages.StringField(1, required=True)
  region = messages.StringField(2, required=True)
  targetPool = messages.StringField(3, required=True)
class ComputeTargetPoolsInsertRequest(messages.Message):
  """A ComputeTargetPoolsInsertRequest object.
  Fields:
    project: Name of the project scoping this request.
    region: Name of the region scoping this request.
    targetPool: A TargetPool resource to be passed as the request body.
  """
  project = messages.StringField(1, required=True)
  region = messages.StringField(2, required=True)
  targetPool = messages.MessageField('TargetPool', 3)
class ComputeTargetPoolsListRequest(messages.Message):
  """A ComputeTargetPoolsListRequest object.
  Fields:
    filter: Optional. Filter expression for filtering listed resources.
    maxResults: Optional. Maximum count of results to be returned. Maximum
      value is 500 and default value is 500.
    pageToken: Optional. Tag returned by a previous list request truncated by
      maxResults. Used to continue a previous list request.
    project: Name of the project scoping this request.
    region: Name of the region scoping this request.
  """
  filter = messages.StringField(1)
  maxResults = messages.IntegerField(2, variant=messages.Variant.UINT32, default=500)
  pageToken = messages.StringField(3)
  project = messages.StringField(4, required=True)
  region = messages.StringField(5, required=True)
class ComputeTargetPoolsRemoveHealthCheckRequest(messages.Message):
  """A ComputeTargetPoolsRemoveHealthCheckRequest object.
  Fields:
    project: A string attribute.
    region: Name of the region scoping this request.
    targetPool: Name of the TargetPool resource to which health_check_url is
      to be removed.
    targetPoolsRemoveHealthCheckRequest: A TargetPoolsRemoveHealthCheckRequest
      resource to be passed as the request body.
  """
  project = messages.StringField(1, required=True)
  region = messages.StringField(2, required=True)
  targetPool = messages.StringField(3, required=True)
  targetPoolsRemoveHealthCheckRequest = messages.MessageField('TargetPoolsRemoveHealthCheckRequest', 4)
class ComputeTargetPoolsRemoveInstanceRequest(messages.Message):
  """A ComputeTargetPoolsRemoveInstanceRequest object.
  Fields:
    project: A string attribute.
    region: Name of the region scoping this request.
    targetPool: Name of the TargetPool resource to which instance_url is to be
      removed.
    targetPoolsRemoveInstanceRequest: A TargetPoolsRemoveInstanceRequest
      resource to be passed as the request body.
  """
  project = messages.StringField(1, required=True)
  region = messages.StringField(2, required=True)
  targetPool = messages.StringField(3, required=True)
  targetPoolsRemoveInstanceRequest = messages.MessageField('TargetPoolsRemoveInstanceRequest', 4)
class ComputeTargetPoolsSetBackupRequest(messages.Message):
"""A ComputeTargetPoolsSetBackupRequest object.
Fields:
failoverRatio: New failoverRatio value for the containing target pool.
project: Name of the project scoping this request.
region: Name of the region scoping this request.
targetPool: Name of the TargetPool resource for which the backup is to be
set.
targetReference: A TargetReference resource to be passed as the request
body.
"""
failoverRatio = messages.FloatField(1, variant=messages.Variant.FLOAT)
project = messages.StringField(2, required=True)
region = messages.StringField(3, required=True)
targetPool = messages.StringField(4, required=True)
targetReference = messages.MessageField('TargetReference', 5)
class ComputeUrlMapsDeleteRequest(messages.Message):
"""A ComputeUrlMapsDeleteRequest object.
Fields:
project: Name of the project scoping this request.
urlMap: Name of the UrlMap resource to delete.
"""
project = messages.StringField(1, required=True)
urlMap = messages.StringField(2, required=True)
class ComputeUrlMapsGetRequest(messages.Message):
"""A ComputeUrlMapsGetRequest object.
Fields:
project: Name of the project scoping this request.
urlMap: Name of the UrlMap resource to return.
"""
project = messages.StringField(1, required=True)
urlMap = messages.StringField(2, required=True)
class ComputeUrlMapsInsertRequest(messages.Message):
"""A ComputeUrlMapsInsertRequest object.
Fields:
project: Name of the project scoping this request.
urlMap: A UrlMap resource to be passed as the request body.
"""
project = messages.StringField(1, required=True)
urlMap = messages.MessageField('UrlMap', 2)
class ComputeUrlMapsListRequest(messages.Message):
"""A ComputeUrlMapsListRequest object.
Fields:
filter: Optional. Filter expression for filtering listed resources.
maxResults: Optional. Maximum count of results to be returned. Maximum
value is 500 and default value is 500.
pageToken: Optional. Tag returned by a previous list request truncated by
maxResults. Used to continue a previous list request.
project: Name of the project scoping this request.
"""
filter = messages.StringField(1)
maxResults = messages.IntegerField(2, variant=messages.Variant.UINT32, default=500)
pageToken = messages.StringField(3)
project = messages.StringField(4, required=True)
class ComputeUrlMapsPatchRequest(messages.Message):
"""A ComputeUrlMapsPatchRequest object.
Fields:
project: Name of the project scoping this request.
urlMap: Name of the UrlMap resource to update.
urlMapResource: A UrlMap resource to be passed as the request body.
"""
project = messages.StringField(1, required=True)
urlMap = messages.StringField(2, required=True)
urlMapResource = messages.MessageField('UrlMap', 3)
class ComputeUrlMapsUpdateRequest(messages.Message):
"""A ComputeUrlMapsUpdateRequest object.
Fields:
project: Name of the project scoping this request.
urlMap: Name of the UrlMap resource to update.
urlMapResource: A UrlMap resource to be passed as the request body.
"""
project = messages.StringField(1, required=True)
urlMap = messages.StringField(2, required=True)
urlMapResource = messages.MessageField('UrlMap', 3)
class ComputeUrlMapsValidateRequest(messages.Message):
  """A ComputeUrlMapsValidateRequest object.
  Fields:
    project: Name of the project scoping this request.
    urlMap: Name of the UrlMap resource to be validated as.
    urlMapsValidateRequest: A UrlMapsValidateRequest resource to be passed as
      the request body.
  """
  # Numeric field tags are protorpc wire identifiers; do not renumber.
  project = messages.StringField(1, required=True)
  urlMap = messages.StringField(2, required=True)
  urlMapsValidateRequest = messages.MessageField('UrlMapsValidateRequest', 3)
class ComputeZoneOperationsDeleteRequest(messages.Message):
  """A ComputeZoneOperationsDeleteRequest object.
  Fields:
    operation: Name of the operation resource to delete.
    project: Name of the project scoping this request.
    zone: Name of the zone scoping this request.
  """
  # All three fields are required path parameters.
  operation = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
  zone = messages.StringField(3, required=True)
class ComputeZoneOperationsDeleteResponse(messages.Message):
  """An empty ComputeZoneOperationsDelete response."""
  # Intentionally has no fields: the delete call returns an empty body.
class ComputeZoneOperationsGetRequest(messages.Message):
  """A ComputeZoneOperationsGetRequest object.
  Fields:
    operation: Name of the operation resource to return.
    project: Name of the project scoping this request.
    zone: Name of the zone scoping this request.
  """
  # All three fields are required path parameters.
  operation = messages.StringField(1, required=True)
  project = messages.StringField(2, required=True)
  zone = messages.StringField(3, required=True)
class ComputeZoneOperationsListRequest(messages.Message):
  """A ComputeZoneOperationsListRequest object.
  Fields:
    filter: Optional. Filter expression for filtering listed resources.
    maxResults: Optional. Maximum count of results to be returned. Maximum
      value is 500 and default value is 500.
    pageToken: Optional. Tag returned by a previous list request truncated by
      maxResults. Used to continue a previous list request.
    project: Name of the project scoping this request.
    zone: Name of the zone scoping this request.
  """
  # Standard list-request shape plus the required zone scope.
  filter = messages.StringField(1)
  maxResults = messages.IntegerField(2, variant=messages.Variant.UINT32, default=500)
  pageToken = messages.StringField(3)
  project = messages.StringField(4, required=True)
  zone = messages.StringField(5, required=True)
class ComputeZonesGetRequest(messages.Message):
  """A ComputeZonesGetRequest object.
  Fields:
    project: Name of the project scoping this request.
    zone: Name of the zone resource to return.
  """
  # Numeric field tags are protorpc wire identifiers; do not renumber.
  project = messages.StringField(1, required=True)
  zone = messages.StringField(2, required=True)
class ComputeZonesListRequest(messages.Message):
  """A ComputeZonesListRequest object.
  Fields:
    filter: Optional. Filter expression for filtering listed resources.
    maxResults: Optional. Maximum count of results to be returned. Maximum
      value is 500 and default value is 500.
    pageToken: Optional. Tag returned by a previous list request truncated by
      maxResults. Used to continue a previous list request.
    project: Name of the project scoping this request.
  """
  # Numeric field tags are protorpc wire identifiers; do not renumber.
  filter = messages.StringField(1)
  maxResults = messages.IntegerField(2, variant=messages.Variant.UINT32, default=500)
  pageToken = messages.StringField(3)
  project = messages.StringField(4, required=True)
class DeprecationStatus(messages.Message):
  """Deprecation status for a public resource.
  Enums:
    StateValueValuesEnum: The deprecation state. Can be "DEPRECATED",
      "OBSOLETE", or "DELETED". Operations which create a new resource using a
      "DEPRECATED" resource will return successfully, but with a warning
      indicating the deprecated resource and recommending its replacement. New
      uses of "OBSOLETE" or "DELETED" resources will result in an error.
  Fields:
    deleted: An optional RFC3339 timestamp on or after which the deprecation
      state of this resource will be changed to DELETED.
    deprecated: An optional RFC3339 timestamp on or after which the
      deprecation state of this resource will be changed to DEPRECATED.
    obsolete: An optional RFC3339 timestamp on or after which the deprecation
      state of this resource will be changed to OBSOLETE.
    replacement: A URL of the suggested replacement for the deprecated
      resource. The deprecated resource and its replacement must be resources
      of the same kind.
    state: The deprecation state. Can be "DEPRECATED", "OBSOLETE", or
      "DELETED". Operations which create a new resource using a "DEPRECATED"
      resource will return successfully, but with a warning indicating the
      deprecated resource and recommending its replacement. New uses of
      "OBSOLETE" or "DELETED" resources will result in an error.
  """
  class StateValueValuesEnum(messages.Enum):
    """The deprecation state. Can be "DEPRECATED", "OBSOLETE", or "DELETED".
    Operations which create a new resource using a "DEPRECATED" resource will
    return successfully, but with a warning indicating the deprecated resource
    and recommending its replacement. New uses of "OBSOLETE" or "DELETED"
    resources will result in an error.
    Values:
      DELETED: <no description>
      DEPRECATED: <no description>
      OBSOLETE: <no description>
    """
    # Numeric values are protorpc enum indexes, not API wire strings.
    DELETED = 0
    DEPRECATED = 1
    OBSOLETE = 2
  # Timestamps are carried as RFC3339 strings, not parsed datetimes.
  deleted = messages.StringField(1)
  deprecated = messages.StringField(2)
  obsolete = messages.StringField(3)
  replacement = messages.StringField(4)
  state = messages.EnumField('StateValueValuesEnum', 5)
class Disk(messages.Message):
  """A persistent disk resource.
  Enums:
    StatusValueValuesEnum: The status of disk creation (output only).
  Fields:
    creationTimestamp: Creation timestamp in RFC3339 text format (output
      only).
    description: An optional textual description of the resource; provided by
      the client when the resource is created.
    id: Unique identifier for the resource; defined by the server (output
      only).
    kind: Type of the resource.
    licenses: Public visible licenses.
    name: Name of the resource; provided by the client when the resource is
      created. The name must be 1-63 characters long, and comply with RFC1035.
    options: Internal use only.
    selfLink: Server defined URL for the resource (output only).
    sizeGb: Size of the persistent disk, specified in GB. This parameter is
      optional when creating a disk from a disk image or a snapshot, otherwise
      it is required.
    sourceImage: The source image used to create this disk.
    sourceImageId: The 'id' value of the image used to create this disk. This
      value may be used to determine whether the disk was created from the
      current or a previous instance of a given image.
    sourceSnapshot: The source snapshot used to create this disk.
    sourceSnapshotId: The 'id' value of the snapshot used to create this disk.
      This value may be used to determine whether the disk was created from
      the current or a previous instance of a given disk snapshot.
    status: The status of disk creation (output only).
    type: URL of the disk type resource describing which disk type to use to
      create the disk; provided by the client when the disk is created.
    zone: URL of the zone where the disk resides (output only).
  """
  class StatusValueValuesEnum(messages.Enum):
    """The status of disk creation (output only).
    Values:
      CREATING: <no description>
      FAILED: <no description>
      READY: <no description>
      RESTORING: <no description>
    """
    CREATING = 0
    FAILED = 1
    READY = 2
    RESTORING = 3
  # The kind default is the JSON type marker for this resource.
  creationTimestamp = messages.StringField(1)
  description = messages.StringField(2)
  id = messages.IntegerField(3, variant=messages.Variant.UINT64)
  kind = messages.StringField(4, default=u'compute#disk')
  licenses = messages.StringField(5, repeated=True)
  name = messages.StringField(6)
  options = messages.StringField(7)
  selfLink = messages.StringField(8)
  sizeGb = messages.IntegerField(9)
  sourceImage = messages.StringField(10)
  sourceImageId = messages.StringField(11)
  sourceSnapshot = messages.StringField(12)
  sourceSnapshotId = messages.StringField(13)
  status = messages.EnumField('StatusValueValuesEnum', 14)
  type = messages.StringField(15)
  zone = messages.StringField(16)
class DiskAggregatedList(messages.Message):
  """A DiskAggregatedList object.
  Messages:
    ItemsValue: A map of scoped disk lists.
  Fields:
    id: Unique identifier for the resource; defined by the server (output
      only).
    items: A map of scoped disk lists.
    kind: Type of resource.
    nextPageToken: A token used to continue a truncated list request (output
      only).
    selfLink: Server defined URL for this resource (output only).
  """
  # The decorator collects arbitrary scope-name keys into additionalProperties.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class ItemsValue(messages.Message):
    """A map of scoped disk lists.
    Messages:
      AdditionalProperty: An additional property for a ItemsValue object.
    Fields:
      additionalProperties: Name of the scope containing this set of disks.
    """
    class AdditionalProperty(messages.Message):
      """An additional property for a ItemsValue object.
      Fields:
        key: Name of the additional property.
        value: A DisksScopedList attribute.
      """
      key = messages.StringField(1)
      value = messages.MessageField('DisksScopedList', 2)
    additionalProperties = messages.MessageField('AdditionalProperty', 1, repeated=True)
  id = messages.StringField(1)
  items = messages.MessageField('ItemsValue', 2)
  kind = messages.StringField(3, default=u'compute#diskAggregatedList')
  nextPageToken = messages.StringField(4)
  selfLink = messages.StringField(5)
class DiskList(messages.Message):
  """Contains a list of persistent disk resources.
  Fields:
    id: Unique identifier for the resource; defined by the server (output
      only).
    items: The persistent disk resources.
    kind: Type of resource.
    nextPageToken: A token used to continue a truncated list request (output
      only).
    selfLink: Server defined URL for this resource (output only).
  """
  # Standard list-response shape shared by the *List messages in this module.
  id = messages.StringField(1)
  items = messages.MessageField('Disk', 2, repeated=True)
  kind = messages.StringField(3, default=u'compute#diskList')
  nextPageToken = messages.StringField(4)
  selfLink = messages.StringField(5)
class DiskType(messages.Message):
  """A disk type resource.
  Fields:
    creationTimestamp: Creation timestamp in RFC3339 text format (output
      only).
    defaultDiskSizeGb: Server defined default disk size in gb (output only).
    deprecated: The deprecation status associated with this disk type.
    description: An optional textual description of the resource.
    id: Unique identifier for the resource; defined by the server (output
      only).
    kind: Type of the resource.
    name: Name of the resource.
    selfLink: Server defined URL for the resource (output only).
    validDiskSize: An optional textual description of the valid disk size,
      e.g., "10GB-10TB".
    zone: Url of the zone where the disk type resides (output only).
  """
  # Numeric field tags are protorpc wire identifiers; do not renumber.
  creationTimestamp = messages.StringField(1)
  defaultDiskSizeGb = messages.IntegerField(2)
  deprecated = messages.MessageField('DeprecationStatus', 3)
  description = messages.StringField(4)
  id = messages.IntegerField(5, variant=messages.Variant.UINT64)
  kind = messages.StringField(6, default=u'compute#diskType')
  name = messages.StringField(7)
  selfLink = messages.StringField(8)
  validDiskSize = messages.StringField(9)
  zone = messages.StringField(10)
class DiskTypeAggregatedList(messages.Message):
  """A DiskTypeAggregatedList object.
  Messages:
    ItemsValue: A map of scoped disk type lists.
  Fields:
    id: Unique identifier for the resource; defined by the server (output
      only).
    items: A map of scoped disk type lists.
    kind: Type of resource.
    nextPageToken: A token used to continue a truncated list request (output
      only).
    selfLink: Server defined URL for this resource (output only).
  """
  # The decorator collects arbitrary scope-name keys into additionalProperties.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class ItemsValue(messages.Message):
    """A map of scoped disk type lists.
    Messages:
      AdditionalProperty: An additional property for a ItemsValue object.
    Fields:
      additionalProperties: Name of the scope containing this set of disk
        types.
    """
    class AdditionalProperty(messages.Message):
      """An additional property for a ItemsValue object.
      Fields:
        key: Name of the additional property.
        value: A DiskTypesScopedList attribute.
      """
      key = messages.StringField(1)
      value = messages.MessageField('DiskTypesScopedList', 2)
    additionalProperties = messages.MessageField('AdditionalProperty', 1, repeated=True)
  id = messages.StringField(1)
  items = messages.MessageField('ItemsValue', 2)
  kind = messages.StringField(3, default=u'compute#diskTypeAggregatedList')
  nextPageToken = messages.StringField(4)
  selfLink = messages.StringField(5)
class DiskTypeList(messages.Message):
  """Contains a list of disk type resources.
  Fields:
    id: Unique identifier for the resource; defined by the server (output
      only).
    items: The disk type resources.
    kind: Type of resource.
    nextPageToken: A token used to continue a truncated list request (output
      only).
    selfLink: Server defined URL for this resource (output only).
  """
  # Standard list-response shape shared by the *List messages in this module.
  id = messages.StringField(1)
  items = messages.MessageField('DiskType', 2, repeated=True)
  kind = messages.StringField(3, default=u'compute#diskTypeList')
  nextPageToken = messages.StringField(4)
  selfLink = messages.StringField(5)
class DiskTypesScopedList(messages.Message):
  """A DiskTypesScopedList object.
  Messages:
    WarningValue: Informational warning which replaces the list of disk types
      when the list is empty.
  Fields:
    diskTypes: List of disk types contained in this scope.
    warning: Informational warning which replaces the list of disk types when
      the list is empty.
  """
  class WarningValue(messages.Message):
    """Informational warning which replaces the list of disk types when the
    list is empty.
    Enums:
      CodeValueValuesEnum: The warning type identifier for this warning.
    Messages:
      DataValueListEntry: A DataValueListEntry object.
    Fields:
      code: The warning type identifier for this warning.
      data: Metadata for this warning in 'key: value' format.
      message: Optional human-readable details for this warning.
    """
    class CodeValueValuesEnum(messages.Enum):
      """The warning type identifier for this warning.
      Values:
        DEPRECATED_RESOURCE_USED: <no description>
        DISK_SIZE_LARGER_THAN_IMAGE_SIZE: <no description>
        INJECTED_KERNELS_DEPRECATED: <no description>
        NEXT_HOP_ADDRESS_NOT_ASSIGNED: <no description>
        NEXT_HOP_CANNOT_IP_FORWARD: <no description>
        NEXT_HOP_INSTANCE_NOT_FOUND: <no description>
        NEXT_HOP_INSTANCE_NOT_ON_NETWORK: <no description>
        NEXT_HOP_NOT_RUNNING: <no description>
        NO_RESULTS_ON_PAGE: <no description>
        REQUIRED_TOS_AGREEMENT: <no description>
        RESOURCE_NOT_DELETED: <no description>
        UNREACHABLE: <no description>
      """
      # Same warning-code enum is repeated in every *ScopedList message.
      DEPRECATED_RESOURCE_USED = 0
      DISK_SIZE_LARGER_THAN_IMAGE_SIZE = 1
      INJECTED_KERNELS_DEPRECATED = 2
      NEXT_HOP_ADDRESS_NOT_ASSIGNED = 3
      NEXT_HOP_CANNOT_IP_FORWARD = 4
      NEXT_HOP_INSTANCE_NOT_FOUND = 5
      NEXT_HOP_INSTANCE_NOT_ON_NETWORK = 6
      NEXT_HOP_NOT_RUNNING = 7
      NO_RESULTS_ON_PAGE = 8
      REQUIRED_TOS_AGREEMENT = 9
      RESOURCE_NOT_DELETED = 10
      UNREACHABLE = 11
    class DataValueListEntry(messages.Message):
      """A DataValueListEntry object.
      Fields:
        key: A key for the warning data.
        value: A warning data value corresponding to the key.
      """
      key = messages.StringField(1)
      value = messages.StringField(2)
    code = messages.EnumField('CodeValueValuesEnum', 1)
    data = messages.MessageField('DataValueListEntry', 2, repeated=True)
    message = messages.StringField(3)
  diskTypes = messages.MessageField('DiskType', 1, repeated=True)
  warning = messages.MessageField('WarningValue', 2)
class DisksScopedList(messages.Message):
  """A DisksScopedList object.
  Messages:
    WarningValue: Informational warning which replaces the list of disks when
      the list is empty.
  Fields:
    disks: List of disks contained in this scope.
    warning: Informational warning which replaces the list of disks when the
      list is empty.
  """
  class WarningValue(messages.Message):
    """Informational warning which replaces the list of disks when the list is
    empty.
    Enums:
      CodeValueValuesEnum: The warning type identifier for this warning.
    Messages:
      DataValueListEntry: A DataValueListEntry object.
    Fields:
      code: The warning type identifier for this warning.
      data: Metadata for this warning in 'key: value' format.
      message: Optional human-readable details for this warning.
    """
    class CodeValueValuesEnum(messages.Enum):
      """The warning type identifier for this warning.
      Values:
        DEPRECATED_RESOURCE_USED: <no description>
        DISK_SIZE_LARGER_THAN_IMAGE_SIZE: <no description>
        INJECTED_KERNELS_DEPRECATED: <no description>
        NEXT_HOP_ADDRESS_NOT_ASSIGNED: <no description>
        NEXT_HOP_CANNOT_IP_FORWARD: <no description>
        NEXT_HOP_INSTANCE_NOT_FOUND: <no description>
        NEXT_HOP_INSTANCE_NOT_ON_NETWORK: <no description>
        NEXT_HOP_NOT_RUNNING: <no description>
        NO_RESULTS_ON_PAGE: <no description>
        REQUIRED_TOS_AGREEMENT: <no description>
        RESOURCE_NOT_DELETED: <no description>
        UNREACHABLE: <no description>
      """
      # Same warning-code enum is repeated in every *ScopedList message.
      DEPRECATED_RESOURCE_USED = 0
      DISK_SIZE_LARGER_THAN_IMAGE_SIZE = 1
      INJECTED_KERNELS_DEPRECATED = 2
      NEXT_HOP_ADDRESS_NOT_ASSIGNED = 3
      NEXT_HOP_CANNOT_IP_FORWARD = 4
      NEXT_HOP_INSTANCE_NOT_FOUND = 5
      NEXT_HOP_INSTANCE_NOT_ON_NETWORK = 6
      NEXT_HOP_NOT_RUNNING = 7
      NO_RESULTS_ON_PAGE = 8
      REQUIRED_TOS_AGREEMENT = 9
      RESOURCE_NOT_DELETED = 10
      UNREACHABLE = 11
    class DataValueListEntry(messages.Message):
      """A DataValueListEntry object.
      Fields:
        key: A key for the warning data.
        value: A warning data value corresponding to the key.
      """
      key = messages.StringField(1)
      value = messages.StringField(2)
    code = messages.EnumField('CodeValueValuesEnum', 1)
    data = messages.MessageField('DataValueListEntry', 2, repeated=True)
    message = messages.StringField(3)
  disks = messages.MessageField('Disk', 1, repeated=True)
  warning = messages.MessageField('WarningValue', 2)
class Firewall(messages.Message):
  """A firewall resource.
  Messages:
    AllowedValueListEntry: A AllowedValueListEntry object.
  Fields:
    allowed: The list of rules specified by this firewall. Each rule specifies
      a protocol and port-range tuple that describes a permitted connection.
    creationTimestamp: Creation timestamp in RFC3339 text format (output
      only).
    description: An optional textual description of the resource; provided by
      the client when the resource is created.
    id: Unique identifier for the resource; defined by the server (output
      only).
    kind: Type of the resource.
    name: Name of the resource; provided by the client when the resource is
      created. The name must be 1-63 characters long, and comply with RFC1035.
    network: URL of the network to which this firewall is applied; provided by
      the client when the firewall is created.
    selfLink: Server defined URL for the resource (output only).
    sourceRanges: A list of IP address blocks expressed in CIDR format which
      this rule applies to. One or both of sourceRanges and sourceTags may be
      set; an inbound connection is allowed if either the range or the tag of
      the source matches.
    sourceTags: A list of instance tags which this rule applies to. One or
      both of sourceRanges and sourceTags may be set; an inbound connection is
      allowed if either the range or the tag of the source matches.
    targetTags: A list of instance tags indicating sets of instances located
      on network which may make network connections as specified in allowed.
      If no targetTags are specified, the firewall rule applies to all
      instances on the specified network.
  """
  class AllowedValueListEntry(messages.Message):
    """A AllowedValueListEntry object.
    Fields:
      IPProtocol: Required; this is the IP protocol that is allowed for this
        rule. This can either be one of the following well known protocol
        strings ["tcp", "udp", "icmp", "esp", "ah", "sctp"], or the IP
        protocol number.
      ports: An optional list of ports which are allowed. It is an error to
        specify this for any protocol that isn't UDP or TCP. Each entry must
        be either an integer or a range. If not specified, connections through
        any port are allowed. Example inputs include: ["22"], ["80","443"]
        and ["12345-12349"].
    """
    # Ports are strings because each entry may be a range like "12345-12349".
    IPProtocol = messages.StringField(1)
    ports = messages.StringField(2, repeated=True)
  allowed = messages.MessageField('AllowedValueListEntry', 1, repeated=True)
  creationTimestamp = messages.StringField(2)
  description = messages.StringField(3)
  id = messages.IntegerField(4, variant=messages.Variant.UINT64)
  kind = messages.StringField(5, default=u'compute#firewall')
  name = messages.StringField(6)
  network = messages.StringField(7)
  selfLink = messages.StringField(8)
  sourceRanges = messages.StringField(9, repeated=True)
  sourceTags = messages.StringField(10, repeated=True)
  targetTags = messages.StringField(11, repeated=True)
class FirewallList(messages.Message):
  """Contains a list of firewall resources.
  Fields:
    id: Unique identifier for the resource; defined by the server (output
      only).
    items: The firewall resources.
    kind: Type of resource.
    nextPageToken: A token used to continue a truncated list request (output
      only).
    selfLink: Server defined URL for this resource (output only).
  """
  # Standard list-response shape shared by the *List messages in this module.
  id = messages.StringField(1)
  items = messages.MessageField('Firewall', 2, repeated=True)
  kind = messages.StringField(3, default=u'compute#firewallList')
  nextPageToken = messages.StringField(4)
  selfLink = messages.StringField(5)
class ForwardingRule(messages.Message):
  """A ForwardingRule resource. A ForwardingRule resource specifies which pool
  of target VMs to forward a packet to if it matches the given [IPAddress,
  IPProtocol, portRange] tuple.
  Enums:
    IPProtocolValueValuesEnum: The IP protocol to which this rule applies,
      valid options are 'TCP', 'UDP', 'ESP', 'AH' or 'SCTP'.
  Fields:
    IPAddress: Value of the reserved IP address that this forwarding rule is
      serving on behalf of. For global forwarding rules, the address must be a
      global IP; for regional forwarding rules, the address must live in the
      same region as the forwarding rule. If left empty (default value), an
      ephemeral IP from the same scope (global or regional) will be assigned.
    IPProtocol: The IP protocol to which this rule applies, valid options are
      'TCP', 'UDP', 'ESP', 'AH' or 'SCTP'.
    creationTimestamp: Creation timestamp in RFC3339 text format (output
      only).
    description: An optional textual description of the resource; provided by
      the client when the resource is created.
    id: Unique identifier for the resource; defined by the server (output
      only).
    kind: Type of the resource.
    name: Name of the resource; provided by the client when the resource is
      created. The name must be 1-63 characters long, and comply with RFC1035.
    portRange: Applicable only when 'IPProtocol' is 'TCP', 'UDP' or 'SCTP',
      only packets addressed to ports in the specified range will be forwarded
      to 'target'. If 'portRange' is left empty (default value), all ports are
      forwarded. Forwarding rules with the same [IPAddress, IPProtocol] pair
      must have disjoint port ranges.
    region: URL of the region where the regional forwarding rule resides
      (output only). This field is not applicable to global forwarding rules.
    selfLink: Server defined URL for the resource (output only).
    target: The URL of the target resource to receive the matched traffic. For
      regional forwarding rules, this target must live in the same region as
      the forwarding rule. For global forwarding rules, this target must be a
      global TargetHttpProxy resource.
  """
  class IPProtocolValueValuesEnum(messages.Enum):
    """The IP protocol to which this rule applies, valid options are 'TCP',
    'UDP', 'ESP', 'AH' or 'SCTP'.
    Values:
      AH: <no description>
      ESP: <no description>
      SCTP: <no description>
      TCP: <no description>
      UDP: <no description>
    """
    # Numeric values are protorpc enum indexes, not IANA protocol numbers.
    AH = 0
    ESP = 1
    SCTP = 2
    TCP = 3
    UDP = 4
  IPAddress = messages.StringField(1)
  IPProtocol = messages.EnumField('IPProtocolValueValuesEnum', 2)
  creationTimestamp = messages.StringField(3)
  description = messages.StringField(4)
  id = messages.IntegerField(5, variant=messages.Variant.UINT64)
  kind = messages.StringField(6, default=u'compute#forwardingRule')
  name = messages.StringField(7)
  portRange = messages.StringField(8)
  region = messages.StringField(9)
  selfLink = messages.StringField(10)
  target = messages.StringField(11)
class ForwardingRuleAggregatedList(messages.Message):
  """A ForwardingRuleAggregatedList object.
  Messages:
    ItemsValue: A map of scoped forwarding rule lists.
  Fields:
    id: Unique identifier for the resource; defined by the server (output
      only).
    items: A map of scoped forwarding rule lists.
    kind: Type of resource.
    nextPageToken: A token used to continue a truncated list request (output
      only).
    selfLink: Server defined URL for this resource (output only).
  """
  # The decorator collects arbitrary scope-name keys into additionalProperties.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class ItemsValue(messages.Message):
    """A map of scoped forwarding rule lists.
    Messages:
      AdditionalProperty: An additional property for a ItemsValue object.
    Fields:
      additionalProperties: Name of the scope containing this set of
        forwarding rules.
    """
    class AdditionalProperty(messages.Message):
      """An additional property for a ItemsValue object.
      Fields:
        key: Name of the additional property.
        value: A ForwardingRulesScopedList attribute.
      """
      key = messages.StringField(1)
      value = messages.MessageField('ForwardingRulesScopedList', 2)
    additionalProperties = messages.MessageField('AdditionalProperty', 1, repeated=True)
  id = messages.StringField(1)
  items = messages.MessageField('ItemsValue', 2)
  kind = messages.StringField(3, default=u'compute#forwardingRuleAggregatedList')
  nextPageToken = messages.StringField(4)
  selfLink = messages.StringField(5)
class ForwardingRuleList(messages.Message):
  """Contains a list of ForwardingRule resources.
  Fields:
    id: Unique identifier for the resource; defined by the server (output
      only).
    items: The ForwardingRule resources.
    kind: Type of resource.
    nextPageToken: A token used to continue a truncated list request (output
      only).
    selfLink: Server defined URL for this resource (output only).
  """
  # Standard list-response shape shared by the *List messages in this module.
  id = messages.StringField(1)
  items = messages.MessageField('ForwardingRule', 2, repeated=True)
  kind = messages.StringField(3, default=u'compute#forwardingRuleList')
  nextPageToken = messages.StringField(4)
  selfLink = messages.StringField(5)
class ForwardingRulesScopedList(messages.Message):
  """A ForwardingRulesScopedList object.
  Messages:
    WarningValue: Informational warning which replaces the list of forwarding
      rules when the list is empty.
  Fields:
    forwardingRules: List of forwarding rules contained in this scope.
    warning: Informational warning which replaces the list of forwarding rules
      when the list is empty.
  """
  class WarningValue(messages.Message):
    """Informational warning which replaces the list of forwarding rules when
    the list is empty.
    Enums:
      CodeValueValuesEnum: The warning type identifier for this warning.
    Messages:
      DataValueListEntry: A DataValueListEntry object.
    Fields:
      code: The warning type identifier for this warning.
      data: Metadata for this warning in 'key: value' format.
      message: Optional human-readable details for this warning.
    """
    class CodeValueValuesEnum(messages.Enum):
      """The warning type identifier for this warning.
      Values:
        DEPRECATED_RESOURCE_USED: <no description>
        DISK_SIZE_LARGER_THAN_IMAGE_SIZE: <no description>
        INJECTED_KERNELS_DEPRECATED: <no description>
        NEXT_HOP_ADDRESS_NOT_ASSIGNED: <no description>
        NEXT_HOP_CANNOT_IP_FORWARD: <no description>
        NEXT_HOP_INSTANCE_NOT_FOUND: <no description>
        NEXT_HOP_INSTANCE_NOT_ON_NETWORK: <no description>
        NEXT_HOP_NOT_RUNNING: <no description>
        NO_RESULTS_ON_PAGE: <no description>
        REQUIRED_TOS_AGREEMENT: <no description>
        RESOURCE_NOT_DELETED: <no description>
        UNREACHABLE: <no description>
      """
      # Same warning-code enum is repeated in every *ScopedList message.
      DEPRECATED_RESOURCE_USED = 0
      DISK_SIZE_LARGER_THAN_IMAGE_SIZE = 1
      INJECTED_KERNELS_DEPRECATED = 2
      NEXT_HOP_ADDRESS_NOT_ASSIGNED = 3
      NEXT_HOP_CANNOT_IP_FORWARD = 4
      NEXT_HOP_INSTANCE_NOT_FOUND = 5
      NEXT_HOP_INSTANCE_NOT_ON_NETWORK = 6
      NEXT_HOP_NOT_RUNNING = 7
      NO_RESULTS_ON_PAGE = 8
      REQUIRED_TOS_AGREEMENT = 9
      RESOURCE_NOT_DELETED = 10
      UNREACHABLE = 11
    class DataValueListEntry(messages.Message):
      """A DataValueListEntry object.
      Fields:
        key: A key for the warning data.
        value: A warning data value corresponding to the key.
      """
      key = messages.StringField(1)
      value = messages.StringField(2)
    code = messages.EnumField('CodeValueValuesEnum', 1)
    data = messages.MessageField('DataValueListEntry', 2, repeated=True)
    message = messages.StringField(3)
  forwardingRules = messages.MessageField('ForwardingRule', 1, repeated=True)
  warning = messages.MessageField('WarningValue', 2)
class HealthCheckReference(messages.Message):
  """A HealthCheckReference object.
  Fields:
    healthCheck: URL of a health check resource.
  """
  # Single-field wrapper; carries a health check URL by reference.
  healthCheck = messages.StringField(1)
class HealthStatus(messages.Message):
  """A HealthStatus object.
  Enums:
    HealthStateValueValuesEnum: Health state of the instance.
  Fields:
    healthState: Health state of the instance.
    instance: URL of the instance resource.
    ipAddress: The IP address represented by this resource.
    port: The port on the instance.
  """
  class HealthStateValueValuesEnum(messages.Enum):
    """Health state of the instance.
    Values:
      HEALTHY: <no description>
      UNHEALTHY: <no description>
    """
    HEALTHY = 0
    UNHEALTHY = 1
  healthState = messages.EnumField('HealthStateValueValuesEnum', 1)
  instance = messages.StringField(2)
  ipAddress = messages.StringField(3)
  # INT32 variant: the port travels as a signed 32-bit integer on the wire.
  port = messages.IntegerField(4, variant=messages.Variant.INT32)
class HostRule(messages.Message):
  """A host-matching rule for a URL. If matched, will use the named
  PathMatcher to select the BackendService.
  Fields:
    description: A string attribute.
    hosts: The list of host patterns to match. They must be valid hostnames
      except that they may start with *. or *-. The * acts like a glob and
      will match any string of atoms (separated by .s and -s) to the left.
    pathMatcher: The name of the PathMatcher to match the path portion of the
      URL, if this HostRule matches the URL's host portion.
  """
  # Numeric field tags are protorpc wire identifiers; do not renumber.
  description = messages.StringField(1)
  hosts = messages.StringField(2, repeated=True)
  pathMatcher = messages.StringField(3)
class HttpHealthCheck(messages.Message):
  """An HttpHealthCheck resource. This resource defines a template for how
  individual VMs should be checked for health, via HTTP.
  Fields:
    checkIntervalSec: How often (in seconds) to send a health check. The
      default value is 5 seconds.
    creationTimestamp: Creation timestamp in RFC3339 text format (output
      only).
    description: An optional textual description of the resource; provided by
      the client when the resource is created.
    healthyThreshold: A so-far unhealthy VM will be marked healthy after this
      many consecutive successes. The default value is 2.
    host: The value of the host header in the HTTP health check request. If
      left empty (default value), the public IP on behalf of which this health
      check is performed will be used.
    id: Unique identifier for the resource; defined by the server (output
      only).
    kind: Type of the resource.
    name: Name of the resource; provided by the client when the resource is
      created. The name must be 1-63 characters long, and comply with RFC1035.
    port: The TCP port number for the HTTP health check request. The default
      value is 80.
    requestPath: The request path of the HTTP health check request. The
      default value is "/".
    selfLink: Server defined URL for the resource (output only).
    timeoutSec: How long (in seconds) to wait before claiming failure. The
      default value is 5 seconds.
    unhealthyThreshold: A so-far healthy VM will be marked unhealthy after
      this many consecutive failures. The default value is 2.
  """
  # Server-side defaults (5s interval, port 80, path "/", thresholds 2) are
  # documented above but not encoded here; unset fields are omitted on the wire.
  checkIntervalSec = messages.IntegerField(1, variant=messages.Variant.INT32)
  creationTimestamp = messages.StringField(2)
  description = messages.StringField(3)
  healthyThreshold = messages.IntegerField(4, variant=messages.Variant.INT32)
  host = messages.StringField(5)
  id = messages.IntegerField(6, variant=messages.Variant.UINT64)
  kind = messages.StringField(7, default=u'compute#httpHealthCheck')
  name = messages.StringField(8)
  port = messages.IntegerField(9, variant=messages.Variant.INT32)
  requestPath = messages.StringField(10)
  selfLink = messages.StringField(11)
  timeoutSec = messages.IntegerField(12, variant=messages.Variant.INT32)
  unhealthyThreshold = messages.IntegerField(13, variant=messages.Variant.INT32)
class HttpHealthCheckList(messages.Message):
  """Contains a list of HttpHealthCheck resources.
  Fields:
    id: Unique identifier for the resource; defined by the server (output
      only).
    items: The HttpHealthCheck resources.
    kind: Type of resource.
    nextPageToken: A token used to continue a truncated list request (output
      only).
    selfLink: Server defined URL for this resource (output only).
  """
  # Standard list-response shape shared by the *List messages in this module.
  id = messages.StringField(1)
  items = messages.MessageField('HttpHealthCheck', 2, repeated=True)
  kind = messages.StringField(3, default=u'compute#httpHealthCheckList')
  nextPageToken = messages.StringField(4)
  selfLink = messages.StringField(5)
class Image(messages.Message):
  """A disk image resource.

  Enums:
    SourceTypeValueValuesEnum: Must be "RAW"; provided by the client when the
      disk image is created.
    StatusValueValuesEnum: Status of the image (output only). It will be one
      of the following: READY - after image has been successfully created and
      is ready for use; FAILED - if creating the image fails for some reason;
      PENDING - the image creation is in progress. An image can be used to
      create other resources such as instances only after the image has been
      successfully created and the status is set to READY.

  Messages:
    RawDiskValue: The raw disk image parameters.

  Fields:
    archiveSizeBytes: Size of the image tar.gz archive stored in Google Cloud
      Storage (in bytes).
    creationTimestamp: Creation timestamp in RFC3339 text format (output
      only).
    deprecated: The deprecation status associated with this image.
    description: Textual description of the resource; provided by the client
      when the resource is created.
    diskSizeGb: Size of the image when restored onto a disk (in GiB).
    id: Unique identifier for the resource; defined by the server (output
      only).
    kind: Type of the resource.
    licenses: Public visible licenses.
    name: Name of the resource; provided by the client when the resource is
      created. The name must be 1-63 characters long, and comply with RFC1035.
    rawDisk: The raw disk image parameters.
    selfLink: Server defined URL for the resource (output only).
    sourceDisk: The source disk used to create this image.
    sourceDiskId: The 'id' value of the disk used to create this image. This
      value may be used to determine whether the image was taken from the
      current or a previous instance of a given disk name.
    sourceType: Must be "RAW"; provided by the client when the disk image is
      created.
    status: Status of the image (output only). It will be one of the
      following: READY - after image has been successfully created and is
      ready for use; FAILED - if creating the image fails for some reason;
      PENDING - the image creation is in progress. An image can be used to
      create other resources such as instances only after the image has been
      successfully created and the status is set to READY.
  """

  class SourceTypeValueValuesEnum(messages.Enum):
    """Must be "RAW"; provided by the client when the disk image is created.

    Values:
      RAW: <no description>
    """
    RAW = 0

  class StatusValueValuesEnum(messages.Enum):
    """Status of the image (output only). It will be one of the following:
    READY - after image has been successfully created and is ready for use;
    FAILED - if creating the image fails for some reason; PENDING - the image
    creation is in progress. An image can be used to create other resources
    such as instances only after the image has been successfully created and
    the status is set to READY.

    Values:
      FAILED: <no description>
      PENDING: <no description>
      READY: <no description>
    """
    FAILED = 0
    PENDING = 1
    READY = 2

  class RawDiskValue(messages.Message):
    """The raw disk image parameters.

    Enums:
      ContainerTypeValueValuesEnum: The format used to encode and transmit the
        block device. Should be TAR. This is just a container and transmission
        format and not a runtime format. Provided by the client when the disk
        image is created.

    Fields:
      containerType: The format used to encode and transmit the block device.
        Should be TAR. This is just a container and transmission format and
        not a runtime format. Provided by the client when the disk image is
        created.
      sha1Checksum: An optional SHA1 checksum of the disk image before
        unpackaging; provided by the client when the disk image is created.
      source: The full Google Cloud Storage URL where the disk image is
        stored; provided by the client when the disk image is created.
    """

    class ContainerTypeValueValuesEnum(messages.Enum):
      """The format used to encode and transmit the block device. Should be
      TAR. This is just a container and transmission format and not a runtime
      format. Provided by the client when the disk image is created.

      Values:
        TAR: <no description>
      """
      TAR = 0

    containerType = messages.EnumField('ContainerTypeValueValuesEnum', 1)
    sha1Checksum = messages.StringField(2)
    source = messages.StringField(3)

  archiveSizeBytes = messages.IntegerField(1)
  creationTimestamp = messages.StringField(2)
  deprecated = messages.MessageField('DeprecationStatus', 3)
  description = messages.StringField(4)
  diskSizeGb = messages.IntegerField(5)
  id = messages.IntegerField(6, variant=messages.Variant.UINT64)
  kind = messages.StringField(7, default=u'compute#image')
  licenses = messages.StringField(8, repeated=True)
  name = messages.StringField(9)
  rawDisk = messages.MessageField('RawDiskValue', 10)
  selfLink = messages.StringField(11)
  sourceDisk = messages.StringField(12)
  sourceDiskId = messages.StringField(13)
  sourceType = messages.EnumField('SourceTypeValueValuesEnum', 14, default=u'RAW')
  status = messages.EnumField('StatusValueValuesEnum', 15)
class ImageList(messages.Message):
  """Contains a list of disk image resources.

  Fields:
    id: Unique identifier for the resource; defined by the server (output
      only).
    items: The disk image resources.
    kind: Type of resource.
    nextPageToken: A token used to continue a truncated list request (output
      only).
    selfLink: Server defined URL for this resource (output only).
  """

  id = messages.StringField(1)
  items = messages.MessageField('Image', 2, repeated=True)
  kind = messages.StringField(3, default=u'compute#imageList')
  nextPageToken = messages.StringField(4)
  selfLink = messages.StringField(5)
class Instance(messages.Message):
  """An instance resource.

  Enums:
    StatusValueValuesEnum: Instance status. One of the following values:
      "PROVISIONING", "STAGING", "RUNNING", "STOPPING", "STOPPED",
      "TERMINATED" (output only).

  Fields:
    canIpForward: Allows this instance to send packets with source IP
      addresses other than its own and receive packets with destination IP
      addresses other than its own. If this instance will be used as an IP
      gateway or it will be set as the next-hop in a Route resource, say true.
      If unsure, leave this set to false.
    creationTimestamp: Creation timestamp in RFC3339 text format (output
      only).
    description: An optional textual description of the resource; provided by
      the client when the resource is created.
    disks: Array of disks associated with this instance. Persistent disks must
      be created before you can assign them.
    id: Unique identifier for the resource; defined by the server (output
      only).
    kind: Type of the resource.
    machineType: URL of the machine type resource describing which machine
      type to use to host the instance; provided by the client when the
      instance is created.
    metadata: Metadata key/value pairs assigned to this instance. Consists of
      custom metadata or predefined keys; see Instance documentation for more
      information.
    name: Name of the resource; provided by the client when the resource is
      created. The name must be 1-63 characters long, and comply with RFC1035.
    networkInterfaces: Array of configurations for this interface. This
      specifies how this interface is configured to interact with other
      network services, such as connecting to the internet. Currently,
      ONE_TO_ONE_NAT is the only access config supported. If there are no
      accessConfigs specified, then this instance will have no external
      internet access.
    scheduling: Scheduling options for this instance.
    selfLink: Server defined URL for this resource (output only).
    serviceAccounts: A list of service accounts each with specified scopes,
      for which access tokens are to be made available to the instance through
      metadata queries.
    status: Instance status. One of the following values: "PROVISIONING",
      "STAGING", "RUNNING", "STOPPING", "STOPPED", "TERMINATED" (output only).
    statusMessage: An optional, human-readable explanation of the status
      (output only).
    tags: A list of tags to be applied to this instance. Used to identify
      valid sources or targets for network firewalls. Provided by the client
      on instance creation. The tags can be later modified by the setTags
      method. Each tag within the list must comply with RFC1035.
    zone: URL of the zone where the instance resides (output only).
  """

  class StatusValueValuesEnum(messages.Enum):
    """Instance status. One of the following values: "PROVISIONING",
    "STAGING", "RUNNING", "STOPPING", "STOPPED", "TERMINATED" (output only).

    Values:
      PROVISIONING: <no description>
      RUNNING: <no description>
      STAGING: <no description>
      STOPPED: <no description>
      STOPPING: <no description>
      TERMINATED: <no description>
    """
    PROVISIONING = 0
    RUNNING = 1
    STAGING = 2
    STOPPED = 3
    STOPPING = 4
    TERMINATED = 5

  canIpForward = messages.BooleanField(1)
  creationTimestamp = messages.StringField(2)
  description = messages.StringField(3)
  disks = messages.MessageField('AttachedDisk', 4, repeated=True)
  id = messages.IntegerField(5, variant=messages.Variant.UINT64)
  kind = messages.StringField(6, default=u'compute#instance')
  machineType = messages.StringField(7)
  metadata = messages.MessageField('Metadata', 8)
  name = messages.StringField(9)
  networkInterfaces = messages.MessageField('NetworkInterface', 10, repeated=True)
  scheduling = messages.MessageField('Scheduling', 11)
  selfLink = messages.StringField(12)
  serviceAccounts = messages.MessageField('ServiceAccount', 13, repeated=True)
  status = messages.EnumField('StatusValueValuesEnum', 14)
  statusMessage = messages.StringField(15)
  tags = messages.MessageField('Tags', 16)
  zone = messages.StringField(17)
class InstanceAggregatedList(messages.Message):
  """An InstanceAggregatedList object.

  Messages:
    ItemsValue: A map of scoped instance lists.

  Fields:
    id: Unique identifier for the resource; defined by the server (output
      only).
    items: A map of scoped instance lists.
    kind: Type of resource.
    nextPageToken: A token used to continue a truncated list request (output
      only).
    selfLink: Server defined URL for this resource (output only).
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class ItemsValue(messages.Message):
    """A map of scoped instance lists.

    Messages:
      AdditionalProperty: An additional property for an ItemsValue object.

    Fields:
      additionalProperties: Name of the scope containing this set of
        instances.
    """

    class AdditionalProperty(messages.Message):
      """An additional property for an ItemsValue object.

      Fields:
        key: Name of the additional property.
        value: An InstancesScopedList attribute.
      """
      key = messages.StringField(1)
      value = messages.MessageField('InstancesScopedList', 2)

    additionalProperties = messages.MessageField('AdditionalProperty', 1, repeated=True)

  id = messages.StringField(1)
  items = messages.MessageField('ItemsValue', 2)
  kind = messages.StringField(3, default=u'compute#instanceAggregatedList')
  nextPageToken = messages.StringField(4)
  selfLink = messages.StringField(5)
class InstanceList(messages.Message):
  """Contains a list of instance resources.

  Fields:
    id: Unique identifier for the resource; defined by the server (output
      only).
    items: A list of instance resources.
    kind: Type of resource.
    nextPageToken: A token used to continue a truncated list request (output
      only).
    selfLink: Server defined URL for this resource (output only).
  """

  id = messages.StringField(1)
  items = messages.MessageField('Instance', 2, repeated=True)
  kind = messages.StringField(3, default=u'compute#instanceList')
  nextPageToken = messages.StringField(4)
  selfLink = messages.StringField(5)
class InstanceProperties(messages.Message):
  """InstanceProperties message type.

  Fields:
    canIpForward: Allows instances created based on this template to send
      packets with source IP addresses other than their own and receive
      packets with destination IP addresses other than their own. If these
      instances will be used as an IP gateway or it will be set as the next-
      hop in a Route resource, say true. If unsure, leave this set to false.
    description: An optional textual description for the instances created
      based on the instance template resource; provided by the client when the
      template is created.
    disks: Array of disks associated with instance created based on this
      template.
    machineType: Name of the machine type resource describing which machine
      type to use to host the instances created based on this template;
      provided by the client when the instance template is created.
    metadata: Metadata key/value pairs assigned to instances created based on
      this template. Consists of custom metadata or predefined keys; see
      Instance documentation for more information.
    networkInterfaces: Array of configurations for this interface. This
      specifies how this interface is configured to interact with other
      network services, such as connecting to the internet. Currently,
      ONE_TO_ONE_NAT is the only access config supported. If there are no
      accessConfigs specified, then instances created based on this
      template will have no external internet access.
    scheduling: Scheduling options for the instances created based on this
      template.
    serviceAccounts: A list of service accounts each with specified scopes,
      for which access tokens are to be made available to the instances
      created based on this template, through metadata queries.
    tags: A list of tags to be applied to the instances created based on this
      template, used to identify valid sources or targets for network
      firewalls. Provided by the client on instance creation. The tags can be
      later modified by the setTags method. Each tag within the list must
      comply with RFC1035.
  """

  canIpForward = messages.BooleanField(1)
  description = messages.StringField(2)
  disks = messages.MessageField('AttachedDisk', 3, repeated=True)
  machineType = messages.StringField(4)
  metadata = messages.MessageField('Metadata', 5)
  networkInterfaces = messages.MessageField('NetworkInterface', 6, repeated=True)
  scheduling = messages.MessageField('Scheduling', 7)
  serviceAccounts = messages.MessageField('ServiceAccount', 8, repeated=True)
  tags = messages.MessageField('Tags', 9)
class InstanceReference(messages.Message):
  """An InstanceReference object.

  Fields:
    instance: A string attribute.
  """

  instance = messages.StringField(1)
class InstanceTemplate(messages.Message):
  """An Instance Template resource.

  Fields:
    creationTimestamp: Creation timestamp in RFC3339 text format (output
      only).
    description: An optional textual description of the instance template
      resource; provided by the client when the resource is created.
    id: Unique identifier for the resource; defined by the server (output
      only).
    kind: Type of the resource.
    name: Name of the instance template resource; provided by the client when
      the resource is created. The name must be 1-63 characters long, and
      comply with RFC1035.
    properties: The instance properties portion of this instance template
      resource.
    selfLink: Server defined URL for the resource (output only).
  """

  creationTimestamp = messages.StringField(1)
  description = messages.StringField(2)
  id = messages.IntegerField(3, variant=messages.Variant.UINT64)
  kind = messages.StringField(4, default=u'compute#instanceTemplate')
  name = messages.StringField(5)
  properties = messages.MessageField('InstanceProperties', 6)
  selfLink = messages.StringField(7)
class InstanceTemplateList(messages.Message):
  """Contains a list of instance template resources.

  Fields:
    id: Unique identifier for the resource; defined by the server (output
      only).
    items: A list of instance template resources.
    kind: Type of resource.
    nextPageToken: A token used to continue a truncated list request (output
      only).
    selfLink: Server defined URL for this resource (output only).
  """

  id = messages.StringField(1)
  items = messages.MessageField('InstanceTemplate', 2, repeated=True)
  kind = messages.StringField(3, default=u'compute#instanceTemplateList')
  nextPageToken = messages.StringField(4)
  selfLink = messages.StringField(5)
class InstancesScopedList(messages.Message):
  """An InstancesScopedList object.

  Messages:
    WarningValue: Informational warning which replaces the list of instances
      when the list is empty.

  Fields:
    instances: List of instances contained in this scope.
    warning: Informational warning which replaces the list of instances when
      the list is empty.
  """

  class WarningValue(messages.Message):
    """Informational warning which replaces the list of instances when the
    list is empty.

    Enums:
      CodeValueValuesEnum: The warning type identifier for this warning.

    Messages:
      DataValueListEntry: A DataValueListEntry object.

    Fields:
      code: The warning type identifier for this warning.
      data: Metadata for this warning in 'key: value' format.
      message: Optional human-readable details for this warning.
    """

    class CodeValueValuesEnum(messages.Enum):
      """The warning type identifier for this warning.

      Values:
        DEPRECATED_RESOURCE_USED: <no description>
        DISK_SIZE_LARGER_THAN_IMAGE_SIZE: <no description>
        INJECTED_KERNELS_DEPRECATED: <no description>
        NEXT_HOP_ADDRESS_NOT_ASSIGNED: <no description>
        NEXT_HOP_CANNOT_IP_FORWARD: <no description>
        NEXT_HOP_INSTANCE_NOT_FOUND: <no description>
        NEXT_HOP_INSTANCE_NOT_ON_NETWORK: <no description>
        NEXT_HOP_NOT_RUNNING: <no description>
        NO_RESULTS_ON_PAGE: <no description>
        REQUIRED_TOS_AGREEMENT: <no description>
        RESOURCE_NOT_DELETED: <no description>
        UNREACHABLE: <no description>
      """
      DEPRECATED_RESOURCE_USED = 0
      DISK_SIZE_LARGER_THAN_IMAGE_SIZE = 1
      INJECTED_KERNELS_DEPRECATED = 2
      NEXT_HOP_ADDRESS_NOT_ASSIGNED = 3
      NEXT_HOP_CANNOT_IP_FORWARD = 4
      NEXT_HOP_INSTANCE_NOT_FOUND = 5
      NEXT_HOP_INSTANCE_NOT_ON_NETWORK = 6
      NEXT_HOP_NOT_RUNNING = 7
      NO_RESULTS_ON_PAGE = 8
      REQUIRED_TOS_AGREEMENT = 9
      RESOURCE_NOT_DELETED = 10
      UNREACHABLE = 11

    class DataValueListEntry(messages.Message):
      """A DataValueListEntry object.

      Fields:
        key: A key for the warning data.
        value: A warning data value corresponding to the key.
      """
      key = messages.StringField(1)
      value = messages.StringField(2)

    code = messages.EnumField('CodeValueValuesEnum', 1)
    data = messages.MessageField('DataValueListEntry', 2, repeated=True)
    message = messages.StringField(3)

  instances = messages.MessageField('Instance', 1, repeated=True)
  warning = messages.MessageField('WarningValue', 2)
class License(messages.Message):
  """A license resource.

  Fields:
    chargesUseFee: If true, the customer will be charged a license fee for
      running software that contains this license on an instance.
    kind: Type of resource.
    name: Name of the resource; provided by the client when the resource is
      created. The name must be 1-63 characters long, and comply with RFC1035.
    selfLink: Server defined URL for the resource (output only).
  """

  chargesUseFee = messages.BooleanField(1)
  kind = messages.StringField(2, default=u'compute#license')
  name = messages.StringField(3)
  selfLink = messages.StringField(4)
class MachineType(messages.Message):
  """A machine type resource.

  Messages:
    ScratchDisksValueListEntry: A ScratchDisksValueListEntry object.

  Fields:
    creationTimestamp: [Output Only] Creation timestamp in RFC3339 text
      format.
    deprecated: The deprecation status associated with this machine type.
    description: An optional textual description of the resource.
    guestCpus: Count of CPUs exposed to the instance.
    id: [Output Only] Unique identifier for the resource; defined by the
      server.
    imageSpaceGb: Space allotted for the image, defined in GB.
    kind: Type of the resource.
    maximumPersistentDisks: Maximum persistent disks allowed.
    maximumPersistentDisksSizeGb: Maximum total persistent disks size (GB)
      allowed.
    memoryMb: Physical memory assigned to the instance, defined in MB.
    name: Name of the resource.
    scratchDisks: List of extended scratch disks assigned to the instance.
    selfLink: [Output Only] Server defined URL for the resource.
    zone: [Output Only] The name of the zone where the machine type resides,
      such as us-central1-a.
  """

  class ScratchDisksValueListEntry(messages.Message):
    """A ScratchDisksValueListEntry object.

    Fields:
      diskGb: Size of the scratch disk, defined in GB.
    """
    diskGb = messages.IntegerField(1, variant=messages.Variant.INT32)

  creationTimestamp = messages.StringField(1)
  deprecated = messages.MessageField('DeprecationStatus', 2)
  description = messages.StringField(3)
  guestCpus = messages.IntegerField(4, variant=messages.Variant.INT32)
  id = messages.IntegerField(5, variant=messages.Variant.UINT64)
  imageSpaceGb = messages.IntegerField(6, variant=messages.Variant.INT32)
  kind = messages.StringField(7, default=u'compute#machineType')
  maximumPersistentDisks = messages.IntegerField(8, variant=messages.Variant.INT32)
  maximumPersistentDisksSizeGb = messages.IntegerField(9)
  memoryMb = messages.IntegerField(10, variant=messages.Variant.INT32)
  name = messages.StringField(11)
  scratchDisks = messages.MessageField('ScratchDisksValueListEntry', 12, repeated=True)
  selfLink = messages.StringField(13)
  zone = messages.StringField(14)
class MachineTypeAggregatedList(messages.Message):
  """A MachineTypeAggregatedList object.

  Messages:
    ItemsValue: A map of scoped machine type lists.

  Fields:
    id: [Output Only] Unique identifier for the resource; defined by the
      server.
    items: A map of scoped machine type lists.
    kind: Type of resource.
    nextPageToken: [Output Only] A token used to continue a truncated list
      request.
    selfLink: Server defined URL for this resource (output only).
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class ItemsValue(messages.Message):
    """A map of scoped machine type lists.

    Messages:
      AdditionalProperty: An additional property for an ItemsValue object.

    Fields:
      additionalProperties: Name of the scope containing this set of machine
        types.
    """

    class AdditionalProperty(messages.Message):
      """An additional property for an ItemsValue object.

      Fields:
        key: Name of the additional property.
        value: A MachineTypesScopedList attribute.
      """
      key = messages.StringField(1)
      value = messages.MessageField('MachineTypesScopedList', 2)

    additionalProperties = messages.MessageField('AdditionalProperty', 1, repeated=True)

  id = messages.StringField(1)
  items = messages.MessageField('ItemsValue', 2)
  kind = messages.StringField(3, default=u'compute#machineTypeAggregatedList')
  nextPageToken = messages.StringField(4)
  selfLink = messages.StringField(5)
class MachineTypeList(messages.Message):
  """Contains a list of machine type resources.

  Fields:
    id: Unique identifier for the resource; defined by the server (output
      only).
    items: The machine type resources.
    kind: Type of resource.
    nextPageToken: A token used to continue a truncated list request (output
      only).
    selfLink: Server defined URL for this resource (output only).
  """

  id = messages.StringField(1)
  items = messages.MessageField('MachineType', 2, repeated=True)
  kind = messages.StringField(3, default=u'compute#machineTypeList')
  nextPageToken = messages.StringField(4)
  selfLink = messages.StringField(5)
class MachineTypesScopedList(messages.Message):
  """A MachineTypesScopedList object.

  Messages:
    WarningValue: An informational warning that appears when the machine types
      list is empty.

  Fields:
    machineTypes: List of machine types contained in this scope.
    warning: An informational warning that appears when the machine types list
      is empty.
  """

  class WarningValue(messages.Message):
    """An informational warning that appears when the machine types list is
    empty.

    Enums:
      CodeValueValuesEnum: The warning type identifier for this warning.

    Messages:
      DataValueListEntry: A DataValueListEntry object.

    Fields:
      code: The warning type identifier for this warning.
      data: Metadata for this warning in 'key: value' format.
      message: Optional human-readable details for this warning.
    """

    class CodeValueValuesEnum(messages.Enum):
      """The warning type identifier for this warning.

      Values:
        DEPRECATED_RESOURCE_USED: <no description>
        DISK_SIZE_LARGER_THAN_IMAGE_SIZE: <no description>
        INJECTED_KERNELS_DEPRECATED: <no description>
        NEXT_HOP_ADDRESS_NOT_ASSIGNED: <no description>
        NEXT_HOP_CANNOT_IP_FORWARD: <no description>
        NEXT_HOP_INSTANCE_NOT_FOUND: <no description>
        NEXT_HOP_INSTANCE_NOT_ON_NETWORK: <no description>
        NEXT_HOP_NOT_RUNNING: <no description>
        NO_RESULTS_ON_PAGE: <no description>
        REQUIRED_TOS_AGREEMENT: <no description>
        RESOURCE_NOT_DELETED: <no description>
        UNREACHABLE: <no description>
      """
      DEPRECATED_RESOURCE_USED = 0
      DISK_SIZE_LARGER_THAN_IMAGE_SIZE = 1
      INJECTED_KERNELS_DEPRECATED = 2
      NEXT_HOP_ADDRESS_NOT_ASSIGNED = 3
      NEXT_HOP_CANNOT_IP_FORWARD = 4
      NEXT_HOP_INSTANCE_NOT_FOUND = 5
      NEXT_HOP_INSTANCE_NOT_ON_NETWORK = 6
      NEXT_HOP_NOT_RUNNING = 7
      NO_RESULTS_ON_PAGE = 8
      REQUIRED_TOS_AGREEMENT = 9
      RESOURCE_NOT_DELETED = 10
      UNREACHABLE = 11

    class DataValueListEntry(messages.Message):
      """A DataValueListEntry object.

      Fields:
        key: A key for the warning data.
        value: A warning data value corresponding to the key.
      """
      key = messages.StringField(1)
      value = messages.StringField(2)

    code = messages.EnumField('CodeValueValuesEnum', 1)
    data = messages.MessageField('DataValueListEntry', 2, repeated=True)
    message = messages.StringField(3)

  machineTypes = messages.MessageField('MachineType', 1, repeated=True)
  warning = messages.MessageField('WarningValue', 2)
class Metadata(messages.Message):
  """A metadata key/value entry.

  Messages:
    ItemsValueListEntry: An ItemsValueListEntry object.

  Fields:
    fingerprint: Fingerprint of this resource. A hash of the metadata's
      contents. This field is used for optimistic locking. An up-to-date
      metadata fingerprint must be provided in order to modify metadata.
    items: Array of key/value pairs. The total size of all keys and values
      must be less than 512 KB.
    kind: Type of the resource.
  """

  class ItemsValueListEntry(messages.Message):
    """An ItemsValueListEntry object.

    Fields:
      key: Key for the metadata entry. Keys must conform to the following
        regexp: [a-zA-Z0-9-_]+, and be less than 128 bytes in length. This is
        reflected as part of a URL in the metadata server. Additionally, to
        avoid ambiguity, keys must not conflict with any other metadata keys
        for the project.
      value: Value for the metadata entry. These are free-form strings, and
        only have meaning as interpreted by the image running in the instance.
        The only restriction placed on values is that their size must be less
        than or equal to 32768 bytes.
    """
    key = messages.StringField(1)
    value = messages.StringField(2)

  fingerprint = messages.BytesField(1)
  items = messages.MessageField('ItemsValueListEntry', 2, repeated=True)
  kind = messages.StringField(3, default=u'compute#metadata')
class Network(messages.Message):
  """A network resource.

  Fields:
    IPv4Range: Required; The range of internal addresses that are legal on
      this network. This range is a CIDR specification, for example:
      192.168.0.0/16. Provided by the client when the network is created.
    creationTimestamp: Creation timestamp in RFC3339 text format (output
      only).
    description: An optional textual description of the resource; provided by
      the client when the resource is created.
    gatewayIPv4: An optional address that is used for default routing to other
      networks. This must be within the range specified by IPv4Range, and is
      typically the first usable address in that range. If not specified, the
      default value is the first usable address in IPv4Range.
    id: Unique identifier for the resource; defined by the server (output
      only).
    kind: Type of the resource.
    name: Name of the resource; provided by the client when the resource is
      created. The name must be 1-63 characters long, and comply with RFC1035.
    selfLink: Server defined URL for the resource (output only).
  """

  IPv4Range = messages.StringField(1)
  creationTimestamp = messages.StringField(2)
  description = messages.StringField(3)
  gatewayIPv4 = messages.StringField(4)
  id = messages.IntegerField(5, variant=messages.Variant.UINT64)
  kind = messages.StringField(6, default=u'compute#network')
  name = messages.StringField(7)
  selfLink = messages.StringField(8)
class NetworkInterface(messages.Message):
  """A network interface resource attached to an instance.

  Fields:
    accessConfigs: Array of configurations for this interface. This specifies
      how this interface is configured to interact with other network
      services, such as connecting to the internet. Currently, ONE_TO_ONE_NAT
      is the only access config supported. If there are no accessConfigs
      specified, then this instance will have no external internet access.
    name: Name of the network interface, determined by the server; for network
      devices, these are e.g. eth0, eth1, etc. (output only).
    network: URL of the network resource attached to this interface.
    networkIP: An optional IPV4 internal network address assigned to the
      instance for this network interface (output only).
  """

  accessConfigs = messages.MessageField('AccessConfig', 1, repeated=True)
  name = messages.StringField(2)
  network = messages.StringField(3)
  networkIP = messages.StringField(4)
class NetworkList(messages.Message):
  """Contains a list of network resources.

  Fields:
    id: Unique identifier for the resource; defined by the server (output
      only).
    items: The network resources.
    kind: Type of resource.
    nextPageToken: A token used to continue a truncated list request (output
      only).
    selfLink: Server defined URL for this resource (output only).
  """

  id = messages.StringField(1)
  items = messages.MessageField('Network', 2, repeated=True)
  kind = messages.StringField(3, default=u'compute#networkList')
  nextPageToken = messages.StringField(4)
  selfLink = messages.StringField(5)
class Operation(messages.Message):
"""An operation resource, used to manage asynchronous API requests.
Enums:
StatusValueValuesEnum: Status of the operation. Can be one of the
following: "PENDING", "RUNNING", or "DONE" (output only).
Messages:
ErrorValue: If errors occurred during processing of this operation, this
field will be populated (output only).
WarningsValueListEntry: A WarningsValueListEntry object.
Fields:
clientOperationId: An optional identifier specified by the client when the
mutation was initiated. Must be unique for all operation resources in
the project (output only).
creationTimestamp: Creation timestamp in RFC3339 text format (output
only).
endTime: The time that this operation was completed. This is in RFC 3339
format (output only).
error: If errors occurred during processing of this operation, this field
will be populated (output only).
httpErrorMessage: If operation fails, the HTTP error message returned,
e.g. NOT FOUND. (output only).
httpErrorStatusCode: If operation fails, the HTTP error status code
returned, e.g. 404. (output only).
id: Unique identifier for the resource; defined by the server (output
only).
insertTime: The time that this operation was requested. This is in RFC
3339 format (output only).
kind: Type of the resource.
name: Name of the resource (output only).
operationType: Type of the operation. Examples include "insert", "update",
and "delete" (output only).
progress: An optional progress indicator that ranges from 0 to 100. There
is no requirement that this be linear or support any granularity of
operations. This should not be used to guess at when the operation will
be complete. This number should be monotonically increasing as the
operation progresses (output only).
region: URL of the region where the operation resides (output only).
selfLink: Server defined URL for the resource (output only).
startTime: The time that this operation was started by the server. This is
in RFC 3339 format (output only).
status: Status of the operation. Can be one of the following: "PENDING",
"RUNNING", or "DONE" (output only).
statusMessage: An optional textual description of the current status of
the operation (output only).
targetId: Unique target id which identifies a particular incarnation of
the target (output only).
targetLink: URL of the resource the operation is mutating (output only).
user: User who requested the operation, for example "user@example.com"
(output only).
warnings: If warning messages generated during processing of this
operation, this field will be populated (output only).
zone: URL of the zone where the operation resides (output only).
"""
class StatusValueValuesEnum(messages.Enum):
"""Status of the operation. Can be one of the following: "PENDING",
"RUNNING", or "DONE" (output only).
Values:
DONE: <no description>
PENDING: <no description>
RUNNING: <no description>
"""
DONE = 0
PENDING = 1
RUNNING = 2
  class ErrorValue(messages.Message):
    """If errors occurred during processing of this operation, this field
    will be populated (output only).

    Messages:
      ErrorsValueListEntry: A ErrorsValueListEntry object.

    Fields:
      errors: The array of errors encountered while processing this
        operation.
    """

    class ErrorsValueListEntry(messages.Message):
      """A single error encountered while processing the operation.

      Fields:
        code: The error type identifier for this error.
        location: Indicates the field in the request which caused the error.
          This property is optional.
        message: An optional, human-readable error message.
      """
      code = messages.StringField(1)
      location = messages.StringField(2)
      message = messages.StringField(3)

    errors = messages.MessageField('ErrorsValueListEntry', 1, repeated=True)
  class WarningsValueListEntry(messages.Message):
    """A single warning generated while processing the operation.

    Enums:
      CodeValueValuesEnum: The warning type identifier for this warning.

    Messages:
      DataValueListEntry: A DataValueListEntry object.

    Fields:
      code: The warning type identifier for this warning.
      data: Metadata for this warning in 'key: value' format.
      message: Optional human-readable details for this warning.
    """

    class CodeValueValuesEnum(messages.Enum):
      """The warning type identifier for this warning.

      Values:
        DEPRECATED_RESOURCE_USED: <no description>
        DISK_SIZE_LARGER_THAN_IMAGE_SIZE: <no description>
        INJECTED_KERNELS_DEPRECATED: <no description>
        NEXT_HOP_ADDRESS_NOT_ASSIGNED: <no description>
        NEXT_HOP_CANNOT_IP_FORWARD: <no description>
        NEXT_HOP_INSTANCE_NOT_FOUND: <no description>
        NEXT_HOP_INSTANCE_NOT_ON_NETWORK: <no description>
        NEXT_HOP_NOT_RUNNING: <no description>
        NO_RESULTS_ON_PAGE: <no description>
        REQUIRED_TOS_AGREEMENT: <no description>
        RESOURCE_NOT_DELETED: <no description>
        UNREACHABLE: <no description>
      """
      # Enum numbers are part of the protorpc wire encoding; do not renumber.
      DEPRECATED_RESOURCE_USED = 0
      DISK_SIZE_LARGER_THAN_IMAGE_SIZE = 1
      INJECTED_KERNELS_DEPRECATED = 2
      NEXT_HOP_ADDRESS_NOT_ASSIGNED = 3
      NEXT_HOP_CANNOT_IP_FORWARD = 4
      NEXT_HOP_INSTANCE_NOT_FOUND = 5
      NEXT_HOP_INSTANCE_NOT_ON_NETWORK = 6
      NEXT_HOP_NOT_RUNNING = 7
      NO_RESULTS_ON_PAGE = 8
      REQUIRED_TOS_AGREEMENT = 9
      RESOURCE_NOT_DELETED = 10
      UNREACHABLE = 11

    class DataValueListEntry(messages.Message):
      """A single 'key: value' metadata entry for the warning.

      Fields:
        key: A key for the warning data.
        value: A warning data value corresponding to the key.
      """
      key = messages.StringField(1)
      value = messages.StringField(2)

    code = messages.EnumField('CodeValueValuesEnum', 1)
    data = messages.MessageField('DataValueListEntry', 2, repeated=True)
    message = messages.StringField(3)
clientOperationId = messages.StringField(1)
creationTimestamp = messages.StringField(2)
endTime = messages.StringField(3)
error = messages.MessageField('ErrorValue', 4)
httpErrorMessage = messages.StringField(5)
httpErrorStatusCode = messages.IntegerField(6, variant=messages.Variant.INT32)
id = messages.IntegerField(7, variant=messages.Variant.UINT64)
insertTime = messages.StringField(8)
kind = messages.StringField(9, default=u'compute#operation')
name = messages.StringField(10)
operationType = messages.StringField(11)
progress = messages.IntegerField(12, variant=messages.Variant.INT32)
region = messages.StringField(13)
selfLink = messages.StringField(14)
startTime = messages.StringField(15)
status = messages.EnumField('StatusValueValuesEnum', 16)
statusMessage = messages.StringField(17)
targetId = messages.IntegerField(18, variant=messages.Variant.UINT64)
targetLink = messages.StringField(19)
user = messages.StringField(20)
warnings = messages.MessageField('WarningsValueListEntry', 21, repeated=True)
zone = messages.StringField(22)
class OperationAggregatedList(messages.Message):
  """A map of operation lists, keyed by scope (e.g. a zone or region).

  Messages:
    ItemsValue: A map of scoped operation lists.

  Fields:
    id: Unique identifier for the resource; defined by the server (output
      only).
    items: A map of scoped operation lists.
    kind: Type of resource.
    nextPageToken: A token used to continue a truncated list request (output
      only).
    selfLink: Server defined URL for this resource (output only).
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class ItemsValue(messages.Message):
    """A map of scoped operation lists.

    Messages:
      AdditionalProperty: An additional property for a ItemsValue object.

    Fields:
      additionalProperties: Name of the scope containing this set of
        operations.
    """

    class AdditionalProperty(messages.Message):
      """A single map entry: scope name -> operations in that scope.

      Fields:
        key: Name of the additional property.
        value: A OperationsScopedList attribute.
      """
      key = messages.StringField(1)
      value = messages.MessageField('OperationsScopedList', 2)

    additionalProperties = messages.MessageField('AdditionalProperty', 1, repeated=True)

  id = messages.StringField(1)
  items = messages.MessageField('ItemsValue', 2)
  kind = messages.StringField(3, default=u'compute#operationAggregatedList')
  nextPageToken = messages.StringField(4)
  selfLink = messages.StringField(5)
class OperationList(messages.Message):
  """Contains a list of operation resources.

  Fields:
    id: Unique identifier for the resource; defined by the server (output
      only).
    items: The operation resources.
    kind: Type of resource.
    nextPageToken: A token used to continue a truncated list request (output
      only).
    selfLink: Server defined URL for this resource (output only).
  """
  id = messages.StringField(1)
  items = messages.MessageField('Operation', 2, repeated=True)
  kind = messages.StringField(3, default=u'compute#operationList')
  nextPageToken = messages.StringField(4)
  selfLink = messages.StringField(5)
class OperationsScopedList(messages.Message):
  """The operations contained in a single scope of an aggregated list.

  Messages:
    WarningValue: Informational warning which replaces the list of operations
      when the list is empty.

  Fields:
    operations: List of operations contained in this scope.
    warning: Informational warning which replaces the list of operations when
      the list is empty.
  """

  class WarningValue(messages.Message):
    """Informational warning which replaces the list of operations when the
    list is empty.

    Enums:
      CodeValueValuesEnum: The warning type identifier for this warning.

    Messages:
      DataValueListEntry: A DataValueListEntry object.

    Fields:
      code: The warning type identifier for this warning.
      data: Metadata for this warning in 'key: value' format.
      message: Optional human-readable details for this warning.
    """

    class CodeValueValuesEnum(messages.Enum):
      """The warning type identifier for this warning.

      Values:
        DEPRECATED_RESOURCE_USED: <no description>
        DISK_SIZE_LARGER_THAN_IMAGE_SIZE: <no description>
        INJECTED_KERNELS_DEPRECATED: <no description>
        NEXT_HOP_ADDRESS_NOT_ASSIGNED: <no description>
        NEXT_HOP_CANNOT_IP_FORWARD: <no description>
        NEXT_HOP_INSTANCE_NOT_FOUND: <no description>
        NEXT_HOP_INSTANCE_NOT_ON_NETWORK: <no description>
        NEXT_HOP_NOT_RUNNING: <no description>
        NO_RESULTS_ON_PAGE: <no description>
        REQUIRED_TOS_AGREEMENT: <no description>
        RESOURCE_NOT_DELETED: <no description>
        UNREACHABLE: <no description>
      """
      # Enum numbers are part of the protorpc wire encoding; do not renumber.
      DEPRECATED_RESOURCE_USED = 0
      DISK_SIZE_LARGER_THAN_IMAGE_SIZE = 1
      INJECTED_KERNELS_DEPRECATED = 2
      NEXT_HOP_ADDRESS_NOT_ASSIGNED = 3
      NEXT_HOP_CANNOT_IP_FORWARD = 4
      NEXT_HOP_INSTANCE_NOT_FOUND = 5
      NEXT_HOP_INSTANCE_NOT_ON_NETWORK = 6
      NEXT_HOP_NOT_RUNNING = 7
      NO_RESULTS_ON_PAGE = 8
      REQUIRED_TOS_AGREEMENT = 9
      RESOURCE_NOT_DELETED = 10
      UNREACHABLE = 11

    class DataValueListEntry(messages.Message):
      """A single 'key: value' metadata entry for the warning.

      Fields:
        key: A key for the warning data.
        value: A warning data value corresponding to the key.
      """
      key = messages.StringField(1)
      value = messages.StringField(2)

    code = messages.EnumField('CodeValueValuesEnum', 1)
    data = messages.MessageField('DataValueListEntry', 2, repeated=True)
    message = messages.StringField(3)

  operations = messages.MessageField('Operation', 1, repeated=True)
  warning = messages.MessageField('WarningValue', 2)
class PathMatcher(messages.Message):
  """A matcher for the path portion of the URL. The BackendService from the
  longest-matched rule will serve the URL. If no rule was matched, the
  default_service will be used.

  Fields:
    defaultService: The URL to the BackendService resource. This will be used
      if none of the 'pathRules' defined by this PathMatcher is met by the
      URL's path portion.
    description: A string attribute.
    name: The name to which this PathMatcher is referred by the HostRule.
    pathRules: The list of path rules.
  """
  defaultService = messages.StringField(1)
  description = messages.StringField(2)
  name = messages.StringField(3)
  pathRules = messages.MessageField('PathRule', 4, repeated=True)
class PathRule(messages.Message):
  """A path-matching rule for a URL. If matched, will use the specified
  BackendService to handle the traffic arriving at this URL.

  Fields:
    paths: The list of path patterns to match. Each must start with / and the
      only place a * is allowed is at the end following a /. The string fed
      to the path matcher does not include any text after the first ? or #,
      and those chars are not allowed here.
    service: The URL of the BackendService resource if this rule is matched.
  """
  paths = messages.StringField(1, repeated=True)
  service = messages.StringField(2)
class Project(messages.Message):
  """A project resource. Projects can be created only in the APIs Console.
  Unless marked otherwise, values can only be modified in the console.

  Fields:
    commonInstanceMetadata: Metadata key/value pairs available to all
      instances contained in this project.
    creationTimestamp: Creation timestamp in RFC3339 text format (output
      only).
    description: An optional textual description of the resource.
    id: Unique identifier for the resource; defined by the server (output
      only).
    kind: Type of the resource.
    name: Name of the resource.
    quotas: Quotas assigned to this project.
    selfLink: Server defined URL for the resource (output only).
    usageExportLocation: The location in Cloud Storage and naming method of
      the daily usage report.
  """
  commonInstanceMetadata = messages.MessageField('Metadata', 1)
  creationTimestamp = messages.StringField(2)
  description = messages.StringField(3)
  id = messages.IntegerField(4, variant=messages.Variant.UINT64)
  kind = messages.StringField(5, default=u'compute#project')
  name = messages.StringField(6)
  quotas = messages.MessageField('Quota', 7, repeated=True)
  selfLink = messages.StringField(8)
  usageExportLocation = messages.MessageField('UsageExportLocation', 9)
class Quota(messages.Message):
  """A quotas entry.

  Enums:
    MetricValueValuesEnum: Name of the quota metric.

  Fields:
    limit: Quota limit for this metric.
    metric: Name of the quota metric.
    usage: Current usage of this metric.
  """

  class MetricValueValuesEnum(messages.Enum):
    """Name of the quota metric.

    Values:
      BACKEND_SERVICES: <no description>
      CPUS: <no description>
      DISKS: <no description>
      DISKS_TOTAL_GB: <no description>
      EPHEMERAL_ADDRESSES: <no description>
      FIREWALLS: <no description>
      FORWARDING_RULES: <no description>
      HEALTH_CHECKS: <no description>
      IMAGES: <no description>
      IMAGES_TOTAL_GB: <no description>
      INSTANCES: <no description>
      IN_USE_ADDRESSES: <no description>
      KERNELS: <no description>
      KERNELS_TOTAL_GB: <no description>
      LOCAL_SSD_TOTAL_GB: <no description>
      NETWORKS: <no description>
      OPERATIONS: <no description>
      ROUTES: <no description>
      SNAPSHOTS: <no description>
      SSD_TOTAL_GB: <no description>
      STATIC_ADDRESSES: <no description>
      TARGET_HTTP_PROXIES: <no description>
      TARGET_INSTANCES: <no description>
      TARGET_POOLS: <no description>
      URL_MAPS: <no description>
    """
    # Enum numbers are part of the protorpc wire encoding; do not renumber.
    BACKEND_SERVICES = 0
    CPUS = 1
    DISKS = 2
    DISKS_TOTAL_GB = 3
    EPHEMERAL_ADDRESSES = 4
    FIREWALLS = 5
    FORWARDING_RULES = 6
    HEALTH_CHECKS = 7
    IMAGES = 8
    IMAGES_TOTAL_GB = 9
    INSTANCES = 10
    IN_USE_ADDRESSES = 11
    KERNELS = 12
    KERNELS_TOTAL_GB = 13
    LOCAL_SSD_TOTAL_GB = 14
    NETWORKS = 15
    OPERATIONS = 16
    ROUTES = 17
    SNAPSHOTS = 18
    SSD_TOTAL_GB = 19
    STATIC_ADDRESSES = 20
    TARGET_HTTP_PROXIES = 21
    TARGET_INSTANCES = 22
    TARGET_POOLS = 23
    URL_MAPS = 24

  limit = messages.FloatField(1)
  metric = messages.EnumField('MetricValueValuesEnum', 2)
  usage = messages.FloatField(3)
class Region(messages.Message):
  """Region resource.

  Enums:
    StatusValueValuesEnum: Status of the region, "UP" or "DOWN".

  Fields:
    creationTimestamp: Creation timestamp in RFC3339 text format (output
      only).
    deprecated: The deprecation status associated with this region.
    description: Textual description of the resource.
    id: Unique identifier for the resource; defined by the server (output
      only).
    kind: Type of the resource.
    name: Name of the resource.
    quotas: Quotas assigned to this region.
    selfLink: Server defined URL for the resource (output only).
    status: Status of the region, "UP" or "DOWN".
    zones: A list of zones homed in this region, in the form of resource
      URLs.
  """

  class StatusValueValuesEnum(messages.Enum):
    """Status of the region, "UP" or "DOWN".

    Values:
      DOWN: <no description>
      UP: <no description>
    """
    DOWN = 0
    UP = 1

  creationTimestamp = messages.StringField(1)
  deprecated = messages.MessageField('DeprecationStatus', 2)
  description = messages.StringField(3)
  id = messages.IntegerField(4, variant=messages.Variant.UINT64)
  kind = messages.StringField(5, default=u'compute#region')
  name = messages.StringField(6)
  quotas = messages.MessageField('Quota', 7, repeated=True)
  selfLink = messages.StringField(8)
  status = messages.EnumField('StatusValueValuesEnum', 9)
  zones = messages.StringField(10, repeated=True)
class RegionList(messages.Message):
  """Contains a list of region resources.

  Fields:
    id: Unique identifier for the resource; defined by the server (output
      only).
    items: The region resources.
    kind: Type of resource.
    nextPageToken: A token used to continue a truncated list request (output
      only).
    selfLink: Server defined URL for this resource (output only).
  """
  id = messages.StringField(1)
  items = messages.MessageField('Region', 2, repeated=True)
  kind = messages.StringField(3, default=u'compute#regionList')
  nextPageToken = messages.StringField(4)
  selfLink = messages.StringField(5)
class ResourceGroupReference(messages.Message):
  """A reference to a resource group used by a backend service.

  Fields:
    group: A URI referencing one of the resource views listed in the backend
      service.
  """
  group = messages.StringField(1)
class Route(messages.Message):
  """The route resource. A Route is a rule that specifies how certain packets
  should be handled by the virtual network. Routes are associated with VMs by
  tag and the set of Routes for a particular VM is called its routing table.
  For each packet leaving a VM, the system searches that VM's routing table
  for a single best matching Route. Routes match packets by destination IP
  address, preferring smaller or more specific ranges over larger ones. If
  there is a tie, the system selects the Route with the smallest priority
  value. If there is still a tie, it uses the layer three and four packet
  headers to select just one of the remaining matching Routes. The packet is
  then forwarded as specified by the next_hop field of the winning Route --
  either to another VM destination, a VM gateway or a GCE operated gateway.
  Packets that do not match any Route in the sending VM's routing table will
  be dropped.

  Messages:
    WarningsValueListEntry: A WarningsValueListEntry object.

  Fields:
    creationTimestamp: Creation timestamp in RFC3339 text format (output
      only).
    description: An optional textual description of the resource; provided
      by the client when the resource is created.
    destRange: Which packets does this route apply to?
    id: Unique identifier for the resource; defined by the server (output
      only).
    kind: Type of the resource.
    name: Name of the resource; provided by the client when the resource is
      created. The name must be 1-63 characters long, and comply with
      RFC1035.
    network: URL of the network to which this route is applied; provided by
      the client when the route is created.
    nextHopGateway: The URL to a gateway that should handle matching packets.
    nextHopInstance: The URL to an instance that should handle matching
      packets.
    nextHopIp: The network IP address of an instance that should handle
      matching packets.
    nextHopNetwork: The URL of the local network if it should handle
      matching packets.
    priority: Breaks ties between Routes of equal specificity. Routes with
      smaller values win when tied with routes with larger values.
    selfLink: Server defined URL for the resource (output only).
    tags: A list of instance tags to which this route applies.
    warnings: If potential misconfigurations are detected for this route,
      this field will be populated with warning messages.
  """

  class WarningsValueListEntry(messages.Message):
    """A single warning about a potential misconfiguration of this route.

    Enums:
      CodeValueValuesEnum: The warning type identifier for this warning.

    Messages:
      DataValueListEntry: A DataValueListEntry object.

    Fields:
      code: The warning type identifier for this warning.
      data: Metadata for this warning in 'key: value' format.
      message: Optional human-readable details for this warning.
    """

    class CodeValueValuesEnum(messages.Enum):
      """The warning type identifier for this warning.

      Values:
        DEPRECATED_RESOURCE_USED: <no description>
        DISK_SIZE_LARGER_THAN_IMAGE_SIZE: <no description>
        INJECTED_KERNELS_DEPRECATED: <no description>
        NEXT_HOP_ADDRESS_NOT_ASSIGNED: <no description>
        NEXT_HOP_CANNOT_IP_FORWARD: <no description>
        NEXT_HOP_INSTANCE_NOT_FOUND: <no description>
        NEXT_HOP_INSTANCE_NOT_ON_NETWORK: <no description>
        NEXT_HOP_NOT_RUNNING: <no description>
        NO_RESULTS_ON_PAGE: <no description>
        REQUIRED_TOS_AGREEMENT: <no description>
        RESOURCE_NOT_DELETED: <no description>
        UNREACHABLE: <no description>
      """
      # Enum numbers are part of the protorpc wire encoding; do not renumber.
      DEPRECATED_RESOURCE_USED = 0
      DISK_SIZE_LARGER_THAN_IMAGE_SIZE = 1
      INJECTED_KERNELS_DEPRECATED = 2
      NEXT_HOP_ADDRESS_NOT_ASSIGNED = 3
      NEXT_HOP_CANNOT_IP_FORWARD = 4
      NEXT_HOP_INSTANCE_NOT_FOUND = 5
      NEXT_HOP_INSTANCE_NOT_ON_NETWORK = 6
      NEXT_HOP_NOT_RUNNING = 7
      NO_RESULTS_ON_PAGE = 8
      REQUIRED_TOS_AGREEMENT = 9
      RESOURCE_NOT_DELETED = 10
      UNREACHABLE = 11

    class DataValueListEntry(messages.Message):
      """A single 'key: value' metadata entry for the warning.

      Fields:
        key: A key for the warning data.
        value: A warning data value corresponding to the key.
      """
      key = messages.StringField(1)
      value = messages.StringField(2)

    code = messages.EnumField('CodeValueValuesEnum', 1)
    data = messages.MessageField('DataValueListEntry', 2, repeated=True)
    message = messages.StringField(3)

  creationTimestamp = messages.StringField(1)
  description = messages.StringField(2)
  destRange = messages.StringField(3)
  id = messages.IntegerField(4, variant=messages.Variant.UINT64)
  kind = messages.StringField(5, default=u'compute#route')
  name = messages.StringField(6)
  network = messages.StringField(7)
  nextHopGateway = messages.StringField(8)
  nextHopInstance = messages.StringField(9)
  nextHopIp = messages.StringField(10)
  nextHopNetwork = messages.StringField(11)
  priority = messages.IntegerField(12, variant=messages.Variant.UINT32)
  selfLink = messages.StringField(13)
  tags = messages.StringField(14, repeated=True)
  warnings = messages.MessageField('WarningsValueListEntry', 15, repeated=True)
class RouteList(messages.Message):
  """Contains a list of route resources.

  Fields:
    id: Unique identifier for the resource; defined by the server (output
      only).
    items: The route resources.
    kind: Type of resource.
    nextPageToken: A token used to continue a truncated list request (output
      only).
    selfLink: Server defined URL for this resource (output only).
  """
  id = messages.StringField(1)
  items = messages.MessageField('Route', 2, repeated=True)
  kind = messages.StringField(3, default=u'compute#routeList')
  nextPageToken = messages.StringField(4)
  selfLink = messages.StringField(5)
class Scheduling(messages.Message):
  """Scheduling options for an Instance.

  Enums:
    OnHostMaintenanceValueValuesEnum: How the instance should behave when the
      host machine undergoes maintenance that may temporarily impact instance
      performance.

  Fields:
    automaticRestart: Whether the Instance should be automatically restarted
      whenever it is terminated by Compute Engine (not terminated by user).
    onHostMaintenance: How the instance should behave when the host machine
      undergoes maintenance that may temporarily impact instance performance.
  """

  class OnHostMaintenanceValueValuesEnum(messages.Enum):
    """How the instance should behave when the host machine undergoes
    maintenance that may temporarily impact instance performance.

    Values:
      MIGRATE: <no description>
      TERMINATE: <no description>
    """
    MIGRATE = 0
    TERMINATE = 1

  automaticRestart = messages.BooleanField(1)
  onHostMaintenance = messages.EnumField('OnHostMaintenanceValueValuesEnum', 2)
class SerialPortOutput(messages.Message):
  """An instance serial console output.

  Fields:
    contents: The contents of the console output.
    kind: Type of the resource.
    selfLink: Server defined URL for the resource (output only).
  """
  contents = messages.StringField(1)
  kind = messages.StringField(2, default=u'compute#serialPortOutput')
  selfLink = messages.StringField(3)
class ServiceAccount(messages.Message):
  """A service account.

  Fields:
    email: Email address of the service account.
    scopes: The list of scopes to be made available for this service account.
  """
  email = messages.StringField(1)
  scopes = messages.StringField(2, repeated=True)
class Snapshot(messages.Message):
  """A persistent disk snapshot resource.

  Enums:
    StatusValueValuesEnum: The status of the persistent disk snapshot (output
      only).
    StorageBytesStatusValueValuesEnum: An indicator whether storageBytes is
      in a stable state, or it is being adjusted as a result of shared
      storage reallocation.

  Fields:
    creationTimestamp: Creation timestamp in RFC3339 text format (output
      only).
    description: An optional textual description of the resource; provided
      by the client when the resource is created.
    diskSizeGb: Size of the persistent disk snapshot, specified in GB
      (output only).
    id: Unique identifier for the resource; defined by the server (output
      only).
    kind: Type of the resource.
    licenses: Public visible licenses.
    name: Name of the resource; provided by the client when the resource is
      created. The name must be 1-63 characters long, and comply with
      RFC1035.
    selfLink: Server defined URL for the resource (output only).
    sourceDisk: The source disk used to create this snapshot.
    sourceDiskId: The 'id' value of the disk used to create this snapshot.
      This value may be used to determine whether the snapshot was taken
      from the current or a previous instance of a given disk name.
    status: The status of the persistent disk snapshot (output only).
    storageBytes: The size of the storage used by the snapshot. As snapshots
      share storage this number is expected to change with snapshot
      creation/deletion.
    storageBytesStatus: An indicator whether storageBytes is in a stable
      state, or it is being adjusted as a result of shared storage
      reallocation.
  """

  class StatusValueValuesEnum(messages.Enum):
    """The status of the persistent disk snapshot (output only).

    Values:
      CREATING: <no description>
      DELETING: <no description>
      FAILED: <no description>
      READY: <no description>
      UPLOADING: <no description>
    """
    CREATING = 0
    DELETING = 1
    FAILED = 2
    READY = 3
    UPLOADING = 4

  class StorageBytesStatusValueValuesEnum(messages.Enum):
    """An indicator whether storageBytes is in a stable state, or it is
    being adjusted as a result of shared storage reallocation.

    Values:
      UPDATING: <no description>
      UP_TO_DATE: <no description>
    """
    UPDATING = 0
    UP_TO_DATE = 1

  creationTimestamp = messages.StringField(1)
  description = messages.StringField(2)
  diskSizeGb = messages.IntegerField(3)
  id = messages.IntegerField(4, variant=messages.Variant.UINT64)
  kind = messages.StringField(5, default=u'compute#snapshot')
  licenses = messages.StringField(6, repeated=True)
  name = messages.StringField(7)
  selfLink = messages.StringField(8)
  sourceDisk = messages.StringField(9)
  sourceDiskId = messages.StringField(10)
  status = messages.EnumField('StatusValueValuesEnum', 11)
  storageBytes = messages.IntegerField(12)
  storageBytesStatus = messages.EnumField('StorageBytesStatusValueValuesEnum', 13)
class SnapshotList(messages.Message):
  """Contains a list of persistent disk snapshot resources.

  Fields:
    id: Unique identifier for the resource; defined by the server (output
      only).
    items: The persistent snapshot resources.
    kind: Type of resource.
    nextPageToken: A token used to continue a truncated list request (output
      only).
    selfLink: Server defined URL for this resource (output only).
  """
  id = messages.StringField(1)
  items = messages.MessageField('Snapshot', 2, repeated=True)
  kind = messages.StringField(3, default=u'compute#snapshotList')
  nextPageToken = messages.StringField(4)
  selfLink = messages.StringField(5)
class StandardQueryParameters(messages.Message):
  """Query parameters accepted by all methods.

  Enums:
    AltValueValuesEnum: Data format for the response.

  Fields:
    alt: Data format for the response.
    fields: Selector specifying which fields to include in a partial
      response.
    key: API key. Your API key identifies your project and provides you with
      API access, quota, and reports. Required unless you provide an OAuth
      2.0 token.
    oauth_token: OAuth 2.0 token for the current user.
    prettyPrint: Returns response with indentations and line breaks.
    quotaUser: Available to use for quota purposes for server-side
      applications. Can be any arbitrary string assigned to a user, but
      should not exceed 40 characters. Overrides userIp if both are
      provided.
    trace: A tracing token of the form "token:<tokenid>" or "email:<ldap>"
      to include in api requests.
    userIp: IP address of the site where the request originates. Use this if
      you want to enforce per-user limits.
  """

  class AltValueValuesEnum(messages.Enum):
    """Data format for the response.

    Values:
      json: Responses with Content-Type of application/json
    """
    json = 0

  alt = messages.EnumField('AltValueValuesEnum', 1, default=u'json')
  fields = messages.StringField(2)
  key = messages.StringField(3)
  oauth_token = messages.StringField(4)
  prettyPrint = messages.BooleanField(5, default=True)
  quotaUser = messages.StringField(6)
  trace = messages.StringField(7)
  userIp = messages.StringField(8)
class Tags(messages.Message):
  """A set of instance tags.

  Fields:
    fingerprint: Fingerprint of this resource. A hash of the tags stored in
      this object. This field is used for optimistic locking. An up-to-date
      tags fingerprint must be provided in order to modify tags.
    items: An array of tags. Each tag must be 1-63 characters long, and
      comply with RFC1035.
  """
  fingerprint = messages.BytesField(1)
  items = messages.StringField(2, repeated=True)
class TargetHttpProxy(messages.Message):
  """A TargetHttpProxy resource. This resource defines an HTTP proxy.

  Fields:
    creationTimestamp: Creation timestamp in RFC3339 text format (output
      only).
    description: An optional textual description of the resource; provided
      by the client when the resource is created.
    id: Unique identifier for the resource; defined by the server (output
      only).
    kind: Type of the resource.
    name: Name of the resource; provided by the client when the resource is
      created. The name must be 1-63 characters long, and comply with
      RFC1035.
    selfLink: Server defined URL for the resource (output only).
    urlMap: URL to the UrlMap resource that defines the mapping from URL to
      the BackendService.
  """
  creationTimestamp = messages.StringField(1)
  description = messages.StringField(2)
  id = messages.IntegerField(3, variant=messages.Variant.UINT64)
  kind = messages.StringField(4, default=u'compute#targetHttpProxy')
  name = messages.StringField(5)
  selfLink = messages.StringField(6)
  urlMap = messages.StringField(7)
class TargetHttpProxyList(messages.Message):
  """Contains a list of TargetHttpProxy resources.

  Fields:
    id: Unique identifier for the resource; defined by the server (output
      only).
    items: The TargetHttpProxy resources.
    kind: Type of resource.
    nextPageToken: A token used to continue a truncated list request (output
      only).
    selfLink: Server defined URL for this resource (output only).
  """
  id = messages.StringField(1)
  items = messages.MessageField('TargetHttpProxy', 2, repeated=True)
  kind = messages.StringField(3, default=u'compute#targetHttpProxyList')
  nextPageToken = messages.StringField(4)
  selfLink = messages.StringField(5)
class TargetInstance(messages.Message):
  """A TargetInstance resource. This resource defines an endpoint VM that
  terminates traffic of certain protocols.

  Enums:
    NatPolicyValueValuesEnum: NAT option controlling how IPs are NAT'ed to
      the VM. Currently only NO_NAT (default value) is supported.

  Fields:
    creationTimestamp: Creation timestamp in RFC3339 text format (output
      only).
    description: An optional textual description of the resource; provided
      by the client when the resource is created.
    id: Unique identifier for the resource; defined by the server (output
      only).
    instance: The URL to the instance that terminates the relevant traffic.
    kind: Type of the resource.
    name: Name of the resource; provided by the client when the resource is
      created. The name must be 1-63 characters long, and comply with
      RFC1035.
    natPolicy: NAT option controlling how IPs are NAT'ed to the VM.
      Currently only NO_NAT (default value) is supported.
    selfLink: Server defined URL for the resource (output only).
    zone: URL of the zone where the target instance resides (output only).
  """

  class NatPolicyValueValuesEnum(messages.Enum):
    """NAT option controlling how IPs are NAT'ed to the VM. Currently only
    NO_NAT (default value) is supported.

    Values:
      NO_NAT: <no description>
    """
    NO_NAT = 0

  creationTimestamp = messages.StringField(1)
  description = messages.StringField(2)
  id = messages.IntegerField(3, variant=messages.Variant.UINT64)
  instance = messages.StringField(4)
  kind = messages.StringField(5, default=u'compute#targetInstance')
  name = messages.StringField(6)
  natPolicy = messages.EnumField('NatPolicyValueValuesEnum', 7)
  selfLink = messages.StringField(8)
  zone = messages.StringField(9)
class TargetInstanceAggregatedList(messages.Message):
  """A map of target instance lists, keyed by scope.

  Messages:
    ItemsValue: A map of scoped target instance lists.

  Fields:
    id: Unique identifier for the resource; defined by the server (output
      only).
    items: A map of scoped target instance lists.
    kind: Type of resource.
    nextPageToken: A token used to continue a truncated list request (output
      only).
    selfLink: Server defined URL for this resource (output only).
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class ItemsValue(messages.Message):
    """A map of scoped target instance lists.

    Messages:
      AdditionalProperty: An additional property for a ItemsValue object.

    Fields:
      additionalProperties: Name of the scope containing this set of target
        instances.
    """

    class AdditionalProperty(messages.Message):
      """A single map entry: scope name -> target instances in that scope.

      Fields:
        key: Name of the additional property.
        value: A TargetInstancesScopedList attribute.
      """
      key = messages.StringField(1)
      value = messages.MessageField('TargetInstancesScopedList', 2)

    additionalProperties = messages.MessageField('AdditionalProperty', 1, repeated=True)

  id = messages.StringField(1)
  items = messages.MessageField('ItemsValue', 2)
  kind = messages.StringField(3, default=u'compute#targetInstanceAggregatedList')
  nextPageToken = messages.StringField(4)
  selfLink = messages.StringField(5)
class TargetInstanceList(messages.Message):
  """Contains a list of TargetInstance resources.

  Fields:
    id: Unique identifier for the resource; defined by the server (output
      only).
    items: The TargetInstance resources.
    kind: Type of resource.
    nextPageToken: A token used to continue a truncated list request (output
      only).
    selfLink: Server defined URL for this resource (output only).
  """
  id = messages.StringField(1)
  items = messages.MessageField('TargetInstance', 2, repeated=True)
  kind = messages.StringField(3, default=u'compute#targetInstanceList')
  nextPageToken = messages.StringField(4)
  selfLink = messages.StringField(5)
class TargetInstancesScopedList(messages.Message):
  """The target instances contained in a single scope of an aggregated list.

  Messages:
    WarningValue: Informational warning which replaces the list of target
      instances when the list is empty.

  Fields:
    targetInstances: List of target instances contained in this scope.
    warning: Informational warning which replaces the list of target
      instances when the list is empty.
  """

  class WarningValue(messages.Message):
    """Informational warning which replaces the list of target instances
    when the list is empty.

    Enums:
      CodeValueValuesEnum: The warning type identifier for this warning.

    Messages:
      DataValueListEntry: A DataValueListEntry object.

    Fields:
      code: The warning type identifier for this warning.
      data: Metadata for this warning in 'key: value' format.
      message: Optional human-readable details for this warning.
    """

    class CodeValueValuesEnum(messages.Enum):
      """The warning type identifier for this warning.

      Values:
        DEPRECATED_RESOURCE_USED: <no description>
        DISK_SIZE_LARGER_THAN_IMAGE_SIZE: <no description>
        INJECTED_KERNELS_DEPRECATED: <no description>
        NEXT_HOP_ADDRESS_NOT_ASSIGNED: <no description>
        NEXT_HOP_CANNOT_IP_FORWARD: <no description>
        NEXT_HOP_INSTANCE_NOT_FOUND: <no description>
        NEXT_HOP_INSTANCE_NOT_ON_NETWORK: <no description>
        NEXT_HOP_NOT_RUNNING: <no description>
        NO_RESULTS_ON_PAGE: <no description>
        REQUIRED_TOS_AGREEMENT: <no description>
        RESOURCE_NOT_DELETED: <no description>
        UNREACHABLE: <no description>
      """
      # Enum numbers are part of the protorpc wire encoding; do not renumber.
      DEPRECATED_RESOURCE_USED = 0
      DISK_SIZE_LARGER_THAN_IMAGE_SIZE = 1
      INJECTED_KERNELS_DEPRECATED = 2
      NEXT_HOP_ADDRESS_NOT_ASSIGNED = 3
      NEXT_HOP_CANNOT_IP_FORWARD = 4
      NEXT_HOP_INSTANCE_NOT_FOUND = 5
      NEXT_HOP_INSTANCE_NOT_ON_NETWORK = 6
      NEXT_HOP_NOT_RUNNING = 7
      NO_RESULTS_ON_PAGE = 8
      REQUIRED_TOS_AGREEMENT = 9
      RESOURCE_NOT_DELETED = 10
      UNREACHABLE = 11

    class DataValueListEntry(messages.Message):
      """A single 'key: value' metadata entry for the warning.

      Fields:
        key: A key for the warning data.
        value: A warning data value corresponding to the key.
      """
      key = messages.StringField(1)
      value = messages.StringField(2)

    code = messages.EnumField('CodeValueValuesEnum', 1)
    data = messages.MessageField('DataValueListEntry', 2, repeated=True)
    message = messages.StringField(3)

  targetInstances = messages.MessageField('TargetInstance', 1, repeated=True)
  warning = messages.MessageField('WarningValue', 2)
# NOTE(review): generated-looking protorpc message definition; field tag
# numbers define the wire format — do not renumber or reorder them.
class TargetPool(messages.Message):
  """A TargetPool resource. This resource defines a pool of VMs, associated
  HttpHealthCheck resources, and the fallback TargetPool.
  Enums:
    SessionAffinityValueValuesEnum: Sesssion affinity option, must be one of
      the following values: 'NONE': Connections from the same client IP may go
      to any VM in the pool; 'CLIENT_IP': Connections from the same client IP
      will go to the same VM in the pool while that VM remains healthy.
      'CLIENT_IP_PROTO': Connections from the same client IP with the same IP
      protocol will go to the same VM in the pool while that VM remains
      healthy.
  Fields:
    backupPool: This field is applicable only when the containing target pool
      is serving a forwarding rule as the primary pool, and its
      'failoverRatio' field is properly set to a value between [0, 1].
      'backupPool' and 'failoverRatio' together define the fallback behavior
      of the primary target pool: if the ratio of the healthy VMs in the
      primary pool is at or below 'failoverRatio', traffic arriving at the
      load-balanced IP will be directed to the backup pool. In case where
      'failoverRatio' and 'backupPool' are not set, or all the VMs in the
      backup pool are unhealthy, the traffic will be directed back to the
      primary pool in the "force" mode, where traffic will be spread to the
      healthy VMs with the best effort, or to all VMs when no VM is healthy.
    creationTimestamp: Creation timestamp in RFC3339 text format (output
      only).
    description: An optional textual description of the resource; provided by
      the client when the resource is created.
    failoverRatio: This field is applicable only when the containing target
      pool is serving a forwarding rule as the primary pool (i.e., not as a
      backup pool to some other target pool). The value of the field must be
      in [0, 1]. If set, 'backupPool' must also be set. They together define
      the fallback behavior of the primary target pool: if the ratio of the
      healthy VMs in the primary pool is at or below this number, traffic
      arriving at the load-balanced IP will be directed to the backup pool.
      In case where 'failoverRatio' is not set or all the VMs in the backup
      pool are unhealthy, the traffic will be directed back to the primary
      pool in the "force" mode, where traffic will be spread to the healthy
      VMs with the best effort, or to all VMs when no VM is healthy.
    healthChecks: A list of URLs to the HttpHealthCheck resource. A member VM
      in this pool is considered healthy if and only if all specified health
      checks pass. An empty list means all member VMs will be considered
      healthy at all times.
    id: Unique identifier for the resource; defined by the server (output
      only).
    instances: A list of resource URLs to the member VMs serving this pool.
      They must live in zones contained in the same region as this pool.
    kind: Type of the resource.
    name: Name of the resource; provided by the client when the resource is
      created. The name must be 1-63 characters long, and comply with RFC1035.
    region: URL of the region where the target pool resides (output only).
    selfLink: Server defined URL for the resource (output only).
    sessionAffinity: Sesssion affinity option, must be one of the following
      values: 'NONE': Connections from the same client IP may go to any VM in
      the pool; 'CLIENT_IP': Connections from the same client IP will go to
      the same VM in the pool while that VM remains healthy.
      'CLIENT_IP_PROTO': Connections from the same client IP with the same IP
      protocol will go to the same VM in the pool while that VM remains
      healthy.
  """
  class SessionAffinityValueValuesEnum(messages.Enum):
    """Sesssion affinity option, must be one of the following values: 'NONE':
    Connections from the same client IP may go to any VM in the pool;
    'CLIENT_IP': Connections from the same client IP will go to the same VM in
    the pool while that VM remains healthy. 'CLIENT_IP_PROTO': Connections
    from the same client IP with the same IP protocol will go to the same VM
    in the pool while that VM remains healthy.
    Values:
      CLIENT_IP: <no description>
      CLIENT_IP_PROTO: <no description>
      NONE: <no description>
    """
    CLIENT_IP = 0
    CLIENT_IP_PROTO = 1
    NONE = 2
  backupPool = messages.StringField(1)
  creationTimestamp = messages.StringField(2)
  description = messages.StringField(3)
  failoverRatio = messages.FloatField(4, variant=messages.Variant.FLOAT)
  healthChecks = messages.StringField(5, repeated=True)
  id = messages.IntegerField(6, variant=messages.Variant.UINT64)
  instances = messages.StringField(7, repeated=True)
  kind = messages.StringField(8, default=u'compute#targetPool')
  name = messages.StringField(9)
  region = messages.StringField(10)
  selfLink = messages.StringField(11)
  sessionAffinity = messages.EnumField('SessionAffinityValueValuesEnum', 12)
# NOTE(review): generated-looking protorpc message definition; field tag
# numbers define the wire format — do not renumber or reorder them.
class TargetPoolAggregatedList(messages.Message):
  """A TargetPoolAggregatedList object.
  Messages:
    ItemsValue: A map of scoped target pool lists.
  Fields:
    id: Unique identifier for the resource; defined by the server (output
      only).
    items: A map of scoped target pool lists.
    kind: Type of resource.
    nextPageToken: A token used to continue a truncated list request (output
      only).
    selfLink: Server defined URL for this resource (output only).
  """
  # Map-like message: unrecognized JSON keys are collected into
  # additionalProperties by the decorator below.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class ItemsValue(messages.Message):
    """A map of scoped target pool lists.
    Messages:
      AdditionalProperty: An additional property for a ItemsValue object.
    Fields:
      additionalProperties: Name of the scope containing this set of target
        pools.
    """
    class AdditionalProperty(messages.Message):
      """An additional property for a ItemsValue object.
      Fields:
        key: Name of the additional property.
        value: A TargetPoolsScopedList attribute.
      """
      key = messages.StringField(1)
      value = messages.MessageField('TargetPoolsScopedList', 2)
    additionalProperties = messages.MessageField('AdditionalProperty', 1, repeated=True)
  id = messages.StringField(1)
  items = messages.MessageField('ItemsValue', 2)
  kind = messages.StringField(3, default=u'compute#targetPoolAggregatedList')
  nextPageToken = messages.StringField(4)
  selfLink = messages.StringField(5)
# NOTE(review): generated-looking protorpc message definition; field tag
# numbers define the wire format — do not renumber or reorder them.
class TargetPoolInstanceHealth(messages.Message):
  """A TargetPoolInstanceHealth object.
  Fields:
    healthStatus: A HealthStatus attribute.
    kind: Type of resource.
  """
  healthStatus = messages.MessageField('HealthStatus', 1, repeated=True)
  kind = messages.StringField(2, default=u'compute#targetPoolInstanceHealth')
# NOTE(review): generated-looking protorpc message definition; field tag
# numbers define the wire format — do not renumber or reorder them.
class TargetPoolList(messages.Message):
  """Contains a list of TargetPool resources.
  Fields:
    id: Unique identifier for the resource; defined by the server (output
      only).
    items: The TargetPool resources.
    kind: Type of resource.
    nextPageToken: A token used to continue a truncated list request (output
      only).
    selfLink: Server defined URL for this resource (output only).
  """
  id = messages.StringField(1)
  items = messages.MessageField('TargetPool', 2, repeated=True)
  kind = messages.StringField(3, default=u'compute#targetPoolList')
  nextPageToken = messages.StringField(4)
  selfLink = messages.StringField(5)
# NOTE(review): generated-looking protorpc request body; tag numbers are
# wire-format-significant.
class TargetPoolsAddHealthCheckRequest(messages.Message):
  """A TargetPoolsAddHealthCheckRequest object.
  Fields:
    healthChecks: Health check URLs to be added to targetPool.
  """
  healthChecks = messages.MessageField('HealthCheckReference', 1, repeated=True)
# NOTE(review): generated-looking protorpc request body; tag numbers are
# wire-format-significant.
class TargetPoolsAddInstanceRequest(messages.Message):
  """A TargetPoolsAddInstanceRequest object.
  Fields:
    instances: URLs of the instances to be added to targetPool.
  """
  instances = messages.MessageField('InstanceReference', 1, repeated=True)
# NOTE(review): generated-looking protorpc request body; tag numbers are
# wire-format-significant.
class TargetPoolsRemoveHealthCheckRequest(messages.Message):
  """A TargetPoolsRemoveHealthCheckRequest object.
  Fields:
    healthChecks: Health check URLs to be removed from targetPool.
  """
  healthChecks = messages.MessageField('HealthCheckReference', 1, repeated=True)
# NOTE(review): generated-looking protorpc request body; tag numbers are
# wire-format-significant.
class TargetPoolsRemoveInstanceRequest(messages.Message):
  """A TargetPoolsRemoveInstanceRequest object.
  Fields:
    instances: URLs of the instances to be removed from targetPool.
  """
  instances = messages.MessageField('InstanceReference', 1, repeated=True)
# NOTE(review): generated-looking protorpc message definition; field tag
# numbers define the wire format — do not renumber or reorder them.
class TargetPoolsScopedList(messages.Message):
  """A TargetPoolsScopedList object.
  Messages:
    WarningValue: Informational warning which replaces the list of addresses
      when the list is empty.
  Fields:
    targetPools: List of target pools contained in this scope.
    warning: Informational warning which replaces the list of addresses when
      the list is empty.
  """
  class WarningValue(messages.Message):
    """Informational warning which replaces the list of addresses when the
    list is empty.
    Enums:
      CodeValueValuesEnum: The warning type identifier for this warning.
    Messages:
      DataValueListEntry: A DataValueListEntry object.
    Fields:
      code: The warning type identifier for this warning.
      data: Metadata for this warning in 'key: value' format.
      message: Optional human-readable details for this warning.
    """
    class CodeValueValuesEnum(messages.Enum):
      """The warning type identifier for this warning.
      Values:
        DEPRECATED_RESOURCE_USED: <no description>
        DISK_SIZE_LARGER_THAN_IMAGE_SIZE: <no description>
        INJECTED_KERNELS_DEPRECATED: <no description>
        NEXT_HOP_ADDRESS_NOT_ASSIGNED: <no description>
        NEXT_HOP_CANNOT_IP_FORWARD: <no description>
        NEXT_HOP_INSTANCE_NOT_FOUND: <no description>
        NEXT_HOP_INSTANCE_NOT_ON_NETWORK: <no description>
        NEXT_HOP_NOT_RUNNING: <no description>
        NO_RESULTS_ON_PAGE: <no description>
        REQUIRED_TOS_AGREEMENT: <no description>
        RESOURCE_NOT_DELETED: <no description>
        UNREACHABLE: <no description>
      """
      DEPRECATED_RESOURCE_USED = 0
      DISK_SIZE_LARGER_THAN_IMAGE_SIZE = 1
      INJECTED_KERNELS_DEPRECATED = 2
      NEXT_HOP_ADDRESS_NOT_ASSIGNED = 3
      NEXT_HOP_CANNOT_IP_FORWARD = 4
      NEXT_HOP_INSTANCE_NOT_FOUND = 5
      NEXT_HOP_INSTANCE_NOT_ON_NETWORK = 6
      NEXT_HOP_NOT_RUNNING = 7
      NO_RESULTS_ON_PAGE = 8
      REQUIRED_TOS_AGREEMENT = 9
      RESOURCE_NOT_DELETED = 10
      UNREACHABLE = 11
    class DataValueListEntry(messages.Message):
      """A DataValueListEntry object.
      Fields:
        key: A key for the warning data.
        value: A warning data value corresponding to the key.
      """
      key = messages.StringField(1)
      value = messages.StringField(2)
    code = messages.EnumField('CodeValueValuesEnum', 1)
    data = messages.MessageField('DataValueListEntry', 2, repeated=True)
    message = messages.StringField(3)
  targetPools = messages.MessageField('TargetPool', 1, repeated=True)
  warning = messages.MessageField('WarningValue', 2)
# NOTE(review): generated-looking protorpc message; tag numbers are
# wire-format-significant.
class TargetReference(messages.Message):
  """A TargetReference object.
  Fields:
    target: A string attribute.
  """
  target = messages.StringField(1)
# NOTE(review): generated-looking protorpc message; tag numbers are
# wire-format-significant.
class TestFailure(messages.Message):
  """A TestFailure object.
  Fields:
    actualService: A string attribute.
    expectedService: A string attribute.
    host: A string attribute.
    path: A string attribute.
  """
  actualService = messages.StringField(1)
  expectedService = messages.StringField(2)
  host = messages.StringField(3)
  path = messages.StringField(4)
# NOTE(review): generated-looking protorpc message definition; field tag
# numbers define the wire format — do not renumber or reorder them.
class UrlMap(messages.Message):
  """A UrlMap resource. This resource defines the mapping from URL to the
  BackendService resource, based on the "longest-match" of the URL's host and
  path.
  Fields:
    creationTimestamp: Creation timestamp in RFC3339 text format (output
      only).
    defaultService: The URL of the BackendService resource if none of the
      hostRules match.
    description: An optional textual description of the resource; provided by
      the client when the resource is created.
    fingerprint: Fingerprint of this resource. A hash of the contents stored
      in this object. This field is used in optimistic locking. This field
      will be ignored when inserting a UrlMap. An up-to-date fingerprint must
      be provided in order to update the UrlMap.
    hostRules: The list of HostRules to use against the URL.
    id: Unique identifier for the resource; defined by the server (output
      only).
    kind: Type of the resource.
    name: Name of the resource; provided by the client when the resource is
      created. The name must be 1-63 characters long, and comply with RFC1035.
    pathMatchers: The list of named PathMatchers to use against the URL.
    selfLink: Server defined URL for the resource (output only).
    tests: The list of expected URL mappings. Request to update this UrlMap
      will succeed only all of the test cases pass.
  """
  creationTimestamp = messages.StringField(1)
  defaultService = messages.StringField(2)
  description = messages.StringField(3)
  fingerprint = messages.BytesField(4)
  hostRules = messages.MessageField('HostRule', 5, repeated=True)
  id = messages.IntegerField(6, variant=messages.Variant.UINT64)
  kind = messages.StringField(7, default=u'compute#urlMap')
  name = messages.StringField(8)
  pathMatchers = messages.MessageField('PathMatcher', 9, repeated=True)
  selfLink = messages.StringField(10)
  tests = messages.MessageField('UrlMapTest', 11, repeated=True)
# NOTE(review): generated-looking protorpc message; tag numbers are
# wire-format-significant.
class UrlMapList(messages.Message):
  """Contains a list of UrlMap resources.
  Fields:
    id: Unique identifier for the resource; defined by the server (output
      only).
    items: The UrlMap resources.
    kind: Type of resource.
    nextPageToken: A token used to continue a truncated list request (output
      only).
    selfLink: Server defined URL for this resource (output only).
  """
  id = messages.StringField(1)
  items = messages.MessageField('UrlMap', 2, repeated=True)
  kind = messages.StringField(3, default=u'compute#urlMapList')
  nextPageToken = messages.StringField(4)
  selfLink = messages.StringField(5)
# NOTE(review): generated-looking protorpc message; tag numbers are
# wire-format-significant.
class UrlMapReference(messages.Message):
  """A UrlMapReference object.
  Fields:
    urlMap: A string attribute.
  """
  urlMap = messages.StringField(1)
# NOTE(review): generated-looking protorpc message; tag numbers are
# wire-format-significant.
class UrlMapTest(messages.Message):
  """Message for the expected URL mappings.
  Fields:
    description: Description of this test case.
    host: Host portion of the URL.
    path: Path portion of the URL.
    service: Expected BackendService resource the given URL should be mapped
      to.
  """
  description = messages.StringField(1)
  host = messages.StringField(2)
  path = messages.StringField(3)
  service = messages.StringField(4)
# NOTE(review): generated-looking protorpc message; tag numbers are
# wire-format-significant.
class UrlMapValidationResult(messages.Message):
  """Message representing the validation result for a UrlMap.
  Fields:
    loadErrors: A string attribute.
    loadSucceeded: Whether the given UrlMap can be successfully loaded. If
      false, 'loadErrors' indicates the reasons.
    testFailures: A TestFailure attribute.
    testPassed: If successfully loaded, this field indicates whether the test
      passed. If false, 'testFailures's indicate the reason of failure.
  """
  loadErrors = messages.StringField(1, repeated=True)
  loadSucceeded = messages.BooleanField(2)
  testFailures = messages.MessageField('TestFailure', 3, repeated=True)
  testPassed = messages.BooleanField(4)
# NOTE(review): generated-looking protorpc request body; tag numbers are
# wire-format-significant.
class UrlMapsValidateRequest(messages.Message):
  """A UrlMapsValidateRequest object.
  Fields:
    resource: Content of the UrlMap to be validated.
  """
  resource = messages.MessageField('UrlMap', 1)
# NOTE(review): generated-looking protorpc response body; tag numbers are
# wire-format-significant.
class UrlMapsValidateResponse(messages.Message):
  """A UrlMapsValidateResponse object.
  Fields:
    result: A UrlMapValidationResult attribute.
  """
  result = messages.MessageField('UrlMapValidationResult', 1)
# NOTE(review): generated-looking protorpc message; tag numbers are
# wire-format-significant.
class UsageExportLocation(messages.Message):
  """The location in Cloud Storage and naming method of the daily usage
  report. Contains bucket_name and report_name prefix.
  Fields:
    bucketName: The name of an existing bucket in Cloud Storage where the
      usage report object is stored. The Google Service Account is granted
      write access to this bucket. This is simply the bucket name, with no
      "gs://" or "https://storage.googleapis.com/" in front of it.
    reportNamePrefix: An optional prefix for the name of the usage report
      object stored in bucket_name. If not supplied, defaults to "usage_". The
      report is stored as a CSV file named _gce_.csv. where is the day of the
      usage according to Pacific Time. The prefix should conform to Cloud
      Storage object naming conventions.
  """
  bucketName = messages.StringField(1)
  reportNamePrefix = messages.StringField(2)
# NOTE(review): generated-looking protorpc message definition; field tag
# numbers define the wire format — do not renumber or reorder them.
class Zone(messages.Message):
  """A zone resource.
  Enums:
    StatusValueValuesEnum: Status of the zone. "UP" or "DOWN".
  Messages:
    MaintenanceWindowsValueListEntry: A MaintenanceWindowsValueListEntry
      object.
  Fields:
    creationTimestamp: Creation timestamp in RFC3339 text format (output
      only).
    deprecated: The deprecation status associated with this zone.
    description: Textual description of the resource.
    id: Unique identifier for the resource; defined by the server (output
      only).
    kind: Type of the resource.
    maintenanceWindows: Scheduled maintenance windows for the zone. When the
      zone is in a maintenance window, all resources which reside in the zone
      will be unavailable.
    name: Name of the resource.
    region: Full URL reference to the region which hosts the zone (output
      only).
    selfLink: Server defined URL for the resource (output only).
    status: Status of the zone. "UP" or "DOWN".
  """
  class StatusValueValuesEnum(messages.Enum):
    """Status of the zone. "UP" or "DOWN".
    Values:
      DOWN: <no description>
      UP: <no description>
    """
    DOWN = 0
    UP = 1
  class MaintenanceWindowsValueListEntry(messages.Message):
    """A MaintenanceWindowsValueListEntry object.
    Fields:
      beginTime: Begin time of the maintenance window, in RFC 3339 format.
      description: Textual description of the maintenance window.
      endTime: End time of the maintenance window, in RFC 3339 format.
      name: Name of the maintenance window.
    """
    beginTime = messages.StringField(1)
    description = messages.StringField(2)
    endTime = messages.StringField(3)
    name = messages.StringField(4)
  creationTimestamp = messages.StringField(1)
  deprecated = messages.MessageField('DeprecationStatus', 2)
  description = messages.StringField(3)
  id = messages.IntegerField(4, variant=messages.Variant.UINT64)
  kind = messages.StringField(5, default=u'compute#zone')
  maintenanceWindows = messages.MessageField('MaintenanceWindowsValueListEntry', 6, repeated=True)
  name = messages.StringField(7)
  region = messages.StringField(8)
  selfLink = messages.StringField(9)
  status = messages.EnumField('StatusValueValuesEnum', 10)
# NOTE(review): generated-looking protorpc message; tag numbers are
# wire-format-significant.
class ZoneList(messages.Message):
  """Contains a list of zone resources.
  Fields:
    id: Unique identifier for the resource; defined by the server (output
      only).
    items: The zone resources.
    kind: Type of resource.
    nextPageToken: A token used to continue a truncated list request (output
      only).
    selfLink: Server defined URL for this resource (output only).
  """
  id = messages.StringField(1)
  items = messages.MessageField('Zone', 2, repeated=True)
  kind = messages.StringField(3, default=u'compute#zoneList')
  nextPageToken = messages.StringField(4)
  selfLink = messages.StringField(5)
| gpl-3.0 |
jeanlinux/calibre | src/calibre/gui2/search_box.py | 10 | 20902 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import re, time
from functools import partial
from PyQt5.Qt import QComboBox, Qt, QLineEdit, pyqtSlot, QDialog, \
pyqtSignal, QCompleter, QAction, QKeySequence, QTimer, \
QIcon, QMenu
from calibre.gui2 import config, error_dialog, question_dialog, gprefs
from calibre.gui2.dialogs.confirm_delete import confirm
from calibre.gui2.dialogs.saved_search_editor import SavedSearchEditor
from calibre.gui2.dialogs.search import SearchDialog
class AsYouType(unicode):
    """A unicode subclass used as a marker: the ``as_you_type`` attribute
    flags the query as one generated while the user was still typing."""

    def __new__(cls, text):
        instance = unicode.__new__(cls, text)
        instance.as_you_type = True
        return instance
class SearchLineEdit(QLineEdit):  # {{{
    # Line edit embedded inside SearchBox2/SavedSearchBox. It forwards key
    # presses to the owning combo box and re-normalizes the combo's visual
    # state whenever the text can change outside of typing (drop, paste,
    # context menu).

    key_pressed = pyqtSignal(object)  # re-emits every QKeyEvent to the parent
    # Timestamp of the most recent focus-in, or None; consumed by
    # mousePressEvent to select-all on the click that delivered focus.
    select_on_mouse_press = None

    def keyPressEvent(self, event):
        # Let the parent see the raw event before default QLineEdit handling.
        self.key_pressed.emit(event)
        QLineEdit.keyPressEvent(self, event)

    def dropEvent(self, ev):
        # Dropped text may replace a highlighted search; reset parent styling.
        self.parent().normalize_state()
        return QLineEdit.dropEvent(self, ev)

    def contextMenuEvent(self, ev):
        self.parent().normalize_state()
        return QLineEdit.contextMenuEvent(self, ev)

    @pyqtSlot()
    def paste(self, *args):
        self.parent().normalize_state()
        return QLineEdit.paste(self)

    def focusInEvent(self, ev):
        # Remember when focus arrived so the very next click can select-all.
        self.select_on_mouse_press = time.time()
        return QLineEdit.focusInEvent(self, ev)

    def mousePressEvent(self, ev):
        QLineEdit.mousePressEvent(self, ev)
        # Select the whole text only if this click follows focus-in closely
        # (within 0.2s), i.e. it is the click that focused the widget.
        if self.select_on_mouse_press is not None and abs(time.time() - self.select_on_mouse_press) < 0.2:
            self.selectAll()
        self.select_on_mouse_press = None
# }}}
class SearchBox2(QComboBox):  # {{{

    '''
    To use this class:

        * Call initialize()
        * Connect to the search() and cleared() signals from this widget.
        * Connect to the changed() signal to know when the box content changes
        * Connect to focus_to_library() signal to be told to manually change focus
        * Call search_done() after every search is complete
        * Call set_search_string() to perform a search programmatically
        * You can use the current_text property to get the current search text
          Be aware that if you are using it in a slot connected to the
          changed() signal, if the connection is not queued it will not be
          accurate.
    '''

    INTERVAL = 1500  #: Time to wait before emitting search signal
    MAX_COUNT = 25

    search = pyqtSignal(object)
    cleared = pyqtSignal()
    changed = pyqtSignal()
    focus_to_library = pyqtSignal()

    def __init__(self, parent=None):
        QComboBox.__init__(self, parent)
        self.normal_background = 'rgb(255, 255, 255, 0%)'
        self.line_edit = SearchLineEdit(self)
        self.setLineEdit(self.line_edit)
        c = self.line_edit.completer()
        c.setCompletionMode(c.PopupCompletion)
        c.highlighted[str].connect(self.completer_used)
        self.line_edit.key_pressed.connect(self.key_pressed, type=Qt.DirectConnection)
        # QueuedConnection as workaround for https://bugreports.qt-project.org/browse/QTBUG-40807
        self.activated[str].connect(self.history_selected, type=Qt.QueuedConnection)
        self.setEditable(True)
        self.as_you_type = True
        self.timer = QTimer()
        self.timer.setSingleShot(True)
        self.timer.timeout.connect(self.timer_event, type=Qt.QueuedConnection)
        self.setInsertPolicy(self.NoInsert)
        self.setMaxCount(self.MAX_COUNT)
        self.setSizeAdjustPolicy(self.AdjustToMinimumContentsLengthWithIcon)
        self.setMinimumContentsLength(25)
        self._in_a_search = False
        self.tool_tip_text = self.toolTip()

    def initialize(self, opt_name, colorize=False, help_text=_('Search')):
        '''Load search history from config key *opt_name* and set up display.'''
        self.as_you_type = config['search_as_you_type']
        self.opt_name = opt_name
        # De-duplicate the stored history while preserving its order.
        items = []
        for item in config[opt_name]:
            if item not in items:
                items.append(item)
        self.addItems(items)
        self.line_edit.setPlaceholderText(help_text)
        self.colorize = colorize
        self.clear()

    def hide_completer_popup(self):
        try:
            self.lineEdit().completer().popup().setVisible(False)
        except:
            pass

    def normalize_state(self):
        # Restore the neutral tooltip and background (no search active).
        self.setToolTip(self.tool_tip_text)
        self.line_edit.setStyleSheet(
            'QLineEdit{color:none;background-color:%s;}' % self.normal_background)

    def text(self):
        return self.currentText()

    def clear_history(self, *args):
        QComboBox.clear(self)

    def clear(self, emit_search=True):
        self.normalize_state()
        self.setEditText('')
        if emit_search:
            self.search.emit('')
        self._in_a_search = False
        self.cleared.emit()

    def clear_clicked(self, *args):
        self.clear()

    def search_done(self, ok):
        # *ok* may be a failure message string instead of a boolean.
        if isinstance(ok, basestring):
            self.setToolTip(ok)
            ok = False
        if not unicode(self.currentText()).strip():
            self.clear(emit_search=False)
            return
        self._in_a_search = ok
        col = 'rgba(0,255,0,20%)' if ok else 'rgb(255,0,0,20%)'
        if not self.colorize:
            col = self.normal_background
        self.line_edit.setStyleSheet('QLineEdit{color:black;background-color:%s;}' % col)

    # Comes from the lineEdit control
    def key_pressed(self, event):
        k = event.key()
        if k in (Qt.Key_Left, Qt.Key_Right, Qt.Key_Up, Qt.Key_Down,
                Qt.Key_Home, Qt.Key_End, Qt.Key_PageUp, Qt.Key_PageDown,
                Qt.Key_unknown):
            return
        self.normalize_state()
        if self._in_a_search:
            self.changed.emit()
            self._in_a_search = False
        if event.key() in (Qt.Key_Return, Qt.Key_Enter):
            self.do_search()
            self.focus_to_library.emit()
        elif self.as_you_type and unicode(event.text()):
            # FIX: was a hard-coded 1500, shadowing the INTERVAL class
            # constant that documents/controls the as-you-type delay.
            self.timer.start(self.INTERVAL)

    # Comes from the combobox itself
    def keyPressEvent(self, event):
        k = event.key()
        if k in (Qt.Key_Enter, Qt.Key_Return):
            return self.do_search()
        if k not in (Qt.Key_Up, Qt.Key_Down):
            QComboBox.keyPressEvent(self, event)
        else:
            # Suppress signals while navigating history with the arrow keys so
            # intermediate selections do not trigger searches.
            self.blockSignals(True)
            self.normalize_state()
            QComboBox.keyPressEvent(self, event)
            self.blockSignals(False)

    def completer_used(self, text):
        self.timer.stop()
        self.normalize_state()

    def timer_event(self):
        self._do_search(as_you_type=True)

    def history_selected(self, text):
        self.changed.emit()
        self.do_search()

    def _do_search(self, store_in_history=True, as_you_type=False):
        self.hide_completer_popup()
        text = unicode(self.currentText()).strip()
        if not text:
            return self.clear()
        if as_you_type:
            text = AsYouType(text)  # mark query as typed-in-progress
        self.search.emit(text)

        if store_in_history:
            # Move (or insert) the query to the top of the history list.
            idx = self.findText(text, Qt.MatchFixedString|Qt.MatchCaseSensitive)
            self.block_signals(True)
            if idx < 0:
                self.insertItem(0, text)
            else:
                t = self.itemText(idx)
                self.removeItem(idx)
                self.insertItem(0, t)
            self.setCurrentIndex(0)
            self.block_signals(False)
            history = [unicode(self.itemText(i)) for i in
                    range(self.count())]
            config[self.opt_name] = history

    def do_search(self, *args):
        self._do_search()

    def block_signals(self, yes):
        self.blockSignals(yes)
        self.line_edit.blockSignals(yes)

    def set_search_string(self, txt, store_in_history=False, emit_changed=True):
        if not store_in_history:
            self.activated[str].disconnect()
        try:
            self.setFocus(Qt.OtherFocusReason)
            if not txt:
                self.clear()
            else:
                self.normalize_state()
                # must turn on case sensitivity here so that tag browser strings
                # are not case-insensitively replaced from history
                self.line_edit.completer().setCaseSensitivity(Qt.CaseSensitive)
                self.setEditText(txt)
                self.line_edit.end(False)
                if emit_changed:
                    self.changed.emit()
                self._do_search(store_in_history=store_in_history)
                self.line_edit.completer().setCaseSensitivity(Qt.CaseInsensitive)
            self.focus_to_library.emit()
        finally:
            if not store_in_history:
                # QueuedConnection as workaround for https://bugreports.qt-project.org/browse/QTBUG-40807
                self.activated[str].connect(self.history_selected, type=Qt.QueuedConnection)

    def search_as_you_type(self, enabled):
        self.as_you_type = enabled

    def in_a_search(self):
        return self._in_a_search

    @property
    def current_text(self):
        return unicode(self.lineEdit().text())

    # }}}
class SavedSearchBox(QComboBox):  # {{{

    '''
    To use this class:
        * Call initialize()
        * Connect to the changed() signal from this widget
          if you care about changes to the list of saved searches.
    '''

    changed = pyqtSignal()

    def __init__(self, parent=None):
        QComboBox.__init__(self, parent)

        self.normal_background = 'rgb(255, 255, 255, 0%)'
        self.line_edit = SearchLineEdit(self)
        self.setLineEdit(self.line_edit)
        self.line_edit.key_pressed.connect(self.key_pressed, type=Qt.DirectConnection)
        self.activated[str].connect(self.saved_search_selected)

        # Turn off auto-completion so that it doesn't interfere with typing
        # names of new searches.
        completer = QCompleter(self)
        self.setCompleter(completer)

        self.setEditable(True)
        self.setInsertPolicy(self.NoInsert)
        self.setSizeAdjustPolicy(self.AdjustToMinimumContentsLengthWithIcon)
        self.setMinimumContentsLength(10)
        self.tool_tip_text = self.toolTip()

    def initialize(self, _search_box, colorize=False, help_text=_('Search')):
        # *_search_box* is the companion SearchBox2 this widget drives.
        self.search_box = _search_box
        try:
            self.line_edit.setPlaceholderText(help_text)
        except:
            # Using Qt < 4.7
            pass
        self.colorize = colorize
        self.clear()

    def normalize_state(self):
        # need this because line_edit will call it in some cases such as paste
        pass

    def clear(self):
        QComboBox.clear(self)
        self.initialize_saved_search_names()
        self.setEditText('')
        self.line_edit.home(False)

    def key_pressed(self, event):
        # Enter/Return applies the currently typed saved-search name.
        if event.key() in (Qt.Key_Return, Qt.Key_Enter):
            self.saved_search_selected(self.currentText())

    def saved_search_selected(self, qname):
        from calibre.gui2.ui import get_gui
        db = get_gui().current_db
        qname = unicode(qname)
        if qname is None or not qname.strip():
            self.search_box.clear()
            return
        if not db.saved_search_lookup(qname):
            # Unknown name: keep the text but run no search.
            self.search_box.clear()
            self.setEditText(qname)
            return
        # Run the saved search via the main search box; tooltip shows the
        # underlying search expression.
        self.search_box.set_search_string(u'search:"%s"' % qname, emit_changed=False)
        self.setEditText(qname)
        self.setToolTip(db.saved_search_lookup(qname))

    def initialize_saved_search_names(self):
        from calibre.gui2.ui import get_gui
        gui = get_gui()
        try:
            names = gui.current_db.saved_search_names()
        except AttributeError:
            # Happens during gui initialization
            names = []
        self.addItems(names)
        self.setCurrentIndex(-1)

    # SIGNALed from the main UI
    def save_search_button_clicked(self):
        from calibre.gui2.ui import get_gui
        db = get_gui().current_db
        name = unicode(self.currentText())
        if not name.strip():
            # Derive a name from the search text, stripping characters that
            # would break the saved-search syntax.
            name = unicode(self.search_box.text()).replace('"', '')
        name = name.replace('\\', '')
        if not name:
            error_dialog(self, _('Create saved search'),
                         _('Invalid saved search name. '
                    'It must contain at least one letter or number'), show=True)
            return
        if not self.search_box.text():
            error_dialog(self, _('Create saved search'),
                         _('There is no search to save'), show=True)
            return
        db.saved_search_delete(name)
        db.saved_search_add(name, unicode(self.search_box.text()))
        # now go through an initialization cycle to ensure that the combobox has
        # the new search in it, that it is selected, and that the search box
        # references the new search instead of the text in the search.
        self.clear()
        self.setCurrentIndex(self.findText(name))
        self.saved_search_selected(name)
        self.changed.emit()

    def delete_current_search(self):
        from calibre.gui2.ui import get_gui
        db = get_gui().current_db
        idx = self.currentIndex()
        if idx <= 0:
            error_dialog(self, _('Delete current search'),
                         _('No search is selected'), show=True)
            return
        if not confirm('<p>'+_('The selected search will be '
                       '<b>permanently deleted</b>. Are you sure?') +
                   '</p>', 'saved_search_delete', self):
            return
        ss = db.saved_search_lookup(unicode(self.currentText()))
        if ss is None:
            return
        db.saved_search_delete(unicode(self.currentText()))
        self.clear()
        self.search_box.clear()
        self.changed.emit()

    # SIGNALed from the main UI
    def copy_search_button_clicked(self):
        # Copy the selected saved search's expression into the main search box.
        from calibre.gui2.ui import get_gui
        db = get_gui().current_db
        idx = self.currentIndex()
        if idx < 0:
            return
        self.search_box.set_search_string(db.saved_search_lookup(unicode(self.currentText())))
    # }}}
class SearchBoxMixin(object):  # {{{
    # Mixed into the main GUI window; wires the SearchBox2 widget, its
    # buttons, keyboard shortcuts and the highlight-only toggle into the
    # library view.

    def __init__(self, *args, **kwargs):
        pass

    def init_search_box_mixin(self):
        self.search.initialize('main_search_history', colorize=True,
                help_text=_('Search (For Advanced Search click the button to the left)'))
        self.search.cleared.connect(self.search_box_cleared)
        # Queued so that search.current_text will be correct
        self.search.changed.connect(self.search_box_changed,
                type=Qt.QueuedConnection)
        self.search.focus_to_library.connect(self.focus_to_library)
        self.clear_button.clicked.connect(self.search.clear_clicked)
        self.advanced_search_button.clicked[bool].connect(self.do_advanced_search)

        self.search.clear()
        self.search.setMaximumWidth(self.width()-150)
        # Register Find shortcuts (platform bindings plus '/' and Alt+S) to
        # focus the search box.
        self.action_focus_search = QAction(self)
        shortcuts = list(
                map(lambda x:unicode(x.toString(QKeySequence.PortableText)),
                    QKeySequence.keyBindings(QKeySequence.Find)))
        shortcuts += ['/', 'Alt+S']
        self.keyboard.register_shortcut('start search', _('Start search'),
                default_keys=shortcuts, action=self.action_focus_search)
        self.action_focus_search.triggered.connect(self.focus_search_box)
        self.addAction(self.action_focus_search)
        # Status tip is the tooltip with HTML tags stripped.
        self.search.setStatusTip(re.sub(r'<\w+>', ' ',
            unicode(self.search.toolTip())))
        self.advanced_search_button.setStatusTip(self.advanced_search_button.toolTip())
        self.clear_button.setStatusTip(self.clear_button.toolTip())
        self.set_highlight_only_button_icon()
        self.highlight_only_button.clicked.connect(self.highlight_only_clicked)
        tt = _('Enable or disable search highlighting.') + '<br><br>'
        tt += config.help('highlight_search_matches')
        self.highlight_only_button.setToolTip(tt)
        self.highlight_only_action = ac = QAction(self)
        self.addAction(ac), ac.triggered.connect(self.highlight_only_clicked)
        self.keyboard.register_shortcut('highlight search results', _('Highlight search results'), action=self.highlight_only_action)

    def highlight_only_clicked(self, state):
        # Confirm before switching modes (only when turning highlighting on);
        # the dialog can be permanently skipped by the user.
        if not config['highlight_search_matches'] and not question_dialog(self, _('Are you sure?'),
            _('This will change how searching works. When you search, instead of showing only the '
                'matching books, all books will be shown with the matching books highlighted. '
                'Are you sure this is what you want?'), skip_dialog_name='confirm_search_highlight_toggle'):
            return
        config['highlight_search_matches'] = not config['highlight_search_matches']
        self.set_highlight_only_button_icon()
        self.search.do_search()
        self.focus_to_library()

    def set_highlight_only_button_icon(self):
        if config['highlight_search_matches']:
            self.highlight_only_button.setIcon(QIcon(I('highlight_only_on.png')))
        else:
            self.highlight_only_button.setIcon(QIcon(I('highlight_only_off.png')))
        self.highlight_only_button.setVisible(gprefs['show_highlight_toggle_button'])
        self.library_view.model().set_highlight_only(config['highlight_search_matches'])

    def focus_search_box(self, *args):
        self.search.setFocus(Qt.OtherFocusReason)
        self.search.lineEdit().selectAll()

    def search_box_cleared(self):
        self.tags_view.clear()
        self.saved_search.clear()
        self.set_number_of_books_shown()

    def search_box_changed(self):
        self.saved_search.clear()
        self.tags_view.conditional_clear(self.search.current_text)

    def do_advanced_search(self, *args):
        d = SearchDialog(self, self.library_view.model().db)
        if d.exec_() == QDialog.Accepted:
            self.search.set_search_string(d.search_string(), store_in_history=True)

    def do_search_button(self):
        self.search.do_search()
        self.focus_to_library()

    def focus_to_library(self):
        self.current_view().setFocus(Qt.OtherFocusReason)
    # }}}
class SavedSearchBoxMixin(object):  # {{{
    """Mixin wiring the saved-search combo box and its save/copy/delete
    buttons into the main GUI window."""

    def __init__(self, *args, **kwargs):
        # Cooperative-mixin constructor: real setup is in
        # init_saved_seach_box_mixin(), called explicitly by the host.
        pass

    def init_saved_seach_box_mixin(self):
        """Connect saved-search widgets and build the save button's menu.

        NOTE: the method name keeps the historical "seach" spelling;
        callers elsewhere depend on it, so it must not be renamed here.
        """
        self.saved_search.changed.connect(self.saved_searches_changed)
        self.clear_button.clicked.connect(self.saved_search.clear)
        self.save_search_button.clicked.connect(
                self.saved_search.save_search_button_clicked)
        self.copy_search_button.clicked.connect(
                self.saved_search.copy_search_button_clicked)
        # self.saved_searches_changed()
        self.saved_search.initialize(self.search, colorize=True,
                help_text=_('Saved Searches'))
        self.saved_search.setToolTip(
            _('Choose saved search or enter name for new saved search'))
        self.saved_search.setStatusTip(self.saved_search.toolTip())
        # Status tips for the copy/save buttons mirror their tooltips.
        for x in ('copy', 'save'):
            b = getattr(self, x+'_search_button')
            b.setStatusTip(b.toolTip())
        self.save_search_button.setToolTip('<p>' +
            _("Save current search under the name shown in the box. "
                "Press and hold for a pop-up options menu.") + '</p>')
        # Press-and-hold menu on the save button: create/delete/manage.
        self.save_search_button.setMenu(QMenu())
        self.save_search_button.menu().addAction(
            QIcon(I('plus.png')),
            _('Create saved search'),
            self.saved_search.save_search_button_clicked)
        self.save_search_button.menu().addAction(
            QIcon(I('trash.png')), _('Delete saved search'), self.saved_search.delete_current_search)
        self.save_search_button.menu().addAction(
            QIcon(I('search.png')), _('Manage saved searches'), partial(self.do_saved_search_edit, None))

    def saved_searches_changed(self, set_restriction=None, recount=True):
        """Refresh UI after the set of saved searches changes.

        :param set_restriction: name of a search restriction to re-apply,
            or None/falsy to leave restrictions alone.
        :param recount: when True, recount the tag browser.
        """
        self.build_search_restriction_list()
        if recount:
            self.tags_view.recount()
        if set_restriction:  # redo the search restriction if there was one
            self.apply_named_search_restriction(set_restriction)

    def do_saved_search_edit(self, search):
        """Open the saved-search editor for *search* (None => manage/new)
        and rebuild the saved-search list if the dialog was accepted."""
        d = SavedSearchEditor(self, search)
        d.exec_()
        if d.result() == d.Accepted:
            self.do_rebuild_saved_searches()

    def do_rebuild_saved_searches(self):
        """Rebuild dependent UI and clear the saved-search combo box."""
        self.saved_searches_changed()
        self.saved_search.clear()
    # }}}
| gpl-3.0 |
gsnbng/erpnext | erpnext/accounts/doctype/account/chart_of_accounts/verified/standard_chart_of_accounts_with_account_number.py | 15 | 10585 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
from frappe import _
def get():
    """Return the standard numbered chart of accounts as a nested dict.

    Keys are translated account names; each value is a dict that may
    contain child accounts plus metadata keys:

    - "account_number": display/sort number for the account
    - "account_type": ERPNext account type (e.g. "Receivable", "Bank")
    - "is_group": 1 for empty group nodes that may receive children
    - "root_type": only on the five top-level roots (Asset, Expense,
      Income, Liability, Equity)
    """
    return {
        _("Application of Funds (Assets)"): {
            _("Current Assets"): {
                _("Accounts Receivable"): {
                    _("Debtors"): {
                        "account_type": "Receivable",
                        "account_number": "1310"
                    },
                    "account_number": "1300"
                },
                _("Bank Accounts"): {
                    "account_type": "Bank",
                    "is_group": 1,
                    "account_number": "1200"
                },
                _("Cash In Hand"): {
                    _("Cash"): {
                        "account_type": "Cash",
                        "account_number": "1110"
                    },
                    "account_type": "Cash",
                    "account_number": "1100"
                },
                _("Loans and Advances (Assets)"): {
                    _("Employee Advances"): {
                        "account_number": "1610"
                    },
                    "account_number": "1600"
                },
                _("Securities and Deposits"): {
                    _("Earnest Money"): {
                        "account_number": "1651"
                    },
                    "account_number": "1650"
                },
                _("Stock Assets"): {
                    _("Stock In Hand"): {
                        "account_type": "Stock",
                        "account_number": "1410"
                    },
                    "account_type": "Stock",
                    "account_number": "1400"
                },
                _("Tax Assets"): {
                    "is_group": 1,
                    "account_number": "1500"
                },
                # Range-style number: children span 1100-1600.
                "account_number": "1100-1600"
            },
            _("Fixed Assets"): {
                _("Capital Equipments"): {
                    "account_type": "Fixed Asset",
                    "account_number": "1710"
                },
                _("Electronic Equipments"): {
                    "account_type": "Fixed Asset",
                    "account_number": "1720"
                },
                _("Furnitures and Fixtures"): {
                    "account_type": "Fixed Asset",
                    "account_number": "1730"
                },
                _("Office Equipments"): {
                    "account_type": "Fixed Asset",
                    "account_number": "1740"
                },
                _("Plants and Machineries"): {
                    "account_type": "Fixed Asset",
                    "account_number": "1750"
                },
                _("Buildings"): {
                    "account_type": "Fixed Asset",
                    "account_number": "1760"
                },
                _("Softwares"): {
                    "account_type": "Fixed Asset",
                    "account_number": "1770"
                },
                _("Accumulated Depreciation"): {
                    "account_type": "Accumulated Depreciation",
                    "account_number": "1780"
                },
                _("CWIP Account"): {
                    "account_type": "Capital Work in Progress",
                    "account_number": "1790"
                },
                "account_number": "1700"
            },
            _("Investments"): {
                "is_group": 1,
                "account_number": "1800"
            },
            _("Temporary Accounts"): {
                _("Temporary Opening"): {
                    "account_type": "Temporary",
                    "account_number": "1910"
                },
                "account_number": "1900"
            },
            "root_type": "Asset",
            "account_number": "1000"
        },
        _("Expenses"): {
            _("Direct Expenses"): {
                _("Stock Expenses"): {
                    _("Cost of Goods Sold"): {
                        "account_type": "Cost of Goods Sold",
                        "account_number": "5111"
                    },
                    _("Expenses Included In Asset Valuation"): {
                        "account_type": "Expenses Included In Asset Valuation",
                        "account_number": "5112"
                    },
                    _("Expenses Included In Valuation"): {
                        "account_type": "Expenses Included In Valuation",
                        "account_number": "5118"
                    },
                    _("Stock Adjustment"): {
                        "account_type": "Stock Adjustment",
                        "account_number": "5119"
                    },
                    "account_number": "5110"
                },
                "account_number": "5100"
            },
            _("Indirect Expenses"): {
                _("Administrative Expenses"): {
                    "account_number": "5201"
                },
                _("Commission on Sales"): {
                    "account_number": "5202"
                },
                _("Depreciation"): {
                    "account_type": "Depreciation",
                    "account_number": "5203"
                },
                _("Entertainment Expenses"): {
                    "account_number": "5204"
                },
                _("Freight and Forwarding Charges"): {
                    "account_type": "Chargeable",
                    "account_number": "5205"
                },
                _("Legal Expenses"): {
                    "account_number": "5206"
                },
                _("Marketing Expenses"): {
                    "account_type": "Chargeable",
                    "account_number": "5207"
                },
                _("Office Maintenance Expenses"): {
                    "account_number": "5208"
                },
                _("Office Rent"): {
                    "account_number": "5209"
                },
                _("Postal Expenses"): {
                    "account_number": "5210"
                },
                _("Print and Stationery"): {
                    "account_number": "5211"
                },
                _("Round Off"): {
                    "account_type": "Round Off",
                    "account_number": "5212"
                },
                _("Salary"): {
                    "account_number": "5213"
                },
                _("Sales Expenses"): {
                    "account_number": "5214"
                },
                _("Telephone Expenses"): {
                    "account_number": "5215"
                },
                _("Travel Expenses"): {
                    "account_number": "5216"
                },
                _("Utility Expenses"): {
                    "account_number": "5217"
                },
                _("Write Off"): {
                    "account_number": "5218"
                },
                _("Exchange Gain/Loss"): {
                    "account_number": "5219"
                },
                _("Gain/Loss on Asset Disposal"): {
                    "account_number": "5220"
                },
                _("Miscellaneous Expenses"): {
                    "account_type": "Chargeable",
                    "account_number": "5221"
                },
                "account_number": "5200"
            },
            "root_type": "Expense",
            "account_number": "5000"
        },
        _("Income"): {
            _("Direct Income"): {
                _("Sales"): {
                    "account_number": "4110"
                },
                _("Service"): {
                    "account_number": "4120"
                },
                "account_number": "4100"
            },
            _("Indirect Income"): {
                "is_group": 1,
                "account_number": "4200"
            },
            "root_type": "Income",
            "account_number": "4000"
        },
        _("Source of Funds (Liabilities)"): {
            _("Current Liabilities"): {
                _("Accounts Payable"): {
                    _("Creditors"): {
                        "account_type": "Payable",
                        "account_number": "2110"
                    },
                    _("Payroll Payable"): {
                        "account_number": "2120"
                    },
                    "account_number": "2100"
                },
                _("Stock Liabilities"): {
                    _("Stock Received But Not Billed"): {
                        "account_type": "Stock Received But Not Billed",
                        "account_number": "2210"
                    },
                    _("Asset Received But Not Billed"): {
                        "account_type": "Asset Received But Not Billed",
                        "account_number": "2211"
                    },
                    "account_number": "2200"
                },
                _("Duties and Taxes"): {
                    "account_type": "Tax",
                    "is_group": 1,
                    "account_number": "2300"
                },
                _("Loans (Liabilities)"): {
                    _("Secured Loans"): {
                        "account_number": "2410"
                    },
                    _("Unsecured Loans"): {
                        "account_number": "2420"
                    },
                    _("Bank Overdraft Account"): {
                        "account_number": "2430"
                    },
                    "account_number": "2400"
                },
                # Range-style number: children span 2100-2400.
                "account_number": "2100-2400"
            },
            "root_type": "Liability",
            "account_number": "2000"
        },
        _("Equity"): {
            _("Capital Stock"): {
                "account_type": "Equity",
                "account_number": "3100"
            },
            _("Dividends Paid"): {
                "account_type": "Equity",
                "account_number": "3200"
            },
            _("Opening Balance Equity"): {
                "account_type": "Equity",
                "account_number": "3300"
            },
            _("Retained Earnings"): {
                "account_type": "Equity",
                "account_number": "3400"
            },
            "root_type": "Equity",
            "account_number": "3000"
        }
    }
| agpl-3.0 |
texcaltech/windmilltownhomes-old | django/contrib/gis/gdal/prototypes/geom.py | 315 | 4821 | import re
from datetime import date
from ctypes import c_char, c_char_p, c_double, c_int, c_ubyte, c_void_p, POINTER
from django.contrib.gis.gdal.envelope import OGREnvelope
from django.contrib.gis.gdal.libgdal import lgdal, GEOJSON
from django.contrib.gis.gdal.prototypes.errcheck import check_bool, check_envelope
from django.contrib.gis.gdal.prototypes.generation import \
const_string_output, double_output, geom_output, int_output, \
srs_output, string_output, void_output
### Generation routines specific to this module ###
def env_func(f, argtypes):
    """Set up *f* as an OGR routine that fills in an OGREnvelope.

    The wrapped function has no meaningful C return value; the
    ``check_envelope`` errcheck hook extracts the envelope that OGR
    wrote into the by-reference argument.
    """
    f.argtypes, f.restype, f.errcheck = argtypes, None, check_envelope
    return f
def pnt_func(f):
    """Wrap *f* as a point-coordinate accessor: (geometry, index) -> double."""
    point_argtypes = [c_void_p, c_int]
    return double_output(f, point_argtypes)
def topology_func(f):
    """Set up *f* as a binary topology predicate on two geometries.

    The OGR_G_* topology routines (Contains, Crosses, ...) take two
    geometry pointers and return a C int (0/1); the ``check_bool``
    errcheck hook converts that into a Python boolean.
    """
    f.argtypes = [c_void_p, c_void_p]
    f.restype = c_int
    # Bug fix: this was previously assigned to the misspelled attribute
    # "errchck", which ctypes silently ignores, so the boolean conversion
    # was never applied. "errcheck" is the attribute ctypes actually reads.
    f.errcheck = check_bool
    return f
### OGR_G ctypes function prototypes ###
# Each assignment below wraps a raw lgdal (libgdal) OGR_G_* symbol with
# the appropriate argtypes/restype/errcheck configuration via the helper
# generators imported at the top of this module.

# GeoJSON routines, if supported.
if GEOJSON:
    from_json = geom_output(lgdal.OGR_G_CreateGeometryFromJson, [c_char_p])
    to_json = string_output(lgdal.OGR_G_ExportToJson, [c_void_p], str_result=True)
    to_kml = string_output(lgdal.OGR_G_ExportToKML, [c_void_p, c_char_p], str_result=True)
else:
    # Older GDAL builds lack GeoJSON support; False sentinels let callers
    # test for availability before use.
    from_json = False
    to_json = False
    to_kml = False

# GetX, GetY, GetZ all return doubles.
getx = pnt_func(lgdal.OGR_G_GetX)
gety = pnt_func(lgdal.OGR_G_GetY)
getz = pnt_func(lgdal.OGR_G_GetZ)

# Geometry creation routines.
from_wkb = geom_output(lgdal.OGR_G_CreateFromWkb, [c_char_p, c_void_p, POINTER(c_void_p), c_int], offset=-2)
from_wkt = geom_output(lgdal.OGR_G_CreateFromWkt, [POINTER(c_char_p), c_void_p, POINTER(c_void_p)], offset=-1)
create_geom = geom_output(lgdal.OGR_G_CreateGeometry, [c_int])
clone_geom = geom_output(lgdal.OGR_G_Clone, [c_void_p])
get_geom_ref = geom_output(lgdal.OGR_G_GetGeometryRef, [c_void_p, c_int])
get_boundary = geom_output(lgdal.OGR_G_GetBoundary, [c_void_p])
geom_convex_hull = geom_output(lgdal.OGR_G_ConvexHull, [c_void_p])
geom_diff = geom_output(lgdal.OGR_G_Difference, [c_void_p, c_void_p])
geom_intersection = geom_output(lgdal.OGR_G_Intersection, [c_void_p, c_void_p])
geom_sym_diff = geom_output(lgdal.OGR_G_SymmetricDifference, [c_void_p, c_void_p])
geom_union = geom_output(lgdal.OGR_G_Union, [c_void_p, c_void_p])

# Geometry modification routines.
add_geom = void_output(lgdal.OGR_G_AddGeometry, [c_void_p, c_void_p])
import_wkt = void_output(lgdal.OGR_G_ImportFromWkt, [c_void_p, POINTER(c_char_p)])

# Destroys a geometry
destroy_geom = void_output(lgdal.OGR_G_DestroyGeometry, [c_void_p], errcheck=False)

# Geometry export routines.
to_wkb = void_output(lgdal.OGR_G_ExportToWkb, None, errcheck=True) # special handling for WKB.
to_wkt = string_output(lgdal.OGR_G_ExportToWkt, [c_void_p, POINTER(c_char_p)])
to_gml = string_output(lgdal.OGR_G_ExportToGML, [c_void_p], str_result=True)
get_wkbsize = int_output(lgdal.OGR_G_WkbSize, [c_void_p])

# Geometry spatial-reference related routines.
assign_srs = void_output(lgdal.OGR_G_AssignSpatialReference, [c_void_p, c_void_p], errcheck=False)
get_geom_srs = srs_output(lgdal.OGR_G_GetSpatialReference, [c_void_p])

# Geometry properties
get_area = double_output(lgdal.OGR_G_GetArea, [c_void_p])
get_centroid = void_output(lgdal.OGR_G_Centroid, [c_void_p, c_void_p])
get_dims = int_output(lgdal.OGR_G_GetDimension, [c_void_p])
get_coord_dim = int_output(lgdal.OGR_G_GetCoordinateDimension, [c_void_p])
set_coord_dim = void_output(lgdal.OGR_G_SetCoordinateDimension, [c_void_p, c_int], errcheck=False)
get_geom_count = int_output(lgdal.OGR_G_GetGeometryCount, [c_void_p])
get_geom_name = const_string_output(lgdal.OGR_G_GetGeometryName, [c_void_p])
get_geom_type = int_output(lgdal.OGR_G_GetGeometryType, [c_void_p])
get_point_count = int_output(lgdal.OGR_G_GetPointCount, [c_void_p])
get_point = void_output(lgdal.OGR_G_GetPoint, [c_void_p, c_int, POINTER(c_double), POINTER(c_double), POINTER(c_double)], errcheck=False)
geom_close_rings = void_output(lgdal.OGR_G_CloseRings, [c_void_p], errcheck=False)

# Topology routines.
ogr_contains = topology_func(lgdal.OGR_G_Contains)
ogr_crosses = topology_func(lgdal.OGR_G_Crosses)
ogr_disjoint = topology_func(lgdal.OGR_G_Disjoint)
ogr_equals = topology_func(lgdal.OGR_G_Equals)
ogr_intersects = topology_func(lgdal.OGR_G_Intersects)
ogr_overlaps = topology_func(lgdal.OGR_G_Overlaps)
ogr_touches = topology_func(lgdal.OGR_G_Touches)
ogr_within = topology_func(lgdal.OGR_G_Within)

# Transformation routines.
geom_transform = void_output(lgdal.OGR_G_Transform, [c_void_p, c_void_p])
geom_transform_to = void_output(lgdal.OGR_G_TransformTo, [c_void_p, c_void_p])

# For retrieving the envelope of the geometry.
get_envelope = env_func(lgdal.OGR_G_GetEnvelope, [c_void_p, POINTER(OGREnvelope)])
| bsd-3-clause |
pierrecnalb/White-Renamer | whiterenamer/ui/main_window.py | 1 | 18283 | #!/usr/bin/python3
# Copyright (C) 2015-2016 Pierre Blanc
#
# This file is part of WhiteRenamer.
#
# WhiteRenamer is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# WhiteRenamer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with WhiteRenamer. If not, see <http://www.gnu.org/licenses/>.
import sys
import os
import os.path
import subprocess
import webbrowser
import urllib.request
import whiterenamer
from PyQt5.QtCore import pyqtSlot, QSize
from PyQt5.QtWidgets import QMainWindow, QAction, QActionGroup, QLineEdit, QWidget, QSizePolicy, QFileDialog, QMessageBox
from PyQt5.QtGui import QIcon
from . import MainWidget, resource_rc
from ..model import FileSystem
class MainWindow(QMainWindow):
    """Top-level WhiteRenamer window.

    Builds all actions, menus and the toolbar, holds the current filter /
    sort state, and delegates the actual renaming work to a MainWidget
    backed by a FileSystem model.
    """

    def __init__(self, parent=None):
        # NOTE(review): *parent* is accepted but not forwarded to
        # QMainWindow.__init__ — confirm whether that is intentional.
        QMainWindow.__init__(self)
        self.setWindowTitle('White Renamer')
        # self.setWindowIcon(QIcon(':/white_renamer48.png'))
        self.setUnifiedTitleAndToolBarOnMac(True)
        # Filter/sort state, consumed by reset_files_collection().
        self.directory = None
        self.use_subfolder = False
        self.filtered_collection = None
        self.files_collection = None
        self.resize(1000, 800)
        self.showMaximized()
        self.name_filter = ''
        self.files_type = ['*.*']
        self.reverse_order = False
        self.sorting_criteria = "name"
        #CREATE THE ACTIONS
        self.action_open = QAction(self.tr('&Open'), self)
        self.action_open = self.edit_action(self.action_open, self.open_directory_dialog_click, None, 'ctrl+o', "new_icon.png" ,self.tr('Open directory.'))
        self.action_exit = QAction(self.tr('&Exit'), self)
        self.action_exit = self.edit_action(self.action_exit, self.close, None,'ctrl+q', "exit_icon.png", self.tr('Exit the application.'))
        # NOTE(review): 'ctrl+h' is also assigned to action_hide below;
        # duplicate shortcuts are ambiguous in Qt — confirm which should win.
        self.action_help = QAction(self.tr('&Help'), self)
        self.action_help = self.edit_action(self.action_help, self.help_click, None, 'ctrl+h', 'help_icon.png', self.tr('Show help page.'))
        self.action_about = QAction(self.tr('&About'), self)
        self.action_about = self.edit_action(self.action_about, self.about_box_click, None, None, None,self.tr('About Box.'))
        self.action_check_update = QAction(self.tr('&Check Updates'), self)
        self.action_check_update = self.edit_action(self.action_check_update, self.check_update_click, None, None, None,self.tr('Check for updates.'))
        self.action_recursion = QAction(self.tr('Show Subdirectories'), self)
        self.action_recursion = self.edit_action(self.action_recursion, self.recursion_click, bool, None, "subdirectory_icon.png",self.tr('Rename subdirectories recursively.'))
        self.action_recursion.setCheckable(True)
        self.action_hide = QAction(self.tr('Show Hidden Files'), self)
        self.action_hide = self.edit_action(self.action_hide, self.hide_files_click, bool, 'ctrl+h', "hidden_icon.png",self.tr('Show hidden files.'))
        self.action_hide.setCheckable(True)
        self.action_rename = QAction(self.tr('&Rename'), self)
        self.action_rename = self.edit_action(self.action_rename, self.rename_click, None, 'ctrl+enter', "run_icon.png",self.tr('Rename the files/folders.'))
        self.action_undo = QAction(self.tr('Undo'), self)
        self.action_undo = self.edit_action(self.action_undo, self.undo_click, None, 'ctrl+z', "undo_icon.png",self.tr('Undo the previous renaming.'))
        self.action_reverse_sorting = QAction(self.tr('Reverse'), self)
        self.action_reverse_sorting.setCheckable(True)
        self.action_reverse_sorting = self.edit_action(self.action_reverse_sorting, self.reverse_sorting_click, bool, None, "order_icon.png",self.tr('Reverse the sorting order.'))
        self.action_name_sorting = QAction(self.tr('By Name'), self)
        self.action_name_sorting.setCheckable(True)
        self.action_name_sorting = self.edit_action(self.action_name_sorting, self.name_sorting_click, None, None, None,self.tr('Sort the files/folders by name.'))
        self.action_name_sorting.setChecked(True)
        self.action_size_sorting = QAction(self.tr('By Size'), self)
        self.action_size_sorting.setCheckable(True)
        self.action_size_sorting = self.edit_action(self.action_size_sorting, self.size_sorting_click, None, None, None,self.tr('Sort the files/folders by size.'))
        self.action_modified_date_sorting = QAction(self.tr('By Modified Date'), self)
        self.action_modified_date_sorting.setCheckable(True)
        self.action_modified_date_sorting = self.edit_action(self.action_modified_date_sorting, self.modified_date_sorting_click, None, None, None,self.tr('Sort the files/folders by modified date.'))
        self.action_creation_date_sorting = QAction(self.tr('By Creation Date'), self)
        self.action_creation_date_sorting.setCheckable(True)
        self.action_creation_date_sorting = self.edit_action(self.action_creation_date_sorting, self.creation_date_sorting_click, None, None, None,self.tr('Sort the files/folders by creation date.'))
        # Sorting criteria are mutually exclusive.
        sort_group = QActionGroup(self)
        sort_group.addAction(self.action_name_sorting)
        sort_group.addAction(self.action_size_sorting)
        sort_group.addAction(self.action_modified_date_sorting)
        sort_group.addAction(self.action_creation_date_sorting)
        # Free-text name filter embedded in the toolbar.
        filterInput = QLineEdit()
        filterInput.setPlaceholderText(self.tr("Filter Files..."))
        filterInput.setMaximumWidth(150)
        filterInput.textChanged[str].connect(self.get_name_filter)
        self.action_files_only = QAction(self.tr('Files Only'), self)
        self.action_files_only.setCheckable(True)
        self.action_files_only.setChecked(True)
        self.action_files_only = self.edit_action(self.action_files_only, self.files_only_click, None, None, "file_icon.png",self.tr('Rename only files.'))
        self.action_folders_only = QAction(self.tr('Folders Only'), self)
        self.action_folders_only.setCheckable(True)
        self.action_folders_only = self.edit_action(self.action_folders_only, self.folders_only_click, None, None, "folder_icon.png",self.tr('Rename only folders.'))
        # Files-vs-folders is an exclusive choice.
        node_type_selector = QActionGroup(self)
        node_type_selector.setObjectName('selector')
        node_type_selector.addAction(self.action_files_only)
        node_type_selector.addAction(self.action_folders_only)
        # File-kind filter (all / images / music) is also exclusive.
        file_type = QActionGroup(self)
        self.action_all_files = QAction(self.tr("All"),self)
        self.action_all_files = self.edit_action(self.action_all_files, self.all_files_click, None, None, "all_files_icon.png",self.tr('Rename files of any kind.'))
        self.action_all_files.setCheckable(True)
        self.action_all_files.setChecked(True)
        self.action_music_files = QAction(self.tr("Music"),self)
        self.action_music_files = self.edit_action(self.action_music_files, self.music_files_click, None, None, "music_icon.png",self.tr('Rename only music files.'))
        self.action_music_files.setCheckable(True)
        self.action_image_files = QAction(self.tr("Images"),self)
        self.action_image_files = self.edit_action(self.action_image_files, self.image_files_click, None, None, "images_icon.png",self.tr('Rename only image files.'))
        self.action_image_files.setCheckable(True)
        file_type.addAction(self.action_all_files)
        file_type.addAction(self.action_image_files)
        file_type.addAction(self.action_music_files)
        # CREATE THE MENU BAR
        menubar = self.menuBar()
        #FILE
        menu_file = menubar.addMenu(self.tr('&File'))
        menu_file.addAction(self.action_open)
        menu_file.addSeparator()
        menu_file.addAction(self.action_hide)
        menu_file.addAction(self.action_recursion)
        menu_file.addSeparator()
        menu_file.addAction(self.action_exit)
        #EDIT
        menu_edit = menubar.addMenu(self.tr('&Sort'))
        menu_edit.addAction(self.action_name_sorting)
        menu_edit.addAction(self.action_size_sorting)
        menu_edit.addAction(self.action_creation_date_sorting)
        menu_edit.addAction(self.action_modified_date_sorting)
        menu_edit.addSeparator()
        menu_edit.addAction(self.action_reverse_sorting)
        menu_filter = menubar.addMenu(self.tr('&Filter'))
        menu_filter.addAction(self.action_files_only)
        menu_filter.addAction(self.action_folders_only)
        menu_filter.addSeparator()
        menu_filter.addAction(self.action_all_files)
        menu_filter.addAction(self.action_image_files)
        menu_filter.addAction(self.action_music_files)
        #TOOL
        menu_tool = menubar.addMenu(self.tr('&Tool'))
        menu_tool.addAction(self.action_rename)
        menu_tool.addAction(self.action_undo)
        #HELP
        menu_help = menubar.addMenu(self.tr('&Help'))
        menu_help.addAction(self.action_help)
        menu_help.addAction(self.action_about)
        menu_help.addAction(self.action_check_update)
        self.main_toolbar = self.addToolBar('main_toolbar')
        self.main_toolbar.addAction(self.action_open)
        self.main_toolbar.addSeparator()
        self.main_toolbar.addAction(self.action_hide)
        self.main_toolbar.addAction(self.action_recursion)
        self.main_toolbar.addSeparator()
        self.main_toolbar.addAction(self.action_files_only)
        self.main_toolbar.addAction(self.action_folders_only)
        self.main_toolbar.addSeparator()
        self.main_toolbar.addAction(self.action_all_files)
        self.main_toolbar.addAction(self.action_image_files)
        self.main_toolbar.addAction(self.action_music_files)
        self.main_toolbar.addWidget(filterInput)
        self.main_toolbar.addSeparator()
        self.main_toolbar.addAction(self.action_rename)
        self.main_toolbar.addAction(self.action_undo)
        self.main_toolbar.setIconSize(QSize(16,16))
        # Expanding spacer pushes the help button to the toolbar's far end.
        empty = QWidget();
        empty.setSizePolicy(QSizePolicy.Expanding,QSizePolicy.Preferred)
        self.main_toolbar.addWidget(empty)
        self.main_toolbar.addAction(self.action_help)
        # create the status bar
        self.statusBar()
        self.main_widget = MainWidget()
        self.setCentralWidget(self.main_widget)

    def get_main_widget(self):
        """Return the central MainWidget instance."""
        return self.main_widget

    def edit_action(self, action, slot=None, type=None, shortcut=None, icon=None, tip=None):
        '''This method adds to action: icon, shortcut, ToolTip,\
        StatusTip and can connect triggered action to pyqtSlot '''
        if icon is not None:
            action.setIcon(QIcon(":/{0}".format(icon)))
        if shortcut is not None:
            action.setShortcut(shortcut)
        if tip is not None:
            action.setToolTip(tip)
            action.setStatusTip(tip)
        if slot is not None:
            if type is not None:
                # Connect the typed overload, e.g. triggered[bool].
                action.triggered[type].connect(slot)
            else:
                action.triggered.connect(slot)
        return action

    @pyqtSlot()
    def get_name_filter(self, value):
        """Store the free-text filter and refresh the file listing."""
        self.name_filter = value
        self.reset_files_collection()

    @pyqtSlot()
    def help_click(self):
        '''Read and display a help file- currently the README.txt.'''
        if getattr(sys, 'frozen', False): # frozen
            dir_ = os.path.dirname(sys.executable)
            filepath = os.path.join(dir_, "Documentation.pdf")
        else: # unfrozen
            dir_ = os.path.dirname(os.path.realpath(__file__))
            filepath = os.path.join(dir_, "..","doc", "Documentation.pdf")
        # Open the PDF with the platform's default handler.
        if sys.platform.startswith('darwin'):
            subprocess.call(('open', filepath))
        elif os.name == 'nt':
            os.startfile(filepath)
        elif os.name == 'posix':
            subprocess.call(('xdg-open', filepath))

    @pyqtSlot()
    def music_files_click(self):
        """Restrict the listing to common audio file extensions."""
        self.files_type = ['.flac', '.mp3', '.m4a', '.ogg', '.wma', '.m3a', '.mp4']
        self.reset_files_collection()

    @pyqtSlot()
    def image_files_click(self):
        """Restrict the listing to common image file extensions."""
        # NOTE(review): 'cr2' and 'pef' lack the leading dot the other
        # entries have — confirm whether the filter matches them at all.
        self.files_type = ['.jpg', '.jpeg', '.tif', '.png', '.gif', '.bmp', '.eps', '.im', '.jfif', '.j2p', '.jpx', '.pcx', '.ico', '.icns', '.psd', '.nef', 'cr2', 'pef']
        self.reset_files_collection()

    @pyqtSlot()
    def all_files_click(self):
        """Remove the file-kind restriction (match everything)."""
        self.files_type = ['*.*']
        self.reset_files_collection()

    @pyqtSlot()
    def files_only_click(self):
        """Switch to files-only mode and re-enable the kind filters."""
        self.files_type = ['*.*']
        self.action_all_files.setChecked(True)
        self.action_all_files.setEnabled(True)
        self.action_image_files.setEnabled(True)
        self.action_music_files.setEnabled(True)
        self.main_widget.is_file(True)
        self.reset_files_collection()

    @pyqtSlot()
    def folders_only_click(self):
        """Switch to folders-only mode; kind filters do not apply."""
        self.files_type = ["folders"]
        self.action_all_files.setChecked(True)
        self.action_all_files.setEnabled(False)
        self.action_image_files.setEnabled(False)
        self.action_music_files.setEnabled(False)
        self.main_widget.is_file(False)
        self.reset_files_collection()

    @pyqtSlot()
    def about_box_click(self):
        '''Popup a box with about message.'''
        QMessageBox.about(self, "About WhiteRenamer",
            """<b>White Renamer</b>
            <p>Copyright © 2015 Pierre BLANC.</p>
            <p>email : <a href="mailto:pierrecnalb@mailbox.org">pierrecnalb@mailbox.org</a></p>
            <p> White Renamer is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.</p>
            <p>White Renamer is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; See the GNU General Public License for more details.</p>
            """ )

    @pyqtSlot()
    def check_update_click(self):
        """Fetch the published version number and compare with ours."""
        try:
            code_online = urllib.request.urlopen("https://raw.githubusercontent.com/pierrecnalb/White-Renamer/master/whiterenamer/version.txt").read().splitlines()
            version_online = code_online[0].decode().split('.')
            version_online = str(whiterenamer.Version(version_online[0], version_online[1], version_online[2]))
            print(version_online)
            self.update_message(str(whiterenamer.__version__), version_online)
        except:
            # NOTE(review): bare except discards the original error and
            # re-raises a generic Exception — consider chaining the cause.
            raise Exception("Unable to retrieve the new software version from the server. Please try later.")

    @pyqtSlot(bool)
    def recursion_click(self, value):
        """Toggle recursive traversal of subdirectories."""
        self.use_subfolder = value
        if self.directory is None:
            return
        self.reset_files_collection()

    @pyqtSlot(bool)
    def hide_files_click(self, value):
        """Toggle display of hidden files."""
        # NOTE(review): this attribute is written but never read;
        # are_hidden_files_shown() queries the action's checked state instead.
        self.show_hidden_files = value
        self.reset_files_collection()

    def are_hidden_files_shown(self):
        """Return True when the 'Show Hidden Files' action is checked."""
        return self.action_hide.isChecked()

    @pyqtSlot()
    def open_directory_dialog_click(self):
        """Opens a dialog to allow user to choose a directory """
        flags = QFileDialog.DontResolveSymlinks | QFileDialog.ShowDirsOnly
        try:
            self.directory = QFileDialog.getExistingDirectory(self,self.tr("Select Directory"), os.getcwd(), flags)
            self.reset_files_collection()
        except Exception as e:
            print(str(e))
            msg_box = QMessageBox.warning(self, "Invalid directory", "Please select a valid directory." )

    @pyqtSlot(bool)
    def reverse_sorting_click(self, value):
        """Toggle ascending/descending sort order."""
        self.reverse_order = value
        self.reset_files_collection()

    @pyqtSlot()
    def name_sorting_click(self):
        """Sort the listing by file name."""
        self.sorting_criteria = "name"
        self.reset_files_collection()

    @pyqtSlot()
    def size_sorting_click(self):
        """Sort the listing by file size."""
        self.sorting_criteria = "size"
        self.reset_files_collection()

    @pyqtSlot()
    def creation_date_sorting_click(self):
        """Sort the listing by creation date."""
        self.sorting_criteria = "creation_date"
        self.reset_files_collection()

    @pyqtSlot()
    def modified_date_sorting_click(self):
        """Sort the listing by last-modified date."""
        self.sorting_criteria = "modified_date"
        self.reset_files_collection()

    def reset_files_collection(self):
        """Rebuild the FileSystem view from the current filter/sort state
        and push it into the central widget. No-op until a directory is set."""
        if(self.directory is None):
            return
        self.files_system = FileSystem(self.directory, self.use_subfolder)
        self.files_system_view = self.files_system.generate_files_system_view(self.are_hidden_files_shown(), self.files_type, self.name_filter, self.sorting_criteria, self.reverse_order)
        self.main_widget.set_filtered_files(self.files_system_view)

    @pyqtSlot()
    def rename_click(self):
        """Apply the renaming and refresh the listing."""
        self.main_widget.rename()
        self.reset_files_collection()

    @pyqtSlot()
    def undo_click(self):
        """Revert the previous renaming operation."""
        self.main_widget.undo()

    def update_message(self, version, new_version):
        """Show an up-to-date or update-available dialog; on acceptance,
        open the releases page in the default web browser.

        :param version: currently running version string.
        :param new_version: latest published version string.
        """
        if(version == new_version):
            msg_box = QMessageBox.information(self, "Version",
                """<p>Your version of whiterenamer is up to date.</p>
                """.format(version, new_version), QMessageBox.Ok)
        else:
            msg_box = QMessageBox.information(self, "Update available",
                """
                <b>New version available</b>
                <p>You are running an old version of White Renamer (v{0}).</p>
                <p>A newer version is available (v{1}). Do you want to download it ?</p>
                """.format(version, new_version), QMessageBox.No, QMessageBox.Yes)
            if msg_box == QMessageBox.No:
                pass
            if msg_box == QMessageBox.Yes:
                new = 2 # open in a new tab, if possible
                # open a public URL, in this case, the webbrowser docs
                url = "https://github.com/pierrecnalb/WhiteRenamer-builds"
                webbrowser.open(url,new=new)
        # Save was clicked
        # elif ret == QMessageBox.Discard:
        # Don't save was clicked
| gpl-2.0 |
maestrano/openerp | openerp/conf/__init__.py | 442 | 1974 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Library-wide configuration variables.
For now, configuration code is in openerp.tools.config. It is in mainly
unprocessed form, e.g. addons_path is a string with commas-separated
paths. The aim is to have code related to configuration (command line
parsing, configuration file loading and saving, ...) in this module
and provide real Python variables, e.g. addons_paths is really a list
of paths.
To initialize properly this module, openerp.tools.config.parse_config()
must be used.
"""
import deprecation
# Paths to search for OpenERP addons.
addons_paths = []
# List of server-wide modules to load. Those modules are supposed to provide
# features not necessarily tied to a particular database. This is in contrast
# to modules that are always bound to a specific database when they are
# installed (i.e. the majority of OpenERP addons). This is set with the --load
# command-line option.
server_wide_modules = []
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
paulmadore/Eric-IDE | 6-6.0.9/eric/Plugins/VcsPlugins/vcsSubversion/SvnStatusDialog.py | 1 | 38220 | # -*- coding: utf-8 -*-
# Copyright (c) 2003 - 2015 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing a dialog to show the output of the svn status command
process.
"""
from __future__ import unicode_literals
try:
str = unicode
except NameError:
pass
import os
from PyQt5.QtCore import QTimer, QProcess, QRegExp, Qt, pyqtSlot
from PyQt5.QtWidgets import QWidget, QHeaderView, QLineEdit, QApplication, \
QMenu, QDialogButtonBox, QTreeWidgetItem
from E5Gui.E5Application import e5App
from E5Gui import E5MessageBox
from .Ui_SvnStatusDialog import Ui_SvnStatusDialog
import Preferences
class SvnStatusDialog(QWidget, Ui_SvnStatusDialog):
"""
Class implementing a dialog to show the output of the svn status command
process.
"""
def __init__(self, vcs, parent=None):
"""
Constructor
@param vcs reference to the vcs object
@param parent parent widget (QWidget)
"""
super(SvnStatusDialog, self).__init__(parent)
self.setupUi(self)
self.__toBeCommittedColumn = 0
self.__changelistColumn = 1
self.__statusColumn = 2
self.__propStatusColumn = 3
self.__lockedColumn = 4
self.__historyColumn = 5
self.__switchedColumn = 6
self.__lockinfoColumn = 7
self.__upToDateColumn = 8
self.__pathColumn = 12
self.__lastColumn = self.statusList.columnCount()
self.refreshButton = \
self.buttonBox.addButton(self.tr("Refresh"),
QDialogButtonBox.ActionRole)
self.refreshButton.setToolTip(
self.tr("Press to refresh the status display"))
self.refreshButton.setEnabled(False)
self.buttonBox.button(QDialogButtonBox.Close).setEnabled(False)
self.buttonBox.button(QDialogButtonBox.Cancel).setDefault(True)
self.diff = None
self.process = None
self.vcs = vcs
self.vcs.committed.connect(self.__committed)
self.statusList.headerItem().setText(self.__lastColumn, "")
self.statusList.header().setSortIndicator(self.__pathColumn,
Qt.AscendingOrder)
if self.vcs.version < (1, 5, 0):
self.statusList.header().hideSection(self.__changelistColumn)
self.menuactions = []
self.menu = QMenu()
self.menuactions.append(self.menu.addAction(
self.tr("Commit changes to repository..."), self.__commit))
self.menuactions.append(self.menu.addAction(
self.tr("Select all for commit"), self.__commitSelectAll))
self.menuactions.append(self.menu.addAction(
self.tr("Deselect all from commit"), self.__commitDeselectAll))
self.menu.addSeparator()
self.menuactions.append(self.menu.addAction(
self.tr("Add to repository"), self.__add))
self.menuactions.append(self.menu.addAction(
self.tr("Show differences"), self.__diff))
self.menuactions.append(self.menu.addAction(
self.tr("Show differences side-by-side"), self.__sbsDiff))
self.menuactions.append(self.menu.addAction(
self.tr("Revert changes"), self.__revert))
self.menuactions.append(self.menu.addAction(
self.tr("Restore missing"), self.__restoreMissing))
if self.vcs.version >= (1, 5, 0):
self.menu.addSeparator()
self.menuactions.append(self.menu.addAction(
self.tr("Add to Changelist"), self.__addToChangelist))
self.menuactions.append(self.menu.addAction(
self.tr("Remove from Changelist"),
self.__removeFromChangelist))
if self.vcs.version >= (1, 2, 0):
self.menu.addSeparator()
self.menuactions.append(self.menu.addAction(
self.tr("Lock"), self.__lock))
self.menuactions.append(self.menu.addAction(
self.tr("Unlock"), self.__unlock))
self.menuactions.append(self.menu.addAction(
self.tr("Break lock"),
self.__breakLock))
self.menuactions.append(self.menu.addAction(
self.tr("Steal lock"),
self.__stealLock))
self.menu.addSeparator()
self.menuactions.append(self.menu.addAction(
self.tr("Adjust column sizes"),
self.__resizeColumns))
for act in self.menuactions:
act.setEnabled(False)
self.statusList.setContextMenuPolicy(Qt.CustomContextMenu)
self.statusList.customContextMenuRequested.connect(
self.__showContextMenu)
self.modifiedIndicators = [
self.tr('added'),
self.tr('deleted'),
self.tr('modified'),
]
self.missingIndicators = [
self.tr('missing'),
]
self.unversionedIndicators = [
self.tr('unversioned'),
]
self.lockedIndicators = [
self.tr('locked'),
]
self.stealBreakLockIndicators = [
self.tr('other lock'),
self.tr('stolen lock'),
self.tr('broken lock'),
]
self.unlockedIndicators = [
self.tr('not locked'),
]
self.status = {
' ': self.tr('normal'),
'A': self.tr('added'),
'D': self.tr('deleted'),
'M': self.tr('modified'),
'R': self.tr('replaced'),
'C': self.tr('conflict'),
'X': self.tr('external'),
'I': self.tr('ignored'),
'?': self.tr('unversioned'),
'!': self.tr('missing'),
'~': self.tr('type error'),
}
self.propStatus = {
' ': self.tr('normal'),
'M': self.tr('modified'),
'C': self.tr('conflict'),
}
self.locked = {
' ': self.tr('no'),
'L': self.tr('yes'),
}
self.history = {
' ': self.tr('no'),
'+': self.tr('yes'),
}
self.switched = {
' ': self.tr('no'),
'S': self.tr('yes'),
}
self.lockinfo = {
' ': self.tr('not locked'),
'K': self.tr('locked'),
'O': self.tr('other lock'),
'T': self.tr('stolen lock'),
'B': self.tr('broken lock'),
}
self.uptodate = {
' ': self.tr('yes'),
'*': self.tr('no'),
}
self.rx_status = QRegExp(
'(.{8,9})\\s+([0-9-]+)\\s+([0-9?]+)\\s+(\\S+)\\s+(.+)\\s*')
# flags (8 or 9 anything), revision, changed rev, author, path
self.rx_status2 = \
QRegExp('(.{8,9})\\s+(.+)\\s*')
# flags (8 or 9 anything), path
self.rx_changelist = \
QRegExp('--- \\S+ .([\\w\\s]+).:\\s+')
# three dashes, Changelist (translated), quote,
# changelist name, quote, :
self.__nonverbose = True
def __resort(self):
"""
Private method to resort the tree.
"""
self.statusList.sortItems(
self.statusList.sortColumn(),
self.statusList.header().sortIndicatorOrder())
    def __resizeColumns(self):
        """
        Private method to resize the list columns.

        Sizes every column to its contents and lets the last column
        stretch to fill the remaining width.
        """
        self.statusList.header().resizeSections(QHeaderView.ResizeToContents)
        self.statusList.header().setStretchLastSection(True)
def __generateItem(self, status, propStatus, locked, history, switched,
lockinfo, uptodate, revision, change, author, path):
"""
Private method to generate a status item in the status list.
@param status status indicator (string)
@param propStatus property status indicator (string)
@param locked locked indicator (string)
@param history history indicator (string)
@param switched switched indicator (string)
@param lockinfo lock indicator (string)
@param uptodate up to date indicator (string)
@param revision revision string (string)
@param change revision of last change (string)
@param author author of the last change (string)
@param path path of the file or directory (string)
"""
if self.__nonverbose and \
status == " " and \
propStatus == " " and \
locked == " " and \
history == " " and \
switched == " " and \
lockinfo == " " and \
uptodate == " " and \
self.currentChangelist == "":
return
if revision == "":
rev = ""
else:
try:
rev = int(revision)
except ValueError:
rev = revision
if change == "":
chg = ""
else:
try:
chg = int(change)
except ValueError:
chg = change
statusText = self.status[status]
itm = QTreeWidgetItem(self.statusList)
itm.setData(0, Qt.DisplayRole, "")
itm.setData(1, Qt.DisplayRole, self.currentChangelist)
itm.setData(2, Qt.DisplayRole, statusText)
itm.setData(3, Qt.DisplayRole, self.propStatus[propStatus])
itm.setData(4, Qt.DisplayRole, self.locked[locked])
itm.setData(5, Qt.DisplayRole, self.history[history])
itm.setData(6, Qt.DisplayRole, self.switched[switched])
itm.setData(7, Qt.DisplayRole, self.lockinfo[lockinfo])
itm.setData(8, Qt.DisplayRole, self.uptodate[uptodate])
itm.setData(9, Qt.DisplayRole, rev)
itm.setData(10, Qt.DisplayRole, chg)
itm.setData(11, Qt.DisplayRole, author)
itm.setData(12, Qt.DisplayRole, path)
itm.setTextAlignment(1, Qt.AlignLeft)
itm.setTextAlignment(2, Qt.AlignHCenter)
itm.setTextAlignment(3, Qt.AlignHCenter)
itm.setTextAlignment(4, Qt.AlignHCenter)
itm.setTextAlignment(5, Qt.AlignHCenter)
itm.setTextAlignment(6, Qt.AlignHCenter)
itm.setTextAlignment(7, Qt.AlignHCenter)
itm.setTextAlignment(8, Qt.AlignHCenter)
itm.setTextAlignment(9, Qt.AlignRight)
itm.setTextAlignment(10, Qt.AlignRight)
itm.setTextAlignment(11, Qt.AlignLeft)
itm.setTextAlignment(12, Qt.AlignLeft)
if status in "ADM" or propStatus in "M":
itm.setFlags(itm.flags() | Qt.ItemIsUserCheckable)
itm.setCheckState(self.__toBeCommittedColumn, Qt.Checked)
else:
itm.setFlags(itm.flags() & ~Qt.ItemIsUserCheckable)
self.hidePropertyStatusColumn = self.hidePropertyStatusColumn and \
propStatus == " "
self.hideLockColumns = self.hideLockColumns and \
locked == " " and lockinfo == " "
self.hideUpToDateColumn = self.hideUpToDateColumn and uptodate == " "
self.hideHistoryColumn = self.hideHistoryColumn and history == " "
self.hideSwitchedColumn = self.hideSwitchedColumn and switched == " "
if statusText not in self.__statusFilters:
self.__statusFilters.append(statusText)
def closeEvent(self, e):
"""
Protected slot implementing a close event handler.
@param e close event (QCloseEvent)
"""
if self.process is not None and \
self.process.state() != QProcess.NotRunning:
self.process.terminate()
QTimer.singleShot(2000, self.process.kill)
self.process.waitForFinished(3000)
e.accept()
def start(self, fn):
"""
Public slot to start the svn status command.
@param fn filename(s)/directoryname(s) to show the status of
(string or list of strings)
"""
self.errorGroup.hide()
self.intercept = False
self.args = fn
for act in self.menuactions:
act.setEnabled(False)
self.addButton.setEnabled(False)
self.commitButton.setEnabled(False)
self.diffButton.setEnabled(False)
self.sbsDiffButton.setEnabled(False)
self.revertButton.setEnabled(False)
self.restoreButton.setEnabled(False)
self.statusFilterCombo.clear()
self.__statusFilters = []
self.currentChangelist = ""
self.changelistFound = False
self.hidePropertyStatusColumn = True
self.hideLockColumns = True
self.hideUpToDateColumn = True
self.hideHistoryColumn = True
self.hideSwitchedColumn = True
if self.process:
self.process.kill()
else:
self.process = QProcess()
self.process.finished.connect(self.__procFinished)
self.process.readyReadStandardOutput.connect(self.__readStdout)
self.process.readyReadStandardError.connect(self.__readStderr)
args = []
args.append('status')
self.vcs.addArguments(args, self.vcs.options['global'])
self.vcs.addArguments(args, self.vcs.options['status'])
if '--verbose' not in self.vcs.options['global'] and \
'--verbose' not in self.vcs.options['status']:
args.append('--verbose')
self.__nonverbose = True
else:
self.__nonverbose = False
if '--show-updates' in self.vcs.options['status'] or \
'-u' in self.vcs.options['status']:
self.activateWindow()
self.raise_()
if isinstance(fn, list):
self.dname, fnames = self.vcs.splitPathList(fn)
self.vcs.addArguments(args, fnames)
else:
self.dname, fname = self.vcs.splitPath(fn)
args.append(fname)
self.process.setWorkingDirectory(self.dname)
self.setWindowTitle(self.tr('Subversion Status'))
self.process.start('svn', args)
procStarted = self.process.waitForStarted(5000)
if not procStarted:
self.inputGroup.setEnabled(False)
self.inputGroup.hide()
E5MessageBox.critical(
self,
self.tr('Process Generation Error'),
self.tr(
'The process {0} could not be started. '
'Ensure, that it is in the search path.'
).format('svn'))
else:
self.inputGroup.setEnabled(True)
self.inputGroup.show()
def __finish(self):
"""
Private slot called when the process finished or the user pressed
the button.
"""
if self.process is not None and \
self.process.state() != QProcess.NotRunning:
self.process.terminate()
QTimer.singleShot(2000, self.process.kill)
self.process.waitForFinished(3000)
self.buttonBox.button(QDialogButtonBox.Close).setEnabled(True)
self.buttonBox.button(QDialogButtonBox.Cancel).setEnabled(False)
self.buttonBox.button(QDialogButtonBox.Close).setDefault(True)
self.buttonBox.button(QDialogButtonBox.Close).setFocus(
Qt.OtherFocusReason)
self.inputGroup.setEnabled(False)
self.inputGroup.hide()
self.refreshButton.setEnabled(True)
self.__statusFilters.sort()
self.__statusFilters.insert(0, "<{0}>".format(self.tr("all")))
self.statusFilterCombo.addItems(self.__statusFilters)
for act in self.menuactions:
act.setEnabled(True)
self.process = None
self.__resort()
self.__resizeColumns()
self.statusList.setColumnHidden(self.__changelistColumn,
not self.changelistFound)
self.statusList.setColumnHidden(self.__propStatusColumn,
self.hidePropertyStatusColumn)
self.statusList.setColumnHidden(self.__lockedColumn,
self.hideLockColumns)
self.statusList.setColumnHidden(self.__lockinfoColumn,
self.hideLockColumns)
self.statusList.setColumnHidden(self.__upToDateColumn,
self.hideUpToDateColumn)
self.statusList.setColumnHidden(self.__historyColumn,
self.hideHistoryColumn)
self.statusList.setColumnHidden(self.__switchedColumn,
self.hideSwitchedColumn)
self.__updateButtons()
self.__updateCommitButton()
def on_buttonBox_clicked(self, button):
"""
Private slot called by a button of the button box clicked.
@param button button that was clicked (QAbstractButton)
"""
if button == self.buttonBox.button(QDialogButtonBox.Close):
self.close()
elif button == self.buttonBox.button(QDialogButtonBox.Cancel):
self.__finish()
elif button == self.refreshButton:
self.on_refreshButton_clicked()
def __procFinished(self, exitCode, exitStatus):
"""
Private slot connected to the finished signal.
@param exitCode exit code of the process (integer)
@param exitStatus exit status of the process (QProcess.ExitStatus)
"""
self.__finish()
def __readStdout(self):
"""
Private slot to handle the readyReadStandardOutput signal.
It reads the output of the process, formats it and inserts it into
the contents pane.
"""
if self.process is not None:
self.process.setReadChannel(QProcess.StandardOutput)
while self.process.canReadLine():
s = str(self.process.readLine(),
Preferences.getSystem("IOEncoding"),
'replace')
if self.rx_status.exactMatch(s):
flags = self.rx_status.cap(1)
rev = self.rx_status.cap(2)
change = self.rx_status.cap(3)
author = self.rx_status.cap(4)
path = self.rx_status.cap(5).strip()
self.__generateItem(flags[0], flags[1], flags[2], flags[3],
flags[4], flags[5], flags[-1], rev,
change, author, path)
elif self.rx_status2.exactMatch(s):
flags = self.rx_status2.cap(1)
path = self.rx_status2.cap(2).strip()
self.__generateItem(flags[0], flags[1], flags[2], flags[3],
flags[4], flags[5], flags[-1], "", "",
"", path)
elif self.rx_changelist.exactMatch(s):
self.currentChangelist = self.rx_changelist.cap(1)
self.changelistFound = True
def __readStderr(self):
"""
Private slot to handle the readyReadStandardError signal.
It reads the error output of the process and inserts it into the
error pane.
"""
if self.process is not None:
self.errorGroup.show()
s = str(self.process.readAllStandardError(),
Preferences.getSystem("IOEncoding"),
'replace')
self.errors.insertPlainText(s)
self.errors.ensureCursorVisible()
def on_passwordCheckBox_toggled(self, isOn):
"""
Private slot to handle the password checkbox toggled.
@param isOn flag indicating the status of the check box (boolean)
"""
if isOn:
self.input.setEchoMode(QLineEdit.Password)
else:
self.input.setEchoMode(QLineEdit.Normal)
@pyqtSlot()
def on_sendButton_clicked(self):
"""
Private slot to send the input to the subversion process.
"""
input = self.input.text()
input += os.linesep
if self.passwordCheckBox.isChecked():
self.errors.insertPlainText(os.linesep)
self.errors.ensureCursorVisible()
else:
self.errors.insertPlainText(input)
self.errors.ensureCursorVisible()
self.process.write(input)
self.passwordCheckBox.setChecked(False)
self.input.clear()
def on_input_returnPressed(self):
"""
Private slot to handle the press of the return key in the input field.
"""
self.intercept = True
self.on_sendButton_clicked()
def keyPressEvent(self, evt):
"""
Protected slot to handle a key press event.
@param evt the key press event (QKeyEvent)
"""
if self.intercept:
self.intercept = False
evt.accept()
return
super(SvnStatusDialog, self).keyPressEvent(evt)
@pyqtSlot()
def on_refreshButton_clicked(self):
"""
Private slot to refresh the status display.
"""
self.buttonBox.button(QDialogButtonBox.Close).setEnabled(False)
self.buttonBox.button(QDialogButtonBox.Cancel).setEnabled(True)
self.buttonBox.button(QDialogButtonBox.Cancel).setDefault(True)
self.inputGroup.setEnabled(True)
self.inputGroup.show()
self.refreshButton.setEnabled(False)
self.statusList.clear()
self.start(self.args)
def __updateButtons(self):
"""
Private method to update the VCS buttons status.
"""
modified = len(self.__getModifiedItems())
unversioned = len(self.__getUnversionedItems())
missing = len(self.__getMissingItems())
self.addButton.setEnabled(unversioned)
self.diffButton.setEnabled(modified)
self.sbsDiffButton.setEnabled(modified == 1)
self.revertButton.setEnabled(modified)
self.restoreButton.setEnabled(missing)
def __updateCommitButton(self):
"""
Private method to update the Commit button status.
"""
commitable = len(self.__getCommitableItems())
self.commitButton.setEnabled(commitable)
@pyqtSlot(str)
def on_statusFilterCombo_activated(self, txt):
"""
Private slot to react to the selection of a status filter.
@param txt selected status filter (string)
"""
if txt == "<{0}>".format(self.tr("all")):
for topIndex in range(self.statusList.topLevelItemCount()):
topItem = self.statusList.topLevelItem(topIndex)
topItem.setHidden(False)
else:
for topIndex in range(self.statusList.topLevelItemCount()):
topItem = self.statusList.topLevelItem(topIndex)
topItem.setHidden(topItem.text(self.__statusColumn) != txt)
@pyqtSlot(QTreeWidgetItem, int)
def on_statusList_itemChanged(self, item, column):
"""
Private slot to act upon item changes.
@param item reference to the changed item (QTreeWidgetItem)
@param column index of column that changed (integer)
"""
if column == self.__toBeCommittedColumn:
self.__updateCommitButton()
@pyqtSlot()
def on_statusList_itemSelectionChanged(self):
"""
Private slot to act upon changes of selected items.
"""
self.__updateButtons()
@pyqtSlot()
def on_commitButton_clicked(self):
"""
Private slot to handle the press of the Commit button.
"""
self.__commit()
@pyqtSlot()
def on_addButton_clicked(self):
"""
Private slot to handle the press of the Add button.
"""
self.__add()
@pyqtSlot()
def on_diffButton_clicked(self):
"""
Private slot to handle the press of the Differences button.
"""
self.__diff()
@pyqtSlot()
def on_sbsDiffButton_clicked(self):
"""
Private slot to handle the press of the Side-by-Side Diff button.
"""
self.__sbsDiff()
@pyqtSlot()
def on_revertButton_clicked(self):
"""
Private slot to handle the press of the Revert button.
"""
self.__revert()
@pyqtSlot()
def on_restoreButton_clicked(self):
"""
Private slot to handle the press of the Restore button.
"""
self.__restoreMissing()
###########################################################################
## Context menu handling methods
###########################################################################
def __showContextMenu(self, coord):
"""
Private slot to show the context menu of the status list.
@param coord the position of the mouse pointer (QPoint)
"""
self.menu.popup(self.statusList.mapToGlobal(coord))
def __commit(self):
"""
Private slot to handle the Commit context menu entry.
"""
names = [os.path.join(self.dname, itm.text(self.__pathColumn))
for itm in self.__getCommitableItems()]
if not names:
E5MessageBox.information(
self,
self.tr("Commit"),
self.tr("""There are no entries selected to be"""
""" committed."""))
return
if Preferences.getVCS("AutoSaveFiles"):
vm = e5App().getObject("ViewManager")
for name in names:
vm.saveEditor(name)
self.vcs.vcsCommit(names, '')
def __committed(self):
"""
Private slot called after the commit has finished.
"""
if self.isVisible():
self.on_refreshButton_clicked()
self.vcs.checkVCSStatus()
def __commitSelectAll(self):
"""
Private slot to select all entries for commit.
"""
self.__commitSelect(True)
def __commitDeselectAll(self):
"""
Private slot to deselect all entries from commit.
"""
self.__commitSelect(False)
def __add(self):
"""
Private slot to handle the Add context menu entry.
"""
names = [os.path.join(self.dname, itm.text(self.__pathColumn))
for itm in self.__getUnversionedItems()]
if not names:
E5MessageBox.information(
self,
self.tr("Add"),
self.tr("""There are no unversioned entries"""
""" available/selected."""))
return
self.vcs.vcsAdd(names)
self.on_refreshButton_clicked()
project = e5App().getObject("Project")
for name in names:
project.getModel().updateVCSStatus(name)
self.vcs.checkVCSStatus()
def __revert(self):
"""
Private slot to handle the Revert context menu entry.
"""
names = [os.path.join(self.dname, itm.text(self.__pathColumn))
for itm in self.__getModifiedItems()]
if not names:
E5MessageBox.information(
self,
self.tr("Revert"),
self.tr("""There are no uncommitted changes"""
""" available/selected."""))
return
self.vcs.vcsRevert(names)
self.raise_()
self.activateWindow()
self.on_refreshButton_clicked()
project = e5App().getObject("Project")
for name in names:
project.getModel().updateVCSStatus(name)
self.vcs.checkVCSStatus()
def __restoreMissing(self):
"""
Private slot to handle the Restore Missing context menu entry.
"""
names = [os.path.join(self.dname, itm.text(self.__pathColumn))
for itm in self.__getMissingItems()]
if not names:
E5MessageBox.information(
self,
self.tr("Revert"),
self.tr("""There are no missing entries"""
""" available/selected."""))
return
self.vcs.vcsRevert(names)
self.on_refreshButton_clicked()
self.vcs.checkVCSStatus()
def __diff(self):
"""
Private slot to handle the Diff context menu entry.
"""
names = [os.path.join(self.dname, itm.text(self.__pathColumn))
for itm in self.__getModifiedItems()]
if not names:
E5MessageBox.information(
self,
self.tr("Differences"),
self.tr("""There are no uncommitted changes"""
""" available/selected."""))
return
if self.diff is None:
from .SvnDiffDialog import SvnDiffDialog
self.diff = SvnDiffDialog(self.vcs)
self.diff.show()
QApplication.processEvents()
self.diff.start(names)
def __sbsDiff(self):
"""
Private slot to handle the Side-by-Side Diff context menu entry.
"""
names = [os.path.join(self.dname, itm.text(self.__pathColumn))
for itm in self.__getModifiedItems()]
if not names:
E5MessageBox.information(
self,
self.tr("Side-by-Side Diff"),
self.tr("""There are no uncommitted changes"""
""" available/selected."""))
return
elif len(names) > 1:
E5MessageBox.information(
self,
self.tr("Side-by-Side Diff"),
self.tr("""Only one file with uncommitted changes"""
""" must be selected."""))
return
self.vcs.svnSbsDiff(names[0])
def __lock(self):
"""
Private slot to handle the Lock context menu entry.
"""
names = [os.path.join(self.dname, itm.text(self.__pathColumn))
for itm in self.__getLockActionItems(self.unlockedIndicators)]
if not names:
E5MessageBox.information(
self,
self.tr("Lock"),
self.tr("""There are no unlocked files"""
""" available/selected."""))
return
self.vcs.svnLock(names, parent=self)
self.on_refreshButton_clicked()
def __unlock(self):
"""
Private slot to handle the Unlock context menu entry.
"""
names = [os.path.join(self.dname, itm.text(self.__pathColumn))
for itm in self.__getLockActionItems(self.lockedIndicators)]
if not names:
E5MessageBox.information(
self,
self.tr("Unlock"),
self.tr("""There are no locked files"""
""" available/selected."""))
return
self.vcs.svnUnlock(names, parent=self)
self.on_refreshButton_clicked()
def __breakLock(self):
"""
Private slot to handle the Break Lock context menu entry.
"""
names = [os.path.join(self.dname, itm.text(self.__pathColumn))
for itm in self.__getLockActionItems(
self.stealBreakLockIndicators)]
if not names:
E5MessageBox.information(
self,
self.tr("Break Lock"),
self.tr("""There are no locked files"""
""" available/selected."""))
return
self.vcs.svnUnlock(names, parent=self, breakIt=True)
self.on_refreshButton_clicked()
    def __stealLock(self):
        """
        Private slot to handle the Steal Lock context menu entry.

        Re-locks the selected entries whose lock is held elsewhere
        (other/stolen/broken lock) by stealing the lock.
        """
        names = [os.path.join(self.dname, itm.text(self.__pathColumn))
                 for itm in self.__getLockActionItems(
                     self.stealBreakLockIndicators)]
        if not names:
            E5MessageBox.information(
                self,
                self.tr("Steal Lock"),
                self.tr("""There are no locked files"""
                        """ available/selected."""))
            return
        self.vcs.svnLock(names, parent=self, stealIt=True)
        self.on_refreshButton_clicked()
def __addToChangelist(self):
"""
Private slot to add entries to a changelist.
"""
names = [os.path.join(self.dname, itm.text(self.__pathColumn))
for itm in self.__getNonChangelistItems()]
if not names:
E5MessageBox.information(
self,
self.tr("Remove from Changelist"),
self.tr(
"""There are no files available/selected not """
"""belonging to a changelist."""
)
)
return
self.vcs.svnAddToChangelist(names)
self.on_refreshButton_clicked()
def __removeFromChangelist(self):
"""
Private slot to remove entries from their changelists.
"""
names = [os.path.join(self.dname, itm.text(self.__pathColumn))
for itm in self.__getChangelistItems()]
if not names:
E5MessageBox.information(
self,
self.tr("Remove from Changelist"),
self.tr(
"""There are no files available/selected belonging"""
""" to a changelist."""
)
)
return
self.vcs.svnRemoveFromChangelist(names)
self.on_refreshButton_clicked()
def __getCommitableItems(self):
"""
Private method to retrieve all entries the user wants to commit.
@return list of all items, the user has checked
"""
commitableItems = []
for index in range(self.statusList.topLevelItemCount()):
itm = self.statusList.topLevelItem(index)
if itm.checkState(self.__toBeCommittedColumn) == Qt.Checked:
commitableItems.append(itm)
return commitableItems
def __getModifiedItems(self):
"""
Private method to retrieve all entries, that have a modified status.
@return list of all items with a modified status
"""
modifiedItems = []
for itm in self.statusList.selectedItems():
if itm.text(self.__statusColumn) in self.modifiedIndicators or \
itm.text(self.__propStatusColumn) in self.modifiedIndicators:
modifiedItems.append(itm)
return modifiedItems
def __getUnversionedItems(self):
"""
Private method to retrieve all entries, that have an unversioned
status.
@return list of all items with an unversioned status
"""
unversionedItems = []
for itm in self.statusList.selectedItems():
if itm.text(self.__statusColumn) in self.unversionedIndicators:
unversionedItems.append(itm)
return unversionedItems
def __getMissingItems(self):
"""
Private method to retrieve all entries, that have a missing status.
@return list of all items with a missing status
"""
missingItems = []
for itm in self.statusList.selectedItems():
if itm.text(self.__statusColumn) in self.missingIndicators:
missingItems.append(itm)
return missingItems
    def __getLockActionItems(self, indicators):
        """
        Private method to retrieve all entries, that have a locked status.

        @param indicators list of indicators to check against (list of strings)
        @return list of all items with a locked status
        """
        lockitems = []
        for itm in self.statusList.selectedItems():
            if itm.text(self.__lockinfoColumn) in indicators:
                lockitems.append(itm)
        return lockitems
def __getChangelistItems(self):
"""
Private method to retrieve all entries, that are members of
a changelist.
@return list of all items belonging to a changelist
"""
clitems = []
for itm in self.statusList.selectedItems():
if itm.text(self.__changelistColumn) != "":
clitems.append(itm)
return clitems
def __getNonChangelistItems(self):
"""
Private method to retrieve all entries, that are not members of
a changelist.
@return list of all items not belonging to a changelist
"""
clitems = []
for itm in self.statusList.selectedItems():
if itm.text(self.__changelistColumn) == "":
clitems.append(itm)
return clitems
def __commitSelect(self, selected):
"""
Private slot to select or deselect all entries.
@param selected commit selection state to be set (boolean)
"""
for index in range(self.statusList.topLevelItemCount()):
itm = self.statusList.topLevelItem(index)
if itm.flags() & Qt.ItemIsUserCheckable:
if selected:
itm.setCheckState(self.__toBeCommittedColumn, Qt.Checked)
else:
itm.setCheckState(self.__toBeCommittedColumn, Qt.Unchecked)
| gpl-3.0 |
yoava333/servo | tests/wpt/web-platform-tests/tools/html5lib/html5lib/trie/datrie.py | 785 | 1166 | from __future__ import absolute_import, division, unicode_literals
from datrie import Trie as DATrie
from six import text_type
from ._base import Trie as ABCTrie
class Trie(ABCTrie):
    """Adapter exposing a ``datrie.Trie`` through the ``ABCTrie`` interface."""
    def __init__(self, data):
        """Build the trie from *data*, a mapping of text keys to values.

        Raises TypeError if any key is not a text string.
        """
        chars = set()
        # Collect the alphabet of all characters appearing in the keys;
        # the DATrie constructor is given this alphabet up front.
        for key in data.keys():
            if not isinstance(key, text_type):
                raise TypeError("All keys must be strings")
            for char in key:
                chars.add(char)
        self._data = DATrie("".join(chars))
        for key, value in data.items():
            self._data[key] = value
    def __contains__(self, key):
        return key in self._data
    def __len__(self):
        return len(self._data)
    def __iter__(self):
        # Direct iteration is not supported by this backend; use keys().
        raise NotImplementedError()
    def __getitem__(self, key):
        return self._data[key]
    def keys(self, prefix=None):
        # All keys, or only those starting with *prefix* when given.
        return self._data.keys(prefix)
    def has_keys_with_prefix(self, prefix):
        return self._data.has_keys_with_prefix(prefix)
    def longest_prefix(self, prefix):
        return self._data.longest_prefix(prefix)
    def longest_prefix_item(self, prefix):
        return self._data.longest_prefix_item(prefix)
| mpl-2.0 |
izapolsk/integration_tests | cfme/control/explorer/alert_profiles.py | 1 | 10143 | import attr
from navmazing import NavigateToAttribute
from navmazing import NavigateToSibling
from widgetastic.widget import Text
from widgetastic.widget import TextInput
from widgetastic_patternfly import BootstrapSelect
from widgetastic_patternfly import Button
from widgetastic_patternfly import CheckableBootstrapTreeview as CbTree
from widgetastic_patternfly import Input
from cfme.control.explorer import ControlExplorerView
from cfme.modeling.base import BaseCollection
from cfme.modeling.base import BaseEntity
from cfme.utils import ParamClassName
from cfme.utils.appliance.implementations.ui import CFMENavigateStep
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.appliance.implementations.ui import navigator
from cfme.utils.pretty import Pretty
from cfme.utils.update import Updateable
from cfme.utils.wait import wait_for
from widgetastic_manageiq import MultiBoxSelect
class AlertProfileFormCommon(ControlExplorerView):
    """Form widgets shared by the Add and Edit Alert Profile views."""
    title = Text("#explorer_title_text")
    description = Input(name="description")
    notes = TextInput(name="notes")
    alerts = MultiBoxSelect()
    cancel_button = Button("Cancel")
class NewAlertProfileView(AlertProfileFormCommon):
    """View shown when adding a new Alert Profile."""
    add_button = Button("Add")
    @property
    def is_displayed(self):
        # Displayed when Control Explorer shows the "Adding a new Alert
        # Profile" page with the profile type node selected in the tree.
        return (
            self.in_control_explorer and
            self.title.text == "Adding a new Alert Profile" and
            self.alert_profiles.tree.currently_selected == [
                "All Alert Profiles",
                "{} Alert Profiles".format(self.context["object"].TYPE)
            ]
        )
class EditAlertProfileView(AlertProfileFormCommon):
    """View shown when editing an existing Alert Profile."""
    save_button = Button("Save")
    @property
    def is_displayed(self):
        # Displayed when the edit page for this profile is open and the
        # profile itself is selected in the accordion tree.
        return (
            self.in_control_explorer and
            self.title.text == 'Editing {} Alert Profile "{}"'.format(
                self.context["object"].TYPE,
                self.context["object"].description) and
            self.alert_profiles.tree.currently_selected == [
                "All Alert Profiles",
                "{} Alert Profiles".format(self.context["object"].TYPE),
                self.context["object"].description
            ]
        )
class AlertProfileDetailsView(ControlExplorerView):
    """Details page of a single Alert Profile."""
    title = Text("#explorer_title_text")
    @property
    def is_displayed(self):
        return (
            self.in_control_explorer and
            self.title.text == 'Alert Profile "{}"'.format(self.context["object"].description)
        )
class AlertProfilesAllView(ControlExplorerView):
    """Listing page showing all Alert Profiles."""
    title = Text("#explorer_title_text")
    @property
    def is_displayed(self):
        return (
            self.in_control_explorer and
            self.title.text == "All Alert Profiles"
        )
class AlertProfilesEditAssignmentsView(ControlExplorerView):
    """Page for editing the object assignments of an Alert Profile."""
    title = Text("#explorer_title_text")
    assign_to = BootstrapSelect("chosen_assign_to")
    # Tag category selector; only relevant for tag-based assignment choices.
    tag_category = BootstrapSelect("chosen_cat")
    selections = CbTree("object_treebox")
    header = Text("//div[@id='alert_profile_assign_div']/h3")
    based_on = Text('//label[normalize-space(.)="Based On"]/../div')
    save_button = Button("Save")
    reset_button = Button("Reset")
    cancel_button = Button("Cancel")
    @property
    def is_displayed(self):
        # Displayed when the profile's page shows the "Assignments" header
        # and the "Based On" field matches the profile's TYPE.
        return (
            self.in_control_explorer and
            self.title.text == 'Alert Profile "{}"'.format(self.context["object"].description) and
            self.header.text == "Assignments" and
            self.based_on.text == self.context["object"].TYPE
        )
@attr.s
class BaseAlertProfile(BaseEntity, Updateable, Pretty):
    """Base class for alert profiles of one entity type.

    Subclasses only set ``TYPE`` (e.g. "Host / Node"); it drives the
    accordion tree paths and configuration menu labels used while
    navigating.
    """
    TYPE = None
    _param_name = ParamClassName('description')
    pretty_attrs = ["description", "alerts"]
    description = attr.ib()
    alerts = attr.ib(default=None)
    notes = attr.ib(default=None)

    def update(self, updates):
        """Update this Alert Profile in UI.

        Args:
            updates: Dict of attribute name -> new value, provided by the
                update() context manager.
        """
        view = navigate_to(self, "Edit")
        changed = view.fill(updates)
        if changed:
            view.save_button.click()
        else:
            view.cancel_button.click()
        # Keep the Python object in sync with what was entered in the UI.
        for attrib, value in updates.items():
            setattr(self, attrib, value)
        view = self.create_view(AlertProfileDetailsView)
        wait_for(lambda: view.is_displayed, timeout=10,
                 message="wait AlertProfileDetailsView is displayed")
        view.flash.assert_no_error()
        if changed:
            view.flash.assert_message(
                'Alert Profile "{}" was saved'.format(
                    updates.get("description", self.description)))
        else:
            view.flash.assert_message(
                'Edit of Alert Profile "{}" was cancelled by the user'.format(self.description))

    def delete(self, cancel=False):
        """Delete this Alert Profile in UI.

        Args:
            cancel: Whether to cancel the deletion (default False).
        """
        view = navigate_to(self, "Details")
        view.configuration.item_select("Delete this Alert Profile", handle_alert=not cancel)
        if cancel:
            assert view.is_displayed
        view.flash.assert_no_error()

    @property
    def exists(self):
        """Check existence of this Alert Profile.

        Returns: :py:class:`bool` signalizing the presence of the Alert
            Profile in the database.
        """
        miq_sets = self.appliance.db.client["miq_sets"]
        # Both criteria are passed to filter() as separate arguments, which
        # SQLAlchemy combines with SQL AND.  Chaining them with the Python
        # ``and`` operator is wrong: truth value of a clause expression is
        # undefined, so at best only one criterion would be applied.
        return self.appliance.db.client.session\
            .query(miq_sets.description)\
            .filter(
                miq_sets.description == self.description,
                miq_sets.set_type == "MiqAlertSet")\
            .count() > 0

    def assign_to(self, assign, selections=None, tag_category=None):
        """Assigns this Alert Profile to specified objects.

        Args:
            assign: Where to assign (The Enterprise, ...).
            selections: What items to check in the tree. N/A for The Enterprise.
            tag_category: Only for choices starting with Tagged. N/A for The Enterprise.

        Returns:
            Boolean indicating if assignment was made (form fill changed)
        """
        view = navigate_to(self, "Edit assignments")
        if selections is not None:
            selections = view.selections.CheckNode(selections)
        changed = view.fill({
            "assign_to": assign,
            "tag_category": tag_category,
            "selections": selections
        })
        if changed:
            view.save_button.click()
        else:
            view.cancel_button.click()
        view = self.create_view(AlertProfileDetailsView)
        assert view.is_displayed
        view.flash.assert_no_error()
        return changed
@attr.s
class AlertProfileCollection(BaseCollection):
    """Collection of alert profiles; dispatches to the concrete profile class."""

    def instantiate(self, *args, **kwargs):
        """Build a profile of the class given as the first positional arg."""
        alert_profile_class = args[0]
        args = args[1:]
        return alert_profile_class.from_collection(self, *args, **kwargs)

    def create(self, alert_profile_class, description, alerts=None, notes=None):
        """Create an alert profile in the UI and return the entity.

        Args:
            alert_profile_class: Concrete :py:class:`BaseAlertProfile` subclass.
            description: Profile description (its display name).
            alerts: Optional iterable of alerts to include in the profile.
            notes: Optional notes text.
        """
        alert_profile = self.instantiate(alert_profile_class, description, alerts=alerts,
                                         notes=notes)
        view = navigate_to(alert_profile, "Add")
        view.fill({
            "description": alert_profile.description,
            "notes": alert_profile.notes,
            # alerts defaults to None; iterating None would raise TypeError,
            # so pass None through (the form fill skips None values).
            "alerts": ([str(alert) for alert in alert_profile.alerts]
                       if alert_profile.alerts else None)
        })
        view.add_button.click()
        view = alert_profile.create_view(AlertProfileDetailsView)
        assert view.is_displayed
        view.flash.assert_success_message(
            'Alert Profile "{}" was added'.format(alert_profile.description))
        return alert_profile
@navigator.register(AlertProfileCollection, "All")
class AlertProfilesAll(CFMENavigateStep):
    """Navigate to the listing of all alert profiles."""
    VIEW = AlertProfilesAllView
    prerequisite = NavigateToAttribute("appliance.server", "ControlExplorer")
    def step(self, *args, **kwargs):
        # Select the root node of the alert profiles accordion.
        self.prerequisite_view.alert_profiles.tree.click_path("All Alert Profiles")
@navigator.register(BaseAlertProfile, "Add")
class AlertProfileNew(CFMENavigateStep):
    """Navigate to the "Add a New ... Alert Profile" form for the profile's TYPE."""
    VIEW = NewAlertProfileView
    prerequisite = NavigateToAttribute("parent", "All")
    def step(self, *args, **kwargs):
        # Select the per-TYPE folder, then pick the type-specific "Add" item.
        self.prerequisite_view.alert_profiles.tree.click_path("All Alert Profiles",
            "{} Alert Profiles".format(self.obj.TYPE))
        self.prerequisite_view.configuration.item_select(
            "Add a New {} Alert Profile".format(self.obj.TYPE))
@navigator.register(BaseAlertProfile, "Edit")
class AlertProfileEdit(CFMENavigateStep):
    """Navigate from the details page to the edit form."""
    VIEW = EditAlertProfileView
    prerequisite = NavigateToSibling("Details")
    def step(self, *args, **kwargs):
        self.prerequisite_view.configuration.item_select("Edit this Alert Profile")
@navigator.register(BaseAlertProfile, "Edit assignments")
class AlertProfileEditAssignments(CFMENavigateStep):
    """Navigate from the details page to the assignments form."""
    VIEW = AlertProfilesEditAssignmentsView
    prerequisite = NavigateToSibling("Details")
    def step(self, *args, **kwargs):
        self.prerequisite_view.configuration.item_select("Edit assignments for this Alert Profile")
@navigator.register(BaseAlertProfile, "Details")
class AlertProfileDetails(CFMENavigateStep):
    """Navigate to the details page of a single alert profile."""
    VIEW = AlertProfileDetailsView
    prerequisite = NavigateToAttribute("parent", "All")
    def step(self, *args, **kwargs):
        # Path: root -> per-TYPE folder -> this profile's node.
        self.prerequisite_view.alert_profiles.tree.click_path("All Alert Profiles",
            "{} Alert Profiles".format(self.obj.TYPE), self.obj.description)
# Concrete alert profile types.  TYPE must match the label used in the
# Control explorer tree and configuration menus.
class ClusterAlertProfile(BaseAlertProfile):
    TYPE = "Cluster / Deployment Role"
class DatastoreAlertProfile(BaseAlertProfile):
    TYPE = "Datastore"
class HostAlertProfile(BaseAlertProfile):
    TYPE = "Host / Node"
class ProviderAlertProfile(BaseAlertProfile):
    TYPE = "Provider"
class ServerAlertProfile(BaseAlertProfile):
    TYPE = "Server"
class VMInstanceAlertProfile(BaseAlertProfile):
    TYPE = "VM and Instance"
class NodeAlertProfile(BaseAlertProfile):
    TYPE = "Container Node"
| gpl-2.0 |
nekulin/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/gettext.py | 73 | 19890 | """Internationalization and localization support.
This module provides internationalization (I18N) and localization (L10N)
support for your Python programs by providing an interface to the GNU gettext
message catalog library.
I18N refers to the operation by which a program is made aware of multiple
languages. L10N refers to the adaptation of your program, once
internationalized, to the local language and cultural habits.
"""
# This module represents the integration of work, contributions, feedback, and
# suggestions from the following people:
#
# Martin von Loewis, who wrote the initial implementation of the underlying
# C-based libintlmodule (later renamed _gettext), along with a skeletal
# gettext.py implementation.
#
# Peter Funk, who wrote fintl.py, a fairly complete wrapper around intlmodule,
# which also included a pure-Python implementation to read .mo files if
# intlmodule wasn't available.
#
# James Henstridge, who also wrote a gettext.py module, which has some
# interesting, but currently unsupported experimental features: the notion of
# a Catalog class and instances, and the ability to add to a catalog file via
# a Python API.
#
# Barry Warsaw integrated these modules, wrote the .install() API and code,
# and conformed all C and Python code to Python's coding standards.
#
# Francois Pinard and Marc-Andre Lemburg also contributed valuably to this
# module.
#
# J. David Ibanez implemented plural forms. Bruno Haible fixed some bugs.
#
# TODO:
# - Lazy loading of .mo files. Currently the entire catalog is loaded into
# memory, but that's probably bad for large translated programs. Instead,
# the lexical sort of original strings in GNU .mo files should be exploited
# to do binary searches and lazy initializations. Or you might want to use
# the undocumented double-hash algorithm for .mo files with hash tables, but
# you'll need to study the GNU gettext code to do this.
#
# - Support Solaris .mo file formats. Unfortunately, we've been unable to
# find this format documented anywhere.
import locale, copy, os, re, struct, sys
from errno import ENOENT
__all__ = ['NullTranslations', 'GNUTranslations', 'Catalog',
'find', 'translation', 'install', 'textdomain', 'bindtextdomain',
'dgettext', 'dngettext', 'gettext', 'ngettext',
]
_default_localedir = os.path.join(sys.prefix, 'share', 'locale')
def test(condition, true, false):
    """
    Implements the C expression:
      condition ? true : false
    Required to correctly interpret plural forms.
    """
    # NOTE: must remain a function with this exact name -- c2py() rewrites
    # "a?b:c" plural expressions into calls to test() by name.
    if condition:
        return true
    else:
        return false
def c2py(plural):
    """Gets a C expression as used in PO files for plural forms and returns a
    Python lambda function that implements an equivalent expression.
    """
    # Security check, allow only the "n" identifier: tokenize the expression
    # and reject any NAME token other than "n" before it is ever eval()ed.
    try:
        from cStringIO import StringIO
    except ImportError:
        from StringIO import StringIO
    import token, tokenize
    tokens = tokenize.generate_tokens(StringIO(plural).readline)
    try:
        danger = [x for x in tokens if x[0] == token.NAME and x[1] != 'n']
    except tokenize.TokenError:
        raise ValueError, \
              'plural forms expression error, maybe unbalanced parenthesis'
    else:
        if danger:
            raise ValueError, 'plural forms expression could be dangerous'
    # Replace some C operators by their Python equivalents
    plural = plural.replace('&&', ' and ')
    plural = plural.replace('||', ' or ')
    # "!x" -> "not x", but leave "!=" alone (hence the [^=] guard).
    expr = re.compile(r'\!([^=])')
    plural = expr.sub(' not \\1', plural)
    # Regular expression and replacement function used to transform
    # "a?b:c" to "test(a,b,c)".  repl recurses so nested ternaries in the
    # "else" branch are rewritten too.
    expr = re.compile(r'(.*?)\?(.*?):(.*)')
    def repl(x):
        return "test(%s, %s, %s)" % (x.group(1), x.group(2),
                                     expr.sub(repl, x.group(3)))
    # Code to transform the plural expression, taking care of parentheses:
    # each '(' opens a new accumulator on the stack; on ')' the finished
    # sub-expression is ternary-rewritten and folded into its parent.
    stack = ['']
    for c in plural:
        if c == '(':
            stack.append('')
        elif c == ')':
            if len(stack) == 1:
                # Actually, we never reach this code, because unbalanced
                # parentheses get caught in the security check at the
                # beginning.
                raise ValueError, 'unbalanced parenthesis in plural form'
            s = expr.sub(repl, stack.pop())
            stack[-1] += '(%s)' % s
        else:
            stack[-1] += c
    plural = expr.sub(repl, stack.pop())
    return eval('lambda n: int(%s)' % plural)
def _expand_lang(locale):
from locale import normalize
locale = normalize(locale)
COMPONENT_CODESET = 1 << 0
COMPONENT_TERRITORY = 1 << 1
COMPONENT_MODIFIER = 1 << 2
# split up the locale into its base components
mask = 0
pos = locale.find('@')
if pos >= 0:
modifier = locale[pos:]
locale = locale[:pos]
mask |= COMPONENT_MODIFIER
else:
modifier = ''
pos = locale.find('.')
if pos >= 0:
codeset = locale[pos:]
locale = locale[:pos]
mask |= COMPONENT_CODESET
else:
codeset = ''
pos = locale.find('_')
if pos >= 0:
territory = locale[pos:]
locale = locale[:pos]
mask |= COMPONENT_TERRITORY
else:
territory = ''
language = locale
ret = []
for i in range(mask+1):
if not (i & ~mask): # if all components for this combo exist ...
val = language
if i & COMPONENT_TERRITORY: val += territory
if i & COMPONENT_CODESET: val += codeset
if i & COMPONENT_MODIFIER: val += modifier
ret.append(val)
ret.reverse()
return ret
class NullTranslations:
    """Identity translation class and base of all translation classes.

    Defines the fallback-chaining machinery and the install() API; the
    gettext methods simply return their argument (possibly delegating to
    a fallback first).
    """
    def __init__(self, fp=None):
        self._info = {}
        self._charset = None
        self._output_charset = None
        self._fallback = None
        if fp is not None:
            self._parse(fp)
    def _parse(self, fp):
        """Subclass hook: parse the catalog file object *fp*."""
        pass
    def add_fallback(self, fallback):
        """Append *fallback* to the end of the fallback chain."""
        if self._fallback:
            self._fallback.add_fallback(fallback)
        else:
            self._fallback = fallback
    def gettext(self, message):
        if self._fallback:
            return self._fallback.gettext(message)
        return message
    def lgettext(self, message):
        if self._fallback:
            return self._fallback.lgettext(message)
        return message
    def ngettext(self, msgid1, msgid2, n):
        if self._fallback:
            return self._fallback.ngettext(msgid1, msgid2, n)
        if n == 1:
            return msgid1
        else:
            return msgid2
    def lngettext(self, msgid1, msgid2, n):
        if self._fallback:
            return self._fallback.lngettext(msgid1, msgid2, n)
        if n == 1:
            return msgid1
        else:
            return msgid2
    def ugettext(self, message):
        if self._fallback:
            return self._fallback.ugettext(message)
        return unicode(message)
    def ungettext(self, msgid1, msgid2, n):
        if self._fallback:
            return self._fallback.ungettext(msgid1, msgid2, n)
        if n == 1:
            return unicode(msgid1)
        else:
            return unicode(msgid2)
    def info(self):
        """Return the catalog metadata dictionary."""
        return self._info
    def charset(self):
        """Return the charset declared by the catalog, if any."""
        return self._charset
    def output_charset(self):
        """Return the charset used to encode returned messages."""
        return self._output_charset
    def set_output_charset(self, charset):
        """Set the charset used to encode returned messages."""
        self._output_charset = charset
    def install(self, unicode=False, names=None):
        """Install _() into the builtins namespace.

        If *names* contains "gettext", "ngettext", "lgettext" or
        "lngettext", those functions are installed as well.
        NOTE: the ``unicode`` flag parameter shadows the builtin of the
        same name (kept for API compatibility).
        """
        import __builtin__
        __builtin__.__dict__['_'] = unicode and self.ugettext or self.gettext
        if hasattr(names, "__contains__"):
            if "gettext" in names:
                __builtin__.__dict__['gettext'] = __builtin__.__dict__['_']
            if "ngettext" in names:
                __builtin__.__dict__['ngettext'] = (unicode and self.ungettext
                                                             or self.ngettext)
            if "lgettext" in names:
                __builtin__.__dict__['lgettext'] = self.lgettext
            if "lngettext" in names:
                __builtin__.__dict__['lngettext'] = self.lngettext
class GNUTranslations(NullTranslations):
    """Translations class that reads GNU gettext-format .mo files."""
    # Magic number of .mo files
    LE_MAGIC = 0x950412deL
    BE_MAGIC = 0xde120495L
    def _parse(self, fp):
        """Override this method to support alternative .mo formats."""
        unpack = struct.unpack
        filename = getattr(fp, 'name', '')
        # Parse the .mo file header, which consists of 5 little endian 32
        # bit words.
        self._catalog = catalog = {}
        self.plural = lambda n: int(n != 1) # germanic plural by default
        buf = fp.read()
        buflen = len(buf)
        # Are we big endian or little endian?
        magic = unpack('<I', buf[:4])[0]
        if magic == self.LE_MAGIC:
            version, msgcount, masteridx, transidx = unpack('<4I', buf[4:20])
            ii = '<II'
        elif magic == self.BE_MAGIC:
            version, msgcount, masteridx, transidx = unpack('>4I', buf[4:20])
            ii = '>II'
        else:
            raise IOError(0, 'Bad magic number', filename)
        # Now put all messages from the .mo file buffer into the catalog
        # dictionary.  masteridx/transidx walk the (length, offset) seek
        # tables for original and translated strings in lockstep.
        for i in xrange(0, msgcount):
            mlen, moff = unpack(ii, buf[masteridx:masteridx+8])
            mend = moff + mlen
            tlen, toff = unpack(ii, buf[transidx:transidx+8])
            tend = toff + tlen
            if mend < buflen and tend < buflen:
                msg = buf[moff:mend]
                tmsg = buf[toff:tend]
            else:
                raise IOError(0, 'File is corrupt', filename)
            # See if we're looking at GNU .mo conventions for metadata
            if mlen == 0:
                # Catalog description: the empty msgid's translation holds
                # RFC 822-ish "Key: value" headers (possibly continued).
                lastk = k = None
                for item in tmsg.splitlines():
                    item = item.strip()
                    if not item:
                        continue
                    if ':' in item:
                        k, v = item.split(':', 1)
                        k = k.strip().lower()
                        v = v.strip()
                        self._info[k] = v
                        lastk = k
                    elif lastk:
                        self._info[lastk] += '\n' + item
                    if k == 'content-type':
                        self._charset = v.split('charset=')[1]
                    elif k == 'plural-forms':
                        v = v.split(';')
                        plural = v[1].split('plural=')[1]
                        self.plural = c2py(plural)
            # Note: we unconditionally convert both msgids and msgstrs to
            # Unicode using the character encoding specified in the charset
            # parameter of the Content-Type header.  The gettext documentation
            # strongly encourages msgids to be us-ascii, but some applications
            # require alternative encodings (e.g. Zope's ZCML and ZPT).  For
            # traditional gettext applications, the msgid conversion will
            # cause no problems since us-ascii should always be a subset of
            # the charset encoding.  We may want to fall back to 8-bit msgids
            # if the Unicode conversion fails.
            if '\x00' in msg:
                # Plural forms: msgid1 NUL msgid2 on the original side,
                # NUL-separated variants on the translation side.
                msgid1, msgid2 = msg.split('\x00')
                tmsg = tmsg.split('\x00')
                if self._charset:
                    msgid1 = unicode(msgid1, self._charset)
                    tmsg = [unicode(x, self._charset) for x in tmsg]
                for i in range(len(tmsg)):
                    catalog[(msgid1, i)] = tmsg[i]
            else:
                if self._charset:
                    msg = unicode(msg, self._charset)
                    tmsg = unicode(tmsg, self._charset)
                catalog[msg] = tmsg
            # advance to next entry in the seek tables
            masteridx += 8
            transidx += 8
    def gettext(self, message):
        """Return the translation of *message* as an encoded 8-bit string."""
        missing = object()
        tmsg = self._catalog.get(message, missing)
        if tmsg is missing:
            if self._fallback:
                return self._fallback.gettext(message)
            return message
        # Encode the Unicode tmsg back to an 8-bit string, if possible
        if self._output_charset:
            return tmsg.encode(self._output_charset)
        elif self._charset:
            return tmsg.encode(self._charset)
        return tmsg
    def lgettext(self, message):
        """Like gettext(), but encode using the locale's preferred encoding."""
        missing = object()
        tmsg = self._catalog.get(message, missing)
        if tmsg is missing:
            if self._fallback:
                return self._fallback.lgettext(message)
            return message
        if self._output_charset:
            return tmsg.encode(self._output_charset)
        return tmsg.encode(locale.getpreferredencoding())
    def ngettext(self, msgid1, msgid2, n):
        """Plural-aware gettext(); selects the form via self.plural(n)."""
        try:
            tmsg = self._catalog[(msgid1, self.plural(n))]
            if self._output_charset:
                return tmsg.encode(self._output_charset)
            elif self._charset:
                return tmsg.encode(self._charset)
            return tmsg
        except KeyError:
            if self._fallback:
                return self._fallback.ngettext(msgid1, msgid2, n)
            if n == 1:
                return msgid1
            else:
                return msgid2
    def lngettext(self, msgid1, msgid2, n):
        """Plural-aware lgettext()."""
        try:
            tmsg = self._catalog[(msgid1, self.plural(n))]
            if self._output_charset:
                return tmsg.encode(self._output_charset)
            return tmsg.encode(locale.getpreferredencoding())
        except KeyError:
            if self._fallback:
                return self._fallback.lngettext(msgid1, msgid2, n)
            if n == 1:
                return msgid1
            else:
                return msgid2
    def ugettext(self, message):
        """Return the translation of *message* as a unicode string."""
        missing = object()
        tmsg = self._catalog.get(message, missing)
        if tmsg is missing:
            if self._fallback:
                return self._fallback.ugettext(message)
            return unicode(message)
        return tmsg
    def ungettext(self, msgid1, msgid2, n):
        """Plural-aware ugettext()."""
        try:
            tmsg = self._catalog[(msgid1, self.plural(n))]
        except KeyError:
            if self._fallback:
                return self._fallback.ungettext(msgid1, msgid2, n)
            if n == 1:
                tmsg = unicode(msgid1)
            else:
                tmsg = unicode(msgid2)
        return tmsg
# Locate a .mo file using the gettext strategy
def find(domain, localedir=None, languages=None, all=0):
    """Locate the message catalog for *domain*.

    Candidate languages come from *languages* or, failing that, the first
    non-empty of the LANGUAGE, LC_ALL, LC_MESSAGES and LANG environment
    variables.  With a true *all*, return a list of every matching .mo
    path; otherwise return the first match or None.
    """
    # Get some reasonable defaults for arguments that were not supplied
    if localedir is None:
        localedir = _default_localedir
    if languages is None:
        languages = []
        for envar in ('LANGUAGE', 'LC_ALL', 'LC_MESSAGES', 'LANG'):
            val = os.environ.get(envar)
            if val:
                languages = val.split(':')
                break
    if 'C' not in languages:
        languages.append('C')
    # Normalize and expand each language, dropping duplicates while
    # preserving the first-seen order.
    nelangs = []
    seen = set()
    for lang in languages:
        for nelang in _expand_lang(lang):
            if nelang not in seen:
                seen.add(nelang)
                nelangs.append(nelang)
    matches = [] if all else None
    for lang in nelangs:
        # 'C' terminates the search: it means "no translation".
        if lang == 'C':
            break
        mofile = os.path.join(localedir, lang, 'LC_MESSAGES', '%s.mo' % domain)
        if not os.path.exists(mofile):
            continue
        if not all:
            return mofile
        matches.append(mofile)
    return matches
# a mapping between absolute .mo file path and Translation object
_translations = {}
def translation(domain, localedir=None, languages=None,
                class_=None, fallback=False, codeset=None):
    """Return a Translations instance for *domain*.

    All matching catalogs are chained as fallbacks of the first one.
    Raises IOError when nothing is found and *fallback* is false.
    """
    if class_ is None:
        class_ = GNUTranslations
    mofiles = find(domain, localedir, languages, all=1)
    if not mofiles:
        if fallback:
            return NullTranslations()
        raise IOError(ENOENT, 'No translation file found for domain', domain)
    # TBD: do we need to worry about the file pointer getting collected?
    # Avoid opening, reading, and parsing the .mo file after it's been done
    # once.
    result = None
    for mofile in mofiles:
        key = os.path.abspath(mofile)
        t = _translations.get(key)
        if t is None:
            t = _translations.setdefault(key, class_(open(mofile, 'rb')))
        # Copy the translation object to allow setting fallbacks and
        # output charset. All other instance data is shared with the
        # cached object.
        t = copy.copy(t)
        if codeset:
            t.set_output_charset(codeset)
        if result is None:
            result = t
        else:
            result.add_fallback(t)
    return result
def install(domain, localedir=None, unicode=False, codeset=None, names=None):
    """Install the _() function (and optionally friends) into builtins."""
    t = translation(domain, localedir, fallback=True, codeset=codeset)
    t.install(unicode, names)
# a mapping b/w domains and locale directories
_localedirs = {}
# a mapping b/w domains and codesets
_localecodesets = {}
# current global domain, `messages' used for compatibility w/ GNU gettext
_current_domain = 'messages'
def textdomain(domain=None):
    """Return the current global domain, first setting it if *domain* is given."""
    global _current_domain
    if domain is not None:
        _current_domain = domain
    return _current_domain
def bindtextdomain(domain, localedir=None):
    """Bind *domain* to *localedir*; return the directory bound to *domain*."""
    global _localedirs
    if localedir is not None:
        _localedirs[domain] = localedir
    return _localedirs.get(domain, _default_localedir)
def bind_textdomain_codeset(domain, codeset=None):
    """Bind *domain* to *codeset*; return the codeset bound to *domain*."""
    global _localecodesets
    if codeset is not None:
        _localecodesets[domain] = codeset
    return _localecodesets.get(domain)
def dgettext(domain, message):
    """gettext() for an explicit *domain*; untranslated on missing catalog."""
    try:
        t = translation(domain, _localedirs.get(domain, None),
                        codeset=_localecodesets.get(domain))
    except IOError:
        return message
    return t.gettext(message)
def ldgettext(domain, message):
    """lgettext() for an explicit *domain*."""
    try:
        t = translation(domain, _localedirs.get(domain, None),
                        codeset=_localecodesets.get(domain))
    except IOError:
        return message
    return t.lgettext(message)
def dngettext(domain, msgid1, msgid2, n):
    """ngettext() for an explicit *domain*; germanic plural on missing catalog."""
    try:
        t = translation(domain, _localedirs.get(domain, None),
                        codeset=_localecodesets.get(domain))
    except IOError:
        if n == 1:
            return msgid1
        else:
            return msgid2
    return t.ngettext(msgid1, msgid2, n)
def ldngettext(domain, msgid1, msgid2, n):
    """lngettext() for an explicit *domain*."""
    try:
        t = translation(domain, _localedirs.get(domain, None),
                        codeset=_localecodesets.get(domain))
    except IOError:
        if n == 1:
            return msgid1
        else:
            return msgid2
    return t.lngettext(msgid1, msgid2, n)
def gettext(message):
    """Translate *message* in the current global domain."""
    return dgettext(_current_domain, message)
def lgettext(message):
    """Locale-encoded translation in the current global domain."""
    return ldgettext(_current_domain, message)
def ngettext(msgid1, msgid2, n):
    """Plural-aware translation in the current global domain."""
    return dngettext(_current_domain, msgid1, msgid2, n)
def lngettext(msgid1, msgid2, n):
    """Locale-encoded plural-aware translation in the current global domain."""
    return ldngettext(_current_domain, msgid1, msgid2, n)
# dcgettext() has been deemed unnecessary and is not implemented.
# James Henstridge's Catalog constructor from GNOME gettext.  Documented usage
# was:
#
#    import gettext
#    cat = gettext.Catalog(PACKAGE, localedir=LOCALEDIR)
#    _ = cat.gettext
#    print _('Hello World')
# The resulting catalog object currently doesn't support access through a
# dictionary API, which was supported (but apparently unused) in GNOME
# gettext.
Catalog = translation
| apache-2.0 |
dlazz/ansible | lib/ansible/modules/web_infrastructure/ansible_tower/tower_project.py | 4 | 6245 | #!/usr/bin/python
# coding: utf-8 -*-
# (c) 2017, Wayne Witzel III <wayne@riotousliving.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_project
author: "Wayne Witzel III (@wwitzel3)"
version_added: "2.3"
short_description: create, update, or destroy Ansible Tower projects
description:
- Create, update, or destroy Ansible Tower projects. See
U(https://www.ansible.com/tower) for an overview.
options:
name:
description:
- Name to use for the project.
required: True
description:
description:
- Description to use for the project.
scm_type:
description:
- Type of SCM resource.
choices: ["manual", "git", "hg", "svn"]
default: "manual"
scm_url:
description:
- URL of SCM resource.
local_path:
description:
- The server playbook directory for manual projects.
scm_branch:
description:
- The branch to use for the SCM resource.
scm_credential:
description:
- Name of the credential to use with this SCM resource.
scm_clean:
description:
- Remove local modifications before updating.
type: bool
default: 'no'
scm_delete_on_update:
description:
- Remove the repository completely before updating.
type: bool
default: 'no'
scm_update_on_launch:
description:
      - Perform an update of the local repository before launching a job with this project.
type: bool
default: 'no'
organization:
description:
- Primary key of organization for project.
state:
description:
- Desired state of the resource.
default: "present"
choices: ["present", "absent"]
extends_documentation_fragment: tower
'''
EXAMPLES = '''
- name: Add tower project
tower_project:
name: "Foo"
description: "Foo bar project"
organization: "test"
state: present
tower_config_file: "~/tower_cli.cfg"
'''
from ansible.module_utils.ansible_tower import TowerModule, tower_auth_config, tower_check_mode
try:
import tower_cli
import tower_cli.exceptions as exc
from tower_cli.conf import settings
except ImportError:
pass
def main():
    """Ansible module entry point: create/update or delete a Tower project."""
    argument_spec = dict(
        name=dict(),
        description=dict(),
        organization=dict(),
        scm_type=dict(choices=['manual', 'git', 'hg', 'svn'], default='manual'),
        scm_url=dict(),
        scm_branch=dict(),
        scm_credential=dict(),
        scm_clean=dict(type='bool', default=False),
        scm_delete_on_update=dict(type='bool', default=False),
        scm_update_on_launch=dict(type='bool', default=False),
        local_path=dict(),
        state=dict(choices=['present', 'absent'], default='present'),
    )
    module = TowerModule(argument_spec=argument_spec, supports_check_mode=True)
    name = module.params.get('name')
    description = module.params.get('description')
    organization = module.params.get('organization')
    scm_type = module.params.get('scm_type')
    # The Tower API encodes a "manual" project as an empty scm_type string.
    if scm_type == "manual":
        scm_type = ""
    scm_url = module.params.get('scm_url')
    local_path = module.params.get('local_path')
    scm_branch = module.params.get('scm_branch')
    scm_credential = module.params.get('scm_credential')
    scm_clean = module.params.get('scm_clean')
    scm_delete_on_update = module.params.get('scm_delete_on_update')
    scm_update_on_launch = module.params.get('scm_update_on_launch')
    state = module.params.get('state')
    json_output = {'project': name, 'state': state}
    tower_auth = tower_auth_config(module)
    with settings.runtime_values(**tower_auth):
        tower_check_mode(module)
        project = tower_cli.get_resource('project')
        try:
            if state == 'present':
                # Resolve the organization name to its primary key first.
                try:
                    org_res = tower_cli.get_resource('organization')
                    org = org_res.get(name=organization)
                except (exc.NotFound) as excinfo:
                    module.fail_json(msg='Failed to update project, organization not found: {0}'.format(organization), changed=False)
                if scm_credential:
                    # Resolve the SCM credential name to its primary key;
                    # on ambiguity, retry scoped to the project organization.
                    try:
                        cred_res = tower_cli.get_resource('credential')
                        try:
                            cred = cred_res.get(name=scm_credential)
                        except (tower_cli.exceptions.MultipleResults) as multi_res_excinfo:
                            module.warn('Multiple credentials found for {0}, falling back looking in project organization'.format(scm_credential))
                            cred = cred_res.get(name=scm_credential, organization=org['id'])
                        scm_credential = cred['id']
                    except (exc.NotFound) as excinfo:
                        module.fail_json(msg='Failed to update project, credential not found: {0}'.format(scm_credential), changed=False)
                # create_on_missing makes modify() an upsert.
                result = project.modify(name=name, description=description,
                                        organization=org['id'],
                                        scm_type=scm_type, scm_url=scm_url, local_path=local_path,
                                        scm_branch=scm_branch, scm_clean=scm_clean, credential=scm_credential,
                                        scm_delete_on_update=scm_delete_on_update,
                                        scm_update_on_launch=scm_update_on_launch,
                                        create_on_missing=True)
                json_output['id'] = result['id']
            elif state == 'absent':
                result = project.delete(name=name)
        except (exc.ConnectionError, exc.BadRequest) as excinfo:
            module.fail_json(msg='Failed to update project: {0}'.format(excinfo), changed=False)
    json_output['changed'] = result['changed']
    module.exit_json(**json_output)
if __name__ == '__main__':
    main()
| gpl-3.0 |
saketkc/galaxy_tools | transFIC_web/transFIC_web.py | 2 | 4402 | #!/usr/bin/env python
import requests
import pycurl
import os
from os.path import getsize
import argparse
import sys
import cStringIO
from functools import wraps
import tempfile
import shutil
import time
__url__ = "http://bg.upf.edu/transfic/taskService"
def stop_err(msg, err=1):
    """Write *msg* (newline-terminated) to stderr and exit with status *err*."""
    message = '%s\n' % msg
    sys.stderr.write(message)
    sys.exit(err)
def retry(ExceptionToCheck, tries=12000000, delay=3, backoff=2, logger=None):
    """Retry calling the decorated function using an exponential backoff.
    http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
    original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry
    :param ExceptionToCheck: the exception to check. may be a tuple of
        exceptions to check
    :type ExceptionToCheck: Exception or tuple
    :param tries: number of times to try (not retry) before giving up
    :type tries: int
    :param delay: initial delay between retries in seconds
    :type delay: int
    :param backoff: backoff multiplier e.g. value of 2 will double the delay
        each retry
    :type backoff: int
    :param logger: logger to use. If None, print
    :type logger: logging.Logger instance
    """
    def deco_retry(f):
        @wraps(f)
        def f_retry(*args, **kwargs):
            mtries, mdelay = tries, delay
            # Loop covers the first tries-1 attempts; each failure sleeps
            # mdelay seconds and multiplies the delay by the backoff factor.
            while mtries > 1:
                try:
                    return f(*args, **kwargs)
                except ExceptionToCheck, e:
                    #msg = "%s, Retrying in %d seconds..." % (str(e), mdelay)
                    msg = "Retrying in %d seconds..." % (mdelay)
                    if logger:
                        logger.warning(msg)
                    else:
                        # print msg
                        pass
                    time.sleep(mdelay)
                    mtries -= 1
                    mdelay *= backoff
            # Final attempt: any exception now propagates to the caller.
            return f(*args, **kwargs)
        return f_retry  # true decorator
    return deco_retry
class TransficUploader:
    """Upload a variants file to the transFIC task service (HTTP PUT via
    pycurl), then poll for completion and download the resulting report."""
    def __init__(self):
        self.c = pycurl.Curl()
        self.c.setopt(pycurl.URL, __url__)
        self.c.setopt(pycurl.UPLOAD, 1)
        try:
            proxy = os.environ['http_proxy']
            self.c.setopt(pycurl.PROXY, proxy)
        except KeyError:
            # No proxy configured -- connect directly.
            pass
        # Suppress the "Expect: 100-continue" handshake for the upload.
        self.c.setopt(pycurl.HTTPHEADER, ['Expect:'])
        self.c.setopt(pycurl.UPLOAD, 1)
        self.c.setopt(pycurl.NOPROGRESS, 1)
        self.c.setopt(pycurl.USERAGENT, "curl/7.27.0")
        self.c.setopt(pycurl.SSL_VERIFYPEER, 1)
        self.c.setopt(pycurl.CUSTOMREQUEST, "PUT")
        self.c.setopt(pycurl.TCP_NODELAY, 1)
        # The service's response body (the task URL) is captured here.
        self.buf = cStringIO.StringIO()
        self.c.setopt(self.c.WRITEFUNCTION, self.buf.write)
    def upload_file(self, filepath):
        """Attach the contents of *filepath* as the PUT request body."""
        f = open(filepath)
        self.c.setopt(pycurl.INFILE, f)
        self.c.setopt(pycurl.INFILESIZE, getsize(filepath))
    def run(self):
        """Perform the upload request."""
        self.c.perform()
    def get_url(self):
        """Return the task URL the service wrote into the response body."""
        return self.buf.getvalue().strip()
    @retry(requests.exceptions.HTTPError)
    def result_exists(self, url):
        """Poll *url* (retried with backoff) until the task has finished.

        Aborts the program if the service reports a task error.
        """
        download_request = requests.request("GET", url)
        print download_request.text
        if download_request.status_code == 404 or download_request.status_code == 500:
            raise requests.HTTPError()
        elif "Task status is : error" in download_request.text:
            stop_err("No SNVs found!")
        else:
            return url
    @retry(requests.exceptions.HTTPError)
    def download_result(self, url, outpath):
        """Stream the result from *url* into a temp file, then move it to
        *outpath* (retried with backoff on HTTP 500)."""
        tmp_dir = tempfile.mkdtemp()
        r = requests.get(url, stream=True)
        if r.status_code == 500:
            raise requests.HTTPError()
        else:
            path = os.path.join(tmp_dir, "results.csv")
            with open(path, 'wb') as f:
                for chunk in r.iter_content(128):
                    f.write(chunk)
            shutil.move(path, outpath)
        shutil.rmtree(tmp_dir)
def main(params):
    """Parse --input/--output args, upload the input and fetch the report."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--input", type=str, required=True)
    parser.add_argument("--output", type=str, required=True)
    args = parser.parse_args(params)
    uploader = TransficUploader()
    uploader.upload_file(args.input)
    uploader.run()
    url = uploader.get_url()
    # Blocks (with retries) until the service reports the task finished.
    url = uploader.result_exists(url)
    uploader.download_result(url, args.output)
if __name__ == "__main__":
    main(sys.argv[1:])
| mit |
longaccess/bigstash-python | BigStash/auth.py | 1 | 1982 | from __future__ import print_function
from BigStash import __version__
from BigStash.base import BigStashAPIBase
from BigStash.decorators import json_response
from getpass import getpass
from six.moves import input
import os
import logging
log = logging.getLogger('bigstash.auth')
class BigStashAuth(BigStashAPIBase):
    """API client used solely to obtain API key/secret tokens."""
    NAME = "BigStash Python SDK v{}".format(__version__)
    @json_response
    def GetAPIKey(self, username=None, password=None, auth=None, name=NAME):
        """Create a named API token, authenticating with HTTP basic auth.

        Either pass a ready (user, pass) tuple as *auth*, or *username*
        and *password* separately.
        """
        if auth is None and username is not None and password is not None:
            auth = (username, password)
        return self.post('tokens', auth=auth, json={"name": name})
def get_api_credentials(settings, username=None, password=None):
    """Return a (key, secret) API credential pair.

    Resolution order: BS_API_KEY/BS_API_SECRET environment variables,
    then the saved auth.<profile> config file, finally an interactive
    username/password prompt (optionally saving the newly issued key).
    """
    k = s = None
    if all(e in os.environ for e in ('BS_API_KEY', 'BS_API_SECRET')):
        k, s = (os.environ['BS_API_KEY'], os.environ['BS_API_SECRET'])
    else:
        authfile = 'auth.{}'.format(settings.profile)
        try:
            r = settings.read_config_file(authfile)
        except Exception:
            # Any read/parse failure means there is no usable saved key;
            # fall back to prompting and requesting a fresh token.
            log.debug("error reading config file", exc_info=True)
            print("No saved credentials found")
            auth = BigStashAuth(settings=settings)
            r, _ = auth.GetAPIKey(username or input("Username: "),
                                  password or getpass("Password: "))
            if input("Save api key to settings? (y/N) ").lower() == "y":
                settings.write_config_file(authfile, r)
        k, s = (r['key'], r['secret'])
    return (k, s)
if __name__ == "__main__":
    import sys
    from BigStash.conf import BigStashAPISettings
    settings = BigStashAPISettings.load_settings()
    u = None
    # A leading "-d" flag enables debug logging; it is consumed so an
    # optional username may follow as the next argument.
    if len(sys.argv) > 1 and sys.argv[1] == '-d':
        logging.basicConfig(level=logging.DEBUG)
        sys.argv = sys.argv[1:]
    if len(sys.argv) > 1:
        u = sys.argv[1]
    k, s = get_api_credentials(settings, username=u)
    print("Key: {}".format(k))
    print("Secret: {}".format(s))
| apache-2.0 |
ZhangXinNan/tensorflow | tensorflow/contrib/learn/python/learn/estimators/head_test.py | 33 | 75693 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for head.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
# pylint: disable=g-bad-todo,g-import-not-at-top
import numpy as np
import six
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.core.framework import summary_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.losses import losses as losses_lib
from tensorflow.python.platform import test
def _assert_variables(test_case,
                      expected_global=None,
                      expected_model=None,
                      expected_trainable=None):
  """Asserts the graph's global/model/trainable variable names match.

  Each `expected_*` argument is an iterable of variable names (or None,
  meaning "expect no variables of that kind"); order is ignored.
  """
  expectations = (
      (expected_global, variables.global_variables()),
      (expected_model, variables.model_variables()),
      (expected_trainable, variables.trainable_variables()),
  )
  for expected, actual_vars in expectations:
    test_case.assertItemsEqual(
        tuple([] if expected is None else expected),
        tuple([v.name for v in actual_vars]))
def _assert_no_variables(test_case):
  """Asserts that no global, model, or trainable variables exist."""
  _assert_variables(test_case, expected_global=None, expected_model=None,
                    expected_trainable=None)
# This must be called from within a tf.Session.
def _assert_metrics(test_case, expected_loss, expected_eval_metrics,
                    model_fn_ops):
  """Asserts `model_fn_ops`' loss and eval metrics match expected values.

  Args:
    test_case: `TestCase` instance the assertions are issued against.
    expected_loss: expected value of `model_fn_ops.loss` (4 decimal places).
    expected_eval_metrics: dict of metric name -> expected value. Every key
      must be present in `model_fn_ops.eval_metric_ops`, and both the
      metric's update tensor and value tensor must evaluate to the
      expected value.
    model_fn_ops: `ModelFnOps` to check.
  """
  test_case.assertAlmostEqual(expected_loss, model_fn_ops.loss.eval(), places=4)
  for k in six.iterkeys(expected_eval_metrics):
    test_case.assertIn(k, six.iterkeys(model_fn_ops.eval_metric_ops))
  # Streaming metrics allocate local variables; initialize them before
  # running any update op. `local_variables_initializer` is the
  # non-deprecated spelling of `initialize_local_variables`, matching the
  # `global_variables_initializer` calls used elsewhere in this file.
  variables.local_variables_initializer().run()
  for key, expected_value in six.iteritems(expected_eval_metrics):
    value_tensor, update_tensor = model_fn_ops.eval_metric_ops[key]
    update = update_tensor.eval()
    test_case.assertAlmostEqual(
        expected_value,
        update,
        places=4,
        msg="%s: update, expected %s, got %s." % (key, expected_value, update))
    value = value_tensor.eval()
    test_case.assertAlmostEqual(
        expected_value,
        value,
        places=4,
        msg="%s: value, expected %s, got %s." % (key, expected_value, value))
# This must be called from within a tf.Session.
def _assert_summary_tags(test_case, expected_tags=None):
  """Asserts the graph's collected summary tags equal `expected_tags`."""
  def _tag_of(summary_op):
    # Evaluate the serialized summary and pull out its (single) tag.
    summ = summary_pb2.Summary()
    summ.ParseFromString(summary_op.eval())
    return summ.value[0].tag
  actual_tags = [_tag_of(op)
                 for op in ops.get_collection(ops.GraphKeys.SUMMARIES)]
  test_case.assertItemsEqual(expected_tags or [], actual_tags)
def _sigmoid(x):
return 1. / (1. + math.exp(-1 * x))
class PoissonHeadTest(test.TestCase):
  """Tests for the head returned by `head_lib.poisson_regression_head`."""
  def _assert_output_alternatives(self, model_fn_ops):
    # A Poisson regression head exposes a single (default) output
    # alternative tagged as a linear-regression problem.
    self.assertEquals({
        None: constants.ProblemType.LINEAR_REGRESSION
    }, {
        k: v[0] for k, v in six.iteritems(model_fn_ops.output_alternatives)
    })
  def _log_poisson_loss(self, logits, labels):
    """Numpy reference implementation of the mean log-Poisson loss.

    Computes exp(x) - z*x per example, plus the Stirling approximation
    z*log(z) - z + 0.5*log(2*pi*z) for targets z > 1 (the approximation
    term is masked to 0 where z <= 1), averaged over examples.
    """
    x = np.array([f[0] for f in logits])
    z = np.array([f[0] for f in labels])
    lpl = np.exp(x) - z * x
    stirling_approx = z * np.log(z) - z + 0.5 * np.log(2. * np.pi * z)
    lpl += np.ma.masked_array(stirling_approx, mask=(z <= 1)).filled(0.)
    return sum(lpl)/len(lpl)
  def testPoissonWithLogits(self):
    head = head_lib.poisson_regression_head()
    labels = ((0.,), (1.,), (1.,))
    logits = ((0.,), (-1.,), (3.,))
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {},
          labels=labels,
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=head_lib.no_op_train_fn,
          logits=logits)
      self._assert_output_alternatives(model_fn_ops)
      _assert_summary_tags(self, ["loss"])
      _assert_no_variables(self)
      # The head's loss must match the numpy reference computation above.
      loss = self._log_poisson_loss(logits, labels)
      _assert_metrics(self, loss, {"loss": loss}, model_fn_ops)
class RegressionHeadTest(test.TestCase):
  """Tests for the head returned by `head_lib.regression_head`."""
  def _assert_output_alternatives(self, model_fn_ops):
    # A regression head exposes a single (default) output alternative
    # tagged as a linear-regression problem.
    self.assertEquals({
        None: constants.ProblemType.LINEAR_REGRESSION
    }, {
        k: v[0] for k, v in six.iteritems(model_fn_ops.output_alternatives)
    })
  # TODO(zakaria): test multilabel regression.
  def testRegressionWithLogits(self):
    head = head_lib.regression_head()
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {},
          labels=((0.,), (1.,), (1.,)),
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=head_lib.no_op_train_fn,
          logits=((1.,), (1.,), (3.,)))
      self._assert_output_alternatives(model_fn_ops)
      _assert_summary_tags(self, ["loss"])
      _assert_no_variables(self)
      # Mean squared error: (1 + 0 + 4) / 3 = 5/3.
      _assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)
  def testRegressionWithLogitFn(self):
    head = head_lib.regression_head(link_fn=math_ops.square)
    # NOTE(review): helper name has a typo ("preditions"); kept as-is since
    # this is a doc-only pass.
    def _assert_preditions(test_case, expected_predictions, model_fn_ops):
      variables.initialize_local_variables().run()
      test_case.assertAllClose(expected_predictions,
                               model_fn_ops.predictions["scores"].eval())
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {},
          labels=((0.,), (1.,), (1.,)),
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=head_lib.no_op_train_fn,
          logits=((1.,), (1.,), (3.,)))
      self._assert_output_alternatives(model_fn_ops)
      _assert_summary_tags(self, ["loss"])
      _assert_no_variables(self)
      _assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)
      # link_fn squares the logits, so predictions are (1, 1, 9).
      _assert_preditions(self, ([1.0, 1.0, 9.0]), model_fn_ops)
  def testRegressionWithInvalidLogits(self):
    # Regression expects 1-dimensional logits; 2-wide logits must fail.
    head = head_lib.regression_head()
    with ops.Graph().as_default(), session.Session():
      with self.assertRaisesRegexp(ValueError, "Dimensions.*not compatible"):
        head.create_model_fn_ops(
            {},
            labels=((0.,), (1.,), (1.,)),
            mode=model_fn.ModeKeys.TRAIN,
            train_op_fn=head_lib.no_op_train_fn,
            logits=((1., 1.), (1., 1.), (3., 1.)))
  def testRegressionWithLogitsInput(self):
    head = head_lib.regression_head()
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {},
          labels=((0.,), (1.,), (1.,)),
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=head_lib.no_op_train_fn,
          logits_input=((0., 0.), (0., 0.), (0., 0.)))
      self._assert_output_alternatives(model_fn_ops)
      # Passing logits_input makes the head build its own logits layer,
      # so a weights/biases variable pair must appear.
      w = ("regression_head/logits/weights:0",
           "regression_head/logits/biases:0")
      _assert_variables(
          self, expected_global=w, expected_model=w, expected_trainable=w)
      variables.global_variables_initializer().run()
      _assert_summary_tags(self, ["loss"])
      _assert_metrics(self, 2. / 3, {"loss": 2. / 3}, model_fn_ops)
  def testRegressionWithLogitsAndLogitsInput(self):
    # Supplying both logits and logits_input is ambiguous and must fail.
    head = head_lib.regression_head()
    with ops.Graph().as_default(), session.Session():
      with self.assertRaisesRegexp(
          ValueError, "Both logits and logits_input supplied"):
        head.create_model_fn_ops(
            {},
            labels=((0.,), (1.,), (1.,)),
            mode=model_fn.ModeKeys.TRAIN,
            train_op_fn=head_lib.no_op_train_fn,
            logits_input=((0., 0.), (0., 0.), (0., 0.)),
            logits=((1.,), (1.,), (3.,)))
  def testRegressionEvalMode(self):
    head = head_lib.regression_head()
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {},
          labels=((1.,), (1.,), (3.,)),
          mode=model_fn.ModeKeys.EVAL,
          train_op_fn=head_lib.no_op_train_fn,
          logits=((0.,), (1.,), (1.,)))
      self._assert_output_alternatives(model_fn_ops)
      # EVAL mode must not create a train op.
      self.assertIsNone(model_fn_ops.train_op)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      _assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)
  def testRegressionWithLabelName(self):
    # Labels may be passed as a dict keyed by the configured label_name.
    label_name = "my_label"
    head = head_lib.regression_head(label_name=label_name)
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {},
          labels={label_name: ((0.,), (1.,), (1.,))},
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=head_lib.no_op_train_fn,
          logits=((1.,), (1.,), (3.,)))
      self._assert_output_alternatives(model_fn_ops)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      _assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)
  def testRegressionWithScalarWeights(self):
    head = head_lib.regression_head(weight_column_name="label_weight")
    with ops.Graph().as_default(), session.Session():
      weights = 2.
      labels = ((0.,), (1.,), (1.,))
      model_fn_ops = head.create_model_fn_ops(
          features={"label_weight": weights},
          labels=labels,
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=head_lib.no_op_train_fn,
          logits=((1.,), (1.,), (3.,)))
      self._assert_output_alternatives(model_fn_ops)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      # Training loss is weighted-sum / num_examples; the eval "loss"
      # metric divides by the sum of weights instead.
      _assert_metrics(self, (weights * 5.) / len(labels), {
          "loss": (weights * 5.) / (weights * len(labels))
      }, model_fn_ops)
  def testRegressionWith1DWeights(self):
    head = head_lib.regression_head(weight_column_name="label_weight")
    with ops.Graph().as_default(), session.Session():
      weights = (2., 5., 0.)
      labels = ((0.,), (1.,), (1.,))
      model_fn_ops = head.create_model_fn_ops(
          features={"label_weight": weights},
          labels=labels,
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=head_lib.no_op_train_fn,
          logits=((1.,), (1.,), (3.,)))
      self._assert_output_alternatives(model_fn_ops)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      # Weighted squared errors: 2*1 + 5*0 + 0*4 = 2.
      _assert_metrics(self, 2. / len(labels), {"loss": 2. / np.sum(weights)},
                      model_fn_ops)
  def testRegressionWith2DWeights(self):
    # Same expectations as the 1-D weight case; weights just have an
    # extra trailing dimension.
    head = head_lib.regression_head(weight_column_name="label_weight")
    with ops.Graph().as_default(), session.Session():
      weights = ((2.,), (5.,), (0.,))
      labels = ((0.,), (1.,), (1.,))
      model_fn_ops = head.create_model_fn_ops(
          features={"label_weight": weights},
          labels=labels,
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=head_lib.no_op_train_fn,
          logits=((1.,), (1.,), (3.,)))
      self._assert_output_alternatives(model_fn_ops)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      _assert_metrics(self, 2. / len(labels), {"loss": 2. / np.sum(weights)},
                      model_fn_ops)
  def testRegressionWithCenteredBias(self):
    head = head_lib.regression_head(enable_centered_bias=True)
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {},
          labels=((0.,), (1.,), (1.,)),
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=head_lib.no_op_train_fn,
          logits=((1.,), (1.,), (3.,)))
      self._assert_output_alternatives(model_fn_ops)
      # Centered bias adds a trainable bias variable plus its Adagrad slot,
      # and a per-dimension bias summary.
      _assert_variables(
          self,
          expected_global=(
              "regression_head/centered_bias_weight:0",
              "regression_head/regression_head/centered_bias_weight/Adagrad:0",
          ),
          expected_trainable=("regression_head/centered_bias_weight:0",))
      variables.global_variables_initializer().run()
      _assert_summary_tags(self, [
          "loss",
          "regression_head/centered_bias/bias_0"
      ])
      _assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)
  def testRegressionErrorInSparseTensorLabels(self):
    # Sparse labels are not supported for regression heads.
    head = head_lib.regression_head()
    with ops.Graph().as_default():
      labels = sparse_tensor.SparseTensorValue(
          indices=((0, 0), (1, 0), (2, 0)),
          values=(0., 1., 1.),
          dense_shape=(3, 1))
      with self.assertRaisesRegexp(ValueError,
                                   "SparseTensor is not supported"):
        head.create_model_fn_ops(
            {},
            labels=labels,
            mode=model_fn.ModeKeys.TRAIN,
            train_op_fn=head_lib.no_op_train_fn,
            logits=((1.,), (1.,), (3.,)))
class MultiLabelHeadTest(test.TestCase):
  """Tests for the head returned by `head_lib.multi_label_head`."""
  def _assert_output_alternatives(self, model_fn_ops):
    # A multi-label head exposes a single (default) output alternative
    # tagged as a classification problem.
    self.assertEquals({
        None: constants.ProblemType.CLASSIFICATION
    }, {
        k: v[0] for k, v in six.iteritems(model_fn_ops.output_alternatives)
    })
  def setUp(self):
    # Fixture: one example, three classes; only class 2 is "on".
    self._logits = ((1., 0., 0.),)
    self._labels = ((0, 0, 1),)
  def _expected_eval_metrics(self, expected_loss):
    """Expected eval metrics for the fixture logits/labels from setUp."""
    return {
        "accuracy": 1. / 3,
        "loss": expected_loss,
        "auc": 1. / 4,
        "auc/class0": 1.,
        "auc/class1": 1.,
        "auc/class2": 0.,
        "auc_precision_recall": 0.166667,
        "auc_precision_recall/class0": 0,
        "auc_precision_recall/class1": 0.,
        "auc_precision_recall/class2": 1.,
        "labels/actual_label_mean/class0": self._labels[0][0],
        "labels/actual_label_mean/class1": self._labels[0][1],
        "labels/actual_label_mean/class2": self._labels[0][2],
        "labels/logits_mean/class0": self._logits[0][0],
        "labels/logits_mean/class1": self._logits[0][1],
        "labels/logits_mean/class2": self._logits[0][2],
        "labels/prediction_mean/class0": self._logits[0][0],
        "labels/prediction_mean/class1": self._logits[0][1],
        "labels/prediction_mean/class2": self._logits[0][2],
        "labels/probability_mean/class0": _sigmoid(self._logits[0][0]),
        "labels/probability_mean/class1": _sigmoid(self._logits[0][1]),
        "labels/probability_mean/class2": _sigmoid(self._logits[0][2]),
    }
  def testMultiLabelWithLogits(self):
    n_classes = 3
    head = head_lib.multi_label_head(
        n_classes=n_classes, metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,
          logits=self._logits)
      self._assert_output_alternatives(model_fn_ops)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      # Mean sigmoid cross-entropy over the fixture logits/labels.
      expected_loss = .89985204
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)
  def testMultiLabelTwoClasses(self):
    n_classes = 2
    labels = ((0, 1),)
    logits = ((1., 0.),)
    head = head_lib.multi_label_head(
        n_classes=n_classes, metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {}, model_fn.ModeKeys.TRAIN, labels=labels,
          train_op_fn=head_lib.no_op_train_fn, logits=logits)
      self._assert_output_alternatives(model_fn_ops)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = 1.00320443
      _assert_metrics(self, expected_loss, {
          "accuracy": 0.,
          "auc": 0.,
          "loss": expected_loss,
          "auc/class0": 1.,
          "auc/class1": 0.,
          "labels/actual_label_mean/class0": labels[0][0],
          "labels/actual_label_mean/class1": labels[0][1],
          "labels/logits_mean/class0": logits[0][0],
          "labels/logits_mean/class1": logits[0][1],
          "labels/prediction_mean/class0": logits[0][0],
          "labels/prediction_mean/class1": logits[0][1],
          "labels/probability_mean/class0": _sigmoid(logits[0][0]),
          "labels/probability_mean/class1": _sigmoid(logits[0][1]),
      }, model_fn_ops)
  def testMultiLabelWithInvalidLogits(self):
    # Logits narrower than n_classes must fail shape validation.
    head = head_lib.multi_label_head(n_classes=len(self._labels[0]) + 1)
    with ops.Graph().as_default(), session.Session():
      with self.assertRaisesRegexp(ValueError, "Dimensions.*not compatible"):
        head.create_model_fn_ops(
            {}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,
            logits=self._logits)
  def testMultiLabelWithLogitsInput(self):
    n_classes = 3
    head = head_lib.multi_label_head(
        n_classes=n_classes, metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,
          logits_input=((0., 0.),))
      self._assert_output_alternatives(model_fn_ops)
      # Passing logits_input makes the head build its own logits layer.
      w = ("multi_label_head/logits/weights:0",
           "multi_label_head/logits/biases:0")
      _assert_variables(
          self, expected_global=w, expected_model=w, expected_trainable=w)
      variables.global_variables_initializer().run()
      _assert_summary_tags(self, ["loss"])
      # Zero logits give probability .5 per class: loss = ln(2).
      expected_loss = .69314718
      _assert_metrics(self, expected_loss, {
          "accuracy": 2. / 3,
          "auc": 2. / 4,
          "loss": expected_loss,
          "auc/class0": 1.,
          "auc/class1": 1.,
          "auc/class2": 0.,
          "labels/actual_label_mean/class0": self._labels[0][0],
          "labels/actual_label_mean/class1": self._labels[0][1],
          "labels/actual_label_mean/class2": self._labels[0][2],
          "labels/logits_mean/class0": 0.,
          "labels/logits_mean/class1": 0.,
          "labels/logits_mean/class2": 0.,
          "labels/prediction_mean/class0": 0.,
          "labels/prediction_mean/class1": 0.,
          "labels/prediction_mean/class2": 0.,
          "labels/probability_mean/class0": .5,
          "labels/probability_mean/class1": .5,
          "labels/probability_mean/class2": .5,
      }, model_fn_ops)
  def testMultiLabelWithLogitsAndLogitsInput(self):
    # Supplying both logits and logits_input is ambiguous and must fail.
    n_classes = 3
    head = head_lib.multi_label_head(
        n_classes=n_classes, metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      with self.assertRaisesRegexp(
          ValueError, "Both logits and logits_input supplied"):
        head.create_model_fn_ops(
            {}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,
            logits_input=((0., 0.),), logits=self._logits)
  def testMultiLabelEval(self):
    n_classes = 3
    head = head_lib.multi_label_head(
        n_classes=n_classes, metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {}, model_fn.ModeKeys.EVAL, self._labels, head_lib.no_op_train_fn,
          logits=self._logits)
      self._assert_output_alternatives(model_fn_ops)
      # EVAL mode must not create a train op.
      self.assertIsNone(model_fn_ops.train_op)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = .89985204
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)
  def testMultiClassEvalWithLargeLogits(self):
    n_classes = 3
    head = head_lib.multi_label_head(
        n_classes=n_classes, metric_class_ids=range(n_classes))
    logits = ((2., 0., -1),)
    with ops.Graph().as_default(), session.Session():
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.create_model_fn_ops(
          {}, model_fn.ModeKeys.EVAL, self._labels, head_lib.no_op_train_fn,
          logits=logits)
      self._assert_output_alternatives(model_fn_ops)
      self.assertIsNone(model_fn_ops.train_op)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = 1.377779
      expected_eval_metrics = {
          "accuracy": 1. / 3,
          "auc": 9.99999e-07,
          "loss": expected_loss,
          "auc/class0": 1.,
          "auc/class1": 1.,
          "auc/class2": 0.,
          "labels/actual_label_mean/class0": 0. / 1,
          "labels/actual_label_mean/class1": 0. / 1,
          "labels/actual_label_mean/class2": 1. / 1,
          "labels/logits_mean/class0": logits[0][0],
          "labels/logits_mean/class1": logits[0][1],
          "labels/logits_mean/class2": logits[0][2],
          "labels/prediction_mean/class0": 1,
          "labels/prediction_mean/class1": 0,
          "labels/prediction_mean/class2": 0,
          "labels/probability_mean/class0": _sigmoid(logits[0][0]),
          "labels/probability_mean/class1": _sigmoid(logits[0][1]),
          "labels/probability_mean/class2": _sigmoid(logits[0][2]),
      }
      _assert_metrics(self, expected_loss,
                      expected_eval_metrics, model_fn_ops)
  def testMultiLabelInfer(self):
    n_classes = 3
    head = head_lib.multi_label_head(n_classes=n_classes, head_name="head_name")
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {}, model_fn.ModeKeys.INFER, self._labels, head_lib.no_op_train_fn,
          logits=((1., 0., 0.), (0., 0., 1)))
      self.assertIsNone(model_fn_ops.train_op)
      _assert_no_variables(self)
      with session.Session():
        self.assertListEqual(
            [1, 0, 0], model_fn_ops.predictions["classes"].eval().tolist()[0])
        # With a head_name, output alternatives are keyed by that name.
        self.assertItemsEqual(
            ["head_name"], six.iterkeys(model_fn_ops.output_alternatives))
        self.assertEqual(
            constants.ProblemType.CLASSIFICATION,
            model_fn_ops.output_alternatives["head_name"][0])
        predictions_for_serving = (
            model_fn_ops.output_alternatives["head_name"][1])
        self.assertIn("classes", six.iterkeys(predictions_for_serving))
        self.assertAllEqual(
            [[b"0", b"1", b"2"], [b"0", b"1", b"2"]],
            predictions_for_serving["classes"].eval())
        self.assertIn("probabilities", six.iterkeys(predictions_for_serving))
        self.assertAllClose(
            [[0.731059, 0.5, 0.5],
             [0.5, 0.5, 0.731059,]],
            predictions_for_serving["probabilities"].eval())
  def testMultiLabelWithLabelName(self):
    # Labels may be passed as a dict keyed by the configured label_name.
    n_classes = 3
    label_name = "my_label"
    head = head_lib.multi_label_head(
        n_classes=n_classes,
        label_name=label_name,
        metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {}, model_fn.ModeKeys.TRAIN, {label_name: self._labels},
          head_lib.no_op_train_fn, logits=self._logits)
      self._assert_output_alternatives(model_fn_ops)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = .89985204
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)
  def testMultiLabelWithScalarWeight(self):
    n_classes = 3
    head = head_lib.multi_label_head(
        n_classes=n_classes,
        weight_column_name="label_weight",
        metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          features={"label_weight": .1},
          labels=self._labels,
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=head_lib.no_op_train_fn,
          logits=self._logits)
      self._assert_output_alternatives(model_fn_ops)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      # Training loss is scaled by the weight; eval metrics are not.
      _assert_metrics(self, .089985214,
                      self._expected_eval_metrics(.89985214), model_fn_ops)
  def testMultiLabelWith1DWeight(self):
    # A 1-D weight vector cannot be broadcast against per-class losses.
    n_classes = 3
    head = head_lib.multi_label_head(
        n_classes=n_classes,
        weight_column_name="label_weight",
        metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      with self.assertRaisesRegexp(
          ValueError, "weights can not be broadcast to values"):
        head.create_model_fn_ops(
            features={"label_weight": (.1, .1, .1)},
            labels=self._labels,
            mode=model_fn.ModeKeys.TRAIN,
            train_op_fn=head_lib.no_op_train_fn,
            logits=self._logits)
  def testMultiLabelWith2DWeight(self):
    n_classes = 3
    head = head_lib.multi_label_head(
        n_classes=n_classes,
        weight_column_name="label_weight",
        metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          features={"label_weight": ((.1, .1, .1),)},
          labels=self._labels,
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=head_lib.no_op_train_fn,
          logits=self._logits)
      self._assert_output_alternatives(model_fn_ops)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      _assert_metrics(self, .089985214,
                      self._expected_eval_metrics(.89985214), model_fn_ops)
  def testMultiLabelWithCustomLoss(self):
    # Uses the module-level _sigmoid_cross_entropy helper as a custom
    # loss_fn; expectations match the weighted default loss.
    n_classes = 3
    head = head_lib.multi_label_head(
        n_classes=n_classes,
        weight_column_name="label_weight",
        metric_class_ids=range(n_classes),
        loss_fn=_sigmoid_cross_entropy)
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          features={"label_weight": .1},
          labels=self._labels,
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=head_lib.no_op_train_fn,
          logits=self._logits)
      self._assert_output_alternatives(model_fn_ops)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = .089985214
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)
  def testMultiLabelWithCenteredBias(self):
    n_classes = 3
    head = head_lib.multi_label_head(
        n_classes=n_classes,
        enable_centered_bias=True,
        metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,
          logits=self._logits)
      self._assert_output_alternatives(model_fn_ops)
      # Centered bias adds a trainable bias variable plus its Adagrad slot,
      # and one bias summary per class.
      _assert_variables(
          self,
          expected_global=(
              "multi_label_head/centered_bias_weight:0",
              ("multi_label_head/multi_label_head/centered_bias_weight/"
               "Adagrad:0"),),
          expected_trainable=("multi_label_head/centered_bias_weight:0",))
      variables.global_variables_initializer().run()
      _assert_summary_tags(self, (
          "loss",
          "multi_label_head/centered_bias/bias_0",
          "multi_label_head/centered_bias/bias_1",
          "multi_label_head/centered_bias/bias_2"
      ))
      expected_loss = .89985204
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)
  def testMultiLabelSparseTensorLabels(self):
    # Sparse labels holding class ids are converted to a dense multi-hot
    # encoding; expectations match the dense fixture.
    n_classes = 3
    head = head_lib.multi_label_head(
        n_classes=n_classes, metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      labels = sparse_tensor.SparseTensorValue(
          indices=((0, 0),),
          values=(2,),
          dense_shape=(1, 1))
      model_fn_ops = head.create_model_fn_ops(
          features={},
          mode=model_fn.ModeKeys.TRAIN,
          labels=labels,
          train_op_fn=head_lib.no_op_train_fn,
          logits=self._logits)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = .89985204
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)
  def testMultiLabelSparseTensorLabelsTooFewClasses(self):
    n_classes = 3
    head = head_lib.multi_label_head(
        n_classes=n_classes, metric_class_ids=range(n_classes))
    # Set _logits_dimension (n_classes) to a lower value; if it's set to 1
    # upfront, the class throws an error during initialization.
    head._logits_dimension = 1
    with ops.Graph().as_default(), session.Session():
      labels = sparse_tensor.SparseTensorValue(
          indices=((0, 0),),
          values=(2,),
          dense_shape=(1, 1))
      with self.assertRaisesRegexp(ValueError,
                                   "Must set num_classes >= 2 when passing"):
        head.create_model_fn_ops(
            features={},
            labels=labels,
            mode=model_fn.ModeKeys.TRAIN,
            train_op_fn=head_lib.no_op_train_fn,
            logits=[0.])
class BinaryClassificationHeadTest(test.TestCase):
def _assert_output_alternatives(self, model_fn_ops):
self.assertEquals({
None: constants.ProblemType.LOGISTIC_REGRESSION
}, {
k: v[0] for k, v in six.iteritems(model_fn_ops.output_alternatives)
})
def setUp(self):
self._logits = ((1.,), (1.,))
self._labels = ((1.,), (0.,))
def _expected_eval_metrics(self, expected_loss):
label_mean = np.mean(self._labels)
return {
"accuracy": 1. / 2,
"accuracy/baseline_label_mean": label_mean,
"accuracy/threshold_0.500000_mean": 1. / 2,
"auc": 1. / 2,
"auc_precision_recall": 0.749999,
"labels/actual_label_mean": label_mean,
"labels/prediction_mean": .731059, # softmax
"loss": expected_loss,
"precision/positive_threshold_0.500000_mean": 1. / 2,
"recall/positive_threshold_0.500000_mean": 1. / 1,
}
def testBinaryClassificationWithLogits(self):
n_classes = 2
head = head_lib.multi_class_head(n_classes=n_classes)
with ops.Graph().as_default(), session.Session():
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
model_fn_ops = head.create_model_fn_ops(
{}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,
logits=self._logits)
self._assert_output_alternatives(model_fn_ops)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
expected_loss = .81326175
_assert_metrics(self, expected_loss,
self._expected_eval_metrics(expected_loss), model_fn_ops)
def testBinaryClassificationWithInvalidLogits(self):
head = head_lib.multi_class_head(n_classes=len(self._labels) + 1)
with ops.Graph().as_default(), session.Session():
with self.assertRaisesRegexp(ValueError, "Dimensions.*not compatible"):
head.create_model_fn_ops(
{}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,
logits=self._logits)
def testBinaryClassificationWithLogitsInput(self):
n_classes = 2
head = head_lib.multi_class_head(n_classes=n_classes)
with ops.Graph().as_default(), session.Session():
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
model_fn_ops = head.create_model_fn_ops(
{}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,
logits_input=((0., 0.), (0., 0.)))
self._assert_output_alternatives(model_fn_ops)
w = ("binary_logistic_head/logits/weights:0",
"binary_logistic_head/logits/biases:0")
_assert_variables(
self, expected_global=w, expected_model=w, expected_trainable=w)
variables.global_variables_initializer().run()
_assert_summary_tags(self, ["loss"])
expected_loss = .69314718
label_mean = np.mean(self._labels)
_assert_metrics(self, expected_loss, {
"accuracy": 1. / 2,
"accuracy/baseline_label_mean": label_mean,
"accuracy/threshold_0.500000_mean": 1. / 2,
"auc": 1. / 2,
"labels/actual_label_mean": label_mean,
"labels/prediction_mean": .5, # softmax
"loss": expected_loss,
"precision/positive_threshold_0.500000_mean": 0. / 2,
"recall/positive_threshold_0.500000_mean": 0. / 1,
}, model_fn_ops)
def testBinaryClassificationWithLogitsAndLogitsInput(self):
head = head_lib.multi_class_head(n_classes=2)
with ops.Graph().as_default(), session.Session():
with self.assertRaisesRegexp(
ValueError, "Both logits and logits_input supplied"):
head.create_model_fn_ops(
{}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,
logits_input=((0., 0.), (0., 0.)), logits=self._logits)
def testBinaryClassificationEval(self):
n_classes = 2
head = head_lib.multi_class_head(n_classes=n_classes)
with ops.Graph().as_default(), session.Session():
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
model_fn_ops = head.create_model_fn_ops(
{}, model_fn.ModeKeys.EVAL, self._labels, head_lib.no_op_train_fn,
logits=self._logits)
self._assert_output_alternatives(model_fn_ops)
self.assertIsNone(model_fn_ops.train_op)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
expected_loss = .81326175
_assert_metrics(self, expected_loss,
self._expected_eval_metrics(expected_loss), model_fn_ops)
def testBinaryClassificationInfer(self):
n_classes = 2
head = head_lib.multi_class_head(n_classes=n_classes, head_name="head_name")
with ops.Graph().as_default(), session.Session():
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
model_fn_ops = head.create_model_fn_ops(
{}, model_fn.ModeKeys.INFER, self._labels, head_lib.no_op_train_fn,
logits=self._logits)
self.assertIsNone(model_fn_ops.train_op)
_assert_no_variables(self)
with session.Session():
self.assertListEqual(
[1, 1], list(model_fn_ops.predictions["classes"].eval()))
self.assertItemsEqual(
["head_name"], six.iterkeys(model_fn_ops.output_alternatives))
self.assertEqual(
constants.ProblemType.LOGISTIC_REGRESSION,
model_fn_ops.output_alternatives["head_name"][0])
predictions_for_serving = (
model_fn_ops.output_alternatives["head_name"][1])
self.assertIn("classes", six.iterkeys(predictions_for_serving))
predicted_classes = predictions_for_serving["classes"].eval().tolist()
self.assertListEqual(
[b"0", b"1"], predicted_classes[0])
self.assertIn("probabilities", six.iterkeys(predictions_for_serving))
def testBinaryClassificationInferMode_withWeightColumn(self):
n_classes = 2
head = head_lib.multi_class_head(n_classes=n_classes,
weight_column_name="label_weight")
with ops.Graph().as_default(), session.Session():
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
model_fn_ops = head.create_model_fn_ops(
# This is what is being tested, features should not have weight for
# inference.
{}, model_fn.ModeKeys.INFER, self._labels, head_lib.no_op_train_fn,
logits=self._logits)
self._assert_output_alternatives(model_fn_ops)
self.assertIsNone(model_fn_ops.train_op)
_assert_no_variables(self)
def testErrorInSparseTensorLabels(self):
n_classes = 2
head = head_lib.multi_class_head(n_classes=n_classes)
with ops.Graph().as_default():
labels = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (2, 0)),
values=(0, 1, 1),
dense_shape=(3, 1))
with self.assertRaisesRegexp(ValueError,
"SparseTensor is not supported"):
head.create_model_fn_ops(
{},
model_fn.ModeKeys.TRAIN,
labels,
head_lib.no_op_train_fn,
logits=((1.,), (1.,), (3.,)))
def testBinaryClassificationWithLabelName(self):
label_name = "my_label"
head = head_lib.multi_class_head(n_classes=2, label_name=label_name)
with ops.Graph().as_default(), session.Session():
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
model_fn_ops = head.create_model_fn_ops(
{},
labels={label_name: self._labels},
mode=model_fn.ModeKeys.TRAIN,
train_op_fn=head_lib.no_op_train_fn,
logits=self._logits)
self._assert_output_alternatives(model_fn_ops)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
expected_loss = .81326175
_assert_metrics(self, expected_loss,
self._expected_eval_metrics(expected_loss), model_fn_ops)
def testBinaryClassificationWith1DWeights(self):
n_classes = 2
head = head_lib.multi_class_head(
n_classes=n_classes, weight_column_name="label_weight")
with ops.Graph().as_default(), session.Session():
weights = (1., 0.)
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
model_fn_ops = head.create_model_fn_ops(
features={"label_weight": weights},
labels=self._labels,
mode=model_fn.ModeKeys.TRAIN,
train_op_fn=head_lib.no_op_train_fn,
logits=self._logits)
self._assert_output_alternatives(model_fn_ops)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
expected_total_loss = .31326166
_assert_metrics(
self,
expected_total_loss / len(weights),
{
"accuracy": 1. / 1,
"accuracy/baseline_label_mean": 1. / 1,
"accuracy/threshold_0.500000_mean": 1. / 1,
"auc": 0. / 1,
"labels/actual_label_mean": 1. / 1,
"labels/prediction_mean": .731059, # softmax
# eval loss is weighted loss divided by sum of weights.
"loss": expected_total_loss,
"precision/positive_threshold_0.500000_mean": 1. / 1,
"recall/positive_threshold_0.500000_mean": 1. / 1,
},
model_fn_ops)
  def testBinaryClassificationWith2DWeights(self):
    """Rank-2 (column-vector) weights behave the same as rank-1 weights."""
    n_classes = 2
    head = head_lib.multi_class_head(
        n_classes=n_classes, weight_column_name="label_weight")
    with ops.Graph().as_default(), session.Session():
      # Rank-2 weights: shape (batch, 1).
      weights = ((1.,), (0.,))
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.create_model_fn_ops(
          features={"label_weight": weights},
          labels=self._labels,
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=head_lib.no_op_train_fn,
          logits=self._logits)
      self._assert_output_alternatives(model_fn_ops)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_total_loss = .31326166
      _assert_metrics(
          self,
          expected_total_loss / len(weights),
          {
              "accuracy": 1. / 1,
              "accuracy/baseline_label_mean": 1. / 1,
              "accuracy/threshold_0.500000_mean": 1. / 1,
              "auc": 0. / 1,
              "labels/actual_label_mean": 1. / 1,
              "labels/prediction_mean": .731059,  # softmax
              # eval loss is weighted loss divided by sum of weights.
              "loss": expected_total_loss,
              "precision/positive_threshold_0.500000_mean": 1. / 1,
              "recall/positive_threshold_0.500000_mean": 1. / 1,
          },
          model_fn_ops)
  def testBinaryClassificationWithCustomLoss(self):
    """A user-supplied `loss_fn` replaces the default binary logloss."""
    head = head_lib.multi_class_head(
        n_classes=2, weight_column_name="label_weight",
        loss_fn=_sigmoid_cross_entropy)
    with ops.Graph().as_default(), session.Session():
      weights = ((.2,), (0.,))
      model_fn_ops = head.create_model_fn_ops(
          features={"label_weight": weights},
          labels=self._labels,
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=head_lib.no_op_train_fn,
          logits=self._logits)
      self._assert_output_alternatives(model_fn_ops)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      # expected_loss is (total_weighted_loss)/1 since there is 1 nonzero
      # weight.
      expected_loss = 0.062652342
      _assert_metrics(
          self,
          expected_loss,
          {
              "accuracy": 1. / 1,
              "accuracy/baseline_label_mean": 1. / 1,
              "accuracy/threshold_0.500000_mean": 1. / 1,
              "auc": 0. / 1,
              "labels/actual_label_mean": 1. / 1,
              "labels/prediction_mean": .731059,  # softmax
              "loss": expected_loss,
              "precision/positive_threshold_0.500000_mean": 1. / 1,
              "recall/positive_threshold_0.500000_mean": 1. / 1,
          },
          model_fn_ops)
  def testBinaryClassificationWithCenteredBias(self):
    """enable_centered_bias=True creates a trainable bias var and its summary."""
    head = head_lib.multi_class_head(n_classes=2, enable_centered_bias=True)
    with ops.Graph().as_default(), session.Session():
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.create_model_fn_ops(
          {}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,
          logits=self._logits)
      self._assert_output_alternatives(model_fn_ops)
      # The centered-bias weight and its Adagrad slot are the only variables.
      _assert_variables(
          self,
          expected_global=(
              "binary_logistic_head/centered_bias_weight:0",
              ("binary_logistic_head/binary_logistic_head/centered_bias_weight/"
               "Adagrad:0"),),
          expected_trainable=("binary_logistic_head/centered_bias_weight:0",))
      variables.global_variables_initializer().run()
      _assert_summary_tags(self, [
          "loss",
          "binary_logistic_head/centered_bias/bias_0"
      ])
      expected_loss = .81326175
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)
class MultiClassHeadTest(test.TestCase):
  """Tests for `head_lib.multi_class_head` with n_classes > 2 (softmax head)."""

  def _assert_output_alternatives(self, model_fn_ops):
    # The default (unnamed) head registers a single CLASSIFICATION alternative.
    self.assertEquals({
        None: constants.ProblemType.CLASSIFICATION
    }, {
        k: v[0] for k, v in six.iteritems(model_fn_ops.output_alternatives)
    })

  def setUp(self):
    # One example, 3 classes; true class is 2 while the logits favor class 0.
    self._logits = ((1., 0., 0.),)
    self._labels = ((2,),)

  def _expected_eval_metrics(self, expected_loss):
    """Eval metrics expected for `self._logits`/`self._labels` at the given loss."""
    return {
        "accuracy": 0.,
        "loss": expected_loss,
        "labels/actual_label_mean/class0": 0. / 1,
        "labels/actual_label_mean/class1": 0. / 1,
        "labels/actual_label_mean/class2": 1. / 1,
        "labels/logits_mean/class0": self._logits[0][0],
        "labels/logits_mean/class1": self._logits[0][1],
        "labels/logits_mean/class2": self._logits[0][2],
        "labels/prediction_mean/class0": self._logits[0][0],
        "labels/prediction_mean/class1": self._logits[0][1],
        "labels/prediction_mean/class2": self._logits[0][2],
        "labels/probability_mean/class0": 0.576117,  # softmax
        "labels/probability_mean/class1": 0.211942,  # softmax
        "labels/probability_mean/class2": 0.211942,  # softmax
    }

  def testMultiClassWithLogits(self):
    n_classes = 3
    head = head_lib.multi_class_head(
        n_classes=n_classes, metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      # Multi-class loss is sparse softmax cross-entropy:
      # -log(softmax(logits)[label]) = -log(0.211942) ~= 1.5514447.
      model_fn_ops = head.create_model_fn_ops(
          {}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,
          logits=self._logits)
      self._assert_output_alternatives(model_fn_ops)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = 1.5514447
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)

  def testMultiClassWithInvalidLogits(self):
    # Logits with one fewer column than n_classes must be rejected.
    head = head_lib.multi_class_head(n_classes=len(self._logits[0]) + 1)
    with ops.Graph().as_default(), session.Session():
      with self.assertRaisesRegexp(ValueError, "Dimensions.*not compatible"):
        head.create_model_fn_ops(
            {}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,
            logits=self._logits)

  def testMultiClassWithNoneTrainOpFnInTrain(self):
    head = head_lib.multi_class_head(n_classes=3)
    with ops.Graph().as_default(), session.Session():
      with self.assertRaisesRegexp(
          ValueError, "train_op_fn can not be None in TRAIN mode"):
        head.create_model_fn_ops(
            {}, model_fn.ModeKeys.TRAIN, self._labels,
            train_op_fn=None,
            logits=self._logits)

  def testMultiClassWithLogitsInput(self):
    # `logits_input` makes the head build its own logits layer (weights+biases).
    n_classes = 3
    head = head_lib.multi_class_head(
        n_classes=n_classes, metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,
          logits_input=((0., 0.),))
      self._assert_output_alternatives(model_fn_ops)
      w = ("multi_class_head/logits/weights:0",
           "multi_class_head/logits/biases:0")
      _assert_variables(
          self, expected_global=w, expected_model=w, expected_trainable=w)
      variables.global_variables_initializer().run()
      _assert_summary_tags(self, ["loss"])
      # Zero-initialized logits give uniform softmax: loss = -log(1/3).
      expected_loss = 1.0986123
      _assert_metrics(self, expected_loss, {
          "accuracy": 0.,
          "loss": expected_loss,
          "labels/actual_label_mean/class0": 0. / 1,
          "labels/actual_label_mean/class1": 0. / 1,
          "labels/actual_label_mean/class2": 1. / 1,
          "labels/logits_mean/class0": 0.,
          "labels/logits_mean/class1": 0.,
          "labels/logits_mean/class2": 0.,
          "labels/prediction_mean/class0": 1.,
          "labels/prediction_mean/class1": 0.,
          "labels/prediction_mean/class2": 0.,
          "labels/probability_mean/class0": 0.333333,  # softmax
          "labels/probability_mean/class1": 0.333333,  # softmax
          "labels/probability_mean/class2": 0.333333,  # softmax
      }, model_fn_ops)

  def testMultiClassWithLogitsAndLogitsInput(self):
    # Supplying both logits and logits_input is ambiguous and must fail.
    n_classes = 3
    head = head_lib.multi_class_head(
        n_classes=n_classes, metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      with self.assertRaisesRegexp(
          ValueError, "Both logits and logits_input supplied"):
        head.create_model_fn_ops(
            {}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,
            logits_input=((0., 0.),), logits=self._logits)

  def testMultiClassEnableCenteredBias(self):
    # Centered bias creates one trainable weight plus its Adagrad slot, and
    # one bias summary per class.
    n_classes = 3
    head = head_lib.multi_class_head(
        n_classes=n_classes, enable_centered_bias=True)
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,
          logits=self._logits)
      self._assert_output_alternatives(model_fn_ops)
      _assert_variables(
          self,
          expected_global=(
              "multi_class_head/centered_bias_weight:0",
              ("multi_class_head/multi_class_head/centered_bias_weight/"
               "Adagrad:0"),
          ),
          expected_trainable=("multi_class_head/centered_bias_weight:0",))
      variables.global_variables_initializer().run()
      _assert_summary_tags(self,
                           ["loss",
                            "multi_class_head/centered_bias/bias_0",
                            "multi_class_head/centered_bias/bias_1",
                            "multi_class_head/centered_bias/bias_2"])

  def testMultiClassEval(self):
    # EVAL mode produces metrics but no train_op.
    n_classes = 3
    head = head_lib.multi_class_head(
        n_classes=n_classes, metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {}, model_fn.ModeKeys.EVAL, self._labels, head_lib.no_op_train_fn,
          logits=self._logits)
      self._assert_output_alternatives(model_fn_ops)
      self.assertIsNone(model_fn_ops.train_op)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = 1.5514447
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)

  def testMultiClassEvalModeWithLargeLogits(self):
    n_classes = 3
    head = head_lib.multi_class_head(
        n_classes=n_classes, metric_class_ids=range(n_classes))
    logits = ((2., 0., -1),)
    with ops.Graph().as_default(), session.Session():
      # Softmax cross-entropy on the more-confident (wrong) logits:
      # -log(softmax(logits)[2]) ~= 3.1698461.
      model_fn_ops = head.create_model_fn_ops(
          {}, model_fn.ModeKeys.EVAL, self._labels, head_lib.no_op_train_fn,
          logits=logits)
      self._assert_output_alternatives(model_fn_ops)
      self.assertIsNone(model_fn_ops.train_op)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = 3.1698461
      expected_eval_metrics = {
          "accuracy": 0.,
          "loss": expected_loss,
          "labels/actual_label_mean/class0": 0. / 1,
          "labels/actual_label_mean/class1": 0. / 1,
          "labels/actual_label_mean/class2": 1. / 1,
          "labels/logits_mean/class0": logits[0][0],
          "labels/logits_mean/class1": logits[0][1],
          "labels/logits_mean/class2": logits[0][2],
          "labels/prediction_mean/class0": 1,
          "labels/prediction_mean/class1": 0,
          "labels/prediction_mean/class2": 0,
          "labels/probability_mean/class0": 0.843795,  # softmax
          "labels/probability_mean/class1": 0.114195,  # softmax
          "labels/probability_mean/class2": 0.0420101,  # softmax
      }
      _assert_metrics(self, expected_loss,
                      expected_eval_metrics, model_fn_ops)

  def testMultiClassWithScalarWeight(self):
    # A scalar weight scales the training loss uniformly.
    n_classes = 3
    head = head_lib.multi_class_head(
        n_classes=n_classes,
        weight_column_name="label_weight",
        metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      weight = .1
      model_fn_ops = head.create_model_fn_ops(
          features={"label_weight": weight},
          labels=self._labels,
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=head_lib.no_op_train_fn,
          logits=self._logits)
      self._assert_output_alternatives(model_fn_ops)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = 1.5514447
      _assert_metrics(self, expected_loss * weight,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)

  def testMultiClassWith1DWeight(self):
    n_classes = 3
    head = head_lib.multi_class_head(
        n_classes=n_classes,
        weight_column_name="label_weight",
        metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      weight = .1
      weights = (weight,)
      model_fn_ops = head.create_model_fn_ops(
          features={"label_weight": weights},
          labels=self._labels,
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=head_lib.no_op_train_fn,
          logits=self._logits)
      self._assert_output_alternatives(model_fn_ops)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = 1.5514447
      _assert_metrics(self, expected_loss * weight,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)

  def testMultiClassWith2DWeight(self):
    n_classes = 3
    head = head_lib.multi_class_head(
        n_classes=n_classes,
        weight_column_name="label_weight",
        metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      weight = .1
      weights = ((weight,),)
      model_fn_ops = head.create_model_fn_ops(
          features={"label_weight": weights},
          labels=self._labels,
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=head_lib.no_op_train_fn,
          logits=self._logits)
      self._assert_output_alternatives(model_fn_ops)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = 1.5514447
      _assert_metrics(self, expected_loss * weight,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)

  def testMultiClassWithCustomLoss(self):
    n_classes = 3
    head = head_lib.multi_class_head(
        n_classes=n_classes,
        weight_column_name="label_weight",
        metric_class_ids=range(n_classes),
        loss_fn=losses_lib.sparse_softmax_cross_entropy)
    with ops.Graph().as_default(), session.Session():
      weight = .1
      model_fn_ops = head.create_model_fn_ops(
          features={"label_weight": weight},
          labels=self._labels,
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=head_lib.no_op_train_fn,
          logits=self._logits)
      self._assert_output_alternatives(model_fn_ops)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = 1.5514447 * weight
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)

  def testMultiClassInfer(self):
    # INFER mode exposes per-head serving outputs (classes + probabilities).
    n_classes = 3
    head = head_lib._multi_class_head(
        n_classes=n_classes,
        head_name="head_name")
    with ops.Graph().as_default():
      model_fn_ops = head.create_model_fn_ops(
          features={},
          mode=model_fn.ModeKeys.INFER,
          train_op_fn=head_lib.no_op_train_fn,
          logits=((1., 0., 0.), (0., 0., 1.),))
      with session.Session():
        lookup_ops.tables_initializer().run()
        self.assertAllEqual(
            [0, 2],
            model_fn_ops.predictions["classes"].eval())
        self.assertItemsEqual(
            ["head_name"], six.iterkeys(model_fn_ops.output_alternatives))
        self.assertEqual(
            constants.ProblemType.CLASSIFICATION,
            model_fn_ops.output_alternatives["head_name"][0])
        predictions_for_serving = (
            model_fn_ops.output_alternatives["head_name"][1])
        self.assertIn("classes", six.iterkeys(predictions_for_serving))
        self.assertAllEqual(
            [[b"0", b"1", b"2"], [b"0", b"1", b"2"]],
            predictions_for_serving["classes"].eval())
        self.assertIn("probabilities", six.iterkeys(predictions_for_serving))
        self.assertAllClose(
            [[0.576117, 0.2119416, 0.2119416],
             [0.2119416, 0.2119416, 0.576117]],
            predictions_for_serving["probabilities"].eval())

  def testInvalidNClasses(self):
    for n_classes in (None, -1, 0, 1):
      with self.assertRaisesRegexp(ValueError, "n_classes must be > 1"):
        head_lib.multi_class_head(n_classes=n_classes)

  def testMultiClassWithLabelKeysInvalidShape(self):
    with self.assertRaisesRegexp(
        ValueError, "Length of label_keys must equal n_classes"):
      head_lib._multi_class_head(
          n_classes=3, label_keys=("key0", "key1"))

  def testMultiClassWithLabelKeysTwoClasses(self):
    with self.assertRaisesRegexp(
        ValueError, "label_keys is not supported for n_classes=2"):
      head_lib._multi_class_head(
          n_classes=2, label_keys=("key0", "key1"))

  def testMultiClassWithLabelKeysInfer(self):
    # With label_keys, predicted classes are emitted as the key strings.
    n_classes = 3
    label_keys = ("key0", "key1", "key2")
    head = head_lib._multi_class_head(
        n_classes=n_classes, label_keys=label_keys,
        metric_class_ids=range(n_classes),
        head_name="head_name")
    with ops.Graph().as_default():
      model_fn_ops = head.create_model_fn_ops(
          features={},
          mode=model_fn.ModeKeys.INFER,
          train_op_fn=head_lib.no_op_train_fn,
          logits=((1., 0., 0.), (0., 0., 1.),))
      with session.Session():
        lookup_ops.tables_initializer().run()
        self.assertAllEqual(
            [b"key0", b"key2"],
            model_fn_ops.predictions["classes"].eval())
        self.assertItemsEqual(
            ["head_name"], six.iterkeys(model_fn_ops.output_alternatives))
        self.assertEqual(
            constants.ProblemType.CLASSIFICATION,
            model_fn_ops.output_alternatives["head_name"][0])
        predictions_for_serving = (
            model_fn_ops.output_alternatives["head_name"][1])
        self.assertIn("classes", six.iterkeys(predictions_for_serving))
        self.assertAllEqual(
            [[b"key0", b"key1", b"key2"], [b"key0", b"key1", b"key2"]],
            predictions_for_serving["classes"].eval())
        self.assertIn("probabilities", six.iterkeys(predictions_for_serving))
        self.assertAllClose(
            [[0.576117, 0.2119416, 0.2119416],
             [0.2119416, 0.2119416, 0.576117]],
            predictions_for_serving["probabilities"].eval())

  def testMultiClassWithLabelKeysEvalAccuracy0(self):
    # String label "key2" vs logits favoring class 0 -> accuracy 0.
    n_classes = 3
    label_keys = ("key0", "key1", "key2")
    head = head_lib._multi_class_head(
        n_classes=n_classes,
        label_keys=label_keys)
    with ops.Graph().as_default():
      model_fn_ops = head.create_model_fn_ops(
          features={},
          mode=model_fn.ModeKeys.EVAL,
          labels=("key2",),
          train_op_fn=head_lib.no_op_train_fn,
          logits=((1., 0., 0.),))
      with session.Session():
        lookup_ops.tables_initializer().run()
        self.assertIsNone(model_fn_ops.train_op)
        _assert_no_variables(self)
        _assert_summary_tags(self, ["loss"])
        expected_loss = 1.5514447
        expected_eval_metrics = {
            "accuracy": 0.,
            "loss": expected_loss,
        }
        _assert_metrics(self, expected_loss,
                        expected_eval_metrics, model_fn_ops)

  def testMultiClassWithLabelKeysEvalAccuracy1(self):
    # String label "key2" vs logits favoring class 2 -> accuracy 1.
    n_classes = 3
    label_keys = ("key0", "key1", "key2")
    head = head_lib._multi_class_head(
        n_classes=n_classes,
        label_keys=label_keys)
    with ops.Graph().as_default():
      model_fn_ops = head.create_model_fn_ops(
          features={},
          mode=model_fn.ModeKeys.EVAL,
          labels=("key2",),
          train_op_fn=head_lib.no_op_train_fn,
          logits=((0., 0., 1.),))
      with session.Session():
        lookup_ops.tables_initializer().run()
        self.assertIsNone(model_fn_ops.train_op)
        _assert_no_variables(self)
        _assert_summary_tags(self, ["loss"])
        expected_loss = 0.5514447
        expected_eval_metrics = {
            "accuracy": 1.,
            "loss": expected_loss,
        }
        _assert_metrics(self, expected_loss,
                        expected_eval_metrics, model_fn_ops)
class BinarySvmHeadTest(test.TestCase):
  """Tests for `head_lib.binary_svm_head` (hinge-loss binary classifier)."""

  def _assert_output_alternatives(self, model_fn_ops):
    self.assertEquals({
        None: constants.ProblemType.LOGISTIC_REGRESSION
    }, {
        k: v[0] for k, v in six.iteritems(model_fn_ops.output_alternatives)
    })

  def setUp(self):
    # Prediction for first example is in the right side of the hyperplane
    # (i.e., < 0) but it is within the [-1,1] margin. There is a 0.5 loss
    # incurred by this example. The 2nd prediction is outside the margin so it
    # incurs no loss at all.
    self._predictions = ((-.5,), (1.2,))
    self._labels = (0, 1)
    self._expected_losses = (.5, 0.)

  def testBinarySVMWithLogits(self):
    head = head_lib.binary_svm_head()
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {},
          model_fn.ModeKeys.TRAIN,
          self._labels,
          head_lib.no_op_train_fn,
          logits=self._predictions)
      self._assert_output_alternatives(model_fn_ops)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = np.average(self._expected_losses)
      _assert_metrics(self, expected_loss, {
          "accuracy": 1.,
          "loss": expected_loss,
      }, model_fn_ops)

  def testBinarySVMWithInvalidLogits(self):
    # SVM head requires one logit per example; a 2-column logits must fail.
    head = head_lib.binary_svm_head()
    with ops.Graph().as_default(), session.Session():
      with self.assertRaisesRegexp(ValueError, "Dimensions.*not compatible"):
        head.create_model_fn_ops(
            {}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,
            logits=np.ones((2, 2)))

  def testBinarySVMWithLogitsInput(self):
    # `logits_input` makes the head build its own logits layer.
    head = head_lib.binary_svm_head()
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {},
          model_fn.ModeKeys.TRAIN,
          self._labels,
          head_lib.no_op_train_fn,
          logits_input=((0., 0.), (0., 0.)))
      self._assert_output_alternatives(model_fn_ops)
      w = ("binary_svm_head/logits/weights:0",
           "binary_svm_head/logits/biases:0")
      _assert_variables(
          self, expected_global=w, expected_model=w, expected_trainable=w)
      variables.global_variables_initializer().run()
      _assert_summary_tags(self, ["loss"])
      # Zero logits sit exactly on the decision boundary: hinge loss 1.
      expected_loss = 1.
      _assert_metrics(self, expected_loss, {
          "accuracy": .5,
          "loss": expected_loss,
      }, model_fn_ops)

  def testBinarySVMWithLogitsAndLogitsInput(self):
    head = head_lib.binary_svm_head()
    with ops.Graph().as_default(), session.Session():
      with self.assertRaisesRegexp(
          ValueError, "Both logits and logits_input supplied"):
        head.create_model_fn_ops(
            {},
            model_fn.ModeKeys.TRAIN,
            self._labels,
            head_lib.no_op_train_fn,
            logits_input=((0., 0.), (0., 0.)),
            logits=self._predictions)

  def testBinarySVMEvalMode(self):
    head = head_lib.binary_svm_head()
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {},
          model_fn.ModeKeys.EVAL,
          self._labels,
          head_lib.no_op_train_fn,
          logits=self._predictions)
      self._assert_output_alternatives(model_fn_ops)
      self.assertIsNone(model_fn_ops.train_op)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = np.average(self._expected_losses)
      _assert_metrics(self, expected_loss, {
          "accuracy": 1.,
          "loss": expected_loss,
      }, model_fn_ops)

  def testBinarySVMWithLabelName(self):
    label_name = "my_label"
    head = head_lib.binary_svm_head(label_name=label_name)
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {},
          model_fn.ModeKeys.TRAIN,
          {label_name: self._labels},
          head_lib.no_op_train_fn,
          logits=self._predictions)
      self._assert_output_alternatives(model_fn_ops)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = np.average(self._expected_losses)
      _assert_metrics(self, expected_loss, {
          "accuracy": 1.,
          "loss": expected_loss,
      }, model_fn_ops)

  def testBinarySVMWith1DWeights(self):
    head = head_lib.binary_svm_head(weight_column_name="weights")
    with ops.Graph().as_default(), session.Session():
      weights = (7., 11.)
      model_fn_ops = head.create_model_fn_ops(
          # Rank-1 weights: one scalar per example.
          features={"weights": weights},
          mode=model_fn.ModeKeys.TRAIN,
          labels=self._labels,
          train_op_fn=head_lib.no_op_train_fn,
          logits=self._predictions)
      self._assert_output_alternatives(model_fn_ops)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_weighted_losses = np.multiply(weights, self._expected_losses)
      _assert_metrics(self, np.mean(expected_weighted_losses), {
          "accuracy": 1.,
          "loss": np.sum(expected_weighted_losses) / np.sum(weights),
      }, model_fn_ops)

  def testBinarySVMWith2DWeights(self):
    head = head_lib.binary_svm_head(weight_column_name="weights")
    with ops.Graph().as_default(), session.Session():
      weights = (7., 11.)
      model_fn_ops = head.create_model_fn_ops(
          # We have to add an extra dim here for weights broadcasting to work.
          features={"weights": tuple([(w,) for w in weights])},
          mode=model_fn.ModeKeys.TRAIN,
          labels=self._labels,
          train_op_fn=head_lib.no_op_train_fn,
          logits=self._predictions)
      self._assert_output_alternatives(model_fn_ops)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_weighted_losses = np.multiply(weights, self._expected_losses)
      _assert_metrics(self, np.mean(expected_weighted_losses), {
          "accuracy": 1.,
          "loss": np.sum(expected_weighted_losses) / np.sum(weights),
      }, model_fn_ops)

  def testBinarySVMWithCenteredBias(self):
    head = head_lib.binary_svm_head(enable_centered_bias=True)
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {},
          model_fn.ModeKeys.TRAIN,
          self._labels,
          head_lib.no_op_train_fn,
          logits=self._predictions)
      self._assert_output_alternatives(model_fn_ops)
      # Centered-bias weight plus its Adagrad optimizer slot.
      _assert_variables(
          self,
          expected_global=(
              "binary_svm_head/centered_bias_weight:0",
              ("binary_svm_head/binary_svm_head/centered_bias_weight/"
               "Adagrad:0"),
          ),
          expected_trainable=("binary_svm_head/centered_bias_weight:0",))
      variables.global_variables_initializer().run()
      _assert_summary_tags(self, [
          "loss",
          "binary_svm_head/centered_bias/bias_0"
      ])
      expected_loss = np.average(self._expected_losses)
      _assert_metrics(self, expected_loss, {
          "accuracy": 1.,
          "loss": expected_loss,
      }, model_fn_ops)
class LossOnlyHead(test.TestCase):
  """Tests for `head_lib.loss_only_head`: contributes a loss, nothing else."""

  def testNoPredictionsAndNoMetrics(self):
    head = head_lib.loss_only_head(lambda: 1, head_name="const")
    model_fn_ops = head.create_model_fn_ops(
        features={},
        mode=model_fn.ModeKeys.TRAIN,
        train_op_fn=head_lib.no_op_train_fn)
    # A loss-only head emits no predictions and no eval metrics ...
    self.assertDictEqual(model_fn_ops.predictions, {})
    self.assertDictEqual(model_fn_ops.eval_metric_ops, {})
    # ... but does produce the constant loss supplied above.
    self.assertIsNotNone(model_fn_ops.loss)
    with session.Session() as sess:
      self.assertEqual(1, sess.run(model_fn_ops.loss))
class MultiHeadTest(test.TestCase):
  """Tests for `head_lib.multi_head`, which combines several named heads."""

  def testInvalidHeads(self):
    # Every component head must carry a head_name.
    named_head = head_lib.multi_class_head(
        n_classes=3, label_name="label", head_name="head1")
    unnamed_head = head_lib.multi_class_head(
        n_classes=4, label_name="label")
    with self.assertRaisesRegexp(ValueError, "must have names"):
      head_lib.multi_head((named_head, unnamed_head))

  def testTrainWithNoneTrainOpFn(self):
    head1 = head_lib.multi_class_head(
        n_classes=3, label_name="label1", head_name="head1")
    head2 = head_lib.multi_class_head(
        n_classes=4, label_name="label2", head_name="head2")
    head = head_lib.multi_head((head1, head2))
    labels = {
        "label1": (1,),
        "label2": (1,)
    }
    with self.assertRaisesRegexp(
        ValueError, "train_op_fn can not be None in TRAIN mode"):
      head.create_model_fn_ops(
          features={"weights": (2.0, 10.0)},
          labels=labels,
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=None,
          logits=((-0.7, 0.2, .1, .1, .1, .1, .1),))

  def testTrain_withNoHeadWeights(self):
    # Without head weights, the combined loss is the plain sum of head losses.
    head1 = head_lib.multi_class_head(
        n_classes=3, label_name="label1", head_name="head1")
    head2 = head_lib.multi_class_head(
        n_classes=4, label_name="label2", head_name="head2")
    head3 = head_lib.loss_only_head(lambda: 1.0, head_name="const")
    head = head_lib.multi_head((head1, head2, head3))
    labels = {
        "label1": (1,),
        "label2": (1,)
    }
    model_fn_ops = head.create_model_fn_ops(
        features={"weights": (2.0, 10.0)},
        labels=labels,
        mode=model_fn.ModeKeys.TRAIN,
        train_op_fn=head_lib.no_op_train_fn,
        logits=((-0.7, 0.2, .1, .1, .1, .1, .1),))
    self.assertIsNone(model_fn_ops.predictions)
    self.assertIsNotNone(model_fn_ops.loss)
    self.assertIsNotNone(model_fn_ops.train_op)
    self.assertTrue(model_fn_ops.eval_metric_ops)
    self.assertIsNone(model_fn_ops.output_alternatives)
    with session.Session() as sess:
      self.assertAlmostEqual(3.224, sess.run(model_fn_ops.loss), places=3)

  def testTrain_withHeadWeights(self):
    # Head weights (1, .5) scale each head's loss before summing.
    head1 = head_lib.multi_class_head(
        n_classes=3, label_name="label1", head_name="head1")
    head2 = head_lib.multi_class_head(
        n_classes=4, label_name="label2", head_name="head2")
    head = head_lib.multi_head((head1, head2), (1, .5))
    labels = {
        "label1": (1,),
        "label2": (1,)
    }
    model_fn_ops = head.create_model_fn_ops(
        features={"weights": (2.0, 10.0)},
        labels=labels,
        mode=model_fn.ModeKeys.TRAIN,
        train_op_fn=head_lib.no_op_train_fn,
        logits=((-0.7, 0.2, .1, .1, .1, .1, .1),))
    self.assertIsNone(model_fn_ops.predictions)
    self.assertIsNotNone(model_fn_ops.loss)
    self.assertIsNotNone(model_fn_ops.train_op)
    self.assertTrue(model_fn_ops.eval_metric_ops)
    self.assertIsNone(model_fn_ops.output_alternatives)
    with session.Session() as sess:
      self.assertAlmostEqual(1.531, sess.run(model_fn_ops.loss), places=3)

  def testTrain_withDictLogits(self):
    # Logits may also be supplied per-head as a dict keyed by head_name.
    head1 = head_lib.multi_class_head(
        n_classes=3, label_name="label1", head_name="head1")
    head2 = head_lib.multi_class_head(
        n_classes=4, label_name="label2", head_name="head2")
    head = head_lib.multi_head((head1, head2))
    labels = {
        "label1": (1,),
        "label2": (1,)
    }
    model_fn_ops = head.create_model_fn_ops(
        features={"weights": (2.0, 10.0)},
        labels=labels,
        mode=model_fn.ModeKeys.TRAIN,
        train_op_fn=head_lib.no_op_train_fn,
        logits={head1.head_name: ((-0.7, 0.2, .1),),
                head2.head_name: ((.1, .1, .1, .1),)})
    self.assertIsNone(model_fn_ops.predictions)
    self.assertIsNotNone(model_fn_ops.loss)
    self.assertIsNotNone(model_fn_ops.train_op)
    self.assertTrue(model_fn_ops.eval_metric_ops)
    self.assertIsNone(model_fn_ops.output_alternatives)
    with session.Session() as sess:
      self.assertAlmostEqual(2.224, sess.run(model_fn_ops.loss), places=3)

  def testInfer(self):
    head1 = head_lib.multi_class_head(
        n_classes=3, label_name="label1", head_name="head1")
    head2 = head_lib.multi_class_head(
        n_classes=4, label_name="label2", head_name="head2")
    head = head_lib.multi_head((head1, head2), (1, .5))
    labels = {
        "label1": (1,),
        "label2": (1,)
    }
    model_fn_ops = head.create_model_fn_ops(
        features={"weights": (2.0, 10.0)},
        labels=labels,
        mode=model_fn.ModeKeys.INFER,
        train_op_fn=head_lib.no_op_train_fn,
        logits=((-0.7, 0.2, .1, .1, .1, .1, .1),))
    self.assertIsNotNone(model_fn_ops.predictions)
    self.assertIsNone(model_fn_ops.loss)
    self.assertIsNone(model_fn_ops.train_op)
    self.assertFalse(model_fn_ops.eval_metric_ops)

    # Tests predictions keys.
    self.assertItemsEqual((
        ("head1", prediction_key.PredictionKey.LOGITS),
        ("head1", prediction_key.PredictionKey.PROBABILITIES),
        ("head1", prediction_key.PredictionKey.CLASSES),
        ("head2", prediction_key.PredictionKey.LOGITS),
        ("head2", prediction_key.PredictionKey.PROBABILITIES),
        ("head2", prediction_key.PredictionKey.CLASSES),
    ), model_fn_ops.predictions.keys())

    # Tests output alternative.
    self.assertEquals({
        "head1": constants.ProblemType.CLASSIFICATION,
        "head2": constants.ProblemType.CLASSIFICATION,
    }, {
        k: v[0] for k, v in six.iteritems(model_fn_ops.output_alternatives)
    })
    self.assertItemsEqual((
        prediction_key.PredictionKey.PROBABILITIES,
        prediction_key.PredictionKey.CLASSES,
    ), model_fn_ops.output_alternatives["head1"][1].keys())
    self.assertItemsEqual((
        prediction_key.PredictionKey.PROBABILITIES,
        prediction_key.PredictionKey.CLASSES,
    ), model_fn_ops.output_alternatives["head2"][1].keys())

  def testEval(self):
    head1 = head_lib.multi_class_head(
        n_classes=3, label_name="label1", head_name="head1")
    head2 = head_lib.multi_class_head(
        n_classes=4, label_name="label2", head_name="head2")
    head = head_lib.multi_head((head1, head2), (1, .5))
    labels = {
        "label1": (1,),
        "label2": (1,)
    }
    model_fn_ops = head.create_model_fn_ops(
        features={"weights": (2.0, 10.0)},
        labels=labels,
        mode=model_fn.ModeKeys.EVAL,
        train_op_fn=head_lib.no_op_train_fn,
        logits=((-0.7, 0.2, .1, .1, .1, .1, .1),))
    self.assertIsNotNone(model_fn_ops.predictions)
    self.assertIsNotNone(model_fn_ops.loss)
    self.assertIsNone(model_fn_ops.train_op)
    self.assertIsNotNone(model_fn_ops.eval_metric_ops)
    self.assertIsNone(model_fn_ops.output_alternatives)

    metric_ops = model_fn_ops.eval_metric_ops

    # Tests eval keys: each head's metrics are suffixed with its head_name.
    self.assertIn("accuracy/head1", metric_ops.keys())
    self.assertIn("accuracy/head2", metric_ops.keys())
def _sigmoid_cross_entropy(labels, logits, weights):
  """Adapter exposing the (labels, logits, weights) loss_fn signature."""
  return losses_lib.sigmoid_cross_entropy(labels, logits, weights)
# Run all test cases in this module when executed directly.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
jdemel/gnuradio | gr-fft/python/fft/qa_goertzel.py | 3 | 1570 | #!/usr/bin/env python
#
# Copyright 2006,2007,2010,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from __future__ import division
from math import pi, cos
from gnuradio import gr, gr_unittest, fft, blocks
class test_goertzel(gr_unittest.TestCase):
    """Tests for the single-bin Goertzel DFT block (fft.goertzel_fc)."""

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        # Drop the flowgraph so each test starts from a clean slate.
        self.tb = None

    def make_tone_data(self, rate, freq):
        """Return one second of a unit-amplitude cosine at `freq` Hz, sampled at `rate` Hz."""
        return [cos(2*pi*x*freq/rate) for x in range(rate)]

    def transform(self, src_data, rate, freq):
        """Run `src_data` through a Goertzel filter tuned to `freq` and return its output."""
        src = blocks.vector_source_f(src_data, False)
        dft = fft.goertzel_fc(rate, rate, freq)
        dst = blocks.vector_sink_c()
        self.tb.connect(src, dft, dst)
        self.tb.run()
        return dst.data()

    def test_001(self):  # Measure single tone magnitude
        # The Goertzel bin centered on the tone should report half the
        # tone's amplitude (0.5 for a unit cosine).
        rate = 8000
        freq = 100
        # Renamed from `bin`, which shadowed the builtin of the same name.
        bin_freq = freq
        src_data = self.make_tone_data(rate, freq)
        expected_result = 0.5
        actual_result = abs(self.transform(src_data, rate, bin_freq)[0])
        self.assertAlmostEqual(expected_result, actual_result, places=4)

    def test_002(self):  # Measure off frequency magnitude
        # A bin halfway off the tone frequency should see (nearly) zero
        # energy after one full second of integration.
        rate = 8000
        freq = 100
        bin_freq = freq / 2
        src_data = self.make_tone_data(rate, freq)
        expected_result = 0.0
        actual_result = abs(self.transform(src_data, rate, bin_freq)[0])
        self.assertAlmostEqual(expected_result, actual_result, places=4)
# Run the test suite via GNU Radio's unittest wrapper when executed directly.
if __name__ == '__main__':
    gr_unittest.run(test_goertzel, "test_goertzel.xml")
| gpl-3.0 |
errx/django | django/contrib/gis/gdal/srs.py | 35 | 11986 | """
The Spatial Reference class represents OGR Spatial Reference objects.
Example:
>>> from django.contrib.gis.gdal import SpatialReference
>>> srs = SpatialReference('WGS84')
>>> print(srs)
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
TOWGS84[0,0,0,0,0,0,0],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.01745329251994328,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]
>>> print(srs.proj)
+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs
>>> print(srs.ellipsoid)
(6378137.0, 6356752.3142451793, 298.25722356300003)
>>> print(srs.projected, srs.geographic)
False True
>>> srs.import_epsg(32140)
>>> print(srs.name)
NAD83 / Texas South Central
"""
from ctypes import byref, c_char_p, c_int
# Getting the error checking routine and exceptions
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import SRSException
from django.contrib.gis.gdal.prototypes import srs as capi
from django.utils import six
from django.utils.encoding import force_bytes
#### Spatial Reference class. ####
class SpatialReference(GDALBase):
    """
    A wrapper for the OGRSpatialReference object. According to the GDAL Web site,
    the SpatialReference object "provide[s] services to represent coordinate
    systems (projections and datums) and to transform between them."
    """

    #### Python 'magic' routines ####
    def __init__(self, srs_input=''):
        """
        Creates a GDAL OSR Spatial Reference object from the given input.
        The input may be string of OGC Well Known Text (WKT), an integer
        EPSG code, a PROJ.4 string, and/or a projection "well known" shorthand
        string (one of 'WGS84', 'WGS72', 'NAD27', 'NAD83').
        """
        srs_type = 'user'

        if isinstance(srs_input, six.string_types):
            # Encoding to ASCII if unicode passed in.
            if isinstance(srs_input, six.text_type):
                srs_input = srs_input.encode('ascii')
            try:
                # If SRID is a string, e.g., '4326', then make acceptable
                # as user input.
                srid = int(srs_input)
                srs_input = 'EPSG:%d' % srid
            except ValueError:
                pass
        elif isinstance(srs_input, six.integer_types):
            # EPSG integer code was input.
            srs_type = 'epsg'
        elif isinstance(srs_input, self.ptr_type):
            srs = srs_input
            srs_type = 'ogr'
        else:
            raise TypeError('Invalid SRS type "%s"' % srs_type)

        if srs_type == 'ogr':
            # Input is already an SRS pointer.
            srs = srs_input
        else:
            # Creating a new SRS pointer, using the string buffer.
            buf = c_char_p(b'')
            srs = capi.new_srs(buf)

        # If the pointer is NULL, throw an exception.
        if not srs:
            raise SRSException('Could not create spatial reference from: %s' % srs_input)
        else:
            self.ptr = srs

        # Importing from either the user input string or an integer SRID.
        if srs_type == 'user':
            self.import_user_input(srs_input)
        elif srs_type == 'epsg':
            self.import_epsg(srs_input)

    def __del__(self):
        "Destroys this spatial reference."
        if self._ptr:
            capi.release_srs(self._ptr)

    def __getitem__(self, target):
        """
        Returns the value of the given string attribute node, None if the node
        doesn't exist. Can also take a tuple as a parameter, (target, child),
        where child is the index of the attribute in the WKT. For example:

        >>> wkt = 'GEOGCS["WGS 84", DATUM["WGS_1984, ... AUTHORITY["EPSG","4326"]]'
        >>> srs = SpatialReference(wkt) # could also use 'WGS84', or 4326
        >>> print(srs['GEOGCS'])
        WGS 84
        >>> print(srs['DATUM'])
        WGS_1984
        >>> print(srs['AUTHORITY'])
        EPSG
        >>> print(srs['AUTHORITY', 1]) # The authority value
        4326
        >>> print(srs['TOWGS84', 4]) # the fourth value in this wkt
        0
        >>> print(srs['UNIT|AUTHORITY']) # For the units authority, have to use the pipe symbole.
        EPSG
        >>> print(srs['UNIT|AUTHORITY', 1]) # The authority value for the units
        9122
        """
        if isinstance(target, tuple):
            return self.attr_value(*target)
        else:
            return self.attr_value(target)

    def __str__(self):
        "The string representation uses 'pretty' WKT."
        return self.pretty_wkt

    #### SpatialReference Methods ####
    def attr_value(self, target, index=0):
        """
        The attribute value for the given target node (e.g. 'PROJCS'). The index
        keyword specifies an index of the child node to return.
        """
        if not isinstance(target, six.string_types) or not isinstance(index, int):
            raise TypeError
        return capi.get_attr_value(self.ptr, force_bytes(target), index)

    def auth_name(self, target):
        "Returns the authority name for the given string target node."
        return capi.get_auth_name(self.ptr, force_bytes(target))

    def auth_code(self, target):
        "Returns the authority code for the given string target node."
        return capi.get_auth_code(self.ptr, force_bytes(target))

    def clone(self):
        "Returns a clone of this SpatialReference object."
        return SpatialReference(capi.clone_srs(self.ptr))

    def from_esri(self):
        "Morphs this SpatialReference from ESRI's format to EPSG."
        capi.morph_from_esri(self.ptr)

    def identify_epsg(self):
        """
        This method inspects the WKT of this SpatialReference, and will
        add EPSG authority nodes where an EPSG identifier is applicable.
        """
        capi.identify_epsg(self.ptr)

    def to_esri(self):
        "Morphs this SpatialReference to ESRI's format."
        capi.morph_to_esri(self.ptr)

    def validate(self):
        "Checks to see if the given spatial reference is valid."
        capi.srs_validate(self.ptr)

    #### Name & SRID properties ####
    @property
    def name(self):
        "Returns the name of this Spatial Reference."
        if self.projected:
            return self.attr_value('PROJCS')
        elif self.geographic:
            return self.attr_value('GEOGCS')
        elif self.local:
            return self.attr_value('LOCAL_CS')
        else:
            return None

    @property
    def srid(self):
        "Returns the SRID of top-level authority, or None if undefined."
        try:
            return int(self.attr_value('AUTHORITY', 1))
        except (TypeError, ValueError):
            return None

    #### Unit Properties ####
    @property
    def linear_name(self):
        "Returns the name of the linear units."
        units, name = capi.linear_units(self.ptr, byref(c_char_p()))
        return name

    @property
    def linear_units(self):
        "Returns the value of the linear units."
        units, name = capi.linear_units(self.ptr, byref(c_char_p()))
        return units

    @property
    def angular_name(self):
        "Returns the name of the angular units."
        units, name = capi.angular_units(self.ptr, byref(c_char_p()))
        return name

    @property
    def angular_units(self):
        "Returns the value of the angular units."
        units, name = capi.angular_units(self.ptr, byref(c_char_p()))
        return units

    @property
    def units(self):
        """
        Returns a 2-tuple of the units value and the units name, and
        automatically determines whether to return the linear or angular units.
        """
        units, name = None, None
        if self.projected or self.local:
            units, name = capi.linear_units(self.ptr, byref(c_char_p()))
        elif self.geographic:
            units, name = capi.angular_units(self.ptr, byref(c_char_p()))
        if name is not None:
            # Bug fix: str.decode() returns a new object rather than mutating
            # in place, so the decoded value must be assigned back; previously
            # the result was discarded and raw bytes were returned.
            name = name.decode()
        return (units, name)

    #### Spheroid/Ellipsoid Properties ####
    @property
    def ellipsoid(self):
        """
        Returns a tuple of the ellipsoid parameters:
        (semimajor axis, semiminor axis, and inverse flattening)
        """
        return (self.semi_major, self.semi_minor, self.inverse_flattening)

    @property
    def semi_major(self):
        "Returns the Semi Major Axis for this Spatial Reference."
        return capi.semi_major(self.ptr, byref(c_int()))

    @property
    def semi_minor(self):
        "Returns the Semi Minor Axis for this Spatial Reference."
        return capi.semi_minor(self.ptr, byref(c_int()))

    @property
    def inverse_flattening(self):
        "Returns the Inverse Flattening for this Spatial Reference."
        return capi.invflattening(self.ptr, byref(c_int()))

    #### Boolean Properties ####
    @property
    def geographic(self):
        """
        Returns True if this SpatialReference is geographic
        (root node is GEOGCS).
        """
        return bool(capi.isgeographic(self.ptr))

    @property
    def local(self):
        "Returns True if this SpatialReference is local (root node is LOCAL_CS)."
        return bool(capi.islocal(self.ptr))

    @property
    def projected(self):
        """
        Returns True if this SpatialReference is a projected coordinate system
        (root node is PROJCS).
        """
        return bool(capi.isprojected(self.ptr))

    #### Import Routines #####
    def import_epsg(self, epsg):
        "Imports the Spatial Reference from the EPSG code (an integer)."
        capi.from_epsg(self.ptr, epsg)

    def import_proj(self, proj):
        "Imports the Spatial Reference from a PROJ.4 string."
        capi.from_proj(self.ptr, proj)

    def import_user_input(self, user_input):
        "Imports the Spatial Reference from the given user input string."
        capi.from_user_input(self.ptr, force_bytes(user_input))

    def import_wkt(self, wkt):
        "Imports the Spatial Reference from OGC WKT (string)"
        capi.from_wkt(self.ptr, byref(c_char_p(wkt)))

    def import_xml(self, xml):
        "Imports the Spatial Reference from an XML string."
        capi.from_xml(self.ptr, xml)

    #### Export Properties ####
    @property
    def wkt(self):
        "Returns the WKT representation of this Spatial Reference."
        return capi.to_wkt(self.ptr, byref(c_char_p()))

    @property
    def pretty_wkt(self, simplify=0):
        # NOTE: `simplify` can never be passed through property access; it is
        # kept only for signature compatibility with existing callers.
        "Returns the 'pretty' representation of the WKT."
        return capi.to_pretty_wkt(self.ptr, byref(c_char_p()), simplify)

    @property
    def proj(self):
        "Returns the PROJ.4 representation for this Spatial Reference."
        return capi.to_proj(self.ptr, byref(c_char_p()))

    @property
    def proj4(self):
        "Alias for proj()."
        return self.proj

    @property
    def xml(self, dialect=''):
        # NOTE: as with pretty_wkt, `dialect` is unreachable via property access.
        "Returns the XML representation of this Spatial Reference."
        return capi.to_xml(self.ptr, byref(c_char_p()), dialect)
class CoordTransform(GDALBase):
    "The coordinate system transformation object."

    def __init__(self, source, target):
        "Initializes on a source and target SpatialReference objects."
        # Both endpoints must be SpatialReference instances before we hand
        # their pointers to GDAL.
        valid = isinstance(source, SpatialReference) and isinstance(target, SpatialReference)
        if not valid:
            raise TypeError('source and target must be of type SpatialReference')
        self.ptr = capi.new_ct(source._ptr, target._ptr)
        # Cache the names now; the SRS objects may be garbage-collected later.
        self._srs1_name = source.name
        self._srs2_name = target.name

    def __del__(self):
        "Deletes this Coordinate Transformation object."
        if self._ptr:
            capi.destroy_ct(self._ptr)

    def __str__(self):
        return 'Transform from "%s" to "%s"' % (self._srs1_name, self._srs2_name)
| bsd-3-clause |
bloff/ZeroNet | src/Ui/UiRequest.py | 1 | 18331 | import time
import re
import os
import mimetypes
import json
import cgi
from Config import config
from Site import SiteManager
from User import UserManager
from Plugin import PluginManager
from Ui.UiWebsocket import UiWebsocket
# Maps the HTTP status codes this server emits to the full WSGI status line.
status_texts = {
    200: "200 OK",
    400: "400 Bad Request",
    403: "403 Forbidden",
    404: "404 Not Found",
    500: "500 Internal Server Error",
}
@PluginManager.acceptPlugins
class UiRequest(object):
    """Handles a single HTTP request to the ZeroNet UI server.

    Routes the request path to an ``action*`` handler, serves site and UI
    media, renders the site wrapper page and upgrades websocket connections.
    Many handlers are generators: they call ``sendHeader()`` (which invokes
    the WSGI ``start_response``) and then yield the body chunks.
    """

    def __init__(self, server, get, env, start_response):
        if server:
            self.server = server
            self.log = server.log
        self.get = get  # Get parameters
        self.env = env  # Environment settings
        # ['CONTENT_LENGTH', 'CONTENT_TYPE', 'GATEWAY_INTERFACE', 'HTTP_ACCEPT', 'HTTP_ACCEPT_ENCODING', 'HTTP_ACCEPT_LANGUAGE',
        #  'HTTP_COOKIE', 'HTTP_CACHE_CONTROL', 'HTTP_HOST', 'HTTP_HTTPS', 'HTTP_ORIGIN', 'HTTP_PROXY_CONNECTION', 'HTTP_REFERER',
        #  'HTTP_USER_AGENT', 'PATH_INFO', 'QUERY_STRING', 'REMOTE_ADDR', 'REMOTE_PORT', 'REQUEST_METHOD', 'SCRIPT_NAME',
        #  'SERVER_NAME', 'SERVER_PORT', 'SERVER_PROTOCOL', 'SERVER_SOFTWARE', 'werkzeug.request', 'wsgi.errors',
        #  'wsgi.input', 'wsgi.multiprocess', 'wsgi.multithread', 'wsgi.run_once', 'wsgi.url_scheme', 'wsgi.version']
        self.start_response = start_response  # Start response function
        self.user = None  # Lazily resolved by getCurrentUser()

    # Call the request handler function based on the request path
    def route(self, path):
        if config.ui_restrict and self.env['REMOTE_ADDR'] not in config.ui_restrict:  # Restrict UI access by ip
            return self.error403()
        path = re.sub("^http://zero[/]+", "/", path)  # Remove beginning http://zero/ for chrome extension
        path = re.sub("^http://", "/", path)  # Remove beginning http for chrome extension .bit access
        if path == "/":
            return self.actionIndex()
        elif path.endswith("favicon.ico"):
            return self.actionFile("src/Ui/media/img/favicon.ico")
        # Media
        elif path.startswith("/uimedia/"):
            return self.actionUiMedia(path)
        elif path.startswith("/media"):
            return self.actionSiteMedia(path)
        # Websocket
        elif path == "/Websocket":
            return self.actionWebsocket()
        # Debug
        elif path == "/Debug" and config.debug:
            return self.actionDebug()
        elif path == "/Console" and config.debug:
            return self.actionConsole()
        # Site media wrapper
        else:
            if self.get.get("wrapper") == "False":
                return self.actionSiteMedia("/media" + path)  # Only serve html files with frame
            else:
                body = self.actionWrapper(path)
                if body:
                    return body
                else:
                    func = getattr(self, "action" + path.lstrip("/"), None)  # Check if we have action+request_path function
                    if func:
                        return func()
                    else:
                        return self.error404(path)

    # The request is proxied by chrome extension
    def isProxyRequest(self):
        return self.env["PATH_INFO"].startswith("http://")

    # True if the request was made with XMLHttpRequest
    def isAjaxRequest(self):
        return self.env.get("HTTP_X_REQUESTED_WITH") == "XMLHttpRequest"

    # Get mime type by filename; falls back to octet-stream
    def getContentType(self, file_name):
        content_type = mimetypes.guess_type(file_name)[0]
        if not content_type:
            if file_name.endswith("json"):  # Correct json header
                content_type = "application/json"
            else:
                content_type = "application/octet-stream"
        return content_type

    # Return: <dict> Posted variables (only a single form-encoded line is read)
    def getPosted(self):
        if self.env['REQUEST_METHOD'] == "POST":
            return dict(cgi.parse_qsl(
                self.env['wsgi.input'].readline().decode()
            ))
        else:
            return {}

    # Return: <dict> Cookies based on self.env
    def getCookies(self):
        raw_cookies = self.env.get('HTTP_COOKIE')
        if raw_cookies:
            cookies = cgi.parse_qsl(raw_cookies)
            return {key.strip(): val for key, val in cookies}
        else:
            return {}

    # Return the current user, creating one on first access
    def getCurrentUser(self):
        if self.user:
            return self.user  # Cache
        self.user = UserManager.user_manager.get()  # Get user
        if not self.user:
            self.user = UserManager.user_manager.create()
        return self.user

    # Send response headers via the WSGI start_response callable
    # NOTE(review): the mutable default for extra_headers is never mutated
    # here (only iterated), so it is benign — but confirm before refactoring.
    def sendHeader(self, status=200, content_type="text/html", extra_headers=[]):
        if content_type == "text/html":
            content_type = "text/html; charset=utf-8"
        headers = []
        headers.append(("Version", "HTTP/1.1"))
        headers.append(("Access-Control-Allow-Origin", "*"))  # Allow json access
        if self.env["REQUEST_METHOD"] == "OPTIONS":
            # Allow json access
            headers.append(("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, Cookie"))
            headers.append(("Access-Control-Allow-Credentials", "true"))
        cacheable_type = (
            content_type == "text/css" or content_type.startswith("image") or
            self.env["REQUEST_METHOD"] == "OPTIONS" or content_type == "application/javascript"
        )
        if status == 200 and cacheable_type:  # Cache Css, Js, Image files for 10min
            headers.append(("Cache-Control", "public, max-age=600"))  # Cache 10 min
        else:  # Everything else is not cached at all
            headers.append(("Cache-Control", "no-cache, no-store, private, must-revalidate, max-age=0"))  # No caching at all
        headers.append(("Content-Type", content_type))
        for extra_header in extra_headers:
            headers.append(extra_header)
        return self.start_response(status_texts[status], headers)

    # Renders a template file, substituting the keyword arguments
    def render(self, template_path, *args, **kwargs):
        template = open(template_path).read().decode("utf8")
        return template.format(**kwargs).encode("utf8")

    # - Actions -

    # Redirect to an url
    def actionRedirect(self, url):
        self.start_response('301 Redirect', [('Location', url)])
        yield "Location changed: %s" % url

    def actionIndex(self):
        return self.actionRedirect("/" + config.homepage)

    # Render a file from media with iframe site wrapper
    def actionWrapper(self, path, extra_headers=None):
        if not extra_headers:
            extra_headers = []
        match = re.match("/(?P<address>[A-Za-z0-9\._-]+)(?P<inner_path>/.*|$)", path)
        if match:
            address = match.group("address")
            inner_path = match.group("inner_path").lstrip("/")
            if "." in inner_path and not inner_path.endswith(".html"):
                return self.actionSiteMedia("/media" + path)  # Only serve html files with frame
            if self.env.get("HTTP_X_REQUESTED_WITH"):
                return self.error403("Ajax request not allowed to load wrapper")  # No ajax allowed on wrapper
            site = SiteManager.site_manager.get(address)
            if (
                site and site.content_manager.contents.get("content.json") and
                (not site.getReachableBadFiles() or site.settings["own"])
            ):  # Its downloaded or own
                title = site.content_manager.contents["content.json"]["title"]
            else:
                title = "Loading %s..." % address
                site = SiteManager.site_manager.need(address)  # Start download site
                if not site:
                    return False
            return self.renderWrapper(site, path, inner_path, title, extra_headers)
        else:  # Bad url
            return False

    # Generator that emits the wrapper HTML page for a site
    def renderWrapper(self, site, path, inner_path, title, extra_headers):
        self.sendHeader(extra_headers=extra_headers[:])
        file_inner_path = inner_path
        if not file_inner_path:
            file_inner_path = "index.html"  # If inner path defaults to index.html
        address = re.sub("/.*", "", path.lstrip("/"))
        if self.isProxyRequest() and (not path or "/" in path[1:]):
            file_url = re.sub(".*/", "", inner_path)
        else:
            file_url = "/" + address + "/" + inner_path
        # Wrapper variable inits
        query_string = ""
        body_style = ""
        meta_tags = ""
        if self.env.get("QUERY_STRING"):
            query_string = "?" + self.env["QUERY_STRING"] + "&wrapper=False"
        else:
            query_string = "?wrapper=False"
        if self.isProxyRequest():  # Its a remote proxy request
            if self.env["REMOTE_ADDR"] == "127.0.0.1":  # Local client, the server address also should be 127.0.0.1
                server_url = "http://127.0.0.1:%s" % self.env["SERVER_PORT"]
            else:  # Remote client, use SERVER_NAME as server's real address
                server_url = "http://%s:%s" % (self.env["SERVER_NAME"], self.env["SERVER_PORT"])
            homepage = "http://zero/" + config.homepage
        else:  # Use relative path
            server_url = ""
            homepage = "/" + config.homepage
        if site.content_manager.contents.get("content.json"):  # Got content.json
            content = site.content_manager.contents["content.json"]
            if content.get("background-color"):
                body_style += "background-color: %s;" % \
                    cgi.escape(site.content_manager.contents["content.json"]["background-color"], True)
            if content.get("viewport"):
                meta_tags += '<meta name="viewport" id="viewport" content="%s">' % cgi.escape(content["viewport"], True)
        yield self.render(
            "src/Ui/template/wrapper.html",
            server_url=server_url,
            inner_path=inner_path,
            file_url=file_url,
            file_inner_path=file_inner_path,
            address=site.address,
            title=title,
            body_style=body_style,
            meta_tags=meta_tags,
            query_string=query_string,
            wrapper_key=site.settings["wrapper_key"],
            permissions=json.dumps(site.settings["permissions"]),
            show_loadingscreen=json.dumps(not site.storage.isFile(file_inner_path)),
            rev=config.rev,
            homepage=homepage
        )

    # Returns if media request allowed from that referer
    def isMediaRequestAllowed(self, site_address, referer):
        referer_path = re.sub("http[s]{0,1}://.*?/", "/", referer).replace("/media", "")  # Remove site address
        return referer_path.startswith("/" + site_address)

    # Serve a media file for a site, downloading it first if necessary
    def actionSiteMedia(self, path):
        path = path.replace("/index.html/", "/")  # Base Backward compatibility fix
        if path.endswith("/"):
            path = path + "index.html"
        match = re.match("/media/(?P<address>[A-Za-z0-9\._-]+)/(?P<inner_path>.*)", path)
        referer = self.env.get("HTTP_REFERER")
        if referer and match:  # Only allow same site to receive media
            if not self.isMediaRequestAllowed(match.group("address"), referer):
                return self.error403("Media referrer error")  # Referrer not starts same address as requested path
        if match:  # Looks like a valid path
            address = match.group("address")
            file_path = "%s/%s/%s" % (config.data_dir, address, match.group("inner_path"))
            allowed_dir = os.path.abspath("%s/%s" % (config.data_dir, address))  # Only files within data/sitehash allowed
            data_dir = os.path.abspath("data")  # No files from data/ allowed
            if (
                ".." in file_path
                or not os.path.dirname(os.path.abspath(file_path)).startswith(allowed_dir)
                or allowed_dir == data_dir
            ):  # File not in allowed path
                return self.error403()
            else:
                if config.debug and file_path.split("/")[-1].startswith("all."):
                    # If debugging merge *.css to all.css and *.js to all.js
                    site = self.server.sites.get(address)
                    if site.settings["own"]:
                        from Debug import DebugMedia
                        DebugMedia.merge(file_path)
                if os.path.isfile(file_path):  # File exists
                    # self.sendHeader(content_type=self.getContentType(file_path)) # ?? Get Exception without this
                    return self.actionFile(file_path)
                else:  # File does not exist, try to download
                    site = SiteManager.site_manager.need(address, all_file=False)
                    result = site.needFile(match.group("inner_path"), priority=1)  # Wait until file downloads
                    if result:
                        # self.sendHeader(content_type=self.getContentType(file_path))
                        return self.actionFile(file_path)
                    else:
                        self.log.debug("File not found: %s" % match.group("inner_path"))
                        return self.error404(match.group("inner_path"))
        else:  # Bad url
            return self.error404(path)

    # Serve a media file for the UI itself
    def actionUiMedia(self, path):
        match = re.match("/uimedia/(?P<inner_path>.*)", path)
        if match:  # Looks like a valid path
            file_path = "src/Ui/media/%s" % match.group("inner_path")
            allowed_dir = os.path.abspath("src/Ui/media")  # Only files within src/Ui/media allowed
            if ".." in file_path or not os.path.dirname(os.path.abspath(file_path)).startswith(allowed_dir):
                # File not in allowed path
                return self.error403()
            else:
                if config.debug and match.group("inner_path").startswith("all."):
                    # If debugging merge *.css to all.css and *.js to all.js
                    from Debug import DebugMedia
                    DebugMedia.merge(file_path)
                return self.actionFile(file_path)
        else:  # Bad url
            return self.error400()

    # Stream a file to client in block_size chunks
    def actionFile(self, file_path, block_size=64 * 1024):
        if os.path.isfile(file_path):
            # Try to figure out content type by extension
            content_type = self.getContentType(file_path)
            # TODO: Dont allow external access: extra_headers=
            # [("Content-Security-Policy", "default-src 'unsafe-inline' data: http://localhost:43110 ws://localhost:43110")]
            self.sendHeader(content_type=content_type)
            if self.env["REQUEST_METHOD"] != "OPTIONS":
                file = open(file_path, "rb")
                while 1:
                    try:
                        block = file.read(block_size)
                        if block:
                            yield block
                        else:
                            raise StopIteration
                    except StopIteration:
                        file.close()
                        break
        else:  # File does not exist
            yield self.error404(file_path)

    # On websocket connection: authenticate by wrapper_key and serve until close
    def actionWebsocket(self):
        ws = self.env.get("wsgi.websocket")
        if ws:
            wrapper_key = self.get["wrapper_key"]
            # Find site by wrapper_key
            site = None
            for site_check in self.server.sites.values():
                if site_check.settings["wrapper_key"] == wrapper_key:
                    site = site_check
            if site:  # Correct wrapper key
                user = self.getCurrentUser()
                if not user:
                    self.log.error("No user found")
                    return self.error403()
                ui_websocket = UiWebsocket(ws, site, self.server, user, self)
                site.websockets.append(ui_websocket)  # Add to site websockets to allow notify on events
                ui_websocket.start()
                for site_check in self.server.sites.values():
                    # Remove websocket from every site (admin sites allowed to join other sites event channels)
                    if ui_websocket in site_check.websockets:
                        site_check.websockets.remove(ui_websocket)
                return "Bye."
            else:  # No site found by wrapper key
                self.log.error("Wrapper key not found: %s" % wrapper_key)
                return self.error403()
        else:
            self.start_response("400 Bad Request", [])
            return "Not a websocket!"

    # Debug last error
    def actionDebug(self):
        # Raise last error from DebugHook
        import sys
        last_error = sys.modules["main"].DebugHook.last_error
        if last_error:
            raise last_error[0], last_error[1], last_error[2]
        else:
            self.sendHeader()
            return "No error! :)"

    # Just raise an error to get console
    def actionConsole(self):
        import sys
        sites = self.server.sites
        main = sys.modules["main"]
        raise Exception("Here is your console")

    # - Tests -

    def actionTestStream(self):
        self.sendHeader()
        yield " " * 1080  # Overflow browser's buffer
        yield "He"
        time.sleep(1)
        yield "llo!"
        yield "Running websockets: %s" % len(self.server.websockets)
        self.server.sendMessage("Hello!")

    # - Errors -

    # Send bad request error
    def error400(self):
        self.sendHeader(400)
        return "Bad Request"

    # You are not allowed to access this
    def error403(self, message="Forbidden"):
        self.sendHeader(403)
        return message

    # Send file not found error
    def error404(self, path=None):
        self.sendHeader(404)
        return "Not Found: %s" % path.encode("utf8")

    # Internal server error
    def error500(self, message=":("):
        self.sendHeader(500)
        return "<h1>Server error</h1>%s" % cgi.escape(message)

# - Reload for easier developing -
# def reload():
#    import imp, sys
#    global UiWebsocket
#    UiWebsocket = imp.load_source("UiWebsocket", "src/Ui/UiWebsocket.py").UiWebsocket
#    reload(sys.modules["User.UserManager"])
#    UserManager.reloadModule()
#    self.user = UserManager.user_manager.getCurrent()
| gpl-2.0 |
zasdfgbnm/tensorflow | tensorflow/python/ops/transpose_benchmark.py | 88 | 6034 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for Transpose op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def build_graph(device, input_shape, perm, datatype, num_iters):
  """Builds a graph containing a sequence of transpose operations.

  Each transpose is chained to the previous one with a control dependency so
  the ops cannot be elided or run concurrently, which keeps the timing honest.

  Args:
    device: String, the device to run on.
    input_shape: Shape of the input tensor.
    perm: A list of ints with the same length as input tensor's dimension.
    datatype: numpy data type of the input tensor.
    num_iters: number of iterations to run transpose.

  Returns:
    A group op joining all the transpose ops, suitable for session.run().
  """
  with ops.device("/%s:0" % device):
    total_size = np.prod(input_shape)
    inp = np.arange(1, total_size + 1, dtype=datatype).reshape(input_shape)
    t = constant_op.constant(inp, shape=input_shape)
    outputs = []
    transpose_op = array_ops.transpose(t, perm)
    outputs.append(transpose_op)
    for _ in range(1, num_iters):
      # Serialize successive transposes so each run is measured sequentially.
      with ops.control_dependencies([transpose_op]):
        transpose_op = array_ops.transpose(t, perm)
      outputs.append(transpose_op)
    return control_flow_ops.group(*outputs)
class TransposeBenchmark(test.Benchmark):
  """Benchmark transpose!"""

  def _run_graph(self, device, input_shape, perm, num_iters, datatype):
    """Runs the graph and prints its execution time.

    Args:
      device: String, the device to run on.
      input_shape: Shape of the input tensor.
      perm: A list of ints with the same length as input tensor's dimension.
      num_iters: Number of iterations to run the benchmark.
      datatype: numpy data type of the input tensor.

    Returns:
      The duration of the run in seconds.
    """
    graph = ops.Graph()
    with graph.as_default():
      outputs = build_graph(device, input_shape, perm, datatype, num_iters)
      with session_lib.Session(graph=graph) as session:
        variables.global_variables_initializer().run()
        # warmup runs (excluded from the timed measurement below)
        session.run(outputs)
        start_time = time.time()
        session.run(outputs)
        duration = (time.time() - start_time) / num_iters
        # Throughput counts each element twice (read + write) in GB/s.
        throughput = np.prod(
            np.array(input_shape)) * datatype().itemsize * 2 / duration / 1e9
        print("%s %s inputshape:%s perm:%s %d %.6fsec, %.4fGB/s." %
              (device, str(datatype), str(input_shape).replace(" ", ""),
               str(perm).replace(" ", ""), num_iters, duration, throughput))

    name_template = (
        "transpose_{device}_{dtype}_input_shape_{inputshape}_perm_{perm}")

    self.report_benchmark(
        name=name_template.format(
            device=device,
            dtype=str(datatype).replace(" ", ""),
            inputshape=str(input_shape).replace(" ", ""),
            perm=str(perm).replace(" ", "")).replace(" ", ""),
        iters=num_iters,
        wall_time=duration)

    return duration

  def benchmark_transpose(self):
    print("transpose benchmark:")

    datatypes = [np.complex128, np.float64, np.float32, np.float16, np.int8]

    # Shape/perm lists are position-paired: small_shapes[i] goes with
    # small_perms[i]; do not reorder one without the other.
    small_shapes = [[2, 20, 20, 20, 16], [2, 16, 20, 20, 20]] * 2
    small_shapes += [[2, 100, 100, 16], [2, 16, 100, 100]] * 2
    small_shapes += [[2, 5000, 16], [2, 16, 5000]] * 2
    small_perms = [[0, 4, 1, 2, 3], [0, 2, 3, 4, 1]] + [[4, 1, 2, 3, 0]] * 2
    small_perms += [[0, 3, 1, 2], [0, 2, 3, 1]] + [[3, 1, 2, 0]] * 2
    small_perms += [[0, 2, 1]] * 2 + [[2, 1, 0]] * 2

    large_shapes = [[2, 40, 40, 40, 32], [2, 40, 40, 40, 64]] * 2 + [[
        2, 300, 300, 32
    ], [2, 300, 300, 64]] * 2 + [[2, 100000, 32], [2, 100000, 64]] * 2
    large_perms = [[0, 4, 1, 2, 3], [0, 2, 3, 4, 1]] + [[4, 1, 2, 3, 0]] * 2 + [
        [0, 3, 1, 2], [0, 2, 3, 1]
    ] + [[3, 1, 2, 0]] * 2 + [[0, 2, 1]] * 2 + [[2, 1, 0]] * 2

    num_iters = 40
    for datatype in datatypes:
      for ishape, perm in zip(small_shapes, small_perms):
        self._run_graph("gpu", ishape, perm, num_iters, datatype)

      # Large shapes are skipped for complex128/float16 (memory/support limits).
      if datatype is not np.complex128:
        if datatype is not np.float16:
          for ishape, perm in zip(large_shapes, large_perms):
            self._run_graph("gpu", ishape, perm, num_iters, datatype)

    small_dim_large_shapes = [[2, 10000, 3], [2, 3, 10000], [2, 10000, 8],
                              [2, 8, 10000]]
    small_dim_small_shapes = [[2, 5000, 3], [2, 3, 5000], [2, 5000, 8],
                              [2, 8, 5000]]
    small_dim_perms = [[0, 2, 1]] * 4

    num_iters = 320
    small_dim_large_shape_datatypes = [np.float64, np.float32, np.int8]
    for datatype in small_dim_large_shape_datatypes:
      for ishape, perm in zip(small_dim_large_shapes, small_dim_perms):
        self._run_graph("gpu", ishape, perm, num_iters, datatype)

    small_dim_small_shape_datatypes = [np.complex128, np.float16]
    for datatype in small_dim_small_shape_datatypes:
      for ishape, perm in zip(small_dim_small_shapes, small_dim_perms):
        self._run_graph("gpu", ishape, perm, num_iters, datatype)
if __name__ == "__main__":
  # Entry point for running the benchmark via the TensorFlow test runner.
  test.main()
| apache-2.0 |
stewartpark/django | tests/get_object_or_404/models.py | 409 | 1133 | """
DB-API Shortcuts
``get_object_or_404()`` is a shortcut function to be used in view functions for
performing a ``get()`` lookup and raising a ``Http404`` exception if a
``DoesNotExist`` exception was raised during the ``get()`` call.
``get_list_or_404()`` is a shortcut function to be used in view functions for
performing a ``filter()`` lookup and raising a ``Http404`` exception if a
``DoesNotExist`` exception was raised during the ``filter()`` call.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Author(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
class ArticleManager(models.Manager):
    """Manager whose default queryset only includes articles that have an
    author whose name contains 'sir' (case-insensitive)."""

    def get_queryset(self):
        base_qs = super(ArticleManager, self).get_queryset()
        return base_qs.filter(authors__name__icontains='sir')
@python_2_unicode_compatible
class Article(models.Model):
    """An article with many-to-many authors and two managers:
    the plain default manager and the 'sir'-filtered ArticleManager."""
    authors = models.ManyToManyField(Author)
    title = models.CharField(max_length=50)
    # Manager order matters: `objects` stays the default manager.
    objects = models.Manager()
    by_a_sir = ArticleManager()

    def __str__(self):
        return self.title
| bsd-3-clause |
staujd02/Pi-RFID-Video-Player | source/tests/migrator_test.py | 1 | 7254 | import unittest
import os
import subprocess
from source.migrators.migrator import Migrator
from source.informationManagers.dataStorageMethods.csvImplementation import CSVImplementation
from source.informationManagers.search.scriptedFileSearch import ScriptedFileSearch
from source.informationManagers.dataStorageMethods.database import Database
class Migrator_test(unittest.TestCase):
TEST_DB = "TestDb.csv"
TEST_SCAN_OUTPUT = "temp.test.csv"
    class CopyProvider(object):
        # Test double for file copying: records (source, dest) pairs instead of
        # touching the filesystem.
        # NOTE(review): filesCopied is a class attribute, so recorded copies are
        # shared across all instances/tests -- confirm this is intentional.
        filesCopied = []
        def copyfile(self, source, dest):
            self.filesCopied.append([source, dest])
    class FakeMessenger(object):
        # Test double for the migrator's messenger: captures log messages and
        # progress updates so tests can assert on them afterwards.
        def __init__(self):
            self.messages = []
            self.updates = []
        def sendMessage(self, message):
            self.messages.append(message)
        def sendUpdate(self, message):
            self.updates.append(message)
    class ProcessProvider(object):
        # Test double for subprocess execution: records the command instead of
        # running it. `shell` is accepted for interface compatibility only.
        cmdCalled = "not called"
        def call(self, cmd, shell=False):
            self.cmdCalled = cmd
    class FakeScriptedFileSearch:
        # Stand-in for ScriptedFileSearch: instead of scanning real media
        # devices, it loads canned scan results from the TEMP_LIST CSV fixture.
        TEMP_LIST = "temp.test.csv"
        def __init__(self, processProvider):
            # processProvider is accepted for interface compatibility but unused.
            self.db = Database(CSVImplementation())
            self.db.init()
        def scan(self, scriptFile, mediaRoot):
            # Record the arguments so tests can assert how scan() was invoked,
            # then load the fixture data in place of a real scan.
            self.calledWithScriptFile = scriptFile
            self.calledWithMediaRoot = mediaRoot
            self.db.load(self.TEMP_LIST)
        def getList(self):
            return self.db.iterate()
        def getFile(self, key):
            return self.db.query(key)
        def scanHasRun(self):
            return True
    def test_migrator_correctly_interfaces_with_the_scanner(self):
        # The migrator must forward the script file and media root verbatim
        # to the scanner's scan() call.
        self.migrator.migrate("sourceDevice", "/media/pi/", "scriptMe.sh")
        self.assertEqual(self.scriptedFileSearch.calledWithScriptFile, "scriptMe.sh")
        self.assertEqual(self.scriptedFileSearch.calledWithMediaRoot, "/media/pi/")
    def test_migrator_correctly_projects_its_activities(self):
        # Progress updates (user-facing) must reflect each phase of migration.
        self.migrator.migrate("sourceDevice", "/media/pi/", "scriptMe.sh")
        self.assertEqual(self.messenger.updates, [
            "Scanning...",
            "Copying Title 1",
            "Copying Title 7",
            "Done"
        ])
def test_migrator_correctly_records_its_activities(self):
self.migrator.migrate("sourceDevice", "/media/pi/", "scriptMe.sh")
self.assertEqual(self.messenger.messages, [
"Marking all records as inactive...",
"Scanning media devices...",
"Found 6 record(s) on source device",
"Found 5 record(s) on non-source devices",
"6 unique record(s) confirmed on the source device",
"2 new title(s) discovered",
"Copying Title 1 from /media/pi/sourceDevice/Title 1 to /media/pi/sourceDevice/Title 1",
"Copying Title 7 from /media/pi/usb2/Title 7 to /media/pi/sourceDevice/Title 7",
"Migration complete"
])
def test_migrator_throws_exception_when_source_usb_not_found_in_results(self):
self.migrator.migrate("sourceDevice", "/media/pi/", "scanner.sh")
self.assertIsNotNone(self.migrator)
def test_migrator_copies_the_right_files(self):
self.migrator.migrate("sourceDevice", "/media/pi/", "scanner.sh")
self.assertEqual(self.CopyProvider.filesCopied, [
["/media/pi/usb2/Title 1", "/media/pi/sourceDevice/Title 1"],
["/media/pi/usb2/Title 7", "/media/pi/sourceDevice/Title 7"]
])
def test_migrator_updates_the_database_appropriately(self):
self.migrator.migrate("sourceDevice", "/media/pi/", "scanner.sh")
l = [self.videoDatabase.query(i) for i in self.videoDatabase.iterate()]
self.assertEqual(l, [
["Title 1","/media/pi/sourceDevice/Title 1",'True'],
["Title 2","/media/pi/sourceDevice/Title 2",'False'],
["Title 3","/media/pi/sourceDevice/Title 3",'True'],
["Title 4","/media/pi/sourceDevice/Title 4",'True'],
["Title 5","/media/pi/sourceDevice/sub_folder/Title 5",'True'],
["Title 6","/media/pi/sourceDevice/sub_folder/Title 6",'True'],
["Title 8","/media/pi/sourceDevice/Title 8",'True'],
["Title 9","/media/pi/sourceDevice/Title 9",'True'],
["Title 7","/media/pi/sourceDevice/Title 7",'True'],
])
def setUp(self):
self.createTestCSVs()
self.videoDatabase = CSVImplementation.openDB(Database, self.TEST_DB)
self.scriptedFileSearch = self.FakeScriptedFileSearch(self.ProcessProvider())
self.messenger = self.FakeMessenger()
self.migrator = Migrator(
self.scriptedFileSearch, self.videoDatabase, self.CopyProvider(), self.messenger
)
def createTestCSVs(self):
f = open(self.TEST_DB, "w")
f.writelines([
"1,Title 1,/media/pi/sourceDevice/Title 1,True\n",
"2,Title 2,/media/pi/sourceDevice/Title 2,True\n",
"3,Title 3,/media/pi/sourceDevice/Title 3,True\n",
"4,Title 4,/media/pi/sourceDevice/Title 4,True\n",
"5,Title 5,/media/pi/sourceDevice/Title 5,True\n",
"6,Title 6,/media/pi/sourceDevice/Title 6,True\n"
])
f.close()
f = open(self.TEST_SCAN_OUTPUT, "w")
f.writelines([
"1,Title 1,/media/pi/usb2/Title 1\n",
"2,Title 3,/media/pi/sourceDevice/Title 3\n",
"3,Title 3,/media/pi/usb2/Title 3\n",
"4,Title 4,/media/pi/sourceDevice/Title 4\n",
"5,Title 5,/media/pi/sourceDevice/sub_folder/Title 5\n",
"6,Title 5,/media/pi/usb2/Title 5\n",
"7,Title 6,/media/pi/sourceDevice/sub_folder/Title 6\n",
"8,Title 7,/media/pi/usb2/Title 7\n",
"9,Title 8,/media/pi/sourceDevice/Title 8\n",
"10,Title 8,/media/pi/usb2/Title 8\n",
"11,Title 9,/media/pi/sourceDevice/Title 9\n"
])
f.close()
def tearDown(self):
os.remove(self.TEST_DB)
os.remove(self.TEST_SCAN_OUTPUT)
# --: Cases :--
# Title 1: Record in MasterList and not on local device, but is on external device [CC]
# Title 2: Record in MasterList and not on any device (0,0)
# Title 3: Record in MasterList and on local device and on external device (1,1)
# Title 4: Record in MasterList and only on local device (1,0)
# Title 5: Record in MasterList and on local device, different location and on external device
# Title 6: Record in MasterList and only on local device, different location
# Title 7: Record not in MasterList and not on local device, but is on external device (0,1) [CC]
# Title 8: Record not in MasterList and on local device and on external device (1,1)
# Title 9: Record not in MasterList and only on local device (1,0)
# --: Edge Cases :--
# Title 10: Record not in MasterList and on local device twice in different locations (1,0)
# Title 11: Record in MasterList and on local device twice in different locations (1,0)
# Title 12: Record not in MasterList and on two external devices (1,0)
# Title 13: Record in MasterList and on two external devices (1,0)
# Title 14: Record in MasterList and on two external devices (1,0)
| apache-2.0 |
dicortazar/ceres | cereslib/dfutils/format.py | 1 | 3667 | #!/usr/bin/python
# Copyright (C) 2016 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors:
# Daniel Izquierdo Cortazar <dizquierdo@bitergia.com>
#
import pandas
import scipy
import datetime
class Format(object):
    """Library that allows to format dataframes to be later enriched.

    This class is the first step in the enrichment process of data.
    Although it can be used alone for other purposes, its main goal is
    to provide well-formatted dataframes [missing fields, string dates,
    removal of not needed fields] for the following steps of the
    enrichment process. This formatting and cleaning is needed due to
    inconsistencies and missing fields that may appear when reading
    information.
    """

    def fill_missing_fields(self, data, columns):
        """Fill missing columns with 0's.

        :param data: original Pandas dataframe
        :param columns: list of columns to be filled in the DataFrame
        :type data: pandas.DataFrame
        :type columns: list of strings

        :returns: Pandas dataframe with missing fields filled with 0's
        :rtype: pandas.DataFrame
        """
        for column in columns:
            if column not in data.columns:
                # Scalar float broadcast over all rows. `scipy.zeros` was a
                # NumPy alias that has been removed from SciPy's namespace,
                # so it can no longer be relied on here.
                data[column] = 0.0
        return data

    def update_field_names(self, data, matching):
        """Rename columns according to the given matching.

        :param data: original Pandas dataframe
        :param matching: dictionary of matchings between old and new values
        :type data: pandas.DataFrame
        :type matching: dictionary

        :returns: Pandas dataframe with updated names
        :rtype: pandas.DataFrame
        """
        # Only rename keys actually present in the frame. The result of
        # DataFrame.rename must be reassigned: the previous implementation
        # discarded the returned frame, which made this method a no-op.
        renames = {key: matching[key] for key in matching if key in data.columns}
        if renames:
            data = data.rename(columns=renames)
        return data

    def format_dates(self, data, columns):
        """Translate column values into datetime objects.

        :param data: original Pandas dataframe
        :param columns: list of columns to cast the date to a datetime object
        :type data: pandas.DataFrame
        :type columns: list of strings

        :returns: Pandas dataframe with updated 'columns' with datetime objects
        :rtype: pandas.DataFrame
        """
        for column in columns:
            if column in data.columns:
                data[column] = pandas.to_datetime(data[column])
        return data

    def remove_columns(self, data, columns):
        """Remove the given columns from data.

        :param data: original Pandas dataframe
        :param columns: list of columns to remove
        :type data: pandas.DataFrame
        :type columns: list of strings

        :returns: Pandas dataframe with removed columns
        :rtype: pandas.DataFrame
        """
        for column in columns:
            if column in data.columns:
                data = data.drop(column, axis=1)
        return data
| lgpl-3.0 |
cyberden/CouchPotatoServer | couchpotato/core/settings_test.py | 15 | 4990 | import mock
from mock import patch, Mock, MagicMock
import unittest
from unittest import TestCase
from couchpotato.core.settings import Settings
class DoNotUseMe:
    """Do not use this class: it only stores ready-made Mock recipes for
    the Settings class.

    Usage:
        Select the appropriate mocks and copy-paste them into your
        test method.
    """

    def __do_not_call(self):
        # s = Settings
        s = Mock()

        # methods:
        s.isOptionWritable = Mock(return_value=True)
        s.set = Mock(return_value=None)
        s.save = Mock()

        # props:
        s.log = Mock()

        # subobjects
        s.p = Mock()
        s.p.getboolean = Mock(return_value=True)
        # NOTE(review): no parentheses — this assigns the Mock *class*
        # rather than a Mock instance; confirm intent before copying.
        s.p.has_option = Mock
class SettingsCommon(TestCase):
    """Tests for general Settings behaviour."""

    def setUp(self):
        self.s = Settings()

    def test_get_directories(self):
        # A 'directories'-typed option must split the raw '::'-separated
        # string and strip surrounding whitespace from every entry.
        s = self.s

        raw = ' /some/directory ::/another/dir '
        exp = ['/some/directory', '/another/dir']

        sec = 'sec'
        opt = 'opt'
        s.types[sec] = {}
        s.types[sec][opt] = 'directories'

        # Replace the underlying config parser so no real file is read.
        s.p = MagicMock()
        s.p.get.return_value = raw

        act = s.get(option = opt, section = sec)
        self.assertEqual(act, exp)
class SettingsSaveWritableNonWritable(TestCase):
    """saveView must honour the writability of the targeted option."""

    def setUp(self):
        self.s = Settings()

    def test_save_writable(self):
        s = self.s

        # Set up Settings mocks; assume the option is writable:
        mock_isOptionWritable = s.isOptionWritable = Mock(return_value=True)
        mock_set = s.set = Mock(return_value=None)
        mock_p_save = s.save = Mock()

        section = 'core'
        option = 'option_non_exist_be_sure'
        value = "1000"
        params = {'section': section, 'name': option, 'value': value}

        # Call the method under test.
        env_mock = Mock()
        # HERE is an example of mocking a LOCAL 'import':
        # saveView imports Env inside the function body, so the module is
        # patched in sys.modules for the duration of the call.
        with patch.dict('sys.modules', {'couchpotato.environment.Env': env_mock}):
            result = s.saveView(**params)

        self.assertIsInstance(s, Settings)
        self.assertIsInstance(result, dict)
        self.assertTrue(result['success'])

        # -----------------------------------------
        # check mocks
        # -----------------------------------------
        mock_isOptionWritable.assert_called_with(section, option)
        # Check that Settings tried to save the value:
        mock_set.assert_called_with(section, option, value)

    def test_save_non_writable(self):
        s = self.s

        # Set up Settings mocks; assume the option is NOT writable:
        mock_is_w = s.isOptionWritable = Mock(return_value=False)
        mock_set = s.set = Mock(return_value=None)
        mock_p_save = s.save = Mock()
        mock_log_s = s.log = Mock()

        section = 'core'
        option = 'option_non_exist_be_sure'
        value = "1000"
        params = {'section': section, 'name': option, 'value': value}

        # Call the method under test.
        env_mock = Mock()
        # HERE is an example of mocking a LOCAL 'import' (see above).
        with patch.dict('sys.modules', {'couchpotato.environment.Env': env_mock}):
            result = s.saveView(**params)

        self.assertIsInstance(s, Settings)
        self.assertIsInstance(result, dict)
        self.assertFalse(result['success'])

        # -----------------------------------------
        # check mocks
        # -----------------------------------------
        # The 'set' method must not have been called:
        self.assertFalse(mock_set.called, 'Method `set` was called')
        mock_is_w.assert_called_with(section, option)
class OptionMetaSuite(TestCase):
    """Tests for ro/rw/hidden option meta-records."""

    def setUp(self):
        self.s = Settings()
        self.meta = self.s.optionMetaSuffix()
        # Hide the real config parser behind a mock:
        self.s.p = Mock()

    def test_no_meta_option(self):
        s = self.s
        section = 'core'
        option = 'url'
        option_meta = option + self.meta
        # Set up the mock:
        s.p.getboolean = Mock(return_value=True)
        # There is no META record for our option:
        s.p.has_option = Mock(side_effect=lambda s, o: not (s == section and o == option_meta))

        # By default all options are writable and readable:
        self.assertTrue(s.isOptionWritable(section, option))
        self.assertTrue(s.isOptionReadable(section, option))

    def test_non_writable(self):
        s = self.s
        section = 'core'
        option = 'url'

        # Return 'ro' for the meta record of our option; any other lookup
        # gets an arbitrary non-meta value. Note option_meta is defined
        # below the function — it is only read at call time, which is fine.
        def mock_get_meta_ro(s, o):
            if (s == section and o == option_meta):
                return 'ro'
            return 11

        option_meta = option + self.meta
        # Set up the mock:
        s.p.has_option = Mock(return_value=True)
        s.p.get = Mock(side_effect=mock_get_meta_ro)

        # A read-only option is readable but not writable:
        self.assertFalse(s.isOptionWritable(section, option))
        self.assertTrue(s.isOptionReadable(section, option))
| gpl-3.0 |
pquentin/django | django/contrib/gis/db/backends/postgis/operations.py | 85 | 15382 | import re
from django.conf import settings
from django.contrib.gis.db.backends.base.operations import \
BaseSpatialOperations
from django.contrib.gis.db.backends.postgis.adapter import PostGISAdapter
from django.contrib.gis.db.backends.utils import SpatialOperator
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Distance
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.postgresql_psycopg2.operations import \
DatabaseOperations
from django.db.utils import ProgrammingError
from django.utils.functional import cached_property
from .models import PostGISGeometryColumns, PostGISSpatialRefSys
class PostGISOperator(SpatialOperator):
    """Spatial operator that knows whether it is valid for geography columns."""

    def __init__(self, geography=False, **kwargs):
        # Only a subset of the operators and functions are available
        # for the geography type.
        self.geography = geography
        super(PostGISOperator, self).__init__(**kwargs)

    def as_sql(self, connection, lookup, *args):
        output_field = lookup.lhs.output_field
        if not self.geography and output_field.geography:
            raise ValueError('PostGIS geography does not support the "%s" '
                             'function/operator.' % (self.func or self.op,))
        return super(PostGISOperator, self).as_sql(connection, lookup, *args)
class PostGISDistanceOperator(PostGISOperator):
    # The doubled %%s survives the first interpolation and later receives
    # the distance parameter itself.
    sql_template = '%(func)s(%(lhs)s, %(rhs)s) %(op)s %%s'

    def as_sql(self, connection, lookup, template_params, sql_params):
        """Render a distance comparison.

        Geodetic (lat/long) geometry columns — but not geography columns —
        are routed through ST_Distance_Sphere, or ST_Distance_Spheroid when
        the lookup carries the 'spheroid' option; everything else falls back
        to the parent's plain ST_Distance rendering.
        """
        if not lookup.lhs.output_field.geography and lookup.lhs.output_field.geodetic(connection):
            sql_template = self.sql_template
            if len(lookup.rhs) == 3 and lookup.rhs[-1] == 'spheroid':
                # The spheroid variant needs an extra placeholder for the
                # spheroid definition parameter.
                template_params.update({'op': self.op, 'func': 'ST_Distance_Spheroid'})
                sql_template = '%(func)s(%(lhs)s, %(rhs)s, %%s) %(op)s %%s'
            else:
                template_params.update({'op': self.op, 'func': 'ST_Distance_Sphere'})
            return sql_template % template_params, sql_params
        return super(PostGISDistanceOperator, self).as_sql(connection, lookup, template_params, sql_params)
class PostGISOperations(BaseSpatialOperations, DatabaseOperations):
    """Spatial database operations for the PostGIS backend.

    Maps GeoDjango lookups/functions onto PostGIS SQL, and exposes
    version-dependent behaviour keyed off the detected PostGIS version.
    """

    name = 'postgis'
    postgis = True
    geography = True
    geom_func_prefix = 'ST_'
    version_regex = re.compile(r'^(?P<major>\d)\.(?P<minor1>\d)\.(?P<minor2>\d+)')

    Adapter = PostGISAdapter
    Adaptor = Adapter  # Backwards-compatibility alias.

    gis_operators = {
        'bbcontains': PostGISOperator(op='~'),
        'bboverlaps': PostGISOperator(op='&&', geography=True),
        'contained': PostGISOperator(op='@'),
        'contains': PostGISOperator(func='ST_Contains'),
        'overlaps_left': PostGISOperator(op='&<'),
        'overlaps_right': PostGISOperator(op='&>'),
        'overlaps_below': PostGISOperator(op='&<|'),
        'overlaps_above': PostGISOperator(op='|&>'),
        'left': PostGISOperator(op='<<'),
        'right': PostGISOperator(op='>>'),
        'strictly_below': PostGISOperator(op='<<|'),
        # NOTE(review): 'stricly_above' (sic) is the registered lookup name;
        # correcting the spelling would break existing querysets — confirm
        # a deprecation path before renaming.
        'stricly_above': PostGISOperator(op='|>>'),
        'same_as': PostGISOperator(op='~='),
        'exact': PostGISOperator(op='~='),  # alias of same_as
        'contains_properly': PostGISOperator(func='ST_ContainsProperly'),
        'coveredby': PostGISOperator(func='ST_CoveredBy', geography=True),
        'covers': PostGISOperator(func='ST_Covers', geography=True),
        'crosses': PostGISOperator(func='ST_Crosses'),
        'disjoint': PostGISOperator(func='ST_Disjoint'),
        'equals': PostGISOperator(func='ST_Equals'),
        'intersects': PostGISOperator(func='ST_Intersects', geography=True),
        'overlaps': PostGISOperator(func='ST_Overlaps'),
        'relate': PostGISOperator(func='ST_Relate'),
        'touches': PostGISOperator(func='ST_Touches'),
        'within': PostGISOperator(func='ST_Within'),
        'dwithin': PostGISOperator(func='ST_DWithin', geography=True),
        'distance_gt': PostGISDistanceOperator(func='ST_Distance', op='>', geography=True),
        'distance_gte': PostGISDistanceOperator(func='ST_Distance', op='>=', geography=True),
        'distance_lt': PostGISDistanceOperator(func='ST_Distance', op='<', geography=True),
        'distance_lte': PostGISDistanceOperator(func='ST_Distance', op='<=', geography=True),
    }

    def __init__(self, connection):
        super(PostGISOperations, self).__init__(connection)

        # Pre-build the ST_-prefixed PostGIS function names used elsewhere.
        prefix = self.geom_func_prefix

        self.area = prefix + 'Area'
        self.bounding_circle = prefix + 'MinimumBoundingCircle'
        self.centroid = prefix + 'Centroid'
        self.collect = prefix + 'Collect'
        self.difference = prefix + 'Difference'
        self.distance = prefix + 'Distance'
        self.distance_sphere = prefix + 'distance_sphere'
        self.distance_spheroid = prefix + 'distance_spheroid'
        self.envelope = prefix + 'Envelope'
        self.extent = prefix + 'Extent'
        self.force_rhr = prefix + 'ForceRHR'
        self.geohash = prefix + 'GeoHash'
        self.geojson = prefix + 'AsGeoJson'
        self.gml = prefix + 'AsGML'
        self.intersection = prefix + 'Intersection'
        self.kml = prefix + 'AsKML'
        self.length = prefix + 'Length'
        self.length_spheroid = prefix + 'length_spheroid'
        self.makeline = prefix + 'MakeLine'
        self.mem_size = prefix + 'mem_size'
        self.num_geom = prefix + 'NumGeometries'
        self.num_points = prefix + 'npoints'
        self.perimeter = prefix + 'Perimeter'
        self.point_on_surface = prefix + 'PointOnSurface'
        self.polygonize = prefix + 'Polygonize'
        self.reverse = prefix + 'Reverse'
        self.scale = prefix + 'Scale'
        self.snap_to_grid = prefix + 'SnapToGrid'
        self.svg = prefix + 'AsSVG'
        self.sym_difference = prefix + 'SymDifference'
        self.transform = prefix + 'Transform'
        self.translate = prefix + 'Translate'
        self.union = prefix + 'Union'
        self.unionagg = prefix + 'Union'

    # Following "attributes" are properties due to the spatial_version check and
    # to delay database access
    @property
    def extent3d(self):
        # PostGIS 2.0 renamed Extent3D to 3DExtent (likewise below).
        if self.spatial_version >= (2, 0, 0):
            return self.geom_func_prefix + '3DExtent'
        else:
            return self.geom_func_prefix + 'Extent3D'

    @property
    def length3d(self):
        if self.spatial_version >= (2, 0, 0):
            return self.geom_func_prefix + '3DLength'
        else:
            return self.geom_func_prefix + 'Length3D'

    @property
    def perimeter3d(self):
        if self.spatial_version >= (2, 0, 0):
            return self.geom_func_prefix + '3DPerimeter'
        else:
            return self.geom_func_prefix + 'Perimeter3D'

    @property
    def geometry(self):
        # Native geometry type support added in PostGIS 2.0.
        return self.spatial_version >= (2, 0, 0)

    @cached_property
    def spatial_version(self):
        """Determine the version of the PostGIS library."""
        # Trying to get the PostGIS version because the function
        # signatures will depend on the version used. The cost
        # here is a database query to determine the version, which
        # can be mitigated by setting `POSTGIS_VERSION` with a 3-tuple
        # comprising user-supplied values for the major, minor, and
        # subminor revision of PostGIS.
        if hasattr(settings, 'POSTGIS_VERSION'):
            version = settings.POSTGIS_VERSION
        else:
            try:
                vtup = self.postgis_version_tuple()
            except ProgrammingError:
                raise ImproperlyConfigured(
                    'Cannot determine PostGIS version for database "%s". '
                    'GeoDjango requires at least PostGIS version 1.5. '
                    'Was the database created from a spatial database '
                    'template?' % self.connection.settings_dict['NAME']
                )
            version = vtup[1:]
        return version

    def convert_extent(self, box, srid):
        """
        Returns a 4-tuple extent for the `Extent` aggregate by converting
        the bounding box text returned by PostGIS (`box` argument), for
        example: "BOX(-90.0 30.0, -85.0 40.0)".
        """
        if box is None:
            return None
        # Strip the leading "BOX(" and trailing ")", then split the two
        # "x y" corner points on the comma.
        ll, ur = box[4:-1].split(',')
        xmin, ymin = map(float, ll.split())
        xmax, ymax = map(float, ur.split())
        return (xmin, ymin, xmax, ymax)

    def convert_extent3d(self, box3d, srid):
        """
        Returns a 6-tuple extent for the `Extent3D` aggregate by converting
        the 3d bounding-box text returned by PostGIS (`box3d` argument), for
        example: "BOX3D(-90.0 30.0 1, -85.0 40.0 2)".
        """
        if box3d is None:
            return None
        # Strip the leading "BOX3D(" and trailing ")".
        ll, ur = box3d[6:-1].split(',')
        xmin, ymin, zmin = map(float, ll.split())
        xmax, ymax, zmax = map(float, ur.split())
        return (xmin, ymin, zmin, xmax, ymax, zmax)

    def convert_geom(self, hex, geo_field):
        """
        Converts the geometry returned from PostGIS aggretates.
        """
        if hex:
            return Geometry(hex, srid=geo_field.srid)
        else:
            return None

    def geo_db_type(self, f):
        """
        Return the database field type for the given geometry field.
        Typically this is `None` because geometry columns are added via
        the `AddGeometryColumn` stored procedure, unless the field
        has been specified to be of geography type instead.
        """
        if f.geography:
            if f.srid != 4326:
                raise NotImplementedError('PostGIS only supports geography columns with an SRID of 4326.')

            return 'geography(%s,%d)' % (f.geom_type, f.srid)
        elif self.geometry:
            # Postgis 2.0 supports type-based geometries.
            # TODO: Support 'M' extension.
            if f.dim == 3:
                geom_type = f.geom_type + 'Z'
            else:
                geom_type = f.geom_type
            return 'geometry(%s,%d)' % (geom_type, f.srid)
        else:
            return None

    def get_distance(self, f, dist_val, lookup_type):
        """
        Retrieve the distance parameters for the given geometry field,
        distance lookup value, and the distance lookup type.

        This is the most complex implementation of the spatial backends due to
        what is supported on geodetic geometry columns vs. what's available on
        projected geometry columns. In addition, it has to take into account
        the geography column type newly introduced in PostGIS 1.5.
        """
        # Getting the distance parameter and any options.
        if len(dist_val) == 1:
            value, option = dist_val[0], None
        else:
            value, option = dist_val

        # Shorthand boolean flags.
        geodetic = f.geodetic(self.connection)
        geography = f.geography

        if isinstance(value, Distance):
            if geography:
                # Geography columns take distances in meters.
                dist_param = value.m
            elif geodetic:
                if lookup_type == 'dwithin':
                    raise ValueError('Only numeric values of degree units are '
                                     'allowed on geographic DWithin queries.')
                dist_param = value.m
            else:
                # Convert the Distance into the units of the field.
                dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))
        else:
            # Assuming the distance is in the units of the field.
            dist_param = value

        if (not geography and geodetic and lookup_type != 'dwithin'
                and option == 'spheroid'):
            # using distance_spheroid requires the spheroid of the field as
            # a parameter.
            return [f._spheroid, dist_param]
        else:
            return [dist_param]

    def get_geom_placeholder(self, f, value, compiler):
        """
        Provides a proper substitution value for Geometries that are not in the
        SRID of the field. Specifically, this routine will substitute in the
        ST_Transform() function call.
        """
        if value is None or value.srid == f.srid:
            placeholder = '%s'
        else:
            # Adding Transform() to the SQL placeholder.
            placeholder = '%s(%%s, %s)' % (self.transform, f.srid)

        if hasattr(value, 'as_sql'):
            # If this is an F expression, then we don't really want
            # a placeholder and instead substitute in the column
            # of the expression.
            sql, _ = compiler.compile(value)
            placeholder = placeholder % sql

        return placeholder

    def _get_postgis_func(self, func):
        """
        Helper routine for calling PostGIS functions and returning their result.
        """
        # Close out the connection.  See #9437.
        with self.connection.temporary_connection() as cursor:
            cursor.execute('SELECT %s()' % func)
            return cursor.fetchone()[0]

    def postgis_geos_version(self):
        "Returns the version of the GEOS library used with PostGIS."
        return self._get_postgis_func('postgis_geos_version')

    def postgis_lib_version(self):
        "Returns the version number of the PostGIS library used with PostgreSQL."
        return self._get_postgis_func('postgis_lib_version')

    def postgis_proj_version(self):
        "Returns the version of the PROJ.4 library used with PostGIS."
        return self._get_postgis_func('postgis_proj_version')

    def postgis_version(self):
        "Returns PostGIS version number and compile-time options."
        return self._get_postgis_func('postgis_version')

    def postgis_full_version(self):
        "Returns PostGIS version number and compile-time options."
        return self._get_postgis_func('postgis_full_version')

    def postgis_version_tuple(self):
        """
        Returns the PostGIS version as a tuple (version string, major,
        minor, subminor).
        """
        # Getting the PostGIS version
        version = self.postgis_lib_version()
        m = self.version_regex.match(version)

        if m:
            major = int(m.group('major'))
            minor1 = int(m.group('minor1'))
            minor2 = int(m.group('minor2'))
        else:
            raise Exception('Could not parse PostGIS version string: %s' % version)

        return (version, major, minor1, minor2)

    def proj_version_tuple(self):
        """
        Return the version of PROJ.4 used by PostGIS as a tuple of the
        major, minor, and subminor release numbers.
        """
        proj_regex = re.compile(r'(\d+)\.(\d+)\.(\d+)')
        proj_ver_str = self.postgis_proj_version()
        m = proj_regex.search(proj_ver_str)
        if m:
            return tuple(map(int, [m.group(1), m.group(2), m.group(3)]))
        else:
            raise Exception('Could not determine PROJ.4 version from PostGIS.')

    def spatial_aggregate_name(self, agg_name):
        # Extent3D is version-dependent (see the extent3d property).
        if agg_name == 'Extent3D':
            return self.extent3d
        else:
            return self.geom_func_prefix + agg_name

    # Routines for getting the OGC-compliant models.
    def geometry_columns(self):
        return PostGISGeometryColumns

    def spatial_ref_sys(self):
        return PostGISSpatialRefSys
| bsd-3-clause |
bosondata/badwolf | badwolf/deploy/__init__.py | 1 | 5917 | # -*- coding: utf-8 -*-
import time
import logging
import requests
from flask import url_for
from badwolf.extensions import bitbucket, sentry
from badwolf.utils import run_command
from badwolf.bitbucket import BitbucketAPIError, BuildStatus
from badwolf.deploy.providers.script import ScriptProvider
from badwolf.deploy.providers.pypi import PypiProvider
logger = logging.getLogger(__name__)
class Deployer(object):
    """Runs the configured deploy providers for a build, updating the
    Bitbucket build status around each provider and firing Slack
    notifications according to the spec."""

    # Supported deploy providers, keyed by the name used in the spec.
    PROVIDERS = {
        'script': ScriptProvider,
        'pypi': PypiProvider,
    }

    def __init__(self, context, spec, providers, working_dir=None):
        # context: build context (commit, repository, actor, ...).
        # spec: parsed .badwolf.yml specification.
        # providers: list of provider configurations to run.
        self.context = context
        self.spec = spec
        self.working_dir = working_dir or context.clone_path
        self.providers = providers

    def deploy(self):
        """Run every usable provider; afterwards run the spec's
        after_deploy scripts if at least one provider succeeded."""
        if not self.providers:
            logger.info('No deploy provider active')
            return

        commit_hash = self.context.source['commit']['hash']
        run_after_deploy = False
        notification = self.spec.notification
        slack_webhook = notification.slack_webhook
        for provider_config in self.providers:
            provider_name = provider_config.provider
            provider_class = self.PROVIDERS.get(provider_name)
            if not provider_class:
                # Unknown provider name in the config; skip it.
                logger.warning('Provider %s not found', provider_name)
                continue

            provider = provider_class(self.working_dir, provider_config, self.context)
            if not provider.is_usable():
                logger.warning('Provider %s is not usable', provider_name)
                continue

            # Prefer the provider's own URL; fall back to our build-log page.
            status_url = provider.url() or url_for('log.build_log',
                                                   sha=commit_hash,
                                                   task_id=self.context.task_id,
                                                   _external=True)
            build_status = BuildStatus(
                bitbucket,
                self.context.source['repository']['full_name'],
                commit_hash,
                'badwolf/deploy/{}'.format(provider_name),
                status_url
            )
            self._update_build_status(build_status, 'INPROGRESS', '{} deploy in progress'.format(provider_name))
            succeed, output = provider.deploy()
            logger.info('Provider %s deploy %s, output: \n%s',
                        provider_name, 'succeed' if succeed else 'failed', output)

            state = 'SUCCESSFUL' if succeed else 'FAILED'
            self._update_build_status(build_status, state, '{} deploy {}'.format(provider_name, state.lower()))

            if succeed:
                run_after_deploy = True
                if slack_webhook and slack_webhook.on_success == 'always':
                    trigger_slack_webhook(slack_webhook.webhooks, self.context, provider, True)
            else:
                if slack_webhook and slack_webhook.on_failure == 'always':
                    trigger_slack_webhook(slack_webhook.webhooks, self.context, provider, False)

        # after deploy
        if not run_after_deploy or not self.spec.after_deploy:
            return

        for script in self.spec.after_deploy:
            exit_code, output = run_command(script, shell=True)
            logger.info('After deploy command `%s` exit code: %s, output: \n %s', script, exit_code, output)

    def _update_build_status(self, build_status, state, description=None):
        # Build-status updates are best effort: an API failure is logged and
        # reported to Sentry but must not abort the deploy itself.
        try:
            build_status.update(state, description=description)
        except BitbucketAPIError:
            logger.exception('Error calling Bitbucket API')
            sentry.captureException()
def trigger_slack_webhook(webhooks, context, provider, succeed):
    """Post a deploy-result notification to each Slack incoming webhook.

    :param webhooks: iterable of Slack incoming-webhook URLs
    :param context: build context (repository, actor, source commit, ...)
    :param provider: the deploy provider the notification is about
    :param succeed: whether the deploy succeeded
    """
    actor = context.actor
    if succeed:
        title = '{} deploy succeed'.format(provider.name)
        color = 'good'
    else:
        title = '{} deploy failed'.format(provider.name)
        color = 'warning'
    fields = []
    fields.append({
        'title': 'Repository',
        'value': '<https://bitbucket.org/{repo}|{repo}>'.format(repo=context.repository),
        'short': True,
    })
    if context.type == 'tag':
        fields.append({
            'title': 'Tag',
            'value': '<https://bitbucket.org/{repo}/commits/tag/{tag}|{tag}>'.format(
                repo=context.repository,
                tag=context.source['branch']['name']
            ),
            'short': True,
        })
    else:
        fields.append({
            'title': 'Branch',
            'value': '<https://bitbucket.org/{repo}/src?at={branch}|{branch}>'.format(
                repo=context.repository,
                branch=context.source['branch']['name']
            ),
            'short': True,
        })
    if context.type in {'branch', 'tag'}:
        fields.append({
            'title': 'Commit',
            'value': '<https://bitbucket.org/{repo}/commits/{sha}|{sha}>'.format(
                repo=context.repository,
                sha=context.source['commit']['hash'],
            ),
            'short': False
        })
    attachment = {
        'fallback': title,
        'title': title,
        'color': color,
        'fields': fields,
        'footer': context.repo_name,
        'ts': int(time.time()),
        'author_name': actor['display_name'],
        'author_link': actor['links']['html']['href'],
        'author_icon': actor['links']['avatar']['href'],
    }
    if context.type in {'branch', 'tag'}:
        attachment['text'] = context.message
    payload = {'attachments': [attachment]}
    session = requests.Session()
    for webhook in webhooks:
        logger.info('Triggering Slack webhook %s', webhook)
        try:
            # The POST itself must be inside the try block: previously only
            # raise_for_status() was guarded, so a connection error or
            # timeout (requests.ConnectionError / requests.Timeout, both
            # RequestException subclasses) on one webhook aborted the
            # remaining webhooks instead of being logged like an HTTP
            # error response.
            res = session.post(webhook, json=payload, timeout=10)
            res.raise_for_status()
        except requests.RequestException:
            logger.exception('Error triggering Slack webhook %s', webhook)
            sentry.captureException()
| mit |
hickford/youtube-dl | test/test_InfoExtractor.py | 104 | 2243 | #!/usr/bin/env python
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import FakeYDL
from youtube_dl.extractor.common import InfoExtractor
from youtube_dl.extractor import YoutubeIE, get_info_extractor
class TestIE(InfoExtractor):
    # Minimal concrete InfoExtractor so the base-class helper methods can
    # be exercised directly in the tests below.
    pass
class TestInfoExtractor(unittest.TestCase):
    """Tests for InfoExtractor helper methods (regex search, OpenGraph
    and <meta> tag extraction)."""

    def setUp(self):
        self.ie = TestIE(FakeYDL())

    def test_ie_key(self):
        # ie_key() must round-trip through the extractor registry.
        self.assertEqual(get_info_extractor(YoutubeIE.ie_key()), YoutubeIE)

    def test_html_search_regex(self):
        html = '<p id="foo">Watch this <a href="http://www.youtube.com/watch?v=BaW_jenozKc">video</a></p>'
        # NOTE(review): the lambda parameter shadows the `re` module name;
        # harmless here, but worth renaming if this file grows.
        search = lambda re, *args: self.ie._html_search_regex(re, html, *args)
        self.assertEqual(search(r'<p id="foo">(.+?)</p>', 'foo'), 'Watch this video')

    def test_opengraph(self):
        ie = self.ie
        # Quoting style and attribute order vary on purpose: extraction
        # must not depend on attribute formatting.
        html = '''
            <meta name="og:title" content='Foo'/>
            <meta content="Some video's description " name="og:description"/>
            <meta property='og:image' content='http://domain.com/pic.jpg?key1=val1&key2=val2'/>
            '''
        self.assertEqual(ie._og_search_title(html), 'Foo')
        self.assertEqual(ie._og_search_description(html), 'Some video\'s description ')
        self.assertEqual(ie._og_search_thumbnail(html), 'http://domain.com/pic.jpg?key1=val1&key2=val2')

    def test_html_search_meta(self):
        ie = self.ie
        # All common <meta> spellings must be recognised: name/property,
        # single/double/no quotes, content before or after the name.
        html = '''
            <meta name="a" content="1" />
            <meta name='b' content='2'>
            <meta name="c" content='3'>
            <meta name=d content='4'>
            <meta property="e" content='5' >
            <meta content="6" name="f">
        '''

        self.assertEqual(ie._html_search_meta('a', html), '1')
        self.assertEqual(ie._html_search_meta('b', html), '2')
        self.assertEqual(ie._html_search_meta('c', html), '3')
        self.assertEqual(ie._html_search_meta('d', html), '4')
        self.assertEqual(ie._html_search_meta('e', html), '5')
        self.assertEqual(ie._html_search_meta('f', html), '6')
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| unlicense |
le9i0nx/ansible | lib/ansible/modules/network/f5/bigip_qkview.py | 7 | 12750 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_qkview
short_description: Manage qkviews on the device
description:
  - Manages creating and downloading qkviews from a BIG-IP. Various
    options can be provided when creating qkviews. The qkview is important
    when dealing with F5 support. It may be required that you upload this
    qkview to the supported channels during resolution of an SR that you
    may have opened.
version_added: "2.4"
options:
filename:
description:
- Name of the qkview to create on the remote BIG-IP.
default: "localhost.localdomain.qkview"
dest:
description:
- Destination on your local filesystem when you want to save the qkview.
required: True
asm_request_log:
description:
- When C(True), includes the ASM request log data. When C(False),
excludes the ASM request log data.
default: no
choices:
- yes
- no
max_file_size:
description:
- Max file size, in bytes, of the qkview to create. By default, no max
file size is specified.
default: 0
complete_information:
description:
- Include complete information in the qkview.
default: yes
choices:
- yes
- no
exclude_core:
description:
- Exclude core files from the qkview.
default: no
choices:
- yes
- no
exclude:
description:
      - Exclude various files from the qkview.
choices:
- all
- audit
- secure
- bash_history
force:
description:
- If C(no), the file will only be transferred if the destination does not
exist.
default: yes
choices:
- yes
- no
notes:
- Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk.
- This module does not include the "max time" or "restrict to blade" options.
requirements:
- f5-sdk >= 2.2.3
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Fetch a qkview from the remote device
bigip_qkview:
asm_request_log: yes
exclude:
- audit
- secure
dest: /tmp/localhost.localdomain.qkview
delegate_to: localhost
'''
RETURN = r'''
stdout:
description: The set of responses from the commands
returned: always
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always
type: list
sample: [['...', '...'], ['...'], ['...']]
'''
import re
import os
from ansible.module_utils.six import string_types
from ansible.module_utils.f5_utils import AnsibleF5Client
from ansible.module_utils.f5_utils import AnsibleF5Parameters
from ansible.module_utils.f5_utils import HAS_F5SDK
from ansible.module_utils.f5_utils import F5ModuleError
from distutils.version import LooseVersion
try:
from ansible.module_utils.f5_utils import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
class Parameters(AnsibleF5Parameters):
    """Maps module options onto the CLI flags consumed by the qkview utility.

    Each name in ``api_attributes`` is rendered (via the matching property
    below) into a command-line argument string; ``api_params()`` collects
    them for execution on the device.
    """

    api_attributes = [
        'exclude', 'exclude_core', 'complete_information', 'max_file_size',
        'asm_request_log', 'filename_cmd'
    ]

    returnables = ['stdout', 'stdout_lines', 'warnings']

    @property
    def exclude(self):
        # Render the exclusion list as a single --exclude='...' argument.
        if self._values['exclude'] is None:
            return None
        exclude = ' '.join(self._values['exclude'])
        return "--exclude='{0}'".format(exclude)

    @property
    def exclude_raw(self):
        # The unrendered exclusion list, used for input validation.
        return self._values['exclude']

    @property
    def exclude_core(self):
        # Bug fix: this previously tested self._values['exclude'], so
        # requesting any file exclusions silently forced core files to be
        # excluded (-C) even when exclude_core was 'no'.
        if self._values['exclude_core']:
            return '-C'
        return None

    @property
    def complete_information(self):
        if self._values['complete_information']:
            return '-c'
        return None

    @property
    def max_file_size(self):
        # qkview interprets -s0 as "no size limit".
        if self._values['max_file_size'] in [None, 0]:
            return '-s0'
        return '-s {0}'.format(self._values['max_file_size'])

    @property
    def asm_request_log(self):
        if self._values['asm_request_log']:
            return '-o asm-request-log'
        return None

    @property
    def filename(self):
        """Basename of the qkview; restricted to word characters and dots."""
        pattern = r'^[\w\.]+$'
        filename = os.path.basename(self._values['filename'])
        if re.match(pattern, filename):
            return filename
        raise F5ModuleError(
            "The provided filename must contain word characters only."
        )

    @property
    def filename_cmd(self):
        return '-f {0}'.format(self.filename)

    def to_return(self):
        """Collect the returnable values; failures are deliberately ignored."""
        result = {}
        try:
            for returnable in self.returnables:
                result[returnable] = getattr(self, returnable)
            result = self._filter_params(result)
        except Exception:
            pass
        return result

    def api_params(self):
        """Render all API attributes into a dict of CLI argument strings."""
        result = {}
        for api_attribute in self.api_attributes:
            if self.api_map is not None and api_attribute in self.api_map:
                result[api_attribute] = getattr(self, self.api_map[api_attribute])
            else:
                result[api_attribute] = getattr(self, api_attribute)
        result = self._filter_params(result)
        return result
class ModuleManager(object):
    """Dispatches to a version-appropriate location manager.

    The REST-downloadable staging directory differs between TMOS releases
    (madm vs bulk), so the connected version decides which concrete
    manager performs the transfer.
    """

    def __init__(self, client):
        self.client = client

    def exec_module(self):
        # Pre-14 devices stage downloads under 'madm'; newer ones use 'bulk'.
        if self.is_version_less_than_14():
            manager = self.get_manager('madm')
        else:
            manager = self.get_manager('bulk')
        return manager.exec_module()

    def get_manager(self, type):
        # 'type' shadows the builtin; kept for interface compatibility.
        # Returns None for any other value.
        if type == 'madm':
            return MadmLocationManager(self.client)
        elif type == 'bulk':
            return BulkLocationManager(self.client)

    def is_version_less_than_14(self):
        """Return True when the connected TMOS version is below 14.0.0.

        NOTE(review): the original docstring talked about partition support
        below 13.x, which does not match this check — presumably copied
        from another module. The 14.0.0 comparison is what governs the
        madm/bulk staging choice here.
        """
        version = self.client.api.tmos_version
        if LooseVersion(version) < LooseVersion('14.0.0'):
            return True
        else:
            return False
class BaseManager(object):
    """Shared qkview workflow: generate on-box, stage, download, clean up.

    Subclasses supply ``remote_dir`` plus the location-specific
    ``_move_qkview_to_download`` and ``_download_file`` implementations.
    """

    def __init__(self, client):
        # 'want' holds the requested options; 'changes' accumulates results.
        self.client = client
        self.want = Parameters(self.client.module.params)
        self.changes = Parameters()

    def _set_changed_options(self):
        # Copy any requested returnable values into the change set.
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = Parameters(changed)

    def _to_lines(self, stdout):
        # Split command output into lines; non-string input yields [].
        lines = []
        if isinstance(stdout, string_types):
            lines = str(stdout).split('\n')
        return lines

    def exec_module(self):
        """Run the workflow and return the Ansible result dict.

        Always reports changed=False because taking a qkview does not
        alter device configuration.
        """
        result = dict()

        try:
            self.present()
        except iControlUnexpectedHTTPError as e:
            # Normalize REST errors into the module's exception type.
            raise F5ModuleError(str(e))

        result.update(**self.changes.to_return())
        result.update(dict(changed=False))
        return result

    def present(self):
        # Refuse to clobber an existing local file unless 'force' is set.
        if os.path.exists(self.want.dest) and not self.want.force:
            raise F5ModuleError(
                "The specified 'dest' file already exists"
            )
        # Validate the exclusion list before running anything on-box.
        if self.want.exclude:
            choices = ['all', 'audit', 'secure', 'bash_history']
            if not all(x in choices for x in self.want.exclude_raw):
                raise F5ModuleError(
                    "The specified excludes must be in the following list: "
                    "{0}".format(','.join(choices))
                )
        self.execute()

    def exists(self):
        """Return True if the qkview file is present in remote_dir."""
        ls = self.client.api.tm.util.unix_ls.exec_cmd(
            'run', utilCmdArgs=self.remote_dir
        )

        # Empty directories return nothing to the commandResult
        if not hasattr(ls, 'commandResult'):
            return False
        if self.want.filename in ls.commandResult:
            return True
        else:
            return False

    def execute(self):
        """Generate the qkview, download it locally, remove the remote copy."""
        response = self.execute_on_device()

        result = self._move_qkview_to_download()
        if not result:
            raise F5ModuleError(
                "Failed to move the file to a downloadable location"
            )

        self._download_file()
        if not os.path.exists(self.want.dest):
            raise F5ModuleError(
                "Failed to save the qkview to local disk"
            )

        self._delete_qkview()
        result = self.exists()
        if result:
            raise F5ModuleError(
                "Failed to remove the remote qkview"
            )

        self.changes = Parameters({
            'stdout': response,
            'stdout_lines': self._to_lines(response)
        })

    def _delete_qkview(self):
        # Remove the staged copy from the download directory.
        tpath_name = '{0}/{1}'.format(self.remote_dir, self.want.filename)
        self.client.api.tm.util.unix_rm.exec_cmd(
            'run', utilCmdArgs=tpath_name
        )

    def execute_on_device(self):
        # Run 'qkview' on the device with the rendered CLI arguments.
        params = self.want.api_params().values()
        output = self.client.api.tm.util.qkview.exec_cmd(
            'run',
            utilCmdArgs='{0}'.format(' '.join(params))
        )
        if hasattr(output, 'commandResult'):
            return str(output.commandResult)
        return None
class BulkLocationManager(BaseManager):
    """Stages and downloads qkviews via /var/config/rest/bulk (TMOS >= 14)."""

    def __init__(self, client):
        super(BulkLocationManager, self).__init__(client)
        self.remote_dir = '/var/config/rest/bulk'

    def _move_qkview_to_download(self):
        # Relocate the generated qkview from /var/tmp into the directory the
        # REST file-transfer API serves; report success as a boolean.
        try:
            move_path = '/var/tmp/{0} {1}/{0}'.format(
                self.want.filename, self.remote_dir
            )
            self.client.api.tm.util.unix_mv.exec_cmd(
                'run',
                utilCmdArgs=move_path
            )
        except Exception:
            return False
        return True

    def _download_file(self):
        # Fetch the staged qkview to the local 'dest' path and verify arrival.
        transfer = self.client.api.shared.file_transfer.bulk
        transfer.download_file(self.want.filename, self.want.dest)
        return os.path.exists(self.want.dest)
class MadmLocationManager(BaseManager):
    """Stages and downloads qkviews via the legacy /var/config/rest/madm path."""

    def __init__(self, client):
        super(MadmLocationManager, self).__init__(client)
        self.remote_dir = '/var/config/rest/madm'

    def _move_qkview_to_download(self):
        # Relocate the generated qkview into the REST-downloadable directory;
        # any failure (including a bad filename) is reported as False.
        try:
            move_path = '/var/tmp/{0} {1}/{0}'.format(
                self.want.filename, self.remote_dir
            )
            self.client.api.tm.util.unix_mv.exec_cmd(
                'run',
                utilCmdArgs=move_path
            )
        except Exception:
            return False
        return True

    def _download_file(self):
        # Fetch the staged qkview to the local 'dest' path and verify arrival.
        transfer = self.client.api.shared.file_transfer.madm
        transfer.download_file(self.want.filename, self.want.dest)
        return os.path.exists(self.want.dest)
class ArgumentSpec(object):
    """Declares the Ansible argument spec for the bigip_qkview module."""

    def __init__(self):
        self.supports_check_mode = True
        self.argument_spec = dict(
            filename=dict(
                default='localhost.localdomain.qkview'
            ),
            asm_request_log=dict(
                type='bool',
                default='no',
            ),
            # No default: Parameters.max_file_size maps None to -s0 (no limit).
            max_file_size=dict(
                type='int',
            ),
            # NOTE(review): DOCUMENTATION advertises 'yes' as the default for
            # complete_information, but the spec defaults to 'no' — confirm
            # which is intended.
            complete_information=dict(
                default='no',
                type='bool'
            ),
            exclude_core=dict(
                default="no",
                type='bool'
            ),
            force=dict(
                default=True,
                type='bool'
            ),
            exclude=dict(
                type='list'
            ),
            dest=dict(
                type='path',
                required=True
            )
        )
        self.f5_product_name = 'bigip'
def main():
    """Module entry point: build the F5 client, run the manager, report."""
    if not HAS_F5SDK:
        raise F5ModuleError("The python f5-sdk module is required")

    spec = ArgumentSpec()
    client = AnsibleF5Client(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
        f5_product_name=spec.f5_product_name
    )

    try:
        # Any module-level failure surfaces as F5ModuleError and is turned
        # into a fail_json result; success exits with the collected results.
        manager = ModuleManager(client)
        client.module.exit_json(**manager.exec_module())
    except F5ModuleError as e:
        client.module.fail_json(msg=str(e))


if __name__ == '__main__':
    main()
| gpl-3.0 |
avroshk/VRDAW | VRDAW_working/LiveOSCCallbacks.py | 1 | 50281 | """
# Copyright (C) 2007 Rob King (rob@re-mu.org)
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# For questions regarding this module contact
# Rob King <rob@e-mu.org> or visit http://www.e-mu.org
This file contains all the current Live OSC callbacks.
"""
import Live
import RemixNet
import OSC
import LiveUtils
import sys
from Logger import log
class LiveOSCCallbacks:
def __init__(self, c_instance, oscEndpoint):
self.oscEndpoint = oscEndpoint
self.callbackManager = oscEndpoint.callbackManager
self.c_instance = c_instance
self.callbackManager.add("/live/tempo", self.tempoCB)
self.callbackManager.add("/live/time", self.timeCB)
self.callbackManager.add("/live/next/cue", self.nextCueCB)
self.callbackManager.add("/live/prev/cue", self.prevCueCB)
self.callbackManager.add("/live/play", self.playCB)
self.callbackManager.add("/live/play/continue", self.playContinueCB)
self.callbackManager.add("/live/play/selection", self.playSelectionCB)
self.callbackManager.add("/live/play/clip", self.playClipCB)
self.callbackManager.add("/live/play/scene", self.playSceneCB)
self.callbackManager.add("/live/stop", self.stopCB)
self.callbackManager.add("/live/stop/clip", self.stopClipCB)
self.callbackManager.add("/live/stop/track", self.stopTrackCB)
self.callbackManager.add("/live/scenes", self.scenesCB)
self.callbackManager.add("/live/tracks", self.tracksCB)
self.callbackManager.add("/live/name/scene", self.nameSceneCB)
self.callbackManager.add("/live/scene", self.sceneCB)
self.callbackManager.add("/live/name/sceneblock", self.nameSceneBlockCB)
self.callbackManager.add("/live/name/track", self.nameTrackCB)
self.callbackManager.add("/live/name/trackblock", self.nameTrackBlockCB)
self.callbackManager.add("/live/name/clip", self.nameClipCB)
self.callbackManager.add("/live/name/clipblock", self.nameClipBlockCB)
self.callbackManager.add("/live/arm", self.armTrackCB)
self.callbackManager.add("/live/mute", self.muteTrackCB)
self.callbackManager.add("/live/solo", self.soloTrackCB)
self.callbackManager.add("/live/volume", self.volumeCB)
self.callbackManager.add("/live/pan", self.panCB)
self.callbackManager.add("/live/send", self.sendCB)
self.callbackManager.add("/live/pitch", self.pitchCB)
self.callbackManager.add("/live/track/jump", self.trackJump)
self.callbackManager.add("/live/track/info", self.trackInfoCB)
self.callbackManager.add("/live/undo", self.undoCB)
self.callbackManager.add("/live/redo", self.redoCB)
self.callbackManager.add("/live/play/clipslot", self.playClipSlotCB)
self.callbackManager.add("/live/scene/view", self.viewSceneCB)
self.callbackManager.add("/live/track/view", self.viewTrackCB)
self.callbackManager.add("/live/return/view", self.viewTrackCB)
self.callbackManager.add("/live/master/view", self.mviewTrackCB)
self.callbackManager.add("/live/track/device/view", self.viewDeviceCB)
self.callbackManager.add("/live/return/device/view", self.viewDeviceCB)
self.callbackManager.add("/live/master/device/view", self.mviewDeviceCB)
self.callbackManager.add("/live/clip/view", self.viewClipCB)
self.callbackManager.add("/live/detail/view", self.detailViewCB)
self.callbackManager.add("/live/overdub", self.overdubCB)
self.callbackManager.add("/live/state", self.stateCB)
self.callbackManager.add("/live/clip/info", self.clipInfoCB)
self.callbackManager.add("/live/return/mute", self.muteTrackCB)
self.callbackManager.add("/live/return/solo", self.soloTrackCB)
self.callbackManager.add("/live/return/volume", self.volumeCB)
self.callbackManager.add("/live/return/pan", self.panCB)
self.callbackManager.add("/live/return/send", self.sendCB)
self.callbackManager.add("/live/master/volume", self.volumeCB)
self.callbackManager.add("/live/master/pan", self.panCB)
self.callbackManager.add("/live/devicelist", self.devicelistCB)
self.callbackManager.add("/live/return/devicelist", self.devicelistCB)
self.callbackManager.add("/live/master/devicelist", self.mdevicelistCB)
self.callbackManager.add("/live/device/range", self.devicerangeCB)
self.callbackManager.add("/live/return/device/range", self.devicerangeCB)
self.callbackManager.add("/live/master/device/range", self.mdevicerangeCB)
self.callbackManager.add("/live/device", self.deviceCB)
self.callbackManager.add("/live/return/device", self.deviceCB)
self.callbackManager.add("/live/master/device", self.mdeviceCB)
self.callbackManager.add("/live/clip/loopstate", self.loopStateCB)
self.callbackManager.add("/live/clip/loopstart", self.loopStartCB)
self.callbackManager.add("/live/clip/loopend", self.loopEndCB)
self.callbackManager.add("/live/clip/loopstate_id", self.loopStateCB)
self.callbackManager.add("/live/clip/loopstart_id", self.loopStartCB)
self.callbackManager.add("/live/clip/loopend_id", self.loopEndCB)
self.callbackManager.add("/live/clip/warping", self.warpingCB)
self.callbackManager.add("/live/clip/signature", self.sigCB)
self.callbackManager.add("/live/clip/add_note", self.addNoteCB)
self.callbackManager.add("/live/clip/notes", self.getNotesCB)
self.callbackManager.add("/live/master/crossfader", self.crossfaderCB)
self.callbackManager.add("/live/track/crossfader", self.trackxfaderCB)
self.callbackManager.add("/live/return/crossfader", self.trackxfaderCB)
self.callbackManager.add("/live/quantization", self.quantizationCB)
self.callbackManager.add("/live/selection", self.selectionCB)
#non-liveOSC callbacks
self.callbackManager.add("/live/beats", self.selectionCB)
def sigCB(self, msg, source):
""" Called when a /live/clip/signature message is recieved
"""
track = msg[2]
clip = msg[3]
c = LiveUtils.getSong().visible_tracks[track].clip_slots[clip].clip
if len(msg) == 4:
self.oscEndpoint.send("/live/clip/signature", (track, clip, c.signature_numerator, c.signature_denominator))
if len(msg) == 6:
self.oscEndpoint.send("/live/clip/signature", 1)
c.signature_denominator = msg[5]
c.signature_numerator = msg[4]
def warpingCB(self, msg, source):
""" Called when a /live/clip/warping message is recieved
"""
track = msg[2]
clip = msg[3]
if len(msg) == 4:
state = LiveUtils.getSong().visible_tracks[track].clip_slots[clip].clip.warping
self.oscEndpoint.send("/live/clip/warping", (track, clip, int(state)))
elif len(msg) == 5:
LiveUtils.getSong().visible_tracks[track].clip_slots[clip].clip.warping = msg[4]
def selectionCB(self, msg, source):
""" Called when a /live/selection message is received
"""
if len(msg) == 6:
self.c_instance.set_session_highlight(msg[2], msg[3], msg[4], msg[5], 0)
    def trackxfaderCB(self, msg, source):
        """ Called when a /live/track/crossfader or /live/return/crossfader message is received

        (int track)              Replies with the track's crossfade assignment index and display name
        (int track, int assign)  Sets the track's crossfade assignment (A / none / B)
        """
        # ty is 1 when the message addresses a return track, 0 otherwise.
        ty = msg[0] == '/live/return/crossfader' and 1 or 0

        if len(msg) == 3:
            # Query: look up the assignment index and its human-readable name.
            track = msg[2]

            if ty == 1:
                assign = LiveUtils.getSong().return_tracks[track].mixer_device.crossfade_assign
                name = LiveUtils.getSong().return_tracks[track].mixer_device.crossfade_assignments.values[assign]

                self.oscEndpoint.send("/live/return/crossfader", (track, str(assign), str(name)))
            else:
                assign = LiveUtils.getSong().visible_tracks[track].mixer_device.crossfade_assign
                name = LiveUtils.getSong().visible_tracks[track].mixer_device.crossfade_assignments.values[assign]

                self.oscEndpoint.send("/live/track/crossfader", (track, str(assign), str(name)))

        elif len(msg) == 4:
            # Set the crossfade assignment.
            track = msg[2]
            assign = msg[3]

            if ty == 1:
                LiveUtils.getSong().return_tracks[track].mixer_device.crossfade_assign = assign
            else:
                LiveUtils.getSong().visible_tracks[track].mixer_device.crossfade_assign = assign
def tempoCB(self, msg, source):
"""Called when a /live/tempo message is received.
Messages:
/live/tempo Request current tempo, replies with /live/tempo (float tempo)
/live/tempo (float tempo) Set the tempo, replies with /live/tempo (float tempo)
"""
if len(msg) == 2 or (len(msg) == 3 and msg[2] == "query"):
self.oscEndpoint.send("/live/tempo", LiveUtils.getTempo())
elif len(msg) == 3:
tempo = msg[2]
LiveUtils.setTempo(tempo)
def timeCB(self, msg, source):
"""Called when a /live/time message is received.
Messages:
/live/time Request current song time, replies with /live/time (float time)
/live/time (float time) Set the time , replies with /live/time (float time)
"""
if len(msg) == 2 or (len(msg) == 3 and msg[2] == "query"):
self.oscEndpoint.send("/live/time", float(LiveUtils.currentTime()))
elif len(msg) == 3:
time = msg[2]
LiveUtils.currentTime(time)
    def nextCueCB(self, msg, source):
        """Called when a /live/next/cue message is received.

        Jumps the playhead to the next cue point; takes no arguments.
        """
        LiveUtils.jumpToNextCue()
    def prevCueCB(self, msg, source):
        """Called when a /live/prev/cue message is received.

        Jumps the playhead to the previous cue point; takes no arguments.
        """
        LiveUtils.jumpToPrevCue()
    def playCB(self, msg, source):
        """Called when a /live/play message is received.

        Starts song playback from the beginning; takes no arguments.
        """
        LiveUtils.play()
    def playContinueCB(self, msg, source):
        """Called when a /live/play/continue message is received.

        Resumes playback from the current song position; takes no arguments.
        """
        LiveUtils.continuePlaying()
    def playSelectionCB(self, msg, source):
        """Called when a /live/play/selection message is received.

        Plays the currently selected region; takes no arguments.
        """
        LiveUtils.playSelection()
def playClipCB(self, msg, source):
"""Called when a /live/play/clip message is received.
Messages:
/live/play/clip (int track, int clip) Launches clip number clip in track number track
"""
if len(msg) == 4:
track = msg[2]
clip = msg[3]
LiveUtils.launchClip(track, clip)
def playSceneCB(self, msg, source):
"""Called when a /live/play/scene message is received.
Messages:
/live/play/scene (int scene) Launches scene number scene
"""
if len(msg) == 3:
scene = msg[2]
LiveUtils.launchScene(scene)
    def stopCB(self, msg, source):
        """Called when a /live/stop message is received.

        Stops song playback; takes no arguments.
        """
        LiveUtils.stop()
def stopClipCB(self, msg, source):
"""Called when a /live/stop/clip message is received.
Messages:
/live/stop/clip (int track, int clip) Stops clip number clip in track number track
"""
if len(msg) == 4:
track = msg[2]
clip = msg[3]
LiveUtils.stopClip(track, clip)
def stopTrackCB(self, msg, source):
"""Called when a /live/stop/track message is received.
Messages:
/live/stop/track (int track, int clip) Stops track number track
"""
if len(msg) == 3:
track = msg[2]
LiveUtils.stopTrack(track)
def scenesCB(self, msg, source):
"""Called when a /live/scenes message is received.
Messages:
/live/scenes no argument or 'query' Returns the total number of scenes
"""
if len(msg) == 2 or (len(msg) == 3 and msg[2] == "query"):
sceneTotal = len(LiveUtils.getScenes())
self.oscEndpoint.send("/live/scenes", (sceneTotal))
return
def sceneCB(self, msg, source):
"""Called when a /live/scene message is received.
Messages:
/live/scene no argument or 'query' Returns the currently playing scene number
"""
if len(msg) == 2 or (len(msg) == 3 and msg[2] == "query"):
selected_scene = LiveUtils.getSong().view.selected_scene
scenes = LiveUtils.getScenes()
index = 0
selected_index = 0
for scene in scenes:
index = index + 1
if scene == selected_scene:
selected_index = index
self.oscEndpoint.send("/live/scene", (selected_index))
elif len(msg) == 3:
scene = msg[2]
LiveUtils.getSong().view.selected_scene = LiveUtils.getSong().scenes[scene]
def tracksCB(self, msg, source):
"""Called when a /live/tracks message is received.
Messages:
/live/tracks no argument or 'query' Returns the total number of scenes
"""
if len(msg) == 2 or (len(msg) == 3 and msg[2] == "query"):
trackTotal = len(LiveUtils.getTracks())
self.oscEndpoint.send("/live/tracks", (trackTotal))
return
    def nameSceneCB(self, msg, source):
        """Called when a /live/name/scene message is received.

        Messages:
        /live/name/scene                           Replies with one /live/name/scene (int scene, string name) per scene
        /live/name/scene (int scene)               Replies with that scene's name
        /live/name/scene (int scene, string name)  Renames scene number scene
        """
        # Requesting all scene names: bundle one message per scene.
        if len(msg) == 2 or (len(msg) == 3 and msg[2] == "query"):
            bundle = OSC.OSCBundle()
            sceneNumber = 0
            for scene in LiveUtils.getScenes():
                bundle.append("/live/name/scene", (sceneNumber, str(scene.name)))
                sceneNumber = sceneNumber + 1
            self.oscEndpoint.sendMessage(bundle)
            return
        # Requesting a single scene name.
        if len(msg) == 3:
            sceneNumber = msg[2]
            self.oscEndpoint.send("/live/name/scene", (sceneNumber, str(LiveUtils.getScene(sceneNumber).name)))
            return
        # Renaming a scene.
        if len(msg) == 4:
            sceneNumber = msg[2]
            name = msg[3]
            LiveUtils.getScene(sceneNumber).name = name
def nameSceneBlockCB(self, msg, source):
"""Called when a /live/name/sceneblock message is received.
/live/name/clipblock (int offset, int blocksize) Returns a list of blocksize scene names starting at offset
"""
if len(msg) == 4:
block = []
sceneOffset = msg[2]
blocksize = msg[3]
for scene in range(0, blocksize):
block.extend([str(LiveUtils.getScene(sceneOffset+scene).name)])
self.oscEndpoint.send("/live/name/sceneblock", block)
    def nameTrackCB(self, msg, source):
        """Called when a /live/name/track message is received.

        Messages:
        /live/name/track                           Replies with one /live/name/track (int track, string name, int color, int has_midi_input) per track
        /live/name/track (int track)               Replies with that track's name, color and MIDI-input flag
        /live/name/track (int track, string name)  Renames track number track
        """
        # Requesting all track names.
        if len(msg) == 2 or (len(msg) == 3 and msg[2] == "query"):
            trackNumber = 0
            bundle = OSC.OSCBundle()
            for track in LiveUtils.getTracks():
                bundle.append("/live/name/track", (trackNumber, str(track.name), track.color, int(track.has_midi_input)))
                trackNumber = trackNumber + 1
            self.oscEndpoint.sendMessage(bundle)
            return
        # Requesting a single track name.
        if len(msg) == 3:
            trackNumber = msg[2]
            self.oscEndpoint.send("/live/name/track", (trackNumber, str(LiveUtils.getTrack(trackNumber).name), LiveUtils.getTrack(trackNumber).color, int(LiveUtils.getTrack(trackNumber).has_midi_input)))
            return
        # Renaming a track.
        if len(msg) == 4:
            trackNumber = msg[2]
            name = msg[3]
            LiveUtils.getTrack(trackNumber).name = name
def nameTrackBlockCB(self, msg, source):
"""Called when a /live/name/trackblock message is received.
/live/name/trackblock (int offset, int blocksize) Returns a list of blocksize track names starting at offset
"""
if len(msg) == 4:
block = []
trackOffset = msg[2]
blocksize = msg[3]
for track in range(0, blocksize):
block.extend([str(LiveUtils.getTrack(trackOffset+track).name)])
self.oscEndpoint.send("/live/name/trackblock", block)
def nameClipBlockCB(self, msg, source):
"""Called when a /live/name/clipblock message is received.
/live/name/clipblock (int track, int clip, blocksize x/tracks, blocksize y/clipslots) Returns a list of clip names for a block of clips (int blockX, int blockY, clipname)
"""
#Requesting a block of clip names X1 Y1 X2 Y2 where X1,Y1 is the first clip (track, clip) of the block, X2 the number of tracks to cover and Y2 the number of scenes
if len(msg) == 6:
block = []
trackOffset = msg[2]
clipOffset = msg[3]
blocksizeX = msg[4]
blocksizeY = msg[5]
for clip in range(0, blocksizeY):
for track in range(0, blocksizeX):
trackNumber = trackOffset+track
clipNumber = clipOffset+clip
if LiveUtils.getClip(trackNumber, clipNumber) != None:
block.extend([str(LiveUtils.getClip(trackNumber, clipNumber).name)])
else:
block.extend([""])
self.oscEndpoint.send("/live/name/clipblock", block)
    def nameClipCB(self, msg, source):
        """Called when a /live/name/clip message is received.

        Messages:
        /live/name/clip                                     Replies with one /live/name/clip (int track, int clip, string name, int color) per populated slot
        /live/name/clip (int track, int clip)               Replies with that clip's name and color
        /live/name/clip (int track, int clip, string name)  Renames the clip; a 6th argument (msg[5]) additionally sets the color
        """
        # Requesting all clip names: one bundle per track.
        if len(msg) == 2 or (len(msg) == 3 and msg[2] == "query"):
            trackNumber = 0
            clipNumber = 0
            for track in LiveUtils.getTracks():
                bundle = OSC.OSCBundle()
                for clipSlot in track.clip_slots:
                    if clipSlot.clip != None:
                        bundle.append("/live/name/clip", (trackNumber, clipNumber, str(clipSlot.clip.name), clipSlot.clip.color))
                    clipNumber = clipNumber + 1
                self.oscEndpoint.sendMessage(bundle)
                clipNumber = 0
                trackNumber = trackNumber + 1
            return
        # Requesting a single clip name.
        if len(msg) == 4:
            trackNumber = msg[2]
            clipNumber = msg[3]
            self.oscEndpoint.send("/live/name/clip", (trackNumber, clipNumber, str(LiveUtils.getClip(trackNumber, clipNumber).name), LiveUtils.getClip(trackNumber, clipNumber).color))
            return
        # Renaming a clip.  Note: for a 6-argument message BOTH blocks run,
        # so the clip is renamed (msg[4]) and then recolored (msg[5]).
        if len(msg) >= 5:
            trackNumber = msg[2]
            clipNumber = msg[3]
            name = msg[4]
            LiveUtils.getClip(trackNumber, clipNumber).name = name
        if len(msg) >= 6:
            trackNumber = msg[2]
            clipNumber = msg[3]
            color = msg[5]
            LiveUtils.getClip(trackNumber, clipNumber).color = color
def addNoteCB(self, msg, source):
"""Called when a /live/clip/add_note message is received
Messages:
/live/clip/add_note (int pitch) (double time) (double duration) (int velocity) (bool muted) Add the given note to the clip
"""
trackNumber = msg[2]
clipNumber = msg[3]
pitch = msg[4]
time = msg[5]
duration = msg[6]
velocity = msg[7]
muted = msg[8]
LiveUtils.getClip(trackNumber, clipNumber).deselect_all_notes()
notes = ((pitch, time, duration, velocity, muted),)
LiveUtils.getClip(trackNumber, clipNumber).replace_selected_notes(notes)
self.oscEndpoint.send('/live/clip/note', (trackNumber, clipNumber, pitch, time, duration, velocity, muted))
def getNotesCB(self, msg, source):
"""Called when a /live/clip/notes message is received
Messages:
/live/clip/notes Return all notes in the clip in /live/clip/note messages. Each note is sent in the format
(int trackNumber) (int clipNumber) (int pitch) (double time) (double duration) (int velocity) (int muted)
"""
trackNumber = msg[2]
clipNumber = msg[3]
LiveUtils.getClip(trackNumber, clipNumber).select_all_notes()
bundle = OSC.OSCBundle()
for note in LiveUtils.getClip(trackNumber, clipNumber).get_selected_notes():
pitch = note[0]
time = note[1]
duration = note[2]
velocity = note[3]
muted = 0
if note[4]:
muted = 1
bundle.append('/live/clip/note', (trackNumber, clipNumber, pitch, time, duration, velocity, muted))
self.oscEndpoint.sendMessage(bundle)
def armTrackCB(self, msg, source):
"""Called when a /live/arm message is received.
Messages:
/live/arm (int track) (int armed/disarmed) Arms track number track
"""
track = msg[2]
if len(msg) == 4:
if msg[3] == 1:
LiveUtils.armTrack(track)
else:
LiveUtils.disarmTrack(track)
# Return arm status
elif len(msg) == 3:
status = LiveUtils.getTrack(track).arm
self.oscEndpoint.send("/live/arm", (track, int(status)))
def muteTrackCB(self, msg, source):
"""Called when a /live/mute message is received.
Messages:
/live/mute (int track) Mutes track number track
"""
ty = msg[0] == '/live/return/mute' and 1 or 0
track = msg[2]
if len(msg) == 4:
if msg[3] == 1:
LiveUtils.muteTrack(track, ty)
else:
LiveUtils.unmuteTrack(track, ty)
elif len(msg) == 3:
if ty == 1:
status = LiveUtils.getSong().return_tracks[track].mute
self.oscEndpoint.send("/live/return/mute", (track, int(status)))
else:
status = LiveUtils.getTrack(track).mute
self.oscEndpoint.send("/live/mute", (track, int(status)))
def soloTrackCB(self, msg, source):
"""Called when a /live/solo message is received.
Messages:
/live/solo (int track) Solos track number track
"""
ty = msg[0] == '/live/return/solo' and 1 or 0
track = msg[2]
if len(msg) == 4:
if msg[3] == 1:
LiveUtils.soloTrack(track, ty)
else:
LiveUtils.unsoloTrack(track, ty)
elif len(msg) == 3:
if ty == 1:
status = LiveUtils.getSong().return_tracks[track].solo
self.oscEndpoint.send("/live/return/solo", (track, int(status)))
else:
status = LiveUtils.getTrack(track).solo
self.oscEndpoint.send("/live/solo", (track, int(status)))
    def volumeCB(self, msg, source):
        """Called when a /live/volume, /live/return/volume or /live/master/volume message is received.

        Messages:
        /live/volume (int track)                             Replies with /live/volume (int track, float volume(0.0 to 1.0))
        /live/volume (int track, float volume(0.0 to 1.0))   Sets track number track's volume
        """
        # ty encodes the addressed mixer strip: 0 = track, 1 = return, 2 = master.
        if msg[0] == '/live/return/volume':
            ty = 1
        elif msg[0] == '/live/master/volume':
            ty = 2
        else:
            ty = 0

        if len(msg) == 2 and ty == 2:
            # Query master volume.
            self.oscEndpoint.send("/live/master/volume", LiveUtils.getSong().master_track.mixer_device.volume.value)
        elif len(msg) == 3 and ty == 2:
            # Set master volume.
            volume = msg[2]
            LiveUtils.getSong().master_track.mixer_device.volume.value = volume
        elif len(msg) == 4:
            # Set volume on a track or return track.
            track = msg[2]
            volume = msg[3]
            if ty == 0:
                LiveUtils.trackVolume(track, volume)
            elif ty == 1:
                LiveUtils.getSong().return_tracks[track].mixer_device.volume.value = volume
        elif len(msg) == 3:
            # Query a track's or return track's volume.
            track = msg[2]
            if ty == 1:
                self.oscEndpoint.send("/live/return/volume", (track, LiveUtils.getSong().return_tracks[track].mixer_device.volume.value))
            else:
                self.oscEndpoint.send("/live/volume", (track, LiveUtils.trackVolume(track)))
def panCB(self, msg, source):
"""Called when a /live/pan message is received.
Messages:
/live/pan (int track) Returns the pan of track number track as: /live/pan (int track, float pan(-1.0 to 1.0))
/live/pan (int track, float pan(-1.0 to 1.0)) Sets track number track's pan to pan
"""
if msg[0] == '/live/return/pan':
ty = 1
elif msg[0] == '/live/master/pan':
ty = 2
else:
ty = 0
if len(msg) == 2 and ty == 2:
self.oscEndpoint.send("/live/master/pan", LiveUtils.getSong().master_track.mixer_device.panning.value)
elif len(msg) == 3 and ty == 2:
pan = msg[2]
LiveUtils.getSong().master_track.mixer_device.panning.value = pan
elif len(msg) == 4:
track = msg[2]
pan = msg[3]
if ty == 0:
LiveUtils.trackPan(track, pan)
elif ty == 1:
LiveUtils.getSong().return_tracks[track].mixer_device.panning.value = pan
elif len(msg) == 3:
track = msg[2]
if ty == 1:
self.oscEndpoint.send("/live/pan", (track, LiveUtils.getSong().return_tracks[track].mixer_device.panning.value))
else:
self.oscEndpoint.send("/live/pan", (track, LiveUtils.trackPan(track)))
    def sendCB(self, msg, source):
        """Called when a /live/send message is received.

        Messages:
        /live/send (int track, int send)                           Returns the send level of send (send) on track number track as: /live/send (int track, int send, float level(0.0 to 1.0))
        /live/send (int track, int send, float level(0.0 to 1.0)) Sets the send (send) of track number (track)'s level to (level)
        """
        # 1 when addressing a return track's sends, 0 for normal tracks.
        ty = msg[0] == '/live/return/send' and 1 or 0
        track = msg[2]
        if len(msg) == 5:
            # Setter: (track, send, level).
            send = msg[3]
            level = msg[4]
            if ty == 1:
                LiveUtils.getSong().return_tracks[track].mixer_device.sends[send].value = level
            else:
                LiveUtils.trackSend(track, send, level)
        elif len(msg) == 4:
            # Query a single send's level.
            send = msg[3]
            if ty == 1:
                self.oscEndpoint.send("/live/return/send", (track, send, float(LiveUtils.getSong().return_tracks[track].mixer_device.sends[send].value)))
            else:
                self.oscEndpoint.send("/live/send", (track, send, float(LiveUtils.trackSend(track, send))))
        elif len(msg) == 3:
            # Query all sends: reply is (track, idx0, level0, idx1, level1, ...).
            if ty == 1:
                sends = LiveUtils.getSong().return_tracks[track].mixer_device.sends
            else:
                sends = LiveUtils.getSong().visible_tracks[track].mixer_device.sends
            so = [track]
            for i in range(len(sends)):
                so.append(i)
                so.append(float(sends[i].value))
            if ty == 1:
                self.oscEndpoint.send("/live/return/send", tuple(so))
            else:
                self.oscEndpoint.send("/live/send", tuple(so))
    def pitchCB(self, msg, source):
        """Called when a /live/pitch message is received.

        Messages:
        /live/pitch (int track, int clip)                                              Returns the clip's pitch as: /live/pitch (int track, int clip, int coarse(-48 to 48), int fine (-50 to 50))
        /live/pitch (int track, int clip, int coarse(-48 to 48), int fine (-50 to 50)) Sets clip number clip in track number track's pitch to coarse / fine
        """
        if len(msg) == 6:
            # Setter form.
            track = msg[2]
            clip = msg[3]
            coarse = msg[4]
            fine = msg[5]
            LiveUtils.clipPitch(track, clip, coarse, fine)
        # NOTE(review): plain 'if' rather than 'elif' — harmless because the
        # message lengths are mutually exclusive, but 'elif' would be clearer.
        if len(msg) ==4:
            # Query form.
            track = msg[2]
            clip = msg[3]
            self.oscEndpoint.send("/live/pitch", LiveUtils.clipPitch(track, clip))
def trackJump(self, msg, source):
"""Called when a /live/track/jump message is received.
Messages:
/live/track/jump (int track, float beats) Jumps in track's currently running session clip by beats
"""
if len(msg) == 4:
track = msg[2]
beats = msg[3]
track = LiveUtils.getTrack(track)
track.jump_in_running_session_clip(beats)
    def trackInfoCB(self, msg, source):
        """Called when a /live/track/info message is received.

        Messages:
        /live/track/info (int track) Returns clip slot status' for all clips in a track in the form /live/track/info (tracknumber, armed (clipnumber, state, length))
        [state: 0 = Empty, 1 = Has Clip, 2 = Playing, 3 = Triggered]
        """
        clipslots = LiveUtils.getClipSlots()
        new = []
        if len(msg) == 3:
            # Single-track query: seed tracknum so the loop's +1 restores msg[2].
            new.append(clipslots[msg[2]])
            tracknum = msg[2] - 1
        else:
            # No track argument: report every track.
            new = clipslots
            tracknum = -1
        for track in new:
            tracknum = tracknum + 1
            clipnum = -1
            tmptrack = LiveUtils.getTrack(tracknum)
            armed = tmptrack.arm and 1 or 0
            # Reply payload: (tracknum, armed, clip0, state0, len0, clip1, ...).
            li = [tracknum, armed]
            for clipSlot in track:
                clipnum = clipnum + 1
                li.append(clipnum);
                if clipSlot.clip != None:
                    clip = clipSlot.clip
                    if clip.is_playing == 1:
                        li.append(2)
                        li.append(clip.length)
                    elif clip.is_triggered == 1:
                        li.append(3)
                        li.append(clip.length)
                    else:
                        li.append(1)
                        li.append(clip.length)
                else:
                    # Empty slot: state 0 with zero length.
                    li.append(0)
                    li.append(0.0)
            tu = tuple(li)
            self.oscEndpoint.send("/live/track/info", tu)
def undoCB(self, msg, source):
"""Called when a /live/undo message is received.
Messages:
/live/undo Requests the song to undo the last action
"""
LiveUtils.getSong().undo()
def redoCB(self, msg, source):
"""Called when a /live/redo message is received.
Messages:
/live/redo Requests the song to redo the last action
"""
LiveUtils.getSong().redo()
def playClipSlotCB(self, msg, source):
"""Called when a /live/play/clipslot message is received.
Messages:
/live/play/clipslot (int track, int clip) Launches clip number clip in track number track
"""
if len(msg) == 4:
track_num = msg[2]
clip_num = msg[3]
track = LiveUtils.getTrack(track_num)
clipslot = track.clip_slots[clip_num]
clipslot.fire()
    def viewSceneCB(self, msg, source):
        """Called when a /live/scene/view message is received.

        Messages:
        /live/scene/view (int scene) Selects a scene to view
        """
        if len(msg) == 3:
            scene = msg[2]
            LiveUtils.getSong().view.selected_scene = LiveUtils.getSong().scenes[scene]
    def viewTrackCB(self, msg, source):
        """Called when a /live/track/view or /live/return/view message is received.

        Messages:
        /live/track/view (int track) Selects a track to view
        """
        # 1 when addressing a return track.
        ty = msg[0] == '/live/return/view' and 1 or 0
        track_num = msg[2]
        if len(msg) == 3:
            if ty == 1:
                track = LiveUtils.getSong().return_tracks[track_num]
            else:
                track = LiveUtils.getSong().visible_tracks[track_num]
            LiveUtils.getSong().view.selected_track = track
            # Also switch the app to the device-chain detail view.
            Live.Application.get_application().view.show_view("Detail/DeviceChain")
            #track.view.select_instrument()
    def mviewTrackCB(self, msg, source):
        """Called when a /live/master/view message is received.

        Messages:
        /live/master/view  Selects the master track to view.
        """
        track = LiveUtils.getSong().master_track
        LiveUtils.getSong().view.selected_track = track
        # Also switch the app to the device-chain detail view.
        Live.Application.get_application().view.show_view("Detail/DeviceChain")
        #track.view.select_instrument()
    def viewClipCB(self, msg, source):
        """Called when a /live/clip/view message is received.

        Messages:
        /live/clip/view (int track, int clip) Selects a clip to view
        """
        track = LiveUtils.getSong().visible_tracks[msg[2]]
        if len(msg) == 4:
            clip = msg[3]
        else:
            # No clip argument: default to the first slot.
            clip = 0
        LiveUtils.getSong().view.selected_track = track
        LiveUtils.getSong().view.detail_clip = track.clip_slots[clip].clip
        Live.Application.get_application().view.show_view("Detail/Clip")
def detailViewCB(self, msg, source):
"""Called when a /live/detail/view message is received. Used to switch between clip/track detail
Messages:
/live/detail/view (int) Selects view where 0=clip detail, 1=track detail
"""
if len(msg) == 3:
if msg[2] == 0:
Live.Application.get_application().view.show_view("Detail/Clip")
elif msg[2] == 1:
Live.Application.get_application().view.show_view("Detail/DeviceChain")
    def viewDeviceCB(self, msg, source):
        """Called when a /live/track/device/view or /live/return/device/view message is received.

        Messages:
        /live/track/device/view (int track, int device) Selects a device to view
        """
        # 1 when addressing a return track.
        ty = msg[0] == '/live/return/device/view' and 1 or 0
        track_num = msg[2]
        if len(msg) == 4:
            if ty == 1:
                track = LiveUtils.getSong().return_tracks[track_num]
            else:
                track = LiveUtils.getSong().visible_tracks[track_num]
            LiveUtils.getSong().view.selected_track = track
            # Select the requested device, then focus the device chain view.
            LiveUtils.getSong().view.select_device(track.devices[msg[3]])
            Live.Application.get_application().view.show_view("Detail/DeviceChain")
    def mviewDeviceCB(self, msg, source):
        """Called when a /live/master/device/view message is received.

        Messages:
        /live/master/device/view (int device) Selects a master-track device to view.
        """
        track = LiveUtils.getSong().master_track
        if len(msg) == 3:
            LiveUtils.getSong().view.selected_track = track
            LiveUtils.getSong().view.select_device(track.devices[msg[2]])
            Live.Application.get_application().view.show_view("Detail/DeviceChain")
def overdubCB(self, msg, source):
"""Called when a /live/overdub message is received.
Messages:
/live/overdub (int on/off) Enables/disables overdub
"""
if len(msg) == 3:
overdub = msg[2]
LiveUtils.getSong().overdub = overdub
    def stateCB(self, msg, source):
        """Called when a /live/state is received.

        Messages:
        /live/state Returns the current tempo and overdub status
        """
        # Accept both the bare form and an explicit "query" argument.
        if len(msg) == 2 or (len(msg) == 3 and msg[2] == "query"):
            tempo = LiveUtils.getTempo()
            overdub = LiveUtils.getSong().overdub
            self.oscEndpoint.send("/live/state", (tempo, int(overdub)))
    def clipInfoCB(self,msg, source):
        """Called when a /live/clip/info message is received.

        Messages:
        /live/clip/info (int track, int clip) Gets the status of a single clip in the form /live/clip/info (tracknumber, clipnumber, state)
        [state: 0 = Empty, 1 = Has Clip, 2 = Playing, 3 = Triggered]
        """
        if len(msg) == 4:
            trackNumber = msg[2]
            clipNumber = msg[3]
            clip = LiveUtils.getClip(trackNumber, clipNumber)
            # 0 = empty slot; upgraded below when a clip exists.
            playing = 0
            if clip != None:
                playing = 1
                if clip.is_playing == 1:
                    playing = 2
                elif clip.is_triggered == 1:
                    playing = 3
            self.oscEndpoint.send("/live/clip/info", (trackNumber, clipNumber, playing))
        return
    def deviceCB(self, msg, source):
        """Called when a /live/device or /live/return/device message is received.

        Messages:
        /live/device (int track, int device)                         Replies with all parameters: (track, device, i, value, name, min, max, ...)
        /live/device (int track, int device, int param)              Replies with one parameter: (track, device, param, value, name, min, max)
        /live/device (int track, int device, int param, float value) Sets the parameter's value
        """
        # 1 when addressing a return track's devices.
        ty = msg[0] == '/live/return/device' and 1 or 0
        track = msg[2]
        if len(msg) == 4:
            # Dump every parameter of the device.
            device = msg[3]
            po = [track, device]
            if ty == 1:
                params = LiveUtils.getSong().return_tracks[track].devices[device].parameters
            else:
                params = LiveUtils.getSong().visible_tracks[track].devices[device].parameters
            for i in range(len(params)):
                po.append(i)
                po.append(float(params[i].value))
                po.append(str(params[i].name))
                po.append(params[i].min)
                po.append(params[i].max)
            self.oscEndpoint.send(ty == 1 and "/live/return/device/allparam" or "/live/device/allparam", tuple(po))
        elif len(msg) == 5:
            # Query one parameter.
            device = msg[3]
            param = msg[4]
            if ty == 1:
                p = LiveUtils.getSong().return_tracks[track].devices[device].parameters[param]
            else:
                p = LiveUtils.getSong().visible_tracks[track].devices[device].parameters[param]
            self.oscEndpoint.send(ty == 1 and "/live/return/device/param" or "/live/device/param", (track, device, param, p.value, str(p.name), p.min, p.max))
        elif len(msg) == 6:
            # Set one parameter.
            device = msg[3]
            param = msg[4]
            value = msg[5]
            if ty == 1:
                LiveUtils.getSong().return_tracks[track].devices[device].parameters[param].value = value
            else:
                LiveUtils.getSong().visible_tracks[track].devices[device].parameters[param].value = value
    def devicerangeCB(self, msg, source):
        """Called when a /live/device/range or /live/return/device/range message is received.

        Messages:
        /live/device/range (int track, int device)            Replies with (track, device, i, min, max, ...) for every parameter
        /live/device/range (int track, int device, int param) Replies with (track, device, param, min, max)
        """
        # 1 when addressing a return track's devices.
        ty = msg[0] == '/live/return/device/range' and 1 or 0
        track = msg[2]
        if len(msg) == 4:
            device = msg[3]
            po = [track, device]
            if ty == 1:
                params = LiveUtils.getSong().return_tracks[track].devices[device].parameters
            else:
                params = LiveUtils.getSong().visible_tracks[track].devices[device].parameters
            for i in range(len(params)):
                po.append(i)
                po.append(params[i].min)
                po.append(params[i].max)
            self.oscEndpoint.send(ty == 1 and "/live/return/device/range" or "/live/device/range", tuple(po))
        elif len(msg) == 5:
            device = msg[3]
            param = msg[4]
            if ty == 1:
                p = LiveUtils.getSong().return_tracks[track].devices[device].parameters[param]
            else:
                p = LiveUtils.getSong().visible_tracks[track].devices[device].parameters[param]
            self.oscEndpoint.send(ty == 1 and "/live/return/device/range" or "/live/device/range", (track, device, param, p.min, p.max))
    def devicelistCB(self, msg, source):
        """Called when a /live/devicelist or /live/return/devicelist message is received.

        Messages:
        /live/devicelist (int track) Replies with (track, idx0, name0, idx1, name1, ...)
        """
        # 1 when addressing a return track.
        ty = msg[0] == '/live/return/devicelist' and 1 or 0
        track = msg[2]
        if len(msg) == 3:
            do = [track]
            if ty == 1:
                devices = LiveUtils.getSong().return_tracks[track].devices
            else:
                devices = LiveUtils.getSong().visible_tracks[track].devices
            for i in range(len(devices)):
                do.append(i)
                do.append(str(devices[i].name))
            self.oscEndpoint.send(ty == 1 and "/live/return/devicelist" or "/live/devicelist", tuple(do))
    def mdeviceCB(self, msg, source):
        """Called when a /live/master/device message is received.

        Messages:
        /live/master/device (int device)                         Replies with (device, i, value, name, ...) for all parameters
        /live/master/device (int device, int param)              Replies with (device, param, value, name)
        /live/master/device (int device, int param, float value) Sets the parameter's value
        """
        if len(msg) == 3:
            # Dump every parameter of the device.
            device = msg[2]
            po = [device]
            params = LiveUtils.getSong().master_track.devices[device].parameters
            for i in range(len(params)):
                po.append(i)
                po.append(float(params[i].value))
                po.append(str(params[i].name))
            self.oscEndpoint.send("/live/master/device", tuple(po))
        elif len(msg) == 4:
            # Query one parameter.
            device = msg[2]
            param = msg[3]
            p = LiveUtils.getSong().master_track.devices[device].parameters[param]
            self.oscEndpoint.send("/live/master/device", (device, param, p.value, str(p.name)))
        elif len(msg) == 5:
            # Set one parameter.
            device = msg[2]
            param = msg[3]
            value = msg[4]
            LiveUtils.getSong().master_track.devices[device].parameters[param].value = value
def mdevicerangeCB(self, msg, source):
if len(msg) == 3:
device = msg[2]
po = [device]
params = LiveUtils.getSong().master_track.devices[device].parameters
for i in range(len(params)):
po.append(i)
po.append(params[i].max)
po.append(params[i].min)
self.oscEndpoint.send("/live/master/device/range", tuple(po))
elif len(msg) == 4:
device = msg[2]
param = msg[3]
p = LiveUtils.getSong().master_track.devices[device].parameters[param]
self.oscEndpoint.send("/live/master/device/range", (device, param, p.min, p.max))
def mdevicelistCB(self, msg, source):
if len(msg) == 2 or (len(msg) == 3 and msg[2] == "query"):
do = []
devices = LiveUtils.getSong().master_track.devices
for i in range(len(devices)):
do.append(i)
do.append(str(devices[i].name))
self.oscEndpoint.send("/live/master/devicelist", tuple(do))
    def crossfaderCB(self, msg, source):
        """Called when a /live/master/crossfader message is received.

        Messages:
        /live/master/crossfader             Replies with the current crossfader value
        /live/master/crossfader (float val) Sets the crossfader value
        """
        if len(msg) == 2 or (len(msg) == 3 and msg[2] == "query"):
            self.oscEndpoint.send("/live/master/crossfader", float(LiveUtils.getSong().master_track.mixer_device.crossfader.value))
        elif len(msg) == 3:
            val = msg[2]
            LiveUtils.getSong().master_track.mixer_device.crossfader.value = val
    def loopStateCB(self, msg, source):
        """Called when a /live/clip/loopstate or /live/clip/loopstate_id message is received.

        Messages:
        /live/clip/loopstate (int track, int clip)            Replies with the clip's looping flag
        /live/clip/loopstate (int track, int clip, int state) Sets the clip's looping flag
        """
        # NOTE(review): 'type' shadows the builtin; 1 for the _id variant,
        # which echoes track/clip back in the reply.
        type = msg[0] == '/live/clip/loopstate_id' and 1 or 0
        trackNumber = msg[2]
        clipNumber = msg[3]
        if len(msg) == 4:
            if type == 1:
                self.oscEndpoint.send("/live/clip/loopstate", (trackNumber, clipNumber, int(LiveUtils.getClip(trackNumber, clipNumber).looping)))
            else:
                # NOTE(review): '(x)' is a bare int, not a 1-tuple — presumably
                # oscEndpoint.send accepts a scalar payload; verify.
                self.oscEndpoint.send("/live/clip/loopstate", (int(LiveUtils.getClip(trackNumber, clipNumber).looping)))
        elif len(msg) == 5:
            loopState = msg[4]
            LiveUtils.getClip(trackNumber, clipNumber).looping = loopState
    def loopStartCB(self, msg, source):
        """Called when a /live/clip/loopstart or /live/clip/loopstart_id message is received.

        Messages:
        /live/clip/loopstart (int track, int clip)              Replies with the clip's loop start
        /live/clip/loopstart (int track, int clip, float start) Sets the clip's loop start
        """
        # NOTE(review): 'type' shadows the builtin; 1 for the _id variant.
        type = msg[0] == '/live/clip/loopstart_id' and 1 or 0
        trackNumber = msg[2]
        clipNumber = msg[3]
        if len(msg) == 4:
            if type == 1:
                self.oscEndpoint.send("/live/clip/loopstart", (trackNumber, clipNumber, float(LiveUtils.getClip(trackNumber, clipNumber).loop_start)))
            else:
                self.oscEndpoint.send("/live/clip/loopstart", (float(LiveUtils.getClip(trackNumber, clipNumber).loop_start)))
        elif len(msg) == 5:
            loopStart = msg[4]
            LiveUtils.getClip(trackNumber, clipNumber).loop_start = loopStart
    def loopEndCB(self, msg, source):
        """Called when a /live/clip/loopend or /live/clip/loopend_id message is received.

        Messages:
        /live/clip/loopend (int track, int clip)            Replies with the clip's loop end
        /live/clip/loopend (int track, int clip, float end) Sets the clip's loop end
        """
        # NOTE(review): 'type' shadows the builtin; 1 for the _id variant.
        type = msg[0] == '/live/clip/loopend_id' and 1 or 0
        trackNumber = msg[2]
        clipNumber = msg[3]
        if len(msg) == 4:
            if type == 1:
                self.oscEndpoint.send("/live/clip/loopend", (trackNumber, clipNumber, float(LiveUtils.getClip(trackNumber, clipNumber).loop_end)))
            else:
                self.oscEndpoint.send("/live/clip/loopend", (float(LiveUtils.getClip(trackNumber, clipNumber).loop_end)))
        elif len(msg) == 5:
            loopEnd = msg[4]
            LiveUtils.getClip(trackNumber, clipNumber).loop_end = loopEnd
def quantizationCB(self, msg, source):
quant = msg[2]
LiveUtils.getSong().clip_trigger_quantization = quant
| gpl-3.0 |
PiWare/kicad_library | script/fpgen/chip.py | 1 | 1778 | import fp
from fp import cfg
class chip(fp.base):
    """Generator for chip resistors, capacitors, inductors, MELF and Tantal devices"""
    def __init__(self, name, model, description, tags, package_width, package_height, pad_width, pad_height, pad_distance):
        # All dimensions are footprint units; pad_distance is the
        # centre-to-centre spacing of the two pads.
        super(chip, self).__init__(name, model, description, tags, True, False)
        self.package_width = package_width
        self.package_height = package_height
        self.pad_width = pad_width
        self.pad_height = pad_height
        # NOTE(review): pad_distance is not stored as an attribute.
        # Reference above the outline, value text at the centre.
        fp.base.add(self, fp.text(cfg.FOOTPRINT_REFERENCE_LAYER, "reference", "REF**", 0, -package_height / 2 - cfg.FOOTPRINT_REFERENCE_FONT_SIZE, 0, cfg.FOOTPRINT_REFERENCE_FONT_SIZE, cfg.FOOTPRINT_REFERENCE_FONT_THICKNESS))
        fp.base.add(self, fp.text(cfg.FOOTPRINT_VALUE_LAYER, "value", "VAL**", 0, 0, 0, cfg.FOOTPRINT_VALUE_FONT_SIZE, cfg.FOOTPRINT_VALUE_FONT_THICKNESS))
        # Package outline plus one SMD pad on each side.
        fp.base.add(self, fp.rectangle(cfg.FOOTPRINT_PACKAGE_LAYER, 0, 0, package_width, package_height, cfg.FOOTPRINT_PACKAGE_LINE_WIDTH, True))
        fp.base.add(self, fp.pad(cfg.FOOTPRINT_SMD_LAYERS, 1, fp.technology.smd, fp.type.rect, -pad_distance / 2, 0, pad_width, pad_height))
        fp.base.add(self, fp.pad(cfg.FOOTPRINT_SMD_LAYERS, 2, fp.technology.smd, fp.type.rect, +pad_distance / 2, 0, pad_width, pad_height))
class chip_pol(chip):
    """Generator for chip devices with polarity marker"""
    def __init__(self, name, description, tags, package_width, package_height, pad_width, pad_height, pad_distance, model=None):
        # BUGFIX: chip.__init__ expects (name, model, description, tags, ...);
        # the old call omitted 'model', so every instantiation raised
        # TypeError. 'model' is a new optional parameter (backward compatible)
        # and falls back to 'name' when not given.
        super(chip_pol, self).__init__(name, model if model is not None else name, description, tags, package_width, package_height, pad_width, pad_height, pad_distance)
        # Polarity marker: a vertical line just outside the package outline.
        # BUGFIX: 'package_widht' typo caused a NameError.
        line_x = package_width / 2 + package_width * 0.1
        line_y = package_height / 2
        fp.base.add(self, fp.line(cfg.FOOTPRINT_PACKAGE_LAYER, -line_x, -line_y, -line_x, line_y, cfg.FOOTPRINT_PACKAGE_LINE_WIDTH))
| gpl-2.0 |
dm-dashboard/dashboard | node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/gypsh.py | 2779 | 1665 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypsh output module
gypsh is a GYP shell. It's not really a generator per se. All it does is
fire up an interactive Python session with a few local variables set to the
variables passed to the generator. Like gypd, it's intended as a debugging
aid, to facilitate the exploration of .gyp structures after being processed
by the input module.
The expected usage is "gyp -f gypsh -D OS=desired_os".
"""
import code
import sys
# All of this stuff about generator variables was lovingly ripped from gypd.py.
# That module has a much better description of what's going on and why.
_generator_identity_variables = [
'EXECUTABLE_PREFIX',
'EXECUTABLE_SUFFIX',
'INTERMEDIATE_DIR',
'PRODUCT_DIR',
'RULE_INPUT_ROOT',
'RULE_INPUT_DIRNAME',
'RULE_INPUT_EXT',
'RULE_INPUT_NAME',
'RULE_INPUT_PATH',
'SHARED_INTERMEDIATE_DIR',
]
generator_default_variables = {
}
for v in _generator_identity_variables:
generator_default_variables[v] = '<(%s)' % v
def GenerateOutput(target_list, target_dicts, data, params):
  """Drop into an interactive Python session over the processed .gyp data.

  target_list, target_dicts and data are exposed as locals so they can be
  explored by hand; params is unused.
  """
  # NOTE(review): 'locals' shadows the builtin; kept as-is.
  locals = {
        'target_list': target_list,
        'target_dicts': target_dicts,
        'data': data,
      }

  # Use a banner that looks like the stock Python one and like what
  # code.interact uses by default, but tack on something to indicate what
  # locals are available, and identify gypsh.
  banner='Python %s on %s\nlocals.keys() = %s\ngypsh' % \
         (sys.version, sys.platform, repr(sorted(locals.keys())))

  code.interact(banner, local=locals)
| mit |
cwisecarver/osf.io | tests/framework_tests/test_email.py | 26 | 2676 | # -*- coding: utf-8 -*-
import unittest
import smtplib
import mock
from nose.tools import * # flake8: noqa (PEP8 asserts)
import sendgrid
from framework.email.tasks import send_email, _send_with_sendgrid
from website import settings
from tests.base import fake
# Check if local mail server is running
# Probe the local SMTP server once at import time; tests that need it are
# skipped when it is unreachable.
SERVER_RUNNING = True
try:
    s = smtplib.SMTP(settings.MAIL_SERVER)
    s.quit()
except Exception:  # best-effort probe: any failure means "not running"
    # FIX: the exception was previously bound to an unused 'err' variable.
    SERVER_RUNNING = False
class TestEmail(unittest.TestCase):
    """Tests for the framework email helpers (SMTP and SendGrid paths)."""

    @unittest.skipIf(not SERVER_RUNNING,
                     "Mailserver isn't running. Run \"invoke mailserver\".")
    @unittest.skipIf(not settings.USE_EMAIL,
                     "settings.USE_EMAIL is False")
    def test_sending_email(self):
        # Exercises the real SMTP path against the local dev mail server.
        assert_true(send_email("foo@bar.com", "baz@quux.com", subject='no subject',
                               message="<h1>Greetings!</h1>", ttls=False, login=False))

    def test_send_with_sendgrid_success(self):
        # 200 from the SendGrid client means success.
        mock_client = mock.MagicMock()
        mock_client.send.return_value = 200, 'success'
        from_addr, to_addr = fake.email(), fake.email()
        category1, category2 = fake.word(), fake.word()
        subject = fake.bs()
        message = fake.text()
        ret = _send_with_sendgrid(
            from_addr=from_addr,
            to_addr=to_addr,
            subject=subject,
            message=message,
            mimetype='txt',
            client=mock_client,
            categories=(category1, category2)
        )
        assert_true(ret)
        assert_equal(mock_client.send.call_count, 1)
        # First call's argument should be a Mail object with
        # the correct configuration
        first_call_arg = mock_client.send.call_args[0][0]
        assert_is_instance(first_call_arg, sendgrid.Mail)
        assert_equal(first_call_arg.from_email, from_addr)
        assert_equal(first_call_arg.to[0], to_addr)
        assert_equal(first_call_arg.subject, subject)
        assert_equal(first_call_arg.text, message)
        # Categories are set
        assert_equal(first_call_arg.smtpapi.data['category'], (category1, category2))

    def test_send_with_sendgrid_failure_returns_false(self):
        # Any non-2xx status from the client makes the helper return False.
        mock_client = mock.MagicMock()
        mock_client.send.return_value = 400, 'failed'
        from_addr, to_addr = fake.email(), fake.email()
        subject = fake.bs()
        message = fake.text()
        ret = _send_with_sendgrid(
            from_addr=from_addr,
            to_addr=to_addr,
            subject=subject,
            message=message,
            mimetype='txt',
            client=mock_client
        )
        assert_false(ret)
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
canaltinova/servo | python/servo/post_build_commands.py | 8 | 9646 | # Copyright 2013 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
from __future__ import print_function, unicode_literals
import os
import os.path as path
import subprocess
from shutil import copytree, rmtree, copy2
from mach.decorators import (
CommandArgument,
CommandProvider,
Command,
)
from servo.command_base import (
CommandBase,
check_call, check_output, BIN_SUFFIX,
is_linux, set_osmesa_env,
)
def read_file(filename, if_exists=False):
    """Return the text content of ``filename``.

    When ``if_exists`` is True and the file does not exist, return None
    instead of raising.
    """
    missing_ok = if_exists and not path.exists(filename)
    if missing_ok:
        return None
    with open(filename) as handle:
        return handle.read()
@CommandProvider
class PostBuildCommands(CommandBase):
    """Mach commands that operate on an already-built Servo binary:
    run, record/replay under rr, and documentation generation."""

    @Command('run',
             description='Run Servo',
             category='post-build')
    @CommandArgument('--release', '-r', action='store_true',
                     help='Run the release build')
    @CommandArgument('--dev', '-d', action='store_true',
                     help='Run the dev build')
    @CommandArgument('--android', action='store_true', default=None,
                     help='Run on an Android device through `adb shell`')
    @CommandArgument('--debug', action='store_true',
                     help='Enable the debugger. Not specifying a '
                          '--debugger option will result in the default '
                          'debugger being used. The following arguments '
                          'have no effect without this.')
    @CommandArgument('--debugger', default=None, type=str,
                     help='Name of debugger to use.')
    @CommandArgument('--headless', '-z', action='store_true',
                     help='Launch in headless mode')
    @CommandArgument('--software', '-s', action='store_true',
                     help='Launch with software rendering')
    @CommandArgument('--bin', default=None,
                     help='Launch with specific binary')
    @CommandArgument('--nightly', '-n', default=None,
                     help='Specify a YYYY-MM-DD nightly build to run')
    @CommandArgument(
        'params', nargs='...',
        help="Command-line arguments to be passed through to Servo")
    def run(self, params, release=False, dev=False, android=None, debug=False, debugger=None,
            headless=False, software=False, bin=None, nightly=None):
        """Launch the Servo binary, optionally on Android or under a debugger."""
        env = self.build_env()
        env["RUST_BACKTRACE"] = "1"

        # Make --debugger imply --debug
        if debugger:
            debug = True

        if android is None:
            android = self.config["build"]["android"]

        if android:
            if debug:
                print("Android on-device debugging is not supported by mach yet. See")
                print("https://github.com/servo/servo/wiki/Building-for-Android#debugging-on-device")
                return
            # Drive the device via an adb shell script: stop any running
            # instance, write the pass-through params, then start the app.
            script = [
                "am force-stop com.mozilla.servo",
                "echo servo >/sdcard/Android/data/com.mozilla.servo/files/android_params"
            ]
            for param in params:
                script += [
                    "echo '%s' >>/sdcard/Android/data/com.mozilla.servo/files/android_params"
                    % param.replace("'", "\\'")
                ]
            script += [
                "am start com.mozilla.servo/com.mozilla.servo.MainActivity",
                "exit"
            ]
            shell = subprocess.Popen(["adb", "shell"], stdin=subprocess.PIPE)
            shell.communicate("\n".join(script) + "\n")
            return shell.wait()

        # Precedence: explicit --bin, then --nightly, then the local build.
        args = [bin or self.get_nightly_binary_path(nightly) or self.get_binary_path(release, dev)]

        if headless:
            set_osmesa_env(args[0], env)
            args.append('-z')

        if software:
            if not is_linux():
                print("Software rendering is only supported on Linux at the moment.")
                return

            env['LIBGL_ALWAYS_SOFTWARE'] = "1"

        # Borrowed and modified from:
        # http://hg.mozilla.org/mozilla-central/file/c9cfa9b91dea/python/mozbuild/mozbuild/mach_commands.py#l883
        if debug:
            import mozdebug
            if not debugger:
                # No debugger name was provided. Look for the default ones on
                # current OS.
                debugger = mozdebug.get_default_debugger_name(
                    mozdebug.DebuggerSearch.KeepLooking)

            self.debuggerInfo = mozdebug.get_debugger_info(debugger)
            if not self.debuggerInfo:
                print("Could not find a suitable debugger in your PATH.")
                return 1

            command = self.debuggerInfo.path
            if debugger == 'gdb' or debugger == 'lldb':
                # Prefer the rust-aware wrapper (rust-gdb/rust-lldb) when it
                # is available on PATH.
                rustCommand = 'rust-' + debugger
                try:
                    subprocess.check_call([rustCommand, '--version'], env=env, stdout=open(os.devnull, 'w'))
                except (OSError, subprocess.CalledProcessError):
                    pass
                else:
                    command = rustCommand

            # Prepend the debugger args.
            args = ([command] + self.debuggerInfo.args +
                    args + params)
        else:
            args = args + params

        try:
            check_call(args, env=env)
        except subprocess.CalledProcessError as e:
            print("Servo exited with return value %d" % e.returncode)
            return e.returncode
        except OSError as e:
            if e.errno == 2:
                # errno 2 == ENOENT: the binary itself is missing.
                print("Servo Binary can't be found! Run './mach build'"
                      " and try again!")
            else:
                raise e

    @Command('rr-record',
             description='Run Servo whilst recording execution with rr',
             category='post-build')
    @CommandArgument('--release', '-r', action='store_true',
                     help='Use release build')
    @CommandArgument('--dev', '-d', action='store_true',
                     help='Use dev build')
    @CommandArgument('--bin', default=None,
                     help='Launch with specific binary')
    @CommandArgument('--nightly', '-n', default=None,
                     help='Specify a YYYY-MM-DD nightly build to run')
    @CommandArgument(
        'params', nargs='...',
        help="Command-line arguments to be passed through to Servo")
    def rr_record(self, release=False, dev=False, bin=None, nightly=None, params=[]):
        """Run Servo under `rr record` so the session can be replayed later."""
        env = self.build_env()
        env["RUST_BACKTRACE"] = "1"

        servo_cmd = [bin or self.get_nightly_binary_path(nightly) or
                     self.get_binary_path(release, dev)] + params
        rr_cmd = ['rr', '--fatal-errors', 'record']
        try:
            check_call(rr_cmd + servo_cmd)
        except OSError as e:
            if e.errno == 2:
                print("rr binary can't be found!")
            else:
                raise e

    @Command('rr-replay',
             description='Replay the most recent execution of Servo that was recorded with rr',
             category='post-build')
    def rr_replay(self):
        """Replay the latest rr recording."""
        try:
            check_call(['rr', '--fatal-errors', 'replay'])
        except OSError as e:
            if e.errno == 2:
                print("rr binary can't be found!")
            else:
                raise e

    @Command('doc',
             description='Generate documentation',
             category='post-build')
    @CommandArgument(
        'params', nargs='...',
        help="Command-line arguments to be passed through to cargo doc")
    def doc(self, params):
        """Build API docs with cargo doc, copying in the Rust std docs."""
        env = os.environ.copy()
        env["RUSTUP_TOOLCHAIN"] = self.toolchain()
        rustc_path = check_output(["rustup" + BIN_SUFFIX, "which", "rustc"], env=env)
        assert path.basename(path.dirname(rustc_path)) == "bin"
        toolchain_path = path.dirname(path.dirname(rustc_path))
        rust_docs = path.join(toolchain_path, "share", "doc", "rust", "html")

        self.ensure_bootstrapped()
        docs = path.join(self.get_target_dir(), "doc")
        if not path.exists(docs):
            os.makedirs(docs)

        # Re-copy the toolchain's Rust docs only when the version changed.
        if read_file(path.join(docs, "version_info.html"), if_exists=True) != \
                read_file(path.join(rust_docs, "version_info.html")):
            print("Copying Rust documentation.")
            # copytree doesn't like the destination already existing.
            for name in os.listdir(rust_docs):
                if not name.startswith('.'):
                    full_name = path.join(rust_docs, name)
                    destination = path.join(docs, name)
                    if path.isdir(full_name):
                        if path.exists(destination):
                            rmtree(destination)
                        copytree(full_name, destination)
                    else:
                        copy2(full_name, destination)

        return self.call_rustup_run(
            ["cargo", "doc", "--manifest-path", self.servo_manifest()] + params,
            env=self.build_env()
        )

    @Command('browse-doc',
             description='Generate documentation and open it in a web browser',
             category='post-build')
    def serve_docs(self):
        """Build the docs, then open the generated index in a browser."""
        self.doc([])
        import webbrowser
        webbrowser.open("file://" + path.abspath(path.join(
            self.get_target_dir(), "doc", "servo", "index.html")))
| mpl-2.0 |
ThoughtWorksInc/treadmill | treadmill/sproc/init.py | 3 | 8051 | """Treadmill initialization and server presence daemon.
This service register the node into the Treadmill cell and, as such, is
responsible for publishing the node's capacity to the scheduler.
This service is also responsible for shutting down the node, when necessary or
requested, by disabling all traffic from and to the containers.
"""
import logging
import os
import time
import click
import kazoo
from treadmill import appenv
from treadmill import context
from treadmill import exc
from treadmill import sysinfo
from treadmill import utils
from treadmill import zknamespace as z
from treadmill import zkutils
if os.name == 'posix':
from .. import netdev
from .. import subproc
_LOGGER = logging.getLogger(__name__)
# Seconds between watchdog checks; NOTE(review): not referenced in the
# visible portion of this module — presumably used by _main_loop.
_WATCHDOG_CHECK_INTERVAL = 30
# NOTE(review): unused in the visible code; possibly set/used elsewhere.
_KERNEL_WATCHDOG = None
def init():
    """Top level command handler."""

    @click.command()
    @click.option('--exit-on-fail', is_flag=True, default=False)
    @click.option('--zkid', help='Zookeeper session ID file.')
    @click.option('--approot', type=click.Path(exists=True),
                  envvar='TREADMILL_APPROOT', required=True)
    def top(exit_on_fail, zkid, approot):
        """Run treadmill init process."""
        _LOGGER.info('Initializing Treadmill: %s', approot)

        tm_env = appenv.AppEnvironment(approot)
        # The listener exits the process when the ZK session is lost.
        zkclient = zkutils.connect(context.GLOBAL.zk.url,
                                   idpath=zkid,
                                   listener=_exit_clear_watchdog_on_lost)

        utils.report_ready()

        # Block until the cell namespace and this server's record exist.
        while not zkclient.exists(z.SERVER_PRESENCE):
            _LOGGER.warn('namespace not ready.')
            time.sleep(30)

        hostname = sysinfo.hostname()

        zk_blackout_path = z.path.blackedout_server(hostname)
        zk_presence_path = z.path.server_presence(hostname)
        zk_server_path = z.path.server(hostname)

        while not zkclient.exists(zk_server_path):
            _LOGGER.warn('server %s not defined in the cell.', hostname)
            time.sleep(30)

        _LOGGER.info('Checking blackout list.')
        blacklisted = bool(zkclient.exists(zk_blackout_path))

        if not blacklisted:
            # Node startup.
            _node_start(tm_env, zkclient, hostname,
                        zk_server_path, zk_presence_path)

            # Cleanup the watchdog directory
            tm_env.watchdogs.initialize()

            _init_network()

            _LOGGER.info('Ready.')

            # Blocks until the node goes down or is told to shut down.
            down_reason = _main_loop(tm_env, zkclient, zk_presence_path)

            if down_reason is not None:
                _LOGGER.warning('Shutting down: %s', down_reason)

                # Blackout the server.
                zkutils.ensure_exists(
                    zkclient,
                    zk_blackout_path,
                    acl=[zkutils.make_host_acl(hostname, 'rwcda')],
                    data=down_reason
                )
        else:
            # Node was already blacked out.
            _LOGGER.warning('Shutting down blacked out node.')

        # This is the shutdown phase.

        # Delete the node
        zkutils.ensure_deleted(zkclient, zk_presence_path)
        zkclient.remove_listener(_exit_clear_watchdog_on_lost)
        zkclient.stop()
        zkclient.close()

        _cleanup_network()

        # to ternminate all the running apps
        _blackout_terminate(tm_env)

        if exit_on_fail:
            utils.sys_exit(-1)
        else:
            # Sit forever in a broken state
            while True:
                time.sleep(1000000)

    return top
def _blackout_terminate(tm_env):
    """Blackout by terminating all containers in running dir.

    :param tm_env: node application environment (provides init_dir).
    """
    if os.name == 'posix':
        # XXX: This should be replaced with a supervisor module call hidding
        #      away all s6 related stuff
        supervisor_dir = os.path.join(tm_env.init_dir, 'supervisor')
        cleanupd_dir = os.path.join(tm_env.init_dir, 'cleanup')

        # we first shutdown cleanup so link in /var/tmp/treadmill/cleanup
        # will not be recycled before blackout clear
        _LOGGER.info('try to shutdown cleanup service')
        subproc.check_call(['s6_svc', '-d', cleanupd_dir])
        subproc.check_call(['s6_svwait', '-d', cleanupd_dir])

        # shutdown all the applications by shutting down supervisor
        _LOGGER.info('try to shutdown supervisor')
        subproc.check_call(['s6_svc', '-d', supervisor_dir])
    else:
        # TODO: Implement terminating containers on windows
        pass
def _init_network():
    """Initialize network: (re)enable IP forwarding on tm0 (POSIX only)."""
    if os.name != 'nt':
        netdev.dev_conf_forwarding_set('tm0', True)
def _cleanup_network():
    """Cleanup network: disable container traffic via tm0 (POSIX only)."""
    if os.name != 'nt':
        netdev.dev_conf_forwarding_set('tm0', False)
def _node_start(tm_env, zkclient, hostname,
                zk_server_path, zk_presence_path):
    """Node startup. Try to re-establish old session or start fresh.

    If the existing presence node is owned by our current Zookeeper session
    id, the previous registration is still valid and is reused.  Otherwise
    the stale presence node is deleted and the node is initialized from
    scratch via _node_initialize.
    """
    old_session_ok = False
    try:
        _data, metadata = zkclient.get(zk_presence_path)
        # Compare the znode's owner session with our own session id.
        if metadata.owner_session_id == zkclient.client_id[0]:
            _LOGGER.info('Reconnecting with previous session: %s',
                         metadata.owner_session_id)
            old_session_ok = True
        else:
            _LOGGER.info('Session id does not match, new session.')
            zkclient.delete(zk_presence_path)
    except kazoo.client.NoNodeError:
        # No presence node at all - cold start.
        _LOGGER.info('%s does not exist.', zk_presence_path)
    if not old_session_ok:
        _node_initialize(tm_env,
                         zkclient, hostname,
                         zk_server_path, zk_presence_path)
def _node_initialize(tm_env, zkclient, hostname,
                     zk_server_path, zk_presence_path):
    """Node initialization. Should only be done on a cold start.
    """
    # Freshly collected local node information, overlaid on top of the
    # scheduler-provided server record.
    local_info = sysinfo.node_info(tm_env)
    server_info = zkutils.get(zkclient, zk_server_path)
    server_info.update(local_info)
    _LOGGER.info('Registering node: %s: %s, %r',
                 zk_server_path, hostname, server_info)
    zkutils.update(zkclient, zk_server_path, server_info)
    acl = zkutils.make_host_acl(hostname, 'rwcda')
    _LOGGER.debug('host_acl: %r', acl)
    # Ephemeral presence node - disappears when the ZK session is lost.
    zkutils.put(zkclient,
                zk_presence_path, {'seen': False},
                acl=[acl],
                ephemeral=True)
    # Invoke the local node initialization
    tm_env.initialize(server_info)
def _exit_clear_watchdog_on_lost(state):
    """Zookeeper connection listener: exit the process on session loss.

    Registered on the zkclient (and removed during orderly shutdown);
    invoked by kazoo on every connection state transition.
    """
    _LOGGER.debug('ZK connection state: %s', state)
    if state == zkutils.states.KazooState.LOST:
        _LOGGER.info('Exiting on ZK connection lost.')
        utils.sys_exit(-1)
def _main_loop(tm_env, zkclient, zk_presence_path):
    """Main loop.

    Wait for zk event and check watchdogs.

    Returns a blackout reason string when a watchdog check fails, or None
    when the loop exits because the presence node was deleted.
    """
    down_reason = None
    # Now that the server is registered, setup the stop-on-delete
    # trigger and the deadman's trigger.
    node_deleted_event = zkclient.handler.event_object()
    node_deleted_event.clear()

    @zkclient.DataWatch(zk_presence_path)
    @exc.exit_on_unhandled
    def _exit_on_delete(data, _stat, event):
        """Force exit if server node is deleted."""
        if (data is None or
                (event is not None and event.type == 'DELETED')):
            # The node is deleted
            node_deleted_event.set()
            # Returning False from a DataWatch callback cancels the watch.
            return False
        else:
            # Reestablish the watch.
            return True

    while not node_deleted_event.wait(_WATCHDOG_CHECK_INTERVAL):
        # NOTE: The loop time above is tailored to the kernel watchdog time.
        #       Be very careful before changing it.
        # Check our watchdogs
        result = tm_env.watchdogs.check()
        if result:
            # Something is wrong with the node, shut it down
            down_reason = 'watchdogs %r failed.' % result
            break
    return down_reason
| apache-2.0 |
achernet/cython | Cython/CodeWriter.py | 13 | 15260 | """
Serializes a Cython code tree to Cython code. This is primarily useful for
debugging and testing purposes.
The output is in a strict format, no whitespace or comments from the input
is preserved (and it could not be as it is not present in the code tree).
"""
from __future__ import absolute_import, print_function
from .Compiler.Visitor import TreeVisitor
from .Compiler.ExprNodes import *
class LinesResult(object):
    """Accumulates serializer output as a list of completed lines.

    ``s`` holds the partially built current line; ``newline`` commits it
    to ``lines`` and starts a fresh one.
    """

    def __init__(self):
        self.lines = []   # completed lines, in order
        self.s = u""      # current, not yet committed line

    def put(self, s):
        """Append text to the current line."""
        self.s = self.s + s

    def newline(self):
        """Commit the current line and reset the buffer."""
        self.lines.append(self.s)
        self.s = u""

    def putline(self, s):
        """Append text and commit the line in one step."""
        self.lines.append(self.s + s)
        self.s = u""
class DeclarationWriter(TreeVisitor):
    """Serializes the declaration subset of a Cython parse tree to source.

    Output is accumulated in a LinesResult via an explicit indentation
    level.  CodeWriter layers statement/expression serialization on top.
    """

    indent_string = u"    "

    def __init__(self, result=None):
        super(DeclarationWriter, self).__init__()
        if result is None:
            result = LinesResult()
        self.result = result
        self.numindents = 0
        self.tempnames = {}
        self.tempblockindex = 0

    def write(self, tree):
        """Serialize the tree and return the LinesResult it was written to."""
        self.visit(tree)
        return self.result

    def indent(self):
        self.numindents += 1

    def dedent(self):
        self.numindents -= 1

    def startline(self, s=u""):
        """Begin a new output line at the current indentation level."""
        self.result.put(self.indent_string * self.numindents + s)

    def put(self, s):
        self.result.put(s)

    def putline(self, s):
        self.result.putline(self.indent_string * self.numindents + s)

    def endline(self, s=u""):
        self.result.putline(s)

    def line(self, s):
        self.startline(s)
        self.endline()

    def comma_separated_list(self, items, output_rhs=False):
        """Write items separated by ", ", optionally with "= default".

        Bug fix: the original emitted defaults for all items *except the
        last one*, so e.g. the final declarator of a cdef lost its
        initializer when output_rhs was True.
        """
        for index, item in enumerate(items):
            if index > 0:
                self.put(u", ")
            self.visit(item)
            if output_rhs and item.default is not None:
                self.put(u" = ")
                self.visit(item.default)

    def visit_Node(self, node):
        # Any node type without an explicit handler is a serializer bug.
        raise AssertionError("Node not handled by serializer: %r" % node)

    def visit_ModuleNode(self, node):
        self.visitchildren(node)

    def visit_StatListNode(self, node):
        self.visitchildren(node)

    def visit_CDefExternNode(self, node):
        # "*" means an extern block without an include file.
        if node.include_file is None:
            include = u'*'
        else:
            include = u'"%s"' % node.include_file
        self.putline(u"cdef extern from %s:" % include)
        self.indent()
        self.visit(node.body)
        self.dedent()

    def visit_CPtrDeclaratorNode(self, node):
        self.put('*')
        self.visit(node.base)

    def visit_CReferenceDeclaratorNode(self, node):
        self.put('&')
        self.visit(node.base)

    def visit_CArrayDeclaratorNode(self, node):
        # NOTE: this method was accidentally defined twice in the original;
        # the identical duplicate has been removed.
        self.visit(node.base)
        self.put(u'[')
        if node.dimension is not None:
            self.visit(node.dimension)
        self.put(u']')

    def visit_CFuncDeclaratorNode(self, node):
        # TODO: except, gil, etc.
        self.visit(node.base)
        self.put(u'(')
        self.comma_separated_list(node.args)
        self.endline(u')')

    def visit_CNameDeclaratorNode(self, node):
        self.put(node.name)

    def visit_CSimpleBaseTypeNode(self, node):
        # See Parsing.p_sign_and_longness
        if node.is_basic_c_type:
            self.put(("unsigned ", "", "signed ")[node.signed])
            if node.longness < 0:
                self.put("short " * -node.longness)
            elif node.longness > 0:
                self.put("long " * node.longness)
        self.put(node.name)

    def visit_CComplexBaseTypeNode(self, node):
        self.put(u'(')
        self.visit(node.base_type)
        self.visit(node.declarator)
        self.put(u')')

    def visit_CNestedBaseTypeNode(self, node):
        self.visit(node.base_type)
        self.put(u'.')
        self.put(node.name)

    def visit_TemplatedTypeNode(self, node):
        self.visit(node.base_type_node)
        self.put(u'[')
        self.comma_separated_list(node.positional_args + node.keyword_args.key_value_pairs)
        self.put(u']')

    def visit_CVarDefNode(self, node):
        self.startline(u"cdef ")
        self.visit(node.base_type)
        self.put(u" ")
        self.comma_separated_list(node.declarators, output_rhs=True)
        self.endline()

    def visit_container_node(self, node, decl, extras, attributes):
        """Shared writer for struct/union/enum/cppclass containers."""
        # TODO: visibility
        self.startline(decl)
        if node.name:
            self.put(u' ')
            self.put(node.name)
            if node.cname is not None:
                self.put(u' "%s"' % node.cname)
        if extras:
            self.put(extras)
        self.endline(':')
        self.indent()
        if not attributes:
            self.putline('pass')
        else:
            for attribute in attributes:
                self.visit(attribute)
        self.dedent()

    def visit_CStructOrUnionDefNode(self, node):
        if node.typedef_flag:
            decl = u'ctypedef '
        else:
            decl = u'cdef '
        if node.visibility == 'public':
            decl += u'public '
        if node.packed:
            decl += u'packed '
        decl += node.kind
        self.visit_container_node(node, decl, None, node.attributes)

    def visit_CppClassNode(self, node):
        extras = ""
        if node.templates:
            extras = u"[%s]" % ", ".join(node.templates)
        if node.base_classes:
            extras += "(%s)" % ", ".join(node.base_classes)
        self.visit_container_node(node, u"cdef cppclass", extras, node.attributes)

    def visit_CEnumDefNode(self, node):
        self.visit_container_node(node, u"cdef enum", None, node.items)

    def visit_CEnumDefItemNode(self, node):
        self.startline(node.name)
        if node.cname:
            self.put(u' "%s"' % node.cname)
        if node.value:
            self.put(u" = ")
            self.visit(node.value)
        self.endline()

    def visit_CClassDefNode(self, node):
        assert not node.module_name
        if node.decorators:
            for decorator in node.decorators:
                self.visit(decorator)
        self.startline(u"cdef class ")
        self.put(node.class_name)
        if node.base_class_name:
            self.put(u"(")
            if node.base_class_module:
                self.put(node.base_class_module)
                self.put(u".")
            self.put(node.base_class_name)
            self.put(u")")
        self.endline(u":")
        self.indent()
        self.visit(node.body)
        self.dedent()

    def visit_CTypeDefNode(self, node):
        self.startline(u"ctypedef ")
        self.visit(node.base_type)
        self.put(u" ")
        self.visit(node.declarator)
        self.endline()

    def visit_FuncDefNode(self, node):
        self.startline(u"def %s(" % node.name)
        self.comma_separated_list(node.args)
        self.endline(u"):")
        self.indent()
        self.visit(node.body)
        self.dedent()

    def visit_CArgDeclNode(self, node):
        if node.base_type.name is not None:
            self.visit(node.base_type)
            self.put(u" ")
        self.visit(node.declarator)
        if node.default is not None:
            self.put(u" = ")
            self.visit(node.default)

    def visit_CImportStatNode(self, node):
        self.startline(u"cimport ")
        self.put(node.module_name)
        if node.as_name:
            self.put(u" as ")
            self.put(node.as_name)
        self.endline()

    def visit_FromCImportStatNode(self, node):
        self.startline(u"from ")
        self.put(node.module_name)
        self.put(u" cimport ")
        first = True
        for pos, name, as_name, kind in node.imported_names:
            assert kind is None
            if first:
                first = False
            else:
                self.put(u", ")
            self.put(name)
            if as_name:
                self.put(u" as ")
                self.put(as_name)
        self.endline()

    def visit_NameNode(self, node):
        self.put(node.name)

    def visit_IntNode(self, node):
        self.put(node.value)

    def visit_NoneNode(self, node):
        self.put(u"None")

    def visit_NotNode(self, node):
        self.put(u"(not ")
        self.visit(node.operand)
        self.put(u")")

    def visit_DecoratorNode(self, node):
        self.startline("@")
        self.visit(node.decorator)
        self.endline()

    def visit_BinopNode(self, node):
        self.visit(node.operand1)
        self.put(u" %s " % node.operator)
        self.visit(node.operand2)

    def visit_AttributeNode(self, node):
        self.visit(node.obj)
        self.put(u".%s" % node.attribute)

    def visit_BoolNode(self, node):
        self.put(str(node.value))

    # FIXME: represent string nodes correctly
    def visit_StringNode(self, node):
        value = node.value
        if value.encoding is not None:
            value = value.encode(value.encoding)
        self.put(repr(value))

    def visit_PassStatNode(self, node):
        self.startline(u"pass")
        self.endline()
class CodeWriter(DeclarationWriter):
    """Extends DeclarationWriter with statement and expression nodes."""

    def visit_SingleAssignmentNode(self, node):
        self.startline()
        self.visit(node.lhs)
        self.put(u" = ")
        self.visit(node.rhs)
        self.endline()

    def visit_CascadedAssignmentNode(self, node):
        self.startline()
        for lhs in node.lhs_list:
            self.visit(lhs)
            self.put(u" = ")
        self.visit(node.rhs)
        self.endline()

    def visit_PrintStatNode(self, node):
        self.startline(u"print ")
        self.comma_separated_list(node.arg_tuple.args)
        # A trailing comma suppresses the newline in Python 2 print.
        if not node.append_newline:
            self.put(u",")
        self.endline()

    def visit_ForInStatNode(self, node):
        self.startline(u"for ")
        self.visit(node.target)
        self.put(u" in ")
        self.visit(node.iterator.sequence)
        self.endline(u":")
        self.indent()
        self.visit(node.body)
        self.dedent()
        if node.else_clause is not None:
            self.line(u"else:")
            self.indent()
            self.visit(node.else_clause)
            self.dedent()

    def visit_IfStatNode(self, node):
        # The IfClauseNode is handled directly without a separate match
        # for clarity.
        self.startline(u"if ")
        self.visit(node.if_clauses[0].condition)
        self.endline(":")
        self.indent()
        self.visit(node.if_clauses[0].body)
        self.dedent()
        for clause in node.if_clauses[1:]:
            self.startline("elif ")
            self.visit(clause.condition)
            self.endline(":")
            self.indent()
            self.visit(clause.body)
            self.dedent()
        if node.else_clause is not None:
            self.line("else:")
            self.indent()
            self.visit(node.else_clause)
            self.dedent()

    def visit_SequenceNode(self, node):
        self.comma_separated_list(node.args)  # Might need to discover whether we need () around tuples...hmm...

    def visit_SimpleCallNode(self, node):
        self.visit(node.function)
        self.put(u"(")
        self.comma_separated_list(node.args)
        self.put(")")

    def visit_GeneralCallNode(self, node):
        # NOTE(review): when both positional and keyword args are present, no
        # separating comma is emitted between them - looks like a latent bug,
        # left untouched here; confirm against a round-trip test.
        self.visit(node.function)
        self.put(u"(")
        posarg = node.positional_args
        if isinstance(posarg, AsTupleNode):
            self.visit(posarg.arg)
        else:
            self.comma_separated_list(posarg.args)  # TupleNode.args
        if node.keyword_args:
            if isinstance(node.keyword_args, DictNode):
                for i, (name, value) in enumerate(node.keyword_args.key_value_pairs):
                    if i > 0:
                        self.put(', ')
                    self.visit(name)
                    self.put('=')
                    self.visit(value)
            else:
                raise Exception("Not implemented yet")
        self.put(u")")

    def visit_ExprStatNode(self, node):
        self.startline()
        self.visit(node.expr)
        self.endline()

    def visit_InPlaceAssignmentNode(self, node):
        self.startline()
        self.visit(node.lhs)
        self.put(u" %s= " % node.operator)
        self.visit(node.rhs)
        self.endline()

    def visit_WithStatNode(self, node):
        self.startline()
        self.put(u"with ")
        self.visit(node.manager)
        if node.target is not None:
            self.put(u" as ")
            self.visit(node.target)
        self.endline(u":")
        self.indent()
        self.visit(node.body)
        self.dedent()

    def visit_TryFinallyStatNode(self, node):
        self.line(u"try:")
        self.indent()
        self.visit(node.body)
        self.dedent()
        self.line(u"finally:")
        self.indent()
        self.visit(node.finally_clause)
        self.dedent()

    def visit_TryExceptStatNode(self, node):
        self.line(u"try:")
        self.indent()
        self.visit(node.body)
        self.dedent()
        for x in node.except_clauses:
            self.visit(x)
        if node.else_clause is not None:
            self.visit(node.else_clause)

    def visit_ExceptClauseNode(self, node):
        self.startline(u"except")
        if node.pattern is not None:
            self.put(u" ")
            self.visit(node.pattern)
        if node.target is not None:
            self.put(u", ")
            self.visit(node.target)
        self.endline(":")
        self.indent()
        self.visit(node.body)
        self.dedent()

    def visit_ReturnStatNode(self, node):
        # Bug fix: a bare "return" has node.value == None; the original
        # unconditionally visited it, which crashed the serializer.
        self.startline("return")
        if node.value is not None:
            self.put(u" ")
            self.visit(node.value)
        self.endline()

    def visit_ReraiseStatNode(self, node):
        self.line("raise")

    def visit_ImportNode(self, node):
        self.put(u"(import %s)" % node.module_name.value)

    def visit_TempsBlockNode(self, node):
        """
        Temporaries are output like $1_1', where the first number is
        an index of the TempsBlockNode and the second number is an index
        of the temporary which that block allocates.
        """
        idx = 0
        for handle in node.temps:
            self.tempnames[handle] = "$%d_%d" % (self.tempblockindex, idx)
            idx += 1
        self.tempblockindex += 1
        self.visit(node.body)

    def visit_TempRefNode(self, node):
        self.put(self.tempnames[node.handle])
class PxdWriter(DeclarationWriter):
    """Writes .pxd-style output: declarations and signatures, no bodies."""

    def __call__(self, node):
        """Print the serialized tree and pass the node through unchanged."""
        print(u'\n'.join(self.write(node).lines))
        return node

    def visit_CFuncDefNode(self, node):
        # Inline functions have no place in a .pxd file.
        if 'inline' in node.modifiers:
            return
        self.startline(u'cpdef ' if node.overridable else u'cdef ')
        if node.visibility != 'private':
            self.put(node.visibility)
            self.put(u' ')
        if node.api:
            self.put(u'api ')
        self.visit(node.declarator)

    def visit_StatNode(self, node):
        # Statement bodies are suppressed entirely in .pxd output.
        pass
| apache-2.0 |
av8ramit/tensorflow | tensorflow/contrib/learn/python/learn/utils/saved_model_export_utils_test.py | 46 | 37444 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of utilities supporting export to SavedModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import time
from tensorflow.contrib.layers.python.layers import feature_column as fc
from tensorflow.contrib.learn.python.learn import export_strategy as export_strategy_lib
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.utils import input_fn_utils
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.estimator import estimator as core_estimator
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.util import compat
class TestEstimator(estimator.Estimator):
  """Estimator stub that records export calls instead of really exporting."""

  def __init__(self, *args, **kwargs):
    super(TestEstimator, self).__init__(*args, **kwargs)
    self.last_exported_checkpoint = ""
    self.last_exported_dir = ""

  # @Override
  def export_savedmodel(self,
                        export_dir,
                        serving_input_fn,
                        default_output_alternative_key=None,
                        assets_extra=None,
                        as_text=False,
                        checkpoint_path=None,
                        strip_default_attrs=False):
    """Fake export: create export_dir with a placeholder file and remember
    the checkpoint/directory arguments for later assertions."""
    if not os.path.exists(export_dir):
      os.makedirs(export_dir)
    placeholder_path = os.path.join(export_dir, "placeholder.txt")
    open(placeholder_path, "a").close()
    self.last_exported_checkpoint = checkpoint_path
    self.last_exported_dir = export_dir
    return export_dir
class SavedModelExportUtilsTest(test.TestCase):
  def test_build_standardized_signature_def_regression(self):
    """Tests the regression signature (string input -> float output)."""
    input_tensors = {
        "input-1":
            array_ops.placeholder(dtypes.string, 1, name="input-tensor-1")
    }
    output_tensors = {
        "output-1":
            array_ops.placeholder(dtypes.float32, 1, name="output-tensor-1")
    }
    problem_type = constants.ProblemType.LINEAR_REGRESSION
    actual_signature_def = (
        saved_model_export_utils.build_standardized_signature_def(
            input_tensors, output_tensors, problem_type))
    expected_signature_def = meta_graph_pb2.SignatureDef()
    shape = tensor_shape_pb2.TensorShapeProto(
        dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
    dtype_float = types_pb2.DataType.Value("DT_FLOAT")
    dtype_string = types_pb2.DataType.Value("DT_STRING")
    expected_signature_def.inputs[signature_constants.REGRESS_INPUTS].CopyFrom(
        meta_graph_pb2.TensorInfo(
            name="input-tensor-1:0", dtype=dtype_string, tensor_shape=shape))
    expected_signature_def.outputs[
        signature_constants.REGRESS_OUTPUTS].CopyFrom(
            meta_graph_pb2.TensorInfo(
                name="output-tensor-1:0", dtype=dtype_float,
                tensor_shape=shape))
    expected_signature_def.method_name = signature_constants.REGRESS_METHOD_NAME
    self.assertEqual(actual_signature_def, expected_signature_def)
  def test_build_standardized_signature_def_classification(self):
    """Tests classification with one output tensor."""
    input_tensors = {
        "input-1":
            array_ops.placeholder(dtypes.string, 1, name="input-tensor-1")
    }
    output_tensors = {
        "output-1":
            array_ops.placeholder(dtypes.string, 1, name="output-tensor-1")
    }
    problem_type = constants.ProblemType.CLASSIFICATION
    actual_signature_def = (
        saved_model_export_utils.build_standardized_signature_def(
            input_tensors, output_tensors, problem_type))
    expected_signature_def = meta_graph_pb2.SignatureDef()
    shape = tensor_shape_pb2.TensorShapeProto(
        dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
    dtype_string = types_pb2.DataType.Value("DT_STRING")
    expected_signature_def.inputs[signature_constants.CLASSIFY_INPUTS].CopyFrom(
        meta_graph_pb2.TensorInfo(
            name="input-tensor-1:0", dtype=dtype_string, tensor_shape=shape))
    # The single string output is exposed as the classes tensor.
    expected_signature_def.outputs[
        signature_constants.CLASSIFY_OUTPUT_CLASSES].CopyFrom(
            meta_graph_pb2.TensorInfo(
                name="output-tensor-1:0",
                dtype=dtype_string,
                tensor_shape=shape))
    expected_signature_def.method_name = (
        signature_constants.CLASSIFY_METHOD_NAME)
    self.assertEqual(actual_signature_def, expected_signature_def)
  def test_build_standardized_signature_def_classification2(self):
    """Tests multiple output tensors that include classes and probabilities."""
    input_tensors = {
        "input-1":
            array_ops.placeholder(dtypes.string, 1, name="input-tensor-1")
    }
    output_tensors = {
        "classes":
            array_ops.placeholder(
                dtypes.string, 1, name="output-tensor-classes"),
        # Will be used for CLASSIFY_OUTPUT_SCORES.
        "probabilities":
            array_ops.placeholder(
                dtypes.float32, 1, name="output-tensor-proba"),
        # Not mapped into the classification signature at all.
        "logits":
            array_ops.placeholder(
                dtypes.float32, 1, name="output-tensor-logits-unused"),
    }
    problem_type = constants.ProblemType.CLASSIFICATION
    actual_signature_def = (
        saved_model_export_utils.build_standardized_signature_def(
            input_tensors, output_tensors, problem_type))
    expected_signature_def = meta_graph_pb2.SignatureDef()
    shape = tensor_shape_pb2.TensorShapeProto(
        dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
    dtype_float = types_pb2.DataType.Value("DT_FLOAT")
    dtype_string = types_pb2.DataType.Value("DT_STRING")
    expected_signature_def.inputs[signature_constants.CLASSIFY_INPUTS].CopyFrom(
        meta_graph_pb2.TensorInfo(
            name="input-tensor-1:0", dtype=dtype_string, tensor_shape=shape))
    expected_signature_def.outputs[
        signature_constants.CLASSIFY_OUTPUT_CLASSES].CopyFrom(
            meta_graph_pb2.TensorInfo(
                name="output-tensor-classes:0",
                dtype=dtype_string,
                tensor_shape=shape))
    expected_signature_def.outputs[
        signature_constants.CLASSIFY_OUTPUT_SCORES].CopyFrom(
            meta_graph_pb2.TensorInfo(
                name="output-tensor-proba:0",
                dtype=dtype_float,
                tensor_shape=shape))
    expected_signature_def.method_name = (
        signature_constants.CLASSIFY_METHOD_NAME)
    self.assertEqual(actual_signature_def, expected_signature_def)
  def test_build_standardized_signature_def_classification3(self):
    """Tests multiple output tensors that include classes and scores."""
    input_tensors = {
        "input-1":
            array_ops.placeholder(dtypes.string, 1, name="input-tensor-1")
    }
    output_tensors = {
        "classes":
            array_ops.placeholder(
                dtypes.string, 1, name="output-tensor-classes"),
        "scores":
            array_ops.placeholder(
                dtypes.float32, 1, name="output-tensor-scores"),
        "logits":
            array_ops.placeholder(
                dtypes.float32, 1, name="output-tensor-logits-unused"),
    }
    problem_type = constants.ProblemType.CLASSIFICATION
    actual_signature_def = (
        saved_model_export_utils.build_standardized_signature_def(
            input_tensors, output_tensors, problem_type))
    expected_signature_def = meta_graph_pb2.SignatureDef()
    shape = tensor_shape_pb2.TensorShapeProto(
        dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
    dtype_float = types_pb2.DataType.Value("DT_FLOAT")
    dtype_string = types_pb2.DataType.Value("DT_STRING")
    expected_signature_def.inputs[signature_constants.CLASSIFY_INPUTS].CopyFrom(
        meta_graph_pb2.TensorInfo(
            name="input-tensor-1:0", dtype=dtype_string, tensor_shape=shape))
    expected_signature_def.outputs[
        signature_constants.CLASSIFY_OUTPUT_CLASSES].CopyFrom(
            meta_graph_pb2.TensorInfo(
                name="output-tensor-classes:0",
                dtype=dtype_string,
                tensor_shape=shape))
    # "scores" feed CLASSIFY_OUTPUT_SCORES; "logits" are left out.
    expected_signature_def.outputs[
        signature_constants.CLASSIFY_OUTPUT_SCORES].CopyFrom(
            meta_graph_pb2.TensorInfo(
                name="output-tensor-scores:0",
                dtype=dtype_float,
                tensor_shape=shape))
    expected_signature_def.method_name = (
        signature_constants.CLASSIFY_METHOD_NAME)
    self.assertEqual(actual_signature_def, expected_signature_def)
  def test_build_standardized_signature_def_classification4(self):
    """Tests classification without classes tensor."""
    input_tensors = {
        "input-1":
            array_ops.placeholder(dtypes.string, 1, name="input-tensor-1")
    }
    output_tensors = {
        "probabilities":
            array_ops.placeholder(
                dtypes.float32, 1, name="output-tensor-proba"),
        "logits":
            array_ops.placeholder(
                dtypes.float32, 1, name="output-tensor-logits-unused"),
    }
    problem_type = constants.ProblemType.CLASSIFICATION
    actual_signature_def = (
        saved_model_export_utils.build_standardized_signature_def(
            input_tensors, output_tensors, problem_type))
    expected_signature_def = meta_graph_pb2.SignatureDef()
    shape = tensor_shape_pb2.TensorShapeProto(
        dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
    dtype_float = types_pb2.DataType.Value("DT_FLOAT")
    dtype_string = types_pb2.DataType.Value("DT_STRING")
    expected_signature_def.inputs[signature_constants.CLASSIFY_INPUTS].CopyFrom(
        meta_graph_pb2.TensorInfo(
            name="input-tensor-1:0", dtype=dtype_string, tensor_shape=shape))
    # Probabilities map onto CLASSIFY_OUTPUT_SCORES; logits are dropped.
    expected_signature_def.outputs[
        signature_constants.CLASSIFY_OUTPUT_SCORES].CopyFrom(
            meta_graph_pb2.TensorInfo(
                name="output-tensor-proba:0",
                dtype=dtype_float,
                tensor_shape=shape))
    expected_signature_def.method_name = (
        signature_constants.CLASSIFY_METHOD_NAME)
    self.assertEqual(actual_signature_def, expected_signature_def)
  def test_build_standardized_signature_def_classification5(self):
    """Tests multiple output tensors that include integer classes and scores.

    Integer classes are dropped out, because Servo classification can only serve
    string classes. So, only scores are present in the signature.
    """
    input_tensors = {
        "input-1":
            array_ops.placeholder(dtypes.string, 1, name="input-tensor-1")
    }
    output_tensors = {
        "classes":
            array_ops.placeholder(
                dtypes.int64, 1, name="output-tensor-classes"),
        "scores":
            array_ops.placeholder(
                dtypes.float32, 1, name="output-tensor-scores"),
        "logits":
            array_ops.placeholder(
                dtypes.float32, 1, name="output-tensor-logits-unused"),
    }
    problem_type = constants.ProblemType.CLASSIFICATION
    actual_signature_def = (
        saved_model_export_utils.build_standardized_signature_def(
            input_tensors, output_tensors, problem_type))
    expected_signature_def = meta_graph_pb2.SignatureDef()
    shape = tensor_shape_pb2.TensorShapeProto(
        dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
    dtype_float = types_pb2.DataType.Value("DT_FLOAT")
    dtype_string = types_pb2.DataType.Value("DT_STRING")
    expected_signature_def.inputs[signature_constants.CLASSIFY_INPUTS].CopyFrom(
        meta_graph_pb2.TensorInfo(
            name="input-tensor-1:0", dtype=dtype_string, tensor_shape=shape))
    # int64 classes cannot be served; only the scores survive.
    expected_signature_def.outputs[
        signature_constants.CLASSIFY_OUTPUT_SCORES].CopyFrom(
            meta_graph_pb2.TensorInfo(
                name="output-tensor-scores:0",
                dtype=dtype_float,
                tensor_shape=shape))
    expected_signature_def.method_name = (
        signature_constants.CLASSIFY_METHOD_NAME)
    self.assertEqual(actual_signature_def, expected_signature_def)
  def test_build_standardized_signature_def_classification6(self):
    """Tests multiple output tensors that with integer classes and no scores.

    Servo classification cannot serve integer classes, but no scores are
    available. So, we fall back to predict signature.
    """
    input_tensors = {
        "input-1":
            array_ops.placeholder(dtypes.string, 1, name="input-tensor-1")
    }
    output_tensors = {
        "classes":
            array_ops.placeholder(
                dtypes.int64, 1, name="output-tensor-classes"),
        "logits":
            array_ops.placeholder(
                dtypes.float32, 1, name="output-tensor-logits"),
    }
    problem_type = constants.ProblemType.CLASSIFICATION
    actual_signature_def = (
        saved_model_export_utils.build_standardized_signature_def(
            input_tensors, output_tensors, problem_type))
    expected_signature_def = meta_graph_pb2.SignatureDef()
    shape = tensor_shape_pb2.TensorShapeProto(
        dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
    dtype_int64 = types_pb2.DataType.Value("DT_INT64")
    dtype_float = types_pb2.DataType.Value("DT_FLOAT")
    dtype_string = types_pb2.DataType.Value("DT_STRING")
    # Predict signatures keep the original tensor-dict keys verbatim.
    expected_signature_def.inputs["input-1"].CopyFrom(
        meta_graph_pb2.TensorInfo(
            name="input-tensor-1:0", dtype=dtype_string, tensor_shape=shape))
    expected_signature_def.outputs["classes"].CopyFrom(
        meta_graph_pb2.TensorInfo(
            name="output-tensor-classes:0",
            dtype=dtype_int64,
            tensor_shape=shape))
    expected_signature_def.outputs["logits"].CopyFrom(
        meta_graph_pb2.TensorInfo(
            name="output-tensor-logits:0",
            dtype=dtype_float,
            tensor_shape=shape))
    expected_signature_def.method_name = (
        signature_constants.PREDICT_METHOD_NAME)
    self.assertEqual(actual_signature_def, expected_signature_def)
def test_get_input_alternatives(self):
input_ops = input_fn_utils.InputFnOps("bogus features dict", None,
"bogus default input dict")
input_alternatives, _ = saved_model_export_utils.get_input_alternatives(
input_ops)
self.assertEqual(input_alternatives[
saved_model_export_utils.DEFAULT_INPUT_ALTERNATIVE_KEY],
"bogus default input dict")
# self.assertEqual(input_alternatives[
# saved_model_export_utils.FEATURES_INPUT_ALTERNATIVE_KEY],
# "bogus features dict")
def test_get_output_alternatives_explicit_default(self):
provided_output_alternatives = {
"head-1": (constants.ProblemType.LINEAR_REGRESSION,
"bogus output dict"),
"head-2": (constants.ProblemType.CLASSIFICATION, "bogus output dict 2"),
"head-3": (constants.ProblemType.UNSPECIFIED, "bogus output dict 3"),
}
model_fn_ops = model_fn.ModelFnOps(
model_fn.ModeKeys.INFER,
predictions={"some_output": "bogus_tensor"},
output_alternatives=provided_output_alternatives)
output_alternatives, _ = saved_model_export_utils.get_output_alternatives(
model_fn_ops, "head-1")
self.assertEqual(provided_output_alternatives, output_alternatives)
  def test_get_output_alternatives_wrong_default(self):
    """Requesting an unknown default alternative key raises ValueError."""
    provided_output_alternatives = {
        "head-1": (constants.ProblemType.LINEAR_REGRESSION,
                   "bogus output dict"),
        "head-2": (constants.ProblemType.CLASSIFICATION, "bogus output dict 2"),
        "head-3": (constants.ProblemType.UNSPECIFIED, "bogus output dict 3"),
    }
    model_fn_ops = model_fn.ModelFnOps(
        model_fn.ModeKeys.INFER,
        predictions={"some_output": "bogus_tensor"},
        output_alternatives=provided_output_alternatives)
    with self.assertRaises(ValueError) as e:
      saved_model_export_utils.get_output_alternatives(model_fn_ops, "WRONG")
    self.assertEqual("Requested default_output_alternative: WRONG, but "
                     "available output_alternatives are: ['head-1', 'head-2', "
                     "'head-3']", str(e.exception))
  def test_get_output_alternatives_single_no_default(self):
    """A single provided alternative needs no explicit default key."""
    prediction_tensor = constant_op.constant(["bogus"])
    provided_output_alternatives = {
        "head-1": (constants.ProblemType.LINEAR_REGRESSION, {
            "output": prediction_tensor
        }),
    }
    model_fn_ops = model_fn.ModelFnOps(
        model_fn.ModeKeys.INFER,
        predictions=prediction_tensor,
        output_alternatives=provided_output_alternatives)
    output_alternatives, _ = saved_model_export_utils.get_output_alternatives(
        model_fn_ops)
    self.assertEqual({
        "head-1": (constants.ProblemType.LINEAR_REGRESSION, {
            "output": prediction_tensor
        })
    }, output_alternatives)
  def test_get_output_alternatives_multi_no_default(self):
    """Multiple alternatives without a default key raise ValueError."""
    provided_output_alternatives = {
        "head-1": (constants.ProblemType.LINEAR_REGRESSION,
                   "bogus output dict"),
        "head-2": (constants.ProblemType.CLASSIFICATION, "bogus output dict 2"),
        "head-3": (constants.ProblemType.UNSPECIFIED, "bogus output dict 3"),
    }
    model_fn_ops = model_fn.ModelFnOps(
        model_fn.ModeKeys.INFER,
        predictions={"some_output": "bogus_tensor"},
        output_alternatives=provided_output_alternatives)
    with self.assertRaises(ValueError) as e:
      saved_model_export_utils.get_output_alternatives(model_fn_ops)
    self.assertEqual("Please specify a default_output_alternative. Available "
                     "output_alternatives are: ['head-1', 'head-2', 'head-3']",
                     str(e.exception))
def test_get_output_alternatives_none_provided(self):
prediction_tensor = constant_op.constant(["bogus"])
model_fn_ops = model_fn.ModelFnOps(
model_fn.ModeKeys.INFER,
predictions={"some_output": prediction_tensor},
output_alternatives=None)
output_alternatives, _ = saved_model_export_utils.get_output_alternatives(
model_fn_ops)
self.assertEqual({
"default_output_alternative": (constants.ProblemType.UNSPECIFIED, {
"some_output": prediction_tensor
})
}, output_alternatives)
  def test_get_output_alternatives_empty_provided_with_default(self):
    """An empty alternatives dict plus a requested default raises ValueError."""
    prediction_tensor = constant_op.constant(["bogus"])
    model_fn_ops = model_fn.ModelFnOps(
        model_fn.ModeKeys.INFER,
        predictions={"some_output": prediction_tensor},
        output_alternatives={})
    with self.assertRaises(ValueError) as e:
      saved_model_export_utils.get_output_alternatives(model_fn_ops, "WRONG")
    self.assertEqual("Requested default_output_alternative: WRONG, but "
                     "available output_alternatives are: []", str(e.exception))
  def test_get_output_alternatives_empty_provided_no_default(self):
    """An empty alternatives dict falls back to the synthesized default."""
    prediction_tensor = constant_op.constant(["bogus"])
    model_fn_ops = model_fn.ModelFnOps(
        model_fn.ModeKeys.INFER,
        predictions={"some_output": prediction_tensor},
        output_alternatives={})
    output_alternatives, _ = saved_model_export_utils.get_output_alternatives(
        model_fn_ops)
    self.assertEqual({
        "default_output_alternative": (constants.ProblemType.UNSPECIFIED, {
            "some_output": prediction_tensor
        })
    }, output_alternatives)
def test_get_output_alternatives_implicit_single(self):
prediction_tensor = constant_op.constant(["bogus"])
model_fn_ops = model_fn.ModelFnOps(
model_fn.ModeKeys.INFER,
predictions=prediction_tensor,
output_alternatives=None)
output_alternatives, _ = saved_model_export_utils.get_output_alternatives(
model_fn_ops)
self.assertEqual({
"default_output_alternative": (constants.ProblemType.UNSPECIFIED, {
"output": prediction_tensor
})
}, output_alternatives)
  def test_build_all_signature_defs(self):
    """One signature def per (input alternative x output head), plus a
    "serving_default" signature built from the requested default head."""
    input_features = constant_op.constant(["10"])
    input_example = constant_op.constant(["input string"])
    input_ops = input_fn_utils.InputFnOps({
        "features": input_features
    }, None, {
        "default input": input_example
    })
    input_alternatives, _ = (
        saved_model_export_utils.get_input_alternatives(input_ops))

    # Three heads, one per ProblemType, each with its own output tensor.
    output_1 = constant_op.constant([1.0])
    output_2 = constant_op.constant(["2"])
    output_3 = constant_op.constant(["3"])
    provided_output_alternatives = {
        "head-1": (constants.ProblemType.LINEAR_REGRESSION, {
            "some_output_1": output_1
        }),
        "head-2": (constants.ProblemType.CLASSIFICATION, {
            "some_output_2": output_2
        }),
        "head-3": (constants.ProblemType.UNSPECIFIED, {
            "some_output_3": output_3
        }),
    }
    model_fn_ops = model_fn.ModelFnOps(
        model_fn.ModeKeys.INFER,
        predictions={"some_output": constant_op.constant(["4"])},
        output_alternatives=provided_output_alternatives)
    output_alternatives, _ = (
        saved_model_export_utils.get_output_alternatives(
            model_fn_ops, "head-1"))

    signature_defs = saved_model_export_utils.build_all_signature_defs(
        input_alternatives, output_alternatives, "head-1")

    # The signature flavor follows each head's ProblemType:
    # regression / classification / generic predict.
    expected_signature_defs = {
        "serving_default":
            signature_def_utils.regression_signature_def(
                input_example, output_1),
        "default_input_alternative:head-1":
            signature_def_utils.regression_signature_def(
                input_example, output_1),
        "default_input_alternative:head-2":
            signature_def_utils.classification_signature_def(
                input_example, output_2, None),
        "default_input_alternative:head-3":
            signature_def_utils.predict_signature_def({
                "default input": input_example
            }, {
                "some_output_3": output_3
            }),
        # "features_input_alternative:head-1":
        #     signature_def_utils.regression_signature_def(input_features,
        #                                                  output_1),
        # "features_input_alternative:head-2":
        #     signature_def_utils.classification_signature_def(input_features,
        #                                                      output_2, None),
        # "features_input_alternative:head-3":
        #     signature_def_utils.predict_signature_def({
        #         "input": input_features
        #     }, {"output": output_3}),
    }

    self.assertDictEqual(expected_signature_defs, signature_defs)
  def test_build_all_signature_defs_legacy_input_fn_not_supported(self):
    """Tests that legacy input_fn returning (features, labels) raises error.

    serving_input_fn must return InputFnOps including a default input
    alternative.
    """
    input_features = constant_op.constant(["10"])
    # Legacy two-tuple form instead of an InputFnOps namedtuple.
    input_ops = ({"features": input_features}, None)
    input_alternatives, _ = (
        saved_model_export_utils.get_input_alternatives(input_ops))

    output_1 = constant_op.constant(["1"])
    output_2 = constant_op.constant(["2"])
    output_3 = constant_op.constant(["3"])
    provided_output_alternatives = {
        "head-1": (constants.ProblemType.LINEAR_REGRESSION, {
            "some_output_1": output_1
        }),
        "head-2": (constants.ProblemType.CLASSIFICATION, {
            "some_output_2": output_2
        }),
        "head-3": (constants.ProblemType.UNSPECIFIED, {
            "some_output_3": output_3
        }),
    }
    model_fn_ops = model_fn.ModelFnOps(
        model_fn.ModeKeys.INFER,
        predictions={"some_output": constant_op.constant(["4"])},
        output_alternatives=provided_output_alternatives)
    output_alternatives, _ = (
        saved_model_export_utils.get_output_alternatives(
            model_fn_ops, "head-1"))

    with self.assertRaisesRegexp(
        ValueError, "A default input_alternative must be provided"):
      saved_model_export_utils.build_all_signature_defs(
          input_alternatives, output_alternatives, "head-1")
  def test_get_timestamped_export_dir(self):
    """Successive export dirs are named by distinct, increasing
    10-digit seconds-since-epoch timestamps."""
    # NOTE(review): string concatenation (not os.path.join) fuses "export/"
    # onto the mkdtemp name; it works, but looks unintentional — confirm.
    export_dir_base = tempfile.mkdtemp() + "export/"
    export_dir_1 = saved_model_export_utils.get_timestamped_export_dir(
        export_dir_base)
    time.sleep(2)
    export_dir_2 = saved_model_export_utils.get_timestamped_export_dir(
        export_dir_base)
    time.sleep(2)
    export_dir_3 = saved_model_export_utils.get_timestamped_export_dir(
        export_dir_base)

    # Export directories should be named using a timestamp that is seconds
    # since epoch. Such a timestamp is 10 digits long.
    time_1 = os.path.basename(export_dir_1)
    self.assertEqual(10, len(time_1))
    time_2 = os.path.basename(export_dir_2)
    self.assertEqual(10, len(time_2))
    time_3 = os.path.basename(export_dir_3)
    self.assertEqual(10, len(time_3))

    # The 2-second sleeps above guarantee strictly increasing timestamps.
    self.assertTrue(int(time_1) < int(time_2))
    self.assertTrue(int(time_2) < int(time_3))
  def test_garbage_collect_exports(self):
    """Only the N most recent timestamped export dirs survive collection."""
    export_dir_base = tempfile.mkdtemp() + "export/"
    gfile.MkDir(export_dir_base)
    export_dir_1 = _create_test_export_dir(export_dir_base)
    export_dir_2 = _create_test_export_dir(export_dir_base)
    export_dir_3 = _create_test_export_dir(export_dir_base)
    export_dir_4 = _create_test_export_dir(export_dir_base)

    self.assertTrue(gfile.Exists(export_dir_1))
    self.assertTrue(gfile.Exists(export_dir_2))
    self.assertTrue(gfile.Exists(export_dir_3))
    self.assertTrue(gfile.Exists(export_dir_4))

    # Garbage collect all but the most recent 2 exports,
    # where recency is determined based on the timestamp directory names.
    saved_model_export_utils.garbage_collect_exports(export_dir_base, 2)

    self.assertFalse(gfile.Exists(export_dir_1))
    self.assertFalse(gfile.Exists(export_dir_2))
    self.assertTrue(gfile.Exists(export_dir_3))
    self.assertTrue(gfile.Exists(export_dir_4))
  def test_get_most_recent_export(self):
    """get_most_recent_export returns the newest timestamped dir and its
    timestamp-derived version number."""
    export_dir_base = tempfile.mkdtemp() + "export/"
    gfile.MkDir(export_dir_base)
    _create_test_export_dir(export_dir_base)
    _create_test_export_dir(export_dir_base)
    _create_test_export_dir(export_dir_base)
    export_dir_4 = _create_test_export_dir(export_dir_base)

    (most_recent_export_dir, most_recent_export_version) = (
        saved_model_export_utils.get_most_recent_export(export_dir_base))

    self.assertEqual(
        compat.as_bytes(export_dir_4), compat.as_bytes(most_recent_export_dir))
    # The version number is the basename of the newest export dir.
    self.assertEqual(
        compat.as_bytes(export_dir_4),
        os.path.join(
            compat.as_bytes(export_dir_base),
            compat.as_bytes(str(most_recent_export_version))))
  def test_make_export_strategy(self):
    """Only tests that an ExportStrategy instance is created."""

    def _serving_input_fn():
      return array_ops.constant([1]), None

    export_strategy = saved_model_export_utils.make_export_strategy(
        serving_input_fn=_serving_input_fn,
        default_output_alternative_key="default",
        assets_extra={"from/path": "to/path"},
        as_text=False,
        exports_to_keep=5)
    self.assertTrue(
        isinstance(export_strategy, export_strategy_lib.ExportStrategy))
  def test_make_parsing_export_strategy(self):
    """Only tests that an ExportStrategy instance is created."""
    # Cover one column of each flavor: sparse, embedding, real-valued,
    # bucketized.
    sparse_col = fc.sparse_column_with_hash_bucket(
        "sparse_column", hash_bucket_size=100)
    embedding_col = fc.embedding_column(
        fc.sparse_column_with_hash_bucket(
            "sparse_column_for_embedding", hash_bucket_size=10),
        dimension=4)
    real_valued_col1 = fc.real_valued_column("real_valued_column1")
    bucketized_col1 = fc.bucketized_column(
        fc.real_valued_column("real_valued_column_for_bucketization1"), [0, 4])
    feature_columns = [
        sparse_col, embedding_col, real_valued_col1, bucketized_col1
    ]

    export_strategy = saved_model_export_utils.make_parsing_export_strategy(
        feature_columns=feature_columns)
    self.assertTrue(
        isinstance(export_strategy, export_strategy_lib.ExportStrategy))
  def test_make_best_model_export_strategy(self):
    """Exports happen only for checkpoints whose loss beats the best so far;
    export() returns "" when nothing is exported."""
    export_dir_base = tempfile.mkdtemp() + "export/"
    gfile.MkDir(export_dir_base)

    test_estimator = TestEstimator()
    export_strategy = saved_model_export_utils.make_best_model_export_strategy(
        serving_input_fn=None, exports_to_keep=3, compare_fn=None)

    # First checkpoint: nothing to compare against, so it exports.
    self.assertNotEqual("",
                        export_strategy.export(test_estimator, export_dir_base,
                                               "fake_ckpt_0", {
                                                   "loss": 100
                                               }))
    self.assertNotEqual("", test_estimator.last_exported_dir)
    self.assertNotEqual("", test_estimator.last_exported_checkpoint)

    # Loss 101 is worse than 100: no export; last export unchanged.
    self.assertEqual("",
                     export_strategy.export(test_estimator, export_dir_base,
                                            "fake_ckpt_1", {
                                                "loss": 101
                                            }))
    self.assertEqual(test_estimator.last_exported_dir,
                     os.path.join(export_dir_base, "fake_ckpt_0"))

    # Loss 10 beats 100: new export.
    self.assertNotEqual("",
                        export_strategy.export(test_estimator, export_dir_base,
                                               "fake_ckpt_2", {
                                                   "loss": 10
                                               }))
    self.assertEqual(test_estimator.last_exported_dir,
                     os.path.join(export_dir_base, "fake_ckpt_2"))

    # Loss 20 does not beat 10: no export.
    self.assertEqual("",
                     export_strategy.export(test_estimator, export_dir_base,
                                            "fake_ckpt_3", {
                                                "loss": 20
                                            }))
    self.assertEqual(test_estimator.last_exported_dir,
                     os.path.join(export_dir_base, "fake_ckpt_2"))
  def test_make_best_model_export_strategy_with_preemption(self):
    """Pre-existing eval event files (as after a preemption/restart) feed
    the best-metric comparison, so a worse checkpoint is not exported."""
    model_dir = self.get_temp_dir()
    eval_dir_base = os.path.join(model_dir, "eval_continuous")
    # Simulate earlier eval runs with losses 50 and 60 already on disk.
    core_estimator._write_dict_to_summary(eval_dir_base, {"loss": 50}, 1)
    core_estimator._write_dict_to_summary(eval_dir_base, {"loss": 60}, 2)

    test_estimator = TestEstimator()
    export_strategy = saved_model_export_utils.make_best_model_export_strategy(
        serving_input_fn=None,
        exports_to_keep=3,
        model_dir=model_dir,
        event_file_pattern="eval_continuous/*.tfevents.*",
        compare_fn=None)

    export_dir_base = os.path.join(self.get_temp_dir(), "export")
    # Loss 100 is worse than the recorded best (50): no export.
    self.assertEqual("",
                     export_strategy.export(test_estimator, export_dir_base,
                                            "fake_ckpt_0", {
                                                "loss": 100
                                            }))
    self.assertEqual("", test_estimator.last_exported_dir)
    self.assertEqual("", test_estimator.last_exported_checkpoint)

    # Loss 10 beats 50: export happens.
    self.assertNotEqual("",
                        export_strategy.export(test_estimator, export_dir_base,
                                               "fake_ckpt_2", {
                                                   "loss": 10
                                               }))
    self.assertEqual(test_estimator.last_exported_dir,
                     os.path.join(export_dir_base, "fake_ckpt_2"))

    # Loss 20 does not beat 10: no new export.
    self.assertEqual("",
                     export_strategy.export(test_estimator, export_dir_base,
                                            "fake_ckpt_3", {
                                                "loss": 20
                                            }))
    self.assertEqual(test_estimator.last_exported_dir,
                     os.path.join(export_dir_base, "fake_ckpt_2"))
  def test_make_best_model_export_strategy_exceptions(self):
    """ValueError is raised for an empty checkpoint path or missing
    eval result."""
    export_dir_base = tempfile.mkdtemp() + "export/"

    test_estimator = TestEstimator()
    export_strategy = saved_model_export_utils.make_best_model_export_strategy(
        serving_input_fn=None, exports_to_keep=3, compare_fn=None)

    with self.assertRaises(ValueError):
      export_strategy.export(test_estimator, export_dir_base, "", {"loss": 200})

    with self.assertRaises(ValueError):
      export_strategy.export(test_estimator, export_dir_base, "fake_ckpt_1",
                             None)
  def test_extend_export_strategy(self):
    """extend_export_strategy runs post_export_fn on the base export's
    output and adopts the explicitly supplied new name."""

    def _base_export_fn(unused_estimator,
                        export_dir_base,
                        unused_checkpoint_path=None):
      base_path = os.path.join(export_dir_base, "e1")
      gfile.MkDir(base_path)
      return base_path

    def _post_export_fn(orig_path, new_path):
      assert orig_path.endswith("/e1")
      post_export_path = os.path.join(new_path, "rewrite")
      gfile.MkDir(post_export_path)
      return post_export_path

    base_export_strategy = export_strategy_lib.ExportStrategy(
        "Servo", _base_export_fn)

    final_export_strategy = saved_model_export_utils.extend_export_strategy(
        base_export_strategy, _post_export_fn, "Servo2")
    self.assertEqual(final_export_strategy.name, "Servo2")

    test_estimator = TestEstimator()
    tmpdir = tempfile.mkdtemp()
    export_model_dir = os.path.join(tmpdir, "model")
    checkpoint_path = os.path.join(tmpdir, "checkpoint")
    final_path = final_export_strategy.export(test_estimator, export_model_dir,
                                              checkpoint_path)
    self.assertEqual(os.path.join(export_model_dir, "rewrite"), final_path)
  def test_extend_export_strategy_same_name(self):
    """When no new name is supplied, the extended strategy keeps the
    base strategy's name."""

    def _base_export_fn(unused_estimator,
                        export_dir_base,
                        unused_checkpoint_path=None):
      base_path = os.path.join(export_dir_base, "e1")
      gfile.MkDir(base_path)
      return base_path

    def _post_export_fn(orig_path, new_path):
      assert orig_path.endswith("/e1")
      post_export_path = os.path.join(new_path, "rewrite")
      gfile.MkDir(post_export_path)
      return post_export_path

    base_export_strategy = export_strategy_lib.ExportStrategy(
        "Servo", _base_export_fn)

    final_export_strategy = saved_model_export_utils.extend_export_strategy(
        base_export_strategy, _post_export_fn)
    self.assertEqual(final_export_strategy.name, "Servo")

    test_estimator = TestEstimator()
    tmpdir = tempfile.mkdtemp()
    export_model_dir = os.path.join(tmpdir, "model")
    checkpoint_path = os.path.join(tmpdir, "checkpoint")
    final_path = final_export_strategy.export(test_estimator, export_model_dir,
                                              checkpoint_path)
    self.assertEqual(os.path.join(export_model_dir, "rewrite"), final_path)
  def test_extend_export_strategy_raises_error(self):
    """A post_export_fn returning a path outside the export dir fails."""

    def _base_export_fn(unused_estimator,
                        export_dir_base,
                        unused_checkpoint_path=None):
      base_path = os.path.join(export_dir_base, "e1")
      gfile.MkDir(base_path)
      return base_path

    def _post_export_fn(unused_orig_path, unused_new_path):
      # Returns an unrelated temp dir, not a sub-directory of the export.
      return tempfile.mkdtemp()

    base_export_strategy = export_strategy_lib.ExportStrategy(
        "Servo", _base_export_fn)

    final_export_strategy = saved_model_export_utils.extend_export_strategy(
        base_export_strategy, _post_export_fn)

    test_estimator = TestEstimator()
    tmpdir = tempfile.mkdtemp()
    with self.assertRaises(ValueError) as ve:
      final_export_strategy.export(test_estimator, tmpdir,
                                   os.path.join(tmpdir, "checkpoint"))

    self.assertTrue(
        "post_export_fn must return a sub-directory" in str(ve.exception))
def _create_test_export_dir(export_dir_base):
  """Create and return a fresh timestamped export dir under the base.

  Sleeps 2 seconds before returning, presumably so that consecutive calls
  land on distinct second-resolution timestamps.
  """
  export_dir = saved_model_export_utils.get_timestamped_export_dir(
      export_dir_base)
  gfile.MkDir(export_dir)
  time.sleep(2)
  return export_dir
# Allow running this test module directly.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
abawchen/leetcode | solutions/077_combinations.py | 1 | 1172 | # Given two integers n and k, return all possible combinations of k numbers out of 1 ... n.
# For example,
# If n = 4 and k = 2, a solution is:
# [
# [2,4],
# [3,4],
# [2,3],
# [1,2],
# [1,3],
# [1,4],
# ]
class Solution:
    # @param {integer} n
    # @param {integer} k
    # @return {integer[][]}
    def combine(self, n, k):
        """Return all k-element combinations of the integers 1..n.

        Bug fix: the old guard ``if n == 0 or k == 0`` returned ``[[]]``
        whenever n == 0, even for k > 0, where no combination exists and
        the correct answer is ``[]``.
        """
        if k == 0:
            # Exactly one way to choose nothing: the empty combination.
            return [[]]
        if n == 0:
            # k > 0 items requested from an empty range: no combinations.
            return []
        ans = []
        nums = [x + 1 for x in range(n)]
        self._combine(ans, [], k, nums)
        return ans

    def _combine(self, ans, tmp, k, nums):
        """Append to `ans` every completion of the partial combination `tmp`
        using k more elements drawn in order from `nums`."""
        if k == 0:
            ans.append(tmp)
        else:
            # Only indices leaving at least k-1 elements to the right can
            # start a valid combination.
            for i in range(len(nums) - k + 1):
                self._combine(ans, tmp + [nums[i]], k - 1, nums[i + 1:])
jpope777/searx | searx/engines/faroo.py | 6 | 3527 | """
Faroo (Web, News)
@website http://www.faroo.com
@provide-api yes (http://www.faroo.com/hp/api/api.html), require API-key
@using-api yes
@results JSON
@stable yes
@parse url, title, content, publishedDate, img_src
"""
from urllib import urlencode
from json import loads
import datetime
from searx.utils import searx_useragent
# engine dependent config
categories = ['general', 'news']
paging = True
language_support = True
number_of_results = 10
# Presumably filled in from the engine settings; the Faroo API requires a
# key (see the module header).
api_key = None

# search-url template; placeholders are filled in request() below
url = 'http://www.faroo.com/'
search_url = url + 'api?{query}'\
    '&start={offset}'\
    '&length={number_of_results}'\
    '&l={language}'\
    '&src={categorie}'\
    '&i=false'\
    '&f=json'\
    '&key={api_key}'  # noqa

# map searx categories to Faroo "src" values
search_category = {'general': 'web',
                   'news': 'news'}
# do search-request
def request(query, params):
    """Fill in ``params['url']`` and the User-Agent header for a Faroo
    query and return the params dict.

    Reads ``pageno``, ``category`` and ``language`` from ``params``;
    unsupported languages fall back to English.
    """
    # Faroo's start index is 1-based.
    offset = (params['pageno'] - 1) * number_of_results + 1
    categorie = search_category.get(params['category'], 'web')

    if params['language'] == 'all':
        language = 'en'
    else:
        # keep only the language part of e.g. "de_DE"
        language = params['language'].split('_')[0]

    # if language is not supported, put it in english
    if language != 'en' and\
       language != 'de' and\
       language != 'zh':
        language = 'en'

    params['url'] = search_url.format(offset=offset,
                                      number_of_results=number_of_results,
                                      query=urlencode({'q': query}),
                                      language=language,
                                      categorie=categorie,
                                      api_key=api_key)

    # using searx User-Agent
    params['headers']['User-Agent'] = searx_useragent()

    return params
# get response from search-request
def response(resp):
    """Parse the JSON response from the Faroo API into a list of searx
    result dicts.

    Raises an Exception for HTTP 401 (invalid API key) and HTTP 429
    (rate limit exceeded).  News results carry a ``publishedDate``;
    results with a non-empty ``iurl`` additionally yield an image result.
    """
    # HTTP-Code 401: api-key is not valid
    if resp.status_code == 401:
        # Fixed typo in user-facing message: "valide" -> "valid".
        raise Exception("API key is not valid")

    # HTTP-Code 429: rate limit exceeded
    if resp.status_code == 429:
        raise Exception("rate limit has been exceeded!")

    results = []

    search_res = loads(resp.text)

    # return empty array if there are no results
    if not search_res.get('results', {}):
        return []

    # parse results
    for result in search_res['results']:
        if result['news']:
            # timestamp (milliseconds since 1970)
            publishedDate = datetime.datetime.fromtimestamp(result['date']/1000.0)  # noqa

            # append news result
            results.append({'url': result['url'],
                            'title': result['title'],
                            'publishedDate': publishedDate,
                            'content': result['kwic']})
        else:
            # append general result
            # TODO, publishedDate correct?
            results.append({'url': result['url'],
                            'title': result['title'],
                            'content': result['kwic']})

        # append image result if image url is set
        # TODO, show results with an image like in faroo
        if result['iurl']:
            results.append({'template': 'images.html',
                            'url': result['url'],
                            'title': result['title'],
                            'content': result['kwic'],
                            'img_src': result['iurl']})

    # return results
    return results
| agpl-3.0 |
Parallel-in-Time/pySDC | pySDC/implementations/problem_classes/FullSolarSystem.py | 1 | 4225 | import numpy as np
from pySDC.core.Errors import ParameterError
from pySDC.implementations.datatype_classes.particles import particles, acceleration
from pySDC.implementations.problem_classes.OuterSolarSystem import outer_solar_system
# noinspection PyUnusedLocal
class full_solar_system(outer_solar_system):
    """
    Example implementing the full solar system problem (Sun + 9 bodies).
    """

    def __init__(self, problem_params, dtype_u=particles, dtype_f=acceleration):
        """
        Initialization routine

        Args:
            problem_params (dict): custom parameters for the example
            dtype_u: particle data type (will be passed to parent class)
            dtype_f: acceleration data type (will be passed to parent class)
        """
        if 'sun_only' not in problem_params:
            problem_params['sun_only'] = False

        # these parameters will be used later, so assert their existence
        # NOTE(review): the list is empty, so this check is currently a no-op.
        essential_keys = []
        for key in essential_keys:
            if key not in problem_params:
                msg = 'need %s to instantiate problem, only got %s' % (key, str(problem_params.keys()))
                raise ParameterError(msg)

        # invoke the grandparent's (!) __init__ — deliberately bypassing
        # outer_solar_system.__init__ — passing nparts, dtype_u and dtype_f
        super(outer_solar_system, self).__init__(((3, 10), None, np.dtype('float64')), dtype_u, dtype_f, problem_params)

        # gravitational constant (heliocentric units consistent with the
        # AU-scale positions and sun-relative masses below — TODO confirm)
        self.G = 2.95912208286E-4

    def u_exact(self, t):
        """
        Routine to compute the exact/initial trajectory at time t

        Args:
            t (float): current time (only t=0 is supported)

        Returns:
            dtype_u: exact/initial position and velocity
        """
        assert t == 0.0, 'error, u_exact only works for the initial time t0=0'
        me = self.dtype_u(self.init)

        # initial positions and velocities taken from
        # https://www.aanda.org/articles/aa/full/2002/08/aa1405/aa1405.right.html
        me.pos[:, 0] = [0.0, 0.0, 0.0]
        me.pos[:, 1] = [-2.503321047836E-01, +1.873217481656E-01, +1.260230112145E-01]
        me.pos[:, 2] = [+1.747780055994E-02, -6.624210296743E-01, -2.991203277122E-01]
        me.pos[:, 3] = [-9.091916173950E-01, +3.592925969244E-01, +1.557729610506E-01]
        me.pos[:, 4] = [+1.203018828754E+00, +7.270712989688E-01, +3.009561427569E-01]
        me.pos[:, 5] = [+3.733076999471E+00, +3.052424824299E+00, +1.217426663570E+00]
        me.pos[:, 6] = [+6.164433062913E+00, +6.366775402981E+00, +2.364531109847E+00]
        me.pos[:, 7] = [+1.457964661868E+01, -1.236891078519E+01, -5.623617280033E+00]
        me.pos[:, 8] = [+1.695491139909E+01, -2.288713988623E+01, -9.789921035251E+00]
        me.pos[:, 9] = [-9.707098450131E+00, -2.804098175319E+01, -5.823808919246E+00]

        me.vel[:, 0] = [0.0, 0.0, 0.0]
        me.vel[:, 1] = [-2.438808424736E-02, -1.850224608274E-02, -7.353811537540E-03]
        me.vel[:, 2] = [+2.008547034175E-02, +8.365454832702E-04, -8.947888514893E-04]
        me.vel[:, 3] = [-7.085843239142E-03, -1.455634327653E-02, -6.310912842359E-03]
        me.vel[:, 4] = [-7.124453943885E-03, +1.166307407692E-02, +5.542098698449E-03]
        me.vel[:, 5] = [-5.086540617947E-03, +5.493643783389E-03, +2.478685100749E-03]
        me.vel[:, 6] = [-4.426823593779E-03, +3.394060157503E-03, +1.592261423092E-03]
        me.vel[:, 7] = [+2.647505630327E-03, +2.487457379099E-03, +1.052000252243E-03]
        me.vel[:, 8] = [+2.568651772461E-03, +1.681832388267E-03, +6.245613982833E-04]
        me.vel[:, 9] = [+3.034112963576E-03, -1.111317562971E-03, -1.261841468083E-03]

        # masses relative to the sun taken from
        # https://en.wikipedia.org/wiki/Planetary_mass#Values_from_the_DE405_ephemeris
        me.m[0] = 1.0  # Sun
        me.m[1] = 0.1660100 * 1E-06  # Mercury
        me.m[2] = 2.4478383 * 1E-06  # Venus
        me.m[3] = 3.0404326 * 1E-06  # Earth+Moon
        me.m[4] = 0.3227151 * 1E-06  # Mars
        me.m[5] = 954.79194 * 1E-06  # Jupiter
        me.m[6] = 285.88600 * 1E-06  # Saturn
        me.m[7] = 43.662440 * 1E-06  # Uranus
        me.m[8] = 51.513890 * 1E-06  # Neptune
        me.m[9] = 0.0073960 * 1E-06  # Pluto

        return me
| bsd-2-clause |
DenL/pogom-webhook | pogom/pgoapi/protos/POGOProtos/Enums/IapItemCategory_pb2.py | 16 | 2408 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Enums/IapItemCategory.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Enums/IapItemCategory.proto',
package='POGOProtos.Enums',
syntax='proto3',
serialized_pb=_b('\n&POGOProtos/Enums/IapItemCategory.proto\x12\x10POGOProtos.Enums*\x94\x01\n\x13HoloIapItemCategory\x12\x15\n\x11IAP_CATEGORY_NONE\x10\x00\x12\x17\n\x13IAP_CATEGORY_BUNDLE\x10\x01\x12\x16\n\x12IAP_CATEGORY_ITEMS\x10\x02\x12\x19\n\x15IAP_CATEGORY_UPGRADES\x10\x03\x12\x1a\n\x16IAP_CATEGORY_POKECOINS\x10\x04\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_HOLOIAPITEMCATEGORY = _descriptor.EnumDescriptor(
name='HoloIapItemCategory',
full_name='POGOProtos.Enums.HoloIapItemCategory',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='IAP_CATEGORY_NONE', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IAP_CATEGORY_BUNDLE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IAP_CATEGORY_ITEMS', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IAP_CATEGORY_UPGRADES', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IAP_CATEGORY_POKECOINS', index=4, number=4,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=61,
serialized_end=209,
)
_sym_db.RegisterEnumDescriptor(_HOLOIAPITEMCATEGORY)
HoloIapItemCategory = enum_type_wrapper.EnumTypeWrapper(_HOLOIAPITEMCATEGORY)
IAP_CATEGORY_NONE = 0
IAP_CATEGORY_BUNDLE = 1
IAP_CATEGORY_ITEMS = 2
IAP_CATEGORY_UPGRADES = 3
IAP_CATEGORY_POKECOINS = 4
DESCRIPTOR.enum_types_by_name['HoloIapItemCategory'] = _HOLOIAPITEMCATEGORY
# @@protoc_insertion_point(module_scope)
| mit |
Gillu13/scipy | scipy/integrate/tests/test_banded_ode_solvers.py | 117 | 6863 |
from __future__ import division, print_function, absolute_import
import itertools
import numpy as np
from numpy.testing import run_module_suite, assert_allclose
from scipy.integrate import ode
def _band_count(a):
    """Return (ml, mu): the lower and upper bandwidths of the 2-d array a.

    ml is the offset of the outermost subdiagonal containing a nonzero
    entry; mu is the analogous offset for the superdiagonals.  Both are 0
    when no such diagonal exists.
    """
    nrows, ncols = a.shape

    # Scan subdiagonals from the outermost inward; the first nonzero one
    # determines the lower bandwidth.
    ml = next((-k for k in range(-nrows + 1, 0) if np.diag(a, k).any()), 0)

    # Scan superdiagonals from the outermost inward for the upper bandwidth.
    mu = next((k for k in range(nrows - 1, 0, -1) if np.diag(a, k).any()), 0)

    return ml, mu
def _linear_func(t, y, a):
    """Right-hand side of the linear system dy/dt = a * y.

    The time argument `t` is unused but required by the ode interface.
    """
    return np.dot(a, y)
def _linear_jac(t, y, a):
    """Full Jacobian of the linear right-hand side a * y, which is just a.

    `t` and `y` are unused but required by the ode callback signature.
    """
    return a
def _linear_banded_jac(t, y, a):
    """Return the Jacobian `a` packed into the banded row layout used by
    ode's lband/uband interface: superdiagonals (outermost first,
    left-padded with zeros), the main diagonal, then subdiagonals
    (right-padded with zeros)."""
    ml, mu = _band_count(a)
    upper = [np.r_[[0] * k, np.diag(a, k)] for k in range(mu, 0, -1)]
    lower = [np.r_[np.diag(a, -k), [0] * k] for k in range(1, ml + 1)]
    return upper + [np.diag(a)] + lower
def _solve_linear_sys(a, y0, tend=1, dt=0.1,
                      solver=None, method='bdf', use_jac=True,
                      with_jacobian=False, banded=False):
    """Use scipy.integrate.ode to solve a linear system of ODEs.

    a : square ndarray
        Matrix of the linear system to be solved.
    y0 : ndarray
        Initial condition
    tend : float
        Stop time.
    dt : float
        Step size of the output.
    solver : str
        If not None, this must be "vode", "lsoda" or "zvode".
        If None, "zvode" is used for a complex `a`, "vode" otherwise.
    method : str
        Either "bdf" or "adams".
    use_jac : bool
        Determines if the jacobian function is passed to ode().
    with_jacobian : bool
        Passed to ode.set_integrator().
    banded : bool
        Determines whether a banded or full jacobian is used.
        If `banded` is True, `lband` and `uband` are determined by the
        values in `a`.

    Returns (t, y): the 1-d array of output times (starting at 0) and the
    2-d array of solution values, one row per time.
    """
    if banded:
        lband, uband = _band_count(a)
    else:
        lband = None
        uband = None

    # Pick the right-hand side / Jacobian callbacks for the requested mode.
    if use_jac:
        if banded:
            r = ode(_linear_func, _linear_banded_jac)
        else:
            r = ode(_linear_func, _linear_jac)
    else:
        r = ode(_linear_func)

    # Select a default solver based on whether `a` is complex.
    if solver is None:
        if np.iscomplexobj(a):
            solver = "zvode"
        else:
            solver = "vode"

    r.set_integrator(solver,
                     with_jacobian=with_jacobian,
                     method=method,
                     lband=lband, uband=uband,
                     rtol=1e-9, atol=1e-10,
                     )
    t0 = 0
    r.set_initial_value(y0, t0)
    # `a` is forwarded to both the rhs and the Jacobian callbacks.
    r.set_f_params(a)
    r.set_jac_params(a)

    t = [t0]
    y = [y0]
    while r.successful() and r.t < tend:
        r.integrate(r.t + dt)
        t.append(r.t)
        y.append(r.y)

    t = np.array(t)
    y = np.array(y)
    return t, y
def _analytical_solution(a, y0, t):
    """Exact solution of the linear system dy/dt = a * y with y(0) = y0.

    Valid only when `a` is diagonalizable.  Returns a 2-d array of shape
    (len(t), len(y0)); row i is the solution at time t[i].
    """
    eigvals, eigvecs = np.linalg.eig(a)
    # Expand the initial condition in the eigenbasis.
    coeffs = np.linalg.solve(eigvecs, y0)
    # Each mode evolves as exp(lambda * t); combine them back.
    modes = coeffs * np.exp(eigvals * t.reshape(-1, 1))
    return modes.dot(eigvecs.T)
def test_banded_ode_solvers():
    # Test the "lsoda", "vode" and "zvode" solvers of the `ode` class
    # with a system that has a banded Jacobian matrix.
    #
    # This is a nose-style generator test: each `yield` produces one test
    # case comparing the numerical solution against the analytical one
    # over every combination of solver options.

    t_exact = np.linspace(0, 1.0, 5)

    # --- Real arrays for testing the "lsoda" and "vode" solvers ---

    # lband = 2, uband = 1:
    a_real = np.array([[-0.6, 0.1, 0.0, 0.0, 0.0],
                       [0.2, -0.5, 0.9, 0.0, 0.0],
                       [0.1, 0.1, -0.4, 0.1, 0.0],
                       [0.0, 0.3, -0.1, -0.9, -0.3],
                       [0.0, 0.0, 0.1, 0.1, -0.7]])

    # lband = 0, uband = 1:
    a_real_upper = np.triu(a_real)

    # lband = 2, uband = 0:
    a_real_lower = np.tril(a_real)

    # lband = 0, uband = 0:
    a_real_diag = np.triu(a_real_lower)

    real_matrices = [a_real, a_real_upper, a_real_lower, a_real_diag]
    real_solutions = []

    # Precompute the exact solution for each test matrix.
    for a in real_matrices:
        y0 = np.arange(1, a.shape[0] + 1)
        y_exact = _analytical_solution(a, y0, t_exact)
        real_solutions.append((y0, t_exact, y_exact))

    def check_real(idx, solver, meth, use_jac, with_jac, banded):
        a = real_matrices[idx]
        y0, t_exact, y_exact = real_solutions[idx]
        t, y = _solve_linear_sys(a, y0,
                                 tend=t_exact[-1],
                                 dt=t_exact[1] - t_exact[0],
                                 solver=solver,
                                 method=meth,
                                 use_jac=use_jac,
                                 with_jacobian=with_jac,
                                 banded=banded)
        assert_allclose(t, t_exact)
        assert_allclose(y, y_exact)

    # Exercise every combination of solver options for each real matrix.
    for idx in range(len(real_matrices)):
        p = [['vode', 'lsoda'],  # solver
             ['bdf', 'adams'],   # method
             [False, True],      # use_jac
             [False, True],      # with_jacobian
             [False, True]]      # banded
        for solver, meth, use_jac, with_jac, banded in itertools.product(*p):
            yield check_real, idx, solver, meth, use_jac, with_jac, banded

    # --- Complex arrays for testing the "zvode" solver ---

    # complex, lband = 2, uband = 1:
    a_complex = a_real - 0.5j * a_real

    # complex, lband = 0, uband = 0:
    a_complex_diag = np.diag(np.diag(a_complex))

    complex_matrices = [a_complex, a_complex_diag]
    complex_solutions = []

    for a in complex_matrices:
        y0 = np.arange(1, a.shape[0] + 1) + 1j
        y_exact = _analytical_solution(a, y0, t_exact)
        complex_solutions.append((y0, t_exact, y_exact))

    def check_complex(idx, solver, meth, use_jac, with_jac, banded):
        a = complex_matrices[idx]
        y0, t_exact, y_exact = complex_solutions[idx]
        t, y = _solve_linear_sys(a, y0,
                                 tend=t_exact[-1],
                                 dt=t_exact[1] - t_exact[0],
                                 solver=solver,
                                 method=meth,
                                 use_jac=use_jac,
                                 with_jacobian=with_jac,
                                 banded=banded)
        assert_allclose(t, t_exact)
        assert_allclose(y, y_exact)

    # "zvode" is the only complex-capable solver.
    for idx in range(len(complex_matrices)):
        p = [['bdf', 'adams'],   # method
             [False, True],      # use_jac
             [False, True],      # with_jacobian
             [False, True]]      # banded
        for meth, use_jac, with_jac, banded in itertools.product(*p):
            yield check_complex, idx, "zvode", meth, use_jac, with_jac, banded
# Allow running this test module directly.
if __name__ == "__main__":
    run_module_suite()
| bsd-3-clause |
redhat-openstack/nova | nova/tests/test_policy.py | 15 | 8862 | # Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test of Policy Engine For Nova."""
import os.path
import StringIO
import mock
import six.moves.urllib.request as urlrequest
from nova import context
from nova import exception
from nova.openstack.common import policy as common_policy
from nova import policy
from nova import test
from nova.tests import policy_fixture
from nova import utils
class PolicyFileTestCase(test.NoDBTestCase):
    """Tests that rules are re-read from the policy file when it changes."""

    def setUp(self):
        super(PolicyFileTestCase, self).setUp()
        self.context = context.RequestContext('fake', 'fake')
        self.target = {}

    def test_modified_policy_reloads(self):
        with utils.tempdir() as tmpdir:
            tmpfilename = os.path.join(tmpdir, 'policy')
            self.flags(policy_file=tmpfilename)

            # NOTE(uni): context construction invokes policy check to
            # determine is_admin or not. As a side-effect, policy reset is
            # needed here to flush existing policy cache.
            policy.reset()

            action = "example:test"
            # An empty rule ("") allows the action.
            with open(tmpfilename, "w") as policyfile:
                policyfile.write('{"example:test": ""}')
            policy.enforce(self.context, action, self.target)

            # "!" denies the action; force the enforcer to re-read the file.
            with open(tmpfilename, "w") as policyfile:
                policyfile.write('{"example:test": "!"}')
            policy._ENFORCER.load_rules(True)
            self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
                              self.context, action, self.target)
class PolicyTestCase(test.NoDBTestCase):
def setUp(self):
super(PolicyTestCase, self).setUp()
rules = {
"true": '@',
"example:allowed": '@',
"example:denied": "!",
"example:get_http": "http://www.example.com",
"example:my_file": "role:compute_admin or "
"project_id:%(project_id)s",
"example:early_and_fail": "! and @",
"example:early_or_success": "@ or !",
"example:lowercase_admin": "role:admin or role:sysadmin",
"example:uppercase_admin": "role:ADMIN or role:sysadmin",
}
policy.reset()
policy.init()
policy.set_rules(dict((k, common_policy.parse_rule(v))
for k, v in rules.items()))
self.context = context.RequestContext('fake', 'fake', roles=['member'])
self.target = {}
def test_enforce_nonexistent_action_throws(self):
action = "example:noexist"
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
self.context, action, self.target)
def test_enforce_bad_action_throws(self):
action = "example:denied"
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
self.context, action, self.target)
def test_enforce_bad_action_noraise(self):
action = "example:denied"
result = policy.enforce(self.context, action, self.target, False)
self.assertEqual(result, False)
def test_enforce_good_action(self):
action = "example:allowed"
result = policy.enforce(self.context, action, self.target)
self.assertEqual(result, True)
@mock.patch.object(urlrequest, 'urlopen',
return_value=StringIO.StringIO("True"))
def test_enforce_http_true(self, mock_urlrequest):
action = "example:get_http"
target = {}
result = policy.enforce(self.context, action, target)
self.assertEqual(result, True)
@mock.patch.object(urlrequest, 'urlopen',
return_value=StringIO.StringIO("False"))
def test_enforce_http_false(self, mock_urlrequest):
action = "example:get_http"
target = {}
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
self.context, action, target)
def test_templatized_enforcement(self):
target_mine = {'project_id': 'fake'}
target_not_mine = {'project_id': 'another'}
action = "example:my_file"
policy.enforce(self.context, action, target_mine)
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
self.context, action, target_not_mine)
def test_early_AND_enforcement(self):
action = "example:early_and_fail"
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
self.context, action, self.target)
def test_early_OR_enforcement(self):
action = "example:early_or_success"
policy.enforce(self.context, action, self.target)
    def test_ignore_case_role_check(self):
        """role: checks must match regardless of role-name capitalisation."""
        lowercase_action = "example:lowercase_admin"
        uppercase_action = "example:uppercase_admin"
        # NOTE(dprince) we mix case in the Admin role here to ensure
        # case is ignored
        admin_context = context.RequestContext('admin',
                                               'fake',
                                               roles=['AdMiN'])
        policy.enforce(admin_context, lowercase_action, self.target)
        policy.enforce(admin_context, uppercase_action, self.target)
class DefaultPolicyTestCase(test.NoDBTestCase):
    """Exercise fallback to the configured default rule for unknown actions."""
    def setUp(self):
        super(DefaultPolicyTestCase, self).setUp()
        # "default" permits everything (empty rule); "example:exist" denies.
        self.rules = {
            "default": '',
            "example:exist": "!",
        }
        self._set_rules('default')
        self.context = context.RequestContext('fake', 'fake')
    def _set_rules(self, default_rule):
        """Re-initialise the policy engine with self.rules and the given
        default rule name, bypassing the policy file (use_conf=False)."""
        policy.reset()
        rules = dict((k, common_policy.parse_rule(v))
                     for k, v in self.rules.items())
        policy.init(rules=rules, default_rule=default_rule, use_conf=False)
    def test_policy_called(self):
        """A known, denied action raises rather than falling back to default."""
        self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
                          self.context, "example:exist", {})
    def test_not_found_policy_calls_default(self):
        """An unknown action is checked against the (permissive) default rule."""
        policy.enforce(self.context, "example:noexist", {})
    def test_default_not_found(self):
        """If the configured default rule itself is missing, enforcement
        fails closed."""
        self._set_rules("default_noexist")
        self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
                          self.context, "example:noexist", {})
class IsAdminCheckTestCase(test.NoDBTestCase):
    """Unit tests for the custom ``is_admin`` policy check primitive."""
    def setUp(self):
        super(IsAdminCheckTestCase, self).setUp()
        policy.init()
    def test_init_true(self):
        """The literal match string 'True' yields expected == True."""
        check = policy.IsAdminCheck('is_admin', 'True')
        self.assertEqual(check.kind, 'is_admin')
        self.assertEqual(check.match, 'True')
        self.assertEqual(check.expected, True)
    def test_init_false(self):
        """Any match string other than 'True' is normalised to 'False'."""
        check = policy.IsAdminCheck('is_admin', 'nottrue')
        self.assertEqual(check.kind, 'is_admin')
        self.assertEqual(check.match, 'False')
        self.assertEqual(check.expected, False)
    def test_call_true(self):
        """With expected=True the check passes iff creds['is_admin'] is True."""
        check = policy.IsAdminCheck('is_admin', 'True')
        self.assertEqual(check('target', dict(is_admin=True),
                               policy._ENFORCER), True)
        self.assertEqual(check('target', dict(is_admin=False),
                               policy._ENFORCER), False)
    def test_call_false(self):
        """With expected=False the truth table is inverted."""
        check = policy.IsAdminCheck('is_admin', 'False')
        self.assertEqual(check('target', dict(is_admin=True),
                               policy._ENFORCER), False)
        self.assertEqual(check('target', dict(is_admin=False),
                               policy._ENFORCER), True)
class AdminRolePolicyTestCase(test.NoDBTestCase):
    """Verify admin-only rules reject a caller that holds only 'member'."""
    def setUp(self):
        super(AdminRolePolicyTestCase, self).setUp()
        self.policy = self.useFixture(policy_fixture.RoleBasedPolicyFixture())
        self.context = context.RequestContext('fake', 'fake', roles=['member'])
        # Every action registered by the role-based policy fixture.
        self.actions = policy.get_rules().keys()
        self.target = {}
    def test_enforce_admin_actions_with_nonadmin_context_throws(self):
        """Check if non-admin context passed to admin actions throws
        Policy not authorized exception
        """
        for action in self.actions:
            self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
                              self.context, action, self.target)
| apache-2.0 |
psmit/kaldi-recipes | spraakbanken/local/spl.py | 2 | 2522 | import collections
import sys
class Spl(object):
    """Parser for SpeechDat-style .spl session description files.

    An .spl file is an INI-like file with [System], [Info States],
    [Record States] and [Validation States] sections.  Numeric keys in the
    record/validation/info sections identify individual entries.  The
    [System] section may switch the character encoding ("ANSI Codepage")
    and the field delimiter mid-file, which is why the file is read as
    bytes and decoded line by line.
    """

    def __init__(self, filename):
        self._encoding = "ascii"
        self._delimiter = ";"
        # Dispatch table mapping a lower-cased section name to its handler.
        # For "system" the value is a nested dict keyed by option name.
        self._f = {"system": {"ansi codepage": self._set_ansi_encoding,
                              "delimiter": self._set_delimiter},
                   "info states": self._add_info,
                   "record states": self._add_record,
                   "validation states": self._add_validation}
        self._records = {}
        self._validations = {}
        self._infos = collections.OrderedDict()
        self._parse(filename)

    def _parse(self, filename):
        """Parse ``filename``; errors are reported to stderr, not raised."""
        try:
            section = None
            # Bug fix: open inside a context manager so the file handle is
            # always closed (the original open(...).readlines() leaked it).
            with open(filename, "rb") as spl_file:
                for line in spl_file:
                    l = line.decode(self._encoding).strip().strip("\0")
                    if len(l) < 2:
                        continue
                    if l.startswith('['):
                        # Section header, e.g. "[Record States]".
                        section = l[1:-1]
                    else:
                        key, val = l.split("=", 1)
                        if key.isnumeric():
                            # Numbered entry: dispatch to the handler for the
                            # current section (no-op for unknown sections).
                            self._f.get(section.lower(), lambda y, z: 0)(int(key), val)
                        else:
                            # Named option: only honoured inside [System].
                            if section.lower() in self._f:
                                s = self._f[section.lower()]
                                if key.lower() in s:
                                    s[key.lower()](val)
        except Exception as e:
            # Best effort: a malformed file produces a warning, not a crash.
            print("Error when parsing {}: {}".format(filename, e), file=sys.stderr)
        if len(self._records) != len(self._validations):
            print("Filename: {} has {} records and {} validations".format(filename, len(self._records), len(self._validations)))

    def _set_ansi_encoding(self, e):
        """Switch decoding to the Windows codepage named in [System]."""
        self._encoding = "cp{}".format(e)

    def _set_delimiter(self, d):
        """Set the field delimiter used for all subsequent entries."""
        self._delimiter = d

    def _add_record(self, i, r):
        self._records[i] = r.split(self._delimiter)

    def _add_info(self, _, info):
        # Info lines are "key<delim>value[<delim>...]"; extra fields ignored.
        key, val = info.split(self._delimiter)[:2]
        self._infos[key] = val

    def _add_validation(self, i, v):
        self._validations[i] = v.split(self._delimiter)

    def records(self):
        """Yield (validation_fields, record_fields) for every record.

        Note: raises KeyError for a record with no validation entry; use
        key_records() for the tolerant variant.
        """
        for key, val in self._records.items():
            yield self._validations[key], val

    def key_records(self):
        """Yield (key, validation_fields, record_fields), silently skipping
        records that have no matching validation entry."""
        for key, record in self._records.items():
            if key not in self._validations:
                continue
            yield key, self._validations[key], record
if __name__ == "__main__":
    # Ad-hoc smoke test: parses a local "test.spl" (must exist in the
    # working directory) and dumps its records and validations.
    s = Spl("test.spl")
    print(list(s.records()))
    print(s._validations)
ppries/tensorflow | tensorflow/contrib/bayesflow/python/ops/monte_carlo.py | 8 | 11623 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Monte Carlo integration and helpers.
## Background
Monte Carlo integration refers to the practice of estimating an expectation with
a sample mean. For example, given random variable `Z in R^k` with density `p`,
the expectation of function `f` can be approximated like:
```
E_p[f(Z)] = \int f(z) p(z) dz
~ S_n
:= n^{-1} \sum_{i=1}^n f(z_i), z_i iid samples from p.
```
If `E_p[|f(Z)|] < infinity`, then `S_n --> E_p[f(Z)]` by the strong law of large
numbers. If `E_p[f(Z)^2] < infinity`, then `S_n` is asymptotically normal with
variance `Var[f(Z)] / n`.
Practitioners of Bayesian statistics often find themselves wanting to estimate
`E_p[f(Z)]` when the distribution `p` is known only up to a constant. For
example, the joint distribution `p(z, x)` may be known, but the evidence
`p(x) = \int p(z, x) dz` may be intractable. In that case, a parameterized
distribution family `q_lambda(z)` may be chosen, and the optimal `lambda` is the
one minimizing the KL divergence between `q_lambda(z)` and
`p(z | x)`. We only know `p(z, x)`, but that is sufficient to find `lambda`.
## Log-space evaluation and subtracting the maximum.
Care must be taken when the random variable lives in a high dimensional space.
For example, the naive importance sample estimate `E_q[f(Z) p(Z) / q(Z)]`
involves the ratio of two terms `p(Z) / q(Z)`, each of which must have tails
dropping off faster than `O(|z|^{-(k + 1)})` in order to have finite integral.
This ratio would often be zero or infinity up to numerical precision.
For that reason, we write
```
Log E_q[ f(Z) p(Z) / q(Z) ]
= Log E_q[ exp{Log[f(Z)] + Log[p(Z)] - Log[q(Z)] - C} ] + C, where
C := Max[ Log[f(Z)] + Log[p(Z)] - Log[q(Z)] ].
```
The maximum value of the exponentiated term will be 0.0, and the expectation
can be evaluated in a stable manner.
## Ops
@@expectation
@@expectation_importance_sampler
@@expectation_importance_sampler_logspace
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
__all__ = [
'expectation',
'expectation_importance_sampler',
'expectation_importance_sampler_logspace',
]
def expectation_importance_sampler(f,
                                   log_p,
                                   sampling_dist_q,
                                   z=None,
                                   n=None,
                                   seed=None,
                                   name='expectation_importance_sampler'):
  r"""Monte Carlo estimate of `E_p[f(Z)] = E_q[f(Z) p(Z) / q(Z)]`.
  With `p(z) := exp{log_p(z)}`, this `Op` returns
  ```
  n^{-1} sum_{i=1}^n [ f(z_i) p(z_i) / q(z_i) ],  z_i ~ q,
  \approx E_q[ f(Z) p(Z) / q(Z) ]
  =       E_p[f(Z)]
  ```
  This integral is done in log-space with max-subtraction to better handle the
  often extreme values that `f(z) p(z) / q(z)` can take on.
  If `f >= 0`, it is up to 2x more efficient to exponentiate the result of
  `expectation_importance_sampler_logspace` applied to `Log[f]`.
  User supplies either `Tensor` of samples `z`, or number of samples to draw `n`
  Args:
    f: Callable mapping samples from `sampling_dist_q` to `Tensors` with shape
      broadcastable to `q.batch_shape`.
      For example, `f` works "just like" `q.log_prob`.
    log_p:  Callable mapping samples from `sampling_dist_q` to `Tensors` with
      shape broadcastable to `q.batch_shape`.
      For example, `log_p` works "just like" `sampling_dist_q.log_prob`.
    sampling_dist_q:  The sampling distribution.
      `tf.contrib.distributions.Distribution`.
      `float64` `dtype` recommended.
      `log_p` and `q` should be supported on the same set.
    z:  `Tensor` of samples from `q`, produced by `q.sample_n`.
    n:  Integer `Tensor`.  Number of samples to generate if `z` is not provided.
    seed:  Python integer to seed the random number generator.
    name:  A name to give this `Op`.
  Returns:
    The importance sampling estimate.  `Tensor` with `shape` equal
      to batch shape of `q`, and `dtype` = `q.dtype`.
  """
  q = sampling_dist_q  # Short alias; public arg name kept for API clarity.
  with ops.name_scope(name, values=[z, n]):
    z = _get_samples(q, z, n, seed)
    # Evaluate log p(z) and log q(z) once; both helpers below reuse them.
    log_p_z = log_p(z)
    q_log_prob_z = q.log_prob(z)
    def _importance_sampler_positive_f(log_f_z):
      # Same as expectation_importance_sampler_logspace, but using Tensors
      # rather than samples and functions.  Allows us to sample once.
      log_values = log_f_z + log_p_z - q_log_prob_z
      return _logspace_mean(log_values)
    # With f_plus(z) = max(0, f(z)), f_minus(z) = max(0, -f(z)),
    # E_p[f(Z)] = E_p[f_plus(Z)] - E_p[f_minus(Z)]
    #           = E_p[f_plus(Z) + 1] - E_p[f_minus(Z) + 1]
    # Without incurring bias, 1 is added to each to prevent zeros in logspace.
    # The logarithm is approximately linear around 1 + epsilon, so this is good
    # for small values of 'z' as well.
    f_z = f(z)
    log_f_plus_z = math_ops.log(nn.relu(f_z) + 1.)
    log_f_minus_z = math_ops.log(nn.relu(-1. * f_z) + 1.)
    log_f_plus_integral = _importance_sampler_positive_f(log_f_plus_z)
    log_f_minus_integral = _importance_sampler_positive_f(log_f_minus_z)
  # The two "+ 1" shifts cancel in the subtraction below.
  return math_ops.exp(log_f_plus_integral) - math_ops.exp(log_f_minus_integral)
def expectation_importance_sampler_logspace(
    log_f,
    log_p,
    sampling_dist_q,
    z=None,
    n=None,
    seed=None,
    name='expectation_importance_sampler_logspace'):
  r"""Importance sampling with a positive function, in log-space.
  With `p(z) := exp{log_p(z)}`, and `f(z) = exp{log_f(z)}`, this `Op`
  returns
  ```
  Log[ n^{-1} sum_{i=1}^n [ f(z_i) p(z_i) / q(z_i) ] ],  z_i ~ q,
  \approx Log[ E_q[ f(Z) p(Z) / q(Z) ] ]
  =       Log[E_p[f(Z)]]
  ```
  This integral is done in log-space with max-subtraction to better handle the
  often extreme values that `f(z) p(z) / q(z)` can take on.
  In contrast to `expectation_importance_sampler`, this `Op` returns values in
  log-space.
  User supplies either `Tensor` of samples `z`, or number of samples to draw `n`
  Args:
    log_f: Callable mapping samples from `sampling_dist_q` to `Tensors` with
      shape broadcastable to `q.batch_shape`.
      For example, `log_f` works "just like" `sampling_dist_q.log_prob`.
    log_p:  Callable mapping samples from `sampling_dist_q` to `Tensors` with
      shape broadcastable to `q.batch_shape`.
      For example, `log_p` works "just like" `q.log_prob`.
    sampling_dist_q:  The sampling distribution.
      `tf.contrib.distributions.Distribution`.
      `float64` `dtype` recommended.
      `log_p` and `q` should be supported on the same set.
    z:  `Tensor` of samples from `q`, produced by `q.sample_n`.
    n:  Integer `Tensor`.  Number of samples to generate if `z` is not provided.
    seed:  Python integer to seed the random number generator.
    name:  A name to give this `Op`.
  Returns:
    Logarithm of the importance sampling estimate.  `Tensor` with `shape` equal
      to batch shape of `q`, and `dtype` = `q.dtype`.
  """
  q = sampling_dist_q  # Short alias; public arg name kept for API clarity.
  with ops.name_scope(name, values=[z, n]):
    z = _get_samples(q, z, n, seed)
    # log of the importance-weighted integrand: log f(z) + log p(z) - log q(z).
    log_values = log_f(z) + log_p(z) - q.log_prob(z)
    return _logspace_mean(log_values)
def _logspace_mean(log_values):
  """Evaluate `Log[E[values]]` in a stable manner.
  Args:
    log_values:  `Tensor` holding `Log[values]`.
  Returns:
    `Tensor` of same `dtype` as `log_values`, reduced across dim 0.
      `Log[Mean[values]]`.
  """
  # center = Max[Log[values]], with stop-gradient
  # The center hopefully keeps the exponentiated term small.  It is cancelled
  # from the final result, so putting stop gradient on it will not change the
  # final result.  We put stop gradient on to eliminate unnecessary computation.
  center = array_ops.stop_gradient(_sample_max(log_values))
  # centered_values = exp{Log[values] - center}
  centered_values = math_ops.exp(log_values - center)
  # log_mean_of_values = Log[ E[centered_values] ] + center
  #                    = Log[ E[exp{log_values - center}] ] + center
  #                    = Log[ E[values] * exp{-center} ] + center
  #                    = Log[E[values]] - center + center
  #                    = Log[E[values]]
  # (The original comments here wrote E[log_values] where `center`, the
  # stop-gradient max, belongs; the code was always correct.)
  log_mean_of_values = math_ops.log(_sample_mean(centered_values)) + center
  return log_mean_of_values
def expectation(f, p, z=None, n=None, seed=None, name='expectation'):
  r"""Monte Carlo estimate of an expectation:  `E_p[f(Z)]` with sample mean.
  This `Op` returns
  ```
  n^{-1} sum_{i=1}^n f(z_i),  where z_i ~ p
  \approx E_p[f(Z)]
  ```
  User supplies either `Tensor` of samples `z`, or number of samples to draw `n`
  Args:
    f: Callable mapping samples from `p` to `Tensors`.
    p:  `tf.contrib.distributions.Distribution`.
    z:  `Tensor` of samples from `p`, produced by `p.sample_n`.
    n:  Integer `Tensor`.  Number of samples to generate if `z` is not provided.
    seed:  Python integer to seed the random number generator.
    name:  A name to give this `Op`.
  Returns:
    A `Tensor` with the same `dtype` as `p`.
  Example:
  ```python
  N_samples = 10000
  distributions = tf.contrib.distributions
  dist = distributions.Uniform([0.0, 0.0], [1.0, 2.0])
  elementwise_mean = lambda x: x
  mean_sum = lambda x: tf.reduce_sum(x, 1)
  estimate_elementwise_mean_tf = monte_carlo.expectation(elementwise_mean,
                                                         dist,
                                                         n=N_samples)
  estimate_mean_sum_tf = monte_carlo.expectation(mean_sum,
                                                 dist,
                                                 n=N_samples)
  with tf.Session() as sess:
    estimate_elementwise_mean, estimate_mean_sum = (
        sess.run([estimate_elementwise_mean_tf, estimate_mean_sum_tf]))
  print estimate_elementwise_mean
  >>> np.array([ 0.50018013  1.00097895], dtype=np.float32)
  print estimate_mean_sum
  >>> 1.49571
  ```
  """
  with ops.name_scope(name, values=[n, z]):
    z = _get_samples(p, z, n, seed)
    # Average f over the sample dimension (dim 0).
    return _sample_mean(f(z))
def _sample_mean(values):
  """Mean over sample indices.  In this module this is always [0]."""
  # Dim 0 is the sample dimension produced by Distribution.sample_n.
  return math_ops.reduce_mean(values, reduction_indices=[0])
def _sample_max(values):
  """Max over sample indices.  In this module this is always [0]."""
  # Dim 0 is the sample dimension produced by Distribution.sample_n.
  return math_ops.reduce_max(values, reduction_indices=[0])
def _get_samples(dist, z, n, seed):
  """Validate the mutually exclusive `z`/`n` arguments and produce samples.

  Exactly one of `z` (pre-drawn samples) and `n` (number of samples to draw
  from `dist`) must be supplied, otherwise a `ValueError` is raised.
  """
  with ops.name_scope('get_samples', values=[z, n]):
    have_n = n is not None
    have_z = z is not None
    if have_n == have_z:
      # Both given, or neither given: ambiguous call.
      raise ValueError(
          'Must specify exactly one of arguments "n" and "z". Found: '
          'n = %s, z = %s' % (n, z))
    if have_z:
      return ops.convert_to_tensor(z, name='z')
    return dist.sample_n(n=n, seed=seed)
| apache-2.0 |
yask123/django | tests/template_tests/test_engine.py | 199 | 3971 | import os
from django.template import Context
from django.template.engine import Engine
from django.test import SimpleTestCase, ignore_warnings
from django.utils.deprecation import RemovedInDjango110Warning
from .utils import ROOT, TEMPLATE_DIR
OTHER_DIR = os.path.join(ROOT, 'other_templates')
@ignore_warnings(category=RemovedInDjango110Warning)
class DeprecatedRenderToStringTest(SimpleTestCase):
    """Tests for the deprecated Engine.render_to_string() behaviour (hence
    the class-wide suppression of the Django 1.10 deprecation warning)."""
    def setUp(self):
        self.engine = Engine(
            dirs=[TEMPLATE_DIR],
            libraries={'custom': 'template_tests.templatetags.custom'},
        )
    def test_basic_context(self):
        """A plain dict context is rendered into the template."""
        self.assertEqual(
            self.engine.render_to_string('test_context.html', {'obj': 'test'}),
            'obj:test\n',
        )
    def test_existing_context_kept_clean(self):
        """Passing context_instance must not leak the extra dict into the
        caller's Context after rendering."""
        context = Context({'obj': 'before'})
        output = self.engine.render_to_string(
            'test_context.html', {'obj': 'after'}, context_instance=context,
        )
        self.assertEqual(output, 'obj:after\n')
        self.assertEqual(context['obj'], 'before')
    def test_no_empty_dict_pushed_to_stack(self):
        """
        #21741 -- An empty dict should not be pushed to the context stack when
        render_to_string is called without a context argument.
        """
        # The stack should have a length of 1, corresponding to the builtins
        self.assertEqual(
            '1',
            self.engine.render_to_string('test_context_stack.html').strip(),
        )
        self.assertEqual(
            '1',
            self.engine.render_to_string(
                'test_context_stack.html',
                context_instance=Context()
            ).strip(),
        )
class LoaderTests(SimpleTestCase):
    """Template loader ordering and origin tracking."""
    def test_origin(self):
        """With debug enabled, a loaded template records its origin name."""
        engine = Engine(dirs=[TEMPLATE_DIR], debug=True)
        template = engine.get_template('index.html')
        self.assertEqual(template.origin.template_name, 'index.html')
    def test_loader_priority(self):
        """
        #21460 -- Check that the order of template loader works.
        """
        loaders = [
            'django.template.loaders.filesystem.Loader',
            'django.template.loaders.app_directories.Loader',
        ]
        engine = Engine(dirs=[OTHER_DIR, TEMPLATE_DIR], loaders=loaders)
        template = engine.get_template('priority/foo.html')
        self.assertEqual(template.render(Context()), 'priority\n')
    def test_cached_loader_priority(self):
        """
        Check that the order of template loader works. Refs #21460.
        """
        loaders = [
            ('django.template.loaders.cached.Loader', [
                'django.template.loaders.filesystem.Loader',
                'django.template.loaders.app_directories.Loader',
            ]),
        ]
        engine = Engine(dirs=[OTHER_DIR, TEMPLATE_DIR], loaders=loaders)
        template = engine.get_template('priority/foo.html')
        self.assertEqual(template.render(Context()), 'priority\n')
        # Second lookup exercises the cached loader's hit path.
        template = engine.get_template('priority/foo.html')
        self.assertEqual(template.render(Context()), 'priority\n')
@ignore_warnings(category=RemovedInDjango110Warning)
class TemplateDirsOverrideTests(SimpleTestCase):
    """The deprecated ``dirs`` argument overrides the engine's template
    directories for render_to_string/get_template/select_template alike."""
    # Both tuple and list forms of the ``dirs`` argument must be accepted.
    DIRS = ((OTHER_DIR, ), [OTHER_DIR])
    def setUp(self):
        self.engine = Engine()
    def test_render_to_string(self):
        for dirs in self.DIRS:
            self.assertEqual(
                self.engine.render_to_string('test_dirs.html', dirs=dirs),
                'spam eggs\n',
            )
    def test_get_template(self):
        for dirs in self.DIRS:
            template = self.engine.get_template('test_dirs.html', dirs=dirs)
            self.assertEqual(template.render(Context()), 'spam eggs\n')
    def test_select_template(self):
        for dirs in self.DIRS:
            template = self.engine.select_template(['test_dirs.html'], dirs=dirs)
            self.assertEqual(template.render(Context()), 'spam eggs\n')
| bsd-3-clause |
blaggacao/odoo | addons/purchase/wizard/purchase_order_group.py | 376 | 3379 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
class purchase_order_group(osv.osv_memory):
    """Transient wizard that merges several selected purchase orders."""
    _name = "purchase.order.group"
    _description = "Purchase Order Merge"

    def fields_view_get(self, cr, uid, view_id=None, view_type='form',
                        context=None, toolbar=False, submenu=False):
        """
        Changes the view dynamically
        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param context: A standard dictionary
        @return: New arch of view.
        """
        if context is None:
            context = {}
        # NOTE(review): submenu=False is passed through regardless of the
        # `submenu` argument; kept as-is to preserve existing behaviour.
        res = super(purchase_order_group, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=False)
        # Merging needs at least two selected orders.  Use .get() so a call
        # without an active selection cannot raise a KeyError.
        if context.get('active_model', '') == 'purchase.order' and len(context.get('active_ids', [])) < 2:
            raise osv.except_osv(_('Warning!'),
                                 _('Please select multiple order to merge in the list view.'))
        return res

    def merge_orders(self, cr, uid, ids, context=None):
        """
        To merge similar type of purchase orders.
        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param ids: the ID or list of IDs
        @param context: A standard dictionary
        @return: purchase order view
        """
        order_obj = self.pool.get('purchase.order')
        mod_obj = self.pool.get('ir.model.data')
        if context is None:
            context = {}
        # Resolve the purchase-order search view used by the returned action.
        result = mod_obj._get_id(cr, uid, 'purchase', 'view_purchase_order_filter')
        search_view = mod_obj.read(cr, uid, result, ['res_id'])
        allorders = order_obj.do_merge(cr, uid, context.get('active_ids', []), context)
        return {
            'domain': "[('id','in', [" + ','.join(map(str, allorders.keys())) + "])]",
            'name': _('Purchase Orders'),
            'view_type': 'form',
            'view_mode': 'tree,form',
            'res_model': 'purchase.order',
            'view_id': False,
            'type': 'ir.actions.act_window',
            'search_view_id': search_view['res_id']
        }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
hastexo/edx-platform | lms/djangoapps/commerce/constants.py | 21 | 1131 | """ Constants for this app as well as the external API. """
class OrderStatus(object):
    """Enumerates the known order statuses used by this app and the
    external E-Commerce API."""
    OPEN = 'Open'
    FULFILLMENT_ERROR = 'Fulfillment Error'
    COMPLETE = 'Complete'
class Messages(object):
    """Response-message templates; callers fill them with str.format()
    keyword arguments (username, course_id, order_number, enrollment_mode)."""
    NO_ECOM_API = u'E-Commerce API not setup. Enrolled {username} in {course_id} directly.'
    NO_SKU_ENROLLED = u'The {enrollment_mode} mode for {course_id} does not have a SKU. Enrolling {username} directly.'
    ENROLL_DIRECTLY = u'Enroll {username} in {course_id} directly because no need for E-Commerce baskets and orders.'
    ORDER_COMPLETED = u'Order {order_number} was completed.'
    ORDER_INCOMPLETE_ENROLLED = u'Order {order_number} was created, but is not yet complete. User was enrolled.'
    NO_HONOR_MODE = u'Course {course_id} does not have an honor mode.'
    NO_DEFAULT_ENROLLMENT_MODE = u'Course {course_id} does not have an honor or audit mode.'
    ENROLLMENT_EXISTS = u'User {username} is already enrolled in {course_id}.'
    ENROLLMENT_CLOSED = u'Enrollment is closed for {course_id}.'
| agpl-3.0 |
prune998/ansible | lib/ansible/modules/network/junos/junos_package.py | 18 | 6495 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: junos_package
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Installs packages on remote devices running Junos
description:
- This module can install new and updated packages on remote
devices running Junos. The module will compare the specified
package with the one running on the remote device and install
the specified version if there is a mismatch
extends_documentation_fragment: junos
options:
src:
description:
- The I(src) argument specifies the path to the source package to be
installed on the remote device in the advent of a version mismatch.
The I(src) argument can be either a localized path or a full
path to the package file to install.
required: true
default: null
aliases: ['package']
version:
description:
- The I(version) argument can be used to explicitly specify the
version of the package that should be installed on the remote
device. If the I(version) argument is not specified, then
the version is extracts from the I(src) filename.
required: false
default: null
reboot:
description:
- In order for a package to take effect, the remote device must be
restarted. When enabled, this argument will instruct the module
to reboot the device once the updated package has been installed.
If disabled or the remote package does not need to be changed,
the device will not be started.
required: true
default: true
choices: ['true', 'false']
no_copy:
description:
- The I(no_copy) argument is responsible for instructing the remote
device on where to install the package from. When enabled, the
package is transferred to the remote device prior to installing.
required: false
default: false
choices: ['true', 'false']
force:
description:
- The I(force) argument instructs the module to bypass the package
version check and install the packaged identified in I(src) on
the remote device.
required: true
default: false
choices: ['true', 'false']
requirements:
- junos-eznc
notes:
- This module requires the netconf system service be enabled on
the remote device being managed
"""
EXAMPLES = """
# the required set of connection arguments have been purposely left off
# the examples for brevity
- name: install local package on remote device
junos_package:
src: junos-vsrx-12.1X46-D10.2-domestic.tgz
- name: install local package on remote device without rebooting
junos_package:
src: junos-vsrx-12.1X46-D10.2-domestic.tgz
reboot: no
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.junos import junos_argument_spec
try:
from jnpr.junos import Device
from jnpr.junos.utils.sw import SW
from jnpr.junos.exception import ConnectError
HAS_PYEZ = True
except ImportError:
HAS_PYEZ = False
def get_param(module, key):
    """Return a module parameter, falling back to the 'provider' dict.

    A truthy top-level value in ``module.params`` wins; otherwise (None,
    '', 0, ...) the value is looked up in ``module.params['provider']``.
    (Converted from a lambda assignment per PEP 8 E731.)
    """
    return module.params[key] or module.params['provider'].get(key)
def connect(module):
    """Open a NETCONF connection to the device described by module params.

    Returns the connected junos-eznc Device; on failure the module is
    failed via fail_json (which exits).
    """
    host = get_param(module, 'host')

    kwargs = {
        'port': get_param(module, 'port') or 830,
        'user': get_param(module, 'username')
    }

    if get_param(module, 'password'):
        kwargs['passwd'] = get_param(module, 'password')

    if get_param(module, 'ssh_keyfile'):
        kwargs['ssh_private_key_file'] = get_param(module, 'ssh_keyfile')

    kwargs['gather_facts'] = False

    try:
        device = Device(host, **kwargs)
        device.open()
        device.timeout = get_param(module, 'timeout') or 10
    except ConnectError:
        exc = get_exception()
        # Bug fix: this module-level function has no `self`, so the original
        # self.raise_exc(...) raised a NameError instead of reporting the
        # connection failure.  Report it through the AnsibleModule instead.
        module.fail_json(msg='unable to connect to %s: %s' % (host, str(exc)))

    return device
def install_package(module, device):
    """Install module.params['src'] on `device` and optionally reboot it.

    Fails the module if SW.install() reports failure.
    """
    junos = SW(device)
    package = module.params['src']
    no_copy = module.params['no_copy']

    def progress_log(dev, report):
        # SW.install() progress callback receives (device, message);
        # forward the message to the Ansible log.  (Converted from a
        # lambda assignment per PEP 8 E731.)
        module.log(report)

    module.log('installing package')
    result = junos.install(package, progress=progress_log, no_copy=no_copy)

    if not result:
        module.fail_json(msg='Unable to install package on device')

    if module.params['reboot']:
        module.log('rebooting system')
        junos.reboot()
def main():
    """ Main entry point for Ansible module execution
    """
    argument_spec = dict(
        src=dict(type='path', required=True, aliases=['package']),
        version=dict(),
        reboot=dict(type='bool', default=True),
        no_copy=dict(default=False, type='bool'),
        force=dict(type='bool', default=False),
        transport=dict(default='netconf', choices=['netconf'])
    )

    argument_spec.update(junos_argument_spec)

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    if module.params['provider'] is None:
        module.params['provider'] = {}

    if not HAS_PYEZ:
        module.fail_json(
            msg='junos-eznc is required but does not appear to be installed. '
                'It can be installed using `pip install junos-eznc`'
        )

    result = dict(changed=False)

    # With force, install unconditionally; otherwise only on version mismatch.
    do_upgrade = module.params['force'] or False

    device = connect(module)

    if not module.params['force']:
        # facts_refresh() updates device.facts in place; its return value is
        # not meaningful, so it is not bound to a local (was an unused var).
        device.facts_refresh()
        has_ver = device.facts.get('version')
        wants_ver = module.params['version']
        do_upgrade = has_ver != wants_ver

    if do_upgrade:
        if not module.check_mode:
            install_package(module, device)
        result['changed'] = True

    module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
mavit/ansible | test/units/module_utils/basic/test_filesystem.py | 53 | 5198 | # -*- coding: utf-8 -*-
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2016 Toshio Kuratomi <tkuratomi@ansible.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from units.mock.procenv import ModuleTestCase
from ansible.compat.tests.mock import patch, MagicMock
from ansible.module_utils.six.moves import builtins
# Keep a reference to the real __import__ builtin; tests that patch
# builtins.__import__ can delegate to / restore it.  (Unused by the tests
# visible in this excerpt -- presumably shared boilerplate; verify.)
realimport = builtins.__import__
class TestOtherFilesystem(ModuleTestCase):
    """Tests for AnsibleModule filesystem helpers (ownership, mount points)."""
    def test_module_utils_basic_ansible_module_user_and_group(self):
        """user_and_group() returns the (uid, gid) reported by os.lstat."""
        from ansible.module_utils import basic
        basic._ANSIBLE_ARGS = None
        am = basic.AnsibleModule(
            argument_spec=dict(),
        )
        mock_stat = MagicMock()
        mock_stat.st_uid = 0
        mock_stat.st_gid = 0
        with patch('os.lstat', return_value=mock_stat):
            self.assertEqual(am.user_and_group('/path/to/file'), (0, 0))
    def test_module_utils_basic_ansible_module_find_mount_point(self):
        """find_mount_point() walks up the path until os.path.ismount hits."""
        from ansible.module_utils import basic
        basic._ANSIBLE_ARGS = None
        am = basic.AnsibleModule(
            argument_spec=dict(),
        )
        def _mock_ismount(path):
            # Scenario 1: only the root is a mount point.
            if path == b'/':
                return True
            return False
        with patch('os.path.ismount', side_effect=_mock_ismount):
            self.assertEqual(am.find_mount_point('/root/fs/../mounted/path/to/whatever'), '/')
        def _mock_ismount(path):
            # Scenario 2: a nested mount point must win over '/'.
            if path == b'/subdir/mount':
                return True
            if path == b'/':
                return True
            return False
        with patch('os.path.ismount', side_effect=_mock_ismount):
            self.assertEqual(am.find_mount_point('/subdir/mount/path/to/whatever'), '/subdir/mount')
    def test_module_utils_basic_ansible_module_set_owner_if_different(self):
        """set_owner_if_different() chowns only on a real change, honours
        check mode, and fails the module on unknown users or OS errors."""
        from ansible.module_utils import basic
        basic._ANSIBLE_ARGS = None
        am = basic.AnsibleModule(
            argument_spec=dict(),
        )
        # owner=None is a no-op: the 'changed' flag passes straight through.
        self.assertEqual(am.set_owner_if_different('/path/to/file', None, True), True)
        self.assertEqual(am.set_owner_if_different('/path/to/file', None, False), False)
        am.user_and_group = MagicMock(return_value=(500, 500))
        with patch('os.lchown', return_value=None) as m:
            # Numeric uid differing from current (500) triggers lchown.
            self.assertEqual(am.set_owner_if_different('/path/to/file', 0, False), True)
            m.assert_called_with(b'/path/to/file', 0, -1)
            def _mock_getpwnam(*args, **kwargs):
                mock_pw = MagicMock()
                mock_pw.pw_uid = 0
                return mock_pw
            m.reset_mock()
            # A user *name* is resolved through pwd.getpwnam first.
            with patch('pwd.getpwnam', side_effect=_mock_getpwnam):
                self.assertEqual(am.set_owner_if_different('/path/to/file', 'root', False), True)
                m.assert_called_with(b'/path/to/file', 0, -1)
            # Unknown user name -> fail_json -> SystemExit.
            with patch('pwd.getpwnam', side_effect=KeyError):
                self.assertRaises(SystemExit, am.set_owner_if_different, '/path/to/file', 'root', False)
            m.reset_mock()
            # Check mode reports the would-be change without calling lchown.
            am.check_mode = True
            self.assertEqual(am.set_owner_if_different('/path/to/file', 0, False), True)
            self.assertEqual(m.called, False)
        am.check_mode = False
        # lchown failure -> fail_json -> SystemExit.
        with patch('os.lchown', side_effect=OSError) as m:
            self.assertRaises(SystemExit, am.set_owner_if_different, '/path/to/file', 'root', False)
    def test_module_utils_basic_ansible_module_set_group_if_different(self):
        """set_group_if_different() mirrors the owner logic for groups."""
        from ansible.module_utils import basic
        basic._ANSIBLE_ARGS = None
        am = basic.AnsibleModule(
            argument_spec=dict(),
        )
        # group=None is a no-op: the 'changed' flag passes straight through.
        self.assertEqual(am.set_group_if_different('/path/to/file', None, True), True)
        self.assertEqual(am.set_group_if_different('/path/to/file', None, False), False)
        am.user_and_group = MagicMock(return_value=(500, 500))
        with patch('os.lchown', return_value=None) as m:
            # Numeric gid differing from current (500) triggers lchown.
            self.assertEqual(am.set_group_if_different('/path/to/file', 0, False), True)
            m.assert_called_with(b'/path/to/file', -1, 0)
            def _mock_getgrnam(*args, **kwargs):
                mock_gr = MagicMock()
                mock_gr.gr_gid = 0
                return mock_gr
            m.reset_mock()
            # A group *name* is resolved through grp.getgrnam first.
            with patch('grp.getgrnam', side_effect=_mock_getgrnam):
                self.assertEqual(am.set_group_if_different('/path/to/file', 'root', False), True)
                m.assert_called_with(b'/path/to/file', -1, 0)
            # Unknown group name -> fail_json -> SystemExit.
            with patch('grp.getgrnam', side_effect=KeyError):
                self.assertRaises(SystemExit, am.set_group_if_different, '/path/to/file', 'root', False)
            m.reset_mock()
            # Check mode reports the would-be change without calling lchown.
            am.check_mode = True
            self.assertEqual(am.set_group_if_different('/path/to/file', 0, False), True)
            self.assertEqual(m.called, False)
        am.check_mode = False
        # lchown failure -> fail_json -> SystemExit.
        with patch('os.lchown', side_effect=OSError) as m:
            self.assertRaises(SystemExit, am.set_group_if_different, '/path/to/file', 'root', False)
| gpl-3.0 |
sss/calibre-at-bzr | src/calibre/ebooks/conversion/plugins/html_input.py | 2 | 11835 | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import re, tempfile, os
from functools import partial
from itertools import izip
from calibre.constants import islinux, isbsd
from calibre.customize.conversion import (InputFormatPlugin,
OptionRecommendation)
from calibre.utils.localization import get_lang
from calibre.utils.filenames import ascii_filename
from calibre.utils.imghdr import what
class HTMLInput(InputFormatPlugin):

    """Convert a tree of HTML files (or an OPF wrapper) into an OEB book.

    Local links are followed (depth- or breadth-first, up to ``max_levels``)
    and every referenced local resource is pulled into the OEB manifest.
    Note: this is legacy Python 2 code (``unicode``, ``izip``, ``getcwdu``).
    """

    name = 'HTML Input'
    author = 'Kovid Goyal'
    description = 'Convert HTML and OPF files to an OEB'
    file_types = set(['opf', 'html', 'htm', 'xhtml', 'xhtm', 'shtm', 'shtml'])

    options = set([
        OptionRecommendation(name='breadth_first',
            recommended_value=False, level=OptionRecommendation.LOW,
            help=_('Traverse links in HTML files breadth first. Normally, '
                'they are traversed depth first.'
                )
        ),
        OptionRecommendation(name='max_levels',
            recommended_value=5, level=OptionRecommendation.LOW,
            help=_('Maximum levels of recursion when following links in '
                'HTML files. Must be non-negative. 0 implies that no '
                'links in the root HTML file are followed. Default is '
                '%default.'
                )
        ),
        OptionRecommendation(name='dont_package',
            recommended_value=False, level=OptionRecommendation.LOW,
            help=_('Normally this input plugin re-arranges all the input '
                'files into a standard folder hierarchy. Only use this option '
                'if you know what you are doing as it can result in various '
                'nasty side effects in the rest of the conversion pipeline.'
                )
        ),
    ])

    def convert(self, stream, opts, file_ext, log,
                accelerators):
        """Plugin entry point.

        For a raw HTML file, extract metadata (merged with metadata guessed
        from the filename) and build the OEB book ourselves via
        :meth:`create_oebbook`. For an OPF file, delegate to the generic
        plumber ``create_oebbook``.
        """
        self._is_case_sensitive = None
        basedir = os.getcwdu()
        self.opts = opts

        fname = None
        if hasattr(stream, 'name'):
            basedir = os.path.dirname(stream.name)
            fname = os.path.basename(stream.name)

        if file_ext != 'opf':
            if opts.dont_package:
                raise ValueError('The --dont-package option is not supported for an HTML input file')
            from calibre.ebooks.metadata.html import get_metadata
            mi = get_metadata(stream)
            if fname:
                # Filename-derived metadata takes precedence; smart_update
                # fills its gaps from the in-document metadata.
                from calibre.ebooks.metadata.meta import metadata_from_filename
                fmi = metadata_from_filename(fname)
                fmi.smart_update(mi)
                mi = fmi
            oeb = self.create_oebbook(stream.name, basedir, opts, log, mi)
            return oeb

        from calibre.ebooks.conversion.plumber import create_oebbook
        return create_oebbook(log, stream.name, opts,
                encoding=opts.input_encoding)

    def is_case_sensitive(self, path):
        """Return True if the filesystem containing `path` is case sensitive.

        Probed once by checking whether upper/lower-cased variants of `path`
        both exist; the result is cached on the instance. Falls back to an
        OS-based guess when `path` does not exist.
        """
        if getattr(self, '_is_case_sensitive', None) is not None:
            return self._is_case_sensitive
        if not path or not os.path.exists(path):
            return islinux or isbsd
        self._is_case_sensitive = not (os.path.exists(path.lower()) \
                and os.path.exists(path.upper()))
        return self._is_case_sensitive

    def create_oebbook(self, htmlpath, basedir, opts, log, mi):
        """Build and return an OEB book from the HTML file at `htmlpath`.

        Steps: populate metadata from `mi`, collect the linked HTML files
        into the spine, rewrite all links (HTML and CSS) through
        :meth:`resource_adder`, and auto-generate a TOC from titles or, if
        titles collide, from the first heading of each file.
        """
        import uuid
        from calibre.ebooks.conversion.plumber import create_oebbook
        from calibre.ebooks.oeb.base import (DirContainer,
            rewrite_links, urlnormalize, urldefrag, BINARY_MIME, OEB_STYLES,
            xpath)
        from calibre import guess_type
        from calibre.ebooks.oeb.transforms.metadata import \
            meta_info_to_oeb_metadata
        from calibre.ebooks.html.input import get_filelist
        import cssutils, logging
        cssutils.log.setLevel(logging.WARN)
        self.OEB_STYLES = OEB_STYLES
        oeb = create_oebbook(log, None, opts, self,
                encoding=opts.input_encoding, populate=False)
        self.oeb = oeb

        # -- Metadata: copy from mi, filling required fields with defaults --
        metadata = oeb.metadata
        meta_info_to_oeb_metadata(mi, metadata, log)
        if not metadata.language:
            oeb.logger.warn(u'Language not specified')
            metadata.add('language', get_lang().replace('_', '-'))
        if not metadata.creator:
            oeb.logger.warn('Creator not specified')
            metadata.add('creator', self.oeb.translate(__('Unknown')))
        if not metadata.title:
            oeb.logger.warn('Title not specified')
            metadata.add('title', self.oeb.translate(__('Unknown')))
        bookid = str(uuid.uuid4())
        metadata.add('identifier', bookid, id='uuid_id', scheme='uuid')
        for ident in metadata.identifier:
            if 'id' in ident.attrib:
                self.oeb.uid = metadata.identifier[0]
                break

        # -- Spine: every linked, non-binary HTML file, in traversal order --
        filelist = get_filelist(htmlpath, basedir, opts, log)
        filelist = [f for f in filelist if not f.is_binary]
        htmlfile_map = {}
        for f in filelist:
            path = f.path
            oeb.container = DirContainer(os.path.dirname(path), log,
                    ignore_opf=True)
            bname = os.path.basename(path)
            id, href = oeb.manifest.generate(id='html',
                    href=ascii_filename(bname))
            htmlfile_map[path] = href
            item = oeb.manifest.add(id, href, 'text/html')
            item.html_input_href = bname
            oeb.spine.add(item, True)

        # Map on-disk paths to manifest hrefs; lowercase the key on
        # case-insensitive filesystems so later lookups match.
        self.added_resources = {}
        self.log = log
        self.log('Normalizing filename cases')
        for path, href in htmlfile_map.items():
            if not self.is_case_sensitive(path):
                path = path.lower()
            self.added_resources[path] = href
        self.urlnormalize, self.DirContainer = urlnormalize, DirContainer
        self.urldefrag = urldefrag
        self.guess_type, self.BINARY_MIME = guess_type, BINARY_MIME

        # -- Links: rewrite hrefs in HTML, then url() references in CSS --
        self.log('Rewriting HTML links')
        for f in filelist:
            path = f.path
            dpath = os.path.dirname(path)
            oeb.container = DirContainer(dpath, log, ignore_opf=True)
            item = oeb.manifest.hrefs[htmlfile_map[path]]
            rewrite_links(item.data, partial(self.resource_adder, base=dpath))

        for item in oeb.manifest.values():
            if item.media_type in self.OEB_STYLES:
                # Recover the stylesheet's source directory so relative
                # url() references resolve correctly.
                dpath = None
                for path, href in self.added_resources.items():
                    if href == item.href:
                        dpath = os.path.dirname(path)
                        break
                cssutils.replaceUrls(item.data,
                        partial(self.resource_adder, base=dpath))

        # -- TOC: one entry per linear spine item. Prefer <title>; if titles
        # are not unique, fall back to the first heading of each file. --
        toc = self.oeb.toc
        self.oeb.auto_generated_toc = True
        titles = []
        headers = []
        for item in self.oeb.spine:
            if not item.linear: continue
            html = item.data
            title = ''.join(xpath(html, '/h:html/h:head/h:title/text()'))
            title = re.sub(r'\s+', ' ', title.strip())
            if title:
                titles.append(title)
            # Placeholder (sic, upstream spelling) replaced by the first
            # heading found below, if any.
            headers.append('(unlabled)')
            for tag in ('h1', 'h2', 'h3', 'h4', 'h5', 'strong'):
                expr = '/h:html/h:body//h:%s[position()=1]/text()'
                header = ''.join(xpath(html, expr % tag))
                header = re.sub(r'\s+', ' ', header.strip())
                if header:
                    headers[-1] = header
                    break
        use = titles
        if len(titles) > len(set(titles)):
            use = headers
        for title, item in izip(use, self.oeb.spine):
            if not item.linear: continue
            toc.add(title, item.href)

        oeb.container = DirContainer(os.getcwdu(), oeb.log, ignore_opf=True)
        return oeb

    def link_to_local_path(self, link_, base=None):
        """Resolve `link_` to a (filesystem path, fragment) pair.

        Returns (None, None) for remote resources, undecodable links or
        empty paths.
        """
        from calibre.ebooks.html.input import Link
        if not isinstance(link_, unicode):
            try:
                # BUG FIX: was decode('utf-8', 'error') -- 'error' is not a
                # registered codec error handler, so any non-UTF-8 link raised
                # LookupError instead of UnicodeDecodeError (masked by the
                # bare except below). 'strict' is the intended behavior.
                link_ = link_.decode('utf-8', 'strict')
            except:
                self.log.warn('Failed to decode link %r. Ignoring'%link_)
                return None, None
        try:
            l = Link(link_, base if base else os.getcwdu())
        except:
            self.log.exception('Failed to process link: %r'%link_)
            return None, None
        if l.path is None:
            # Not a local resource
            return None, None
        link = l.path.replace('/', os.sep).strip()
        frag = l.fragment
        if not link:
            return None, None
        return link, frag

    def resource_adder(self, link_, base=None):
        """rewrite_links/replaceUrls callback: add a linked local resource.

        Registers the file behind `link_` in the manifest (once) and returns
        its manifest href (with the original fragment re-attached). Returns
        `link_` unchanged when the target is remote, unreadable or a
        directory, and None for linked plain-text files.
        """
        from urllib import quote
        link, frag = self.link_to_local_path(link_, base=base)
        if link is None:
            return link_
        try:
            if base and not os.path.isabs(link):
                link = os.path.join(base, link)
            link = os.path.abspath(link)
        except:
            return link_
        if not os.access(link, os.R_OK):
            return link_
        if os.path.isdir(link):
            self.log.warn(link_, 'is a link to a directory. Ignoring.')
            return link_
        if not self.is_case_sensitive(tempfile.gettempdir()):
            link = link.lower()
        if link not in self.added_resources:
            bhref = os.path.basename(link)
            id, href = self.oeb.manifest.generate(id='added',
                    href=bhref)
            guessed = self.guess_type(href)[0]
            media_type = guessed or self.BINARY_MIME
            if media_type == 'text/plain':
                self.log.warn('Ignoring link to text file %r'%link_)
                return None
            if media_type == self.BINARY_MIME:
                # Check for the common case, images: sniff the file content
                # to get a real image media type.
                try:
                    img = what(link)
                except EnvironmentError:
                    pass
                else:
                    if img:
                        media_type = self.guess_type('dummy.'+img)[0] or self.BINARY_MIME

            self.oeb.log.debug('Added', link)
            self.oeb.container = self.DirContainer(os.path.dirname(link),
                    self.oeb.log, ignore_opf=True)
            # Load into memory
            item = self.oeb.manifest.add(id, href, media_type)
            # bhref refers to an already existing file. The read() method of
            # DirContainer will call unquote on it before trying to read the
            # file, therefore we quote it here.
            if isinstance(bhref, unicode):
                bhref = bhref.encode('utf-8')
            item.html_input_href = quote(bhref).decode('utf-8')
            if guessed in self.OEB_STYLES:
                item.override_css_fetch = partial(
                        self.css_import_handler, os.path.dirname(link))
            # Accessing .data forces the container to load the file now.
            item.data
            self.added_resources[link] = href

        nlink = self.added_resources[link]
        if frag:
            nlink = '#'.join((nlink, frag))
        return nlink

    def css_import_handler(self, base, href):
        """override_css_fetch hook: load an @import-ed stylesheet from disk.

        Returns (None, raw_css) on success, (None, None) when the target is
        missing, unreadable, a directory, or fails preprocessing.
        """
        link, frag = self.link_to_local_path(href, base=base)
        if link is None or not os.access(link, os.R_OK) or os.path.isdir(link):
            return (None, None)
        try:
            # BUG FIX: was open(...).read() without closing -- the file handle
            # leaked until garbage collection.
            with open(link, 'rb') as f:
                raw = f.read().decode('utf-8', 'replace')
            raw = self.oeb.css_preprocessor(raw, add_namespace=True)
        except:
            self.log.exception('Failed to read CSS file: %r'%link)
            return (None, None)
        return (None, raw)
| gpl-3.0 |
sadikovi/octohaven | src/scheduler/tscheduler.py | 1 | 6483 | #!/usr/bin/env python
#
# Copyright 2015 sadikovi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import src.utils as utils
from flask.ext.sqlalchemy import SignallingSession
from threading import Timer, Lock
from src.loggable import Loggable
from src.timetable import Timetable
from src.octohaven import db
# Minimal interval in seconds between timetable scheduling passes. Cron
# expressions have minute granularity, so anything below 60 seconds is
# pointless.
MINIMAL_INTERVAL = 60.0
# Guards all mutations of the scheduler's runner pool.
pool_lock = Lock()
@utils.private
def action(runner):
    """Run one scheduling pass for ``runner`` and re-arm the next one.

    Opens a fresh DB session, checks the runner's timetable: if it is ACTIVE
    and its cron expression matches the current minute, registers a new job
    copied from the timetable's template job. Finally, if the runner is still
    enabled, spawns a daemon Timer to call this function again, with the
    interval corrected so that wake-ups stay aligned to minute boundaries.
    Each pass therefore runs on its own short-lived thread.
    """
    if not runner:
        raise RuntimeError("Runner is undefined")
    uid = runner.uid
    interval = runner.interval
    # Beginning of the processing of runner, used to correct next interval, and check against
    # cron expression, it is a beginning of the periodic operation
    begin = utils.currentTimeMillis()
    # Session per thread, have to close it at the end of the procedure
    session = SignallingSession(db)
    try:
        runner.logger.info("Start inspecting runner '%s' with interval %s", uid, interval)
        timetable = Timetable.get(session, uid)
        # Check if timetable is active, if not we skip update, otherwise match date with cron
        if timetable and timetable.status == Timetable.ACTIVE:
            runner.logger.info("Runner '%s' - timetable is active", uid)
            matched = timetable.cronExpression().ismatch(begin)
            if matched:
                # create new job as a copy of job used to create timetable, and also add to
                # timetable statistics, note that we ignore delay, and update name of the job
                runner.logger.debug("Runner '%s' preparing to launch new job", uid)
                copy = Timetable.registerNewJob(session, timetable)
                runner.logger.info("Runner '%s' launched new job '%s' (%s)", uid, copy.name,
                    copy.uid)
            else:
                runner.logger.debug("Runner '%s' skipped update, cron match is False", uid)
        elif timetable and timetable.status == Timetable.PAUSED:
            # Paused timetables keep their thread alive but launch nothing.
            runner.logger.info("Runner '%s' - timetable is paused", uid)
        # commit all changes made
        session.commit()
    except Exception as e:
        # Failures are logged and swallowed deliberately: one bad pass must
        # not kill the periodic loop (re-arming happens below regardless).
        runner.logger.error("Runner '%s' failed to launch", uid)
        runner.logger.exception(e.message)
    finally:
        # Close session after thread is complete
        session.close()
    # Create timer for a subsequent lookup, if runner is still active
    if runner.enabled:
        # compute left seconds for next launch, so wake-ups stay aligned to
        # MINIMAL_INTERVAL boundaries regardless of how long this pass took
        secondsElapsed = (begin / 1000) % MINIMAL_INTERVAL
        correctedInterval = MINIMAL_INTERVAL - secondsElapsed
        # Spawning another thread with updated interval; daemon=True so a
        # pending timer never blocks interpreter shutdown.
        timer = Timer(correctedInterval, action, [runner])
        timer.daemon = True
        timer.start()
        runner.logger.debug("Runner '%s' spawned another thread", uid)
        runner.logger.debug("Runner '%s' uses updated interval '%.3f' <= (%.3f)",
            uid, correctedInterval, secondsElapsed)
    else:
        runner.logger.debug("Runner '%s' has been disabled", uid)
        runner.logger.debug("Runner '%s' requested clean up", uid)
        # NOTE(review): this only rebinds the local name; it does not free the
        # runner object itself (the pool holds the real reference) -- confirm
        # intent.
        runner = None
# Unit of execution bound to one timetable: a lightweight handle around the
# self re-arming action() loop.
class TimetableRunner(Loggable, object):

    """Polls a single timetable every `interval` seconds via action().

    The runner is live immediately after construction; stop() flags it as
    disabled so the polling loop exits on its next wake-up.
    """

    def __init__(self, uid, interval):
        super(TimetableRunner, self).__init__()
        self.uid = uid
        self.interval = interval
        self.enabled = False
        # Kick off polling right away.
        self.start()

    def start(self):
        """Enable this runner and launch its polling loop (idempotent)."""
        if not self.enabled:
            self.enabled = True
            action(self)
        else:
            self.logger.warn("Runner '%s' already running", self.uid)
            return None

    def stop(self):
        """Disable this runner; no further jobs will be scheduled."""
        self.enabled = False
# Timetable scheduler singleton. Once started, it fetches all non-cancelled
# timetables and launches a runner for each with a 60 second interval. If a
# timetable is paused its thread is not killed and keeps running, though it
# stops launching jobs. Once a timetable is cancelled it is updated and
# removed from the pool. Once a new timetable is created, it is registered in
# the scheduler pool.
scheduler = Loggable("timetable-scheduler")
# Maps timetable uid -> TimetableRunner; mutations guarded by pool_lock.
scheduler.pool = {}
# Add a new timetable runner to the pool (no-op with a warning if one already
# exists for this uid; error log for an empty/invalid uid).
@utils.private
def addToPool(uid):
    """Register and launch a TimetableRunner for `uid` under the pool lock."""
    # Lock used as a context manager: guarantees release even if runner
    # construction raises, and never releases an unacquired lock (the old
    # try/acquire/finally/release did both wrong when acquire() failed).
    with pool_lock:
        if uid and uid not in scheduler.pool:
            scheduler.pool[uid] = TimetableRunner(uid, MINIMAL_INTERVAL)
            scheduler.logger.info("Launched runner '%s'", uid)
        elif uid and uid in scheduler.pool:
            scheduler.logger.warn("Attempt to launch already added runner '%s', skipped", uid)
        else:
            # Lazy %-args for consistency with every other log call here
            # (the original eagerly formatted this one message).
            scheduler.logger.error("Invalid uid '%s', runner could not be launched", uid)
# Remove a cancelled runner from the pool, stopping its polling loop first.
@utils.private
def removeFromPool(uid):
    """Stop and unregister the runner for `uid` under the pool lock.

    Unknown uids are logged and ignored.
    """
    # Lock used as a context manager: guarantees release even if stop()
    # raises, and never releases an unacquired lock (unlike the previous
    # try/acquire/finally/release layout).
    with pool_lock:
        if uid not in scheduler.pool:
            scheduler.logger.warn("Requested to remove non-existent runner '%s'", uid)
        else:
            scheduler.pool[uid].stop()
            del scheduler.pool[uid]
            scheduler.logger.info("Removed runner '%s' from the pool", uid)
# Generic start function, pulls all active/paused timetables and registers runners
def start():
    """Start the scheduler: spawn one runner per enabled timetable."""
    # We pull all non-cancelled jobs from it to spawn new scheduling threads
    session = SignallingSession(db)
    try:
        timetables = Timetable.listEnabled(session)
    finally:
        # Close the session even if the query raises, so a startup failure
        # does not leak a DB connection (the original leaked it on error).
        session.close()
    for timetable in timetables:
        addToPool(timetable.uid)
    scheduler.logger.info("Timetable scheduler is started")
# Generic stop function, performs clean up of the pool
def stop():
    """Stop the scheduler: tear down every runner and reset the pool."""
    # Snapshot the keys before iterating: removeFromPool() deletes entries
    # from scheduler.pool, which is only safe on Python 2 where .keys()
    # returns a copy; list(...) makes it explicit and Python 3 safe.
    for uid in list(scheduler.pool.keys()):
        removeFromPool(uid)
    # Reset pool
    scheduler.pool = {}
    scheduler.logger.info("Timetable scheduler is stopped")
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.